content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidy_jjm.R
\name{tidy_JJM}
\alias{tidy_JJM}
\title{Tidy results of JJM model}
\usage{
tidy_JJM(models)
}
\arguments{
\item{models}{an object of class jjm.output}
}
\value{
a list of tidy dataframes
}
\description{
Tidy results of JJM model
}
\examples{
\dontrun{
mod0.00 <- readJJM("h2_0.00", path = "config", input = "input")
tidy_jjm_results <- tidy_JJM(mod0.00)
}
}
| /man/tidy_JJM.Rd | no_license | SPRFMO/jjmr | R | false | true | 453 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tidy_jjm.R
\name{tidy_JJM}
\alias{tidy_JJM}
\title{Tidy results of JJM model}
\usage{
tidy_JJM(models)
}
\arguments{
\item{models}{an object of class jjm.output}
}
\value{
a list of tidy dataframes
}
\description{
Tidy results of JJM model
}
\examples{
\dontrun{
mod0.00 <- readJJM("h2_0.00", path = "config", input = "input")
tidy_jjm_results <- tidy_JJM(mod0.00)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Model-class.R
\docType{class}
\name{LogisticNormalFixedMixture-class}
\alias{LogisticNormalFixedMixture-class}
\alias{.LogisticNormalFixedMixture}
\title{Standard logistic model with fixed mixture of multiple bivariate (log) normal priors}
\description{
This is a standard logistic regression model with a mixture of multiple bivariate
(log) normal priors on the intercept and slope parameters. The weights of the
normal priors are fixed, hence no additional model parameters are introduced.
This type of prior is often used to better approximate a given posterior
distribution, or when the information is given in terms of a mixture.
}
\details{
The covariate is the natural logarithm of the dose \eqn{x} divided by
the reference dose \eqn{x^{*}}:
\deqn{logit[p(x)] = \alpha + \beta \cdot \log(x/x^{*})}
where \eqn{p(x)} is the probability of observing a DLT for a given dose
\eqn{x}.
The prior is
\deqn{(\alpha, \beta) \sim
\sum_{j=1}^{K} w_{j} Normal(\mu_{j}, \Sigma_{j})}
if a normal prior is used and
\deqn{(\alpha, \log(\beta)) \sim
\sum_{j=1}^{K} w_{j} Normal(\mu_{j}, \Sigma_{j})}
if a log normal prior is used.
The weights \eqn{w_{j}} of the components are fixed and sum to 1.
The (additional) slots of this class comprise two lists, containing the mean
vector, the covariance and precision matrices of the two bivariate normal
distributions each, the parameters of the beta prior for the first component
weight, as well as the reference dose. Moreover, a slot specifies whether a
log normal prior is used.
}
\section{Slots}{
\describe{
\item{\code{components}}{a list with one entry per component of the mixture.
Each entry is a list with \code{mean}, \code{cov} and \code{prec} for the
bivariate normal prior}
\item{\code{weights}}{the weights of the components, these must be positive and sum
to 1}
\item{\code{refDose}}{the reference dose \eqn{x^{*}}}
\item{\code{logNormal}}{is a log normal prior specified for each of the components?}
}}
\examples{
model <- LogisticNormalFixedMixture(components =
list(comp1 = list(mean = c(-0.85, 1),
cov = matrix(c(1, -0.5, -0.5, 1),
nrow = 2)),
comp2 = list(mean = c(1, 1.5),
cov = matrix(c(1.2, -0.45, -0.45, 0.6),
nrow = 2))),
weights = c(0.3,0.7),
refDose = 50)
}
\keyword{classes}
| /man/LogisticNormalFixedMixture-class.Rd | no_license | cran/crmPack | R | false | true | 2,730 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Model-class.R
\docType{class}
\name{LogisticNormalFixedMixture-class}
\alias{LogisticNormalFixedMixture-class}
\alias{.LogisticNormalFixedMixture}
\title{Standard logistic model with fixed mixture of multiple bivariate (log) normal priors}
\description{
This is standard logistic regression model with a mixture of multiple bivariate
(log) normal priors on the intercept and slope parameters. The weights of the
normal priors are fixed, hence no additional model parameters are introduced.
This type of prior is often used to better approximate a given posterior
distribution, or when the information is given in terms of a mixture.
}
\details{
The covariate is the natural logarithm of the dose \eqn{x} divided by
the reference dose \eqn{x^{*}}:
\deqn{logit[p(x)] = \alpha + \beta \cdot \log(x/x^{*})}
where \eqn{p(x)} is the probability of observing a DLT for a given dose
\eqn{x}.
The prior is
\deqn{(\alpha, \beta) \sim
\sum_{j=1}^{K} w_{j} Normal(\mu_{j}, \Sigma_{j})}
if a normal prior is used and
\deqn{(\alpha, \log(\beta)) \sim
\sum_{j=1}^{K} w_{j} Normal(\mu_{j}, \Sigma_{j})}
if a log normal prior is used.
The weight \eqn{w_{j}} of the components are fixed and sum to 1.
The (additional) slots of this class comprise two lists, containing the mean
vector, the covariance and precision matrices of the two bivariate normal
distributions each, the parameters of the beta prior for the first component
weight, as well as the reference dose. Moreover, a slot specifies whether a
log normal prior is used.
}
\section{Slots}{
\describe{
\item{\code{components}}{a list with one entry per component of the mixture.
Each entry is a list with \code{mean}, \code{cov} and \code{prec} for the
bivariate normal prior}
\item{\code{weights}}{the weights of the components, these must be positive and sum
to 1}
\item{\code{refDose}}{the reference dose \eqn{x^{*}}}
\item{\code{logNormal}}{is a log normal prior specified for each of the components?}
}}
\examples{
model <- LogisticNormalFixedMixture(components =
list(comp1 = list(mean = c(-0.85, 1),
cov = matrix(c(1, -0.5, -0.5, 1),
nrow = 2)),
comp2 = list(mean = c(1, 1.5),
cov = matrix(c(1.2, -0.45, -0.45, 0.6),
nrow = 2))),
weights = c(0.3,0.7),
refDose = 50)
}
\keyword{classes}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_image.R
\name{writeImage}
\alias{writeImage}
\title{This function writes 2- or 3-dimensional image data to a file}
\usage{
writeImage(data, file_name, ...)
}
\arguments{
\item{data}{a 2- or 3-dimensional object (matrix, data frame or array)}
\item{file_name}{a string specifying the name of the new file}
\item{...}{further arguments for the writePNG, writeJPEG and writeTIFF functions}
}
\value{
a saved image file
}
\description{
This function writes 2- or 3-dimensional image data to a file. Supported types are .png, .jpeg, .jpg, .tiff (or .tif, .TIFF, .TIF)
}
\details{
This function takes as input a matrix, data frame or array and saves the data in one of the supported image types ( .png, .jpeg, .jpg, .tiff ).
Extension types similar to .tiff such as .tif, .TIFF, .TIF are also supported
}
\examples{
# path = system.file("tmp_images", "1.png", package = "OpenImageR")
# im = readImage(path)
# writeImage(im, 'new_image.jpeg')
}
| /man/writeImage.Rd | no_license | kota7/OpenImageR | R | false | true | 1,029 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_image.R
\name{writeImage}
\alias{writeImage}
\title{This function writes 2- or 3-dimensional image data to a file}
\usage{
writeImage(data, file_name, ...)
}
\arguments{
\item{data}{a 2- or 3-dimensional object (matrix, data frame or array)}
\item{file_name}{a string specifying the name of the new file}
\item{...}{further arguments for the writePNG, writeJPEG and writeTIFF functions}
}
\value{
a saved image file
}
\description{
This function writes 2- or 3-dimensional image data to a file. Supported types are .png, .jpeg, .jpg, .tiff (or .tif, .TIFF, .TIF)
}
\details{
This function takes as input a matrix, data frame or array and saves the data in one of the supported image types ( .png, .jpeg, .jpg, .tiff ).
Extension types similar to .tiff such as .tif, .TIFF, .TIF are also supported
}
\examples{
# path = system.file("tmp_images", "1.png", package = "OpenImageR")
# im = readImage(path)
# writeImage(im, 'new_image.jpeg')
}
|
# Standard testthat entry point: discovers and runs every test file under
# tests/testthat/ for the ml.tools package (invoked by R CMD check).
library(testthat)
library(ml.tools)
test_check("ml.tools")
| /tests/testthat.R | no_license | decisionpatterns/ml.tools | R | false | false | 60 | r | library(testthat)
library(ml.tools)
test_check("ml.tools")
|
library(svIDE)
### Name: getKeywords
### Title: get all keywords for syntax highlighting
### Aliases: getKeywords
### Keywords: utilities
### ** Examples
getKeywords(1:2)
| /data/genthat_extracted_code/svIDE/examples/getKeywords.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 178 | r | library(svIDE)
### Name: getKeywords
### Title: get all keywords for syntax highlighting
### Aliases: getKeywords
### Keywords: utilities
### ** Examples
getKeywords(1:2)
|
# R code extracted (purled) from the "JuliaCall in Jupyter R Notebook" vignette.
## ----setup, include=FALSE------------------------------------------------
# Show source code for all chunks in the rendered document.
knitr::opts_chunk$set(echo = TRUE)
## ---- results='asis', echo=FALSE-----------------------------------------
# Inline the pre-rendered markdown companion file verbatim into the vignette.
cat(paste(readLines("JuliaCall_in_Jupyter_R_Notebook1.md"), collapse = "\n"))
| /libs/JuliaCall/doc/JuliaCall_in_Jupyter_R_Notebook.R | no_license | mpg-age-bioinformatics/shiny-LifeSpanCurves | R | false | false | 267 | r | ## ----setup, include=FALSE------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
## ---- results='asis', echo=FALSE-----------------------------------------
cat(paste(readLines("JuliaCall_in_Jupyter_R_Notebook1.md"), collapse = "\n"))
|
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = c(7.69395670445668e+35, 1.18563453592977e+223, 5.95835080989286e-136, 2.07507571253324e-322, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807203693963e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) | /meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615848675-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 720 | r | testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = c(7.69395670445668e+35, 1.18563453592977e+223, 5.95835080989286e-136, 2.07507571253324e-322, 0, 0, 0, 0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807203693963e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
# Extracted runnable example for the plot() methods of the funcy package.
library(funcy)
### Name: plot-methods
### Title: Methods for Function 'plot' in Package 'funcy'
### Aliases: plot,funcyOutList,missing-method plot,funcyOut,missing-method
###   plot,funcyOut,ANY-method plot,funcyOutMbc-fitfclust,missing-method
###   plot,funcyOutMbc-fscm,missing-method
### Keywords: plot
### ** Examples
# Simulate 60 irregularly sampled curves (reg=FALSE) from k=4 clusters,
# with 5-10 time points per curve.
set.seed(2804)
ds <- sampleFuncy(obsNr=60, k=4, timeNrMin=5, timeNrMax=10, reg=FALSE)
data <- Data(ds)
clusters <- Cluster(ds)
# Cluster the simulated curves with three methods (run in parallel),
# using the known cluster labels for evaluation.
res <- funcit(data=data, clusters=clusters,
        methods=c("fitfclust","distclust", "iterSubspace") ,
        k=4, parallel=TRUE)
# Default overview plot, then method-specific diagnostic plot types.
plot(res)
plot(res, select="fitfclust", type="conf")
plot(res, select="fitfclust", type="discrim")
plot(res, select="distclust", type="shadow")
### Name: plot-methods
### Title: Methods for Function 'plot' in Package 'funcy'
### Aliases: plot,funcyOutList,missing-method plot,funcyOut,missing-method
### plot,funcyOut,ANY-method plot,funcyOutMbc-fitfclust,missing-method
### plot,funcyOutMbc-fscm,missing-method
### Keywords: plot
### ** Examples
set.seed(2804)
ds <- sampleFuncy(obsNr=60, k=4, timeNrMin=5, timeNrMax=10, reg=FALSE)
data <- Data(ds)
clusters <- Cluster(ds)
res <- funcit(data=data, clusters=clusters,
methods=c("fitfclust","distclust", "iterSubspace") ,
k=4, parallel=TRUE)
plot(res)
plot(res, select="fitfclust", type="conf")
plot(res, select="fitfclust", type="discrim")
plot(res, select="distclust", type="shadow")
|
# Unit tests for net_sensspec(): combines the sensitivities/specificities of a
# battery of diagnostic tests under a positivity threshold (how many of the
# individual tests must be positive for the combined result to be positive).
context("net_sensspec")
suppressWarnings(library(Rfast))
library(utils)
library(assertthat)
test_that("different numbeer of tests", {
  # 4 tests
  expect_equal(net_sensspec(pos_threshold = 2,
                            sens = c(0.1,0.2,0.3,0.4),
                            spec = c(0.1,0.2,0.3,0.4)),
               list(net_sens = 0.2572, net_spec = 0.0428))
  # 3 tests
  expect_equal(net_sensspec(pos_threshold = 2,
                            sens = c(0.1,0.2,0.3),
                            spec = c(0.1,0.2,0.3)),
               list(net_sens = 0.098, net_spec = 0.098))
  # 2 tests
  expect_equal(net_sensspec(pos_threshold = 2,
                            sens = c(0.1,0.2),
                            spec = c(0.1,0.2)),
               list(net_sens = 0.02, net_spec = 0.28))
})
test_that("edge cases", {
  # Single test, threshold 1: net values equal the individual test's values.
  expect_equal(net_sensspec(pos_threshold = 1,
                            sens = 0.1,
                            spec = 0.2),
               list(net_sens = 0.1, net_spec = 0.2))
  # Threshold 0: every subject is classed positive (sens 1, spec 0).
  expect_equal(net_sensspec(pos_threshold = 0,
                            sens = 0.1,
                            spec = 0.2),
               list(net_sens = 1, net_spec = 0))
  # Threshold above the number of tests: nobody is positive (sens 0, spec 1).
  expect_equal(net_sensspec(pos_threshold = 2,
                            sens = 0.1,
                            spec = 0.2),
               list(net_sens = 0, net_spec = 1))
})
test_that("errors and warnings", {
  # Sensitivity/specificity outside [0, 1] must be rejected.
  expect_error(net_sensspec(pos_threshold = 1,
                            sens = 0.1,
                            spec = 2))
  expect_error(net_sensspec(pos_threshold = 1,
                            sens = 2,
                            spec = 0.2))
  # Negative thresholds are invalid.
  expect_error(net_sensspec(pos_threshold = -1,
                            sens = 0.1,
                            spec = 0.2))
  # A non-integer threshold only triggers a message, not an error.
  expect_message(net_sensspec(pos_threshold = 1.2,
                            sens = 0.1,
                            spec = 0.2))
})
suppressWarnings(library(Rfast))
library(utils)
library(assertthat)
test_that("different numbeer of tests", {
# 4 tests
expect_equal(net_sensspec(pos_threshold = 2,
sens = c(0.1,0.2,0.3,0.4),
spec = c(0.1,0.2,0.3,0.4)),
list(net_sens = 0.2572, net_spec = 0.0428))
# 3 tests
expect_equal(net_sensspec(pos_threshold = 2,
sens = c(0.1,0.2,0.3),
spec = c(0.1,0.2,0.3)),
list(net_sens = 0.098, net_spec = 0.098))
# 2 tests
expect_equal(net_sensspec(pos_threshold = 2,
sens = c(0.1,0.2),
spec = c(0.1,0.2)),
list(net_sens = 0.02, net_spec = 0.28))
})
test_that("edge cases", {
expect_equal(net_sensspec(pos_threshold = 1,
sens = 0.1,
spec = 0.2),
list(net_sens = 0.1, net_spec = 0.2))
expect_equal(net_sensspec(pos_threshold = 0,
sens = 0.1,
spec = 0.2),
list(net_sens = 1, net_spec = 0))
expect_equal(net_sensspec(pos_threshold = 2,
sens = 0.1,
spec = 0.2),
list(net_sens = 0, net_spec = 1))
})
test_that("errors and warnings", {
expect_error(net_sensspec(pos_threshold = 1,
sens = 0.1,
spec = 2))
expect_error(net_sensspec(pos_threshold = 1,
sens = 2,
spec = 0.2))
expect_error(net_sensspec(pos_threshold = -1,
sens = 0.1,
spec = 0.2))
expect_message(net_sensspec(pos_threshold = 1.2,
sens = 0.1,
spec = 0.2))
})
|
library(tidyr)
context("test unpivot")
test_that("unpivot works", {
result <-
structure(
list(
col1 = c(
"a1",
"a1",
"a1",
"a1",
"a1",
"a1",
"a1",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"Total a1",
"Total a1",
"Total a1",
"Total a1",
"Total a1",
"Total a1",
"Total a1",
"a2",
"a2",
"a2",
"a2",
"a2",
"a2",
"a2",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"Total a2",
"Total a2",
"Total a2",
"Total a2",
"Total a2",
"Total a2",
"Total a2",
"Total general",
"Total general",
"Total general",
"Total general",
"Total general",
"Total general",
"Total general"
),
col2 = c(
"b1",
"b1",
"b1",
"b1",
"b1",
"b1",
"b1",
"b2",
"b2",
"b2",
"b2",
"b2",
"b2",
"b2",
"b3",
"b3",
"b3",
"b3",
"b3",
"b3",
"b3",
"",
"",
"",
"",
"",
"",
"",
"b1",
"b1",
"b1",
"b1",
"b1",
"b1",
"b1",
"b2",
"b2",
"b2",
"b2",
"b2",
"b2",
"b2",
"b3",
"b3",
"b3",
"b3",
"b3",
"b3",
"b3",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
""
),
row1 = c(
"e1",
"",
"Total e1",
"e2",
"",
"Total e2",
"Total general",
"e1",
"",
"Total e1",
"e2",
"",
"Total e2",
"Total general",
"e1",
"",
"Total e1",
"e2",
"",
"Total e2",
"Total general",
"e1",
"",
"Total e1",
"e2",
"",
"Total e2",
"Total general",
"e1",
"",
"Total e1",
"e2",
"",
"Total e2",
"Total general",
"e1",
"",
"Total e1",
"e2",
"",
"Total e2",
"Total general",
"e1",
"",
"Total e1",
"e2",
"",
"Total e2",
"Total general",
"e1",
"",
"Total e1",
"e2",
"",
"Total e2",
"Total general",
"e1",
"",
"Total e1",
"e2",
"",
"Total e2",
"Total general"
),
row2 = c(
"d1",
"d2",
"",
"d1",
"d2",
"",
"",
"d1",
"d2",
"",
"d1",
"d2",
"",
"",
"d1",
"d2",
"",
"d1",
"d2",
"",
"",
"d1",
"d2",
"",
"d1",
"d2",
"",
"",
"d1",
"d2",
"",
"d1",
"d2",
"",
"",
"d1",
"d2",
"",
"d1",
"d2",
"",
"",
"d1",
"d2",
"",
"d1",
"d2",
"",
"",
"d1",
"d2",
"",
"d1",
"d2",
"",
"",
"d1",
"d2",
"",
"d1",
"d2",
"",
""
),
value = c(
"2,99",
"1,02",
"4,01",
"4,06",
"1,32",
"5,38",
"9,39",
"3,89",
"3,65",
"7,54",
"5,55",
"",
"5,55",
"13,09",
"2,33",
"",
"2,33",
"1,87",
"",
"1,87",
"4,2",
"9,21",
"4,67",
"13,88",
"11,48",
"1,32",
"12,8",
"26,68",
"5,62",
"1,94",
"7,56",
"4,59",
"2,13",
"6,72",
"14,28",
"3,82",
"7,72",
"11,54",
"4,78",
"2,94",
"7,72",
"19,26",
"5,36",
"6,38",
"11,74",
"1,69",
"1,78",
"3,47",
"15,21",
"14,8",
"16,04",
"30,84",
"11,06",
"6,85",
"17,91",
"48,75",
"24,01",
"20,71",
"44,72",
"22,54",
"8,17",
"30,71",
"75,43"
)
),
row.names = c(NA, -63L),
class = c("tbl_df",
"tbl", "data.frame")
)
pt <-
list_pt_ie[[1]] %>%
remove_top(1) %>%
define_labels(n_col = 2, n_row = 2) %>%
unpivot(include_page = FALSE)
expect_equal(pt, result)
})
| /tests/testthat/test-unpivot.R | no_license | cran/flattabler | R | false | false | 6,218 | r | library(tidyr)
context("test unpivot")
test_that("unpivot works", {
result <-
structure(
list(
col1 = c(
"a1",
"a1",
"a1",
"a1",
"a1",
"a1",
"a1",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"Total a1",
"Total a1",
"Total a1",
"Total a1",
"Total a1",
"Total a1",
"Total a1",
"a2",
"a2",
"a2",
"a2",
"a2",
"a2",
"a2",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"Total a2",
"Total a2",
"Total a2",
"Total a2",
"Total a2",
"Total a2",
"Total a2",
"Total general",
"Total general",
"Total general",
"Total general",
"Total general",
"Total general",
"Total general"
),
col2 = c(
"b1",
"b1",
"b1",
"b1",
"b1",
"b1",
"b1",
"b2",
"b2",
"b2",
"b2",
"b2",
"b2",
"b2",
"b3",
"b3",
"b3",
"b3",
"b3",
"b3",
"b3",
"",
"",
"",
"",
"",
"",
"",
"b1",
"b1",
"b1",
"b1",
"b1",
"b1",
"b1",
"b2",
"b2",
"b2",
"b2",
"b2",
"b2",
"b2",
"b3",
"b3",
"b3",
"b3",
"b3",
"b3",
"b3",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
"",
""
),
row1 = c(
"e1",
"",
"Total e1",
"e2",
"",
"Total e2",
"Total general",
"e1",
"",
"Total e1",
"e2",
"",
"Total e2",
"Total general",
"e1",
"",
"Total e1",
"e2",
"",
"Total e2",
"Total general",
"e1",
"",
"Total e1",
"e2",
"",
"Total e2",
"Total general",
"e1",
"",
"Total e1",
"e2",
"",
"Total e2",
"Total general",
"e1",
"",
"Total e1",
"e2",
"",
"Total e2",
"Total general",
"e1",
"",
"Total e1",
"e2",
"",
"Total e2",
"Total general",
"e1",
"",
"Total e1",
"e2",
"",
"Total e2",
"Total general",
"e1",
"",
"Total e1",
"e2",
"",
"Total e2",
"Total general"
),
row2 = c(
"d1",
"d2",
"",
"d1",
"d2",
"",
"",
"d1",
"d2",
"",
"d1",
"d2",
"",
"",
"d1",
"d2",
"",
"d1",
"d2",
"",
"",
"d1",
"d2",
"",
"d1",
"d2",
"",
"",
"d1",
"d2",
"",
"d1",
"d2",
"",
"",
"d1",
"d2",
"",
"d1",
"d2",
"",
"",
"d1",
"d2",
"",
"d1",
"d2",
"",
"",
"d1",
"d2",
"",
"d1",
"d2",
"",
"",
"d1",
"d2",
"",
"d1",
"d2",
"",
""
),
value = c(
"2,99",
"1,02",
"4,01",
"4,06",
"1,32",
"5,38",
"9,39",
"3,89",
"3,65",
"7,54",
"5,55",
"",
"5,55",
"13,09",
"2,33",
"",
"2,33",
"1,87",
"",
"1,87",
"4,2",
"9,21",
"4,67",
"13,88",
"11,48",
"1,32",
"12,8",
"26,68",
"5,62",
"1,94",
"7,56",
"4,59",
"2,13",
"6,72",
"14,28",
"3,82",
"7,72",
"11,54",
"4,78",
"2,94",
"7,72",
"19,26",
"5,36",
"6,38",
"11,74",
"1,69",
"1,78",
"3,47",
"15,21",
"14,8",
"16,04",
"30,84",
"11,06",
"6,85",
"17,91",
"48,75",
"24,01",
"20,71",
"44,72",
"22,54",
"8,17",
"30,71",
"75,43"
)
),
row.names = c(NA, -63L),
class = c("tbl_df",
"tbl", "data.frame")
)
pt <-
list_pt_ie[[1]] %>%
remove_top(1) %>%
define_labels(n_col = 2, n_row = 2) %>%
unpivot(include_page = FALSE)
expect_equal(pt, result)
})
|
# Shiny "Miles Per Gallon" explorer: boxplots of mpg from the mtcars
# dataset against a user-selected grouping variable, with a checkbox
# that toggles the display of outliers.

library(shiny)
library(datasets)

# Data pre-processing ----
# Done once at startup (not per session): relabel the transmission
# column so the plot axis shows readable factor levels.
mpgData <- mtcars
mpgData$am <- factor(mpgData$am, labels = c("Automatic", "Manual"))

# Display label -> mtcars column name, offered in the selector below.
variable_choices <- c(
  "Cylinders"    = "cyl",
  "Transmission" = "am",
  "Gears"        = "gear"
)

# UI definition ----
# Sidebar holds the two inputs; the main panel shows the caption and plot.
ui <- fluidPage(
  titlePanel("Miles Per Gallon"),
  sidebarLayout(
    sidebarPanel(
      selectInput("variable", "Variable:", variable_choices),
      checkboxInput("outliers", "Show outliers", TRUE)
    ),
    mainPanel(
      h3(textOutput("caption")),
      plotOutput("mpgPlot")
    )
  )
)

# Server logic ----
server <- function(input, output) {

  # The model formula is shared by both outputs, so it lives in a
  # reactive expression and is recomputed only when the input changes.
  formulaText <- reactive({
    paste("mpg ~", input$variable)
  })

  # Caption simply echoes the current formula.
  output$caption <- renderText({
    formulaText()
  })

  # Boxplot of mpg against the chosen variable; outliers are drawn
  # only when the checkbox is ticked.
  output$mpgPlot <- renderPlot({
    boxplot(as.formula(formulaText()),
            data = mpgData,
            outline = input$outliers,
            col = "#75AADB", pch = 19)
  })
}

# Launch the app ----
shinyApp(ui, server)
library(datasets)
# Data pre-processing ----
# Tweak the "am" variable to have nicer factor labels -- since this
# doesn't rely on any user inputs, we can do this once at startup
# and then use the value throughout the lifetime of the app
mpgData <- mtcars
mpgData$am <- factor(mpgData$am, labels = c("Automatic", "Manual"))
# Define UI for miles per gallon app ----
ui <- fluidPage(
# App title ----
titlePanel("Miles Per Gallon"),
# Sidebar layout with input and output definitions ----
sidebarLayout(
# Sidebar panel for inputs ----
sidebarPanel(
# Input: Selector for variable to plot against mpg ----
selectInput("variable", "Variable:",
c("Cylinders" = "cyl",
"Transmission" = "am",
"Gears" = "gear")),
# Input: Checkbox for whether outliers should be included ----
checkboxInput("outliers", "Show outliers", TRUE)
),
# Main panel for displaying outputs ----
mainPanel(
# Output: Formatted text for caption ----
h3(textOutput("caption")),
# Output: Plot of the requested variable against mpg ----
plotOutput("mpgPlot")
)
)
)
# Define server logic to plot various variables against mpg ----
server <- function(input, output) {
# Compute the formula text ----
# This is in a reactive expression since it is shared by the
# output$caption and output$mpgPlot functions
formulaText <- reactive({
paste("mpg ~", input$variable)
})
# Return the formula text for printing as a caption ----
output$caption <- renderText({
formulaText()
})
# Generate a plot of the requested variable against mpg ----
# and only exclude outliers if requested
output$mpgPlot <- renderPlot({
boxplot(as.formula(formulaText()),
data = mpgData,
outline = input$outliers,
col = "#75AADB", pch = 19)
})
}
# Create Shiny app ----
shinyApp(ui, server) |
# Genome-wide plot of sex-interactive pQTLs (protein QTLs) for the DO
# cross-sectional kidney study: a scatter of QTL position vs. gene position
# in cumulative genome coordinates, stacked over a QTL-density histogram,
# written to a single PDF.
library(tidyverse)
library(grid)
# Best marker per protein from the sex-interactive scan (absolute path;
# only runs on the cluster filesystem).
best <- read.csv("/projects/churchill-lab/projects/JAC/Takemon_DO_crosssectional_kidney/results/QTLscan/protein/BestMarker_SexInt_protein_nobatch.csv")
# picking blunt threshold
LODthreshold_diff <- 7.5
# Reorder "chr" factors so chromosomes sort 1..19, X, Y, MT (not lexically).
chr_full <- c("1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19","X","Y", "MT")
best$chr <- factor(best$chr, levels = chr_full)
best$IntSexChr <- factor(best$IntSexChr, levels= chr_full)
# Keep only chr 1-19 and X (drop Y / MT rows; factor levels are retained).
chr <- c("1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19","X")
best <- best[best$chr %in% chr, ]
# Subset to sex-interactive QTLs whose LOD difference exceeds the threshold.
# (Comments below mentioning "Int-Age" appear inherited from an
# age-interactive variant of this script; the data here are sex-interactive.)
Int_age <- best[(best$IntSexLODDiff > LODthreshold_diff),] # above diff threshold
# Annotate positions with genes and save file for sharing (disabled):
#save_int_age <- arrange(Int_age, IntSexChr, IntSexPos)
#write_csv(save_int_age, path = paste0("./QTLscan/scanBestMarker_protein/BestMarker_BestperGene_protein_thr",LODthreshold_diff,".csv"))
# Convert transcript and qtl position relative to chromosome positions,
# in megabases.
# NOTE(review): Int_age$chr still carries the unused Y/MT levels, so split()
# presumably produces empty groups and max(numeric(0)) returns -Inf (with a
# warning) for them; only entries 1-20 of chrsum are used below -- confirm.
chrlen <- sapply(split(Int_age$end, Int_age$chr), max) * 1e-6
chrsum <- c(0, cumsum(chrlen))
names(chrsum) = names(chrlen)
t.gmb <- Int_age$start * 1e-6 # Transcript
q.gmb <- Int_age$IntSexPos # qtl
# Offset each chromosome's positions by the cumulative length of all
# preceding chromosomes, yielding genome-wide coordinates. c(2:19, "X") is
# coerced to character, so chrsum is indexed by name.
for(i in c(2:19, "X")) {
  wh <- which(Int_age$chr == i)
  t.gmb[wh] <- t.gmb[wh] + chrsum[i]
  wh <- which(Int_age$IntSexChr == i)
  q.gmb[wh] <- q.gmb[wh] + chrsum[i]
}
Int_age$t_gbm <- t.gmb
Int_age$q_gbm <- q.gmb
# Custom labels & lines: axis ticks only for chr 1-19, X.
chrtick <- chrsum[1:20]
# Shift each axis tick to the chromosome's midpoint.
# NOTE(review): `max` shadows base::max() from here on (calls like max(...)
# still dispatch to the function); consider renaming, e.g. max_pos.
max <- max(Int_age$q_gbm)
chrtick_half <- NULL
for (i in 1:length(chrtick)){
  if (i == 20){
    # Last chromosome (X): midpoint between its start and the largest QTL position.
    x <- (chrtick[i] + max)/2
    chrtick_half <- c(chrtick_half, x)
  } else {
    x <- (chrtick[i] + chrtick[i + 1])/2
    chrtick_half <- c(chrtick_half, x)
  }
}
# To adjust the QTL plot a bit to the right to match the density panel.
chrtick_halfy <- chrtick_half
names(chrtick_halfy)[20] <- "X "
# Main scatter: QTL position (x) vs. gene position (y), with grey
# chromosome-boundary lines on both axes.
pPlot <- ggplot(Int_age, aes(x= q_gbm, y= t_gbm)) +
  geom_point(alpha = 0.2) +
  scale_x_continuous("QTL position",
                     breaks = chrtick_half,
                     limits = c(min(Int_age$q_gbm), max(Int_age$q_gbm)),
                     expand = c(0,0)) +
  scale_y_continuous("Gene position",
                     breaks = chrtick_halfy,
                     limits = c(min(Int_age$t_gbm), max(Int_age$t_gbm)),
                     expand = c(0,0)) +
  geom_vline(xintercept = chrtick[2:20], colour = "grey", size = 0.2) +
  geom_hline(yintercept = chrtick[2:20], colour = "grey", size = 0.2) +
  labs( title = "Interactive-Sex pQTLscan by Marker") +
  theme_bw() +
  theme(plot.title = element_text(hjust = 0.5),
        panel.background = element_blank(),
        panel.grid.minor = element_blank(),
        panel.grid.major = element_blank(),
        legend.position = "top",
        panel.border = element_rect(colour = "black", size = 0.2, fill = NA))
# Density panel: histogram of QTL positions in 10 Mb bins, sharing the
# x-axis layout of the scatter above.
# NOTE(review): `guide = FALSE` in scale_*_manual is deprecated in current
# ggplot2 (use guide = "none"); left as-is to preserve behavior here.
pdensity <- ggplot(Int_age, aes(q_gbm, colour = "grey", fill = "grey")) +
  geom_histogram(breaks = seq(0,max(Int_age$q_gbm), by = 10)) +
  scale_colour_manual(name = NA, values = c(grey = "grey"), guide = FALSE) +
  scale_fill_manual(name = NA, values = c(grey = "grey"), guide = FALSE) +
  scale_x_continuous("QTL position",
                     breaks = chrtick_half,
                     limits = c(min(Int_age$q_gbm), max(Int_age$q_gbm)),
                     expand = c(0,0)) +
  scale_y_continuous(name ="Density",
                     breaks = seq(0,450, by = 10)) +
  geom_vline(xintercept = chrtick[2:20], colour = "grey", size = 0.2) +
  theme_bw() +
  theme(plot.title = element_text(hjust = 0.5),
        panel.background = element_blank(),
        panel.grid.minor = element_blank(),
        panel.grid.major = element_blank(),
        panel.border = element_rect(colour = "black", size = 0.2, fill = NA))
# Compose both panels into one PDF: scatter in rows 1-8 of a 10x10 grid
# layout, density histogram in rows 9-10.
pdf("/projects/churchill-lab/projects/JAC/Takemon_DO_crosssectional_kidney/results/Plots/QTLscan/pQTL_BestMarker_sexint.pdf", height = 8, width = 7)
pushViewport(viewport( layout = grid.layout(10,10)))
print(pPlot, vp = viewport(layout.pos.row = 1:8, layout.pos.col = 1:10))
print(pdensity, vp = viewport(layout.pos.row = 9:10, layout.pos.col = 1:10))
dev.off()
| /chuchill-lab/QTLscan_plots/pQTL_sexint.R | permissive | ytakemon/JAC_DO_Kidney | R | false | false | 4,436 | r | library(tidyverse)
library(grid)
# Per-protein best sex-interactive QTL markers (LOD scores and positions)
# from the DO kidney cross-sectional protein scan.
best <- read.csv("/projects/churchill-lab/projects/JAC/Takemon_DO_crosssectional_kidney/results/QTLscan/protein/BestMarker_SexInt_protein_nobatch.csv")
# picking blunt threshold
LODthreshold_diff <- 7.5
# Using best to create plot
# need to reorder "chr" factors
chr_full <- c("1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19","X","Y", "MT")
best$chr <- factor(best$chr, levels = chr_full)
best$IntSexChr <- factor(best$IntSexChr, levels= chr_full)
# Subset out chr 1-19,X from data
chr <- c("1","2","3","4","5","6","7","8","9","10","11","12","13","14","15","16","17","18","19","X")
best <- best[best$chr %in% chr, ]
# Plot Interactive-Age eQTLs
# Subset Int-Age LOD above total/full and diff
# NOTE(review): the "Age" wording in the comments looks copied from an
# age-interaction script; the columns actually used throughout are the
# sex-interaction scores (IntSexLODDiff / IntSexChr / IntSexPos).
Int_age <- best[(best$IntSexLODDiff > LODthreshold_diff),] # above diff threshold
# Annotate Interactive-Age postion with genes and save file for sharing
#save_int_age <- arrange(Int_age, IntSexChr, IntSexPos)
#write_csv(save_int_age, path = paste0("./QTLscan/scanBestMarker_protein/BestMarker_BestperGene_protein_thr",LODthreshold_diff,".csv"))
# Convert transcript and qtl position relative to chromosome positions
# Convert to megabases
chrlen <- sapply(split(Int_age$end, Int_age$chr), max) * 1e-6
chrsum <- c(0, cumsum(chrlen))
names(chrsum) = names(chrlen)
t.gmb <- Int_age$start * 1e-6 # Transcript
q.gmb <- Int_age$IntSexPos # qtl
# Cumulative sum of previosu positions
# Shift each chromosome's Mb coordinates by the summed lengths of all
# preceding chromosomes to obtain genome-wide x/y positions.
for(i in c(2:19, "X")) {
  wh <- which(Int_age$chr == i)
  t.gmb[wh] <- t.gmb[wh] + chrsum[i]
  wh <- which(Int_age$IntSexChr == i)
  q.gmb[wh] <- q.gmb[wh] + chrsum[i]
}
Int_age$t_gbm <- t.gmb
Int_age$q_gbm <- q.gmb
# Custom lablels & lines
# Only display chr1:19,X
chrtick <- chrsum[1:20]
# Shift axis tick to half way point
# NOTE(review): `max` shadows base::max for the remainder of this script.
max <- max(Int_age$q_gbm)
chrtick_half <- NULL
for (i in 1:length(chrtick)){
  if (i == 20){
    x <- (chrtick[i] + max)/2
    chrtick_half <- c(chrtick_half, x)
  } else {
    x <- (chrtick[i] + chrtick[i + 1])/2
    chrtick_half <- c(chrtick_half, x)
  }
}
#to adjust eQTL plot a bit to the right to match density
chrtick_halfy <- chrtick_half
names(chrtick_halfy)[20] <- "X  "
# eQTL plot
# Scatter of gene position vs QTL position; points on the diagonal are
# local (cis-acting) QTL, off-diagonal points are distal (trans).
pPlot <- ggplot(Int_age, aes(x= q_gbm, y= t_gbm)) +
  geom_point(alpha = 0.2) +
  scale_x_continuous("QTL position",
                     breaks = chrtick_half,
                     limits = c(min(Int_age$q_gbm), max(Int_age$q_gbm)),
                     expand = c(0,0)) +
  scale_y_continuous("Gene position",
                     breaks = chrtick_halfy,
                     limits = c(min(Int_age$t_gbm), max(Int_age$t_gbm)),
                     expand = c(0,0)) +
  geom_vline(xintercept = chrtick[2:20], colour = "grey", size = 0.2) +
  geom_hline(yintercept = chrtick[2:20], colour = "grey", size = 0.2) +
  labs( title = "Interactive-Sex pQTLscan by Marker") +
  theme_bw() +
  theme(plot.title = element_text(hjust = 0.5),
        panel.background = element_blank(),
        panel.grid.minor = element_blank(),
        panel.grid.major = element_blank(),
        legend.position = "top",
        panel.border = element_rect(colour = "black", size = 0.2, fill = NA))
# Histogram of QTL positions (10 Mb bins), drawn as a density track to be
# aligned under the scatter plot above.
pdensity <- ggplot(Int_age, aes(q_gbm, colour = "grey", fill = "grey")) +
  geom_histogram(breaks = seq(0,max(Int_age$q_gbm), by = 10)) +
  scale_colour_manual(name = NA, values = c(grey = "grey"), guide = FALSE) +
  scale_fill_manual(name = NA, values = c(grey = "grey"), guide = FALSE) +
  scale_x_continuous("QTL position",
                     breaks = chrtick_half,
                     limits = c(min(Int_age$q_gbm), max(Int_age$q_gbm)),
                     expand = c(0,0)) +
  scale_y_continuous(name ="Density",
                     breaks = seq(0,450, by = 10)) +
  geom_vline(xintercept = chrtick[2:20], colour = "grey", size = 0.2) +
  theme_bw() +
  theme(plot.title = element_text(hjust = 0.5),
        panel.background = element_blank(),
        panel.grid.minor = element_blank(),
        panel.grid.major = element_blank(),
        panel.border = element_rect(colour = "black", size = 0.2, fill = NA))
# Write both panels to one PDF: scatter on rows 1-8, histogram on rows 9-10
# of a 10x10 grid-viewport layout.
pdf("/projects/churchill-lab/projects/JAC/Takemon_DO_crosssectional_kidney/results/Plots/QTLscan/pQTL_BestMarker_sexint.pdf", height = 8, width = 7)
pushViewport(viewport( layout = grid.layout(10,10)))
print(pPlot, vp = viewport(layout.pos.row = 1:8, layout.pos.col = 1:10))
print(pdensity, vp = viewport(layout.pos.row = 9:10, layout.pos.col = 1:10))
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/treeSkeleton.R
\name{treeSkeleton__first_leaf}
\alias{treeSkeleton__first_leaf}
\title{Find the first leaf in a tree.}
\usage{
treeSkeleton__first_leaf()
}
\value{
The first leaf, that is, the first terminal child node.
}
\description{
Find the first leaf in a tree.
}
| /man/treeSkeleton__first_leaf.Rd | permissive | syberia/stagerunner | R | false | true | 347 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/treeSkeleton.R
\name{treeSkeleton__first_leaf}
\alias{treeSkeleton__first_leaf}
\title{Find the first leaf in a tree.}
\usage{
treeSkeleton__first_leaf()
}
\value{
The first leaf, that is, the first terminal child node.
}
\description{
Find the first leaf in a tree.
}
|
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = numeric(0), ra = numeric(0), relh = c(-6.67707850404722e+133, 5.32948612168953e-320, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), rs = numeric(0), temp = c(8.57286817725031e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889882874073e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.6160581762258e+76, -1.18078903777423e-90, 5.48977020003552e+73, -5.58551357556946e+160, 2.00994342527714e-162, 1.04568522234333e+254, -1.44288984971022e+71, -7.00861543724366e-295, -4.5541495757366e-200))
# Auto-generated fuzz harness: feed the degenerate/extreme argument list to
# the internal C++ Penman-Monteith ET0 routine. This exists to surface
# memory errors (e.g. under valgrind), not to check numeric results.
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result) | /meteor/inst/testfiles/ET0_PenmanMonteith/AFL_ET0_PenmanMonteith/ET0_PenmanMonteith_valgrind_files/1615842142-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 1,038 | r | testlist <- list(G = numeric(0), Rn = numeric(0), atmp = numeric(0), ra = numeric(0), relh = c(-6.67707850404722e+133, 5.32948612168953e-320, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), rs = numeric(0), temp = c(8.57286817725031e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889882874073e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.6160581762258e+76, -1.18078903777423e-90, 5.48977020003552e+73, -5.58551357556946e+160, 2.00994342527714e-162, 1.04568522234333e+254, -1.44288984971022e+71, -7.00861543724366e-295, -4.5541495757366e-200))
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result) |
#' Summarise a set of Mplus Monte-Carlo replication outputs (linear model).
#'
#' Arguments:
#'   x          list of parsed Mplus output objects, one per converged
#'              replication (structure matches MplusAutomation-style output;
#'              TODO confirm against the reader actually used).
#'   silent     currently unused.
#'   rep_filter currently unused (intended file-name filter, see comment).
#'   residuals  "high" or "low"; selects the set of true generating values.
#'   nsims      total number of simulated replications; convergence rate is
#'              length(x) / nsims.
#'
#' Returns a list with Title, Results (true values, mean estimates, mean 95%
#' CI, relative bias, RMSE per parameter) and Diagnostics (DIC, power,
#' convergence rate).
create_output_lin <- function(x, silent=TRUE, rep_filter= "_rep", residuals, nsims) {
  ## Packages
  # NOTE(review): installing packages as a side effect of calling a function
  # is fragile; consider declaring these as dependencies instead.
  packages <- c("expss", "filesstrings")
  package.check <- lapply(packages, FUN = function(x) {
    if (!require(x, character.only = TRUE)) {
      install.packages(x, dependencies = TRUE)
      library(x, character.only = TRUE)
    }
  })
  files <- x
  ## Split each file name in 2 after the rep_filter term, save what comes after
  title <- files[[1]][[1]]$title
  # Number of estimated parameters, taken from the first replication.
  nparam <- nrow(files[[1]]$parameters$unstandardized)
  est<-matrix(0,nrow=length(files),ncol=nparam) #vector of length #outputs
  #est_mean <- matrix(0,nrow=1, ncol = 13)
  # L/U hold the per-replication lower/upper 95% interval bounds.
  L <- matrix(0,nrow=length(files),ncol=nparam)
  U <- matrix(0,nrow=length(files),ncol=nparam)
  bias<-matrix(0,nrow=nparam,ncol=1)
  # Pull the point estimate (column 3) and interval bounds (columns 6/7)
  # from the parameter table ([[9]][[1]]) of each replication.
  # NOTE(review): magic list indices assume a fixed output structure —
  # verify against the Mplus reader version in use.
  for(i in 1:length(files)){
    for(d in 1:ncol(est)){
      est[i,d]<-as.numeric(files[[i]][[9]][[1]][[3]][d])
      L[i,d]<-as.numeric(files[[i]][[9]][[1]][[6]][d])
      U[i,d]<-as.numeric(files[[i]][[9]][[1]][[7]][d])
    }
    # NOTE(review): this assignment is overwritten on every iteration, so
    # only the LAST replication's DIC survives; if the mean DIC across
    # replications is intended, accumulate over `files` instead.
    DIC <- mean(as.numeric(files[[i]][[7]]$DIC))
  }
  # Row labels "paramHeader param"; only the last iteration's value is kept
  # (labels are presumably identical across replications — TODO confirm).
  for(i in 1:length(files)){
    rows<-paste(files[[i]][[9]][[1]][[1]],files[[i]][[9]][[1]][[2]])
    # files[[i]] <- as.data.frame(files[[i]][[9]][[1]])#[3:8], row.names = files[[i]][[9]][[1]][1])
  }
  # True generating values differ by residual-variance condition; relative
  # bias (%) and RMSE are computed against them for the affected parameters.
  if (residuals == "high"){
    bias[3,] <- mean(((as.numeric(est[,3])-0.5)/.5)*100)
    bias[4,] <- mean(((as.numeric(est[,4])-2)/2)*100)
    bias[9,] <- mean(((as.numeric(est[,9])-0.5)/0.5)*100)
    Y2residw <- sqrt(mean((est[,3]-0.5)^2))
    Etaresidw <- sqrt(mean((est[,4]-2)^2))
    sresidt <- sqrt(mean((est[,9]-0.5)^2))
    true_value <- c(1.00,0.50,0.50,2.00,-1.00,1.00,0.00,0.00,0.50,
                    0.00,0.00,0.50,0.00)
  }
  if (residuals == "low"){
    bias[3,] <- mean(((as.numeric(est[,3])-0.2)/.2)*100)
    bias[4,] <- mean(((as.numeric(est[,4])-1)/1)*100)
    bias[9,] <- mean(((as.numeric(est[,9])-0.2)/0.2)*100)
    Y2residw <- sqrt(mean((est[,3]-0.2)^2))
    Etaresidw <- sqrt(mean((est[,4]-1)^2))
    sresidt <- sqrt(mean((est[,9]-0.2)^2))
    true_value <- c(1.00,0.50,0.20,1.00,-1.00,1.00,0.00,0.00,0.20,
                    0.00,0.00,0.50,0.00)
  }
  # Parameters with a true value of 0 get raw (absolute) bias; fixed
  # parameters get "NA".
  # NOTE(review): assigning the string "NA" coerces `bias` (and later
  # `rmse`) to character; NA_real_ would keep them numeric.
  bias[1,] <- "NA"
  bias[2,] <- mean(((as.numeric(est[,2])-0.5)/.5)*100)
  bias[5,] <- mean(((as.numeric(est[,5])-(-1))/-1)*100)
  bias[6,] <- mean(((as.numeric(est[,6])-1)/1)*100)
  bias[7,] <- mean((as.numeric(est[,7])-0))
  bias[8,] <- mean((as.numeric(est[,8])-0))
  bias[10,] <- mean((as.numeric(est[,10])-0))
  bias[11,] <- "NA"
  bias[12,] <- mean(((as.numeric(est[,12])-0.5)/0.5)*100)
  bias[13,] <- "NA"
  # Root-mean-squared error of each estimate against its true value.
  eta_eta1 <- sqrt(mean((est[,2]-0.5)^2))
  s_x2 <- sqrt(mean((est[,5]-(-1))^2))
  y_x1 <- sqrt(mean((est[,6]-1)^2))
  s <- sqrt(mean((est[,7]-0)^2))
  Y2residt <- sqrt(mean((est[,8]-0)^2))
  Y2means <- sqrt(mean((est[,10]-0)^2))
  Y2vars <- sqrt(mean((est[,12]-0.5)^2))
  rmse<-rbind("NA",eta_eta1,Y2residw,Etaresidw,s_x2,y_x1,s,Y2residt,sresidt,Y2means, "NA",Y2vars, "NA")
  # Average estimates and interval bounds across replications.
  est_mean <- colMeans(est)
  est <- as.data.frame(cbind(est_mean), row.names = rows)
  meanL <- round(colMeans(L), digits = 2)
  meanU <- round(colMeans(U), digits = 2)
  # Power: proportion of intervals (over all replications and parameters)
  # that exclude zero.
  power <- mean(L>0 | U<0)
  CI <- paste0("(",meanL,",", meanU,")")
  results <- cbind(true_value,est, "95% CI"=CI, bias, rmse)
  convergence <- noquote(paste0(round((length(files)/nsims)*100, digits = 4), "%"))
  DIC <- as.data.frame(rbind(DIC,power, convergence), row.names = c("DIC", "Power", "convergence"))
  colnames(DIC) <-"Diagnostics"
  # results = apply_labels(results,
  #                        true_value = "True Value",
  #                        est_mean = "Estimate",
  #                        CI = "95% CI",
  #                        bias = "Bias",
  #                        rmse = "RMSE")
  # DIC = apply_labels(DIC, )
  #print(title)
  #print(results)
  #print(DIC)
  # print(power)
  list("Title" = title, "Results" = results, "Diagnostics"=DIC)
} | /CreateOutputTable_linear.R | no_license | leomartinsjf/Dissertation_Files | R | false | false | 3,820 | r | create_output_lin <- function(x, silent=TRUE, rep_filter= "_rep", residuals, nsims) {
## Packages
packages <- c("expss", "filesstrings")
package.check <- lapply(packages, FUN = function(x) {
if (!require(x, character.only = TRUE)) {
install.packages(x, dependencies = TRUE)
library(x, character.only = TRUE)
}
})
files <- x
## Split each file name in 2 after the rep_filter term, save what comes after
title <- files[[1]][[1]]$title
nparam <- nrow(files[[1]]$parameters$unstandardized)
est<-matrix(0,nrow=length(files),ncol=nparam) #vector of length #outputs
#est_mean <- matrix(0,nrow=1, ncol = 13)
L <- matrix(0,nrow=length(files),ncol=nparam)
U <- matrix(0,nrow=length(files),ncol=nparam)
bias<-matrix(0,nrow=nparam,ncol=1)
for(i in 1:length(files)){
for(d in 1:ncol(est)){
est[i,d]<-as.numeric(files[[i]][[9]][[1]][[3]][d])
L[i,d]<-as.numeric(files[[i]][[9]][[1]][[6]][d])
U[i,d]<-as.numeric(files[[i]][[9]][[1]][[7]][d])
}
DIC <- mean(as.numeric(files[[i]][[7]]$DIC))
}
for(i in 1:length(files)){
rows<-paste(files[[i]][[9]][[1]][[1]],files[[i]][[9]][[1]][[2]])
# files[[i]] <- as.data.frame(files[[i]][[9]][[1]])#[3:8], row.names = files[[i]][[9]][[1]][1])
}
if (residuals == "high"){
bias[3,] <- mean(((as.numeric(est[,3])-0.5)/.5)*100)
bias[4,] <- mean(((as.numeric(est[,4])-2)/2)*100)
bias[9,] <- mean(((as.numeric(est[,9])-0.5)/0.5)*100)
Y2residw <- sqrt(mean((est[,3]-0.5)^2))
Etaresidw <- sqrt(mean((est[,4]-2)^2))
sresidt <- sqrt(mean((est[,9]-0.5)^2))
true_value <- c(1.00,0.50,0.50,2.00,-1.00,1.00,0.00,0.00,0.50,
0.00,0.00,0.50,0.00)
}
if (residuals == "low"){
bias[3,] <- mean(((as.numeric(est[,3])-0.2)/.2)*100)
bias[4,] <- mean(((as.numeric(est[,4])-1)/1)*100)
bias[9,] <- mean(((as.numeric(est[,9])-0.2)/0.2)*100)
Y2residw <- sqrt(mean((est[,3]-0.2)^2))
Etaresidw <- sqrt(mean((est[,4]-1)^2))
sresidt <- sqrt(mean((est[,9]-0.2)^2))
true_value <- c(1.00,0.50,0.20,1.00,-1.00,1.00,0.00,0.00,0.20,
0.00,0.00,0.50,0.00)
}
bias[1,] <- "NA"
bias[2,] <- mean(((as.numeric(est[,2])-0.5)/.5)*100)
bias[5,] <- mean(((as.numeric(est[,5])-(-1))/-1)*100)
bias[6,] <- mean(((as.numeric(est[,6])-1)/1)*100)
bias[7,] <- mean((as.numeric(est[,7])-0))
bias[8,] <- mean((as.numeric(est[,8])-0))
bias[10,] <- mean((as.numeric(est[,10])-0))
bias[11,] <- "NA"
bias[12,] <- mean(((as.numeric(est[,12])-0.5)/0.5)*100)
bias[13,] <- "NA"
eta_eta1 <- sqrt(mean((est[,2]-0.5)^2))
s_x2 <- sqrt(mean((est[,5]-(-1))^2))
y_x1 <- sqrt(mean((est[,6]-1)^2))
s <- sqrt(mean((est[,7]-0)^2))
Y2residt <- sqrt(mean((est[,8]-0)^2))
Y2means <- sqrt(mean((est[,10]-0)^2))
Y2vars <- sqrt(mean((est[,12]-0.5)^2))
rmse<-rbind("NA",eta_eta1,Y2residw,Etaresidw,s_x2,y_x1,s,Y2residt,sresidt,Y2means, "NA",Y2vars, "NA")
est_mean <- colMeans(est)
est <- as.data.frame(cbind(est_mean), row.names = rows)
meanL <- round(colMeans(L), digits = 2)
meanU <- round(colMeans(U), digits = 2)
power <- mean(L>0 | U<0)
CI <- paste0("(",meanL,",", meanU,")")
results <- cbind(true_value,est, "95% CI"=CI, bias, rmse)
convergence <- noquote(paste0(round((length(files)/nsims)*100, digits = 4), "%"))
DIC <- as.data.frame(rbind(DIC,power, convergence), row.names = c("DIC", "Power", "convergence"))
colnames(DIC) <-"Diagnostics"
# results = apply_labels(results,
# true_value = "True Value",
# est_mean = "Estimate",
# CI = "95% CI",
# bias = "Bias",
# rmse = "RMSE")
# DIC = apply_labels(DIC, )
#print(title)
#print(results)
#print(DIC)
# print(power)
list("Title" = title, "Results" = results, "Diagnostics"=DIC)
} |
# Regression Models
# Coursera
# Quiz 1

# Question 1: weighted mean of x with weights w
x <- c(0.18, -1.54, 0.42, 0.95)
w <- c(2, 1, 3, 1)
# Manual method
mu <- sum(w*x)/sum(w)
round(mu,4)
# Alternate method: intercept of a weighted, intercept-only regression
lm(x ~ 1, weights = w)$coefficients

# Question 2: regression through the origin (no intercept)
x <- c(0.8, 0.47, 0.51, 0.73, 0.36, 0.58, 0.57, 0.85, 0.44, 0.42)
y <- c(1.39, 0.72, 1.55, 0.48, 1.19, -1.59, 1.23, -0.65, 1.49, 0.05)
fit.origin <- lm( y ~ x - 1 )
summary(fit.origin)
lm(y ~ 0 + x)$coefficients

# Question 3: slope of mpg regressed on weight in mtcars
data(mtcars)
fit <- lm(mpg ~ wt, mtcars)
summary(fit)
lm(mpg ~ wt, data = mtcars)$coefficients

# Question 4
# FIX: the LaTeX below was previously an unquoted top-level expression,
# which made this file unparseable as R; it is kept as a comment.
# $$ \begin{align} \hat \beta_1 &= Cor(X,Y) \frac{Sd(Y)}{Sd(X)} \\
#                               &= (0.5) \frac{Sd(Y)}{0.5 Sd(Y)} \\
#                               &= 1 \end{align} $$

# Question 5: slope = correlation * sd(y)/sd(x)
1.5 * 0.4

# Question 6: first element of the standardised (z-scored) vector
x <- c(8.58, 10.46, 9.01, 9.64, 8.86)
xbar <- (x - mean(x)) / sd(x)
xbar[1]

# Question 7: intercept and slope of y regressed on x
x <- c(0.8, 0.47, 0.51, 0.73, 0.36, 0.58, 0.57, 0.85, 0.44, 0.42)
y <- c(1.39, 0.72, 1.55, 0.48, 1.19, -1.59, 1.23, -0.65, 1.49, 0.05)
lm(y ~ x)$coefficients

# Question 8
# You know that both the predictor and response have mean 0. What can be
# said about the intercept when you fit a linear regression?
# It must be exactly one.
# It must be identically 0. <--
# Nothing about the intercept can be said from the information given.
# It is undefined as you have to divide by zero.

# Question 9: sample mean
x <- c(0.8, 0.47, 0.51, 0.73, 0.36, 0.58, 0.57, 0.85, 0.44, 0.42)
mean(x)

# Question 10
# Consider the slope from fitting Y on X, $\beta_1$, and the slope from
# fitting X on Y, $\gamma_1$. What is $\beta_1/\gamma_1$ always equal to?
# (Formula corrected: the original had the two denominators swapped.)
#
# $$ \beta_1 = \frac{\sum_i X_i Y_i}{\sum_i X_i^2}, \quad
#    \gamma_1 = \frac{\sum_i X_i Y_i}{\sum_i Y_i^2}, \quad
#    \frac{\beta_1}{\gamma_1} = \frac{\sum_i Y_i^2}{\sum_i X_i^2}
#                             = \frac{Var(Y)}{Var(X)} $$
#
# Cor(Y,X)
# 1
# 2SD(Y)/SD(X)
# Var(Y)/Var(X) <-
| /Week1/regression models quiz 1.r | no_license | jmacarter/Regression-Models | R | false | false | 1,919 | r | # Regression Models
# Coursera
# Quiz 1
# Question 1
x <- c(0.18, -1.54, 0.42, 0.95)
w <- c(2, 1, 3, 1)
# Manual method
mu <- sum(w*x)/sum(w)
round(mu,4)
# Alternate method:
lm(x ~ 1, weights = w)$coefficients
# Question 2
x <- c(0.8, 0.47, 0.51, 0.73, 0.36, 0.58, 0.57, 0.85, 0.44, 0.42)
y <- c(1.39, 0.72, 1.55, 0.48, 1.19, -1.59, 1.23, -0.65, 1.49, 0.05)
fit.origin <- lm( y ~ x - 1 )
summary(fit.origin)
lm(y ~ 0 + x)$coefficients
# Question 3
data(mtcars)
fit <- lm(mpg ~ wt, mtcars)
summary(fit)
lm(mpg ~ wt, data = mtcars)$coefficients
#Question 4
$$ \begin{align} \hat \beta_1 &= Cor(X,Y) \frac{Sd(Y)}{Sd(X)} \ &= (0.5) \frac{Sd(Y)}{0.5Sd(Y)} \ &= 1 \end{align} $$
#Question 5
1.5 * 0.4
# Question 6
x <- c(8.58, 10.46, 9.01, 9.64, 8.86)
xbar = (x - mean(x)) / sd(x)
xbar[1]
# Question 7
x <- c(0.8, 0.47, 0.51, 0.73, 0.36, 0.58, 0.57, 0.85, 0.44, 0.42)
y <- c(1.39, 0.72, 1.55, 0.48, 1.19, -1.59, 1.23, -0.65, 1.49, 0.05)
lm(y ~ x)$coefficients
#Question 8
#You know that both the predictor and response have mean 0. What can be said about the intercept when you fit a linear regression?
#It must be exactly one.
#It must be identically 0. <--
#Nothing about the intercept can be said from the information given.
#It is undefined as you have to divide by zero.
# Question 9
x <- c(0.8, 0.47, 0.51, 0.73, 0.36, 0.58, 0.57, 0.85, 0.44, 0.42)
mean(x)
#Question 10
# Consider taking the slope having fit Y as the outcome and X as the predictor, $\beta_1$ and the slope from fitting X as the outcome and Y as the predictor, $\gamma_1$, and dividing the two as $\beta_1/\gamma_1$. What is this ratio always equal to?
#
# $$ \begin{align} \beta_1 &= \frac{\sum_i X_i Y_i}{\sum_i Y_i^2} \ \gamma_1 &= \frac{\sum_i X_i Y_i}{\sum_i X_i^2} \ \frac{\beta_1}{\gamma_1} &= \frac{\sum_i Y_i^2}{\sum_i X_i^2} \ &= \frac{Var(X)}{Var(Y)} \end{align} $$
#
# Cor(Y,X)
# 1
# 2SD(Y)/SD(X)
# Var(Y)/Var(X) <-
|
# Produced by the EasyCharts team.
# For corrections or further study, contact WeChat: EasyCharts
library(ggplot2)
library(RColorBrewer)
# Group sizes spanning four orders of magnitude: 10, 100, 1000, 10000.
freq <- 10 ^ ((1:4))
# Simulated data: groups "a".."d", each drawn from Normal(mean = 3, sd = 1).
df <- data.frame(
  group = rep(letters[seq_along(freq)], freq),
  x = rnorm(sum(freq),3,1)
)
# Notched, variable-width boxplots: varwidth = TRUE makes box width encode
# group sample size; notches give a rough median comparison interval.
ggplot(df, aes(group,x))+
  geom_boxplot(aes(fill = group),notch = TRUE, varwidth = TRUE) +
  scale_fill_manual(values=c(brewer.pal(7,"Set2")[c(1,2,4,5)]))+
  theme_classic()+
  theme(panel.background=element_rect(fill="white",colour="black",size=0.25),
        axis.line=element_line(colour="black",size=0.25),
        axis.title=element_text(size=13,face="plain",color="black"),
        axis.text = element_text(size=12,face="plain",color="black"),
        legend.position="none"
        )
| /R语言数据可视化-增强版/第5章 数据分布型图表/图5-2-6 箱型图系列.r | no_license | Sherlock-Ni/R-Projects | R | false | false | 737 | r | #EasyCharts团队出品,
#如有问题修正与深入学习,可联系微信:EasyCharts
library(ggplot2)
library(RColorBrewer)
freq <- 10 ^ ((1:4))
df <- data.frame(
group = rep(letters[seq_along(freq)], freq),
x = rnorm(sum(freq),3,1)
)
ggplot(df, aes(group,x))+
geom_boxplot(aes(fill = group),notch = TRUE, varwidth = TRUE) +
scale_fill_manual(values=c(brewer.pal(7,"Set2")[c(1,2,4,5)]))+
theme_classic()+
theme(panel.background=element_rect(fill="white",colour="black",size=0.25),
axis.line=element_line(colour="black",size=0.25),
axis.title=element_text(size=13,face="plain",color="black"),
axis.text = element_text(size=12,face="plain",color="black"),
legend.position="none"
)
|
##Chapter 16 : Queuing Systems
##Example 6-4 : Page 649
#Queueing library to process different queueing models
library(queueing)
#creating a MMK instance with the following parameters
# M/M/1/K queue: arrival rate lambda = 4, service rate mu = 6,
# finite system capacity k = 5 customers.
x=NewInput.MM1K(lambda=4, mu=6,k=5)
#Solving the model which returns a list
y=QueueingModel(x)
# Print the standard steady-state performance measures for the model.
summary(y)
##Example 6-4 : Page 649
#Queueing library to process different queueing models
library(queueing)
#creating a MMK instance with the following parameters
x=NewInput.MM1K(lambda=4, mu=6,k=5)
#Solving the model which returns a list
y=QueueingModel(x)
summary(y) |
publishGitHub <- function (repo, username = getOption("github.user")) 
{
    # Publish a self-contained slidify deck to the gh-pages branch of the
    # GitHub repository <username>/<repo>, then open the published URL.
    #
    # Args:
    #   repo:     name of the GitHub repository to publish to.
    #   username: GitHub user name; defaults to getOption("github.user").
    #
    # Returns FALSE (invisibly) when the deck is not self-contained;
    # otherwise called for its side effects (git commands, browser launch).
    if (!file.exists("libraries")) {
        message("Please set mode to selfcontained and run Slidify")
        message("This would place library files in the slide folder")
        message("making it self-contained")
        # FIX: was invisible(return(FALSE)) — return() fires before
        # invisible() can apply, so the value was returned visibly.
        return(invisible(FALSE))
    }
    if (!file.exists(".git")) {
        init_repo()  # assumed to be defined elsewhere in this package
    }
    if (!file.exists(".nojekyll")) {
        # .nojekyll stops GitHub Pages from running Jekyll on the deck.
        message("Adding .nojekyll to your repo...")
        file.create(".nojekyll")
    }
    message("Publishing deck to ", username, "/", repo)
    system("git add .")
    system("git commit -a -m \"publishing deck\"")
    # FIX: the SSH remote host is github.com; github.io is only the Pages
    # hostname used in the published URL below.
    system(sprintf("git push git@github.com:%s/%s gh-pages", 
        username, repo))
    link <- sprintf("http://%s.github.io/%s", username, repo)
    message("You can now view your slide deck at ", link)
    browseURL(link)
} | /publishGitHub.R | no_license | winterwang/NEslides | R | false | false | 829 | r | publishGitHub <- function (repo, username = getOption("github.user"))
{
if (!file.exists("libraries")) {
message("Please set mode to selfcontained and run Slidify")
message("This would place library files in the slide folder")
message("making it self-contained")
invisible(return(FALSE))
}
if (!file.exists(".git")) {
init_repo()
}
if (!file.exists(".nojekyll")) {
message("Adding .nojekyll to your repo...")
file.create(".nojekyll")
}
message("Publishing deck to ", username, "/", repo)
system("git add .")
system("git commit -a -m \"publishing deck\"")
system(sprintf("git push git@github.io:%s/%s gh-pages",
username, repo))
link = sprintf("http://%s.github.io/%s", username, repo)
message("You can now view your slide deck at ", link)
browseURL(link)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/edar_doc_data.R
\docType{package}
\name{edar}
\alias{edar}
\title{edar: A package for code-efficient Exploratory Data Analysis in R}
\description{
The package provides functions that allows efficient exploratory
data analyses with few lines of code. It also contains some
functions to check balance of covariates among control and treatment
groups, analyse output of model estimation, create summary tables and
plots, and conduct robustness checks of the model results (multiple
imputation, post-stratification, etc). Quantitative researchers conduct
those tasks repeatedly. The package provides functions to conduct them
fast and with minimum code.
}
\details{
See vignette(edar) for examples of workflow
}
| /man/edar.Rd | permissive | DiogoFerrari/edar | R | false | true | 791 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/edar_doc_data.R
\docType{package}
\name{edar}
\alias{edar}
\title{edar: A package for code-efficient Exploratory Data Analysis in R}
\description{
The package provides functions that allows efficient exploratory
data analyses with few lines of code. It also contains some
functions to check balance of covariates among control and treatment
groups, analyse output of model estimation, create summary tables and
plots, and conduct robustness checks of the model results (multiple
imputation, post-stratification, etc). Quantitative researchers conduct
those tasks repeatedly. The package provides functions to conduct them
fast and with minimum code.
}
\details{
See vignette(edar) for examples of workflow
}
|
#library(foreign)
#setwd("c:/home/st/java-eim/da-101/ch/101-ch03-histograms/r/")
#dataset = read.spss("2009_dati.sav", to.data.frame=TRUE)
# http://www.cookbook-r.com/Graphs/Plotting_distributions_(ggplot2)/
# Manually built "histogram": the bar heights are supplied in `yy` rather
# than computed by binning, so an identity-height bar geom is what we need.
library(ggplot2)  # library() errors on a missing package; require() only warns
xx <- c(1,2,3,4,5,6)        # bar positions
yy <- c(11,12,13,12,14,15)  # pre-computed bar heights
df <- data.frame(x = xx, y = yy)
# FIX: geom_histogram(stat = "identity", breaks = ...) is a roundabout bar
# chart, and `breaks` belongs to stat_bin so it was silently ignored.
# geom_col() draws the same bars directly from the supplied x/y values.
ggplot(df, aes(x = x, y = y)) +
  geom_col(col = "black", fill = "white")
| /data-analysis-course/chapters/basic-p03-histograms/r/manual-histograms.R | permissive | kapsitis/ddgatve-stat | R | false | false | 510 | r | #library(foreign)
#setwd("c:/home/st/java-eim/da-101/ch/101-ch03-histograms/r/")
#dataset = read.spss("2009_dati.sav", to.data.frame=TRUE)
# http://www.cookbook-r.com/Graphs/Plotting_distributions_(ggplot2)/
require(ggplot2)
xx <- c(1,2,3,4,5,6)
yy <- c(11,12,13,12,14,15)
df <- data.frame(x = xx, y = yy)
ggplot(df,aes(x=x,y=y)) +
# geom_bar(stat='identity') +
geom_histogram(breaks = c(0, 3, 5,7),
stat="identity",
col="black",
fill = "white")
|
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Create an SVG gradient to use as a pattern
#'
#' Create an SVG pattern which will \code{fill} an element with a colour gradient.
#'
#' @inheritParams create_pattern_stripe
#' @param colour1,colour2 the start and end colours of the gradient
#'
#' @return minisvg::SVGPattern object
#'
#' @import minisvg
#' @import glue
#' @export
#'
#'
#' @examples
#' \dontrun{
#' # Create an SVG document
#' library(minisvg)
#' doc <- minisvg::svg_doc()
#'
#' # Create the pattern and add to the SVG definitions
#' my_pattern <- create_pattern_gradient(id = 'mypattern')
#' doc$defs(my_pattern)
#'
#' # Create a rectangle with the animation
#' rect <- stag$rect(
#' x = "10%",
#' y = "10%",
#' width = "80%",
#' height = "80%",
#' stroke = 'black',
#' fill = my_pattern
#' )
#'
#' # Add this rectangle to the document, show the SVG text, then render it
#' doc$append(rect)
#' doc
#' doc$show()
#' }
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
create_pattern_gradient <- function(id,
                                    angle   = 45,
                                    colour1 = '#ffffff',
                                    colour2 = '#000000',
                                    alpha   = 1.0,
                                    ...) {

  # SVG's y axis points downward, so mirror the requested angle to keep the
  # user-facing convention (positive angle = anti-clockwise from the x axis).
  angle <- (360 - (angle %% 360)) %% 360

  # Fractional offset within the current 45-degree octant, and its complement.
  frac  <- tan((angle %% 45) * pi / 180)
  cfrac <- 1 - frac

  # Gradient start/end points on the unit square, tabulated per octant:
  # octant 1 covers [0, 45) degrees, octant 2 covers [45, 90), and so on.
  octant <- floor(angle / 45) + 1
  x1 <- c(0,     0,     1,     1,     1,     1,     0,    0   )[octant]
  y1 <- c(0,     0,     0,     0,     1,     1,     1,    1   )[octant]
  x2 <- c(1,     cfrac, cfrac, 0,     0,     frac,  frac, 1   )[octant]
  y2 <- c(frac,  1,     1,     cfrac, cfrac, 0,     0,    frac)[octant]

  # linearGradient endpoint coordinates are expressed as percentages.
  as_pct <- function(v) paste0(round(v * 100, 2), '%')

  minisvg::svg_pattern(
    name = 'linearGradient',
    id   = id,
    x1   = as_pct(x1),
    y1   = as_pct(y1),
    x2   = as_pct(x2),
    y2   = as_pct(y2),
    minisvg::stag$stop(
      offset = "0%",
      style  = glue::glue("stop-color:{colour1};stop-opacity:{alpha}")
    ),
    minisvg::stag$stop(
      offset = "100%",
      style  = glue::glue("stop-color:{colour2};stop-opacity:{alpha}")
    )
  )
}
| /R/pattern-gradient.R | permissive | coolbutuseless/svgpatternsimple | R | false | false | 2,954 | r |
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#' Create an SVG gradient to use as a pattern
#'
#' Create an SVG pattern which will \code{fill} an element with a colour gradient.
#'
#' @inheritParams create_pattern_stripe
#' @param colour1,colour2 the start and end colours of the gradient
#'
#' @return minisvg::SVGPattern object
#'
#' @import minisvg
#' @import glue
#' @export
#'
#'
#' @examples
#' \dontrun{
#' # Create an SVG document
#' library(minisvg)
#' doc <- minisvg::svg_doc()
#'
#' # Create the pattern and add to the SVG definitions
#' my_pattern <- create_pattern_gradient(id = 'mypattern')
#' doc$defs(my_pattern)
#'
#' # Create a rectangle with the animation
#' rect <- stag$rect(
#' x = "10%",
#' y = "10%",
#' width = "80%",
#' height = "80%",
#' stroke = 'black',
#' fill = my_pattern
#' )
#'
#' # Add this rectangle to the document, show the SVG text, then render it
#' doc$append(rect)
#' doc
#' doc$show()
#' }
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
create_pattern_gradient <- function(id,
angle = 45,
colour1 = '#ffffff',
colour2 = '#000000',
alpha = 1.0,
...) {
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# invert angle to cope with inverted y axis in svg coords.
# i.e. convert angle so clockwise from x axis to be positive
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
angle <- (360 - (angle %% 360)) %% 360
tinc <- tan((angle %% 45) * pi/180)
tdec <- 1 - tinc
if (angle < 1 * 45) {
x1 = 0; y1 = 0; x2 = 1; y2 = tinc;
} else if (angle < 2 * 45) {
x1 = 0; y1 = 0; x2 = tdec; y2 = 1;
} else if (angle < 3 * 45) {
x1 = 1; y1 = 0; x2 = tdec; y2 = 1;
} else if (angle < 4 * 45) {
x1 = 1; y1 = 0; x2 = 0; y2 = tdec;
} else if (angle < 5 * 45) {
x1 = 1; y1 = 1; x2 = 0; y2 = tdec;
} else if (angle < 6 * 45) {
x1 = 1; y1 = 1; x2 = tinc; y2 = 0;
} else if (angle < 7 * 45) {
x1 = 0; y1 = 1; x2 = tinc; y2 = 0;
} else if (angle < 8 * 45) {
x1 = 0; y1 = 1; x2 = 1; y2 = tinc;
} else {
x1 = 0; y1 = 0; x2 = 1; y2 = 1
}
# Format as percentages
x1 <- paste0(round(x1 * 100, 2), '%')
y1 <- paste0(round(y1 * 100, 2), '%')
x2 <- paste0(round(x2 * 100, 2), '%')
y2 <- paste0(round(y2 * 100, 2), '%')
pattern <- minisvg::svg_pattern(
name = 'linearGradient',
id = id,
x1 = x1,
y1 = y1,
x2 = x2,
y2 = y2,
minisvg::stag$stop(offset = "0%", style = glue::glue("stop-color:{colour1};stop-opacity:{alpha}")),
minisvg::stag$stop(offset = "100%", style = glue::glue("stop-color:{colour2};stop-opacity:{alpha}"))
)
pattern
}
|
#' Apply the generalized Bayesian two-stage robust-based causal model with instrumental variables.
#'
#' @description The \code{gts.nrobust} function applies the generalized Bayesian two-stage
#' robust-based causal model to the categorical treatment data.
#' The model best suits the outcome data that contain outliers
#' and are ignorably missing (i.e., MCAR or MAR).
#'
#' @param formula An object of class formula: a symbolic description of the model to be fitted.
#' The details of the model specification are given under "Details".
#' @param data A dataframe with the variables to be used in the model.
#' @param advanced Logical; if FALSE (default), the model is specified using the formula argument,
#' if TRUE, self-defined models can be specified using the adv.model argument.
#' @param adv.model Specify the self-defined model. Used when advanced=TRUE.
#' @param b0 The mean hyperparameter of the normal distribution (prior distribution)
#' for the first-stage generalized causal model coefficients, i.e., coefficients for the instrumental variables.
#' This can either be a numerical value or a vector with dimensions equal to the number of coefficients
#' for the instrumental variables. If this takes a numerical value, then that values will
#' serve as the mean hyperparameter for all of the coefficients for the instrumental variables.
#' Default value of 0 is equivalent to a noninformative prior for the normal distributions.
#' Used when advanced=FALSE.
#' @param B0 The precision hyperparameter of the normal distribution (prior distribution)
#' for the first stage generalized causal model coefficients.
#' This can either be a numerical value or a vector with dimensions equal to the number of coefficients
#' for the instrumental variables. If this takes a numerical value, then that values will
#' serve as the precision hyperparameter for all of the coefficients for the instrumental variables.
#' Default value of 1.0E-6 is equivalent to a noninformative prior for the normal distributions.
#' Used when advanced=FALSE.
#' @param g0 The mean hyperparameter of the normal distribution (prior distribution)
#' for the second-stage generalized causal model coefficients,
#' i.e., coefficients for the treatment variable and other regression covariates.
#' This can either be a numerical value if there is only one treatment variable in the model,
#' or a if there is a treatment variable and multiple regression covariates,
#' with dimensions equal to the total number of coefficients for the treatment variable and covariates.
#' Default value of 0 is equivalent to a noninformative prior for the normal distributions.
#' Used when advanced=FALSE.
#' @param G0 The precision hyperparameter of the normal distribution (prior distribution)
#' for the second-stage generalized causal model coefficients.
#' This can either be a numerical value if there is only one treatment variable in the model,
#' or a vector if there is a treatment variable and multiple regression covariates,
#' with dimensions equal to the total number of coefficients for the treatment variable and covariates.
#' Default value of 1.0E-6 is equivalent to a noninformative prior for the normal distributions.
#' Used when advanced=FALSE.
#' @param e0 The location hyperparameter of the inverse Gamma distribution (prior for the scale parameter
#' of Student's t distribution on the model residual).
#' Default of 0.001 is equivalent to the noninformative prior for the inverse Gamma distribution.
#' @param E0 The shape hyperparameter of the inverse Gamma distribution (prior for the scale parameter
#' of Student's t distribution on the model residual).
#' Default of 0.001 is equivalent to the noninformative prior for the inverse Gamma distribution.
#' @param v0 The lower boundary hyperparameter of the uniform distribution (prior for the degrees of freedom
#' parameter of Student's t distribution).
#' @param V0 The upper boundary hyperparameter of the uniform distribution (prior for the degrees of freedom
#' parameter of Student's t distribution).
#' @param beta.start The starting values for the first-stage generalized causal model coefficients,
#' i.e., coefficients for the instrumental variables.
#' This can either be a numerical value or a column vector with dimensions
#' equal to the number of first-stage coefficients.
#' The default value of NA will use the IWLS (iteratively reweighted least squares) estimate
#' of first-stage coefficients as the starting value.
#' If this is a numerical value, that value will
#' serve as the starting value mean for all the first-stage beta coefficients.
#' @param gamma.start The starting values for the second-stage generalized causal model coefficients,
#' i.e., coefficients for the treatment variable and the model covariates.
#' This can either be a numerical value or a column vector with dimensions
#' equal to the number of second-stage coefficients.
#' The default value of NA will use the IWLS (iteratively reweighted least squares) estimate
#' of second-stage coefficients as the starting value.
#' If this is a numerical value, that value will
#' serve as the starting value mean for all the second-stage gamma coefficients.
#' @param e.start The starting value for the precision hyperparameter of the inverse gamma distribution
#' (prior for the scale parameter of Student's t distribution of the model residual).
#' The default value of NA will use the inverse of the residual variance from the
#' IWLS (iteratively reweighted least square) estimate of the second-stage model.
#' @param df.start The starting value for the degrees of freedom of Student's t distribution.
#' @param n.chains The number of Markov chains. The default is 1.
#' @param n.iter The number of total iterations per chain (including burnin). The default is 10000.
#' @param n.burnin Length of burn in, i.e., number of iterations to discard at the beginning.
#' Default is n.iter/2, that is, discarding the first half of the simulations.
#' @param n.thin The thinning rate. Must be a positive integer. The default is 1.
#' @param DIC Logical; if TRUE (default), compute deviance, pD, and DIC. The rule pD=Dbar-Dhat is used.
#' @param codaPkg Logical; if FALSE (default), an object is returned; if TRUE,
#' file names of the output are returned.
#'
#' @return
#' If \emph{codaPkg=FALSE}(default), returns an object containing summary statistics of
#' the saved parameters, including
#' \item{s1.intercept}{Estimate of the intercept from the first stage.}
#' \item{s1.slopeP}{Estimate of the pth slope from the first stage. }
#' \item{s2.intercept}{Estimate of the intercept from the second stage.}
#' \item{s2.slopeP}{Estimate of the pth slope from the second stage (the first slope is always
#' the \strong{LATE}).}
#' \item{var.e.s2}{Estimate of the residual variance at the second stage.}
#' \item{df.est}{Estimate of the degrees of freedom for the Student's t distribution.}
#' \item{DIC}{Deviance Information Criterion.}
#' If \emph{codaPkg=TRUE}, the returned value is the path for the output file
#' containing the Markov chain Monte Carlo output.
#'
#' @details
#' \enumerate{
#' \item{The formula takes the form \emph{response ~ terms|instrumental_variables}.}
#' \code{\link{gts.nnormal}} provides a detailed description of the formula rule.
#' \item{DIC is computed as \emph{mean(deviance)+pD}.}
#' \item{Prior distributions used in ALMOND.}
#' \itemize{
#' \item Generalized causal model coefficients at both stages: normal distributions.
#' \item The generalized causal model residual: Student's t distribution.
#' }
#' }
#'
#' @references
#' Gelman, A., Carlin, J.B., Stern, H.S., Rubin, D.B. (2003).
#' \emph{Bayesian data analysis}, 2nd edition. Chapman and Hall/CRC Press.
#'
#' Spiegelhalter, D. J., Thomas, A., Best, N. G., Gilks, W., & Lunn, D. (1996).
#' BUGS: Bayesian inference using Gibbs sampling.
#' \href{http://www.mrc-bsu.cam.ac.uk/bugs}{http://www.mrc-bsu.cam.ac.uk/bugs}, 19.
#'
#' @examples
#' \donttest{
#' # Run the model
#' model1 <- gts.nrobust(neighborhoodRating~voucherProgram|extraBedroom,data=simVoucher)
#'
#' # Run the model with the self-defined advanced feature
#' my.robust.model<- function(){
#' for (i in 1:N){
#' logit(p[i]) <- beta0 + beta1*z[i]
#' x[i] ~ dbern(p[i])
#' muY[i] <- gamma0 + gamma1*p[i]
#' y[i] ~ dt(muY[i], pre.u2, df)
#' }
#'
#' beta0 ~ dnorm(0,1)
#' beta1 ~ dnorm(1, 1)
#' gamma0 ~ dnorm(0, 1)
#' gamma1 ~ dnorm(.5, 1)
#'
#' pre.u2 ~ dgamma(.001, .001)
#'
#' df ~ dunif(0,100)
#'
#' s1.intercept <- beta0
#' s1.slope1 <- beta1
#' s2.intercept <- gamma0
#' s2.slope1 <- gamma1
#' df.est <- df
#' var.e.s2 <- 1/pre.u2
#' }
#'
#' model2 <- gts.nrobust(neighborhoodRating~voucherProgram|extraBedroom,data=simVoucher,
#' advanced=TRUE, adv.model=my.robust.model)
#'
#' # Extract the model DIC
#' model1$DIC
#'
#' # Extract the MCMC output
#' model3 <- gts.nrobust(neighborhoodRating~voucherProgram|extraBedroom,data=simVoucher,
#' codaPkg=TRUE)
#' }
#'
#' @export
# Fit the generalized Bayesian two-stage robust causal model: stage 1 is a
# logistic regression of the binary treatment on the instrument(s); stage 2
# regresses the outcome on the fitted treatment probability (plus any extra
# covariates) with a Student's-t residual. The model is written out as
# OpenBUGS code and estimated via R2OpenBUGS::bugs().
#
# NOTE(review): b0 defaults to 1 here although the roxygen documentation
# states a default of 0 -- confirm which is intended.
# NOTE(review): the DIC argument is accepted but never forwarded; the bugs()
# call below always uses DIC=TRUE.
# NOTE(review): beta.start/gamma.start/e.start use NULL (not NA, as the
# documentation says) as the "use the IWLS estimate" sentinel.
# (n.burnin's default references n.iter, which follows it in the signature;
# this is valid because R evaluates default arguments lazily.)
gts.nrobust<-function(formula,data,advanced=FALSE, adv.model,
b0=1,B0=1.0E-6, g0=0,G0=1.0E-6, e0=0.001,E0=0.001, v0=0,V0=100,
beta.start=NULL, gamma.start=NULL, e.start=NULL, df.start=5,
n.chains=1,n.burnin=floor(n.iter/2),n.iter=10000,n.thin=1,DIC,debug=FALSE,
codaPkg=FALSE){
# Format values of very small or very large magnitude in scientific (E)
# notation so they are written unambiguously into the BUGS model text.
# NOTE(review): uses the scalar operator `||`; fine for the scalar
# hyperparameters passed below, but would error on length > 1 input in
# R >= 4.3 -- confirm all call sites stay scalar.
.alReplaceSciNotR <- function(x,digits=5){
x[abs(x)<1e-3||abs(x)>1e+4] <-
formatC(x[abs(x)<1e-3||abs(x)>1e+4],
digits=digits,
format="E")
return(x)
}
require(Formula)
require(R2OpenBUGS)
# Split the two-part formula y ~ treatment_terms | instruments into the
# outcome y, treatment/covariate matrix x, and instrument matrix z.
formula1 <- formula(formula)
y <- model.response(model.frame(as.Formula(formula1),data=data,na.action=NULL))
x <- as.matrix(model.frame(as.Formula(formula1),data=data,na.action=NULL,rhs=1)[,-1])
z <- as.matrix(model.frame(as.Formula(formula1),data=data,na.action=NULL,rhs=2)[,-1])
N <- length(y)
# The first column of x is the treatment variable and must be binary.
if (length(levels(factor(x[,1])))!=2){
stop('The factor level of the treatment variable should be 2')
}else{
# One named list element per column, so each can be passed to OpenBUGS as a
# separate data vector (x, x1, x2, ... and z or z1, z2, ...).
xList <- lapply(1:ncol(x),function(i){x[,i]})
zList <- lapply(1:ncol(z),function(i){z[,i]})
# Recycle scalar prior hyperparameters to one value per coefficient;
# truncate (with a warning) if more values than coefficients were supplied.
if(length(b0)==1){
b0 <- rep(b0,length(zList)+1)
} else if(length(b0)>(length(zList)+1)){
warning(paste0("Number of priors is greater than number of parameters. Only first ",
length(zList)+1," values of b0 will be used."))
b0 <- b0[1:(length(zList)+1)]
}
if(length(B0)==1){
B0 <- rep(B0,length(zList)+1)
} else if(length(B0)>(length(zList)+1)){
warning(paste0("Number of priors is greater than number of parameters. Only first ",
length(zList)+1," values of B0 will be used."))
B0 <- B0[1:(length(zList)+1)]
}
if(length(g0)==1){
g0 <- rep(g0,length(xList)+1)
} else if(length(g0)>(length(xList)+1)){
warning(paste0("Number of priors is greater than number of parameters. Only first ",
length(xList)+1," values of g0 will be used."))
g0 <- g0[1:(length(xList)+1)]
}
if(length(G0)==1){
G0 <- rep(G0,length(xList)+1)
} else if(length(G0)>(length(xList)+1)){
warning(paste0("Number of priors is greater than number of parameters. Only first ",
length(xList)+1," values of G0 will be used."))
G0 <- G0[1:(length(xList)+1)]
}
# Name the data vectors exactly as referenced inside the generated BUGS model.
if (length(xList)==1){
names(xList) <- "x"
}else{
names(xList) <- c("x",paste0("x",1:(length(xList)-1)))
}
if (length(zList)==1){
names(zList) <- "z"
}else{
names(zList) <- paste0("z",1:length(zList))
}
# From here on `data` is the list handed to OpenBUGS (it shadows the
# data-frame argument); the glm()/lm() starting-value fits below also use
# this list.
data = c(list("N"=N,"y"=y),xList,zList)
# NOTE(review): lm.data is assembled and renamed but never used afterwards.
lm.data = as.data.frame(cbind(y,x,z))
if (length(xList)==1){
xnames = "x"
}else{
xnames = c("x",paste0("x",1:(length(xList)-1)))
}
if (length(zList)==1){
znames = "z"
}else{
znames = paste0("z",1:length(zList))
}
colnames(lm.data) = c("y",xnames,znames)
# Stage-1 starting values (IWLS): logistic fit of treatment on the
# instrument(s); xpred holds the stage-1 linear predictor for stage 2.
# NOTE(review): with multiple instruments an lm() (not a binomial glm) is
# used to obtain the starting values -- presumably a rough approximation;
# confirm this is intended.
if (length(zList)==1){
coefList1s = as.list(coefficients(summary(glm(x~z,data=data,family='binomial',na.action=na.omit)))[,1])
names(coefList1s) = c("beta0","beta1")
xpred <- predict(glm(x~z,data=data,family='binomial',na.action=na.exclude))
}else{
coefList1s = as.list(coefficients(summary(lm(formula(paste0("x~",
paste0("z",1:length(zList),collapse="+"))),data=data,na.action=na.omit)))[,1])
names(coefList1s) <- paste0("beta",0:(length(coefList1s)-1))
xpred <- predict(lm(formula(paste0("x~",paste0("z",1:length(zList),collapse='+'))),data=data,
na.action=na.exclude))
}
# Stage-2 starting values: regress y on the stage-1 prediction (plus any
# covariates); pre.u2 is the starting residual precision.
# NOTE(review): 1/sigma is the reciprocal residual SD, while the
# documentation describes the inverse residual variance (1/sigma^2) --
# confirm which is intended.
if (length(xList)==1){
coefList2s = as.list(coefficients(summary(lm(y~xpred,na.action=na.omit)))[,1])
names(coefList2s) <- c("gamma0","gamma1")
pre.u2 = 1/(summary(lm(y~xpred,na.action=na.omit))$sigma)
}else{
coefList2s = as.list(coefficients(summary(lm(formula(paste0("y~",
paste0("xpred+",paste0("x",1:(length(xList)-1),collapse="+")))),
data=data,na.action=na.omit)))[,1])
names(coefList2s) <- paste0("gamma",0:(length(coefList2s)-1))
pre.u2 = 1/summary(lm(formula(paste0("y~",
paste0("xpred+",paste0("x",1:(length(xList)-1),collapse="+")))),
data=data,na.action=na.omit))$sigma
}
names(pre.u2) = c("pre.u2")
# User-supplied starting values override the IWLS ones; a single value is
# recycled and an over-long vector truncated, with a warning either way.
if (is.null(beta.start)==TRUE){
beta.start = coefList1s
} else if (length(beta.start) < length(coefList1s)){
warning("Number of starting values is fewer than the number of parameters. The first
element of beta.start ",beta.start[1], " will be used for all betas." )
beta.start = rep(list(beta.start[1]),length(coefList1s))
} else if (length(beta.start) > length(coefList1s)){
warning(paste0("Number of starting values is greater than the number of parameters. Only the first ",
length(coefList1s)," values of betas will be used."))
beta.start = as.list(beta.start[1:(length(coefList1s))])
} else{
beta.start = as.list(beta.start)
}
names(beta.start) = paste0("beta",0:(length(beta.start)-1))
if (is.null(gamma.start)==TRUE){
gamma.start = coefList2s
} else if (length(gamma.start) < length(coefList2s)){
warning("Number of starting values is fewer than the number of parameters. The first
element ",gamma.start[1], "will be used for all gammas." )
gamma.start = rep(list(gamma.start[1]),length(coefList2s))
} else if (length(gamma.start) > length(coefList2s)){
warning(paste0("Number of starting values is greater than the number of parameters. Only the first ",
length(coefList2s)," values of gammas will be used."))
gamma.start = as.list(gamma.start[1:(length(coefList2s))])
} else {
gamma.start = as.list(gamma.start)
}
names(gamma.start) <- paste0("gamma",0:(length(gamma.start)-1))
if (is.null(e.start)==TRUE){
e.start = pre.u2
} else if (length(e.start)>1){
warning(paste0("Number of starting values has length > 1.
Only the first element will be used."))
e.start = e.start[1]
} else {
e.start = e.start
}
names(e.start) = c("pre.u2")
# Assemble the OpenBUGS model text: either generate it line by line from the
# priors and variable counts (default), or deparse a user-supplied model
# function (advanced=TRUE).
if (advanced==FALSE){
L1 <- paste0("model\n{\n\tfor (i in 1:N){","\n")
if (length(zList)==1){
L2 <- paste0("\t\tlogit(p[i]) <- beta0 + beta1*z[i]")
}else{
L2 <- paste0("\t\tlogit(p[i]) <- beta0+",paste0("beta",1:(length(zList)),"*z",1:(length(zList)),
"[i]",collapse="+"),"\n")
}
L3 <- paste0("\t\tx[i] ~ dbern(p[i])","\n")
if (length(xList)==1){
L4 <- paste0("\t\tmuY[i] <- gamma0+gamma1*p[i]")
}else{
L4 <- paste0("\t\tmuY[i] <- gamma0+gamma1*p[i]+",paste0("gamma",2:(length(xList)),"*x",
1:(length(xList)-1),"[i]",collapse="+"),"\n")
}
L5 <- paste0("\t\ty[i] ~ dt(muY[i], pre.u2, df)\n\t}\n")
# Normal priors for the stage-1 (beta) and stage-2 (gamma) coefficients.
LFormulasBeta <- do.call(paste0,lapply(1:(length(zList)+1),function(i){
paste0("\tbeta",i-1," ~ dnorm(",.alReplaceSciNotR(b0[i]),",",
.alReplaceSciNotR(B0[i]),")","\n")
}))
LFormulasGamma <- do.call(paste0,lapply(1:(length(xList)+1),function(i){
paste0("\tgamma",i-1," ~ dnorm(",.alReplaceSciNotR(g0[i]),
",",.alReplaceSciNotR(G0[i]),")","\n")
}))
# Gamma prior on the residual precision; uniform prior on the t d.f.
LN <- paste0("\tpre.u2 ~ dgamma(",.alReplaceSciNotR(e0),
",",.alReplaceSciNotR(E0),")\n\n")
Ldf <- paste0("\tdf ~ dunif(",.alReplaceSciNotR(v0),
",",.alReplaceSciNotR(V0),")\n\n")
# Map the raw BUGS parameter names to the reported names
# (s1.*, s2.*, var.e.s2, df.est).
tempParNamesOrig <- c(paste0("beta",0:length(zList)),
paste0("gamma",0:(length(xList))),
"1/pre.u2",
"df")
tempParNamesNew <- c("s1.intercept",paste0("s1.slope",1:length(zList)),
"s2.intercept",
paste0("s2.slope",1:length(xList)),
"var.e.s2",
"df.est")
LPara <- do.call(paste0,lapply(1:(length(tempParNamesOrig)),
function(j){
paste0("\t",tempParNamesNew[j]," <- ",tempParNamesOrig[j],"\n")
}
))
tmpModel <- paste0(L1,L2,L3,L4,L5,LFormulasBeta,LFormulasGamma,LN,Ldf,LPara,"}\n")
}else{
# Deparse the user's model function and rebrand its header line as a BUGS
# model block; the user must define the same monitored node names.
tmpModel <- capture.output(adv.model)
tmpModel[1] <- "model\n{"
tmpModel <- paste(tmpModel,collapse="\n")
tempParNamesNew <- c("s1.intercept",paste0("s1.slope",1:length(zList)),
"s2.intercept",
paste0("s2.slope",1:length(xList)),
"var.e.s2",
"df.est")
}
# Write the model text to a temporary file; forward slashes keep the path
# acceptable to OpenBUGS on Windows.
tempFileName <- tempfile("model")
tempFileName <- paste(tempFileName, "txt", sep = ".")
writeLines(tmpModel,con = tempFileName)
modelLoc <- gsub("\\\\", "/", tempFileName)
# Initial values for each MCMC chain.
inits<- function(){
c(beta.start,gamma.start,e.start,df=df.start)
}
parameters<- tempParNamesNew
# Run the sampler.  NOTE(review): DIC is hardcoded to TRUE here; the DIC
# formal argument above is never consulted.
output<-bugs(data,inits,parameters,modelLoc,
n.chains=as.vector(n.chains),n.thin=as.vector(n.thin),
n.burnin=as.integer(n.burnin),n.iter=as.integer(n.iter),
DIC=TRUE,debug=as.logical(debug),codaPkg=as.logical(codaPkg))
# NOTE(review): the function's value is whatever print() returns --
# presumably the fitted object invisibly (the examples rely on capturing
# it, e.g. model1$DIC); confirm for the installed R2OpenBUGS version.
print(output,digits.summary=3)
}
}
| /R/gts.nrobust.R | no_license | dingjshi/ALMOND | R | false | false | 18,737 | r | #' Apply the generalized Bayesian two-stage robust-based causal model with instrumental variables.
#'
#' @description The \code{gts.nrobust} function applies the generalized Bayesian two-stage
#' robust-based causal model to the categorical treatment data.
#' The model best suits the outcome data that contain outliers
#' and are ignorably missing (i.e., MCAR or MAR).
#'
#' @param formula An object of class formula: a symbolic description of the model to be fitted.
#' The details of the model specification are given under "Details".
#' @param data A dataframe with the variables to be used in the model.
#' @param advanced Logical; if FALSE (default), the model is specified using the formula argument,
#' if TRUE, self-defined models can be specified using the adv.model argument.
#' @param adv.model Specify the self-defined model. Used when advanced=TRUE.
#' @param b0 The mean hyperparameter of the normal distribution (prior distribution)
#' for the first-stage generalized causal model coefficients, i.e., coefficients for the instrumental variables.
#' This can either be a numerical value or a vector with dimensions equal to the number of coefficients
#' for the instrumental variables. If this takes a numerical value, then that values will
#' serve as the mean hyperparameter for all of the coefficients for the instrumental variables.
#' Default value of 0 is equivalent to a noninformative prior for the normal distributions.
#' Used when advanced=FALSE.
#' @param B0 The precision hyperparameter of the normal distribution (prior distribution)
#' for the first stage generalized causal model coefficients.
#' This can either be a numerical value or a vector with dimensions equal to the number of coefficients
#' for the instrumental variables. If this takes a numerical value, then that values will
#' serve as the precision hyperparameter for all of the coefficients for the instrumental variables.
#' Default value of 1.0E-6 is equivalent to a noninformative prior for the normal distributions.
#' Used when advanced=FALSE.
#' @param g0 The mean hyperparameter of the normal distribution (prior distribution)
#' for the second-stage generalized causal model coefficients,
#' i.e., coefficients for the treatment variable and other regression covariates.
#' This can either be a numerical value if there is only one treatment variable in the model,
#' or a vector if there is a treatment variable and multiple regression covariates,
#' with dimensions equal to the total number of coefficients for the treatment variable and covariates.
#' Default value of 0 is equivalent to a noninformative prior for the normal distributions.
#' Used when advanced=FALSE.
#' @param G0 The precision hyperparameter of the normal distribution (prior distribution)
#' for the second-stage generalized causal model coefficients.
#' This can either be a numerical value if there is only one treatment variable in the model,
#' or a vector if there is a treatment variable and multiple regression covariates,
#' with dimensions equal to the total number of coefficients for the treatment variable and covariates.
#' Default value of 1.0E-6 is equivalent to a noninformative prior for the normal distributions.
#' Used when advanced=FALSE.
#' @param e0 The location hyperparameter of the inverse Gamma distribution (prior for the scale parameter
#' of Student's t distribution on the model residual).
#' Default of 0.001 is equivalent to the noninformative prior for the inverse Gamma distribution.
#' @param E0 The shape hyperparameter of the inverse Gamma distribution (prior for the scale parameter
#' of Student's t distribution on the model residual).
#' Default of 0.001 is equivalent to the noninformative prior for the inverse Gamma distribution.
#' @param v0 The lower boundary hyperparameter of the uniform distribution (prior for the degrees of freedom
#' parameter of Student's t distribution).
#' @param V0 The upper boundary hyperparameter of the uniform distribution (prior for the degrees of freedom
#' parameter of Student's t distribution).
#' @param beta.start The starting values for the first-stage generalized causal model coefficients,
#' i.e., coefficients for the instrumental variables.
#' This can either be a numerical value or a column vector with dimensions
#' equal to the number of first-stage coefficients.
#' The default value of NA will use the IWLS (iteratively reweighted least squares) estimate
#' of first-stage coefficients as the starting value.
#' If this is a numerical value, that value will
#' serve as the starting value mean for all the first-stage beta coefficients.
#' @param gamma.start The starting values for the second-stage generalized causal model coefficients,
#' i.e., coefficients for the treatment variable and the model covariates.
#' This can either be a numerical value or a column vector with dimensions
#' equal to the number of second-stage coefficients.
#' The default value of NA will use the IWLS (iteratively reweighted least squares) estimate
#' of second-stage coefficients as the starting value.
#' If this is a numerical value, that value will
#' serve as the starting value mean for all the second-stage gamma coefficients.
#' @param e.start The starting value for the precision hyperparameter of the inverse gamma distribution
#' (prior for the scale parameter of Student's t distribution of the model residual).
#' The default value of NA will use the inverse of the residual variance from the
#' IWLS (iteratively reweighted least square) estimate of the second-stage model.
#' @param df.start The starting value for the degrees of freedom of Student's t distribution.
#' @param n.chains The number of Markov chains. The default is 1.
#' @param n.iter The number of total iterations per chain (including burnin). The default is 10000.
#' @param n.burnin Length of burn in, i.e., number of iterations to discard at the beginning.
#' Default is n.iter/2, that is, discarding the first half of the simulations.
#' @param n.thin The thinning rate. Must be a positive integer. The default is 1.
#' @param DIC Logical; if TRUE (default), compute deviance, pD, and DIC. The rule pD=Dbar-Dhat is used.
#' @param codaPkg Logical; if FALSE (default), an object is returned; if TRUE,
#' file names of the output are returned.
#'
#' @return
#' If \emph{codaPkg=FALSE}(default), returns an object containing summary statistics of
#' the saved parameters, including
#' \item{s1.intercept}{Estimate of the intercept from the first stage.}
#' \item{s1.slopeP}{Estimate of the pth slope from the first stage. }
#' \item{s2.intercept}{Estimate of the intercept from the second stage.}
#' \item{s2.slopeP}{Estimate of the pth slope from the second stage (the first slope is always
#' the \strong{LATE}).}
#' \item{var.e.s2}{Estimate of the residual variance at the second stage.}
#' \item{df.est}{Estimate of the degrees of freedom for the Student's t distribution.}
#' \item{DIC}{Deviance Information Criterion.}
#' If \emph{codaPkg=TRUE}, the returned value is the path for the output file
#' containing the Markov chain Monte Carlo output.
#'
#' @details
#' \enumerate{
#' \item{The formula takes the form \emph{response ~ terms|instrumental_variables}.}
#' \code{\link{gts.nnormal}} provides a detailed description of the formula rule.
#' \item{DIC is computed as \emph{mean(deviance)+pD}.}
#' \item{Prior distributions used in ALMOND.}
#' \itemize{
#' \item Generalized causal model coefficients at both stages: normal distributions.
#' \item The generalized causal model residual: Student's t distribution.
#' }
#' }
#'
#' @references
#' Gelman, A., Carlin, J.B., Stern, H.S., Rubin, D.B. (2003).
#' \emph{Bayesian data analysis}, 2nd edition. Chapman and Hall/CRC Press.
#'
#' Spiegelhalter, D. J., Thomas, A., Best, N. G., Gilks, W., & Lunn, D. (1996).
#' BUGS: Bayesian inference using Gibbs sampling.
#' \href{http://www.mrc-bsu.cam.ac.uk/bugs}{http://www.mrc-bsu.cam.ac.uk/bugs}, 19.
#'
#' @examples
#' \donttest{
#' # Run the model
#' model1 <- gts.nrobust(neighborhoodRating~voucherProgram|extraBedroom,data=simVoucher)
#'
#' # Run the model with the self-defined advanced feature
#' my.robust.model<- function(){
#' for (i in 1:N){
#' logit(p[i]) <- beta0 + beta1*z[i]
#' x[i] ~ dbern(p[i])
#' muY[i] <- gamma0 + gamma1*p[i]
#' y[i] ~ dt(muY[i], pre.u2, df)
#' }
#'
#' beta0 ~ dnorm(0,1)
#' beta1 ~ dnorm(1, 1)
#' gamma0 ~ dnorm(0, 1)
#' gamma1 ~ dnorm(.5, 1)
#'
#' pre.u2 ~ dgamma(.001, .001)
#'
#' df ~ dunif(0,100)
#'
#' s1.intercept <- beta0
#' s1.slope1 <- beta1
#' s2.intercept <- gamma0
#' s2.slope1 <- gamma1
#' df.est <- df
#' var.e.s2 <- 1/pre.u2
#' }
#'
#' model2 <- gts.nrobust(neighborhoodRating~voucherProgram|extraBedroom,data=simVoucher,
#' advanced=TRUE, adv.model=my.robust.model)
#'
#' # Extract the model DIC
#' model1$DIC
#'
#' # Extract the MCMC output
#' model3 <- gts.nrobust(neighborhoodRating~voucherProgram|extraBedroom,data=simVoucher,
#' codaPkg=TRUE)
#' }
#'
#' @export
# Fit the generalized Bayesian two-stage robust causal model: stage 1 is a
# logistic regression of the binary treatment on the instrument(s); stage 2
# regresses the outcome on the fitted treatment probability (plus any extra
# covariates) with a Student's-t residual. The model is written out as
# OpenBUGS code and estimated via R2OpenBUGS::bugs().
#
# NOTE(review): b0 defaults to 1 here although the roxygen documentation
# states a default of 0 -- confirm which is intended.
# NOTE(review): the DIC argument is accepted but never forwarded; the bugs()
# call below always uses DIC=TRUE.
# NOTE(review): beta.start/gamma.start/e.start use NULL (not NA, as the
# documentation says) as the "use the IWLS estimate" sentinel.
# (n.burnin's default references n.iter, which follows it in the signature;
# this is valid because R evaluates default arguments lazily.)
gts.nrobust<-function(formula,data,advanced=FALSE, adv.model,
b0=1,B0=1.0E-6, g0=0,G0=1.0E-6, e0=0.001,E0=0.001, v0=0,V0=100,
beta.start=NULL, gamma.start=NULL, e.start=NULL, df.start=5,
n.chains=1,n.burnin=floor(n.iter/2),n.iter=10000,n.thin=1,DIC,debug=FALSE,
codaPkg=FALSE){
# Format values of very small or very large magnitude in scientific (E)
# notation so they are written unambiguously into the BUGS model text.
# NOTE(review): uses the scalar operator `||`; fine for the scalar
# hyperparameters passed below, but would error on length > 1 input in
# R >= 4.3 -- confirm all call sites stay scalar.
.alReplaceSciNotR <- function(x,digits=5){
x[abs(x)<1e-3||abs(x)>1e+4] <-
formatC(x[abs(x)<1e-3||abs(x)>1e+4],
digits=digits,
format="E")
return(x)
}
require(Formula)
require(R2OpenBUGS)
# Split the two-part formula y ~ treatment_terms | instruments into the
# outcome y, treatment/covariate matrix x, and instrument matrix z.
formula1 <- formula(formula)
y <- model.response(model.frame(as.Formula(formula1),data=data,na.action=NULL))
x <- as.matrix(model.frame(as.Formula(formula1),data=data,na.action=NULL,rhs=1)[,-1])
z <- as.matrix(model.frame(as.Formula(formula1),data=data,na.action=NULL,rhs=2)[,-1])
N <- length(y)
# The first column of x is the treatment variable and must be binary.
if (length(levels(factor(x[,1])))!=2){
stop('The factor level of the treatment variable should be 2')
}else{
# One named list element per column, so each can be passed to OpenBUGS as a
# separate data vector (x, x1, x2, ... and z or z1, z2, ...).
xList <- lapply(1:ncol(x),function(i){x[,i]})
zList <- lapply(1:ncol(z),function(i){z[,i]})
# Recycle scalar prior hyperparameters to one value per coefficient;
# truncate (with a warning) if more values than coefficients were supplied.
if(length(b0)==1){
b0 <- rep(b0,length(zList)+1)
} else if(length(b0)>(length(zList)+1)){
warning(paste0("Number of priors is greater than number of parameters. Only first ",
length(zList)+1," values of b0 will be used."))
b0 <- b0[1:(length(zList)+1)]
}
if(length(B0)==1){
B0 <- rep(B0,length(zList)+1)
} else if(length(B0)>(length(zList)+1)){
warning(paste0("Number of priors is greater than number of parameters. Only first ",
length(zList)+1," values of B0 will be used."))
B0 <- B0[1:(length(zList)+1)]
}
if(length(g0)==1){
g0 <- rep(g0,length(xList)+1)
} else if(length(g0)>(length(xList)+1)){
warning(paste0("Number of priors is greater than number of parameters. Only first ",
length(xList)+1," values of g0 will be used."))
g0 <- g0[1:(length(xList)+1)]
}
if(length(G0)==1){
G0 <- rep(G0,length(xList)+1)
} else if(length(G0)>(length(xList)+1)){
warning(paste0("Number of priors is greater than number of parameters. Only first ",
length(xList)+1," values of G0 will be used."))
G0 <- G0[1:(length(xList)+1)]
}
# Name the data vectors exactly as referenced inside the generated BUGS model.
if (length(xList)==1){
names(xList) <- "x"
}else{
names(xList) <- c("x",paste0("x",1:(length(xList)-1)))
}
if (length(zList)==1){
names(zList) <- "z"
}else{
names(zList) <- paste0("z",1:length(zList))
}
# From here on `data` is the list handed to OpenBUGS (it shadows the
# data-frame argument); the glm()/lm() starting-value fits below also use
# this list.
data = c(list("N"=N,"y"=y),xList,zList)
# NOTE(review): lm.data is assembled and renamed but never used afterwards.
lm.data = as.data.frame(cbind(y,x,z))
if (length(xList)==1){
xnames = "x"
}else{
xnames = c("x",paste0("x",1:(length(xList)-1)))
}
if (length(zList)==1){
znames = "z"
}else{
znames = paste0("z",1:length(zList))
}
colnames(lm.data) = c("y",xnames,znames)
# Stage-1 starting values (IWLS): logistic fit of treatment on the
# instrument(s); xpred holds the stage-1 linear predictor for stage 2.
# NOTE(review): with multiple instruments an lm() (not a binomial glm) is
# used to obtain the starting values -- presumably a rough approximation;
# confirm this is intended.
if (length(zList)==1){
coefList1s = as.list(coefficients(summary(glm(x~z,data=data,family='binomial',na.action=na.omit)))[,1])
names(coefList1s) = c("beta0","beta1")
xpred <- predict(glm(x~z,data=data,family='binomial',na.action=na.exclude))
}else{
coefList1s = as.list(coefficients(summary(lm(formula(paste0("x~",
paste0("z",1:length(zList),collapse="+"))),data=data,na.action=na.omit)))[,1])
names(coefList1s) <- paste0("beta",0:(length(coefList1s)-1))
xpred <- predict(lm(formula(paste0("x~",paste0("z",1:length(zList),collapse='+'))),data=data,
na.action=na.exclude))
}
# Stage-2 starting values: regress y on the stage-1 prediction (plus any
# covariates); pre.u2 is the starting residual precision.
# NOTE(review): 1/sigma is the reciprocal residual SD, while the
# documentation describes the inverse residual variance (1/sigma^2) --
# confirm which is intended.
if (length(xList)==1){
coefList2s = as.list(coefficients(summary(lm(y~xpred,na.action=na.omit)))[,1])
names(coefList2s) <- c("gamma0","gamma1")
pre.u2 = 1/(summary(lm(y~xpred,na.action=na.omit))$sigma)
}else{
coefList2s = as.list(coefficients(summary(lm(formula(paste0("y~",
paste0("xpred+",paste0("x",1:(length(xList)-1),collapse="+")))),
data=data,na.action=na.omit)))[,1])
names(coefList2s) <- paste0("gamma",0:(length(coefList2s)-1))
pre.u2 = 1/summary(lm(formula(paste0("y~",
paste0("xpred+",paste0("x",1:(length(xList)-1),collapse="+")))),
data=data,na.action=na.omit))$sigma
}
names(pre.u2) = c("pre.u2")
# User-supplied starting values override the IWLS ones; a single value is
# recycled and an over-long vector truncated, with a warning either way.
if (is.null(beta.start)==TRUE){
beta.start = coefList1s
} else if (length(beta.start) < length(coefList1s)){
warning("Number of starting values is fewer than the number of parameters. The first
element of beta.start ",beta.start[1], " will be used for all betas." )
beta.start = rep(list(beta.start[1]),length(coefList1s))
} else if (length(beta.start) > length(coefList1s)){
warning(paste0("Number of starting values is greater than the number of parameters. Only the first ",
length(coefList1s)," values of betas will be used."))
beta.start = as.list(beta.start[1:(length(coefList1s))])
} else{
beta.start = as.list(beta.start)
}
names(beta.start) = paste0("beta",0:(length(beta.start)-1))
if (is.null(gamma.start)==TRUE){
gamma.start = coefList2s
} else if (length(gamma.start) < length(coefList2s)){
warning("Number of starting values is fewer than the number of parameters. The first
element ",gamma.start[1], "will be used for all gammas." )
gamma.start = rep(list(gamma.start[1]),length(coefList2s))
} else if (length(gamma.start) > length(coefList2s)){
warning(paste0("Number of starting values is greater than the number of parameters. Only the first ",
length(coefList2s)," values of gammas will be used."))
gamma.start = as.list(gamma.start[1:(length(coefList2s))])
} else {
gamma.start = as.list(gamma.start)
}
names(gamma.start) <- paste0("gamma",0:(length(gamma.start)-1))
if (is.null(e.start)==TRUE){
e.start = pre.u2
} else if (length(e.start)>1){
warning(paste0("Number of starting values has length > 1.
Only the first element will be used."))
e.start = e.start[1]
} else {
e.start = e.start
}
names(e.start) = c("pre.u2")
# Assemble the OpenBUGS model text: either generate it line by line from the
# priors and variable counts (default), or deparse a user-supplied model
# function (advanced=TRUE).
if (advanced==FALSE){
L1 <- paste0("model\n{\n\tfor (i in 1:N){","\n")
if (length(zList)==1){
L2 <- paste0("\t\tlogit(p[i]) <- beta0 + beta1*z[i]")
}else{
L2 <- paste0("\t\tlogit(p[i]) <- beta0+",paste0("beta",1:(length(zList)),"*z",1:(length(zList)),
"[i]",collapse="+"),"\n")
}
L3 <- paste0("\t\tx[i] ~ dbern(p[i])","\n")
if (length(xList)==1){
L4 <- paste0("\t\tmuY[i] <- gamma0+gamma1*p[i]")
}else{
L4 <- paste0("\t\tmuY[i] <- gamma0+gamma1*p[i]+",paste0("gamma",2:(length(xList)),"*x",
1:(length(xList)-1),"[i]",collapse="+"),"\n")
}
L5 <- paste0("\t\ty[i] ~ dt(muY[i], pre.u2, df)\n\t}\n")
# Normal priors for the stage-1 (beta) and stage-2 (gamma) coefficients.
LFormulasBeta <- do.call(paste0,lapply(1:(length(zList)+1),function(i){
paste0("\tbeta",i-1," ~ dnorm(",.alReplaceSciNotR(b0[i]),",",
.alReplaceSciNotR(B0[i]),")","\n")
}))
LFormulasGamma <- do.call(paste0,lapply(1:(length(xList)+1),function(i){
paste0("\tgamma",i-1," ~ dnorm(",.alReplaceSciNotR(g0[i]),
",",.alReplaceSciNotR(G0[i]),")","\n")
}))
# Gamma prior on the residual precision; uniform prior on the t d.f.
LN <- paste0("\tpre.u2 ~ dgamma(",.alReplaceSciNotR(e0),
",",.alReplaceSciNotR(E0),")\n\n")
Ldf <- paste0("\tdf ~ dunif(",.alReplaceSciNotR(v0),
",",.alReplaceSciNotR(V0),")\n\n")
# Map the raw BUGS parameter names to the reported names
# (s1.*, s2.*, var.e.s2, df.est).
tempParNamesOrig <- c(paste0("beta",0:length(zList)),
paste0("gamma",0:(length(xList))),
"1/pre.u2",
"df")
tempParNamesNew <- c("s1.intercept",paste0("s1.slope",1:length(zList)),
"s2.intercept",
paste0("s2.slope",1:length(xList)),
"var.e.s2",
"df.est")
LPara <- do.call(paste0,lapply(1:(length(tempParNamesOrig)),
function(j){
paste0("\t",tempParNamesNew[j]," <- ",tempParNamesOrig[j],"\n")
}
))
tmpModel <- paste0(L1,L2,L3,L4,L5,LFormulasBeta,LFormulasGamma,LN,Ldf,LPara,"}\n")
}else{
# Deparse the user's model function and rebrand its header line as a BUGS
# model block; the user must define the same monitored node names.
tmpModel <- capture.output(adv.model)
tmpModel[1] <- "model\n{"
tmpModel <- paste(tmpModel,collapse="\n")
tempParNamesNew <- c("s1.intercept",paste0("s1.slope",1:length(zList)),
"s2.intercept",
paste0("s2.slope",1:length(xList)),
"var.e.s2",
"df.est")
}
# Write the model text to a temporary file; forward slashes keep the path
# acceptable to OpenBUGS on Windows.
tempFileName <- tempfile("model")
tempFileName <- paste(tempFileName, "txt", sep = ".")
writeLines(tmpModel,con = tempFileName)
modelLoc <- gsub("\\\\", "/", tempFileName)
# Initial values for each MCMC chain.
inits<- function(){
c(beta.start,gamma.start,e.start,df=df.start)
}
parameters<- tempParNamesNew
# Run the sampler.  NOTE(review): DIC is hardcoded to TRUE here; the DIC
# formal argument above is never consulted.
output<-bugs(data,inits,parameters,modelLoc,
n.chains=as.vector(n.chains),n.thin=as.vector(n.thin),
n.burnin=as.integer(n.burnin),n.iter=as.integer(n.iter),
DIC=TRUE,debug=as.logical(debug),codaPkg=as.logical(codaPkg))
# NOTE(review): the function's value is whatever print() returns --
# presumably the fitted object invisibly (the examples rely on capturing
# it, e.g. model1$DIC); confirm for the installed R2OpenBUGS version.
print(output,digits.summary=3)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enaControl.R
\name{enaControl}
\alias{enaControl}
\title{Control Analyses of Ecological Networks}
\usage{
enaControl(x, zero.na = TRUE, balance.override = FALSE)
}
\arguments{
\item{x}{A network object.}
\item{zero.na}{Makes undefined (NA) values zero.}
\item{balance.override}{Turns off balancing and checks of network balance.}
}
\value{
\item{CN}{Control matrix using flow values.} \item{CQ}{Control
matrix using storage values.} \item{CR}{Schramski Control Ratio Matrix}
\item{CD}{Schramski Control Difference Matrix} \item{CA}{Control Allocation
Matrix} \item{CDep}{Control Dependency Matrix} \item{sc}{Schramski System
Control vector} \item{scp}{Schramski system control vector as percent of
total control} \item{ns}{vector of network-level summary statistics}
}
\description{
Analyses of the control relationships among the nodes in ecological networks.
}
\examples{
data(troModels)
enaControl(troModels[[6]])
}
\references{
Fath, B. D., Borrett, S. R. 2006. A MATLAB function for Network
Environ Analysis. Environmental Modelling & Software 21:375-405
Schramski, J.R., Gattie, D.K., Patten, B.C., Borrett S.R., Fath, B.D.,
Thomas, C.R., and Whipple, S.J. 2006. Indirect effects and distributed
control in ecosystems: Distributed control in the environ networks of a
seven compartment model of nitrogen flow in the Neuse River Estuary, USA
Steady-state analysis. Ecological Modelling 194:189-201
Schramski, J.R., Gattie, D.K., Patten, B.C., Borrett S.R., Fath, B.D., and
Whipple, S.J. 2007. Indirect effects and distributed control in ecosystems:
Distributed control in the environ networks of a seven compartment model of
nitrogen flow in the Neuse River Estuary, USA Time series analysis.
Ecological Modelling 206:18-30
Chen, S., Fath, B.D., Chen, B. 2011. Information-based network environ
analysis: a system perspective for ecological risk assessment. Ecol. Ind.
11:1664-1672.
Chen, S. and Chen, B. 2015. Urban energy consumption: Different insights
from energy flow analysis, input-output analysis and ecological network
analysis. Applied Energy 138:99-107.
}
\seealso{
\code{\link{enaStorage}}
}
\author{
Matthew K. Lau Stuart R. Borrett Pawandeep Singh
}
| /man/enaControl.Rd | no_license | SEELab/enaR | R | false | true | 2,261 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enaControl.R
\name{enaControl}
\alias{enaControl}
\title{Control Analyses of Ecological Networks}
\usage{
enaControl(x, zero.na = TRUE, balance.override = FALSE)
}
\arguments{
\item{x}{A network object.}
\item{zero.na}{Makes undefined (NA) values zero.}
\item{balance.override}{Turns off balancing and checks of network balance.}
}
\value{
\item{CN}{Control matrix using flow values.} \item{CQ}{Control
matrix using storage values.} \item{CR}{Schramski Control Ratio Matrix}
\item{CD}{Schramski Control Difference Matrix} \item{CA}{Control Allocation
Matrix} \item{CDep}{Control Dependency Matrix} \item{sc}{Schramski System
Control vector} \item{scp}{Schramski system control vector as percent of
total control} \item{ns}{vector of network-level summary statistics}
}
\description{
Analyses of the control relationships amongst the nodes in ecological networks.
}
\examples{
data(troModels)
enaControl(troModels[[6]])
}
\references{
Fath, B. D., Borrett, S. R. 2006. A MATLAB function for Network
Environ Analysis. Environmental Modelling & Software 21:375-405
Schramski, J.R., Gattie, D.K., Patten, B.C., Borrett S.R., Fath, B.D.,
Thomas, C.R., and Whipple, S.J. 2006. Indirect effects and distributed
control in ecosystems: Distributed control in the environ networks of a
seven compartment model of nitrogen flow in the Neuse River Estuary, USA
Steady-state analysis. Ecological Modelling 194:189-201
Schramski, J.R., Gattie, D.K., Patten, B.C., Borrett S.R., Fath, B.D., and
Whipple, S.J. 2007. Indirect effects and distributed control in ecosystems:
Distributed control in the environ networks of a seven compartment model of
nitrogen flow in the Neuse River Estuary, USA Time series analysis.
Ecological Modelling 206:18-30
Chen, S., Fath, B.D., Chen, B. 2011. Information-based network environ
analysis: a system perspective for ecological risk assessment. Ecol. Ind.
11:1664-1672.
Chen, S. and Chen, B. 2015. Urban energy consumption: Different insights
from energy flow analysis, input-output analysis and ecological network
analysis. Applied Energy 138:99-107.
}
\seealso{
\code{\link{enaStorage}}
}
\author{
Matthew K. Lau Stuart R. Borrett Pawandeep Singh
}
|
# Dependencies -----------------------------------------------------------
# (order matters: tidyverse is attached last so its verbs mask raster's)
library(googleway)
library(raster)
library(readstata13)
library(sp)
library(stringdist)
library(tidyverse)

# Load the incident records, dropping rows with no reporting police station.
accidents <- read.dta13("matatu_data/incidents.dta") %>% filter(p_station != "")

# Column schema of the GeoNames gazetteer dump (tab-separated, headerless).
geonames_cols <- c(
  "geonameid", "name", "asciiname", "alternatenames", "latitude", "longitude",
  "featureclass", "featurecode", "countrycode", "cc2", "adm1", "adm2", "adm3",
  "adm4", "population", "elevation", "dem", "timezone", "modificationdate"
)
geonames <- read_tsv("supporting_files/KE/KE.txt", col_names = geonames_cols)

# How many unique police stations appear in the incident data?
matatu_police_stations <- unique(accidents$p_station)
length(matatu_police_stations)
#### Geocode Police Stations using the Google Places API

# Look up a single police-station name with the Google Places API.
# Returns a one-row tibble (p_station, long, lat); the coordinates are NA
# when the API reports no match.
geocode_stations <- function(x) {
  hit <- google_places(x, place_type = "police")$results$geometry$location[1, ]
  if (is.null(hit)) {
    return(tibble(p_station = x, long = NA, lat = NA))
  }
  tibble(p_station = x, long = hit$lng, lat = hit$lat)
}
# Geocode every station name. Binding once at the end replaces the original
# rbind()-inside-a-loop pattern, which re-copies the accumulated data frame
# on every iteration (quadratic cost); the result is the same tibble.
geocoded_stations <- bind_rows(lapply(matatu_police_stations, geocode_stations))
## Now let's see if the stations that weren't geocoded can be matched with geocoded stations
stations_geo_index <- which(!is.na(geocoded_stations$long))
stations_geo <- geocoded_stations[stations_geo_index, "p_station"][[1]]
stations_no_geo <- geocoded_stations[-stations_geo_index, "p_station"][[1]]

## Find fuzzy matches for police stations that don't have exact matches.
# Return the coordinates of the geocoded station whose name is within
# Levenshtein distance 3 of `station`, together with the original name
# (coordinates are NA when no geocoded name is close enough).
match_stations <- function(station) {
  # FIX: dropped the unused `maxDist <- length(str_split(...))` local -- the
  # effective threshold has always been the literal maxDist = 3 below.
  matched <- amatch(station, stations_geo, method = "lv", maxDist = 3)
  matched_station <- stations_geo[matched]
  geocoded_matched <- geocoded_stations[
    match(matched_station, geocoded_stations$p_station),
    c("p_station", "long", "lat")
  ]
  cbind(geocoded_matched, original_station = station)
}

matched_geocoded_stations <- tibble()
for (i in stations_no_geo) {
  matched <- match_stations(i)
  matched_geocoded_stations <- rbind(matched_geocoded_stations, matched)
  rm(matched)
}
## Manually reviewed: keep only the fuzzy matches that make sense.
matches2keep <- c(66, 104, 200)
matched_geocoded_stations_verified <- matched_geocoded_stations[matches2keep, ]
matched_geocoded_stations_names_unverified <-
  as.character(matched_geocoded_stations[-matches2keep, "original_station"])

## Attach coordinates: exact geocoding first, then the verified near matches.
# coalesce(a, b) is equivalent to the original two-branch case_when
# (take a where it is non-NA, otherwise b).
accidents_geo <- accidents %>%
  left_join(geocoded_stations[stations_geo_index, ]) %>%
  left_join(matched_geocoded_stations_verified,
            by = c("p_station" = "original_station")) %>%
  mutate(
    long = coalesce(long.x, long.y),
    lat = coalesce(lat.x, lat.y)
  ) %>%
  dplyr::select(-c(long.x, lat.x, long.y, lat.y)) %>%
  rename(matched_station = p_station.y)
# Find the police stations ("PP" feature code) in the GeoNames gazetteer.
geonames_police_stations <- geonames %>% filter(featurecode == "PP")
length(geonames_police_stations$name)
geonames_stations <- geonames_police_stations$name

# Check whether the still-unmatched stations can be matched against GeoNames.
# BUG FIX: amatch() returns an index into `geonames_stations`, but the
# original code used that index to subscript
# `matched_geocoded_stations_names_unverified`, so the follow-up gazetteer
# lookup was against an unrelated name (and almost always returned NA rows).
geonames_match_stations <- function(station) {
  station <- tolower(gsub("(POLICE STATION)|(POLICE POST)", "", station))
  matched <- amatch(station, geonames_stations, method = "lv", maxDist = 3)
  matched_station <- geonames_stations[matched]
  geocoded_matched <- geonames_police_stations[
    match(matched_station, geonames_police_stations$name),
    c("name", "longitude", "latitude")
  ]
  cbind(geocoded_matched, original_station = station)
}

matched_geocoded_geonames_stations <- tibble()
for (i in matched_geocoded_stations_names_unverified) {
  matched <- geonames_match_stations(i)
  matched_geocoded_geonames_stations <- rbind(matched_geocoded_geonames_stations, matched)
  rm(matched)
}
#### Add a counties column
# Share of incidents that were successfully geocoded
# (sum(!is.na(x)) is the idiomatic form of length(which(!is.na(x)))).
sum(!is.na(accidents_geo$long)) / nrow(accidents_geo)
ke_country <- getData("GADM", level = 0, country = "KEN")
ke_counties <- getData("GADM", level = 1, country = "KEN")
plot(ke_counties)
points(accidents_geo[, c("long", "lat")])
names(accidents_geo)
# Keep only geocoded incidents and the columns needed downstream.
accidents_sp <- na.omit(accidents_geo[, c("p_station", "long", "lat",
                                          "incidentdate", "passengers", "maincategory",
                                          "injuredpersons")])
accidents_geo2 <- accidents_sp
coordinates(accidents_sp) <- ~ long + lat
projection(accidents_sp) <- projection(ke_counties)
# Point-in-polygon overlay, one county at a time.
accidents_geo2$county <- NA
counties <- c("Nairobi", "Kiambu", "Murang'a", "Kajiado", "Machakos")
for (county in counties) {
  ke_county <- ke_counties[ke_counties@data$NAME_1 == county, ]
  over_accidents <- over(accidents_sp, ke_county)
  accidents_geo2[which(!is.na(over_accidents$ID_0)), "county"] <- county
  rm(over_accidents)
}
#### Add a constituency column
ke_constituencies <- getData("GADM", level = 2, country = "KEN")
nairobi_constituencies <- ke_constituencies[ke_constituencies@data$NAME_1 %in% counties, ]
constituencies <- nairobi_constituencies@data$NAME_2
# Initialize explicitly, for consistency with the county column above
# (the original relied on the first loop assignment creating the column).
accidents_geo2$constituency <- NA
for (constituency in constituencies) {
  nairobi_constituency <- nairobi_constituencies[nairobi_constituencies@data$NAME_2 == constituency, ]
  over_accidents <- over(accidents_sp, nairobi_constituency)
  accidents_geo2[which(!is.na(over_accidents$ID_0)), "constituency"] <- constituency
  rm(over_accidents)
}
#### save final data for matatu accidents
write_csv(accidents_geo, "matatu_data/geocoded_unprocessed_incidents.csv")
write_csv(accidents_geo2, "matatu_data/processed_incidents.csv")
| /scripts/matatu_processing.R | no_license | robtenorio/classify-accidents | R | false | false | 5,757 | r | library(googleway)
library(raster)
library(readstata13)
library(sp)
library(stringdist)
library(tidyverse)

# Load the incident records, dropping rows with no reporting police station.
accidents <- read.dta13("matatu_data/incidents.dta") %>% filter(p_station != "")

# Column schema of the GeoNames gazetteer dump (tab-separated, headerless).
geonames_cols <- c(
  "geonameid", "name", "asciiname", "alternatenames", "latitude", "longitude",
  "featureclass", "featurecode", "countrycode", "cc2", "adm1", "adm2", "adm3",
  "adm4", "population", "elevation", "dem", "timezone", "modificationdate"
)
geonames <- read_tsv("supporting_files/KE/KE.txt", col_names = geonames_cols)

# How many unique police stations appear in the incident data?
matatu_police_stations <- unique(accidents$p_station)
length(matatu_police_stations)

#### Geocode Police Stations using the Google Places API
# Look up a single police-station name; returns a one-row tibble
# (p_station, long, lat), with NA coordinates when the API has no match.
geocode_stations <- function(x) {
  hit <- google_places(x, place_type = "police")$results$geometry$location[1, ]
  if (is.null(hit)) {
    return(tibble(p_station = x, long = NA, lat = NA))
  }
  tibble(p_station = x, long = hit$lng, lat = hit$lat)
}

# bind_rows() over a list replaces the original rbind()-in-a-loop (quadratic).
geocoded_stations <- bind_rows(lapply(matatu_police_stations, geocode_stations))

## Now let's see if the stations that weren't geocoded can be matched with geocoded stations
stations_geo_index <- which(!is.na(geocoded_stations$long))
stations_geo <- geocoded_stations[stations_geo_index, "p_station"][[1]]
stations_no_geo <- geocoded_stations[-stations_geo_index, "p_station"][[1]]

## Find fuzzy matches for police stations that don't have exact matches.
# FIX: dropped the unused `maxDist <- length(str_split(...))` local; the
# effective threshold has always been the literal maxDist = 3.
match_stations <- function(station) {
  matched <- amatch(station, stations_geo, method = "lv", maxDist = 3)
  matched_station <- stations_geo[matched]
  geocoded_matched <- geocoded_stations[
    match(matched_station, geocoded_stations$p_station),
    c("p_station", "long", "lat")
  ]
  cbind(geocoded_matched, original_station = station)
}

matched_geocoded_stations <- tibble()
for (i in stations_no_geo) {
  matched <- match_stations(i)
  matched_geocoded_stations <- rbind(matched_geocoded_stations, matched)
  rm(matched)
}

## Manually reviewed: keep only the fuzzy matches that make sense.
matches2keep <- c(66, 104, 200)
matched_geocoded_stations_verified <- matched_geocoded_stations[matches2keep, ]
matched_geocoded_stations_names_unverified <-
  as.character(matched_geocoded_stations[-matches2keep, "original_station"])

## Attach coordinates: exact geocoding first, then the verified near matches.
accidents_geo <- accidents %>%
  left_join(geocoded_stations[stations_geo_index, ]) %>%
  left_join(matched_geocoded_stations_verified,
            by = c("p_station" = "original_station")) %>%
  mutate(
    long = coalesce(long.x, long.y),
    lat = coalesce(lat.x, lat.y)
  ) %>%
  dplyr::select(-c(long.x, lat.x, long.y, lat.y)) %>%
  rename(matched_station = p_station.y)

# Find the police stations ("PP" feature code) in the GeoNames gazetteer.
geonames_police_stations <- geonames %>% filter(featurecode == "PP")
length(geonames_police_stations$name)
geonames_stations <- geonames_police_stations$name

# BUG FIX: amatch() indexes `geonames_stations`; the original subscripted
# `matched_geocoded_stations_names_unverified` with that index instead.
geonames_match_stations <- function(station) {
  station <- tolower(gsub("(POLICE STATION)|(POLICE POST)", "", station))
  matched <- amatch(station, geonames_stations, method = "lv", maxDist = 3)
  matched_station <- geonames_stations[matched]
  geocoded_matched <- geonames_police_stations[
    match(matched_station, geonames_police_stations$name),
    c("name", "longitude", "latitude")
  ]
  cbind(geocoded_matched, original_station = station)
}

matched_geocoded_geonames_stations <- tibble()
for (i in matched_geocoded_stations_names_unverified) {
  matched <- geonames_match_stations(i)
  matched_geocoded_geonames_stations <- rbind(matched_geocoded_geonames_stations, matched)
  rm(matched)
}

#### Add a counties column
sum(!is.na(accidents_geo$long)) / nrow(accidents_geo)
ke_country <- getData("GADM", level = 0, country = "KEN")
ke_counties <- getData("GADM", level = 1, country = "KEN")
plot(ke_counties)
points(accidents_geo[, c("long", "lat")])
names(accidents_geo)
accidents_sp <- na.omit(accidents_geo[, c("p_station", "long", "lat",
                                          "incidentdate", "passengers", "maincategory",
                                          "injuredpersons")])
accidents_geo2 <- accidents_sp
coordinates(accidents_sp) <- ~ long + lat
projection(accidents_sp) <- projection(ke_counties)
# Point-in-polygon overlay, one county at a time.
accidents_geo2$county <- NA
counties <- c("Nairobi", "Kiambu", "Murang'a", "Kajiado", "Machakos")
for (county in counties) {
  ke_county <- ke_counties[ke_counties@data$NAME_1 == county, ]
  over_accidents <- over(accidents_sp, ke_county)
  accidents_geo2[which(!is.na(over_accidents$ID_0)), "county"] <- county
  rm(over_accidents)
}

#### Add a constituency column
ke_constituencies <- getData("GADM", level = 2, country = "KEN")
nairobi_constituencies <- ke_constituencies[ke_constituencies@data$NAME_1 %in% counties, ]
constituencies <- nairobi_constituencies@data$NAME_2
accidents_geo2$constituency <- NA
for (constituency in constituencies) {
  nairobi_constituency <- nairobi_constituencies[nairobi_constituencies@data$NAME_2 == constituency, ]
  over_accidents <- over(accidents_sp, nairobi_constituency)
  accidents_geo2[which(!is.na(over_accidents$ID_0)), "constituency"] <- constituency
  rm(over_accidents)
}

#### save final data for matatu accidents
write_csv(accidents_geo, "matatu_data/geocoded_unprocessed_incidents.csv")
write_csv(accidents_geo2, "matatu_data/processed_incidents.csv")
|
library(pracma)

# 10.5.1
# (c) ii -- Pearson correlation of (U1, U2) under the Clayton copula and
# under its survival copula.
# NOTE: `**` is a deprecated, undocumented alias for `^` in R; the
# exponentiation operator `^` is used throughout instead.
alpha <- 5
C.clayton <- function(u, v) (u ^ (-alpha) + v ^ (-alpha) - 1) ^ (-1 / alpha)
# Survival (flipped) Clayton copula.
C.clayton.bar <- function(u, v) u + v - 1 + C.clayton(1 - u, 1 - v)

# U_1, U_2: E[U1 U2] via the joint survival function, then Pearson's rho
# (Var of a Uniform(0, 1) is 1/12).
Fu1u2 <- function(u, v) C.clayton(u, v)
Fu1u2.bar <- function(u, v) 1 - u - v + Fu1u2(u, v)
Eu1u2 <- quad2d(Fu1u2.bar, 0, 1, 0, 1)
covu1u2 <- Eu1u2 - 0.5 ^ 2
covu1u2 / sqrt(1 / 12 * 1 / 12)

# U_1', U_2': same computation under the survival copula.
Fu1u2 <- function(u, v) C.clayton.bar(u, v)
Fu1u2.bar <- function(u, v) 1 - u - v + Fu1u2(u, v)
Eu1u2 <- quad2d(Fu1u2.bar, 0, 1, 0, 1)
covu1u2 <- Eu1u2 - 0.5 ^ 2
covu1u2 / sqrt(1 / 12 * 1 / 12)

# (c) iii -- lognormal margins with E[X] = 1, Var[X] = 4.
EX <- 1
VX <- 4
mean(rlnorm(1000000, - 0.5 * log(5), sqrt(log(5))))  # sanity check: ~EX
var(rlnorm(1000000, - 0.5 * log(5), sqrt(log(5))))   # sanity check: ~VX
F1 <- function(x) plnorm(x, - 0.5 * log(5), sqrt(log(5)))
F2 <- function(x) plnorm(x, - 0.5 * log(5), sqrt(log(5)))

# Pearson's rho for (X1, X2) under the Clayton copula...
Fx1x2 <- function(x, y) C.clayton(F1(x), F2(y))
Fx1x2.bar <- function(x, y) 1 - F1(x) - F2(y) + Fx1x2(x, y)
Ex1x2 <- quad2d(Fx1x2.bar, 0, 100, 0, 100)
covx1x2 <- Ex1x2 - EX ^ 2
covx1x2 / sqrt(VX ^ 2)
# ...and under the survival copula.
Fx1x2 <- function(x, y) C.clayton.bar(F1(x), F2(y))
Fx1x2.bar <- function(x, y) 1 - F1(x) - F2(y) + Fx1x2(x, y)
Ex1x2 <- quad2d(Fx1x2.bar, 0, 100, 0, 100)
covx1x2 <- Ex1x2 - EX ^ 2
covx1x2 / sqrt(VX ^ 2)
# (c) iv -- simulate from the Clayton copula via the gamma frailty
# representation: Theta ~ Gamma(1/alpha), Y_i ~ Exp(Theta),
# U_i = (1 + Y_i)^(-1/alpha).  (`byrow = T` -> TRUE; `**` -> `^`.)
nsim <- 1000000
set.seed(2017)
vV <- matrix(runif(nsim * 3), nsim, 3, byrow = TRUE)
vTheta <- qgamma(vV[, 1], 1 / alpha, 1)
vY <- sapply(1:2, function(t) qexp(vV[, t + 1], vTheta))
vU <- (1 + vY) ^ (-1 / alpha)
# Empirical Pearson correlation of (U1, U2).
(mean(vU[, 1] * vU[, 2]) - prod(colMeans(vU))) / sqrt(var(vU[, 1]) * var(vU[, 2]))

# (c) v -- the antithetic pair (U1', U2') follows the survival copula.
vU.prime <- 1 - vU
(mean(vU.prime[, 1] * vU.prime[, 2]) - prod(colMeans(vU.prime))) / sqrt(var(vU.prime[, 1]) * var(vU.prime[, 2]))

# (c) vi -- transform to the lognormal margins.
X1X2 <- qlnorm(vU, - 0.5 * log(5), sqrt(log(5)))
X1X2.prime <- qlnorm(vU.prime, - 0.5 * log(5), sqrt(log(5)))

# (c) vii -- aggregate sums.
S <- rowSums(X1X2)
S.prime <- rowSums(X1X2.prime)
# plot(ecdf(S))
# plot(ecdf(S.prime), add = TRUE)

# (c) viii -- empirical VaR and TVaR at several confidence levels.
(VaRS <- sapply(c(0.9, 0.99, 0.999, 0.9999), function(t) sort(S)[t * nsim]))
(VaRS.prime <- sapply(c(0.9, 0.99, 0.999, 0.9999), function(t) sort(S.prime)[t * nsim]))
(TVaRS <- sapply(VaRS, function(t) mean(S[S > t])))
# BUG FIX: this result was previously assigned to TVaRS as well, silently
# overwriting the TVaR of S computed on the line above.
(TVaRS.prime <- sapply(VaRS.prime, function(t) mean(S.prime[S.prime > t])))

# Redo the computations, for both hypotheses, with alpha such that
# Pearson's rho equals 0.5.
# rhox1x2(): Pearson correlation of (X1, X2) under the Clayton copula as a
# function of the dependence parameter alpha.
rhox1x2 <- function(alpha) {
  C.clayton <- function(u, v) (u ^ (-alpha) + v ^ (-alpha) - 1) ^ (-1 / alpha)
  Fx1x2 <- function(x, y) C.clayton(F1(x), F2(y))
  Fx1x2.bar <- function(x, y) 1 - F1(x) - F2(y) + Fx1x2(x, y)
  Ex1x2 <- quad2d(Fx1x2.bar, 0, 100, 0, 100)
  covx1x2 <- Ex1x2 - EX ^ 2
  covx1x2 / sqrt(VX ^ 2)
}
# Numerically invert rhox1x2 on (0, 20).
TrouverAlpha <- function(rho) {
  optimize(function(x) abs(rhox1x2(x) - rho), c(0, 20))$minimum
}
(alpha <- TrouverAlpha(0.5))
# (c) iv -- repeat the simulation with the calibrated alpha.
nsim <- 1000000
set.seed(2017)
vV <- matrix(runif(nsim * 3), nsim, 3, byrow = TRUE)
vTheta <- qgamma(vV[, 1], 1 / alpha, 1)
vY <- sapply(1:2, function(t) qexp(vV[, t + 1], vTheta))
vU <- (1 + vY) ^ (-1 / alpha)
(mean(vU[, 1] * vU[, 2]) - prod(colMeans(vU))) / sqrt(var(vU[, 1]) * var(vU[, 2]))
# (c) v
vU.prime <- 1 - vU
(mean(vU.prime[, 1] * vU.prime[, 2]) - prod(colMeans(vU.prime))) / sqrt(var(vU.prime[, 1]) * var(vU.prime[, 2]))
# (c) vi
X1X2 <- qlnorm(vU, - 0.5 * log(5), sqrt(log(5)))
X1X2.prime <- qlnorm(vU.prime, - 0.5 * log(5), sqrt(log(5)))
# (c) vii
S <- rowSums(X1X2)
S.prime <- rowSums(X1X2.prime)
# plot(ecdf(S))
# plot(ecdf(S.prime), add = TRUE)
# (c) viii
(VaRS <- sapply(c(0.9, 0.99, 0.999, 0.9999), function(t) sort(S)[t * nsim]))
(VaRS.prime <- sapply(c(0.9, 0.99, 0.999, 0.9999), function(t) sort(S.prime)[t * nsim]))
(TVaRS <- sapply(VaRS, function(t) mean(S[S > t])))
# BUG FIX: was assigned to TVaRS, clobbering the previous line's result.
(TVaRS.prime <- sapply(VaRS.prime, function(t) mean(S.prime[S.prime > t])))

# Same calibration, this time under the survival (flipped) Clayton copula.
rhox1x2 <- function(alpha) {
  C.clayton <- function(u, v) (u ^ (-alpha) + v ^ (-alpha) - 1) ^ (-1 / alpha)
  C.clayton.bar <- function(u, v) u + v - 1 + C.clayton(1 - u, 1 - v)
  Fx1x2 <- function(x, y) C.clayton.bar(F1(x), F2(y))
  Fx1x2.bar <- function(x, y) 1 - F1(x) - F2(y) + Fx1x2(x, y)
  Ex1x2 <- quad2d(Fx1x2.bar, 0, 100, 0, 100)
  covx1x2 <- Ex1x2 - EX ^ 2
  covx1x2 / sqrt(VX ^ 2)
}
TrouverAlpha <- function(rho) {
  optimize(function(x) abs(rhox1x2(x) - rho), c(0, 10))$minimum
}
(alpha <- TrouverAlpha(0.5))
# (c) iv -- final run with the alpha calibrated for the survival copula.
nsim <- 1000000
set.seed(2017)
vV <- matrix(runif(nsim * 3), nsim, 3, byrow = TRUE)
vTheta <- qgamma(vV[, 1], 1 / alpha, 1)
vY <- sapply(1:2, function(t) qexp(vV[, t + 1], vTheta))
vU <- (1 + vY) ^ (-1 / alpha)
(mean(vU[, 1] * vU[, 2]) - prod(colMeans(vU))) / sqrt(var(vU[, 1]) * var(vU[, 2]))
# (c) v
vU.prime <- 1 - vU
(mean(vU.prime[, 1] * vU.prime[, 2]) - prod(colMeans(vU.prime))) / sqrt(var(vU.prime[, 1]) * var(vU.prime[, 2]))
# (c) vi
X1X2 <- qlnorm(vU, - 0.5 * log(5), sqrt(log(5)))
X1X2.prime <- qlnorm(vU.prime, - 0.5 * log(5), sqrt(log(5)))
# (c) vii
S <- rowSums(X1X2)
S.prime <- rowSums(X1X2.prime)
# plot(ecdf(S))
# plot(ecdf(S.prime), add = TRUE)
# (c) viii
(VaRS <- sapply(c(0.9, 0.99, 0.999, 0.9999), function(t) sort(S)[t * nsim]))
(VaRS.prime <- sapply(c(0.9, 0.99, 0.999, 0.9999), function(t) sort(S.prime)[t * nsim]))
(TVaRS <- sapply(VaRS, function(t) mean(S[S > t])))
# BUG FIX: was assigned to TVaRS, clobbering the previous line's result.
(TVaRS.prime <- sapply(VaRS.prime, function(t) mean(S.prime[S.prime > t])))
| /depannage/semaine-11-2.R | no_license | alec42/act-3000 | R | false | false | 5,303 | r | library(pracma)
# 10.5.1
# (c) ii -- Pearson correlation of (U1, U2) under the Clayton copula and its
# survival copula. (`**` is a deprecated alias for `^`; `^` used throughout.)
alpha <- 5
C.clayton <- function(u, v) (u ^ (-alpha) + v ^ (-alpha) - 1) ^ (-1 / alpha)
C.clayton.bar <- function(u, v) u + v - 1 + C.clayton(1 - u, 1 - v)

# U_1, U_2
Fu1u2 <- function(u, v) C.clayton(u, v)
Fu1u2.bar <- function(u, v) 1 - u - v + Fu1u2(u, v)
Eu1u2 <- quad2d(Fu1u2.bar, 0, 1, 0, 1)
covu1u2 <- Eu1u2 - 0.5 ^ 2
covu1u2 / sqrt(1 / 12 * 1 / 12)

# U_1', U_2'
Fu1u2 <- function(u, v) C.clayton.bar(u, v)
Fu1u2.bar <- function(u, v) 1 - u - v + Fu1u2(u, v)
Eu1u2 <- quad2d(Fu1u2.bar, 0, 1, 0, 1)
covu1u2 <- Eu1u2 - 0.5 ^ 2
covu1u2 / sqrt(1 / 12 * 1 / 12)

# (c) iii -- lognormal margins with E[X] = 1, Var[X] = 4.
EX <- 1
VX <- 4
mean(rlnorm(1000000, - 0.5 * log(5), sqrt(log(5))))  # sanity check: ~EX
var(rlnorm(1000000, - 0.5 * log(5), sqrt(log(5))))   # sanity check: ~VX
F1 <- function(x) plnorm(x, - 0.5 * log(5), sqrt(log(5)))
F2 <- function(x) plnorm(x, - 0.5 * log(5), sqrt(log(5)))
Fx1x2 <- function(x, y) C.clayton(F1(x), F2(y))
Fx1x2.bar <- function(x, y) 1 - F1(x) - F2(y) + Fx1x2(x, y)
Ex1x2 <- quad2d(Fx1x2.bar, 0, 100, 0, 100)
covx1x2 <- Ex1x2 - EX ^ 2
covx1x2 / sqrt(VX ^ 2)
Fx1x2 <- function(x, y) C.clayton.bar(F1(x), F2(y))
Fx1x2.bar <- function(x, y) 1 - F1(x) - F2(y) + Fx1x2(x, y)
Ex1x2 <- quad2d(Fx1x2.bar, 0, 100, 0, 100)
covx1x2 <- Ex1x2 - EX ^ 2
covx1x2 / sqrt(VX ^ 2)

# (c) iv -- simulate via the gamma frailty representation.
nsim <- 1000000
set.seed(2017)
vV <- matrix(runif(nsim * 3), nsim, 3, byrow = TRUE)
vTheta <- qgamma(vV[, 1], 1 / alpha, 1)
vY <- sapply(1:2, function(t) qexp(vV[, t + 1], vTheta))
vU <- (1 + vY) ^ (-1 / alpha)
(mean(vU[, 1] * vU[, 2]) - prod(colMeans(vU))) / sqrt(var(vU[, 1]) * var(vU[, 2]))
# (c) v
vU.prime <- 1 - vU
(mean(vU.prime[, 1] * vU.prime[, 2]) - prod(colMeans(vU.prime))) / sqrt(var(vU.prime[, 1]) * var(vU.prime[, 2]))
# (c) vi
X1X2 <- qlnorm(vU, - 0.5 * log(5), sqrt(log(5)))
X1X2.prime <- qlnorm(vU.prime, - 0.5 * log(5), sqrt(log(5)))
# (c) vii
S <- rowSums(X1X2)
S.prime <- rowSums(X1X2.prime)
# plot(ecdf(S))
# plot(ecdf(S.prime), add = TRUE)
# (c) viii
(VaRS <- sapply(c(0.9, 0.99, 0.999, 0.9999), function(t) sort(S)[t * nsim]))
(VaRS.prime <- sapply(c(0.9, 0.99, 0.999, 0.9999), function(t) sort(S.prime)[t * nsim]))
(TVaRS <- sapply(VaRS, function(t) mean(S[S > t])))
# BUG FIX: was assigned to TVaRS, clobbering the previous line's result.
(TVaRS.prime <- sapply(VaRS.prime, function(t) mean(S.prime[S.prime > t])))

# Redo the computations, for both hypotheses, with alpha such that rho = 0.5.
rhox1x2 <- function(alpha) {
  C.clayton <- function(u, v) (u ^ (-alpha) + v ^ (-alpha) - 1) ^ (-1 / alpha)
  Fx1x2 <- function(x, y) C.clayton(F1(x), F2(y))
  Fx1x2.bar <- function(x, y) 1 - F1(x) - F2(y) + Fx1x2(x, y)
  Ex1x2 <- quad2d(Fx1x2.bar, 0, 100, 0, 100)
  covx1x2 <- Ex1x2 - EX ^ 2
  covx1x2 / sqrt(VX ^ 2)
}
TrouverAlpha <- function(rho) {
  optimize(function(x) abs(rhox1x2(x) - rho), c(0, 20))$minimum
}
(alpha <- TrouverAlpha(0.5))

# (c) iv -- repeat with the calibrated alpha.
nsim <- 1000000
set.seed(2017)
vV <- matrix(runif(nsim * 3), nsim, 3, byrow = TRUE)
vTheta <- qgamma(vV[, 1], 1 / alpha, 1)
vY <- sapply(1:2, function(t) qexp(vV[, t + 1], vTheta))
vU <- (1 + vY) ^ (-1 / alpha)
(mean(vU[, 1] * vU[, 2]) - prod(colMeans(vU))) / sqrt(var(vU[, 1]) * var(vU[, 2]))
# (c) v
vU.prime <- 1 - vU
(mean(vU.prime[, 1] * vU.prime[, 2]) - prod(colMeans(vU.prime))) / sqrt(var(vU.prime[, 1]) * var(vU.prime[, 2]))
# (c) vi
X1X2 <- qlnorm(vU, - 0.5 * log(5), sqrt(log(5)))
X1X2.prime <- qlnorm(vU.prime, - 0.5 * log(5), sqrt(log(5)))
# (c) vii
S <- rowSums(X1X2)
S.prime <- rowSums(X1X2.prime)
# plot(ecdf(S))
# plot(ecdf(S.prime), add = TRUE)
# (c) viii
(VaRS <- sapply(c(0.9, 0.99, 0.999, 0.9999), function(t) sort(S)[t * nsim]))
(VaRS.prime <- sapply(c(0.9, 0.99, 0.999, 0.9999), function(t) sort(S.prime)[t * nsim]))
(TVaRS <- sapply(VaRS, function(t) mean(S[S > t])))
# BUG FIX: was assigned to TVaRS, clobbering the previous line's result.
(TVaRS.prime <- sapply(VaRS.prime, function(t) mean(S.prime[S.prime > t])))

# Same calibration under the survival (flipped) Clayton copula.
rhox1x2 <- function(alpha) {
  C.clayton <- function(u, v) (u ^ (-alpha) + v ^ (-alpha) - 1) ^ (-1 / alpha)
  C.clayton.bar <- function(u, v) u + v - 1 + C.clayton(1 - u, 1 - v)
  Fx1x2 <- function(x, y) C.clayton.bar(F1(x), F2(y))
  Fx1x2.bar <- function(x, y) 1 - F1(x) - F2(y) + Fx1x2(x, y)
  Ex1x2 <- quad2d(Fx1x2.bar, 0, 100, 0, 100)
  covx1x2 <- Ex1x2 - EX ^ 2
  covx1x2 / sqrt(VX ^ 2)
}
TrouverAlpha <- function(rho) {
  optimize(function(x) abs(rhox1x2(x) - rho), c(0, 10))$minimum
}
(alpha <- TrouverAlpha(0.5))

# (c) iv -- final run with the survival-copula alpha.
nsim <- 1000000
set.seed(2017)
vV <- matrix(runif(nsim * 3), nsim, 3, byrow = TRUE)
vTheta <- qgamma(vV[, 1], 1 / alpha, 1)
vY <- sapply(1:2, function(t) qexp(vV[, t + 1], vTheta))
vU <- (1 + vY) ^ (-1 / alpha)
(mean(vU[, 1] * vU[, 2]) - prod(colMeans(vU))) / sqrt(var(vU[, 1]) * var(vU[, 2]))
# (c) v
vU.prime <- 1 - vU
(mean(vU.prime[, 1] * vU.prime[, 2]) - prod(colMeans(vU.prime))) / sqrt(var(vU.prime[, 1]) * var(vU.prime[, 2]))
# (c) vi
X1X2 <- qlnorm(vU, - 0.5 * log(5), sqrt(log(5)))
X1X2.prime <- qlnorm(vU.prime, - 0.5 * log(5), sqrt(log(5)))
# (c) vii
S <- rowSums(X1X2)
S.prime <- rowSums(X1X2.prime)
# plot(ecdf(S))
# plot(ecdf(S.prime), add = TRUE)
# (c) viii
(VaRS <- sapply(c(0.9, 0.99, 0.999, 0.9999), function(t) sort(S)[t * nsim]))
(VaRS.prime <- sapply(c(0.9, 0.99, 0.999, 0.9999), function(t) sort(S.prime)[t * nsim]))
(TVaRS <- sapply(VaRS, function(t) mean(S[S > t])))
# BUG FIX: was assigned to TVaRS, clobbering the previous line's result.
(TVaRS.prime <- sapply(VaRS.prime, function(t) mean(S.prime[S.prime > t])))
|
#' Fit a hierarchical multivariate model to data
#'
#' Gibbs sampler for a hierarchical multivariate normal model: each group has
#' its own mean vector and covariance matrix, tied together by global
#' hyperparameters.
#'
#' @param groups A character, integer, or factor of group labels, with length
#'   `nrow(dat)`.
#' @inheritParams fit_mvnorm
#' @inherit fit_mvnorm return
#' @export
fit_mvnorm_hier <- function(dat,
                            groups,
                            niter = 5000,
                            priors = list(),
                            inits = list(),
                            nchains = 3,
                            autofit = FALSE,
                            max_attempts = 10,
                            keep_samples = Inf,
                            threshold = 1.15,
                            save_progress = NULL,
                            progress = NULL) {
  # By default, only show progress when chains run sequentially.
  if (is.null(progress)) {
    progress <- inherits(future::plan(), "sequential")
  }
  stopifnot(is.matrix(dat), length(groups) == nrow(dat))
  chainseq <- seq_len(nchains)
  nparam <- ncol(dat)
  param_names <- colnames(dat)
  if (is.null(param_names)) {
    param_names <- sprintf("par%02d", seq_len(nparam))
  }
  # Derive readable group names: factor levels when available, otherwise
  # synthetic "groupNN" labels for plain integer input.
  ngroup <- length(unique(groups))
  if (is.character(groups)) {
    groups <- factor(groups)
  }
  if (is.factor(groups)) {
    group_names <- levels(groups)
  } else {
    group_names <- sprintf("group%02d", seq_len(ngroup))
  }
  igroups <- as.integer(groups)
  ugroups <- sort(unique(igroups))
  # Pre-compute missing-data bookkeeping for each group's submatrix.
  setup_bygroup <- lapply(
    ugroups,
    function(x) setup_missing(dat[igroups == x, ])
  )
  # Where missing, fall back to the package's default priors. A length
  # mismatch after merging means the user misspelled a prior name.
  default_priors <- gibbs_default_priors(nparam, ngroup)
  if (!is.null(priors)) {
    priors <- modifyList(default_priors, priors)
    if (length(priors) != length(default_priors)) {
      stop(
        "Length of priors (", length(priors), ") ",
        "does not equal length of default priors (",
        length(default_priors), "). ",
        "There is likely a typo in your `prior` name.\n",
        "names(priors): ", paste(names(priors), collapse = ", "),
        "\n",
        "names(default_priors): ", paste(names(default_priors), collapse = ", ")
      )
    }
  } else {
    priors <- default_priors
  }
  # Unpack priors into the local environment.
  mu0_global <- priors[["mu_global"]]
  Sigma0_global <- priors[["Sigma_global"]]
  v0_global <- priors[["v_global"]]
  S0_global <- priors[["S_global"]]
  mu0_group <- priors[["mu_group"]]
  Sigma0_group <- priors[["Sigma_group"]]
  v0_group <- priors[["v_group"]]
  S0_group <- priors[["S_group"]]
  # Pre-invert the prior covariances used repeatedly by the sampler.
  Sigma0_global_inv <- solve(Sigma0_global)
  Sigma0_group_inv <- vapply(
    seq_len(ngroup),
    function(x) solve(Sigma0_group[x, , ]),
    Sigma0_global_inv
  )
  Sigma0_group_inv <- aperm(Sigma0_group_inv, c(3, 1, 2))
  # Draw initial conditions from the priors, one set per chain.
  mu_global <- list()
  Sigma_global <- list()
  mu_group <- list()
  Sigma_group <- list()
  for (n in chainseq) {
    mu_global[[n]] <- random_mvnorm(1, mu0_global, Sigma0_global)[1, ]
    names(mu_global[[n]]) <- param_names
    Sigma_global[[n]] <- solve(rWishart(1, v0_global + nparam + 1,
                                        S0_global)[, , 1])
    dimnames(Sigma_global[[n]]) <- list(param_names, param_names)
    mu_group[[n]] <- matrix(NA_real_, nrow = ngroup, ncol = nparam)
    dimnames(mu_group[[n]]) <- list(group_names, param_names)
    Sigma_group[[n]] <- array(NA_real_, c(ngroup, nparam, nparam))
    dimnames(Sigma_group[[n]]) <- list(group_names, param_names, param_names)
    for (i in seq_len(ngroup)) {
      mu_group[[n]][i, ] <- random_mvnorm(1, mu0_group[i, ], Sigma0_group[i, , ])
      # NOTE(review): group covariances start at the identity rather than a
      # Wishart draw (original draw kept for reference) -- presumably for
      # numerical stability; confirm before changing.
      # Sigma_group[[n]][i,,] <- solve(rWishart(1, v0_group[i] + nparam + 1, S0_group[i,,])[,,1])
      Sigma_group[[n]][i, , ] <- diag(1, nparam)
    }
  }
  default_inits <- list(mu_global = mu_global,
                        Sigma_global = Sigma_global,
                        mu_group = mu_group,
                        Sigma_group = Sigma_group)
  if (!is.null(inits)) {
    inits <- modifyList(default_inits, inits)
  } else {
    inits <- default_inits
  }
  # Bundle the sampling function, per-chain init extractor, and fixed
  # arguments for run_until_converged().
  sampler <- list(
    fun = sample_mvnorm_hier,
    init_fun = function(n, inits) {
      list(
        mu_global = inits[["mu_global"]][[n]],
        Sigma_global = inits[["Sigma_global"]][[n]],
        mu_group = inits[["mu_group"]][[n]],
        Sigma_group = inits[["Sigma_group"]][[n]]
      )
    },
    args = list(
      niter = niter,
      dat = dat,
      groups = igroups,
      mu0_global = mu0_global,
      Sigma0_global = Sigma0_global,
      mu0_group = mu0_group,
      Sigma0_group_inv = Sigma0_group_inv,
      v0_global = v0_global,
      S0_global = S0_global,
      v0_group = v0_group,
      S0_group = S0_group,
      setup_bygroup = setup_bygroup,
      progress = progress
    )
  )
  message("Running sampler...")
  raw_samples <- run_until_converged(
    sampler = sampler,
    model_type = "hier",
    inits = inits,
    nchains = nchains,
    max_attempts = max_attempts,
    save_progress = save_progress,
    threshold = threshold,
    keep_samples = keep_samples,
    autofit = autofit
  )
  message("Calculating correlation matrices...")
  raw_samples_corr <- add_correlations(raw_samples,
                                       hier = TRUE, ngroups = ngroup)
  message("Converting samples to coda mcmc.list object...")
  samples_mcmc <- results2mcmclist(raw_samples_corr, type = "hier")
  niter <- coda::niter(samples_mcmc)
  message("Preparing summary table...")
  # Summarize the second half of the chains (discard burn-in).
  summary_table <- summary_df(
    window(samples_mcmc, start = floor(niter / 2)),
    group = TRUE
  )
  stats <- c("Mean", "2.5%", "97.5%")
  # Shared extractors, used for both the "global" level and each group
  # (the original duplicated these sapply() stanzas six times).
  get_vec_stats <- function(grp) {
    sapply(
      stats,
      function(x) summary2vec(summary_table, x,
                              variable == "mu", group == grp),
      simplify = FALSE,
      USE.NAMES = TRUE
    )
  }
  get_mat_stats <- function(grp, var) {
    sapply(
      stats,
      function(x) summary2mat(summary_table, x,
                              variable == var, group == grp),
      simplify = FALSE,
      USE.NAMES = TRUE
    )
  }
  mu_global_stats <- get_vec_stats("global")
  Sigma_global_stats <- get_mat_stats("global", "Sigma")
  Corr_global_stats <- get_mat_stats("global", "Corr")
  mu_group_stats <- sapply(
    group_names,
    get_vec_stats,
    simplify = FALSE,
    USE.NAMES = TRUE
  )
  Sigma_group_stats <- sapply(
    group_names,
    get_mat_stats,
    var = "Sigma",
    simplify = FALSE,
    USE.NAMES = TRUE
  )
  Corr_group_stats <- sapply(
    group_names,
    get_mat_stats,
    var = "Corr",
    simplify = FALSE,
    USE.NAMES = TRUE
  )
  list(
    summary_table = summary_table,
    stats = list(
      mu_global = mu_global_stats,
      Sigma_global = Sigma_global_stats,
      Corr_global = Corr_global_stats,
      mu_group = mu_group_stats,
      Sigma_group = Sigma_group_stats,
      Corr_group = Corr_group_stats
    ),
    samples = samples_mcmc
  )
}
| /R/fit_mvnorm_hier.R | no_license | ashiklom/mvtraits | R | false | false | 7,275 | r | #' Fit a hierarchical multivariate model to data
#'
#' @param groups A character, integer, or factor of group labels, with length
#' `nrow(dat)`.
#' @inheritParams fit_mvnorm
#' @inherit fit_mvnorm return
#' @export
fit_mvnorm_hier <- function(dat,
groups,
niter = 5000,
priors = list(),
inits = list(),
nchains = 3,
autofit = FALSE,
max_attempts = 10,
keep_samples = Inf,
threshold = 1.15,
save_progress = NULL,
progress = NULL) {
if (is.null(progress)) {
progress <- inherits(future::plan(), "sequential")
}
stopifnot(is.matrix(dat), length(groups) == nrow(dat))
chainseq <- seq_len(nchains)
nparam <- ncol(dat)
param_names <- colnames(dat)
if (is.null(param_names)) {
param_names <- sprintf("par%02d", seq_len(nparam))
}
ngroup <- length(unique(groups))
if (is.character(groups)) {
groups <- factor(groups)
}
if (is.factor(groups)) {
group_names <- levels(groups)
} else {
group_names <- sprintf("group%02d", seq_len(ngroup))
}
igroups <- as.integer(groups)
ugroups <- sort(unique(igroups))
setup_bygroup <- lapply(
ugroups,
function(x) setup_missing(dat[igroups == x, ])
)
# Where missing, use default priors
default_priors <- gibbs_default_priors(nparam, ngroup)
if (!is.null(priors)) {
priors <- modifyList(default_priors, priors)
if (length(priors) != length(default_priors)) {
stop(
"Length of priors (", length(priors), ") ",
"does not equal length of default priors (",
length(default_priors), "). ",
"There is likely a typo in your `prior` name.\n",
"names(priors): ", paste(names(priors), collapse = ", "),
"\n",
"names(default_priors): ", paste(names(default_priors), collapse = ", ")
)
}
} else {
priors <- default_priors
}
# Set priors in environment
mu0_global <- priors[["mu_global"]]
Sigma0_global <- priors[["Sigma_global"]]
v0_global <- priors[["v_global"]]
S0_global <- priors[["S_global"]]
mu0_group <- priors[["mu_group"]]
Sigma0_group <- priors[["Sigma_group"]]
v0_group <- priors[["v_group"]]
S0_group <- priors[["S_group"]]
# Precalculate certain quantities
Sigma0_global_inv <- solve(Sigma0_global)
Sigma0_group_inv <- vapply(
seq_len(ngroup),
function(x) solve(Sigma0_group[x,,]),
Sigma0_global_inv
)
Sigma0_group_inv <- aperm(Sigma0_group_inv, c(3, 1, 2))
# Draw initial conditions from priors
mu_global <- list()
Sigma_global <- list()
mu_group <- list()
Sigma_group <- list()
for (n in chainseq) {
mu_global[[n]] <- random_mvnorm(1, mu0_global, Sigma0_global)[1, ]
names(mu_global[[n]]) <- param_names
Sigma_global[[n]] <- solve(rWishart(1, v0_global + nparam + 1,
S0_global)[,,1])
dimnames(Sigma_global[[n]]) <- list(param_names, param_names)
mu_group[[n]] <- matrix(NA_real_, nrow = ngroup, ncol = nparam)
dimnames(mu_group[[n]]) <- list(group_names, param_names)
Sigma_group[[n]] <- array(NA_real_, c(ngroup, nparam, nparam))
dimnames(Sigma_group[[n]]) <- list(group_names, param_names, param_names)
for (i in seq_len(ngroup)) {
mu_group[[n]][i, ] <- random_mvnorm(1, mu0_group[i,], Sigma0_group[i,,])
#Sigma_group[[n]][i,,] <- solve(rWishart(1, v0_group[i] + nparam + 1, S0_group[i,,])[,,1])
Sigma_group[[n]][i, , ] <- diag(1, nparam)
}
}
default_inits <- list(mu_global = mu_global,
Sigma_global = Sigma_global,
mu_group = mu_group,
Sigma_group = Sigma_group)
if (!is.null(inits)) {
inits <- modifyList(default_inits, inits)
} else {
inits <- default_inits
}
sampler <- list(
fun = sample_mvnorm_hier,
init_fun = function(n, inits) {
list(
mu_global = inits[["mu_global"]][[n]],
Sigma_global = inits[["Sigma_global"]][[n]],
mu_group = inits[["mu_group"]][[n]],
Sigma_group = inits[["Sigma_group"]][[n]]
)
},
args = list(
niter = niter,
dat = dat,
groups = igroups,
mu0_global = mu0_global,
Sigma0_global = Sigma0_global,
mu0_group = mu0_group,
Sigma0_group_inv = Sigma0_group_inv,
v0_global = v0_global,
S0_global = S0_global,
v0_group = v0_group,
S0_group = S0_group,
setup_bygroup = setup_bygroup,
progress = progress
)
)
message("Running sampler...")
raw_samples <- run_until_converged(
sampler = sampler,
model_type = "hier",
inits = inits,
nchains = nchains,
max_attempts = max_attempts,
save_progress = save_progress,
threshold = threshold,
keep_samples = keep_samples,
autofit = autofit
)
message("Calculating correlation matrices...")
raw_samples_corr <- add_correlations(raw_samples,
hier = TRUE, ngroups = ngroup)
message("Converting samples to coda mcmc.list object...")
samples_mcmc <- results2mcmclist(raw_samples_corr, type = "hier")
niter <- coda::niter(samples_mcmc)
message("Preparing summary table...")
summary_table <- summary_df(
window(samples_mcmc, start = floor(niter / 2)),
group = TRUE
)
stats <- c("Mean", "2.5%", "97.5%")
mu_global_stats <- sapply(
stats,
function(x) summary2vec(summary_table, x,
variable == "mu", group == "global"),
simplify = FALSE,
USE.NAMES = TRUE
)
Sigma_global_stats <- sapply(
stats,
function(x) summary2mat(summary_table, x,
variable == "Sigma", group == "global"),
simplify = FALSE,
USE.NAMES = TRUE
)
Corr_global_stats <- sapply(
stats,
function(x) summary2mat(summary_table, x,
variable == "Corr", group == "global"),
simplify = FALSE,
USE.NAMES = TRUE
)
get_mu_group <- function(grp) {
sapply(
stats,
function(x) summary2vec(summary_table, x, variable == "mu", group == grp),
simplify = FALSE,
USE.NAMES = TRUE
)
}
get_mat_group <- function(grp, var) {
sapply(
stats,
function(x) summary2mat(summary_table, x, variable == var, group == grp),
simplify = FALSE,
USE.NAMES = TRUE
)
}
mu_group_stats <- sapply(
group_names,
get_mu_group,
simplify = FALSE,
USE.NAMES = TRUE
)
Sigma_group_stats <- sapply(
group_names,
get_mat_group,
var = "Sigma",
simplify = FALSE,
USE.NAMES = TRUE
)
Corr_group_stats <- sapply(
group_names,
get_mat_group,
var = "Corr",
simplify = FALSE,
USE.NAMES = TRUE
)
list(
summary_table = summary_table,
stats = list(
mu_global = mu_global_stats,
Sigma_global = Sigma_global_stats,
Corr_global = Corr_global_stats,
mu_group = mu_group_stats,
Sigma_group = Sigma_group_stats,
Corr_group = Corr_group_stats
),
samples = samples_mcmc
)
}
|
# Global ML search for the BiChroM model on the 75-taxon, height-70 simulated
# tree: read the simulated tree/chromosome data, run a derivative-free SBPLX
# search over log-transformed rates, and write the optimum (10 parameters plus
# the minimized negative log-likelihood) to CSV.

library("ape")
library("geiger")
library("expm")
library("nloptr")

# Project helpers: likelihood function, Q-matrix construction, pruning.
source("masternegloglikeeps1.R")
source("Qmatrixwoodherb2.R")
source("Pruning2.R")

# Simulated inputs for this replicate.
phy <- read.tree("tree75time70.txt")
chrom_data <- read.table("chrom75time70.txt", header = FALSE)

max_state <- 50

# Starting values on the log scale (the underlying rates must stay positive).
start_par <- log(c(0.12, 0.001, 0.25, 0.002, 0.036, 0.006, 0.04, 0.02,
                   1.792317852, 1.57e-14))

# Uniform root prior over the 2 * (max_state + 1) states.
root_prior <- rep(1, 2 * (max_state + 1)) / (2 * (max_state + 1))

# Derivative-free subplex search; generous wall-clock budget, capped at
# 1000 function evaluations.
opt_settings <- list(
  "algorithm" = "NLOPT_LN_SBPLX",
  "ftol_rel" = 1e-08,
  "print_level" = 1,
  "maxtime" = 170000000,
  "maxeval" = 1000
)

fit <- nloptr(
  x0 = start_par,
  eval_f = negloglikelihood.wh,
  opts = opt_settings,
  bichrom.phy = phy,
  bichrom.data = chrom_data,
  max.chromosome = max_state,
  pi.0 = root_prior
)
print(fit)

# Rows 1-10: MLEs (log scale); row 11: minimized negative log-likelihood.
out <- c(fit$solution, fit$objective)
write.table(out, file = "globalmax75tree70.csv", sep = ",")
| /Simulations tree height/75 my/optim75tree70.R | no_license | roszenil/Bichromdryad | R | false | false | 821 | r | library( "ape" )
library( "geiger" )
library( "expm" )
library( "nloptr" )
source( "masternegloglikeeps1.R" )
source( "Qmatrixwoodherb2.R" )
source("Pruning2.R")
sim.tree<-read.tree("tree75time70.txt")
sim.chrom<-read.table("chrom75time70.txt", header=FALSE)
last.state=50
x.0<- log(c(0.12, 0.001, 0.25, 0.002,0.036, 0.006, 0.04,0.02, 1.792317852, 1.57e-14))
p.0<-rep(1,2*(last.state+1))/(2*(last.state+1))
results<-rep(0,11)
my.options<-list("algorithm"= "NLOPT_LN_SBPLX","ftol_rel"=1e-08,"print_level"=1,"maxtime"=170000000, "maxeval"=1000)
mle<-nloptr(x0=x.0,eval_f=negloglikelihood.wh,opts=my.options,bichrom.phy=sim.tree, bichrom.data=sim.chrom,max.chromosome=last.state,pi.0=p.0)
print(mle)
results[1:10]<-mle$solution
results[11]<-mle$objective
write.table(results,file="globalmax75tree70.csv",sep=",")
|
# Homework 5.1 -- outlier detection
#
# Question: does the last column of the US crime data set (number of crimes
# per 100,000 people) contain any outliers?

library(tidyverse)
library(outliers)

# Load the whitespace-delimited US crime data.
crime <- read.table("_data/uscrime.txt", header = TRUE, sep = "", dec = ".")

# Quick structural look at the data; crime$Crime is the column of interest.
glimpse(crime)

# Pull up the help page for the Grubbs test.
?grubbs.test

# Rationale: the Grubbs test can examine either tail of the distribution.
# For crime levels, only unusually HIGH values are interesting ("are there
# any unusually high crime points?"), so a one-tailed test on the high end
# is appropriate -- unusually low crime would be good news, not an anomaly.

grubbs.test(
  crime$Crime,       # variable screened for a single outlier
  type = 10,         # test one outlier; rerun if one is found
  opposite = FALSE,  # test the value farthest from the mean (the max here)
  two.sided = FALSE  # one tail only: just the high end matters
)

# Conclusion: the highest value (1993) is not flagged as an outlier -- the
# p-value falls just short of the 5% significance threshold, so the null
# hypothesis (no outlier) is retained.
### | /intro_to_data_modeling/week2/hw_5_1.R | permissive | bwilson668/gt | R | false | false | 1,371 | r | # QUESTION
#
# Test to see whether there are any outliers in the last column
# (number of crimes per 100,000 people)
# Import Libraries
library(tidyverse)
library(outliers)
# Read the data in
crime <- read.table("_data/uscrime.txt", header = TRUE, sep = "", dec = ".")
# Understand the data
glimpse(crime)
# crime$Crime will be the column we are analyzing for outliers
# Understand the function
?grubbs.test
###
#
# REPORT
#
# The Grubbs test can check both extremes/tails of a distribution.
# In this case, outliers in the level of crime, I'd think we only want to look at
# the high-level of crime. Asking, "Are there any unusually high points for crime?"
# This would mean we would want a one-tailed test, on the highest end.
#
###
# Start testing for outliers
grubbs.test(
crime$Crime, # Target variable for outliers
type = 10, # Look for 1 outlier - we can run again if an outlier is found
opposite = FALSE, # Look at the MAX or high-end tail, not the low-end
two.sided = FALSE # We only want to look at one tail. If crime is unusally low, that's a good thing we want to model.
)
###
#
# REPORT
#
# So after testing for the outlier on the high end, I would accept the NULL hypothesis.
# The highest value '1993' is not an outlier.
# The p-value does not break out 5% significance threshold. It's close though.
#
### |
#' The mstSIB test for MSTs
#'
#' This function allows the detection of itemwise DIF using the mstSIB test.
#'
#' Author: Mark J. Gierl, with minor changes by Rudolf Debelak and Dries Debeer
#'
#' @param resp A data frame containing the response matrix. Rows correspond to respondents, columns to items.
#' @param DIF_covariate A vector indicating the membership to the reference (0) and focal (1) groups.
#' @param theta A vector of ability estimates for each respondent.
#' @param see A vector of the standard error of the ability estimates for each respondent.
#' @param NCell The initial number of cells for estimating the overall ability difference between the focal and reference groups.
#' @param cellmin Minimum number of respondents per cell for the focal and reference group. Cells with fewer respondents are discarded.
#' @param pctmin Minimum proportion of the focal and reference groups that should be retained for estimating the overall ability difference between the focal and reference groups after discarding cells with few respondents.
#'
#' @return A list with four elements. The first element is the response matrix, the second element is the name of
#' the DIF covariate, and the third element is the name of the test. The fourth element is a matrix where each
#' row corresponds to an item. The columns correspond to the following entries:
#' \describe{
#' \item{Beta}{The estimated weighted ability difference between the focal and reference groups.}
#' \item{Vars}{The estimation error of the weighted ability difference between the focal and reference groups.}
#' \item{N_R}{The number of respondents in the reference group.}
#' \item{N_F}{The number of respondents in the focal group.}
#' \item{NCell}{The initial number of cells for estimating the overall ability
#' difference between the focal and reference groups.}
#' \item{p_value}{The p-value of the null hypothesis that the ability difference
#' between the focal and reference groups is 0.}
#' }
#'
#' @examples
#' data("toydata")
#' resp <- toydata$resp
#' group_categ <- toydata$group_categ
#' theta_est <- toydata$theta_est
#' see_est <- toydata$see_est
#' mstSIB(resp = as.data.frame(resp), theta = theta_est,
#' DIF_covariate = group_categ, see = see_est)
#'
#' @export
mstSIB <- function(resp, DIF_covariate, theta = NULL, see = NULL,
                   cellmin = 3, pctmin = .9, NCell = 80){
  # Capture the call so the unevaluated name of `DIF_covariate` can be
  # reported in the returned list.
  call <- match.call()
  # a theta-argument is required
  if(is.null(theta)) stop("'theta'-argument is missing. Include a vector with the estimated 'theta'-values.", call. = FALSE)
  # a see-argument is required
  if(is.null(see)) stop("'see'-argument is missing. Include a vector with the estimated standard errors of the 'theta'-values.", call. = FALSE)
  # get the DIF_covariate name (used only to label the output)
  DIF_covariate_name <- as.character(deparse(call$DIF_covariate))
  # The test only supports two groups; recode whatever the original labels
  # were as 0 (reference) and 1 (focal).
  DIF_covariate <- as.factor(DIF_covariate)
  stopifnot(nlevels(DIF_covariate) == 2)
  levels(DIF_covariate) <- c(0, 1)
  DIF_covariate <- as.numeric(as.character(DIF_covariate))
  # get number of items
  nItem <- ncol(resp)
  # Keep existing column names, or generate zero-padded defaults
  # ("it1", "it01", ...) when the response matrix has none.
  colnames(resp) <- itemnames <- 'if'(is.null(colnames(resp)),
                                      sprintf(paste("it%0", nchar(nItem),
                                                    "d", sep=''),
                                              seq_len(nItem)),
                                      colnames(resp))
  # Working data: column 1 = theta, 2 = see, 3 = group, 4... = item responses.
  Sif <- cbind(theta, see, DIF_covariate, resp)
  # One output row per item.
  BetaOut<-matrix(numeric(0),dim(Sif)[2]-3,6)
  rownames(BetaOut) <- colnames(resp)
  # NOTE(review): column 2 ("SE") is filled with `vars` below, which is
  # computed as a sum of variances -- i.e. a variance, not a standard error.
  colnames(BetaOut) <- c("stat", "SE", "N_R", "N_F", "NCell", "p_value")
  # Loop over the item columns of Sif (columns 4..end), one SIB test each.
  for(inum in 4:dim(Sif)[2]){
    # Reference (0) and focal (1) subsamples with a non-missing response on
    # the current item.
    Rif <- Sif[Sif[, 3] == 0 & !is.na(Sif[, inum]), ]
    Fif <- Sif[Sif[, 3] == 1 & !is.na(Sif[, inum]), ]
    if(nrow(Rif) > 0 & nrow(Fif) > 0){
      # Item responses of the reference and focal groups.
      RSR <- Rif[, inum]
      FSR <- Fif[, inum]
      ## Ability estimates and their standard errors, per group.
      EThetaF <- Fif[1:dim(Fif)[1],1]
      ESEF<-Fif[1:dim(Fif)[1],2]
      EThetaR<-Rif[1:dim(Rif)[1],1]
      ESER<-Rif[1:dim(Rif)[1],2]
      MeanR<-mean(EThetaR)
      VarR<-var(EThetaR)
      MeanF<-mean(EThetaF)
      VarF<-var(EThetaF)
      TMin<-min(min(EThetaR),min(EThetaF))
      TMax<-max(max(EThetaR),max(EThetaF))
      # Information implied by the standard errors: 1 / see^2.
      RI<-(ESER^-2)
      FI<-(ESEF^-2)
      MeanRI<-mean(RI)
      MeanFI<-mean(FI)
      ## Shrink the ability estimates toward the group mean using the mean
      ## information (reliability-type regression correction):
      ## be = rmean + (1. - (1./rinfomean)/rvar)*(thetaro(j) - rmean)
      AThetaR<-MeanR+(1-(1/MeanRI/VarR))*(EThetaR - MeanR)
      AThetaF<-MeanF+(1-(1/MeanFI/VarF))*(EThetaF - MeanF)
      ## Sort all examinees and their responses by their adjusted thetahats.
      RefMain<-cbind(AThetaR,RSR)
      FocMain<-cbind(AThetaF,FSR)
      RefMain<-RefMain[order(RefMain[,1]),]
      FocMain<-FocMain[order(FocMain[,1]),]
      ## Range of the adjusted abilities, defining the matching cells.
      TMin<-min(min(RefMain[,1]),min(FocMain[,1]))
      TMax<-max(max(RefMain[,1]),max(FocMain[,1]))
      ## Assign every examinee to one of NCell equal-width ability bins;
      ## findInterval yields bin indices 1 .. NCell+1 here.
      RefInt<-findInterval(RefMain[,1],(TMin+((TMax-TMin)/NCell)*0:NCell), rightmost.closed = FALSE, all.inside = FALSE)
      FocInt<-findInterval(FocMain[,1],(TMin+((TMax-TMin)/NCell)*0:NCell), rightmost.closed = FALSE, all.inside = FALSE)
      # Count cell occupancy per group; cells with fewer than `cellmin`
      # members in either group are discarded (count and bin index set to 0).
      CellCountR<-0
      CellCountF<-0
      for(i in 1:(NCell+1)){
        CellCountR[i]<-length(RefInt[RefInt==i])
        CellCountF[i]<-length(FocInt[FocInt==i])
        if ((CellCountR[i]<cellmin)||(CellCountF[i]<cellmin)){
          CellCountR[i]<-0
          CellCountF[i]<-0
          RefInt[RefInt==i]<-0
          FocInt[FocInt==i]<-0
        }
      }
      # Coarsen the grid (4 fewer cells at a time, floor of 5) until at least
      # `pctmin` of each group -- and of the pooled sample -- survives the
      # cell-minimum filter.
      # NOTE(review): NCell is mutated here and (in the no-data branch below)
      # set to NA; the changed value carries over to subsequent items in the
      # enclosing for-loop -- confirm this is intended.
      while(((sum(CellCountR)<=pctmin*dim(RefMain)[1])&&(NCell>5))||((sum(CellCountF)<=pctmin*dim(FocMain)[1])&&(NCell>5))||((sum(CellCountR)+sum(CellCountF))<=(pctmin*dim(RefMain)[1]+pctmin*dim(FocMain)[1])&&(NCell>5))) {
        NCell<-NCell-4
        RefInt<-findInterval(RefMain[,1],(TMin+((TMax-TMin)/NCell)*0:NCell), rightmost.closed = FALSE, all.inside = FALSE)
        FocInt<-findInterval(FocMain[,1],(TMin+((TMax-TMin)/NCell)*0:NCell), rightmost.closed = FALSE, all.inside = FALSE)
        CellCountR<-0
        CellCountF<-0
        for(i in 1:(NCell+1)){
          CellCountR[i]<-length(RefInt[RefInt==i])
          CellCountF[i]<-length(FocInt[FocInt==i])
          if ((CellCountR[i]<cellmin)||(CellCountF[i]<cellmin)){
            CellCountR[i]<-0
            CellCountF[i]<-0
            RefInt[RefInt==i]<-0
            FocInt[FocInt==i]<-0
          }
        }
      }
      ##check numbers for bins
      ##NCell
      ##sum(CellCountF)
      ##sum(CellCountR>1)
      ##CellCountF[1]
      ##CellCountR[1]
      ##RefInt[RefInt==1]
      ##plot(RefInt)
      ## Weighted between-group difference of item means over the retained
      ## cells (the SIB beta-uni statistic) and its variance.
      beta<-0
      # RSR is a vector here (one item at a time), so `items` is always 1.
      items<- if(is.null(dim(RSR))) 1 else (dim(RSR)[2])
      vars<-0
      for(j in 1:items){
        ybarR<-0
        ybarF<-0
        uf2sum<-0
        #ufsum<-0
        #ursum<-0
        ur2sum<-0
        # NOTE(review): `1:NCell+1` parses as `(1:NCell) + 1`, i.e. bins
        # 2..(NCell+1); bin 1 is skipped here, unlike the `1:(NCell+1)` loops
        # above. This looks like an operator-precedence slip -- confirm.
        for(i in 1:NCell+1){
          uf2sum[i]<-sum(FocMain[FocInt==i,j+1])^2
          #ufsum[i]<-sum(FocMain[FocInt==i,j+1])
          #ursum[i]<-sum(RefMain[RefInt==i,j+1])
          ur2sum[i]<-sum(RefMain[RefInt==i,j+1])^2
          # Cell means; discarded cells have count 0 and yield NaN, which is
          # dropped later via na.rm.
          ybarR[i]<-sum(RefMain[RefInt==i,j+1])/CellCountR[i]
          ybarF[i]<-sum(FocMain[FocInt==i,j+1])/CellCountF[i]
        }
        # Cell weights proportional to pooled cell counts.
        wt<-(CellCountR+CellCountF)/(sum(CellCountR)+sum(CellCountF))
        wtsum<-sum(wt)
        # Within-cell response variances per group (sum-of-squares form).
        varr<-(ur2sum-(CellCountR*ybarR*ybarR))/(CellCountR-1)
        varf<-(uf2sum-(CellCountF*ybarF*ybarF))/(CellCountF-1)
        # The next two lines are no-op expressions (leftover debug output).
        varr
        varf
        # Weighted mean difference (reference minus focal) per cell and the
        # variance of the weighted sum. `var` shadows base::var locally.
        bbg<-(ybarR-ybarF)*wt
        var<-wt*wt*((1/CellCountR)*varr + (1/CellCountF)*varf)
        ##plot((PropCorR/CellCountR)-(PropCorF/CellCountF))
        beta[j]<-sum(bbg, na.rm=TRUE)
        vars[j]<-sum(var,na.rm=TRUE)
      }
    } else{beta<- NA
    vars <- NA
    NCell <- NA}
    BetaOut[inum-3,1]<-beta
    BetaOut[inum-3,2]<-vars
    BetaOut[inum-3,3]<-dim(Rif)[1]
    BetaOut[inum-3,4]<-dim(Fif)[1]
    BetaOut[inum-3,5]<-NCell
    # Two-sided normal p-value.
    # NOTE(review): `vars` is a variance (sum of wt^2 * ...), so a z statistic
    # would normally be beta/sqrt(vars); confirm `beta/vars` is intended
    # against the SIBTEST reference.
    BetaOut[inum-3,6]<-2 * stats::pnorm(-abs(beta/vars))
  }
  return(list(resp = resp,
              DIF_covariate = DIF_covariate_name,
              test = "SIB-test",
              results = as.data.frame(BetaOut)))
}
| /R/mstSIB.R | no_license | RDebelak/mstDIF | R | false | false | 8,585 | r | #' The mstSIB test for MSTs
#'
#' This function allows the detection of itemwise DIF using the mstSIB test.
#'
#' Author: Mark J. Gierl, with minor changes by Rudolf Debelak and Dries Debeer
#'
#' @param resp A data frame containing the response matrix. Rows correspond to respondents, columns to items.
#' @param DIF_covariate A vector indicating the membership to the reference (0) and focal (1) groups.
#' @param theta A vector of ability estimates for each respondent.
#' @param see A vector of the standard error of the ability estimates for each respondent.
#' @param NCell The initial number of cells for estimating the overall ability difference between the focal and reference groups.
#' @param cellmin Minimum number of respondents per cell for the focal and reference group. Cells with fewer respondents are discarded.
#' @param pctmin Minimum rate of focal and reference group that should be used for estimating the over ability difference between focal and groups after discarding cells with few respondents.
#'
#' @return A list with four elements. The first element is the response matrix, the second element is the name of
#' the DIF covariate, and the third element is the name of the test. The fourth element is a matrix where each
#' row corresponds to an item. The columns correspond to the following entries:
#' \describe{
#' \item{Beta}{The estimated weighted ability difference between the focal and reference groups.}
#' \item{Vars}{The estimation error of the weighted ability difference between the focal and reference groups.}
#' \item{N_R}{The number of respondents in the reference group.}
#' \item{N_F}{The number of respondents in the focal group.}
#' \item{NCell}{The initial number of cells for estimating the overall ability
#' difference between the focal and reference groups.}
#' \item{p_value}{The p-value of the null hypothesis that the ability difference
#' between the focal and reference groups is 0.}
#' }
#'
#' @examples
#' data("toydata")
#' resp <- toydata$resp
#' group_categ <- toydata$group_categ
#' theta_est <- toydata$theta_est
#' see_est <- toydata$see_est
#' mstSIB(resp = as.data.frame(resp), theta = theta_est,
#' DIF_covariate = group_categ, see = see_est)
#'
#' @export
mstSIB <- function(resp, DIF_covariate, theta = NULL, see = NULL,
cellmin = 3, pctmin = .9, NCell = 80){
# get call
call <- match.call()
# a theta-argument is required
if(is.null(theta)) stop("'theta'-argument is missing. Include a vector with the estimated 'theta'-values.", call. = FALSE)
# a see-argument is required
if(is.null(see)) stop("'see'-argument is missing. Include a vector with the estimated standard errors of the 'theta'-values.", call. = FALSE)
# get the DIF_covariate name
DIF_covariate_name <- as.character(deparse(call$DIF_covariate))
# only works for two groups coded 0 and 1
DIF_covariate <- as.factor(DIF_covariate)
stopifnot(nlevels(DIF_covariate) == 2)
levels(DIF_covariate) <- c(0, 1)
DIF_covariate <- as.numeric(as.character(DIF_covariate))
# get number of items
nItem <- ncol(resp)
# get/set item names
colnames(resp) <- itemnames <- 'if'(is.null(colnames(resp)),
sprintf(paste("it%0", nchar(nItem),
"d", sep=''),
seq_len(nItem)),
colnames(resp))
##insert by variable
Sif <- cbind(theta, see, DIF_covariate, resp)
BetaOut<-matrix(numeric(0),dim(Sif)[2]-3,6)
rownames(BetaOut) <- colnames(resp)
colnames(BetaOut) <- c("stat", "SE", "N_R", "N_F", "NCell", "p_value")
##Start here
for(inum in 4:dim(Sif)[2]){
Rif <- Sif[Sif[, 3] == 0 & !is.na(Sif[, inum]), ]
Fif <- Sif[Sif[, 3] == 1 & !is.na(Sif[, inum]), ]
if(nrow(Rif) > 0 & nrow(Fif) > 0){
RSR <- Rif[, inum]
FSR <- Fif[, inum]
## Splitting the file into the focus and reference group, their item response starts at col 6
EThetaF <- Fif[1:dim(Fif)[1],1]
ESEF<-Fif[1:dim(Fif)[1],2]
EThetaR<-Rif[1:dim(Rif)[1],1]
ESER<-Rif[1:dim(Rif)[1],2]
MeanR<-mean(EThetaR)
VarR<-var(EThetaR)
MeanF<-mean(EThetaF)
VarF<-var(EThetaF)
TMin<-min(min(EThetaR),min(EThetaF))
TMax<-max(max(EThetaR),max(EThetaF))
RI<-(ESER^-2)
FI<-(ESEF^-2)
MeanRI<-mean(RI)
MeanFI<-mean(FI)
## fixed, the equation is be = rmean + (1. - (1./rinfomean)/rvar)*(thetaro(j) - rmean)
## we need test information
AThetaR<-MeanR+(1-(1/MeanRI/VarR))*(EThetaR - MeanR)
AThetaF<-MeanF+(1-(1/MeanFI/VarF))*(EThetaF - MeanF)
## Sort all examinees and their responses by their thetahats
RefMain<-cbind(AThetaR,RSR)
FocMain<-cbind(AThetaF,FSR)
RefMain<-RefMain[order(RefMain[,1]),]
FocMain<-FocMain[order(FocMain[,1]),]
## Defining Min and Max for interval establishment
TMin<-min(min(RefMain[,1]),min(FocMain[,1]))
TMax<-max(max(RefMain[,1]),max(FocMain[,1]))
##Define Initial number of cells
##try here first, finding and counting for bins
RefInt<-findInterval(RefMain[,1],(TMin+((TMax-TMin)/NCell)*0:NCell), rightmost.closed = FALSE, all.inside = FALSE)
FocInt<-findInterval(FocMain[,1],(TMin+((TMax-TMin)/NCell)*0:NCell), rightmost.closed = FALSE, all.inside = FALSE)
CellCountR<-0
CellCountF<-0
for(i in 1:(NCell+1)){
CellCountR[i]<-length(RefInt[RefInt==i])
CellCountF[i]<-length(FocInt[FocInt==i])
if ((CellCountR[i]<cellmin)||(CellCountF[i]<cellmin)){
CellCountR[i]<-0
CellCountF[i]<-0
RefInt[RefInt==i]<-0
FocInt[FocInt==i]<-0
}
}
while(((sum(CellCountR)<=pctmin*dim(RefMain)[1])&&(NCell>5))||((sum(CellCountF)<=pctmin*dim(FocMain)[1])&&(NCell>5))||((sum(CellCountR)+sum(CellCountF))<=(pctmin*dim(RefMain)[1]+pctmin*dim(FocMain)[1])&&(NCell>5))) {
NCell<-NCell-4
RefInt<-findInterval(RefMain[,1],(TMin+((TMax-TMin)/NCell)*0:NCell), rightmost.closed = FALSE, all.inside = FALSE)
FocInt<-findInterval(FocMain[,1],(TMin+((TMax-TMin)/NCell)*0:NCell), rightmost.closed = FALSE, all.inside = FALSE)
CellCountR<-0
CellCountF<-0
for(i in 1:(NCell+1)){
CellCountR[i]<-length(RefInt[RefInt==i])
CellCountF[i]<-length(FocInt[FocInt==i])
if ((CellCountR[i]<cellmin)||(CellCountF[i]<cellmin)){
CellCountR[i]<-0
CellCountF[i]<-0
RefInt[RefInt==i]<-0
FocInt[FocInt==i]<-0
}
}
}
##check numbers for bins
##NCell
##sum(CellCountF)
##sum(CellCountR>1)
##CellCountF[1]
##CellCountR[1]
##RefInt[RefInt==1]
##plot(RefInt)
##item proportion for bins
beta<-0
items<- if(is.null(dim(RSR))) 1 else (dim(RSR)[2])
vars<-0
for(j in 1:items){
ybarR<-0
ybarF<-0
uf2sum<-0
#ufsum<-0
#ursum<-0
ur2sum<-0
for(i in 1:NCell+1){
uf2sum[i]<-sum(FocMain[FocInt==i,j+1])^2
#ufsum[i]<-sum(FocMain[FocInt==i,j+1])
#ursum[i]<-sum(RefMain[RefInt==i,j+1])
ur2sum[i]<-sum(RefMain[RefInt==i,j+1])^2
ybarR[i]<-sum(RefMain[RefInt==i,j+1])/CellCountR[i]
ybarF[i]<-sum(FocMain[FocInt==i,j+1])/CellCountF[i]
}
wt<-(CellCountR+CellCountF)/(sum(CellCountR)+sum(CellCountF))
wtsum<-sum(wt)
varr<-(ur2sum-(CellCountR*ybarR*ybarR))/(CellCountR-1)
varf<-(uf2sum-(CellCountF*ybarF*ybarF))/(CellCountF-1)
varr
varf
bbg<-(ybarR-ybarF)*wt
var<-wt*wt*((1/CellCountR)*varr + (1/CellCountF)*varf)
##plot((PropCorR/CellCountR)-(PropCorF/CellCountF))
beta[j]<-sum(bbg, na.rm=TRUE)
vars[j]<-sum(var,na.rm=TRUE)
}
} else{beta<- NA
vars <- NA
NCell <- NA}
BetaOut[inum-3,1]<-beta
BetaOut[inum-3,2]<-vars
BetaOut[inum-3,3]<-dim(Rif)[1]
BetaOut[inum-3,4]<-dim(Fif)[1]
BetaOut[inum-3,5]<-NCell
BetaOut[inum-3,6]<-2 * stats::pnorm(-abs(beta/vars))
}
return(list(resp = resp,
DIF_covariate = DIF_covariate_name,
test = "SIB-test",
results = as.data.frame(BetaOut)))
}
|
## Author Truc Viet 'Joe' Le at tjle@andrew.cmu.edu
##
## Segments one day of taxi GPS traces into trips and plots the resulting
## origin-destination pairs over Shenzhen shapefiles (see the sections below).
# NOTE(review): rm(list = ls()) in a script is discouraged -- it wipes the
# caller's workspace (and does not detach packages); consider removing it and
# running the script in a fresh session instead.
rm(list = ls())
library(plyr)
library(ggplot2)
library(maptools)
library(rgdal)
library(rgeos)
# library(rmongodb)
source("./R Script/Util/fivethirtyeight_theme.R")
## Load all taxi GPS traces on Sept. 11, 2009 when occupied
# Expected columns (read by the code below): taxi_no, time ("HH:MM:SS"),
# lon, lat -- TODO confirm against the CSV schema.
taxi.data <- read.csv(file="./Data/filtered-taxiTraj-2009-09-11.csv", header=TRUE)
# ## Login credentials
# host <- "heinz-tjle.heinz.cmu.edu"
# username <- "student"
# password <- "helloWorld"
# db <- "admin"
#
# ## Connect to MongoDB remote server
# mongo <- mongo.create(host = host, db = db, username = username, password = password)
# ## Check if we are successfully connected
# mongo.is.connected(mongo)
#
# ## The database we're working with is 'admin' and the collection is 'taxi'
# collection <- "taxi"
# namespace <- paste(db, collection, sep=".")
#
# ## Retrieve all ride records on Sept. 11, 2009
# query <- mongo.bson.from.list(list('date'='2009-09-11', 'occupy'=1))
# ## Define the fields to be returned
# fields <- mongo.bson.buffer.create()
# ## '1L' means we want to turn this field on, '0L' to turn it off
# mongo.bson.buffer.append(fields, "_id", 0L)
# mongo.bson.buffer.append(fields, "taxi_no", 1L)
# mongo.bson.buffer.append(fields, "date", 1L)
# mongo.bson.buffer.append(fields, "time", 1L)
# mongo.bson.buffer.append(fields, "lon", 1L)
# mongo.bson.buffer.append(fields, "lat", 1L)
# ## Make an object from the buffer
# fields <- mongo.bson.from.buffer(fields)
#
# ## Create the query cursor
# cursor <- mongo.find(mongo, namespace, query=query, fields=fields)
# ## Define a master data frame to store results
# taxi.data <- data.frame(stringsAsFactors=FALSE)
# ## Iterate over the cursor
# while(mongo.cursor.next(cursor)) {
# ## Iterate and grab the next record
# value <- mongo.cursor.value(cursor)
# taxi.record <- mongo.bson.to.list(value)
# ## Make it a data frame
# taxi.df <- as.data.frame(t(unlist(taxi.record)), stringsAsFactors=FALSE)
# ## Bind to the master data frame
# taxi.data <- rbind.fill(taxi.data, taxi.df)
# }
#
# ## Release the resources attached to cursor on both client and server
# mongo.cursor.destroy(cursor)
# ## Close the connection
# mongo.disconnect(mongo)
# mongo.destroy(mongo)
## Segment each taxi's GPS trace into trips.
##
## For every consecutive pair of pings from the same taxi, compute the gap in
## seconds and label each ping "start" (first ping of a trip), "going"
## (mid-trip), or "end" (gap >= threshold, or the last ping of that taxi).
## A ping that both follows an "end" and closes its taxi's trace is labelled
## "error" (a one-ping fragment). Appends two columns to `taxi.data`:
## `duration` (seconds to the next ping; NA at taxi boundaries) and
## `indicator` (the trip-state label).

# Parse an "HH:MM:SS" timestamp into seconds since midnight.
time_to_seconds <- function(ts) {
  parts <- strsplit(toString(ts), ":")[[1]]
  as.numeric(parts[1]) * 3600 + as.numeric(parts[2]) * 60 + as.numeric(parts[3])
}

n_obs <- nrow(taxi.data)
# Preallocate instead of growing the vectors inside the loop.
duration <- rep(NA_real_, n_obs)
trip_indicator <- rep(NA_character_, n_obs)
threshold <- 5 * 60  # seconds; gaps at least this long split a trip

## Create a progress bar
progress.bar <- create_progress_bar("text")
progress.bar$init(n_obs - 1)
# seq_len() is safe when n_obs < 2 (the original 1:(n-1) was not).
for (i in seq_len(n_obs - 1)) {
  if (taxi.data$taxi_no[i] == taxi.data$taxi_no[i + 1]) {
    duration[i] <- time_to_seconds(taxi.data$time[i + 1]) -
      time_to_seconds(taxi.data$time[i])
    if (i == 1 || trip_indicator[i - 1] %in% c("end", "error")) {
      # First ping overall, or first ping after a completed trip.
      trip_indicator[i] <- "start"
    } else if (duration[i] >= threshold) {
      trip_indicator[i] <- "end"
    } else {
      trip_indicator[i] <- "going"
    }
  } else {
    # Taxi changes between rows i and i+1: row i closes the current trace.
    duration[i] <- NA
    # Guarding i > 1 fixes a crash in the original (`trip_indicator[0]`
    # yields a length-zero condition when the very first row closes a trace).
    if (i > 1 && trip_indicator[i - 1] == "end") {
      # Previous ping already ended a trip, so this row is a stray fragment.
      trip_indicator[i] <- "error"
    } else {
      trip_indicator[i] <- "end"
    }
  }
  progress.bar$step()
}
# The final row always terminates the last taxi's trace.
duration[n_obs] <- NA
trip_indicator[n_obs] <- "end"
taxi.data$duration <- duration
taxi.data$indicator <- trip_indicator
## Extract origin-destination (OD) pairs from the labelled trace.
##
## Each "start" row contributes an origin and each "end" row a destination;
## other labels ("going", "error") are ignored, exactly as in the original
## row-by-row loop. The i-th origin is paired with the i-th destination, so
## starts and ends are assumed to alternate consistently. Vectorized
## subsetting replaces the original O(n^2) rbind-in-a-loop (and the progress
## bar it needed).
or.data <- data.frame(  # data frame for the origins
  or_lon = taxi.data$lon[taxi.data$indicator == "start"],
  or_lat = taxi.data$lat[taxi.data$indicator == "start"]
)
dest.data <- data.frame(  # data frame for the destinations
  dest_lon = taxi.data$lon[taxi.data$indicator == "end"],
  dest_lat = taxi.data$lat[taxi.data$indicator == "end"]
)
## Combine the OD pairs (cbind requires equal numbers of starts and ends,
## just as the original code did).
od.data <- cbind(or.data, dest.data)
## Artificial group id per OD pair, needed by ggplot's group aesthetic when
## drawing segments over the shapefile layers.
od.data$group <- seq_len(nrow(od.data))
## Remove the axes in the resulting plot
x_quiet <- scale_x_continuous("", breaks=NULL)
y_quiet <- scale_y_continuous("", breaks=NULL)
quiet <- list(x_quiet, y_quiet)
## Read the shapefiles
## The spatial object wouldn't have a coordinate system assigned to it.
## We can check it by proj4string(sz_bou). We thus need to assign a CRS
## (coordinate reference system) to the object before we can plot it.
## Here we use the WGS84 standard (the World Geodetic System proposed in 1984)
# NOTE(review): rgdal (readOGR/proj4string/CRS) was retired in 2023; consider
# migrating to sf::st_read() / sf::st_crs() if this script is still in use.
sz_bou <- readOGR(dsn="./Shp", layer="sz_bou")     # city boundary
proj4string(sz_bou) <- CRS("+init=epsg:4326")
sz_road <- readOGR(dsn="./Shp", layer="sz_road")   # road network
proj4string(sz_road) <- CRS("+init=epsg:4326")
sz_veg <- readOGR(dsn="./Shp", layer="sz_veg")     # vegetation
proj4string(sz_veg) <- CRS("+init=epsg:4326")
sz_wat <- readOGR(dsn="./Shp", layer="sz_wat")     # water bodies
proj4string(sz_wat) <- CRS("+init=epsg:4326")
## Convert shapefiles into data frames so that they can be plotted using ggplot
sz_bou.data <- fortify(sz_bou)
sz_road.data <- fortify(sz_road) ## this will take a while
sz_veg.data <- fortify(sz_veg)
sz_wat.data <- fortify(sz_wat)
## Plot the shapfiles using ggplot: boundary base layer first, then water,
## vegetation and roads stacked on top.
shenzhen <- ggplot(data=sz_bou.data, aes(x=long, y=lat,
                                         group=group)) + geom_polygon(fill="lightblue") +
  ggtitle("Map of Shenzhen + All Taxi OD Pairs on 09/11/2009")
shenzhen <- shenzhen + geom_polygon(data=sz_wat.data,
                                    aes(x=long, y=lat, group=group),
                                    fill="blue", alpha=0.75)
shenzhen <- shenzhen + geom_polygon(data=sz_veg.data,
                                    aes(x=long, y=lat, group=group),
                                    fill="darkgreen", alpha=0.75)
shenzhen <- shenzhen + geom_polygon(data=sz_road.data,
                                    aes(x=long, y=lat, group=group),
                                    color="darkgrey", fill=NA)
## Add OD pairs to the shapefiles: one straight red segment per trip, drawn
## from its origin to its destination, with axes suppressed and an equal
## aspect ratio so the map is not distorted.
shenzhen.od <- shenzhen + geom_segment(data=od.data, aes(x=or_lon, y=or_lat,
                                                         xend=dest_lon, yend=dest_lat,
                                                         group=group),
                                       alpha=0.8, col="red") + quiet + coord_equal() +
  fivethirtyeight_theme()
print(shenzhen.od)
# ggsave() with no plot argument saves the last plot displayed (shenzhen.od).
ggsave(filename="./Image/20090911.png", scale = 3, dpi = 400)
| /R Script/taxi_flows.R | no_license | nemochina2008/taxi-fare-estimation | R | false | false | 7,119 | r | ## Author Truc Viet 'Joe' Le at tjle@andrew.cmu.edu
rm(list = ls())
library(plyr)
library(ggplot2)
library(maptools)
library(rgdal)
library(rgeos)
# library(rmongodb)
source("./R Script/Util/fivethirtyeight_theme.R")
## Load all taxi GPS traces on Sept. 11, 2009 when occupied
taxi.data <- read.csv(file="./Data/filtered-taxiTraj-2009-09-11.csv", header=TRUE)
# ## Login credentials
# host <- "heinz-tjle.heinz.cmu.edu"
# username <- "student"
# password <- "helloWorld"
# db <- "admin"
#
# ## Connect to MongoDB remote server
# mongo <- mongo.create(host = host, db = db, username = username, password = password)
# ## Check if we are successfully connected
# mongo.is.connected(mongo)
#
# ## The database we're working with is 'admin' and the collection is 'taxi'
# collection <- "taxi"
# namespace <- paste(db, collection, sep=".")
#
# ## Retrieve all ride records on Sept. 11, 2009
# query <- mongo.bson.from.list(list('date'='2009-09-11', 'occupy'=1))
# ## Define the fields to be returned
# fields <- mongo.bson.buffer.create()
# ## '1L' means we want to turn this field on, '0L' to turn it off
# mongo.bson.buffer.append(fields, "_id", 0L)
# mongo.bson.buffer.append(fields, "taxi_no", 1L)
# mongo.bson.buffer.append(fields, "date", 1L)
# mongo.bson.buffer.append(fields, "time", 1L)
# mongo.bson.buffer.append(fields, "lon", 1L)
# mongo.bson.buffer.append(fields, "lat", 1L)
# ## Make an object from the buffer
# fields <- mongo.bson.from.buffer(fields)
#
# ## Create the query cursor
# cursor <- mongo.find(mongo, namespace, query=query, fields=fields)
# ## Define a master data frame to store results
# taxi.data <- data.frame(stringsAsFactors=FALSE)
# ## Iterate over the cursor
# while(mongo.cursor.next(cursor)) {
# ## Iterate and grab the next record
# value <- mongo.cursor.value(cursor)
# taxi.record <- mongo.bson.to.list(value)
# ## Make it a data frame
# taxi.df <- as.data.frame(t(unlist(taxi.record)), stringsAsFactors=FALSE)
# ## Bind to the master data frame
# taxi.data <- rbind.fill(taxi.data, taxi.df)
# }
#
# ## Release the resources attached to cursor on both client and server
# mongo.cursor.destroy(cursor)
# ## Close the connection
# mongo.disconnect(mongo)
# mongo.destroy(mongo)
## Compute the duration between each timestamp
duration <- vector()
trip_indicator <- vector()
threshold <- 5*60 # seconds
## Create a progress bar
progress.bar <- create_progress_bar("text")
progress.bar$init(nrow(taxi.data)-1)
for(i in 1:(nrow(taxi.data)-1)) {
this_taxi_no <- taxi.data$taxi_no[i]
next_taxi_no <- taxi.data$taxi_no[i+1]
if(this_taxi_no == next_taxi_no) {
this_timestamp <- toString(taxi.data$time[i])
this_timestamp <- strsplit(this_timestamp, ":")[[1]]
this_second <- as.numeric(this_timestamp[1])*3600 + as.numeric(this_timestamp[2])*60 + as.numeric(this_timestamp[3])
next_timestamp <- toString(taxi.data$time[i+1])
next_timestamp <- strsplit(next_timestamp, ":")[[1]]
next_second <- as.numeric(next_timestamp[1])*3600 + as.numeric(next_timestamp[2])*60 + as.numeric(next_timestamp[3])
duration[i] <- next_second - this_second
if(i == 1) {
trip_indicator[i] <- "start"
} else {
if(trip_indicator[i-1] == "end" || trip_indicator[i-1] == "error") {
trip_indicator[i] <- "start"
} else {
if(duration[i] >= threshold) {
trip_indicator[i] <- "end"
} else {
trip_indicator[i] <- "going"
}
}
}
} else {
duration[i] <- NA
if(trip_indicator[i-1] == "end") {
trip_indicator[i] <- "error"
} else {
trip_indicator[i] <- "end"
}
}
progress.bar$step()
}
duration[nrow(taxi.data)] <- NA
trip_indicator[nrow(taxi.data)] <- "end"
taxi.data$duration <- duration
taxi.data$indicator <- trip_indicator
## Create a progress bar
progress.bar <- create_progress_bar("text")
progress.bar$init(nrow(taxi.data))
or.data <- data.frame() # data frame for the origins
dest.data <- data.frame() # data frame for the destinations
for(i in 1:nrow(taxi.data)) {
indicator <- taxi.data$indicator[i]
if(indicator == "start") {
or_lon <- taxi.data$lon[i]
or_lat <- taxi.data$lat[i]
or_data <- data.frame(or_lon = or_lon, or_lat = or_lat)
or.data <- rbind(or.data, or_data)
} else if(indicator == "end") {
dest_lon <- taxi.data$lon[i]
dest_lat <- taxi.data$lat[i]
dest_data <- data.frame(dest_lon = dest_lon, dest_lat = dest_lat)
dest.data <- rbind(dest.data, dest_data)
} else {
## Do nothing
}
progress.bar$step()
}
## Combine the OD pairs
od.data <- cbind(or.data, dest.data)
## Add artificial group to the OD pairs in order to be compatitable with the shapefiles
od.data$group <- seq(1:nrow(od.data))
## Remove the axes in the resulting plot
x_quiet <- scale_x_continuous("", breaks=NULL)
y_quiet <- scale_y_continuous("", breaks=NULL)
quiet <- list(x_quiet, y_quiet)
## Read the shapefiles
## The spatial object wouldn't have a coordinate system assigned to it.
## We can check it by proj4string(sz_bou). We thus need to assign a CRS
## (coordinate reference system) to the object before we can plot it.
## Here we use the WGS84 standard (the World Geodetic System proposed in 1984)
sz_bou <- readOGR(dsn="./Shp", layer="sz_bou")
proj4string(sz_bou) <- CRS("+init=epsg:4326")
sz_road <- readOGR(dsn="./Shp", layer="sz_road")
proj4string(sz_road) <- CRS("+init=epsg:4326")
sz_veg <- readOGR(dsn="./Shp", layer="sz_veg")
proj4string(sz_veg) <- CRS("+init=epsg:4326")
sz_wat <- readOGR(dsn="./Shp", layer="sz_wat")
proj4string(sz_wat) <- CRS("+init=epsg:4326")
## Convert shapefiles into data frames so that they can be plotted using ggplot
sz_bou.data <- fortify(sz_bou)
sz_road.data <- fortify(sz_road) ## this will take a while
sz_veg.data <- fortify(sz_veg)
sz_wat.data <- fortify(sz_wat)
## Plot the shapfiles using ggplot
shenzhen <- ggplot(data=sz_bou.data, aes(x=long, y=lat,
group=group)) + geom_polygon(fill="lightblue") +
ggtitle("Map of Shenzhen + All Taxi OD Pairs on 09/11/2009")
shenzhen <- shenzhen + geom_polygon(data=sz_wat.data,
aes(x=long, y=lat, group=group),
fill="blue", alpha=0.75)
shenzhen <- shenzhen + geom_polygon(data=sz_veg.data,
aes(x=long, y=lat, group=group),
fill="darkgreen", alpha=0.75)
shenzhen <- shenzhen + geom_polygon(data=sz_road.data,
aes(x=long, y=lat, group=group),
color="darkgrey", fill=NA)
## Add OD pairs to the shapefiles
shenzhen.od <- shenzhen + geom_segment(data=od.data, aes(x=or_lon, y=or_lat,
xend=dest_lon, yend=dest_lat,
group=group),
alpha=0.8, col="red") + quiet + coord_equal() +
fivethirtyeight_theme()
print(shenzhen.od)
ggsave(filename="./Image/20090911.png", scale = 3, dpi = 400)
|
library(circlize)
library(gridBase)
library(RColorBrewer)
library(ComplexHeatmap)
# Read the precomputed methylation/expression result list.
res_list <- readRDS("Downloads/meth.rds")
# Unpack the list elements into individual variables used by the plot below.
type <- res_list$type
mat_meth <- res_list$mat_meth
mat_expr <- res_list$mat_expr
direction <- res_list$direction
cor_pvalue <- res_list$cor_pvalue
gene_type <- res_list$gene_type
anno_gene <- res_list$anno_gene
dist <- res_list$dist  # note: shadows stats::dist for the rest of the session
anno_enhancer <- res_list$anno_enhancer
# k-means clustering of the methylation rows into 5 clusters; the cluster
# labels split the circular heatmap into sectors.
# NOTE(review): kmeans() is randomly initialised -- call set.seed() first if
# reproducible sector assignment is needed.
km <- kmeans(mat_meth, centers = 5)$cluster
# Colour mappings for each data track (continuous ramps or discrete lookups).
col_meth <- colorRamp2(c(0, 0.5, 1), c("#a6611a", "#f5f5f5", "#018571"))
col_direction <- c("hyper" = "red", "hypo" = "blue")
col_expr <- colorRamp2(c(-2, 0, 2), c("#d01c8b", "#f7f7f7", "#4dac26"))
col_pvalue <- colorRamp2(c(0, 2, 4), c("#f1a340", "#f7f7f7", "#998ec3"))
col_gene_type <- structure(brewer.pal(length(unique(gene_type)), "Set3"), names = unique(gene_type))
col_anno_gene <- structure(brewer.pal(length(unique(anno_gene)), "Set1"), names = unique(anno_gene))
col_dist <- colorRamp2(c(0, 10000), c("#ef8a62", "#67a9cf"))
col_enhancer <- colorRamp2(c(0, 1), c("#fc8d59", "#99d594"))
# Random example links between rows (drawn inside the circle later).
# NOTE(review): sample() is also unseeded, so the links differ on every run.
df_link <- data.frame(
  from_index = sample(nrow(mat_meth), 20),
  to_index = sample(nrow(mat_meth), 20)
)
# Draw the circular heatmap.
# One circos.heatmap() call per track, drawn outermost-in: methylation
# matrix (split into the k-means clusters), direction, expression matrix,
# correlation p-value, gene type, gene annotation, distance to TSS, and
# enhancer overlap; finally link lines between sampled row pairs.
# Relies on the globals defined above (mat_meth, km, df_link, col_* palettes).
circlize_plot <- function() {
  # Wide tracks (0.12) for the two matrices, thin tracks for the
  # single-column annotations.
  circos.heatmap(mat_meth, split = km, col = col_meth, track.height = 0.12)
  circos.heatmap(direction, col = col_direction, track.height = 0.01)
  circos.heatmap(mat_expr, col = col_expr, track.height = 0.12)
  circos.heatmap(cor_pvalue, col = col_pvalue, track.height = 0.01)
  circos.heatmap(gene_type, col = col_gene_type, track.height = 0.01)
  circos.heatmap(anno_gene, col = col_anno_gene, track.height = 0.01)
  circos.heatmap(dist, col = col_dist, track.height = 0.01)
  circos.heatmap(anno_enhancer, col = col_enhancer, track.height = 0.03)
  # Add link lines between randomly paired rows, each in a random colour.
  for(i in seq_len(nrow(df_link))) {
    circos.heatmap.link(
      df_link$from_index[i], df_link$to_index[i], col = rand_color(1))
  }
  # Reset circos layout state so later plots start fresh.
  circos.clear()
}
# 设置图例
lgd_meth <- Legend(title = "Methylation", col_fun = col_meth)
lgd_direction <- Legend(
title = "Direction", at = names(col_direction),
legend_gp = gpar(fill = col_direction)
)
lgd_expr <- Legend(title = "Expression", col_fun = col_expr)
lgd_pvalue <- Legend(
title = "P-value", col_fun = col_pvalue, at = c(0, 2, 4),
labels = c(1, 0.01, 0.0001)
)
lgd_gene_type <- Legend(
title = "Gene type", at = names(col_gene_type),
legend_gp = gpar(fill = col_gene_type)
)
lgd_anno_gene <- Legend(
title = "Gene anno", at = names(col_anno_gene),
legend_gp = gpar(fill = col_anno_gene)
)
lgd_dist <- Legend(
title = "Dist to TSS", col_fun = col_dist,
at = c(0, 5000, 10000), labels = c("0kb", "5kb", "10kb")
)
lgd_enhancer <- Legend(
title = "Enhancer overlap", col_fun = col_enhancer,
at = c(0, 0.25, 0.5, 0.75, 1),
labels = c("0%", "25%", "50%", "75%", "100%")
)
# 创建 png 图形设备,并设置足够的大小
# 注意:如果图形设备的大小太小,会提示 "figure margins too large"
# 并且,gridOMI() 会返回负值
png(filename = "~/Downloads/a.png", width = 1000, height = 800)
plot.new()
circle_size = unit(1, "snpc") # snpc unit gives you a square region
pushViewport(viewport(
x = 0, y = 0.5, width = circle_size,
height = circle_size, just = c("left", "center"))
)
# 设置 new = TRUE,避免重新创建图形
par(omi = gridOMI(), new = TRUE)
circlize_plot()
upViewport()
# 获取图形设备的高度
h <- dev.size()[2]
lgd_list <- packLegend(
lgd_meth, lgd_direction, lgd_expr,
lgd_pvalue, lgd_gene_type, lgd_anno_gene,
lgd_dist, lgd_enhancer,
max_height = unit(0.9*h, "inch")
)
draw(lgd_list, x = circle_size, just = "left")
dev.off()
circos.clear()
| /R/plot/circos_heatmap.R | no_license | CuncanDeng/learn | R | false | false | 3,771 | r | library(circlize)
library(gridBase)
library(RColorBrewer)
library(ComplexHeatmap)
# 读取数据
res_list <- readRDS("Downloads/meth.rds")
# 分配数据变量
type <- res_list$type
mat_meth <- res_list$mat_meth
mat_expr <- res_list$mat_expr
direction <- res_list$direction
cor_pvalue <- res_list$cor_pvalue
gene_type <- res_list$gene_type
anno_gene <- res_list$anno_gene
dist <- res_list$dist
anno_enhancer <- res_list$anno_enhancer
# k-means 聚类
km <- kmeans(mat_meth, centers = 5)$cluster
# 为数据分配颜色
col_meth <- colorRamp2(c(0, 0.5, 1), c("#a6611a", "#f5f5f5", "#018571"))
col_direction <- c("hyper" = "red", "hypo" = "blue")
col_expr <- colorRamp2(c(-2, 0, 2), c("#d01c8b", "#f7f7f7", "#4dac26"))
col_pvalue <- colorRamp2(c(0, 2, 4), c("#f1a340", "#f7f7f7", "#998ec3"))
col_gene_type <- structure(brewer.pal(length(unique(gene_type)), "Set3"), names = unique(gene_type))
col_anno_gene <- structure(brewer.pal(length(unique(anno_gene)), "Set1"), names = unique(anno_gene))
col_dist <- colorRamp2(c(0, 10000), c("#ef8a62", "#67a9cf"))
col_enhancer <- colorRamp2(c(0, 1), c("#fc8d59", "#99d594"))
# 创建连接数据
df_link <- data.frame(
from_index = sample(nrow(mat_meth), 20),
to_index = sample(nrow(mat_meth), 20)
)
# Draw the circular heatmap.
# One circos.heatmap() call per track, drawn outermost-in: methylation
# matrix (split into the k-means clusters), direction, expression matrix,
# correlation p-value, gene type, gene annotation, distance to TSS, and
# enhancer overlap; finally link lines between sampled row pairs.
# Relies on the globals defined above (mat_meth, km, df_link, col_* palettes).
circlize_plot <- function() {
  # Wide tracks (0.12) for the two matrices, thin tracks for the
  # single-column annotations.
  circos.heatmap(mat_meth, split = km, col = col_meth, track.height = 0.12)
  circos.heatmap(direction, col = col_direction, track.height = 0.01)
  circos.heatmap(mat_expr, col = col_expr, track.height = 0.12)
  circos.heatmap(cor_pvalue, col = col_pvalue, track.height = 0.01)
  circos.heatmap(gene_type, col = col_gene_type, track.height = 0.01)
  circos.heatmap(anno_gene, col = col_anno_gene, track.height = 0.01)
  circos.heatmap(dist, col = col_dist, track.height = 0.01)
  circos.heatmap(anno_enhancer, col = col_enhancer, track.height = 0.03)
  # Add link lines between randomly paired rows, each in a random colour.
  for(i in seq_len(nrow(df_link))) {
    circos.heatmap.link(
      df_link$from_index[i], df_link$to_index[i], col = rand_color(1))
  }
  # Reset circos layout state so later plots start fresh.
  circos.clear()
}
# 设置图例
lgd_meth <- Legend(title = "Methylation", col_fun = col_meth)
lgd_direction <- Legend(
title = "Direction", at = names(col_direction),
legend_gp = gpar(fill = col_direction)
)
lgd_expr <- Legend(title = "Expression", col_fun = col_expr)
lgd_pvalue <- Legend(
title = "P-value", col_fun = col_pvalue, at = c(0, 2, 4),
labels = c(1, 0.01, 0.0001)
)
lgd_gene_type <- Legend(
title = "Gene type", at = names(col_gene_type),
legend_gp = gpar(fill = col_gene_type)
)
lgd_anno_gene <- Legend(
title = "Gene anno", at = names(col_anno_gene),
legend_gp = gpar(fill = col_anno_gene)
)
lgd_dist <- Legend(
title = "Dist to TSS", col_fun = col_dist,
at = c(0, 5000, 10000), labels = c("0kb", "5kb", "10kb")
)
lgd_enhancer <- Legend(
title = "Enhancer overlap", col_fun = col_enhancer,
at = c(0, 0.25, 0.5, 0.75, 1),
labels = c("0%", "25%", "50%", "75%", "100%")
)
# 创建 png 图形设备,并设置足够的大小
# 注意:如果图形设备的大小太小,会提示 "figure margins too large"
# 并且,gridOMI() 会返回负值
png(filename = "~/Downloads/a.png", width = 1000, height = 800)
plot.new()
circle_size = unit(1, "snpc") # snpc unit gives you a square region
pushViewport(viewport(
x = 0, y = 0.5, width = circle_size,
height = circle_size, just = c("left", "center"))
)
# 设置 new = TRUE,避免重新创建图形
par(omi = gridOMI(), new = TRUE)
circlize_plot()
upViewport()
# 获取图形设备的高度
h <- dev.size()[2]
lgd_list <- packLegend(
lgd_meth, lgd_direction, lgd_expr,
lgd_pvalue, lgd_gene_type, lgd_anno_gene,
lgd_dist, lgd_enhancer,
max_height = unit(0.9*h, "inch")
)
draw(lgd_list, x = circle_size, just = "left")
dev.off()
circos.clear()
|
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
# Server logic: renders a grid of heart icons whose count reflects the
# predicted probability for the current inputs.
shinyServer(function(input, output) {
  # really good guide to svgs: http://tympanus.net/codrops/2013/11/27/svg-icons-ftw/

  # Build the HTML for a single row of `rowLength` heart icons (inline SVG),
  # terminated by an empty <div> that forces a line break.
  makeRowOfHearts <- function(rowLength){
    res <- "<svg height=\"50\" width=\"50\" viewBox=\"0 0 50 50\">
    <path id=\"heart-icon\" d=\"M16,28.261c0,0-14-7.926-14-17.046c0-9.356,13.159-10.399,14-0.454c1.011-9.938,14-8.903,14,0.454C30,20.335,16,28.261,16,28.261z\" style=\"height:1;width:1;fill:#ccc;\" />
    </svg>"
    res <- paste0(paste0(rep(res, rowLength), collapse = ""), "<div></div>")
    return(res)
  }

  # Stack `numberRows` full rows of 10 hearts each.
  makeRowsOfHearts <- function(numberRows){
    res <- paste0(rep(makeRowOfHearts(10), numberRows), collapse = "")
    return(res)
  }

  # Render `heartCount` hearts: full rows of 10 plus one partial row.
  makeHeartsPlot <- function(heartCount){
    rowsOfHearts <- heartCount %/% 10
    leftOver <- heartCount %% 10
    # Plain `if` is the idiomatic choice for a scalar condition (was ifelse()).
    res <- paste0(makeRowsOfHearts(rowsOfHearts),
                  if (leftOver > 0) paste0("<div></div>", makeRowOfHearts(leftOver)) else "")
    return(res)
  }

  # Placeholder model - always returns probability 1; replace with a real
  # prediction once the model is available.
  predictProbs <- function(something){
    return(1)
  }

  ### actual script starts here
  # BUG FIX: the original read input$something at server start-up, outside any
  # reactive context. Shiny errors on that ("Operation not allowed without an
  # active reactive context"), and the count would never update with the input
  # anyway. Compute the heart count inside renderUI so it re-evaluates
  # reactively whenever input$something changes.
  output$hearts <- renderUI({
    heartCount <- round(predictProbs(input$something) * 100)
    HTML(makeHeartsPlot(heartCount))
  })
})
| /server.R | no_license | paulinshek/RelationshipPredictionApp | R | false | false | 1,418 | r |
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
# Server logic: renders a grid of heart icons whose count reflects the
# predicted probability for the current inputs.
shinyServer(function(input, output) {
  # really good guide to svgs: http://tympanus.net/codrops/2013/11/27/svg-icons-ftw/

  # Build the HTML for a single row of `rowLength` heart icons (inline SVG),
  # terminated by an empty <div> that forces a line break.
  makeRowOfHearts <- function(rowLength){
    res <- "<svg height=\"50\" width=\"50\" viewBox=\"0 0 50 50\">
    <path id=\"heart-icon\" d=\"M16,28.261c0,0-14-7.926-14-17.046c0-9.356,13.159-10.399,14-0.454c1.011-9.938,14-8.903,14,0.454C30,20.335,16,28.261,16,28.261z\" style=\"height:1;width:1;fill:#ccc;\" />
    </svg>"
    res <- paste0(paste0(rep(res, rowLength), collapse = ""), "<div></div>")
    return(res)
  }

  # Stack `numberRows` full rows of 10 hearts each.
  makeRowsOfHearts <- function(numberRows){
    res <- paste0(rep(makeRowOfHearts(10), numberRows), collapse = "")
    return(res)
  }

  # Render `heartCount` hearts: full rows of 10 plus one partial row.
  makeHeartsPlot <- function(heartCount){
    rowsOfHearts <- heartCount %/% 10
    leftOver <- heartCount %% 10
    # Plain `if` is the idiomatic choice for a scalar condition (was ifelse()).
    res <- paste0(makeRowsOfHearts(rowsOfHearts),
                  if (leftOver > 0) paste0("<div></div>", makeRowOfHearts(leftOver)) else "")
    return(res)
  }

  # Placeholder model - always returns probability 1; replace with a real
  # prediction once the model is available.
  predictProbs <- function(something){
    return(1)
  }

  ### actual script starts here
  # BUG FIX: the original read input$something at server start-up, outside any
  # reactive context. Shiny errors on that ("Operation not allowed without an
  # active reactive context"), and the count would never update with the input
  # anyway. Compute the heart count inside renderUI so it re-evaluates
  # reactively whenever input$something changes.
  output$hearts <- renderUI({
    heartCount <- round(predictProbs(input$something) * 100)
    HTML(makeHeartsPlot(heartCount))
  })
})
|
## Methods for object of class "kvSparkData" - key-value pairs as Spark RDDs
#' Create an empty ddo shell for a Spark data connection.
#' @export
ddoInit.sparkDataConn <- function(obj, ...) {
  # Start from a bare list and tag it with the Spark key-value class.
  res <- list()
  class(res) <- "kvSparkData"
  res
}
#' Attach the underlying Spark RDD to a Spark data connection.
#'
#' Collects the candidate data file paths for the connection (HDFS or local
#' disk), filters out bookkeeping files (_meta, _outputs, logs, etc.), and
#' reads the remaining files into a Spark RDD stored on \code{obj$data}.
#' @export
ddoInitConn.sparkDataConn <- function(obj, ...) {
  if(obj$hdfs) {
    paths <- paste(obj$hdfsURI, rhls(obj$loc, recurse = TRUE)$file, sep = "")
    regxp <- rhoptions()$file.types.remove.regex
  } else {
    # BUG FIX: `paths` was used here before ever being assigned, so any
    # non-HDFS connection errored with "object 'paths' not found".
    # Enumerate the files on local disk instead (full paths, recursively).
    paths <- list.files(obj$loc, full.names = TRUE, recursive = TRUE)
    regxp <- "(/_meta|/_outputs|/_SUCCESS|/_LOG|/_log)"
  }
  # Drop bookkeeping/metadata files before handing the paths to Spark.
  paths <- paths[!grepl(regxp, paths)]
  if(length(paths) == 0)
    stop("No data found - use addData() or specify a connection with a location that contains data.")
  obj$data <- objectFile(getSparkContext(), paths)
  obj
}
#' Required attribute sets for Spark key-value data.
#'
#' Returns the datadr-defined attribute names that must be present on ddo
#' and ddf objects, as configured in the package options (via getDr).
#' @export
requiredObjAttrs.kvSparkData <- function(obj) {
  list(
    ddo = getDr("requiredDdoAttrs"),
    ddf = getDr("requiredDdfAttrs")
  )
}
#' Compute basic ddo attributes for Spark key-value data.
#'
#' Gathers total on-disk storage size (HDFS or local), a single example
#' key-value pair, and placeholder values for attributes that are expensive
#' to compute on an RDD.
#' @export
getBasicDdoAttrs.kvSparkData <- function(obj, conn) {
  if(conn$hdfs) {
    ff <- rhls(conn$loc, recurse = TRUE)
    ff <- ff[!grepl("\\/_meta", ff$file),]
    sz <- sum(ff$size)
  } else {
    ff <- list.files(conn$loc, recursive = TRUE)
    ff <- ff[!grepl("_meta\\/", ff)]
    # BUG FIX: the size lookup previously referenced an undefined object
    # `fp`; the files were listed relative to conn$loc, so that is the base
    # directory to join against.
    sz <- sum(file.info(file.path(conn$loc, ff))$size)
  }
  # Grab one record to use as the example key-value pair; text sources have
  # no key, so pad with an empty string.
  ex <- take(conn$data, 1)[[1]]
  if(conn$type == "text")
    ex <- list("", ex)
  # NOTE(review): extractableKV = TRUE here, but hasExtractableKV.kvSparkData()
  # returns FALSE - confirm which is intended.
  list(
    conn = conn,
    extractableKV = TRUE,
    totStorageSize = sz,
    totObjectSize = NA,
    nDiv = NA, # length(conn$data),
    example = ex
  )
}
#' Basic ddf attributes for Spark key-value data.
#'
#' Derives the variable-name -> class mapping from the example record's
#' value. NOTE(review): assumes the example value (second element of the
#' key-value pair) is a data frame / named list - confirm for non-tabular data.
#' @export
getBasicDdfAttrs.kvSparkData <- function(obj) {
  list(vars = lapply(kvExample(obj)[[2]], class))
}
# kvSparkData is never extractable (yet...): direct key lookup is unsupported.
#' @export
hasExtractableKV.kvSparkData <- function(x) FALSE
######################################################################
### extract methods
######################################################################
#' Extract key-value pairs from Spark data by index or by key.
#'
#' \code{i} may be numeric indices, actual keys (matched via their digest),
#' or 32-character digest strings. Returns a list of key-value pairs, or
#' NULL when nothing matches.
#' @export
extract.kvSparkData <- function(x, i, ...) {
  idx <- NULL
  dat <- getAttribute(x, "conn")$data
  keys <- getKeys(x)
  if(is.numeric(i)) {
    idx <- i
    # NOTE(review): for a single numeric index this returns the *first*
    # record regardless of the index value - looks like it should honour
    # idx; confirm intended behaviour.
    if(length(idx) == 1)
      return(take(dat, 1))
  } else {
    keyHashes <- getAttribute(x, "keyHashes")
    # try actual key
    idx <- unlist(lapply(as.character(sapply(i, digest)), function(x) which(keyHashes == x)))
    if(length(idx) == 0 && is.character(i)) {
      # Fall back to treating i as digest strings (32 hex characters).
      if(all(nchar(i) == 32)) {
        idx <- unlist(lapply(i, function(x) which(keyHashes == x)))
      }
    }
  }
  if(length(idx) == 0)
    return(NULL)
  # Look up each matched key directly in the RDD.
  lapply(idx, function(a) {
    lookup(dat, keys[[a]])
  })
}
######################################################################
### convert methods
######################################################################
#' Conversion targets implemented for Spark key-value data.
#' @export
convertImplemented.kvSparkData <- function(obj) {
  # Spark data can convert to another Spark connection or to memory (NULL).
  supported <- c("sparkDataConn", "NULL")
  supported
}
#' Convert Spark key-value data to another back end.
#'
#' Dispatches on the *target* (\code{to}): a sparkDataConn keeps the data in
#' Spark; NULL pulls it into local memory.
#' @export
convert.kvSparkData <- function(from, to=NULL) {
  convertkvSparkData(to, from)
}
# Internal generic: dispatch on the class of the target connection.
convertkvSparkData <- function(obj, ...)
  UseMethod("convertkvSparkData", obj)
# from sparkData to sparkData: identity conversion - the data already lives
# in Spark, so the source object is returned unchanged.
#' @export
convertkvSparkData.sparkDataConn <- function(to, from, verbose=FALSE) {
  from
}
# from sparkData to memory
#' @export
convertkvSparkData.NULL <- function(to, from, verbose=FALSE) {
  # Pull every key-value pair out of the RDD into local memory.
  res <- collect(getAttribute(from, "conn")$data)
  # Rebuild as a ddf or plain ddo depending on what the source was.
  if(inherits(from, "ddf")) {
    res <- ddf(res, update=FALSE, verbose=verbose)
  } else {
    res <- ddo(res, update=FALSE, verbose=verbose)
  }
  # Carry over attributes the in-memory object still needs from the source.
  addNeededAttrs(res, from)
}
# # from sparkData to local disk
# #' @export
# convertkvSparkData.sparkDataConn <- function(to, from, verbose=FALSE) {
# from
# }
#
# # from sparkData to HDFS
# #' @export
# convertkvSparkData.hdfsConn <- function(to, from, verbose=FALSE) {
# }
#
| /R/ddo_ddf_kvSpark.R | permissive | migariane/datadr | R | false | false | 3,666 | r | ## Methods for object of class "kvSparkData" - key-value pairs as Spark RDDs
#' @export
ddoInit.sparkDataConn <- function(obj, ...) {
structure(list(), class="kvSparkData")
}
#' Attach the underlying Spark RDD to a Spark data connection.
#'
#' Collects the candidate data file paths for the connection (HDFS or local
#' disk), filters out bookkeeping files (_meta, _outputs, logs, etc.), and
#' reads the remaining files into a Spark RDD stored on \code{obj$data}.
#' @export
ddoInitConn.sparkDataConn <- function(obj, ...) {
  if(obj$hdfs) {
    paths <- paste(obj$hdfsURI, rhls(obj$loc, recurse = TRUE)$file, sep = "")
    regxp <- rhoptions()$file.types.remove.regex
  } else {
    # BUG FIX: `paths` was used here before ever being assigned, so any
    # non-HDFS connection errored with "object 'paths' not found".
    # Enumerate the files on local disk instead (full paths, recursively).
    paths <- list.files(obj$loc, full.names = TRUE, recursive = TRUE)
    regxp <- "(/_meta|/_outputs|/_SUCCESS|/_LOG|/_log)"
  }
  # Drop bookkeeping/metadata files before handing the paths to Spark.
  paths <- paths[!grepl(regxp, paths)]
  if(length(paths) == 0)
    stop("No data found - use addData() or specify a connection with a location that contains data.")
  obj$data <- objectFile(getSparkContext(), paths)
  obj
}
#' @export
requiredObjAttrs.kvSparkData <- function(obj) {
list(
ddo = getDr("requiredDdoAttrs"),
ddf = getDr("requiredDdfAttrs")
)
}
#' Compute basic ddo attributes for Spark key-value data.
#'
#' Gathers total on-disk storage size (HDFS or local), a single example
#' key-value pair, and placeholder values for attributes that are expensive
#' to compute on an RDD.
#' @export
getBasicDdoAttrs.kvSparkData <- function(obj, conn) {
  if(conn$hdfs) {
    ff <- rhls(conn$loc, recurse = TRUE)
    ff <- ff[!grepl("\\/_meta", ff$file),]
    sz <- sum(ff$size)
  } else {
    ff <- list.files(conn$loc, recursive = TRUE)
    ff <- ff[!grepl("_meta\\/", ff)]
    # BUG FIX: the size lookup previously referenced an undefined object
    # `fp`; the files were listed relative to conn$loc, so that is the base
    # directory to join against.
    sz <- sum(file.info(file.path(conn$loc, ff))$size)
  }
  # Grab one record to use as the example key-value pair; text sources have
  # no key, so pad with an empty string.
  ex <- take(conn$data, 1)[[1]]
  if(conn$type == "text")
    ex <- list("", ex)
  # NOTE(review): extractableKV = TRUE here, but hasExtractableKV.kvSparkData()
  # returns FALSE - confirm which is intended.
  list(
    conn = conn,
    extractableKV = TRUE,
    totStorageSize = sz,
    totObjectSize = NA,
    nDiv = NA, # length(conn$data),
    example = ex
  )
}
#' @export
getBasicDdfAttrs.kvSparkData <- function(obj) {
list(vars = lapply(kvExample(obj)[[2]], class))
}
# kvSparkData is never extractable (yet...)
#' @export
hasExtractableKV.kvSparkData <- function(x) {
FALSE
}
######################################################################
### extract methods
######################################################################
#' @export
extract.kvSparkData <- function(x, i, ...) {
idx <- NULL
dat <- getAttribute(x, "conn")$data
keys <- getKeys(x)
if(is.numeric(i)) {
idx <- i
if(length(idx) == 1)
return(take(dat, 1))
} else {
keyHashes <- getAttribute(x, "keyHashes")
# try actual key
idx <- unlist(lapply(as.character(sapply(i, digest)), function(x) which(keyHashes == x)))
if(length(idx) == 0 && is.character(i)) {
if(all(nchar(i) == 32)) {
idx <- unlist(lapply(i, function(x) which(keyHashes == x)))
}
}
}
if(length(idx) == 0)
return(NULL)
lapply(idx, function(a) {
lookup(dat, keys[[a]])
})
}
######################################################################
### convert methods
######################################################################
#' @export
convertImplemented.kvSparkData <- function(obj) {
c("sparkDataConn", "NULL")
}
#' @export
convert.kvSparkData <- function(from, to=NULL) {
convertkvSparkData(to, from)
}
convertkvSparkData <- function(obj, ...)
UseMethod("convertkvSparkData", obj)
# from sparkData to sparkData
#' @export
convertkvSparkData.sparkDataConn <- function(to, from, verbose=FALSE) {
from
}
# from sparkData to memory
#' @export
convertkvSparkData.NULL <- function(to, from, verbose=FALSE) {
res <- collect(getAttribute(from, "conn")$data)
if(inherits(from, "ddf")) {
res <- ddf(res, update=FALSE, verbose=verbose)
} else {
res <- ddo(res, update=FALSE, verbose=verbose)
}
addNeededAttrs(res, from)
}
# # from sparkData to local disk
# #' @export
# convertkvSparkData.sparkDataConn <- function(to, from, verbose=FALSE) {
# from
# }
#
# # from sparkData to HDFS
# #' @export
# convertkvSparkData.hdfsConn <- function(to, from, verbose=FALSE) {
# }
#
|
# Nivolumab PK Model with Tumour Growth - TDM Step-wise Dosing
# -----------------------------------------------------------------------------
# Simulation of dosing 240 mg every 2 weeks initially, before using TDM with
# proportional dosage changes.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Clear workspace
rm(list=ls(all=TRUE))
graphics.off()
# Set working directory
# if not working with RStudio project place working directory here
setwd("E:/Hughes/Git/nivo_sim/scripts/dosing_protocols")
# Load package libraries
library(dplyr) # Split and rearrange data - required for mrgsolve
library(mrgsolve) # Metrum differential equation solver for pharmacometrics
library(ggplot2) # Graphical package
# Source external scripts
source("functions_utility.R") # functions utility
source("model.R") # PopPK model script
# Read in data
pop_df <- readr::read_rds("pop_df.rds")
trough_flat_df <- readr::read_rds("flat_dosing.rds")
# trough_flat_df <- readr::read_rds("flat_dosing_120.rds")
# Set up objects for proportional dosing
dose_interval <- 14
dose_min <- 40
dose_max <- 800
dose_opts <- c(40, seq(80, 800, by = 20))
obs_times <- c(0, 14, 42, 112)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Step-wise TDM dosing for a single patient.
#
# `induction_df` is the patient's simulated flat-dosing record (one row per
# time point, doses in `amt`, observed concentration in `DV`). At each
# scheduled trough (obs_times) the last sampled concentration is compared
# against the target window (trough_target, trough_upper] and the dose for
# all later administrations is stepped up/down by 40 or 80 mg (clamped to
# [dose_min, dose_max]), then the profile is re-simulated with mrgsolve.
# Relies on globals defined above: mod, pop_df, obs_times, dose_min, dose_max.
# Returns the final simulated data frame and caches it to disk as an .rds.
TDMstep_fn <- function(induction_df) {
  # Set up a loop that will sample the individual's concentration, and determine
  # the next dose.
  # Make all predicted concentrations and PK parameter values after
  # the first sample time equal to NA (aka NA_real_)
  tdmstep_df <- induction_df %>%
    dplyr::select(ID = ID2, dplyr::everything()) %>%
    dplyr::mutate(DV = dplyr::if_else(time > 14, NA_real_, DV))
  # Create tumour patient data for setting initial tumour size and assign
  # initial compartment value for model
  tdmstep_mod <- dplyr::summarise_at(tdmstep_df, "TUM", dplyr::first) %>%
    mrgsolve::init(.x = mod, .)
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Loop until all doses have been optimised
  trough_target <- 2.5
  trough_upper <- 5
  repeat {
    # Determine latest sample time and dose
    last_sample <- tidyr::drop_na(tdmstep_df) %>%
      dplyr::summarise_at("time", max) %>%
      unlist()
    next_dose <- obs_times[which(obs_times == last_sample) + 1]
    prev_dose <- obs_times[which(obs_times == last_sample) - 1]
    # NOTE(review): last_dose is computed but never used below - candidate
    # for removal.
    last_dose <- dplyr::filter(tdmstep_df, time == prev_dose) %>%
      dplyr::pull(amt)
    this_dose <- dplyr::filter(tdmstep_df, time == last_sample) %>%
      dplyr::pull(amt)
    # Determine last sampled conc and last dose
    last_conc <- dplyr::filter(tdmstep_df, time == last_sample) %>%
      dplyr::pull(DV)
    # Determine next dose: keep it, or step by 40 mg (small deviation) or
    # 80 mg (large deviation), clamped to [dose_min, dose_max].
    if (last_conc <= trough_upper & last_conc > trough_target) {
      dose_par <- this_dose
    } else if (last_conc > trough_upper & last_conc <= trough_upper + 5) {
      dose_par <- this_dose - 40
      if (dose_par < dose_min) dose_par <- dose_min
    } else if (last_conc <= trough_target & last_conc > trough_target - 1.25) {
      dose_par <- this_dose + 40
      if (dose_par > dose_max) dose_par <- dose_max
    } else if (last_conc > trough_upper + 5) {
      dose_par <- this_dose - 80
      if (dose_par < dose_min) dose_par <- dose_min
    } else if (last_conc <= trough_target - 1.25) {
      dose_par <- this_dose + 80
      if (dose_par > dose_max) dose_par <- dose_max
    }
    # Create simulation input: apply the new dose to all administrations
    # after the latest sample.
    input_tdmstep_df <- tdmstep_df %>%
      dplyr::mutate(amt = dplyr::if_else(
        time > last_sample, dose_par, amt
      ))
    # Simulate to represent time passing since last trough
    tdmstep_df <- tdmstep_mod %>%
      mrgsolve::data_set(data = input_tdmstep_df) %>%
      mrgsolve::idata_set(data = pop_df) %>%
      mrgsolve::carry_out(amt, evid, rate, cmt) %>%
      mrgsolve::mrgsim() %>%
      tibble::as_tibble() %>%
      dplyr::select(ID, time, amt, evid, rate, cmt, DV, AUC, TUM,
        AGE, ALB, BWT, GFR, SEX, ECOG, EPS1) %>% # select important columns
      dplyr::mutate(Cavg = c(0, diff(AUC))) # calculate delta AUC (ddply .fun)
    # End loop once the final scheduled trough (day 112) has been optimised
    if (last_sample == 112) break
    # Blank out concentrations after the next dosing decision point so only
    # data available at that visit informs the next step.
    tdmstep_df <- dplyr::mutate(tdmstep_df,
      DV = dplyr::if_else(time > next_dose, NA_real_, DV))
  } # closing bracket for "repeat"
  # Cache the individual's result to disk.
  # NOTE(review): hard-coded absolute Windows path - confirm before reuse.
  readr::write_rds(tdmstep_df, paste0(
    "E:/Hughes/Git/nivo_sim/scripts/dosing_protocols/step_id/id",
    unique(tdmstep_df$ID), ".rds"
  ))
  tdmstep_df
} # closing bracket for "TDMstep_fn"
tictoc::tic()
output_tdmstep_df <- trough_flat_df %>%
# dplyr::filter(ID %in% 185) %>%
{ tibble::add_column(., ID2 = .$ID) } %>% # so that ID is carried inside of the nest structure
dplyr::group_by(ID) %>% tidyr::nest() %>% # create list column for ID data
dplyr::mutate(data = purrr::map(data, TDMstep_fn)) %>% # create new list column using bayes_fn
tidyr::unnest() %>%
dplyr::select(-ID2)
tictoc::toc()
readr::write_rds(output_tdmstep_df, path = "stepwise_clin.rds")
| /scripts/dosing_protocols/clin_stepwise.R | no_license | jhhughes256/nivo_sim | R | false | false | 5,169 | r | # Nivolumab PK Model with Tumour Growth - TDM Step-wise Dosing
# -----------------------------------------------------------------------------
# Simulation of dosing 240 mg every 2 weeks initially, before using TDM with
# proportional dosage changes.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Clear workspace
rm(list=ls(all=TRUE))
graphics.off()
# Set working directory
# if not working with RStudio project place working directory here
setwd("E:/Hughes/Git/nivo_sim/scripts/dosing_protocols")
# Load package libraries
library(dplyr) # Split and rearrange data - required for mrgsolve
library(mrgsolve) # Metrum differential equation solver for pharmacometrics
library(ggplot2) # Graphical package
# Source external scripts
source("functions_utility.R") # functions utility
source("model.R") # PopPK model script
# Read in data
pop_df <- readr::read_rds("pop_df.rds")
trough_flat_df <- readr::read_rds("flat_dosing.rds")
# trough_flat_df <- readr::read_rds("flat_dosing_120.rds")
# Set up objects for proportional dosing
dose_interval <- 14
dose_min <- 40
dose_max <- 800
dose_opts <- c(40, seq(80, 800, by = 20))
obs_times <- c(0, 14, 42, 112)
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Step-wise TDM dosing for a single patient.
#
# `induction_df` is the patient's simulated flat-dosing record (one row per
# time point, doses in `amt`, observed concentration in `DV`). At each
# scheduled trough (obs_times) the last sampled concentration is compared
# against the target window (trough_target, trough_upper] and the dose for
# all later administrations is stepped up/down by 40 or 80 mg (clamped to
# [dose_min, dose_max]), then the profile is re-simulated with mrgsolve.
# Relies on globals defined above: mod, pop_df, obs_times, dose_min, dose_max.
# Returns the final simulated data frame and caches it to disk as an .rds.
TDMstep_fn <- function(induction_df) {
  # Set up a loop that will sample the individual's concentration, and determine
  # the next dose.
  # Make all predicted concentrations and PK parameter values after
  # the first sample time equal to NA (aka NA_real_)
  tdmstep_df <- induction_df %>%
    dplyr::select(ID = ID2, dplyr::everything()) %>%
    dplyr::mutate(DV = dplyr::if_else(time > 14, NA_real_, DV))
  # Create tumour patient data for setting initial tumour size and assign
  # initial compartment value for model
  tdmstep_mod <- dplyr::summarise_at(tdmstep_df, "TUM", dplyr::first) %>%
    mrgsolve::init(.x = mod, .)
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Loop until all doses have been optimised
  trough_target <- 2.5
  trough_upper <- 5
  repeat {
    # Determine latest sample time and dose
    last_sample <- tidyr::drop_na(tdmstep_df) %>%
      dplyr::summarise_at("time", max) %>%
      unlist()
    next_dose <- obs_times[which(obs_times == last_sample) + 1]
    prev_dose <- obs_times[which(obs_times == last_sample) - 1]
    # NOTE(review): last_dose is computed but never used below - candidate
    # for removal.
    last_dose <- dplyr::filter(tdmstep_df, time == prev_dose) %>%
      dplyr::pull(amt)
    this_dose <- dplyr::filter(tdmstep_df, time == last_sample) %>%
      dplyr::pull(amt)
    # Determine last sampled conc and last dose
    last_conc <- dplyr::filter(tdmstep_df, time == last_sample) %>%
      dplyr::pull(DV)
    # Determine next dose: keep it, or step by 40 mg (small deviation) or
    # 80 mg (large deviation), clamped to [dose_min, dose_max].
    if (last_conc <= trough_upper & last_conc > trough_target) {
      dose_par <- this_dose
    } else if (last_conc > trough_upper & last_conc <= trough_upper + 5) {
      dose_par <- this_dose - 40
      if (dose_par < dose_min) dose_par <- dose_min
    } else if (last_conc <= trough_target & last_conc > trough_target - 1.25) {
      dose_par <- this_dose + 40
      if (dose_par > dose_max) dose_par <- dose_max
    } else if (last_conc > trough_upper + 5) {
      dose_par <- this_dose - 80
      if (dose_par < dose_min) dose_par <- dose_min
    } else if (last_conc <= trough_target - 1.25) {
      dose_par <- this_dose + 80
      if (dose_par > dose_max) dose_par <- dose_max
    }
    # Create simulation input: apply the new dose to all administrations
    # after the latest sample.
    input_tdmstep_df <- tdmstep_df %>%
      dplyr::mutate(amt = dplyr::if_else(
        time > last_sample, dose_par, amt
      ))
    # Simulate to represent time passing since last trough
    tdmstep_df <- tdmstep_mod %>%
      mrgsolve::data_set(data = input_tdmstep_df) %>%
      mrgsolve::idata_set(data = pop_df) %>%
      mrgsolve::carry_out(amt, evid, rate, cmt) %>%
      mrgsolve::mrgsim() %>%
      tibble::as_tibble() %>%
      dplyr::select(ID, time, amt, evid, rate, cmt, DV, AUC, TUM,
        AGE, ALB, BWT, GFR, SEX, ECOG, EPS1) %>% # select important columns
      dplyr::mutate(Cavg = c(0, diff(AUC))) # calculate delta AUC (ddply .fun)
    # End loop once the final scheduled trough (day 112) has been optimised
    if (last_sample == 112) break
    # Blank out concentrations after the next dosing decision point so only
    # data available at that visit informs the next step.
    tdmstep_df <- dplyr::mutate(tdmstep_df,
      DV = dplyr::if_else(time > next_dose, NA_real_, DV))
  } # closing bracket for "repeat"
  # Cache the individual's result to disk.
  # NOTE(review): hard-coded absolute Windows path - confirm before reuse.
  readr::write_rds(tdmstep_df, paste0(
    "E:/Hughes/Git/nivo_sim/scripts/dosing_protocols/step_id/id",
    unique(tdmstep_df$ID), ".rds"
  ))
  tdmstep_df
} # closing bracket for "TDMstep_fn"
tictoc::tic()
output_tdmstep_df <- trough_flat_df %>%
# dplyr::filter(ID %in% 185) %>%
{ tibble::add_column(., ID2 = .$ID) } %>% # so that ID is carried inside of the nest structure
dplyr::group_by(ID) %>% tidyr::nest() %>% # create list column for ID data
dplyr::mutate(data = purrr::map(data, TDMstep_fn)) %>% # create new list column using bayes_fn
tidyr::unnest() %>%
dplyr::select(-ID2)
tictoc::toc()
readr::write_rds(output_tdmstep_df, path = "stepwise_clin.rds")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tpp2dImport.R
\name{tpp2dImport}
\alias{tpp2dImport}
\title{Import 2D-TPP data}
\usage{
tpp2dImport(configTable = NULL, data = NULL, idVar = "gene_name",
addCol = NULL, intensityStr = "signal_sum_", qualColName = "qupm",
nonZeroCols = "qssm", fcStr = NULL)
}
\arguments{
\item{configTable}{dataframe, or character object with the path to a file,
that specifies important details of the 2D-TPP experiment. See Section
\code{details} for instructions how to create this object.}
\item{data}{single dataframe, containing raw measurements and if already available fold
changes and additional annotation columns to be imported. Can be used instead of
specifying the file path in the \code{configTable} argument.}
\item{idVar}{character string indicating which data column provides the
unique identifiers for each protein.}
\item{addCol}{additional column names that specify columns in the input data that are
to be attached to the data frame throughout the analysis}
\item{intensityStr}{character string indicating which columns contain the actual
sumionarea values. Those column names containing the suffix \code{intensityStr}
will be regarded as containing sumionarea values.}
\item{qualColName}{character string indicating which column can be used for
additional quality criteria when deciding between different non-unique
protein identifiers.}
\item{nonZeroCols}{character string indicating a column that will be used for
filtering out zero values.}
\item{fcStr}{character string indicating which columns contain the actual
fold change values. Those column names containing the suffix \code{fcStr}
will be regarded as containing fold change values.}
}
\value{
A dataframe comprising all experimental data
}
\description{
Imports data from 2D-TPP experiments by parsing a configTable and reading in
corresponding data file or data frames containing raw data (sumionarea values) and creating a
big data frame comprising all samples with respective fold changes
}
\examples{
# Preparation:
data(panobinostat_2DTPP_smallExample)
# Import data:
datIn <- tpp2dImport(configTable = panobinostat_2DTPP_config,
data = panobinostat_2DTPP_data,
idVar = "representative",
addCol = "clustername",
intensityStr = "sumionarea_protein_",
nonZeroCols = "qusm")
# View attributes of imported data (experiment infos and import arguments):
attr(datIn, "importSettings") \%>\% unlist
attr(datIn, "configTable")
}
| /man/tpp2dImport.Rd | no_license | SamGG/TPP | R | false | true | 2,610 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tpp2dImport.R
\name{tpp2dImport}
\alias{tpp2dImport}
\title{Import 2D-TPP data}
\usage{
tpp2dImport(configTable = NULL, data = NULL, idVar = "gene_name",
addCol = NULL, intensityStr = "signal_sum_", qualColName = "qupm",
nonZeroCols = "qssm", fcStr = NULL)
}
\arguments{
\item{configTable}{dataframe, or character object with the path to a file,
that specifies important details of the 2D-TPP experiment. See Section
\code{details} for instructions how to create this object.}
\item{data}{single dataframe, containing raw measurements and if already available fold
changes and additional annotation columns to be imported. Can be used instead of
specifying the file path in the \code{configTable} argument.}
\item{idVar}{character string indicating which data column provides the
unique identifiers for each protein.}
\item{addCol}{additional column names that specify columns in the input data that are
to be attached to the data frame throughout the analysis}
\item{intensityStr}{character string indicating which columns contain the actual
sumionarea values. Those column names containing the suffix \code{intensityStr}
will be regarded as containing sumionarea values.}
\item{qualColName}{character string indicating which column can be used for
additional quality criteria when deciding between different non-unique
protein identifiers.}
\item{nonZeroCols}{character string indicating a column that will be used for
filtering out zero values.}
\item{fcStr}{character string indicating which columns contain the actual
fold change values. Those column names containing the suffix \code{fcStr}
will be regarded as containing fold change values.}
}
\value{
A dataframe comprising all experimental data
}
\description{
Imports data from 2D-TPP experiments by parsing a configTable and reading in
corresponding data file or data frames containing raw data (sumionarea values) and creating a
big data frame comprising all samples with respective fold changes
}
\examples{
# Preparation:
data(panobinostat_2DTPP_smallExample)
# Import data:
datIn <- tpp2dImport(configTable = panobinostat_2DTPP_config,
data = panobinostat_2DTPP_data,
idVar = "representative",
addCol = "clustername",
intensityStr = "sumionarea_protein_",
nonZeroCols = "qusm")
# View attributes of imported data (experiment infos and import arguments):
attr(datIn, "importSettings") \%>\% unlist
attr(datIn, "configTable")
}
|
# source("/Users/leonardovida/Development/bi-project-2019/Dashboard/global.R")
#
server <- function(input, output, session) {
# =========================================================================
# Server outputs: UI widgets
# =========================================================================
output$timelineControl <- renderUI({
sliderInput(inputId = "timeline", "Timeline:",
min = 2010, max = 2019, value = c(2011, 2017), round = TRUE)
})
output$countryControl <- renderUI({
checkboxGroupInput('country', 'Select one or more countries:',
c("Germany" = "DE",
"Sweden" = "SE",
"Poland" = "PL",
"Israel" = "IL"), selected = countries$country_id)
})
output$csfControl <- renderUI({
checkboxGroupInput('csf', 'Select one or more csfs:',
c("Environment" = "envsus",
"Innovation" = "innenv",
"Business" = "easbus"), selected = csfs$csf_id)
})
# =========================================================================
# Top dashboard outputs
# =========================================================================
# ---- Dashboard header: top-performer value boxes (one per CSF) ----------
# Each box shows the country with the highest total rank_relative for the
# given csf id (see selectTop()).
output$csf1box <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = selectTop("innenv"),
subtitle = "Top performer in Strong Innovative Environment for Startups",
icon = icon("lightbulb", class = NULL, lib = "font-awesome"),
color = "blue"
)
})
output$csf2box <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = selectTop("easbus"),
subtitle = "Top performer in Ease of Doing Business",
icon = icon("briefcase", class = NULL, lib = "font-awesome"),
color = "yellow"
)
})
output$csf3box <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = selectTop("envsus"),
subtitle = "Top performer in Green Economic Growth",
icon = icon("leaf", class = NULL, lib = "font-awesome"),
color = "green"
)
})
# ---- Per-CSF ranking value boxes (positions 1-4) ------------------------
# The subtitle of each box is the country name at the given position for
# the CSF (see selectRankCsf()); the box value is the ordinal label.
# CSF1: innovation ("innenv"), blue/lightbulb.
output$csf1rank1 <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = "1st",
subtitle = selectRankCsf("innenv",1),
icon = icon("lightbulb", class = NULL, lib = "font-awesome"),
color = "blue"
)
})
output$csf1rank2 <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = "2nd",
subtitle = selectRankCsf("innenv",2),
icon = icon("lightbulb", class = NULL, lib = "font-awesome"),
color = "blue"
)
})
output$csf1rank3 <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = "3rd",
subtitle = selectRankCsf("innenv",3),
icon = icon("lightbulb", class = NULL, lib = "font-awesome"),
color = "blue"
)
})
output$csf1rank4 <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = "4th",
subtitle = selectRankCsf("innenv",4),
icon = icon("lightbulb", class = NULL, lib = "font-awesome"),
color = "blue"
)
})
# CSF2: ease of doing business ("easbus"), yellow/briefcase.
output$csf2rank1 <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = "1st",
subtitle = selectRankCsf("easbus",1),
icon = icon("briefcase", class = NULL, lib = "font-awesome"),
color = "yellow"
)
})
output$csf2rank2 <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = "2nd",
subtitle = selectRankCsf("easbus",2),
icon = icon("briefcase", class = NULL, lib = "font-awesome"),
color = "yellow"
)
})
output$csf2rank3 <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = "3rd",
subtitle = selectRankCsf("easbus",3),
icon = icon("briefcase", class = NULL, lib = "font-awesome"),
color = "yellow"
)
})
output$csf2rank4 <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = "4th",
subtitle = selectRankCsf("easbus",4),
icon = icon("briefcase", class = NULL, lib = "font-awesome"),
color = "yellow"
)
})
# CSF3: environment ("envsus"), green/leaf.
output$csf3rank1 <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = "1st",
subtitle = selectRankCsf("envsus",1),
icon = icon("leaf", class = NULL, lib = "font-awesome"),
color = "green"
)
})
output$csf3rank2 <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = "2nd",
subtitle = selectRankCsf("envsus",2),
icon = icon("leaf", class = NULL, lib = "font-awesome"),
color = "green"
)
})
output$csf3rank3 <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = "3rd",
subtitle = selectRankCsf("envsus",3),
icon = icon("leaf", class = NULL, lib = "font-awesome"),
color = "green"
)
})
output$csf3rank4 <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = "4th",
subtitle = selectRankCsf("envsus",4),
icon = icon("leaf", class = NULL, lib = "font-awesome"),
color = "green"
)
})
# =========================================================================
# Prepare datasets for outputs functions
# =========================================================================
# Return the display name of the best-performing country for a CSF.
#
# x: a csf id (e.g. "innenv") matched against data$csf_id.
# Countries are ranked by the sum of rank_relative across all years,
# highest first.
#
# The previous implementation duplicated the whole aggregation pipeline
# of selectRankCsf(); asking for position 1 is exactly equivalent, so
# delegate to keep the two ranking definitions in sync.
selectTop <- function(x) {
  selectRankCsf(x, 1)
}
# Return the display name of the country ranked at position `pos` for the
# CSF `x`. Countries are ordered by their total rank_relative points
# (summed over all years), highest first.
selectRankCsf <- function(x, pos) {
  totals <- data %>%
    filter(csf_id == x) %>%
    group_by(country_id) %>%
    summarize(count = sum(rank_relative)) %>%
    arrange(desc(count))
  # Country code at the requested position (first column is country_id).
  code <- totals$country_id[pos]
  # Map the code to the country's display name via the lookup table.
  filter(countries, country_id == code)[[1]]
}
# Main filtered dataset: rows restricted to the selected year range,
# countries, and CSFs from the sidebar widgets.
getGenInputData <- reactive({
  yrs <- input$timeline
  data %>%
    filter(year_id >= yrs[1],
           year_id <= yrs[2],
           country_id %in% input$country,
           csf_id %in% input$csf) %>%
    select(year_id, country_id, csf_id, indicator_id, rank_relative, value)
})
# Same as getGenInputData() but without the CSF filter, so a CSF page can
# rank every CSF for the selected countries and years.
getCsfInputData <- reactive({
  yrs <- input$timeline
  data %>%
    filter(year_id >= yrs[1],
           year_id <= yrs[2],
           country_id %in% input$country) %>%
    select(year_id, country_id, csf_id, indicator_id, rank_relative, value)
})
# Aggregate rank_relative into one total per (country, csf, year)
# combination; used by the main dashboard chart.
groupRank <- function(input) {
  grouped <- group_by(input, country_id, csf_id, year_id)
  totals <- summarise(grouped, ranks = sum(rank_relative))
  ungroup(totals)
}
# All rows for the currently selected countries, every year (no timeline
# filter) -- feeds the small dumbbell tables.
getGenData <- reactive({
  data %>% filter(country_id %in% input$country)
})
# Rows for the upper bound of the timeline slider (latest selected year);
# feeds the gauges.
getLastData <- reactive({
  data %>% filter(year_id == input$timeline[2])
})
# Single gauge observation: one indicator for one country.
retrieveGaugeData <- function(input, indicatorIdGauge, countryIdGauge) {
  input %>%
    filter(indicator_id == indicatorIdGauge,
           country_id == countryIdGauge)
}
# Rank totals per (country, csf, year) for a single CSF; used by the
# dumbbell tables on the dashboard page.
#
# input: data frame with csf_id and rank_relative columns.
# csfId: the csf id to keep.
#
# Previously re-implemented the exact group/summarise/ungroup pipeline of
# groupRank(); now filters first and reuses that helper so the two
# aggregations cannot drift apart.
groupRankByCsf <- function(input, csfId) {
  groupRank(filter(input, csf_id == csfId))
}
# Rank totals for one CSF with the total column named `points` -- note the
# name differs from the `ranks` column produced by groupRank(); presumably
# plotCsfRankData() expects `points` (confirm in global.R).
retrieveRankedCsf <- function(input, csfName) {
  one_csf <- filter(input, csf_id == csfName)
  per_group <- group_by(one_csf, country_id, csf_id, year_id)
  ungroup(summarise(per_group, points = sum(rank_relative)))
}
# Rows of a single indicator (KPI) from an already-filtered dataset.
retrieveKpi <- function(input, kpiName) {
  filter(input, indicator_id == kpiName)
}
# =========================================================================
# Dahsboard page outputs
# =========================================================================
# Small dumbbell tables: one per CSF, always spanning the fixed 2010-2017
# window on the unfiltered-by-year data (getGenData()).
output$genSmallTable1 <- renderPlot({
plotDotTable(groupRankByCsf(getGenData(), "innenv"), 2010, 2017)
})
output$genSmallTable2 <- renderPlot({
plotDotTable(groupRankByCsf(getGenData(), "easbus"), 2010, 2017)
})
output$genSmallTable3 <- renderPlot({
plotDotTable(groupRankByCsf(getGenData(), "envsus"), 2010, 2017)
})
# Main dashboard chart over the fully filtered dataset.
output$genPlot <- renderPlotly({
plotGenVis(groupRank(getGenInputData()))
})
# Raw data table (unfiltered global `data`).
output$genTable <- renderDataTable({
data
})
# Plotly hover/click event echoes for the bar chart.
output$event <- renderPrint({
d <- event_data("plotly_hover")
if (is.null(d)) "Hover on a point!" else d
})
output$click <- renderPrint({
d <- event_data("plotly_click")
if (is.null(d)) "Click to keep data (double-click to clear)" else d
})
# Interactive pivot table over the Eurostat dataset (`data.eur` global).
output$pivotTable <- renderRpivotTable({
dataFrameEurostat <- data.frame(data.eur)
rpivotTable(data = dataFrameEurostat, rows = "country_id",
cols=c("year_id"),
vals = "value",
rendererName = "Pivot Table")
})
# =========================================================================
# CSF1 page outputs
# =========================================================================
# ---- CSF1 page: Strong Innovative Environment ("innenv") ----------------
# Rank chart over the selected countries and years.
output$genCsf1 <- renderPlotly({
plotCsfRankData(retrieveRankedCsf(getCsfInputData(), "innenv"))
})
# KPI line charts, one per indicator of this CSF.
output$csf1kpi1 <- renderPlotly({
plotKpiData(retrieveKpi(getGenInputData(), "smeinn"))
})
output$csf1kpi2 <- renderPlotly({
plotKpiData(retrieveKpi(getGenInputData(), "ppcmln"))
})
output$csf1kpi3 <- renderPlotly({
plotKpiData(retrieveKpi(getGenInputData(), "pcored"))
})
output$csf1kpi4 <- renderPlotly({
plotKpiData(retrieveKpi(getGenInputData(), "trdbln"))
})
# Gauges: latest-year value (getLastData()) per indicator and country.
# The numeric arguments to plotGauge() look like a reference value plus
# sector boundaries and a unit suffix -- TODO confirm against plotGauge()
# in global.R. Prototype kept below for reference:
# output$gaugecsf1kpi1 <- renderGauge({
# gauge(42, min = 0, max = 100, symbol = '%', gaugeSectors(success = c(80, 100), warning = c(40, 79), danger = c(0, 39)
# ))
# })
# kpi1: "smeinn"
output$gaugecsf1kpi1de <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"smeinn", "DE"), 48.26, 25, 50, 0, "%")
})
output$gaugecsf1kpi1se <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"smeinn", "SE"), 48.26, 25, 50, 0, "%")
})
output$gaugecsf1kpi1pl <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"smeinn", "PL"), 48.26, 25, 50, 0, "%")
})
output$gaugecsf1kpi1il <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"smeinn", "IL"), 48.26, 25, 50, 0, "%")
})
# kpi2: "ppcmln"
output$gaugecsf1kpi2de <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"ppcmln", "DE"), 260.58, 220, 280, 20, "")
})
output$gaugecsf1kpi2se <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"ppcmln", "SE"), 260.58, 220, 280, 20, "")
})
output$gaugecsf1kpi2pl <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"ppcmln", "PL"), 260.58, 220, 280, 20, "")
})
output$gaugecsf1kpi2il <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"ppcmln", "IL"), 260.58, 220, 280, 20, "")
})
# kpi3: "pcored"
output$gaugecsf1kpi3de <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"pcored", "DE"), 0.5, 0.25, 0.6, 0, "%")
})
output$gaugecsf1kpi3se <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"pcored", "SE"), 0.5, 0.25, 0.6, 0, "%")
})
output$gaugecsf1kpi3pl <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"pcored", "PL"), 0.5, 0.25, 0.6, 0, "%")
})
output$gaugecsf1kpi3il <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"pcored", "IL"), 0.5, 0.25, 0.6, 0, "%")
})
# kpi4: "trdbln"
output$gaugecsf1kpi4de <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"trdbln", "DE"), 43.14, 25, 45, 0, "")
})
output$gaugecsf1kpi4se <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"trdbln", "SE"), 43.14, 25, 45, 0, "")
})
output$gaugecsf1kpi4pl <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"trdbln", "PL"), 43.14, 25, 45, 0, "")
})
output$gaugecsf1kpi4il <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"trdbln", "IL"), 43.14, 25, 45, 0, "")
})
# =========================================================================
# CSF2 page outputs
# =========================================================================
# ---- CSF2 page: Ease of Doing Business ("easbus") -----------------------
# Rank chart over the selected countries and years.
output$genCsf2 <- renderPlotly({
plotCsfRankData(retrieveRankedCsf(getCsfInputData(), "easbus"))
})
# KPI line charts, one per indicator of this CSF.
output$csf2kpi1 <- renderPlotly({
plotKpiData(retrieveKpi(getGenInputData(), "getcre"))
})
output$csf2kpi2 <- renderPlotly({
plotKpiData(retrieveKpi(getGenInputData(), "strday"))
})
output$csf2kpi3 <- renderPlotly({
plotKpiData(retrieveKpi(getGenInputData(), "payhou"))
})
output$csf2kpi4 <- renderPlotly({
plotKpiData(retrieveKpi(getGenInputData(), "enfday"))
})
# Gauges: latest-year value per indicator and country. plotGauge2 is used
# for the day/hour-count KPIs where lower is presumably better -- confirm
# the distinction against plotGauge()/plotGauge2() in global.R.
# kpi1: "getcre"
output$gaugecsf2kpi1de <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"getcre", "DE"), 100, 50, 100, 0, "")
})
output$gaugecsf2kpi1se <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"getcre", "SE"), 100, 50, 100, 0, "")
})
output$gaugecsf2kpi1pl <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"getcre", "PL"), 100, 50, 100, 0, "")
})
output$gaugecsf2kpi1il <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"getcre", "IL"), 100, 50, 100, 0, "")
})
# kpi2: "strday"
output$gaugecsf2kpi2de <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"strday", "DE"), 0.5, 10, 50, 0, "")
})
output$gaugecsf2kpi2se <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"strday", "SE"), 0.5, 10, 50, 0, "")
})
output$gaugecsf2kpi2pl <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"strday", "PL"), 0.5, 10, 50, 0, "")
})
output$gaugecsf2kpi2il <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"strday", "IL"), 0.5, 10, 50, 0, "")
})
# kpi3: "payhou"
output$gaugecsf2kpi3de <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"payhou", "DE"), 12, 50, 200, 0, "")
})
output$gaugecsf2kpi3se <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"payhou", "SE"), 12, 50, 200, 0, "")
})
output$gaugecsf2kpi3pl <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"payhou", "PL"), 12, 50, 200, 0, "")
})
output$gaugecsf2kpi3il <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"payhou", "IL"), 12, 50, 200, 0, "")
})
# kpi4: "enfday"
output$gaugecsf2kpi4de <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"enfday", "DE"), 164, 220, 365, 1, "")
})
output$gaugecsf2kpi4se <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"enfday", "SE"), 164, 220, 365, 1, "")
})
output$gaugecsf2kpi4pl <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"enfday", "PL"), 164, 220, 365, 1, "")
})
output$gaugecsf2kpi4il <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"enfday", "IL"), 164, 220, 365, 1, "")
})
# =========================================================================
# CSF3 page outputs
# =========================================================================
# CSF3 overview: ranked Green Economic Growth ("envsus") scores per country.
# Bug fix: this previously passed "innenv", duplicating the CSF1 rank chart
# instead of showing the environment CSF that the rest of this page (the
# csf3 value boxes and gauges) tracks under "envsus".
output$genCsf3 <- renderPlotly({
plotCsfRankData(retrieveRankedCsf(getCsfInputData(), "envsus"))
})
# ---- CSF3 page: Green Economic Growth ("envsus") ------------------------
# KPI line charts, one per indicator of this CSF.
output$csf3kpi1 <- renderPlotly({
plotKpiData(retrieveKpi(getGenInputData(), "exppmt"))
})
output$csf3kpi2 <- renderPlotly({
plotKpiData(retrieveKpi(getGenInputData(), "taxrev"))
})
output$csf3kpi3 <- renderPlotly({
plotKpiData(retrieveKpi(getGenInputData(), "shrrnw"))
})
output$csf3kpi4 <- renderPlotly({
plotKpiData(retrieveKpi(getGenInputData(), "prdtps"))
})
# Gauges: latest-year value (getLastData()) per indicator and country.
# kpi1: "exppmt"
output$gaugecsf3kpi1de <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"exppmt", "DE"), 12, 20, 80, 0, "")
})
output$gaugecsf3kpi1se <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"exppmt", "SE"), 12, 20, 80, 0, "")
})
output$gaugecsf3kpi1pl <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"exppmt", "PL"), 12, 20, 80, 0, "")
})
output$gaugecsf3kpi1il <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"exppmt", "IL"), 12, 20, 80, 0, "")
})
# kpi2: "taxrev"
output$gaugecsf3kpi2de <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"taxrev", "DE"), 4, 1.5, 5, 0, "")
})
output$gaugecsf3kpi2se <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"taxrev", "SE"), 4, 1.5, 5, 0, "")
})
output$gaugecsf3kpi2pl <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"taxrev", "PL"), 4, 1.5, 5, 0, "")
})
output$gaugecsf3kpi2il <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"taxrev", "IL"), 4, 1.5, 5, 0, "")
})
# kpi3: "shrrnw"
output$gaugecsf3kpi3de <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"shrrnw", "DE"), 15, 10, 25, 0, "")
})
output$gaugecsf3kpi3se <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"shrrnw", "SE"), 15, 10, 25, 0, "")
})
output$gaugecsf3kpi3pl <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"shrrnw", "PL"), 15, 10, 25, 0, "")
})
output$gaugecsf3kpi3il <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"shrrnw", "IL"), 15, 10, 25, 0, "")
})
# kpi4: "prdtps"
output$gaugecsf3kpi4de <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"prdtps", "DE"), 15143, 10000, 17000, 5000, "")
})
output$gaugecsf3kpi4se <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"prdtps", "SE"), 15143, 10000, 17000, 5000, "")
})
output$gaugecsf3kpi4pl <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"prdtps", "PL"), 15143, 10000, 17000, 5000, "")
})
output$gaugecsf3kpi4il <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"prdtps", "IL"), 15143, 10000, 17000, 5000, "")
})
# ---- Correlation page ---------------------------------------------------
# Reactive: all rows of the CSF chosen in the correlation selector.
getCorrData <- reactive({
var1 <- input$csfCorr1
result <- data %>% filter(csf_id == var1)
return(result)
})
# Heatmap of pairwise Pearson correlations between the CSF's indicators.
output$corrPlot <- renderPlot({
inputData <- getCorrData()
# Wide form via reshape2::dcast -- one column per indicator.
inputData <- dcast(inputData, country_id + year_id ~ indicator_id)
# Keep only the indicator columns after country_id and year_id.
# NOTE(review): assumes every CSF has exactly four indicators; columns
# are selected by position, so a different count silently breaks this.
inputData <- inputData[, c(3,4,5,6)]
# Correlation matrix rounded to two decimals.
cormat <- round(cor(inputData),2)
# Blank out the lower triangle so only the upper half is drawn.
# From http://www.sthda.com/english/wiki/ggplot2-quick-correlation-matrix-heatmap-r-software-and-data-visualization
cormat[lower.tri(cormat)] <- NA
upper_tri <- cormat
# Long form for ggplot (reshape2::melt drops the NA lower triangle).
melted_cormat <- melt(upper_tri, na.rm = TRUE)
g <- ggplot(data = melted_cormat, aes(Var2, Var1, fill = value)) +
geom_tile(color = "white") +
scale_fill_gradient2(low = "blue", high = "red", mid = "white",
midpoint = 0, limit = c(-1,1), space = "Lab",
name="Pearson\nCorrelation") +
theme_minimal() +
theme(axis.text.x = element_text(angle = 45, vjust = 1,
size = 12, hjust = 1)) +
coord_fixed()
return(g)
})
# ---- Prediction page ----------------------------------------------------
# Reactive: observations of one indicator for one country, driven by the
# prediction-page inputs.
getPredData <- reactive({
var <- input$predInd
countryP <- input$countryPred
result <- data %>% filter(indicator_id == var, country_id == countryP)
return(result)
})
# Forecast plot: fits an ARIMA model on the first-differenced yearly series
# and extends it five steps beyond the last observed year.
output$predPlot <- renderPlotly({
inputData <- getPredData()
# Wide form: one row per country/year; column 3 holds the indicator value.
inputData <- dcast(inputData, country_id + year_id ~ indicator_id)
# Convert into a yearly time series.
myts <- ts(inputData[,3], start=min(inputData$year_id), end=max(inputData$year_id), frequency=1)
# Model the year-over-year differences with auto.arima, then re-integrate
# the forecast by cumulatively adding the predicted differences onto the
# last observed level.
dif <- diff(myts)
fit = auto.arima(dif, seasonal = FALSE, allowmean = TRUE, allowdrift = TRUE)
pred = predict(fit, n.ahead = 5)
input_pred <- myts[length(myts)]
for(i in 1:length(pred$pred)){
input_pred[i+1]<-input_pred[i]+pred$pred[i]
}
input_pred <- ts(input_pred, start=max(inputData$year_id), frequency=1)
# Concatenate history and forecast into a single series.
# NOTE(review): input_pred starts at the last observed year, so that year
# appears in both myts and input_pred here -- confirm the overlap is
# intended.
end_ts <- ts(c(myts,input_pred), start=start(myts), frequency=frequency(myts))
df <- data.frame(years=index(end_ts), coredata(end_ts)) %>%
rename(Values = coredata.end_ts.)
# ymd() with truncated = 2L parses bare year integers into dates.
df$years <- lubridate::ymd(df$years, truncated = 2L)
# Plot historic values plus the forecast as one line.
p <- ggplot(df, aes(x = years, y = Values)) +
geom_line(size = 1) +
# scale_x_continuous(breaks=seq(2010, 2022, 1)) +
xlab("Date") +
ylab("Value") +
theme_minimal()
gg <- plotly_build(p)
return(gg)
})
}
| /Dashboard/server.R | permissive | leonardovida/bi-final-project-2019 | R | false | false | 21,699 | r | # source("/Users/leonardovida/Development/bi-project-2019/Dashboard/global.R")
#
server <- function(input, output, session) {
# =========================================================================
# Server outputs: UI widgets
# =========================================================================
output$timelineControl <- renderUI({
sliderInput(inputId = "timeline", "Timeline:",
min = 2010, max = 2019, value = c(2011, 2017), round = TRUE)
})
output$countryControl <- renderUI({
checkboxGroupInput('country', 'Select one or more countries:',
c("Germany" = "DE",
"Sweden" = "SE",
"Poland" = "PL",
"Israel" = "IL"), selected = countries$country_id)
})
output$csfControl <- renderUI({
checkboxGroupInput('csf', 'Select one or more csfs:',
c("Environment" = "envsus",
"Innovation" = "innenv",
"Business" = "easbus"), selected = csfs$csf_id)
})
# =========================================================================
# Top dashboard outputs
# =========================================================================
output$csf1box <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = selectTop("innenv"),
subtitle = "Top performer in Strong Innovative Environment for Startups",
icon = icon("lightbulb", class = NULL, lib = "font-awesome"),
color = "blue"
)
})
output$csf2box <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = selectTop("easbus"),
subtitle = "Top performer in Ease of Doing Business",
icon = icon("briefcase", class = NULL, lib = "font-awesome"),
color = "yellow"
)
})
output$csf3box <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = selectTop("envsus"),
subtitle = "Top performer in Green Economic Growth",
icon = icon("leaf", class = NULL, lib = "font-awesome"),
color = "green"
)
})
output$csf1rank1 <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = "1st",
subtitle = selectRankCsf("innenv",1),
icon = icon("lightbulb", class = NULL, lib = "font-awesome"),
color = "blue"
)
})
output$csf1rank2 <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = "2nd",
subtitle = selectRankCsf("innenv",2),
icon = icon("lightbulb", class = NULL, lib = "font-awesome"),
color = "blue"
)
})
output$csf1rank3 <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = "3rd",
subtitle = selectRankCsf("innenv",3),
icon = icon("lightbulb", class = NULL, lib = "font-awesome"),
color = "blue"
)
})
output$csf1rank4 <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = "4th",
subtitle = selectRankCsf("innenv",4),
icon = icon("lightbulb", class = NULL, lib = "font-awesome"),
color = "blue"
)
})
output$csf2rank1 <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = "1st",
subtitle = selectRankCsf("easbus",1),
icon = icon("briefcase", class = NULL, lib = "font-awesome"),
color = "yellow"
)
})
output$csf2rank2 <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = "2nd",
subtitle = selectRankCsf("easbus",2),
icon = icon("briefcase", class = NULL, lib = "font-awesome"),
color = "yellow"
)
})
output$csf2rank3 <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = "3rd",
subtitle = selectRankCsf("easbus",3),
icon = icon("briefcase", class = NULL, lib = "font-awesome"),
color = "yellow"
)
})
output$csf2rank4 <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = "4th",
subtitle = selectRankCsf("easbus",4),
icon = icon("briefcase", class = NULL, lib = "font-awesome"),
color = "yellow"
)
})
output$csf3rank1 <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = "1st",
subtitle = selectRankCsf("envsus",1),
icon = icon("leaf", class = NULL, lib = "font-awesome"),
color = "green"
)
})
output$csf3rank2 <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = "2nd",
subtitle = selectRankCsf("envsus",2),
icon = icon("leaf", class = NULL, lib = "font-awesome"),
color = "green"
)
})
output$csf3rank3 <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = "3rd",
subtitle = selectRankCsf("envsus",3),
icon = icon("leaf", class = NULL, lib = "font-awesome"),
color = "green"
)
})
output$csf3rank4 <- shinydashboard::renderValueBox({
shinydashboard::valueBox(
value = "4th",
subtitle = selectRankCsf("envsus",4),
icon = icon("leaf", class = NULL, lib = "font-awesome"),
color = "green"
)
})
# =========================================================================
# Prepare datasets for outputs functions
# =========================================================================
# Function to select top countries in csf
selectTop <- function(x) {
result <- data %>%
filter(csf_id == x) %>%
group_by(country_id) %>%
summarize(count = sum(rank_relative)) %>%
arrange(desc(count))
value <- result[[1,1]]
# Gather the name of the country corresponding to the code
value <- filter(countries, country_id == value)[[1]]
return (value)
}
# Function to select top countries in csf
selectRankCsf <- function(x, pos) {
result <- data %>%
filter(csf_id == x) %>%
group_by(country_id) %>%
summarize(count = sum(rank_relative)) %>%
arrange(desc(count))
value <- result[[pos,1]]
# Gather the name of the country corresponding to the code
value <- filter(countries, country_id == value)[[1]]
return (value)
}
# Function for main retrieval of data filtered by input
getGenInputData <- reactive({
minYear <- input$timeline[1]
maxYear <-input$timeline[2]
countriesCode <- input$country
csfsCode <- input$csf
result <- data %>% filter(year_id >= minYear, year_id <= maxYear,
country_id %in% countriesCode,
csf_id %in% csfsCode) %>%
select(year_id, country_id, csf_id, indicator_id, rank_relative, value)
return(result)
})
getCsfInputData <- reactive({
minYear <- input$timeline[1]
maxYear <-input$timeline[2]
countriesCode <- input$country
result <- data %>% filter(year_id >= minYear, year_id <= maxYear,
country_id %in% countriesCode) %>%
select(year_id, country_id, csf_id, indicator_id, rank_relative, value)
return(result)
})
# Function for main Vis
groupRank <- function(input) {
result <- input %>%
group_by(country_id, csf_id, year_id) %>% # Grouping variables
summarise(ranks = sum(rank_relative)) %>%
ungroup()
return(result)
}
# Function to retrieve data not filtered
getGenData <- reactive({
countriesCode <- input$country
result <- data %>% filter(country_id %in% countriesCode)
return(result)
})
# Get last data for gauges
getLastData <- reactive({
maxYear <- input$timeline[2]
result <- data %>% filter(year_id == maxYear)
return(result)
})
retrieveGaugeData <- function(input, indicatorIdGauge, countryIdGauge) {
result <- input %>%
filter(indicator_id == indicatorIdGauge, country_id == countryIdGauge)
return(result)
}
# Function to filter by csf_id and display dumbbells
groupRankByCsf <- function(input, csfId) {
result <- input %>%
filter(csf_id == csfId) %>%
group_by(country_id, csf_id, year_id) %>% # Grouping variables
summarise(ranks = sum(rank_relative)) %>%
ungroup()
return(result)
}
# Function for KPI pages - ranks' plot
retrieveRankedCsf <- function(input, csfName) {
result <- input %>%
filter(csf_id == csfName) %>%
group_by(country_id, csf_id, year_id) %>% # Grouping variables
summarise(points = sum(rank_relative)) %>%
ungroup()
return(result)
}
# Function for KPI pages - KPIs plot
retrieveKpi <- function(input, kpiName) {
result <- input %>%
filter(indicator_id == kpiName)
return(result)
}
# =========================================================================
# Dahsboard page outputs
# =========================================================================
# Small Dumbells
output$genSmallTable1 <- renderPlot({
plotDotTable(groupRankByCsf(getGenData(), "innenv"), 2010, 2017)
})
output$genSmallTable2 <- renderPlot({
plotDotTable(groupRankByCsf(getGenData(), "easbus"), 2010, 2017)
})
output$genSmallTable3 <- renderPlot({
plotDotTable(groupRankByCsf(getGenData(), "envsus"), 2010, 2017)
})
# Chart
output$genPlot <- renderPlotly({
plotGenVis(groupRank(getGenInputData()))
})
output$genTable <- renderDataTable({
data
})
# Hover events for bar chart
output$event <- renderPrint({
d <- event_data("plotly_hover")
if (is.null(d)) "Hover on a point!" else d
})
output$click <- renderPrint({
d <- event_data("plotly_click")
if (is.null(d)) "Click to keep data (double-click to clear)" else d
})
# Pivot table
output$pivotTable <- renderRpivotTable({
dataFrameEurostat <- data.frame(data.eur)
rpivotTable(data = dataFrameEurostat, rows = "country_id",
cols=c("year_id"),
vals = "value",
rendererName = "Pivot Table")
})
# =========================================================================
# CSF1 page outputs
# =========================================================================
output$genCsf1 <- renderPlotly({
plotCsfRankData(retrieveRankedCsf(getCsfInputData(), "innenv"))
})
output$csf1kpi1 <- renderPlotly({
plotKpiData(retrieveKpi(getGenInputData(), "smeinn"))
})
output$csf1kpi2 <- renderPlotly({
plotKpiData(retrieveKpi(getGenInputData(), "ppcmln"))
})
output$csf1kpi3 <- renderPlotly({
plotKpiData(retrieveKpi(getGenInputData(), "pcored"))
})
output$csf1kpi4 <- renderPlotly({
plotKpiData(retrieveKpi(getGenInputData(), "trdbln"))
})
#Gauges
# output$gaugecsf1kpi1 <- renderGauge({
# gauge(42, min = 0, max = 100, symbol = '%', gaugeSectors(success = c(80, 100), warning = c(40, 79), danger = c(0, 39)
# ))
# })
#kpi1
output$gaugecsf1kpi1de <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"smeinn", "DE"), 48.26, 25, 50, 0, "%")
})
output$gaugecsf1kpi1se <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"smeinn", "SE"), 48.26, 25, 50, 0, "%")
})
output$gaugecsf1kpi1pl <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"smeinn", "PL"), 48.26, 25, 50, 0, "%")
})
output$gaugecsf1kpi1il <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"smeinn", "IL"), 48.26, 25, 50, 0, "%")
})
#kpi2
output$gaugecsf1kpi2de <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"ppcmln", "DE"), 260.58, 220, 280, 20, "")
})
output$gaugecsf1kpi2se <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"ppcmln", "SE"), 260.58, 220, 280, 20, "")
})
output$gaugecsf1kpi2pl <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"ppcmln", "PL"), 260.58, 220, 280, 20, "")
})
output$gaugecsf1kpi2il <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"ppcmln", "IL"), 260.58, 220, 280, 20, "")
})
#kpi3
output$gaugecsf1kpi3de <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"pcored", "DE"), 0.5, 0.25, 0.6, 0, "%")
})
output$gaugecsf1kpi3se <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"pcored", "SE"), 0.5, 0.25, 0.6, 0, "%")
})
output$gaugecsf1kpi3pl <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"pcored", "PL"), 0.5, 0.25, 0.6, 0, "%")
})
output$gaugecsf1kpi3il <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"pcored", "IL"), 0.5, 0.25, 0.6, 0, "%")
})
#kpi4
output$gaugecsf1kpi4de <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"trdbln", "DE"), 43.14, 25, 45, 0, "")
})
output$gaugecsf1kpi4se <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"trdbln", "SE"), 43.14, 25, 45, 0, "")
})
output$gaugecsf1kpi4pl <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"trdbln", "PL"), 43.14, 25, 45, 0, "")
})
output$gaugecsf1kpi4il <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"trdbln", "IL"), 43.14, 25, 45, 0, "")
})
# =========================================================================
# CSF2 page outputs
# =========================================================================
output$genCsf2 <- renderPlotly({
plotCsfRankData(retrieveRankedCsf(getCsfInputData(), "easbus"))
})
output$csf2kpi1 <- renderPlotly({
plotKpiData(retrieveKpi(getGenInputData(), "getcre"))
})
output$csf2kpi2 <- renderPlotly({
plotKpiData(retrieveKpi(getGenInputData(), "strday"))
})
output$csf2kpi3 <- renderPlotly({
plotKpiData(retrieveKpi(getGenInputData(), "payhou"))
})
output$csf2kpi4 <- renderPlotly({
plotKpiData(retrieveKpi(getGenInputData(), "enfday"))
})
#Gauges
#kpi1
output$gaugecsf2kpi1de <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"getcre", "DE"), 100, 50, 100, 0, "")
})
output$gaugecsf2kpi1se <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"getcre", "SE"), 100, 50, 100, 0, "")
})
output$gaugecsf2kpi1pl <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"getcre", "PL"), 100, 50, 100, 0, "")
})
output$gaugecsf2kpi1il <- renderGauge({
plotGauge(retrieveGaugeData(getLastData(),"getcre", "IL"), 100, 50, 100, 0, "")
})
#kpi2
output$gaugecsf2kpi2de <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"strday", "DE"), 0.5, 10, 50, 0, "")
})
output$gaugecsf2kpi2se <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"strday", "SE"), 0.5, 10, 50, 0, "")
})
output$gaugecsf2kpi2pl <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"strday", "PL"), 0.5, 10, 50, 0, "")
})
output$gaugecsf2kpi2il <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"strday", "IL"), 0.5, 10, 50, 0, "")
})
#kpi3
output$gaugecsf2kpi3de <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"payhou", "DE"), 12, 50, 200, 0, "")
})
output$gaugecsf2kpi3se <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"payhou", "SE"), 12, 50, 200, 0, "")
})
output$gaugecsf2kpi3pl <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"payhou", "PL"), 12, 50, 200, 0, "")
})
output$gaugecsf2kpi3il <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"payhou", "IL"), 12, 50, 200, 0, "")
})
#kpi4
output$gaugecsf2kpi4de <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"enfday", "DE"), 164, 220, 365, 1, "")
})
output$gaugecsf2kpi4se <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"enfday", "SE"), 164, 220, 365, 1, "")
})
output$gaugecsf2kpi4pl <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"enfday", "PL"), 164, 220, 365, 1, "")
})
output$gaugecsf2kpi4il <- renderGauge({
plotGauge2(retrieveGaugeData(getLastData(),"enfday", "IL"), 164, 220, 365, 1, "")
})
# =========================================================================
# CSF3 page outputs
# =========================================================================
output$genCsf3 <- renderPlotly({
plotCsfRankData(retrieveRankedCsf(getCsfInputData(), "innenv"))
})
output$csf3kpi1 <- renderPlotly({
plotKpiData(retrieveKpi(getGenInputData(), "exppmt"))
})
output$csf3kpi2 <- renderPlotly({
plotKpiData(retrieveKpi(getGenInputData(), "taxrev"))
})
output$csf3kpi3 <- renderPlotly({
plotKpiData(retrieveKpi(getGenInputData(), "shrrnw"))
})
output$csf3kpi4 <- renderPlotly({
plotKpiData(retrieveKpi(getGenInputData(), "prdtps"))
})
  # ---- CSF3 gauges: latest value per country (DE/SE/PL/IL) ----
  # NOTE(review): KPI 1 uses plotGauge2() while KPIs 2-4 use plotGauge();
  # confirm the mix is intentional.
  # KPI 1: indicator "exppmt"
  output$gaugecsf3kpi1de <- renderGauge({
    plotGauge2(retrieveGaugeData(getLastData(),"exppmt", "DE"), 12, 20, 80, 0, "")
  })
  output$gaugecsf3kpi1se <- renderGauge({
    plotGauge2(retrieveGaugeData(getLastData(),"exppmt", "SE"), 12, 20, 80, 0, "")
  })
  output$gaugecsf3kpi1pl <- renderGauge({
    plotGauge2(retrieveGaugeData(getLastData(),"exppmt", "PL"), 12, 20, 80, 0, "")
  })
  output$gaugecsf3kpi1il <- renderGauge({
    plotGauge2(retrieveGaugeData(getLastData(),"exppmt", "IL"), 12, 20, 80, 0, "")
  })
  # KPI 2: indicator "taxrev"
  output$gaugecsf3kpi2de <- renderGauge({
    plotGauge(retrieveGaugeData(getLastData(),"taxrev", "DE"), 4, 1.5, 5, 0, "")
  })
  output$gaugecsf3kpi2se <- renderGauge({
    plotGauge(retrieveGaugeData(getLastData(),"taxrev", "SE"), 4, 1.5, 5, 0, "")
  })
  output$gaugecsf3kpi2pl <- renderGauge({
    plotGauge(retrieveGaugeData(getLastData(),"taxrev", "PL"), 4, 1.5, 5, 0, "")
  })
  output$gaugecsf3kpi2il <- renderGauge({
    plotGauge(retrieveGaugeData(getLastData(),"taxrev", "IL"), 4, 1.5, 5, 0, "")
  })
  # KPI 3: indicator "shrrnw"
  output$gaugecsf3kpi3de <- renderGauge({
    plotGauge(retrieveGaugeData(getLastData(),"shrrnw", "DE"), 15, 10, 25, 0, "")
  })
  output$gaugecsf3kpi3se <- renderGauge({
    plotGauge(retrieveGaugeData(getLastData(),"shrrnw", "SE"), 15, 10, 25, 0, "")
  })
  output$gaugecsf3kpi3pl <- renderGauge({
    plotGauge(retrieveGaugeData(getLastData(),"shrrnw", "PL"), 15, 10, 25, 0, "")
  })
  output$gaugecsf3kpi3il <- renderGauge({
    plotGauge(retrieveGaugeData(getLastData(),"shrrnw", "IL"), 15, 10, 25, 0, "")
  })
  # KPI 4: indicator "prdtps"
  output$gaugecsf3kpi4de <- renderGauge({
    plotGauge(retrieveGaugeData(getLastData(),"prdtps", "DE"), 15143, 10000, 17000, 5000, "")
  })
  output$gaugecsf3kpi4se <- renderGauge({
    plotGauge(retrieveGaugeData(getLastData(),"prdtps", "SE"), 15143, 10000, 17000, 5000, "")
  })
  output$gaugecsf3kpi4pl <- renderGauge({
    plotGauge(retrieveGaugeData(getLastData(),"prdtps", "PL"), 15143, 10000, 17000, 5000, "")
  })
  output$gaugecsf3kpi4il <- renderGauge({
    plotGauge(retrieveGaugeData(getLastData(),"prdtps", "IL"), 15143, 10000, 17000, 5000, "")
  })
# Correlation and Prediction
getCorrData <- reactive({
var1 <- input$csfCorr1
result <- data %>% filter(csf_id == var1)
return(result)
})
output$corrPlot <- renderPlot({
inputData <- getCorrData()
inputData <- dcast(inputData, country_id + year_id ~ indicator_id)
# Select variable columns
inputData <- inputData[, c(3,4,5,6)]
# Create corr matrix
cormat <- round(cor(inputData),2)
# Get upper triangle matrix
# From http://www.sthda.com/english/wiki/ggplot2-quick-correlation-matrix-heatmap-r-software-and-data-visualization
cormat[lower.tri(cormat)] <- NA
upper_tri <- cormat
# Reshape data
melted_cormat <- melt(upper_tri, na.rm = TRUE)
g <- ggplot(data = melted_cormat, aes(Var2, Var1, fill = value)) +
geom_tile(color = "white") +
scale_fill_gradient2(low = "blue", high = "red", mid = "white",
midpoint = 0, limit = c(-1,1), space = "Lab",
name="Pearson\nCorrelation") +
theme_minimal() +
theme(axis.text.x = element_text(angle = 45, vjust = 1,
size = 12, hjust = 1)) +
coord_fixed()
return(g)
})
getPredData <- reactive({
var <- input$predInd
countryP <- input$countryPred
result <- data %>% filter(indicator_id == var, country_id == countryP)
return(result)
})
output$predPlot <- renderPlotly({
inputData <- getPredData()
inputData <- dcast(inputData, country_id + year_id ~ indicator_id)
# Convert into timeseries
myts <- ts(inputData[,3], start=min(inputData$year_id), end=max(inputData$year_id), frequency=1)
# Predict with auto arima
dif <- diff(myts)
fit = auto.arima(dif, seasonal = FALSE, allowmean = TRUE, allowdrift = TRUE)
pred = predict(fit, n.ahead = 5)
input_pred <- myts[length(myts)]
for(i in 1:length(pred$pred)){
input_pred[i+1]<-input_pred[i]+pred$pred[i]
}
input_pred <- ts(input_pred, start=max(inputData$year_id), frequency=1)
# From ts to df
end_ts <- ts(c(myts,input_pred), start=start(myts), frequency=frequency(myts))
df <- data.frame(years=index(end_ts), coredata(end_ts)) %>%
rename(Values = coredata.end_ts.)
# Convert to years to plot
df$years <- lubridate::ymd(df$years, truncated = 2L)
# Plot prediction + historic
p <- ggplot(df, aes(x = years, y = Values)) +
geom_line(size = 1) +
# scale_x_continuous(breaks=seq(2010, 2022, 1)) +
xlab("Date") +
ylab("Value") +
theme_minimal()
gg <- plotly_build(p)
return(gg)
})
}
|
# functions, taken from ECHSE code
# Bulk stomatal resistance of a leaf (s m-1): the minimum resistance scaled
# up by the combined humidity and soil-water stress factors (each in (0, 1]).
res_stom_leaf <- function(res_min, cond_vap, cond_water) {
  combined_conductance <- cond_vap * cond_water
  res_min / combined_conductance
}
# Soil-water stress factor for stomatal conductance, in [0.01, 1].
#
# Converts volumetric water content to a suction value via a retention-curve
# relation, then applies a linear stress ramp between `wstressmin` (no
# stress -> 1) and `wstressmax` (maximum stress -> 0.01), after Guentner (2002).
# (Original ECHSE note: "problems with this function; see resistances.h".)
#
# Generalised to accept a numeric vector `wc` (the original scalar `if`
# errors on vector input in R >= 4.2); scalar inputs return the same values
# as before. The remaining arguments are scalar soil parameters.
stress_soilwater <- function(wc,
                             wc_sat,
                             wc_res,
                             bubble,
                             pores_ind,
                             wstressmin,
                             wstressmax
                             ) {
  # relative saturation; as.numeric() drops xts/zoo attributes so the logical
  # indexing below behaves predictably when callers pass time-series objects
  sat_rel <- as.numeric((wc - wc_res) / (wc_sat - wc_res))
  m <- pores_ind / (pores_ind + 1)
  # suction: zero at (near) saturation, retention-curve value otherwise
  suction <- numeric(length(sat_rel))
  unsat <- sat_rel < 0.999
  suction[unsat] <- (1 / (sat_rel[unsat]^(1 / m)) - 1)^(1 / (pores_ind + 1)) * bubble
  # linear stress ramp between the two suction thresholds (Guentner 2002)
  stress <- 1 - (suction - wstressmin) / (wstressmax - wstressmin)
  stress[suction < wstressmin] <- 1
  stress[suction >= wstressmax] <- 0.01
  stress
}
# Stomatal stress response to vapour pressure deficit: 1 at zero deficit,
# decreasing hyperbolically as the deficit grows.
# (Original ECHSE note: "problems with this function; see resistances.h".)
stress_humidity <- function(vap_deficit, par) {
  1 / (1 + par * vap_deficit)
}
# Saturation vapour pressure over water (hPa), Magnus-type formula with
# temperature in degrees Celsius.
satVapPress_overWater <- function(temp) {
  magnus_exp <- 7.5 * temp / (237.3 + temp)
  6.11 * 10^magnus_exp
}
# Actual vapour pressure (hPa): the saturation value scaled by relative
# humidity given in percent.
vapPress_overWater <- function(temp, relhum) {
  satVapPress_overWater(temp) * relhum / 100
}
# Slope of the saturation vapour pressure curve (hPa/K): analytic derivative
# of the Magnus formula used in satVapPress_overWater().
slopeSatVapPress <- function(temp) {
  satVapPress_overWater(temp) * 4098 / (237.3 + temp)^2
}
# Latent heat of vaporisation of water, linear in temperature (deg C).
latentHeatEvap <- function(temp) {
  2501 - 2.37 * temp
}
# Psychrometric constant for the given air pressure, using the
# temperature-dependent latent heat of vaporisation.
psychroConst <- function(temp, airpress) {
  0.016286 * airpress / latentHeatEvap(temp)
}
# Two-source (canopy + soil) evapotranspiration after Shuttleworth & Wallace
# (1985), combining a canopy transpiration term (eq. 12) and a soil
# evaporation term (eq. 13) with the resistance-based weights of eqs. 14-18.
# As noted in the original, this differs from the ECHSE function in that it
# returns only et/1000, and the result may be negative.
# NOTE(review): SPHEATMOIST (a specific-heat term) is expected to be defined
# globally; it is not set anywhere in this file -- confirm before running.
et_sw <- function(lambda,    # latent heat of water evaporation (J/kg)
                  delta,     # slope of saturation vapour pressure curve (hPa/K)
                  H_net,     # net incoming radiation at reference height (W/m2)
                  H_soil,    # net incoming radiation hitting the soil surface (W/m2)
                  totalheat, # heat conduction into soil AND plants (W/m2)
                  soilheat,  # soil heat flux (W/m2)
                  rho_air,   # air density (kg/m3)
                  ez_0,      # saturation vapour pressure of air (hPa)
                  ez,        # vapour pressure of air (hPa)
                  gamma,     # psychrometric constant (hPa/K)
                  r_cs,      # bulk stomatal resistance of the canopy (s/m)
                  r_ca,      # bulk boundary layer resistance of canopy elements (s/m)
                  r_ss,      # soil surface resistance (s/m)
                  r_sa,      # aerodynamic resistance soil surface -> canopy source height (s/m)
                  r_aa       # aerodynamic resistance canopy source height -> reference height (s/m)
                  ) {
  # available energy above the canopy (eq. 3) and at the substrate (eq. 5)
  energy_total <- H_net - totalheat
  energy_soil <- H_soil - soilheat
  # vapour pressure deficit at reference/measurement height (hPa)
  vpd <- ez_0 - ez
  # Penman-Monteith-like terms: canopy transpiration (eq. 12) and soil
  # evaporation (eq. 13), both in W/m2
  pm_canopy <- (delta * energy_total +
                  ((rho_air * SPHEATMOIST * vpd) - (delta * r_ca * energy_soil)) / (r_aa + r_ca)) /
    (delta + gamma * (1 + r_cs / (r_aa + r_ca)))
  pm_soil <- (delta * energy_total +
                ((rho_air * SPHEATMOIST * vpd) - (delta * r_sa * (energy_total - energy_soil))) / (r_aa + r_sa)) /
    (delta + gamma * (1 + r_ss / (r_aa + r_sa)))
  # resistance combinations, eqs. 16-18
  R_a <- (delta + gamma) * r_aa
  R_s <- (delta + gamma) * r_sa + gamma * r_ss
  R_c <- (delta + gamma) * r_ca + gamma * r_cs
  # weighting coefficients, eqs. 14-15 (dimensionless)
  C_c <- 1 / (1 + R_c * R_a / (R_s * (R_c + R_a)))
  C_s <- 1 / (1 + R_s * R_a / (R_c * (R_s + R_a)))
  # evapotranspiration rate, eq. 11, divided by 1000 (see header note)
  (C_c * pm_canopy + C_s * pm_soil) / lambda / 1000
}
# my parameters (soil-hydraulic and stress parameters; units per ECHSE usage)
bubble <- 8.08 # air-entry ("bubbling") pressure of the retention curve -- unit per ECHSE, confirm
par_stressHum <- 0.03 # Guentner, WASA code; humidity stress parameter
pores_ind <- 0.45 # pore-size distribution index (-)
res_leaf_min <- 50 # minimum stomatal leaf resistance, s.m-1
wc_res <- 0.049 # residual water content -- presumably m3/m3, confirm
wc_sat <- 0.387 # saturated water content -- presumably m3/m3, confirm
wstressmax <- 15000 # suction above which water stress is maximal
wstressmin <- 10 # suction below which there is no water stress
# read station data (site "HS") and convert to xts; column 1 = timestamp,
# column 2 = value
library(xts)
rhum <- read.delim("~/uni/projects/evap_portugal/data/forcing/meteo/05_meteofill/out/HS/rhum_data.dat")
rhum <- xts(rhum[, 2], order.by=as.POSIXct(rhum[, 1])) # relative humidity, % (divided by 100 downstream)
temper <- read.delim("~/uni/projects/evap_portugal/data/forcing/meteo/05_meteofill/out/HS/temper_data.dat")
temper <- xts(temper[, 2], order.by=as.POSIXct(temper[, 1])) # air temperature, deg C
wc_vol_root <- read.delim("~/uni/projects/evap_portugal/data/forcing/meteo/05_meteofill/out/HS/wc_vol_root_data.dat")
wc_vol_root <- xts(wc_vol_root[, 2], order.by=as.POSIXct(wc_vol_root[, 1])) # volumetric water content, root zone
# all = FALSE: keep only timestamps present in all three series (inner join)
mdata <- merge(rhum, temper, wc_vol_root, all=FALSE)
# calculate internal (and possibly erroneous) results for the first records
n_steps <- 2000 # number of records to inspect (subset of the full series)
# preallocate result vectors instead of growing them element-wise in the loop
ez0 <- numeric(n_steps)        # saturation vapour pressure (hPa)
ez <- numeric(n_steps)         # actual vapour pressure (hPa)
cond_vap <- numeric(n_steps)   # humidity stress factor (-)
cond_water <- numeric(n_steps) # soil water stress factor (-)
res_leaf <- numeric(n_steps)   # stomatal leaf resistance (s m-1)
for (ix in seq_len(n_steps)) {
  ez0[ix] <- with(mdata[ix], satVapPress_overWater(temper))
  ez[ix] <- with(mdata[ix], vapPress_overWater(temper, rhum))
  cond_vap[ix] <- with(mdata[ix], stress_humidity(ez0[ix] - ez[ix], par_stressHum))
  cond_water[ix] <- with(mdata[ix], stress_soilwater(wc_vol_root, wc_sat, wc_res, bubble, pores_ind, wstressmin, wstressmax))
  res_leaf[ix] <- res_stom_leaf(res_leaf_min, cond_vap[ix], cond_water[ix])
}
# net radiation would be needed to drive et_sw(), e.g.:
#H_net <- (1. - alb) * glorad + H_long
# quick visual check: soil-water stress factor vs. the underlying water content
par(mfrow=c(2, 1), mar=c(4, 4, 0, 1))
plot(cond_water, type="l")
plot(wc_vol_root[1:2000], main="")
| /R/echse_check_functions.R | no_license | juliuseberhard/echse-et-code | R | false | false | 5,894 | r | # functions, taken from ECHSE code
# Bulk stomatal resistance of a leaf (s m-1): the minimum resistance scaled
# up by the combined humidity and soil-water stress factors (each in (0, 1]).
res_stom_leaf <- function(res_min, cond_vap, cond_water) {
  combined_conductance <- cond_vap * cond_water
  res_min / combined_conductance
}
# Soil-water stress factor for stomatal conductance, in [0.01, 1].
#
# Converts volumetric water content to a suction value via a retention-curve
# relation, then applies a linear stress ramp between `wstressmin` (no
# stress -> 1) and `wstressmax` (maximum stress -> 0.01), after Guentner (2002).
# (Original ECHSE note: "problems with this function; see resistances.h".)
#
# Generalised to accept a numeric vector `wc` (the original scalar `if`
# errors on vector input in R >= 4.2); scalar inputs return the same values
# as before. The remaining arguments are scalar soil parameters.
stress_soilwater <- function(wc,
                             wc_sat,
                             wc_res,
                             bubble,
                             pores_ind,
                             wstressmin,
                             wstressmax
                             ) {
  # relative saturation; as.numeric() drops xts/zoo attributes so the logical
  # indexing below behaves predictably when callers pass time-series objects
  sat_rel <- as.numeric((wc - wc_res) / (wc_sat - wc_res))
  m <- pores_ind / (pores_ind + 1)
  # suction: zero at (near) saturation, retention-curve value otherwise
  suction <- numeric(length(sat_rel))
  unsat <- sat_rel < 0.999
  suction[unsat] <- (1 / (sat_rel[unsat]^(1 / m)) - 1)^(1 / (pores_ind + 1)) * bubble
  # linear stress ramp between the two suction thresholds (Guentner 2002)
  stress <- 1 - (suction - wstressmin) / (wstressmax - wstressmin)
  stress[suction < wstressmin] <- 1
  stress[suction >= wstressmax] <- 0.01
  stress
}
# Stomatal stress response to vapour pressure deficit: 1 at zero deficit,
# decreasing hyperbolically as the deficit grows.
# (Original ECHSE note: "problems with this function; see resistances.h".)
stress_humidity <- function(vap_deficit, par) {
  1 / (1 + par * vap_deficit)
}
# Saturation vapour pressure over water (hPa), Magnus-type formula with
# temperature in degrees Celsius.
satVapPress_overWater <- function(temp) {
  magnus_exp <- 7.5 * temp / (237.3 + temp)
  6.11 * 10^magnus_exp
}
# Actual vapour pressure (hPa): the saturation value scaled by relative
# humidity given in percent.
vapPress_overWater <- function(temp, relhum) {
  satVapPress_overWater(temp) * relhum / 100
}
# Slope of the saturation vapour pressure curve (hPa/K): analytic derivative
# of the Magnus formula used in satVapPress_overWater().
slopeSatVapPress <- function(temp) {
  satVapPress_overWater(temp) * 4098 / (237.3 + temp)^2
}
# Latent heat of vaporisation of water, linear in temperature (deg C).
latentHeatEvap <- function(temp) {
  2501 - 2.37 * temp
}
# Psychrometric constant for the given air pressure, using the
# temperature-dependent latent heat of vaporisation.
psychroConst <- function(temp, airpress) {
  0.016286 * airpress / latentHeatEvap(temp)
}
# Two-source (canopy + soil) evapotranspiration after Shuttleworth & Wallace
# (1985), combining a canopy transpiration term (eq. 12) and a soil
# evaporation term (eq. 13) with the resistance-based weights of eqs. 14-18.
# As noted in the original, this differs from the ECHSE function in that it
# returns only et/1000, and the result may be negative.
# NOTE(review): SPHEATMOIST (a specific-heat term) is expected to be defined
# globally; it is not set anywhere in this file -- confirm before running.
et_sw <- function(lambda,    # latent heat of water evaporation (J/kg)
                  delta,     # slope of saturation vapour pressure curve (hPa/K)
                  H_net,     # net incoming radiation at reference height (W/m2)
                  H_soil,    # net incoming radiation hitting the soil surface (W/m2)
                  totalheat, # heat conduction into soil AND plants (W/m2)
                  soilheat,  # soil heat flux (W/m2)
                  rho_air,   # air density (kg/m3)
                  ez_0,      # saturation vapour pressure of air (hPa)
                  ez,        # vapour pressure of air (hPa)
                  gamma,     # psychrometric constant (hPa/K)
                  r_cs,      # bulk stomatal resistance of the canopy (s/m)
                  r_ca,      # bulk boundary layer resistance of canopy elements (s/m)
                  r_ss,      # soil surface resistance (s/m)
                  r_sa,      # aerodynamic resistance soil surface -> canopy source height (s/m)
                  r_aa       # aerodynamic resistance canopy source height -> reference height (s/m)
                  ) {
  # available energy above the canopy (eq. 3) and at the substrate (eq. 5)
  energy_total <- H_net - totalheat
  energy_soil <- H_soil - soilheat
  # vapour pressure deficit at reference/measurement height (hPa)
  vpd <- ez_0 - ez
  # Penman-Monteith-like terms: canopy transpiration (eq. 12) and soil
  # evaporation (eq. 13), both in W/m2
  pm_canopy <- (delta * energy_total +
                  ((rho_air * SPHEATMOIST * vpd) - (delta * r_ca * energy_soil)) / (r_aa + r_ca)) /
    (delta + gamma * (1 + r_cs / (r_aa + r_ca)))
  pm_soil <- (delta * energy_total +
                ((rho_air * SPHEATMOIST * vpd) - (delta * r_sa * (energy_total - energy_soil))) / (r_aa + r_sa)) /
    (delta + gamma * (1 + r_ss / (r_aa + r_sa)))
  # resistance combinations, eqs. 16-18
  R_a <- (delta + gamma) * r_aa
  R_s <- (delta + gamma) * r_sa + gamma * r_ss
  R_c <- (delta + gamma) * r_ca + gamma * r_cs
  # weighting coefficients, eqs. 14-15 (dimensionless)
  C_c <- 1 / (1 + R_c * R_a / (R_s * (R_c + R_a)))
  C_s <- 1 / (1 + R_s * R_a / (R_c * (R_s + R_a)))
  # evapotranspiration rate, eq. 11, divided by 1000 (see header note)
  (C_c * pm_canopy + C_s * pm_soil) / lambda / 1000
}
# my parameters (soil-hydraulic and stress parameters; units per ECHSE usage)
bubble <- 8.08 # air-entry ("bubbling") pressure of the retention curve -- unit per ECHSE, confirm
par_stressHum <- 0.03 # Guentner, WASA code; humidity stress parameter
pores_ind <- 0.45 # pore-size distribution index (-)
res_leaf_min <- 50 # minimum stomatal leaf resistance, s.m-1
wc_res <- 0.049 # residual water content -- presumably m3/m3, confirm
wc_sat <- 0.387 # saturated water content -- presumably m3/m3, confirm
wstressmax <- 15000 # suction above which water stress is maximal
wstressmin <- 10 # suction below which there is no water stress
# read station data (site "HS") and convert to xts; column 1 = timestamp,
# column 2 = value
library(xts)
rhum <- read.delim("~/uni/projects/evap_portugal/data/forcing/meteo/05_meteofill/out/HS/rhum_data.dat")
rhum <- xts(rhum[, 2], order.by=as.POSIXct(rhum[, 1])) # relative humidity, % (divided by 100 downstream)
temper <- read.delim("~/uni/projects/evap_portugal/data/forcing/meteo/05_meteofill/out/HS/temper_data.dat")
temper <- xts(temper[, 2], order.by=as.POSIXct(temper[, 1])) # air temperature, deg C
wc_vol_root <- read.delim("~/uni/projects/evap_portugal/data/forcing/meteo/05_meteofill/out/HS/wc_vol_root_data.dat")
wc_vol_root <- xts(wc_vol_root[, 2], order.by=as.POSIXct(wc_vol_root[, 1])) # volumetric water content, root zone
# all = FALSE: keep only timestamps present in all three series (inner join)
mdata <- merge(rhum, temper, wc_vol_root, all=FALSE)
# calculate internal (and possibly erroneous) results for the first records
n_steps <- 2000 # number of records to inspect (subset of the full series)
# preallocate result vectors instead of growing them element-wise in the loop
ez0 <- numeric(n_steps)        # saturation vapour pressure (hPa)
ez <- numeric(n_steps)         # actual vapour pressure (hPa)
cond_vap <- numeric(n_steps)   # humidity stress factor (-)
cond_water <- numeric(n_steps) # soil water stress factor (-)
res_leaf <- numeric(n_steps)   # stomatal leaf resistance (s m-1)
for (ix in seq_len(n_steps)) {
  ez0[ix] <- with(mdata[ix], satVapPress_overWater(temper))
  ez[ix] <- with(mdata[ix], vapPress_overWater(temper, rhum))
  cond_vap[ix] <- with(mdata[ix], stress_humidity(ez0[ix] - ez[ix], par_stressHum))
  cond_water[ix] <- with(mdata[ix], stress_soilwater(wc_vol_root, wc_sat, wc_res, bubble, pores_ind, wstressmin, wstressmax))
  res_leaf[ix] <- res_stom_leaf(res_leaf_min, cond_vap[ix], cond_water[ix])
}
# net radiation would be needed to drive et_sw(), e.g.:
#H_net <- (1. - alb) * glorad + H_long
# quick visual check: soil-water stress factor vs. the underlying water content
par(mfrow=c(2, 1), mar=c(4, 4, 0, 1))
plot(cond_water, type="l")
plot(wc_vol_root[1:2000], main="")
|
# BrAPI-Core
#
# The Breeding API (BrAPI) is a Standardized REST ful Web Service API Specification for communicating Plant Breeding Data. BrAPI allows for easy data sharing between databases and tools involved in plant breeding. <div class=\"brapi-section\"> <h2 class=\"brapi-section-title\">General Reference Documentation</h2> <div class=\"gen-info-link\"><a href=\"https://github.com/plantbreeding/API/blob/master/Specification/GeneralInfo/URL_Structure.md\">URL Structure</a></div> <div class=\"gen-info-link\"><a href=\"https://github.com/plantbreeding/API/blob/master/Specification/GeneralInfo/Response_Structure.md\">Response Structure</a></div> <div class=\"gen-info-link\"><a href=\"https://github.com/plantbreeding/API/blob/master/Specification/GeneralInfo/Date_Time_Encoding.md\">Date/Time Encoding</a></div> <div class=\"gen-info-link\"><a href=\"https://github.com/plantbreeding/API/blob/master/Specification/GeneralInfo/Location_Encoding.md\">Location Encoding</a></div> <div class=\"gen-info-link\"><a href=\"https://github.com/plantbreeding/API/blob/master/Specification/GeneralInfo/Error_Handling.md\">Error Handling</a></div> <div class=\"gen-info-link\"><a href=\"https://github.com/plantbreeding/API/blob/master/Specification/GeneralInfo/Search_Services.md\">Search Services</a></div> </div> <div class=\"current-brapi-section brapi-section\"> <h2 class=\"brapi-section-title\">BrAPI Core</h2> <div class=\"brapi-section-description\">The BrAPI Core module contains high level entities used for organization and management. 
This includes Programs, Trials, Studies, Locations, People, and Lists</div> <div class=\"version-number\">V2.0</div> <div class=\"link-btn\"><a href=\"https://github.com/plantbreeding/API/tree/master/Specification/BrAPI-Core\">GitHub</a></div> <div class=\"link-btn\"><a href=\"https://app.swaggerhub.com/apis/PlantBreedingAPI/BrAPI-Core\">SwaggerHub</a></div> <div class=\"link-btn\"><a href=\"https://brapicore.docs.apiary.io\">Apiary</a></div> <div class=\"stop-float\"></div> </div> <div class=\"brapi-section\"> <h2 class=\"brapi-section-title\">BrAPI Phenotyping</h2> <div class=\"brapi-section-description\">The BrAPI Phenotyping module contains entities related to phenotypic observations. This includes Observation Units, Observations, Observation Variables, Traits, Scales, Methods, and Images</div> <div class=\"version-number\">V2.0</div> <div class=\"link-btn\"><a href=\"https://github.com/plantbreeding/API/tree/master/Specification/BrAPI-Phenotyping\">GitHub</a></div> <div class=\"link-btn\"><a href=\"https://app.swaggerhub.com/apis/PlantBreedingAPI/BrAPI-Phenotyping\">SwaggerHub</a></div> <div class=\"link-btn\"><a href=\"https://brapiphenotyping.docs.apiary.io\">Apiary</a></div> <div class=\"stop-float\"></div> </div> <div class=\"brapi-section\"> <h2 class=\"brapi-section-title\">BrAPI Genotyping</h2> <div class=\"brapi-section-description\">The BrAPI Genotyping module contains entities related to genotyping analysis. 
This includes Samples, Markers, Variant Sets, Variants, Call Sets, Calls, References, Reads, and Vendor Orders</div> <div class=\"version-number\">V2.0</div> <div class=\"link-btn\"><a href=\"https://github.com/plantbreeding/API/tree/master/Specification/BrAPI-Genotyping\">GitHub</a></div> <div class=\"link-btn\"><a href=\"https://app.swaggerhub.com/apis/PlantBreedingAPI/BrAPI-Genotyping\">SwaggerHub</a></div> <div class=\"link-btn\"><a href=\"https://brapigenotyping.docs.apiary.io\">Apiary</a></div> <div class=\"stop-float\"></div> </div> <div class=\"brapi-section\"> <h2 class=\"brapi-section-title\">BrAPI Germplasm</h2> <div class=\"brapi-section-description\">The BrAPI Germplasm module contains entities related to germplasm management. This includes Germplasm, Germplasm Attributes, Seed Lots, Crosses, Pedigree, and Progeny</div> <div class=\"version-number\">V2.0</div> <div class=\"link-btn\"><a href=\"https://github.com/plantbreeding/API/tree/master/Specification/BrAPI-Germplasm\">GitHub</a></div> <div class=\"link-btn\"><a href=\"https://app.swaggerhub.com/apis/PlantBreedingAPI/BrAPI-Germplasm\">SwaggerHub</a></div> <div class=\"link-btn\"><a href=\"https://brapigermplasm.docs.apiary.io\">Apiary</a></div> <div class=\"stop-float\"></div> </div> <style> .link-btn{ float: left; margin: 2px 10px 0 0; padding: 0 5px; border-radius: 5px; background-color: #ddd; } .stop-float{ clear: both; } .version-number{ float: left; margin: 5px 10px 0 5px; } .brapi-section-title{ margin: 0 10px 0 0; font-size: 20px; } .current-brapi-section{ font-weight: bolder; border-radius: 5px; background-color: #ddd; } .brapi-section{ padding: 5px 5px; } .brapi-section-description{ margin: 5px 0 0 5px; } </style>
#
# The version of the OpenAPI document: 2.0
#
# Generated by: https://openapi-generator.tech
#' @docType class
#' @title StudyAllOf
#'
#' @description StudyAllOf Class
#'
#' @format An \code{R6Class} generator object
#'
#' @field studyDbId character
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
StudyAllOf <- R6::R6Class(
'StudyAllOf',
public = list(
`studyDbId` = NULL,
initialize = function(
`studyDbId`, ...
) {
local.optional.var <- list(...)
if (!missing(`studyDbId`)) {
stopifnot(is.character(`studyDbId`), length(`studyDbId`) == 1)
self$`studyDbId` <- `studyDbId`
}
},
toJSON = function() {
StudyAllOfObject <- list()
if (!is.null(self$`studyDbId`)) {
StudyAllOfObject[['studyDbId']] <-
self$`studyDbId`
}
StudyAllOfObject
},
fromJSON = function(StudyAllOfJson) {
StudyAllOfObject <- jsonlite::fromJSON(StudyAllOfJson)
if (!is.null(StudyAllOfObject$`studyDbId`)) {
self$`studyDbId` <- StudyAllOfObject$`studyDbId`
}
self
},
toJSONString = function() {
jsoncontent <- c(
if (!is.null(self$`studyDbId`)) {
sprintf(
'"studyDbId":
"%s"
',
self$`studyDbId`
)}
)
jsoncontent <- paste(jsoncontent, collapse = ",")
paste('{', jsoncontent, '}', sep = "")
},
fromJSONString = function(StudyAllOfJson) {
StudyAllOfObject <- jsonlite::fromJSON(StudyAllOfJson)
self$`studyDbId` <- StudyAllOfObject$`studyDbId`
self
}
)
)
| /R/study_all_of.R | no_license | Breeding-Insight/brapi-r-v2 | R | false | false | 6,378 | r | # BrAPI-Core
#
# The Breeding API (BrAPI) is a Standardized REST ful Web Service API Specification for communicating Plant Breeding Data. BrAPI allows for easy data sharing between databases and tools involved in plant breeding. <div class=\"brapi-section\"> <h2 class=\"brapi-section-title\">General Reference Documentation</h2> <div class=\"gen-info-link\"><a href=\"https://github.com/plantbreeding/API/blob/master/Specification/GeneralInfo/URL_Structure.md\">URL Structure</a></div> <div class=\"gen-info-link\"><a href=\"https://github.com/plantbreeding/API/blob/master/Specification/GeneralInfo/Response_Structure.md\">Response Structure</a></div> <div class=\"gen-info-link\"><a href=\"https://github.com/plantbreeding/API/blob/master/Specification/GeneralInfo/Date_Time_Encoding.md\">Date/Time Encoding</a></div> <div class=\"gen-info-link\"><a href=\"https://github.com/plantbreeding/API/blob/master/Specification/GeneralInfo/Location_Encoding.md\">Location Encoding</a></div> <div class=\"gen-info-link\"><a href=\"https://github.com/plantbreeding/API/blob/master/Specification/GeneralInfo/Error_Handling.md\">Error Handling</a></div> <div class=\"gen-info-link\"><a href=\"https://github.com/plantbreeding/API/blob/master/Specification/GeneralInfo/Search_Services.md\">Search Services</a></div> </div> <div class=\"current-brapi-section brapi-section\"> <h2 class=\"brapi-section-title\">BrAPI Core</h2> <div class=\"brapi-section-description\">The BrAPI Core module contains high level entities used for organization and management. 
This includes Programs, Trials, Studies, Locations, People, and Lists</div> <div class=\"version-number\">V2.0</div> <div class=\"link-btn\"><a href=\"https://github.com/plantbreeding/API/tree/master/Specification/BrAPI-Core\">GitHub</a></div> <div class=\"link-btn\"><a href=\"https://app.swaggerhub.com/apis/PlantBreedingAPI/BrAPI-Core\">SwaggerHub</a></div> <div class=\"link-btn\"><a href=\"https://brapicore.docs.apiary.io\">Apiary</a></div> <div class=\"stop-float\"></div> </div> <div class=\"brapi-section\"> <h2 class=\"brapi-section-title\">BrAPI Phenotyping</h2> <div class=\"brapi-section-description\">The BrAPI Phenotyping module contains entities related to phenotypic observations. This includes Observation Units, Observations, Observation Variables, Traits, Scales, Methods, and Images</div> <div class=\"version-number\">V2.0</div> <div class=\"link-btn\"><a href=\"https://github.com/plantbreeding/API/tree/master/Specification/BrAPI-Phenotyping\">GitHub</a></div> <div class=\"link-btn\"><a href=\"https://app.swaggerhub.com/apis/PlantBreedingAPI/BrAPI-Phenotyping\">SwaggerHub</a></div> <div class=\"link-btn\"><a href=\"https://brapiphenotyping.docs.apiary.io\">Apiary</a></div> <div class=\"stop-float\"></div> </div> <div class=\"brapi-section\"> <h2 class=\"brapi-section-title\">BrAPI Genotyping</h2> <div class=\"brapi-section-description\">The BrAPI Genotyping module contains entities related to genotyping analysis. 
This includes Samples, Markers, Variant Sets, Variants, Call Sets, Calls, References, Reads, and Vendor Orders</div> <div class=\"version-number\">V2.0</div> <div class=\"link-btn\"><a href=\"https://github.com/plantbreeding/API/tree/master/Specification/BrAPI-Genotyping\">GitHub</a></div> <div class=\"link-btn\"><a href=\"https://app.swaggerhub.com/apis/PlantBreedingAPI/BrAPI-Genotyping\">SwaggerHub</a></div> <div class=\"link-btn\"><a href=\"https://brapigenotyping.docs.apiary.io\">Apiary</a></div> <div class=\"stop-float\"></div> </div> <div class=\"brapi-section\"> <h2 class=\"brapi-section-title\">BrAPI Germplasm</h2> <div class=\"brapi-section-description\">The BrAPI Germplasm module contains entities related to germplasm management. This includes Germplasm, Germplasm Attributes, Seed Lots, Crosses, Pedigree, and Progeny</div> <div class=\"version-number\">V2.0</div> <div class=\"link-btn\"><a href=\"https://github.com/plantbreeding/API/tree/master/Specification/BrAPI-Germplasm\">GitHub</a></div> <div class=\"link-btn\"><a href=\"https://app.swaggerhub.com/apis/PlantBreedingAPI/BrAPI-Germplasm\">SwaggerHub</a></div> <div class=\"link-btn\"><a href=\"https://brapigermplasm.docs.apiary.io\">Apiary</a></div> <div class=\"stop-float\"></div> </div> <style> .link-btn{ float: left; margin: 2px 10px 0 0; padding: 0 5px; border-radius: 5px; background-color: #ddd; } .stop-float{ clear: both; } .version-number{ float: left; margin: 5px 10px 0 5px; } .brapi-section-title{ margin: 0 10px 0 0; font-size: 20px; } .current-brapi-section{ font-weight: bolder; border-radius: 5px; background-color: #ddd; } .brapi-section{ padding: 5px 5px; } .brapi-section-description{ margin: 5px 0 0 5px; } </style>
#
# The version of the OpenAPI document: 2.0
#
# Generated by: https://openapi-generator.tech
#' @docType class
#' @title StudyAllOf
#'
#' @description StudyAllOf Class
#'
#' @format An \code{R6Class} generator object
#'
#' @field studyDbId character
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
StudyAllOf <- R6::R6Class(
'StudyAllOf',
public = list(
`studyDbId` = NULL,
initialize = function(
`studyDbId`, ...
) {
local.optional.var <- list(...)
if (!missing(`studyDbId`)) {
stopifnot(is.character(`studyDbId`), length(`studyDbId`) == 1)
self$`studyDbId` <- `studyDbId`
}
},
toJSON = function() {
StudyAllOfObject <- list()
if (!is.null(self$`studyDbId`)) {
StudyAllOfObject[['studyDbId']] <-
self$`studyDbId`
}
StudyAllOfObject
},
fromJSON = function(StudyAllOfJson) {
StudyAllOfObject <- jsonlite::fromJSON(StudyAllOfJson)
if (!is.null(StudyAllOfObject$`studyDbId`)) {
self$`studyDbId` <- StudyAllOfObject$`studyDbId`
}
self
},
toJSONString = function() {
jsoncontent <- c(
if (!is.null(self$`studyDbId`)) {
sprintf(
'"studyDbId":
"%s"
',
self$`studyDbId`
)}
)
jsoncontent <- paste(jsoncontent, collapse = ",")
paste('{', jsoncontent, '}', sep = "")
},
fromJSONString = function(StudyAllOfJson) {
StudyAllOfObject <- jsonlite::fromJSON(StudyAllOfJson)
self$`studyDbId` <- StudyAllOfObject$`studyDbId`
self
}
)
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate_pi_ratings.R
\name{calculate_pi_ratings}
\alias{calculate_pi_ratings}
\title{Calculate Pi Ratings}
\usage{
calculate_pi_ratings(teams, outcomes, lambda, gamma, b, c, return_e)
}
\arguments{
\item{teams}{an (n x 2) character matrix,
contains unique names for the respective home and away teams in n
subsequent matches}
\item{outcomes}{an (n x 2) numeric matrix,
contains the points that the respective home and away teams scored in n
subsequent matches}
\item{lambda}{a constant, the learning rate for performance from
recent matches, default value: 0.035}
\item{gamma}{a constant, the learning rate for performance from
home to away and vice versa, default value: 0.7}
\item{b}{a constant, logarithmic base, default value: 10}
\item{c}{a constant, default value: 3}
\item{return_e}{a boolean variable, conditions the function
to return either the mean squared error when return_e = TRUE,
or the pi ratings when return_e = FALSE, default value: FALSE}
}
\value{
either an (n x 2) matrix containing the pi ratings for the teams in
the n input matches or the mean squared error for the specific parameter
setting, conditional on boolean parameter return_e being FALSE or TRUE
}
\description{
This function calculates dynamic performance ratings
called "pi ratings" for sport teams in competitive matches.
The pi rating system was developed by Constantinou and Fenton (2013)
<doi:10.1515/jqas-2012-0036>
}
\examples{
# toy example
teams <- matrix(c("team A", "team B", "team B", "team A"), nrow = 2)
outcomes <- matrix(c(1, 3, 2, 1), nrow = 2)
calculate_pi_ratings(teams, outcomes)
}
| /man/calculate_pi_ratings.Rd | no_license | cran/piratings | R | false | true | 1,722 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate_pi_ratings.R
\name{calculate_pi_ratings}
\alias{calculate_pi_ratings}
\title{Calculate Pi Ratings}
\usage{
calculate_pi_ratings(teams, outcomes, lambda, gamma, b, c, return_e)
}
\arguments{
\item{teams}{an (n x 2) character matrix,
contains unique names for the respective home and away teams in n
subsequent matches}
\item{outcomes}{an (n x 2) numeric matrix,
contains the points that the respective home and away teams scored in n
subsequent matches}
\item{lambda}{a constant, the learning rate for performance from
recent matches, default value: 0.035}
\item{gamma}{a constant, the learning rate for performance from
home to away and vice versa, default value: 0.7}
\item{b}{a constant, logarithmic base, default value: 10}
\item{c}{a constant, default value: 3}
\item{return_e}{a boolean variable, conditions the function
to return either the mean squared error when return_e = TRUE,
or the pi ratings when return_e = FALSE, default value: FALSE}
}
\value{
either an (n x 2) matrix containing the pi ratings for the teams in
the n input matches or the mean squared error for the specific parameter
setting, conditional on boolean parameter return_e being FALSE or TRUE
}
\description{
This function calculates dynamic performance ratings
called "pi ratings" for sport teams in competitive matches.
The pi rating system was developed by Constantinou and Fenton (2013)
<doi:10.1515/jqas-2012-0036>
}
\examples{
# toy example
teams <- matrix(c("team A", "team B", "team B", "team A"), nrow = 2)
outcomes <- matrix(c(1, 3, 2, 1), nrow = 2)
calculate_pi_ratings(teams, outcomes)
}
|
################################################################################
# Description: Summarise and visualise death and case time series per LTLA.
# Produce descriptive figures for paper.
#
# Author: Emily S Nightingale
# Date created: 30/09/2020
#
################################################################################
################################################################################
################################################################################
# SETUP
################################################################################
figdir <- "figures/descriptive"
# LTLA-week-aggregated observed deaths, expected deaths and LTLA covariates
deaths <- readRDS(here::here("data","aggregated","deaths.rds"))
cases <- readRDS(here::here("data","aggregated","cases.rds"))
# Fix: the "cases" element previously pointed at `deaths` (copy-paste slip),
# so both list entries held the death series.
data.list <- list(deaths = deaths, cases = cases)
# Shapefile with populations; keep English LTLAs (lad19cd codes contain "E")
regions <- readRDS(here::here("data","LA_shp_wpops.rds")) %>%
  dplyr::filter(grepl("E", lad19cd))
regions.df <- sf::st_drop_geometry(regions)
################################################################################
# DESCRIPTIVE SUMMARIES/PLOTS
################################################################################
# Outline map of the LTLA boundaries (no fill), saved to the figure directory.
# NOTE(review): both `col = "grey"` and `colour = NA` are supplied to
# geom_sf(); these name the same aesthetic -- confirm which is intended.
ggplot() +
  geom_sf(data = regions, aes(geometry = geometry), fill = NA, col = "grey", colour = NA) +
  map_theme()
ggsave(here::here(figdir, "ltla_map.png"), height = 6, width = 5)
## MAP TOTALS ##
# Cumulative counts over the observation period, mapped as rates per 100,000.
period <- cases[[3]][[1]]
# total confirmed cases per LTLA
cases[[1]] %>%
  group_by(lad19cd) %>%
  summarise(n = sum(n, na.rm = TRUE)) %>%
  full_join(regions) %>%
  basic_map(fill = "n", rate1e5 = TRUE, scale = FALSE) -> case_map # fix: FALSE, not reassignable F
# labs(title = "Confirmed cases per 100,000",
#      subtitle = paste(period[1], "-", period[2])) +
# total deaths per LTLA
deaths[[1]] %>%
  group_by(lad19cd) %>%
  summarise(n = sum(n, na.rm = TRUE)) %>%
  full_join(regions) %>%
  basic_map(fill = "n", rate1e5 = TRUE, scale = FALSE) -> death_map
# labs(title = "Deaths per 100,000") +
png(here::here(figdir, "map_totals.png"), height = 1000, width = 1500, res = 150)
case_map + death_map
dev.off()
## TIME SERIES - TOTALS ##
# Weekly national rates per 100,000 for deaths and cases, annotated with key
# policy/testing dates; `period` fixes a common x-axis range for both panels.
period <- deaths$breaks[[1]]
deaths[[1]] %>%
  group_by(w, week) %>%
  summarise(n = sum(n, na.rm = TRUE),
            tot_pop = sum(la_pop)) %>%
  ggplot() +
  # Fix: the line colour/width were previously passed to ggplot(), where they
  # are silently ignored; they belong on geom_line() (as in ts_cases below).
  geom_line(aes(week, n * 1e5 / tot_pop), col = "grey", lwd = 1.2) +
  labs(subtitle = "COVID-19-related deaths in England, by week of death",
       x = "",
       y = "Rate per 100,000") +
  geom_vline(xintercept = ymd("2020-03-23"), lty = "dashed", lwd = 0.2) +
  annotate("text", x = ymd("2020-03-22"), y = 10, label = "National lockdown enforced", cex = 2, hjust = "right") +
  scale_x_date(date_minor_breaks = "1 week",
               date_breaks = "1 month",
               date_labels = "%b",
               limits = period) +
  theme(legend.position = c(0.16, 0.60),
        legend.text = element_text(size = 8),
        legend.title = element_text(size = 8)) -> ts_deaths
cases[[1]] %>%
  group_by(w, week) %>%
  summarise(n = sum(n, na.rm = TRUE),
            tot_pop = sum(la_pop)) %>%
  ggplot() +
  geom_line(aes(week, n * 1e5 / tot_pop), col = "grey", lwd = 1.2) +
  geom_vline(xintercept = ymd("2020-03-12"), lty = "dashed", lwd = 0.2) +
  annotate("text", x = ymd("2020-03-13"), y = 25, label = "Community testing halted", cex = 2, hjust = "left") +
  geom_vline(xintercept = ymd("2020-03-23"), lty = "dashed", lwd = 0.2) +
  annotate("text", x = ymd("2020-03-24"), y = 35, label = "National lockdown enforced", cex = 2, hjust = "left") +
  geom_vline(xintercept = ymd("2020-04-15"), lty = "dashed", lwd = 0.2) +
  annotate("text", x = ymd("2020-04-16"), y = 45, label = "P2 available to care home residents and staff", cex = 2, hjust = "left") +
  geom_vline(xintercept = ymd("2020-05-18"), lty = "dashed", lwd = 0.2) +
  annotate("text", x = ymd("2020-05-19"), y = 55, label = "P2 available to all symptomatic cases", cex = 2, hjust = "left") +
  labs(subtitle = "Confirmed COVID-19 cases in England, by week of specimen",
       x = "Calendar week",
       y = "Rate per 100,000") +
  scale_x_date(date_minor_breaks = "1 week",
               date_breaks = "1 month",
               date_labels = "%b",
               limits = period) -> ts_cases
# Figure 1A: time series alongside the corresponding maps (patchwork layout;
# case_map/death_map come from the MAP TOTALS section above).
png(here::here(figdir, "fig1A.png"), height = 1200, width = 2000, res = 150)
(ts_cases | case_map) / (ts_deaths | death_map) + plot_layout(widths = c(2, 1))
dev.off()
## TIME SERIES - BY GEOGRAPHY ##

# Plotting window: full range of epidemic weeks in the death data
period <- deaths$breaks[[1]]

# Weekly death rate per 100,000, split by LTLA geography type
deaths[[1]] %>%
  group_by(w, week, geography) %>%
  summarise(n = sum(n, na.rm = TRUE),
            geog_pop = sum(la_pop)) %>%
  ggplot(aes(week, n * 1e5 / geog_pop, group = geography, col = geography)) +
  geom_line() +
  labs(subtitle = "COVID19-related deaths in England, by geography and week of death",
       x = "",
       y = "Rate per 100,000",
       colour = "Geography type") +
  geom_vline(xintercept = ymd("2020-03-23"), lty = "dashed", lwd = 0.2) +
  annotate("text", x = ymd("2020-03-22"), y = 15, label = "National lockdown enforced", cex = 2, hjust = "right") +
  scale_x_date(date_minor_breaks = "1 week",
               date_breaks = "1 month",
               date_labels = "%b",
               limits = period) +
  theme(legend.position = c(0.16, 0.60),
        legend.text = element_text(size = 8),
        legend.title = element_text(size = 8)) -> ts_geog_deaths

# Weekly confirmed-case rate per 100,000 by geography type, with policy
# annotations. FIX: removed a stray trailing comma in the first annotate().
cases[[1]] %>%
  group_by(w, week, geography) %>%
  summarise(n = sum(n, na.rm = TRUE),
            geog_pop = sum(la_pop)) %>%
  ggplot() +
  geom_line(aes(week, n * 1e5 / geog_pop, group = geography, col = geography)) +
  geom_vline(xintercept = ymd("2020-03-12"), lty = "dashed", lwd = 0.2) +
  annotate("text", x = ymd("2020-03-13"), y = 40, label = "Community testing halted", cex = 2, hjust = "left") +
  geom_vline(xintercept = ymd("2020-03-23"), lty = "dashed", lwd = 0.2) +
  annotate("text", x = ymd("2020-03-24"), y = 45, label = "National lockdown enforced", cex = 2, hjust = "left") +
  geom_vline(xintercept = ymd("2020-04-15"), lty = "dashed", lwd = 0.2) +
  annotate("text", x = ymd("2020-04-16"), y = 55, label = "P2 available to care home residents and staff", cex = 2, hjust = "left") +
  geom_vline(xintercept = ymd("2020-05-18"), lty = "dashed", lwd = 0.2) +
  annotate("text", x = ymd("2020-05-19"), y = 60, label = "P2 available to all symptomatic cases", cex = 2, hjust = "left") +
  labs(subtitle = "Confirmed COVID-19 cases in England, by geography and week of specimen",
       x = "Calendar week",
       y = "Rate per 100,000") +
  guides(col = "none") +
  scale_x_date(date_minor_breaks = "1 week",
               date_breaks = "1 month",
               date_labels = "%b",
               limits = period) -> ts_geog_cases

# Composite Figure 1 (time series + maps), written as both png and tiff.
# BUG FIX: previously png() and tiff() were both opened before plotting with a
# single dev.off(), so the figure only reached the tiff device and the png
# device was left open (fig1.png never written; device leak). Build the plot
# once and write each format inside its own device block.
fig1 <- (ts_geog_deaths | death_map) / (ts_geog_cases | case_map) +
  plot_layout(widths = c(2, 1)) +
  plot_annotation(tag_levels = 'A')

png(here::here(figdir, "fig1.png"), height = 1200, width = 2000, res = 150)
print(fig1)
dev.off()

tiff(here::here("figures", "paper", "fig1.tif"), height = 1500, width = 2200, res = 200)
print(fig1)
dev.off()
# ---------------------------------------------------------------------------- #
## GEOGRAPHY ##
# Categorical map of LTLAs coloured by geography type (London Borough,
# Metropolitan District, Non-metropolitan District, Unitary Authority).
png(here::here(figdir,"map_geog.png"), height = 800, width = 900, res = 150)
regions %>%
  basic_map(fill = "geography") +
  scale_fill_discrete()
dev.off()
# ---------------------------------------------------------------------------- #
## COVARIATES ##

# LTLA-level covariates carried in the weekly death data
cov_names <- c("med_age", "pop_dens", "IMD", "prop_minority")

# One row per LTLA: covariates summarised (mean) over the weekly rows, joined
# to median age and proportion male from the shapefile data frame (full_join
# matches on the shared columns, including lad19cd).
# CONSISTENCY: superseded summarise_at() replaced with across(), matching the
# across() usage in the summaries below.
deaths[[1]] %>%
  group_by(geography, lad19cd) %>%
  summarise(across(all_of(cov_names), base::mean)) %>%
  full_join(dplyr::select(regions.df, lad19cd, med_age, prop_male_all)) %>%
  ungroup() -> covs
get_quants <- function(var){ paste(round(quantile(var, p = c(0.25,0.5,0.75)),2), collapse = ", ")}
# Quartile summaries (Q1, median, Q3) of selected covariates.
# By geography
covs %>%
  group_by(geography) %>%
  summarise(across(c(med_age,IMD,prop_minority, prop_male_all), get_quants))
# Output when last run:
# geography med_age IMD prop_minority prop_male_all
# 1 London Borough 33, 34.5, 36 13.89, 20.4, 26.46 0.31, 0.39, 0.47 0.49, 0.5, 0.5
# 2 Metropolitan District 35, 39, 41 21.4, 27.2, 30.99 0.04, 0.11, 0.19 0.49, 0.49, 0.5
# 3 Non-metropolitan District 40, 43, 46 10.78, 13.77, 18.38 0.02, 0.04, 0.07 0.49, 0.49, 0.49
# 4 Unitary Authority 35.75, 39.5, 43 12.95, 19.14, 23.87 0.03, 0.06, 0.14 0.49, 0.5, 0.5
# Overall
covs %>%
  summarise(across(c(med_age,IMD,prop_minority, prop_male_all), get_quants))
# Output when last run:
# med_age IMD prop_minority prop_male_all
# 1 37, 41, 45 11.43, 16.11, 22.44 0.03, 0.05, 0.13 0.49, 0.49, 0.5
# Join covariates back onto the shapefile for mapping.
# pop_dens is coerced to numeric here — it is evidently not stored as a plain
# numeric column in the shapefile (NOTE(review): presumably a units/character
# column; confirm against LA_shp_wpops.rds).
regions %>%
  full_join(covs) %>%
  mutate(pop_dens = as.numeric(pop_dens)) -> regions_wcovs

# One choropleth per covariate; log10 colour scales for the strongly
# right-skewed covariates (density, population size, minority proportion).
# FIX: `scale = F` -> `scale = FALSE` throughout (T/F are reassignable
# variables, not reserved words; always spell out logicals).
map_dens <-
  basic_map(regions_wcovs, fill = "pop_dens", scale = FALSE) +
  scale_fill_viridis_c(trans = "log10") +
  labs(fill = "", title = "Population density \n(per KM-squared)") +
  theme(plot.title = element_text(size = 10))
map_pop <-
  basic_map(regions_wcovs, fill = "la_pop", scale = FALSE) +
  scale_fill_viridis_c(trans = "log10") +
  labs(fill = "", title = "Population size") +
  theme(plot.title = element_text(size = 10))
map_imd <-
  basic_map(regions_wcovs, fill = "IMD", scale = FALSE) +
  labs(fill = "", title = "Index of Multiple Deprivation \n(median score)") +
  theme(plot.title = element_text(size = 10))
map_mino <-
  basic_map(regions_wcovs, fill = "prop_minority", scale = FALSE) +
  labs(fill = "", title = "Proportion of minority \nethnicities in population") +
  scale_fill_viridis_c(trans = "log10") +
  theme(plot.title = element_text(size = 10))
map_age <-
  basic_map(regions_wcovs, fill = "med_age", scale = FALSE) +
  labs(fill = "", title = "Median age") +
  theme(plot.title = element_text(size = 10))
map_sex <-
  basic_map(regions_wcovs, fill = "prop_male_all", scale = FALSE) +
  labs(fill = "", title = "Proportion male") +
  theme(plot.title = element_text(size = 10))

# Composite 2x2 covariate figure, standalone sex map, and a 1x3 variant
png(here::here(figdir, "map_covariates.png"), height = 2000, width = 2000, res = 300)
(map_age + map_pop) /
  (map_mino + map_imd)
dev.off()
png(here::here(figdir, "map_sex.png"), height = 1000, width = 1000, res = 300)
map_sex
dev.off()
png(here::here(figdir, "map_covariates3.png"), height = 600, width = 1800, res = 150)
(map_age + map_mino + map_imd)
dev.off()
# ---------------------------------------------------------------------------- #
# Crude per-LTLA totals over the whole period, expressed per 100,000
# (population taken as the mean of the weekly la_pop values).
# FIX: T -> TRUE (T is reassignable; always spell out logicals).
deaths[[1]] %>%
  group_by(lad19nm) %>%
  summarise(N = sum(n, na.rm = TRUE),
            pop = mean(la_pop),
            rate = N * 1e5 / pop) -> death_rates
summary(death_rates$rate)
# Output when last run:
# Min. 1st Qu.  Median    Mean 3rd Qu.    Max.
# 10.34   71.42   88.84   90.56  112.06  196.34
hist(death_rates$rate, breaks = 40)

cases[[1]] %>%
  group_by(lad19nm) %>%
  summarise(N = sum(n, na.rm = TRUE),
            pop = mean(la_pop),
            rate = N * 1e5 / pop) -> case_rates
summary(case_rates$rate)
# Output when last run:
# Min. 1st Qu.  Median    Mean 3rd Qu.    Max.
# 71.78  298.29  379.17  403.77  491.51 1039.74
hist(case_rates$rate, breaks = 40)
################################################################################
################################################################################
| /code/main/02_descriptive.R | no_license | esnightingale/covid_deaths_spatial | R | false | false | 11,064 | r | ################################################################################
# Description: Summarise and visualise death and case time series per LTLA.
# Produce descriptive figures for paper.
#
# Author: Emily S Nightingale
# Date created: 30/09/2020
#
################################################################################
################################################################################
################################################################################
# SETUP
################################################################################

# Output directory for the descriptive figures produced below
figdir <- "figures/descriptive"

# LTLA-week-aggregated observed deaths, expected deaths and LTLA covariates
deaths <- readRDS(here::here("data", "aggregated", "deaths.rds"))
cases <- readRDS(here::here("data", "aggregated", "cases.rds"))

# BUG FIX: the cases element previously pointed at `deaths`
# (list(deaths = deaths, cases = deaths)), so anything iterating over
# data.list silently analysed the death series twice.
data.list <- list(deaths = deaths, cases = cases)

# English LTLAs only (lad19cd codes begin with "E"); keep shapefile geometry
# plus a plain data frame copy for non-spatial joins.
regions <- readRDS(here::here("data", "LA_shp_wpops.rds")) %>%
  dplyr::filter(grepl("^E", lad19cd))
regions.df <- sf::st_drop_geometry(regions)
################################################################################
# DESCRIPTIVE SUMMARIES/PLOTS
################################################################################

# Plain map of LTLA boundaries.
# BUG FIX: geom_sf() was previously given both `col = "grey"` and
# `colour = NA` — these are aliases for the same aesthetic, so ggplot2 sees a
# duplicated parameter. Keep a single grey boundary colour.
ggplot() +
  geom_sf(data = regions, aes(geometry = geometry), fill = NA, colour = "grey") +
  map_theme()
ggsave(here::here(figdir, "ltla_map.png"), height = 6, width = 5)

## MAP TOTALS ##

# Date range of the case data (kept for optional subtitle labelling below)
period <- cases[[3]][[1]]

# Total confirmed cases per LTLA over the whole period, mapped per 100,000
cases[[1]] %>%
  group_by(lad19cd) %>%
  summarise(n = sum(n, na.rm = TRUE)) %>%
  full_join(regions) %>%
  basic_map(fill = "n", rate1e5 = TRUE, scale = FALSE) -> case_map
# labs(title = "Confirmed cases per 100,000",
#      subtitle = paste(period[1],"-",period[2])) +

# Total deaths per LTLA over the whole period, mapped per 100,000
deaths[[1]] %>%
  group_by(lad19cd) %>%
  summarise(n = sum(n, na.rm = TRUE)) %>%
  full_join(regions) %>%
  basic_map(fill = "n", rate1e5 = TRUE, scale = FALSE) -> death_map
# labs(title = "Deaths per 100,000") +

# Side-by-side case/death maps (patchwork)
png(here::here(figdir, "map_totals.png"), height = 1000, width = 1500, res = 150)
case_map + death_map
dev.off()
## TIME SERIES - TOTALS ##
period <- deaths$breaks[[1]]
deaths[[1]] %>%
group_by(w,week) %>%
summarise(n = sum(n, na.rm= T),
tot_pop = sum(la_pop)) %>%
ggplot(aes(week, n*1e5/tot_pop), col = "grey", lwd = 1.2) +
geom_line() +
labs(subtitle = "COVID-19-related deaths in England, by week of death",
x = "",
y = "Rate per 100,000") +
geom_vline(xintercept = ymd("2020-03-23"), lty = "dashed", lwd = 0.2) +
annotate("text", x = ymd("2020-03-22"), y = 10, label = "National lockdown enforced", cex = 2, hjust = "right") +
scale_x_date(date_minor_breaks = "1 week",
date_breaks = "1 month",
date_labels = "%b",
limits = period) +
theme(legend.position = c(0.16,0.60),
legend.text=element_text(size=8),
legend.title=element_text(size=8)) -> ts_deaths
cases[[1]] %>%
group_by(w,week) %>%
summarise(n = sum(n, na.rm= T),
tot_pop = sum(la_pop)) %>%
ggplot() +
geom_line(aes(week, n*1e5/tot_pop), col = "grey", lwd = 1.2) +
geom_vline(xintercept = ymd("2020-03-12"), lty = "dashed", lwd = 0.2) +
annotate("text", x = ymd("2020-03-13"), y = 25, label = "Community testing halted", cex = 2, hjust = "left",) +
geom_vline(xintercept = ymd("2020-03-23"), lty = "dashed", lwd = 0.2) +
annotate("text", x = ymd("2020-03-24"), y = 35, label = "National lockdown enforced", cex = 2, hjust = "left") +
geom_vline(xintercept = ymd("2020-04-15"), lty = "dashed", lwd = 0.2) +
annotate("text", x = ymd("2020-04-16"), y = 45, label = "P2 available to care home residents and staff", cex = 2, hjust = "left") +
geom_vline(xintercept = ymd("2020-05-18"), lty = "dashed", lwd = 0.2) +
annotate("text", x = ymd("2020-05-19"), y = 55, label = "P2 available to all symptomatic cases", cex = 2, hjust = "left") +
labs(subtitle = "Confirmed COVID-19 cases in England, by week of specimen",
x = "Calendar week",
y = "Rate per 100,000") +
scale_x_date(date_minor_breaks = "1 week",
date_breaks = "1 month",
date_labels = "%b",
limits = period) -> ts_cases
png(here::here(figdir,"fig1A.png"), height = 1200, width = 2000, res = 150)
(ts_cases | case_map ) / (ts_deaths | death_map) + plot_layout(widths = c(2,1))
dev.off()
## TIME SERIES - BY GEOGRAPHY ##

# Plotting window: full range of epidemic weeks in the death data
period <- deaths$breaks[[1]]

# Weekly death rate per 100,000, split by LTLA geography type
deaths[[1]] %>%
  group_by(w, week, geography) %>%
  summarise(n = sum(n, na.rm = TRUE),
            geog_pop = sum(la_pop)) %>%
  ggplot(aes(week, n * 1e5 / geog_pop, group = geography, col = geography)) +
  geom_line() +
  labs(subtitle = "COVID19-related deaths in England, by geography and week of death",
       x = "",
       y = "Rate per 100,000",
       colour = "Geography type") +
  geom_vline(xintercept = ymd("2020-03-23"), lty = "dashed", lwd = 0.2) +
  annotate("text", x = ymd("2020-03-22"), y = 15, label = "National lockdown enforced", cex = 2, hjust = "right") +
  scale_x_date(date_minor_breaks = "1 week",
               date_breaks = "1 month",
               date_labels = "%b",
               limits = period) +
  theme(legend.position = c(0.16, 0.60),
        legend.text = element_text(size = 8),
        legend.title = element_text(size = 8)) -> ts_geog_deaths

# Weekly confirmed-case rate per 100,000 by geography type, with policy
# annotations. FIX: removed a stray trailing comma in the first annotate().
cases[[1]] %>%
  group_by(w, week, geography) %>%
  summarise(n = sum(n, na.rm = TRUE),
            geog_pop = sum(la_pop)) %>%
  ggplot() +
  geom_line(aes(week, n * 1e5 / geog_pop, group = geography, col = geography)) +
  geom_vline(xintercept = ymd("2020-03-12"), lty = "dashed", lwd = 0.2) +
  annotate("text", x = ymd("2020-03-13"), y = 40, label = "Community testing halted", cex = 2, hjust = "left") +
  geom_vline(xintercept = ymd("2020-03-23"), lty = "dashed", lwd = 0.2) +
  annotate("text", x = ymd("2020-03-24"), y = 45, label = "National lockdown enforced", cex = 2, hjust = "left") +
  geom_vline(xintercept = ymd("2020-04-15"), lty = "dashed", lwd = 0.2) +
  annotate("text", x = ymd("2020-04-16"), y = 55, label = "P2 available to care home residents and staff", cex = 2, hjust = "left") +
  geom_vline(xintercept = ymd("2020-05-18"), lty = "dashed", lwd = 0.2) +
  annotate("text", x = ymd("2020-05-19"), y = 60, label = "P2 available to all symptomatic cases", cex = 2, hjust = "left") +
  labs(subtitle = "Confirmed COVID-19 cases in England, by geography and week of specimen",
       x = "Calendar week",
       y = "Rate per 100,000") +
  guides(col = "none") +
  scale_x_date(date_minor_breaks = "1 week",
               date_breaks = "1 month",
               date_labels = "%b",
               limits = period) -> ts_geog_cases

# Composite Figure 1 (time series + maps), written as both png and tiff.
# BUG FIX: previously png() and tiff() were both opened before plotting with a
# single dev.off(), so the figure only reached the tiff device and the png
# device was left open (fig1.png never written; device leak). Build the plot
# once and write each format inside its own device block.
fig1 <- (ts_geog_deaths | death_map) / (ts_geog_cases | case_map) +
  plot_layout(widths = c(2, 1)) +
  plot_annotation(tag_levels = 'A')

png(here::here(figdir, "fig1.png"), height = 1200, width = 2000, res = 150)
print(fig1)
dev.off()

tiff(here::here("figures", "paper", "fig1.tif"), height = 1500, width = 2200, res = 200)
print(fig1)
dev.off()
# ---------------------------------------------------------------------------- #
## GEOGRAPHY ##
png(here::here(figdir,"map_geog.png"), height = 800, width = 900, res = 150)
regions %>%
basic_map(fill = "geography") +
scale_fill_discrete()
dev.off()
# ---------------------------------------------------------------------------- #
## COVARIATES ##
cov_names <- c("med_age","pop_dens", "IMD", "prop_minority")
deaths[[1]] %>%
group_by(geography, lad19cd) %>%
summarise_at(all_of(cov_names), .funs = base::mean) %>%
full_join(dplyr::select(regions.df, lad19cd, med_age, prop_male_all)) %>%
ungroup() -> covs
# Summarise covariates
get_quants <- function(var){ paste(round(quantile(var, p = c(0.25,0.5,0.75)),2), collapse = ", ")}
# By geography
covs %>%
group_by(geography) %>%
summarise(across(c(med_age,IMD,prop_minority, prop_male_all), get_quants))
# geography med_age IMD prop_minority prop_male_all
# 1 London Borough 33, 34.5, 36 13.89, 20.4, 26.46 0.31, 0.39, 0.47 0.49, 0.5, 0.5
# 2 Metropolitan District 35, 39, 41 21.4, 27.2, 30.99 0.04, 0.11, 0.19 0.49, 0.49, 0.5
# 3 Non-metropolitan District 40, 43, 46 10.78, 13.77, 18.38 0.02, 0.04, 0.07 0.49, 0.49, 0.49
# 4 Unitary Authority 35.75, 39.5, 43 12.95, 19.14, 23.87 0.03, 0.06, 0.14 0.49, 0.5, 0.5
# Overall
covs %>%
summarise(across(c(med_age,IMD,prop_minority, prop_male_all), get_quants))
# med_age IMD prop_minority prop_male_all
# 1 37, 41, 45 11.43, 16.11, 22.44 0.03, 0.05, 0.13 0.49, 0.49, 0.5
regions %>%
full_join(covs) %>%
mutate(pop_dens = as.numeric(pop_dens)) -> regions_wcovs
map_dens <-
basic_map(regions_wcovs, fill = "pop_dens", scale = F) +
scale_fill_viridis_c(trans = "log10") +
labs(fill = "", title = "Population density \n(per KM-squared)") +
theme(plot.title = element_text(size=10))
map_pop <-
basic_map(regions_wcovs, fill = "la_pop", scale = F) +
scale_fill_viridis_c(trans = "log10") +
labs(fill = "", title = "Population size") +
theme(plot.title = element_text(size=10))
map_imd <-
basic_map(regions_wcovs, fill = "IMD", scale = F) +
labs(fill = "", title = "Index of Multiple Deprivation \n(median score)") +
theme(plot.title = element_text(size=10))
map_mino <-
basic_map(regions_wcovs, fill = "prop_minority", scale = F) +
labs(fill = "", title = "Proportion of minority \nethnicities in population") +
scale_fill_viridis_c(trans = "log10") +
theme(plot.title = element_text(size=10))
map_age <-
basic_map(regions_wcovs, fill = "med_age", scale = F) +
labs(fill = "", title = "Median age") +
theme(plot.title = element_text(size=10))
map_sex <-
basic_map(regions_wcovs, fill = "prop_male_all", scale = F) +
labs(fill = "", title = "Proportion male") +
theme(plot.title = element_text(size=10))
png(here::here(figdir,"map_covariates.png"), height = 2000, width = 2000, res = 300)
(map_age + map_pop) /
(map_mino + map_imd)
dev.off()
png(here::here(figdir,"map_sex.png"), height = 1000, width = 1000, res = 300)
map_sex
dev.off()
png(here::here(figdir,"map_covariates3.png"), height = 600, width = 1800, res = 150)
(map_age + map_mino + map_imd)
dev.off()
# ---------------------------------------------------------------------------- #
deaths[[1]] %>%
group_by(lad19nm) %>%
summarise(N = sum(n, na.rm = T),
pop = mean(la_pop),
rate = N*1e5/pop) -> death_rates
summary(death_rates$rate)
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 10.34 71.42 88.84 90.56 112.06 196.34
hist(death_rates$rate, breaks = 40)
cases[[1]] %>%
group_by(lad19nm) %>%
summarise(N = sum(n, na.rm = T),
pop = mean(la_pop),
rate = N*1e5/pop) -> case_rates
summary(case_rates$rate)
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 71.78 298.29 379.17 403.77 491.51 1039.74
hist(case_rates$rate, breaks = 40)
################################################################################
################################################################################
|
# Developer workflow script: load, test, document and build the package, then
# run reverse-dependency checks.
# FIX: use library() rather than require() for loading — require() returns
# FALSE instead of erroring when the package is missing, so a broken setup
# would previously continue silently.
library(devtools)
library(testthat)
options(error = NULL)
load_all()
test()
roxygen2::roxygenize()
build_vignettes()

### check reverse dependencies:
#revdep()
devtools::revdep_check(libpath = "../revdep", check_dir = "../revdep_checks")
#devtools::install.packages("brms", lib = "../revdep")
devtools::revdep_check_resume()
devtools::revdep_check_save_summary()
devtools::revdep_check_print_problems()
devtools::revdep_maintainers()
| /development.R | no_license | zippeurfou/bridgesampling | R | false | false | 438 | r | require(devtools)
require(testthat)
options(error = NULL)
load_all()
test()
roxygen2::roxygenize()
build_vignettes()
### check reverse dependencies:
#revdep()
devtools::revdep_check(libpath = "../revdep", check_dir = "../revdep_checks")
#devtools::install.packages("brms", lib = "../revdep")
devtools::revdep_check_resume()
devtools::revdep_check_save_summary()
devtools::revdep_check_print_problems()
devtools::revdep_maintainers()
|
#' @title Print a summary.rcorex object
#' @description Print method for a summary.rcorex object. Writes a short,
#'   human-readable report of the model call, data dimensions, latent-variable
#'   search parameters, outcome state, iteration count and final total
#'   correlation (TCS) to the console.
#' @param x An object of class summary.rcorex
#' @param ... Not used
#' @return \code{x}, invisibly (standard print-method convention).
#' @export
#'
print.summary.rcorex <- function(x, ...) {
  # Model call and dimensions of the observed data
  cat("rcorex model call: \n")
  print(x$call)
  cat(paste0("Data dimensions: ", x$datadim[1], " samples (rows) by ", x$datadim[2], " variables (columns).\n"))
  # Latent-variable search settings and model outcome
  cat(paste0("Latent variable parameters: rcorex searched for ", x$latentpars[1],
             " hidden variables with ", x$latentpars[2], " possible states.\n"))
  cat(paste0("Model outcome state: ", x$state, "\n"))
  # FIX: corrected typo in printed output ("Numer" -> "Number")
  cat(paste0("Number of iterations performed: ", x$iters, "\n"))
  cat(paste0("Total TCS at final iteration: ", format(x$tcs, digits = 5), "\n"))
  # Print methods conventionally return their argument invisibly
  invisible(x)
}
| /R/print.summary.rcorex.R | permissive | jpkrooney/rcorex | R | false | false | 811 | r | #' @title Print a summary.rcorex object
#' @description Print method for a summary.rcorex object
#' @param x A object of class summary.rcorex
#' @param ... Not used
#' @export
#'
print.summary.rcorex <- function(x, ...) {
  # Model call and dimensions of the observed data
  cat("rcorex model call: \n")
  print(x$call)
  cat(paste0("Data dimensions: ", x$datadim[1], " samples (rows) by ", x$datadim[2], " variables (columns).\n"))
  # Latent-variable search settings and model outcome
  cat(paste0("Latent variable parameters: rcorex searched for ", x$latentpars[1],
             " hidden variables with ", x$latentpars[2], " possible states.\n"))
  cat(paste0("Model outcome state: ", x$state, "\n"))
  # FIX: corrected typo in printed output ("Numer" -> "Number")
  cat(paste0("Number of iterations performed: ", x$iters, "\n"))
  cat(paste0("Total TCS at final iteration: ", format(x$tcs, digits = 5), "\n"))
  # Print methods conventionally return their argument invisibly
  invisible(x)
}
|
#' @title Histogram/density based threshold selection
#'
#' @description Package-level documentation for the \pkg{threshold} package:
#'   threshold selection based on histogram/density estimates (per the title;
#'   see the individual function help pages for specifics).
#'
#' @docType package
#' @name threshold
NULL
| /R/threshold-package.R | no_license | benmack/threshold | R | false | false | 127 | r | #' @title Histogram/density based threshold selection
#'
#' @description ...
#'
#' @docType package
#' @name threshold
NULL
|
#' Create R code for a dm object
#'
#' `dm_paste` takes an existing `dm` and produces the code necessary for its creation
#'
#' @inheritParams dm_add_pk
#' @param select Boolean, default `FALSE`. If `TRUE`, also produce code for reducing each table to the columns present in the `dm`.
#' @param tab_width Indentation width for code from the second line onwards
#'
#' @details At the very least (if no keys exist in the given [`dm`]) a `dm()` statement is produced that -- when executed --
#' produces the same `dm`. In addition, the code for setting the existing primary keys as well as the relations between the
#' tables is produced. If `select = TRUE`, statements are included to select the respective columns of each table of the `dm` (useful if
#' only a subset of the columns of the original tables is used for the `dm`).
#'
#' Note that the tables of the existing `dm` are assumed to be available in the global environment under their names
#' within the `dm`.
#'
#' @return The `dm` itself, invisibly; the generated code is printed to the console as a side effect.
#'
#' @export
#' @examples
#' dm_nycflights13() %>%
#'   dm_paste()
#'
#' dm_nycflights13() %>%
#'   dm_paste(select = TRUE)
dm_paste <- function(dm, select = FALSE, tab_width = 2) {
  check_not_zoomed(dm)
  check_no_filter(dm)
  # we assume the tables exist and have the necessary columns
  # code for including the tables: dm(tbl1, tbl2, ...)
  code <- glue("dm({paste(tick_if_needed({src_tbls(dm)}), collapse = ', ')})")
  # indentation prefix prepended to each piped-on statement
  tab <- paste0(rep(" ", tab_width), collapse = "")
  if (select) {
    # adding code for selection of columns: one dm_select() call per table,
    # listing that table's current columns (ticked if not syntactic names)
    tbl_select <- tibble(tbl_name = src_tbls(dm), tbls = dm_get_tables_impl(dm)) %>%
      mutate(cols = map(tbls, colnames)) %>%
      mutate(code = map2_chr(
        tbl_name,
        cols,
        ~ glue("{tab}dm_select({..1}, {paste0(tick_if_needed(..2), collapse = ', ')})")
      ))
    # empty dm -> no dm_select() lines at all (character(0), not an empty string)
    code_select <- if (nrow(tbl_select)) summarize(tbl_select, code = glue_collapse(code, sep = " %>%\n")) %>% pull() else character()
    code <- glue_collapse(c(code, code_select), sep = " %>%\n")
  }
  # adding code for establishing PKs: one dm_add_pk() per primary key
  # FIXME: this will fail with compound keys
  tbl_pks <- dm_get_all_pks_impl(dm) %>%
    mutate(code = glue("{tab}dm_add_pk({table}, {pk_col})"))
  code_pks <- if (nrow(tbl_pks)) summarize(tbl_pks, code = glue_collapse(code, sep = " %>%\n")) %>% pull() else character()
  # adding code for establishing FKs: one dm_add_fk() per foreign key
  # FIXME: this will fail with compound keys
  tbl_fks <- dm_get_all_fks_impl(dm) %>%
    mutate(code = glue("{tab}dm_add_fk({child_table}, {child_fk_cols}, {parent_table})"))
  code_fks <- if (nrow(tbl_fks)) summarize(tbl_fks, code = glue_collapse(code, sep = " %>%\n")) %>% pull() else character()
  # without "\n" in the end it looks weird when a warning is issued
  cat(glue_collapse(c(code, code_pks, code_fks), sep = " %>%\n"), "\n")
  # return the dm unchanged so the call can sit in the middle of a pipe
  invisible(dm)
}
| /R/paste.R | permissive | jmjohns9/dm | R | false | false | 2,818 | r | #' Create R code for a dm object
#'
#' `dm_paste` takes an existing `dm` and produces the code necessary for its creation
#'
#' @inheritParams dm_add_pk
#' @param select Boolean, default `FALSE`. If `TRUE` will try to produce code for reducing to necessary columns.
#' @param tab_width Indentation width for code from the second line onwards
#'
#' @details At the very least (if no keys exist in the given [`dm`]) a `dm()` statement is produced that -- when executed --
#' produces the same `dm`. In addition, the code for setting the existing primary keys as well as the relations between the
#' tables is produced. If `select = TRUE`, statements are included to select the respective columns of each table of the `dm` (useful if
#' only a subset of the columns of the original tables is used for the `dm`).
#'
#' Mind, that it is assumed, that the tables of the existing `dm` are available in the global environment under their names
#' within the `dm`.
#'
#' @return Code for producing the given `dm`.
#'
#' @export
#' @examples
#' dm_nycflights13() %>%
#' dm_paste()
#'
#' dm_nycflights13() %>%
#' dm_paste(select = TRUE)
dm_paste <- function(dm, select = FALSE, tab_width = 2) {
check_not_zoomed(dm)
check_no_filter(dm)
# we assume the tables exist and have the necessary columns
# code for including the tables
code <- glue("dm({paste(tick_if_needed({src_tbls(dm)}), collapse = ', ')})")
tab <- paste0(rep(" ", tab_width), collapse = "")
if (select) {
# adding code for selection of columns
tbl_select <- tibble(tbl_name = src_tbls(dm), tbls = dm_get_tables_impl(dm)) %>%
mutate(cols = map(tbls, colnames)) %>%
mutate(code = map2_chr(
tbl_name,
cols,
~ glue("{tab}dm_select({..1}, {paste0(tick_if_needed(..2), collapse = ', ')})")
))
code_select <- if (nrow(tbl_select)) summarize(tbl_select, code = glue_collapse(code, sep = " %>%\n")) %>% pull() else character()
code <- glue_collapse(c(code, code_select), sep = " %>%\n")
}
# adding code for establishing PKs
# FIXME: this will fail with compound keys
tbl_pks <- dm_get_all_pks_impl(dm) %>%
mutate(code = glue("{tab}dm_add_pk({table}, {pk_col})"))
code_pks <- if (nrow(tbl_pks)) summarize(tbl_pks, code = glue_collapse(code, sep = " %>%\n")) %>% pull() else character()
# adding code for establishing FKs
# FIXME: this will fail with compound keys
tbl_fks <- dm_get_all_fks_impl(dm) %>%
mutate(code = glue("{tab}dm_add_fk({child_table}, {child_fk_cols}, {parent_table})"))
code_fks <- if (nrow(tbl_fks)) summarize(tbl_fks, code = glue_collapse(code, sep = " %>%\n")) %>% pull() else character()
# without "\n" in the end it looks weird when a warning is issued
cat(glue_collapse(c(code, code_pks, code_fks), sep = " %>%\n"), "\n")
invisible(dm)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codeartifact_service.R
\name{codeartifact}
\alias{codeartifact}
\title{CodeArtifact}
\usage{
codeartifact(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.}
}
\description{
AWS CodeArtifact is a fully managed artifact repository compatible with
language-native package managers and build tools such as npm, Apache
Maven, and pip. You can use CodeArtifact to share packages with
development teams and pull packages. Packages can be pulled from both
public and CodeArtifact repositories. You can also create an upstream
relationship between a CodeArtifact repository and another repository,
which effectively merges their contents from the point of view of a
package manager client.
\strong{AWS CodeArtifact Components}
Use the information in this guide to help you work with the following
CodeArtifact components:
\itemize{
\item \strong{Repository}: A CodeArtifact repository contains a set of \href{https://docs.aws.amazon.com/codeartifact/latest/ug/welcome.html#welcome-concepts-package-version}{package versions},
each of which maps to a set of assets, or files. Repositories are
polyglot, so a single repository can contain packages of any
supported type. Each repository exposes endpoints for fetching and
publishing packages using tools like the \strong{\code{npm}} CLI, the Maven
CLI ( \strong{\code{mvn}} ), and \strong{\code{pip}} . You can create up to 100
repositories per AWS account.
\item \strong{Domain}: Repositories are aggregated into a higher-level entity
known as a \emph{domain}. All package assets and metadata are stored in
the domain, but are consumed through repositories. A given package
asset, such as a Maven JAR file, is stored once per domain, no
matter how many repositories it\'s present in. All of the assets and
metadata in a domain are encrypted with the same customer master key
(CMK) stored in AWS Key Management Service (AWS KMS).
Each repository is a member of a single domain and can\'t be moved
to a different domain.
The domain allows organizational policy to be applied across
multiple repositories, such as which accounts can access
repositories in the domain, and which public repositories can be
used as sources of packages.
Although an organization can have multiple domains, we recommend a
single production domain that contains all published artifacts so
that teams can find and share packages across their organization.
\item \strong{Package}: A \emph{package} is a bundle of software and the metadata
required to resolve dependencies and install the software.
CodeArtifact supports
\href{https://docs.aws.amazon.com/codeartifact/latest/ug/using-npm.html}{npm},
\href{https://docs.aws.amazon.com/codeartifact/latest/ug/using-python.html}{PyPI},
and
\href{https://docs.aws.amazon.com/codeartifact/latest/ug/using-maven}{Maven}
package formats.
In CodeArtifact, a package consists of:
\itemize{
\item A \emph{name} (for example, \code{webpack} is the name of a popular npm
package)
\item An optional namespace (for example, \verb{@types} in \verb{@types/node})
\item A set of versions (for example, \verb{1.0.0}, \verb{1.0.1}, \verb{1.0.2}, etc.)
\item Package-level metadata (for example, npm tags)
}
\item \strong{Package version}: A version of a package, such as
\verb{@types/node 12.6.9}. The version number format and semantics vary
for different package formats. For example, npm package versions
must conform to the \href{https://semver.org/}{Semantic Versioning specification}. In CodeArtifact, a package
version consists of the version identifier, metadata at the package
version level, and a set of assets.
\item \strong{Upstream repository}: One repository is \emph{upstream} of another
when the package versions in it can be accessed from the repository
endpoint of the downstream repository, effectively merging the
contents of the two repositories from the point of view of a client.
CodeArtifact allows creating an upstream relationship between two
repositories.
\item \strong{Asset}: An individual file stored in CodeArtifact associated with
a package version, such as an npm \code{.tgz} file or Maven POM and JAR
files.
}
CodeArtifact supports these operations:
\itemize{
\item \code{AssociateExternalConnection}: Adds an existing external connection
to a repository.
\item \code{CopyPackageVersions}: Copies package versions from one repository
to another repository in the same domain.
\item \code{CreateDomain}: Creates a domain
\item \code{CreateRepository}: Creates a CodeArtifact repository in a domain.
\item \code{DeleteDomain}: Deletes a domain. You cannot delete a domain that
contains repositories.
\item \code{DeleteDomainPermissionsPolicy}: Deletes the resource policy that is
set on a domain.
\item \code{DeletePackageVersions}: Deletes versions of a package. After a
package has been deleted, it can be republished, but its assets and
metadata cannot be restored because they have been permanently
removed from storage.
\item \code{DeleteRepository}: Deletes a repository.
\item \code{DeleteRepositoryPermissionsPolicy}: Deletes the resource policy
that is set on a repository.
\item \code{DescribeDomain}: Returns a \code{DomainDescription} object that contains
information about the requested domain.
\item \code{DescribePackageVersion}: Returns a
\href{https://docs.aws.amazon.com/codeartifact/latest/APIReference/API_PackageVersionDescription.html}{PackageVersionDescription}
object that contains details about a package version.
\item \code{DescribeRepository}: Returns a \code{RepositoryDescription} object that
contains detailed information about the requested repository.
\item \code{DisposePackageVersions}: Disposes versions of a package. A package
version with the status \code{Disposed} cannot be restored because its
assets have been permanently removed from storage.
\item \code{DisassociateExternalConnection}: Removes an existing external
connection from a repository.
\item \code{GetAuthorizationToken}: Generates a temporary authorization token
for accessing repositories in the domain. The token expires after the
authorization period has passed. The default authorization period is
12 hours and can be customized to any length with a maximum of 12
hours.
\item \code{GetDomainPermissionsPolicy}: Returns the policy of a resource that
is attached to the specified domain.
\item \code{GetPackageVersionAsset}: Returns the contents of an asset that is
in a package version.
\item \code{GetPackageVersionReadme}: Gets the readme file or descriptive text
for a package version.
\item \code{GetRepositoryEndpoint}: Returns the endpoint of a repository for a
specific package format. A repository has one endpoint for each
package format:
\itemize{
\item \code{npm}
\item \code{pypi}
\item \code{maven}
}
\item \code{GetRepositoryPermissionsPolicy}: Returns the resource policy that
is set on a repository.
\item \code{ListDomains}: Returns a list of \code{DomainSummary} objects. Each
returned \code{DomainSummary} object contains information about a domain.
\item \code{ListPackages}: Lists the packages in a repository.
\item \code{ListPackageVersionAssets}: Lists the assets for a given package
version.
\item \code{ListPackageVersionDependencies}: Returns a list of the direct
dependencies for a package version.
\item \code{ListPackageVersions}: Returns a list of package versions for a
specified package in a repository.
\item \code{ListRepositories}: Returns a list of repositories owned by the AWS
account that called this method.
\item \code{ListRepositoriesInDomain}: Returns a list of the repositories in a
domain.
\item \code{PutDomainPermissionsPolicy}: Attaches a resource policy to a
domain.
\item \code{PutRepositoryPermissionsPolicy}: Sets the resource policy on a
repository that specifies permissions to access it.
\item \code{UpdatePackageVersionsStatus}: Updates the status of one or more
versions of a package.
\item \code{UpdateRepository}: Updates the properties of a repository.
}
}
\section{Service syntax}{
\preformatted{svc <- codeartifact(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string"
),
endpoint = "string",
region = "string"
)
)
}
}
\section{Operations}{
\tabular{ll}{
\link[=codeartifact_associate_external_connection]{associate_external_connection} \tab Adds an existing external connection to a repository \cr
\link[=codeartifact_copy_package_versions]{copy_package_versions} \tab Copies package versions from one repository to another repository in the same domain \cr
\link[=codeartifact_create_domain]{create_domain} \tab Creates a domain \cr
\link[=codeartifact_create_repository]{create_repository} \tab Creates a repository \cr
\link[=codeartifact_delete_domain]{delete_domain} \tab Deletes a domain \cr
\link[=codeartifact_delete_domain_permissions_policy]{delete_domain_permissions_policy} \tab Deletes the resource policy set on a domain \cr
\link[=codeartifact_delete_package_versions]{delete_package_versions} \tab Deletes one or more versions of a package \cr
\link[=codeartifact_delete_repository]{delete_repository} \tab Deletes a repository \cr
\link[=codeartifact_delete_repository_permissions_policy]{delete_repository_permissions_policy} \tab Deletes the resource policy that is set on a repository \cr
\link[=codeartifact_describe_domain]{describe_domain} \tab Returns a DomainDescription object that contains information about the requested domain \cr
\link[=codeartifact_describe_package_version]{describe_package_version} \tab Returns a PackageVersionDescription object that contains information about the requested package version \cr
\link[=codeartifact_describe_repository]{describe_repository} \tab Returns a RepositoryDescription object that contains detailed information about the requested repository \cr
\link[=codeartifact_disassociate_external_connection]{disassociate_external_connection} \tab Removes an existing external connection from a repository \cr
\link[=codeartifact_dispose_package_versions]{dispose_package_versions} \tab Deletes the assets in package versions and sets the package versions' status to Disposed \cr
\link[=codeartifact_get_authorization_token]{get_authorization_token} \tab Generates a temporary authentication token for accessing repositories in the domain \cr
\link[=codeartifact_get_domain_permissions_policy]{get_domain_permissions_policy} \tab Returns the resource policy attached to the specified domain \cr
\link[=codeartifact_get_package_version_asset]{get_package_version_asset} \tab Returns an asset (or file) that is in a package \cr
\link[=codeartifact_get_package_version_readme]{get_package_version_readme} \tab Gets the readme file or descriptive text for a package version \cr
\link[=codeartifact_get_repository_endpoint]{get_repository_endpoint} \tab Returns the endpoint of a repository for a specific package format \cr
\link[=codeartifact_get_repository_permissions_policy]{get_repository_permissions_policy} \tab Returns the resource policy that is set on a repository \cr
\link[=codeartifact_list_domains]{list_domains} \tab Returns a list of DomainSummary objects for all domains owned by the AWS account that makes this call \cr
\link[=codeartifact_list_packages]{list_packages} \tab Returns a list of PackageSummary objects for packages in a repository that match the request parameters \cr
\link[=codeartifact_list_package_version_assets]{list_package_version_assets} \tab Returns a list of AssetSummary objects for assets in a package version \cr
\link[=codeartifact_list_package_version_dependencies]{list_package_version_dependencies} \tab Returns the direct dependencies for a package version \cr
\link[=codeartifact_list_package_versions]{list_package_versions} \tab Returns a list of PackageVersionSummary objects for package versions in a repository that match the request parameters\cr
\link[=codeartifact_list_repositories]{list_repositories} \tab Returns a list of RepositorySummary objects \cr
\link[=codeartifact_list_repositories_in_domain]{list_repositories_in_domain} \tab Returns a list of RepositorySummary objects \cr
\link[=codeartifact_put_domain_permissions_policy]{put_domain_permissions_policy} \tab Sets a resource policy on a domain that specifies permissions to access it \cr
\link[=codeartifact_put_repository_permissions_policy]{put_repository_permissions_policy} \tab Sets the resource policy on a repository that specifies permissions to access it \cr
\link[=codeartifact_update_package_versions_status]{update_package_versions_status} \tab Updates the status of one or more versions of a package \cr
\link[=codeartifact_update_repository]{update_repository} \tab Update the properties of a repository
}
}
\examples{
\dontrun{
svc <- codeartifact()
svc$associate_external_connection(
Foo = 123
)
}
}
| /paws/man/codeartifact.Rd | permissive | jcheng5/paws | R | false | true | 12,964 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codeartifact_service.R
\name{codeartifact}
\alias{codeartifact}
\title{CodeArtifact}
\usage{
codeartifact(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.}
}
\description{
AWS CodeArtifact is a fully managed artifact repository compatible with
language-native package managers and build tools such as npm, Apache
Maven, and pip. You can use CodeArtifact to share packages with
development teams and pull packages. Packages can be pulled from both
public and CodeArtifact repositories. You can also create an upstream
relationship between a CodeArtifact repository and another repository,
which effectively merges their contents from the point of view of a
package manager client.
\strong{AWS CodeArtifact Components}
Use the information in this guide to help you work with the following
CodeArtifact components:
\itemize{
\item \strong{Repository}: A CodeArtifact repository contains a set of \href{https://docs.aws.amazon.com/codeartifact/latest/ug/welcome.html#welcome-concepts-package-version}{package versions},
each of which maps to a set of assets, or files. Repositories are
polyglot, so a single repository can contain packages of any
supported type. Each repository exposes endpoints for fetching and
publishing packages using tools like the \strong{\code{npm}} CLI, the Maven
CLI ( \strong{\code{mvn}} ), and \strong{\code{pip}} . You can create up to 100
repositories per AWS account.
\item \strong{Domain}: Repositories are aggregated into a higher-level entity
known as a \emph{domain}. All package assets and metadata are stored in
the domain, but are consumed through repositories. A given package
asset, such as a Maven JAR file, is stored once per domain, no
matter how many repositories it\'s present in. All of the assets and
metadata in a domain are encrypted with the same customer master key
(CMK) stored in AWS Key Management Service (AWS KMS).
Each repository is a member of a single domain and can\'t be moved
to a different domain.
The domain allows organizational policy to be applied across
multiple repositories, such as which accounts can access
repositories in the domain, and which public repositories can be
used as sources of packages.
Although an organization can have multiple domains, we recommend a
single production domain that contains all published artifacts so
that teams can find and share packages across their organization.
\item \strong{Package}: A \emph{package} is a bundle of software and the metadata
required to resolve dependencies and install the software.
CodeArtifact supports
\href{https://docs.aws.amazon.com/codeartifact/latest/ug/using-npm.html}{npm},
\href{https://docs.aws.amazon.com/codeartifact/latest/ug/using-python.html}{PyPI},
and
\href{https://docs.aws.amazon.com/codeartifact/latest/ug/using-maven}{Maven}
package formats.
In CodeArtifact, a package consists of:
\itemize{
\item A \emph{name} (for example, \code{webpack} is the name of a popular npm
package)
\item An optional namespace (for example, \verb{@types} in \verb{@types/node})
\item A set of versions (for example, \verb{1.0.0}, \verb{1.0.1}, \verb{1.0.2}, etc.)
\item Package-level metadata (for example, npm tags)
}
\item \strong{Package version}: A version of a package, such as
\verb{@types/node 12.6.9}. The version number format and semantics vary
for different package formats. For example, npm package versions
must conform to the \href{https://semver.org/}{Semantic Versioning specification}. In CodeArtifact, a package
version consists of the version identifier, metadata at the package
version level, and a set of assets.
\item \strong{Upstream repository}: One repository is \emph{upstream} of another
when the package versions in it can be accessed from the repository
endpoint of the downstream repository, effectively merging the
contents of the two repositories from the point of view of a client.
CodeArtifact allows creating an upstream relationship between two
repositories.
\item \strong{Asset}: An individual file stored in CodeArtifact associated with
a package version, such as an npm \code{.tgz} file or Maven POM and JAR
files.
}
CodeArtifact supports these operations:
\itemize{
\item \code{AssociateExternalConnection}: Adds an existing external connection
to a repository.
\item \code{CopyPackageVersions}: Copies package versions from one repository
to another repository in the same domain.
\item \code{CreateDomain}: Creates a domain
\item \code{CreateRepository}: Creates a CodeArtifact repository in a domain.
\item \code{DeleteDomain}: Deletes a domain. You cannot delete a domain that
contains repositories.
\item \code{DeleteDomainPermissionsPolicy}: Deletes the resource policy that is
set on a domain.
\item \code{DeletePackageVersions}: Deletes versions of a package. After a
package has been deleted, it can be republished, but its assets and
metadata cannot be restored because they have been permanently
removed from storage.
\item \code{DeleteRepository}: Deletes a repository.
\item \code{DeleteRepositoryPermissionsPolicy}: Deletes the resource policy
that is set on a repository.
\item \code{DescribeDomain}: Returns a \code{DomainDescription} object that contains
information about the requested domain.
\item \code{DescribePackageVersion}: Returns a
\href{https://docs.aws.amazon.com/codeartifact/latest/APIReference/API_PackageVersionDescription.html}{PackageVersionDescription}
object that contains details about a package version.
\item \code{DescribeRepository}: Returns a \code{RepositoryDescription} object that
contains detailed information about the requested repository.
\item \code{DisposePackageVersions}: Disposes versions of a package. A package
version with the status \code{Disposed} cannot be restored because its
assets have been permanently removed from storage.
\item \code{DisassociateExternalConnection}: Removes an existing external
connection from a repository.
\item \code{GetAuthorizationToken}: Generates a temporary authorization token
for accessing repositories in the domain. The token expires after the
authorization period has passed. The default authorization period is
12 hours and can be customized to any length with a maximum of 12
hours.
\item \code{GetDomainPermissionsPolicy}: Returns the policy of a resource that
is attached to the specified domain.
\item \code{GetPackageVersionAsset}: Returns the contents of an asset that is
in a package version.
\item \code{GetPackageVersionReadme}: Gets the readme file or descriptive text
for a package version.
\item \code{GetRepositoryEndpoint}: Returns the endpoint of a repository for a
specific package format. A repository has one endpoint for each
package format:
\itemize{
\item \code{npm}
\item \code{pypi}
\item \code{maven}
}
\item \code{GetRepositoryPermissionsPolicy}: Returns the resource policy that
is set on a repository.
\item \code{ListDomains}: Returns a list of \code{DomainSummary} objects. Each
returned \code{DomainSummary} object contains information about a domain.
\item \code{ListPackages}: Lists the packages in a repository.
\item \code{ListPackageVersionAssets}: Lists the assets for a given package
version.
\item \code{ListPackageVersionDependencies}: Returns a list of the direct
dependencies for a package version.
\item \code{ListPackageVersions}: Returns a list of package versions for a
specified package in a repository.
\item \code{ListRepositories}: Returns a list of repositories owned by the AWS
account that called this method.
\item \code{ListRepositoriesInDomain}: Returns a list of the repositories in a
domain.
\item \code{PutDomainPermissionsPolicy}: Attaches a resource policy to a
domain.
\item \code{PutRepositoryPermissionsPolicy}: Sets the resource policy on a
repository that specifies permissions to access it.
\item \code{UpdatePackageVersionsStatus}: Updates the status of one or more
versions of a package.
\item \code{UpdateRepository}: Updates the properties of a repository.
}
}
\section{Service syntax}{
\preformatted{svc <- codeartifact(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string"
),
endpoint = "string",
region = "string"
)
)
}
}
\section{Operations}{
\tabular{ll}{
\link[=codeartifact_associate_external_connection]{associate_external_connection} \tab Adds an existing external connection to a repository \cr
\link[=codeartifact_copy_package_versions]{copy_package_versions} \tab Copies package versions from one repository to another repository in the same domain \cr
\link[=codeartifact_create_domain]{create_domain} \tab Creates a domain \cr
\link[=codeartifact_create_repository]{create_repository} \tab Creates a repository \cr
\link[=codeartifact_delete_domain]{delete_domain} \tab Deletes a domain \cr
\link[=codeartifact_delete_domain_permissions_policy]{delete_domain_permissions_policy} \tab Deletes the resource policy set on a domain \cr
\link[=codeartifact_delete_package_versions]{delete_package_versions} \tab Deletes one or more versions of a package \cr
\link[=codeartifact_delete_repository]{delete_repository} \tab Deletes a repository \cr
\link[=codeartifact_delete_repository_permissions_policy]{delete_repository_permissions_policy} \tab Deletes the resource policy that is set on a repository \cr
\link[=codeartifact_describe_domain]{describe_domain} \tab Returns a DomainDescription object that contains information about the requested domain \cr
\link[=codeartifact_describe_package_version]{describe_package_version} \tab Returns a PackageVersionDescription object that contains information about the requested package version \cr
\link[=codeartifact_describe_repository]{describe_repository} \tab Returns a RepositoryDescription object that contains detailed information about the requested repository \cr
\link[=codeartifact_disassociate_external_connection]{disassociate_external_connection} \tab Removes an existing external connection from a repository \cr
\link[=codeartifact_dispose_package_versions]{dispose_package_versions} \tab Deletes the assets in package versions and sets the package versions' status to Disposed \cr
\link[=codeartifact_get_authorization_token]{get_authorization_token} \tab Generates a temporary authentication token for accessing repositories in the domain \cr
\link[=codeartifact_get_domain_permissions_policy]{get_domain_permissions_policy} \tab Returns the resource policy attached to the specified domain \cr
\link[=codeartifact_get_package_version_asset]{get_package_version_asset} \tab Returns an asset (or file) that is in a package \cr
\link[=codeartifact_get_package_version_readme]{get_package_version_readme} \tab Gets the readme file or descriptive text for a package version \cr
\link[=codeartifact_get_repository_endpoint]{get_repository_endpoint} \tab Returns the endpoint of a repository for a specific package format \cr
\link[=codeartifact_get_repository_permissions_policy]{get_repository_permissions_policy} \tab Returns the resource policy that is set on a repository \cr
\link[=codeartifact_list_domains]{list_domains} \tab Returns a list of DomainSummary objects for all domains owned by the AWS account that makes this call \cr
\link[=codeartifact_list_packages]{list_packages} \tab Returns a list of PackageSummary objects for packages in a repository that match the request parameters \cr
\link[=codeartifact_list_package_version_assets]{list_package_version_assets} \tab Returns a list of AssetSummary objects for assets in a package version \cr
\link[=codeartifact_list_package_version_dependencies]{list_package_version_dependencies} \tab Returns the direct dependencies for a package version \cr
\link[=codeartifact_list_package_versions]{list_package_versions} \tab Returns a list of PackageVersionSummary objects for package versions in a repository that match the request parameters\cr
\link[=codeartifact_list_repositories]{list_repositories} \tab Returns a list of RepositorySummary objects \cr
\link[=codeartifact_list_repositories_in_domain]{list_repositories_in_domain} \tab Returns a list of RepositorySummary objects \cr
\link[=codeartifact_put_domain_permissions_policy]{put_domain_permissions_policy} \tab Sets a resource policy on a domain that specifies permissions to access it \cr
\link[=codeartifact_put_repository_permissions_policy]{put_repository_permissions_policy} \tab Sets the resource policy on a repository that specifies permissions to access it \cr
\link[=codeartifact_update_package_versions_status]{update_package_versions_status} \tab Updates the status of one or more versions of a package \cr
\link[=codeartifact_update_repository]{update_repository} \tab Update the properties of a repository
}
}
\examples{
\dontrun{
svc <- codeartifact()
svc$associate_external_connection(
Foo = 123
)
}
}
|
/scripts/plots/Java2/Rectangle/187/lines187.R | no_license | seminariosuacj/EyeTracking | R | false | false | 19,821 | r | ||
# Fixture DESCRIPTION files for the checks exercised below.
# D1 is the "good" fixture: Depends only on R, has no Date field, and
# declares both URL and BugReports.
# D2 is the "bad" fixture: Depends on a package, carries a Date field,
# and omits URL and BugReports.
D1 <-
"Package: foo
Title: Foo Package
Maintainer: foo@foofoo.com
Description: What the package does.
Depends: R (>= 2.15)
URL: https://www.foo.com
BugReports: https://www.foo.com/bugs
"
D2 <-
"Package: foo
Title: Foo Package
Maintainer: foo@foofoo.com
Description: What the package does.
Depends: foobar (>= 2.15)
Date: 2018-03-22
"

# Each check receives a state list holding a parsed DESCRIPTION; every
# check must pass for D1 and fail for D2.
test_that("Depends: R is OK", {
  good <- list(description = desc::description$new(text = D1))
  bad <- list(description = desc::description$new(text = D2))
  expect_true(CHECKS$no_description_depends$check(good))
  expect_false(CHECKS$no_description_depends$check(bad))
})

test_that("Date", {
  good <- list(description = desc::description$new(text = D1))
  bad <- list(description = desc::description$new(text = D2))
  expect_true(CHECKS$no_description_date$check(good))
  expect_false(CHECKS$no_description_date$check(bad))
})

test_that("URL", {
  good <- list(description = desc::description$new(text = D1))
  bad <- list(description = desc::description$new(text = D2))
  expect_true(CHECKS$description_url$check(good))
  expect_false(CHECKS$description_url$check(bad))
})

test_that("BugReports", {
  good <- list(description = desc::description$new(text = D1))
  bad <- list(description = desc::description$new(text = D2))
  expect_true(CHECKS$description_bugreports$check(good))
  expect_false(CHECKS$description_bugreports$check(bad))
})
| /tests/testthat/test-description.R | permissive | MangoTheCat/goodpractice | R | false | false | 1,481 | r |
D1 <-
"Package: foo
Title: Foo Package
Maintainer: foo@foofoo.com
Description: What the package does.
Depends: R (>= 2.15)
URL: https://www.foo.com
BugReports: https://www.foo.com/bugs
"
D2 <-
"Package: foo
Title: Foo Package
Maintainer: foo@foofoo.com
Description: What the package does.
Depends: foobar (>= 2.15)
Date: 2018-03-22
"
test_that("Depends: R is OK", {
state <- list(description = desc::description$new(text = D1))
expect_true(
CHECKS$no_description_depends$check(state)
)
state <- list(description = desc::description$new(text = D2))
expect_false(
CHECKS$no_description_depends$check(state)
)
})
test_that("Date", {
state <- list(description = desc::description$new(text = D1))
expect_true(
CHECKS$no_description_date$check(state)
)
state <- list(description = desc::description$new(text = D2))
expect_false(
CHECKS$no_description_date$check(state)
)
})
test_that("URL", {
state <- list(description = desc::description$new(text = D1))
expect_true(
CHECKS$description_url$check(state)
)
state <- list(description = desc::description$new(text = D2))
expect_false(
CHECKS$description_url$check(state)
)
})
test_that("BugReports", {
state <- list(description = desc::description$new(text = D1))
expect_true(
CHECKS$description_bugreports$check(state)
)
state <- list(description = desc::description$new(text = D2))
expect_false(
CHECKS$description_bugreports$check(state)
)
})
|
\name{ok}
\alias{ok}
\title{The unittest package's workhorse function}
\description{Report the test of an expression in TAP format.}
\usage{
ok(test, description)
}
\arguments{
\item{test}{
Expression to be tested. Evaluating to \code{TRUE} is treated as success,
anything else as failure.
}
\item{description}{
Character string describing the test. If a description is not given a
character representation of the test expression will be used.
}
}
\value{
\code{ok()} returns whatever was returned when \code{test} is evaluated.
More importantly it has the side effect of printing the result of the
test in \code{TAP} format.
}
\details{
See \code{\link{unittest}} package documentation.
The \code{unittest.output} option tells unittest where output should be sent.
This is most useful for vignettes, where sending output to
\code{\link{stderr}} separates the unittest output from the vignette itself.
}
\examples{
ok(1==1, "1 equals 1")
ok(1==1)
ok(1==2, "1 equals 2")
ok(all.equal(c(1,2),c(1,2)), "compare vectors")
fn <- function () stop("oops")
ok(fn(), "something with a coding error")
ok(c("Some diagnostic", "messages"), "A failure with diagnostic messages")
## Send unittest output to stderr()
options(unittest.output = stderr())
ok(ut_cmp_equal(4, 5), "4 == 5? Probably not")
## Reset unittest output to default (stdout())
options(unittest.output = NULL)
ok(ut_cmp_equal(4, 5), "4 == 5? Probably not")
\dontshow{
# Clear unittest result log, so our unittest failures don't fail example-building
unittest:::clear_outcomes()
}
}
| /man/ok.Rd | no_license | ravingmantis/unittest | R | false | false | 1,596 | rd | \name{ok}
\alias{ok}
\title{The unittest package's workhorse function}
\description{Report the test of an expression in TAP format.}
\usage{
ok(test, description)
}
\arguments{
\item{test}{
Expression to be tested. Evaluating to \code{TRUE} is treated as success,
anything else as failure.
}
\item{description}{
Character string describing the test. If a description is not given a
character representation of the test expression will be used.
}
}
\value{
\code{ok()} returns whatever was returned when \code{test} is evaluated.
More importantly it has the side effect of printing the result of the
test in \code{TAP} format.
}
\details{
See \code{\link{unittest}} package documentation.
The \code{unittest.output} option tells unittest where output should be sent.
This is most useful for vignettes, where sending output to
\code{\link{stderr}} separates the unittest output from the vignette itself.
}
\examples{
ok(1==1, "1 equals 1")
ok(1==1)
ok(1==2, "1 equals 2")
ok(all.equal(c(1,2),c(1,2)), "compare vectors")
fn <- function () stop("oops")
ok(fn(), "something with a coding error")
ok(c("Some diagnostic", "messages"), "A failure with diagnostic messages")
## Send unittest output to stderr()
options(unittest.output = stderr())
ok(ut_cmp_equal(4, 5), "4 == 5? Probably not")
## Reset unittest output to default (stdout())
options(unittest.output = NULL)
ok(ut_cmp_equal(4, 5), "4 == 5? Probably not")
\dontshow{
# Clear unittest result log, so our unittest failues don't fail example-building
unittest:::clear_outcomes()
}
}
|
## state.R: map the share of people of color (POC) by census block group for
## one county, using ACS 5-year estimates pulled through tidycensus.
rm(list=ls())
##then packages are loaded into the R environment
library(tidyverse)
library(tidycensus)
library(tigris)
library(tmap)
library(sf)
# census_api_key("") ## install personal census API key
##############################################################
## data import and prepping
##############################################################
## define year and region for census data import
yr <- '2016'
cnty <- c("Jefferson")
ST <- "Kentucky"
## import race variables of interest (ACS table B03002, estimate columns)
race_vars <- c(white = "B03002_003E", black = "B03002_004E",
               native_american = "B03002_005E", asian = "B03002_006E",
               hawaiian = "B03002_007E", other = "B03002_008E",
               multiracial = "B03002_009E", latinx = "B03002_012E")
## import area of interest data; spread() widens to one column per variable
aoi <- get_acs(geography = "block group",
                variables = race_vars,
                state = ST,
                county = cnty,
                year = yr) %>%
  dplyr::select(-moe, -NAME) %>%
  spread(key = "variable", value = "estimate")
## import spatial data for "cnty" region (B03002_001 = total population)
shp <- get_acs(geography = "block group",
               variables = "B03002_001E",
               state = ST,
               county = cnty,
               year = yr,
               geometry = TRUE)
shp <- st_zm(shp) ## drop "Z" data
## append census race data to spatial data
## perc_POC / count_POC: share and count of residents other than
## non-Hispanic white (B03002_003), relative to total (B03002_001)
aoi_shp <- left_join(shp, aoi, by = "GEOID", copy = TRUE) %>%
  dplyr::select(-moe, -variable, -NAME) %>%
  rename(B03002_001 = estimate) %>%
  mutate(perc_POC = 1-(B03002_003/B03002_001), count_POC = B03002_001 - B03002_003) %>%
  st_as_sf() %>%
  st_transform(4269)
## choropleth of percent POC (CRS = NAD83, EPSG:4269)
tm_shape(aoi_shp) +
  tm_fill('perc_POC', palette = "Purples")
| /scripts/state.R | no_license | deanhardy/mapping_race | R | false | false | 1,704 | r | rm(list=ls())
## state.R: map the share of people of color (POC) by census block group for
## one county, using ACS 5-year estimates pulled through tidycensus.
##then packages are loaded into the R environment
library(tidyverse)
library(tidycensus)
library(tigris)
library(tmap)
library(sf)
# census_api_key("") ## install personal census API key
##############################################################
## data import and prepping
##############################################################
## define year and region for census data import
yr <- '2016'
cnty <- c("Jefferson")
ST <- "Kentucky"
## import race variables of interest (ACS table B03002, estimate columns)
race_vars <- c(white = "B03002_003E", black = "B03002_004E",
               native_american = "B03002_005E", asian = "B03002_006E",
               hawaiian = "B03002_007E", other = "B03002_008E",
               multiracial = "B03002_009E", latinx = "B03002_012E")
## import area of interest data; spread() widens to one column per variable
aoi <- get_acs(geography = "block group",
                variables = race_vars,
                state = ST,
                county = cnty,
                year = yr) %>%
  dplyr::select(-moe, -NAME) %>%
  spread(key = "variable", value = "estimate")
## import spatial data for "cnty" region (B03002_001 = total population)
shp <- get_acs(geography = "block group",
               variables = "B03002_001E",
               state = ST,
               county = cnty,
               year = yr,
               geometry = TRUE)
shp <- st_zm(shp) ## drop "Z" data
## append census race data to spatial data
## perc_POC / count_POC: share and count of residents other than
## non-Hispanic white (B03002_003), relative to total (B03002_001)
aoi_shp <- left_join(shp, aoi, by = "GEOID", copy = TRUE) %>%
  dplyr::select(-moe, -variable, -NAME) %>%
  rename(B03002_001 = estimate) %>%
  mutate(perc_POC = 1-(B03002_003/B03002_001), count_POC = B03002_001 - B03002_003) %>%
  st_as_sf() %>%
  st_transform(4269)
## choropleth of percent POC (CRS = NAD83, EPSG:4269)
tm_shape(aoi_shp) +
  tm_fill('perc_POC', palette = "Purples")
|
# Shiny module server for the "home" tab: renders introductory text by
# knitting home.Rmd with summary statistics derived from the reactive
# graphData(). `input` and `refresh` are part of the module signature but
# are not referenced inside this body.
server_home <- function(input, output, session, graphData, appOptions, refresh) {
  output$text <- renderUI({
    # Nothing to render until data exists and is non-empty.
    if(is.null(graphData())) return(NULL)
    if(nrow(graphData()) == 0) return(NULL)
    # Snapshot current options without taking a reactive dependency on them.
    selected <- isolate(appOptions())
    selected$event_name <- "speedcubing"
    selected$event <- "all"
    # Total tournaments for the selected region/event.
    # NOTE(review): `no_events` and `metadata` are globals defined elsewhere
    # in the app -- confirm they are in scope wherever this module is used.
    selected$total_tourn <- no_events %>%
      ungroup() %>%
      filter(championship_type == selected$region, eventId == selected$event) %>%
      summarise(n = sum(n)) %>%
      pull(n)
    # Values interpolated into home.Rmd, assembled from three summaries:
    text_data <- c(
      # 1) first year observed and the cumulative total (gender "t" = all)
      graphData() %>%
        ungroup() %>%
        filter(type == "cum", gender == "t") %>%
        summarise(start = year(min(end_date, na.rm = TRUE)), cumm = max(n, na.rm = TRUE)),
      # 2) per-gender counts for the reference year, widened to one row
      #    ("" is recoded to "nk", "o" to "nb")
      graphData() %>%
        ungroup() %>%
        mutate(year = year(end_date)) %>%
        filter(type != "cum", year == metadata$refYr) %>%
        mutate(gender = case_when(gender == "" ~ "nk", gender == "o" ~ "nb", TRUE ~ gender)) %>%
        select(gender, type, n) %>%
        pivot_wider(names_from = c(gender, type), values_from = c(n), names_sep = "_"),
      # 3) the peak year: year with the highest non-cumulative count
      graphData()%>%
        ungroup() %>%
        filter(type != "cum", gender == "t") %>%
        mutate(peak = year(end_date)) %>%
        arrange(desc(n)) %>%
        filter(row_number() == 1) %>%
        select(peak)
    )
    # Knit home.Rmd (which references text_data/selected), convert the
    # resulting markdown to an HTML fragment, and hand it to Shiny as UI.
    readLines("home.Rmd", encoding = "UTF-8") %>%
      knit(text = ., quiet = TRUE, encoding = "UTF-8") %>%
      markdownToHTML(text = ., fragment.only = TRUE, encoding = "UTF-8") %>%
      HTML()
  })
}
| /tab_home.R | permissive | jayware9/speedcubing | R | false | false | 1,518 | r | server_home <- function(input, output, session, graphData, appOptions, refresh) {
output$text <- renderUI({
if(is.null(graphData())) return(NULL)
if(nrow(graphData()) == 0) return(NULL)
selected <- isolate(appOptions())
selected$event_name <- "speedcubing"
selected$event <- "all"
selected$total_tourn <- no_events %>%
ungroup() %>%
filter(championship_type == selected$region, eventId == selected$event) %>%
summarise(n = sum(n)) %>%
pull(n)
text_data <- c(
graphData() %>%
ungroup() %>%
filter(type == "cum", gender == "t") %>%
summarise(start = year(min(end_date, na.rm = TRUE)), cumm = max(n, na.rm = TRUE)),
graphData() %>%
ungroup() %>%
mutate(year = year(end_date)) %>%
filter(type != "cum", year == metadata$refYr) %>%
mutate(gender = case_when(gender == "" ~ "nk", gender == "o" ~ "nb", TRUE ~ gender)) %>%
select(gender, type, n) %>%
pivot_wider(names_from = c(gender, type), values_from = c(n), names_sep = "_"),
graphData()%>%
ungroup() %>%
filter(type != "cum", gender == "t") %>%
mutate(peak = year(end_date)) %>%
arrange(desc(n)) %>%
filter(row_number() == 1) %>%
select(peak)
)
readLines("home.Rmd", encoding = "UTF-8") %>%
knit(text = ., quiet = TRUE, encoding = "UTF-8") %>%
markdownToHTML(text = ., fragment.only = TRUE, encoding = "UTF-8") %>%
HTML()
})
}
|
## Matrix inversion is usually a costly computation
## and there may be some benefit to caching the inverse of a matrix rather than compute it repeatedly.
## makeCacheMatrix function creates a special "matrix" object that can cache its inverse
## which is really a list containing a function to
## set the value of the matrix
## get the value of the matrix
## set the value of inverse of the matrix
## get the value of inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
  ## A "cache-aware matrix": four closures sharing one environment that
  ## holds the matrix `x` and its memoised inverse.
  ##   set()        -- replace the stored matrix and invalidate the cache
  ##   get()        -- return the stored matrix
  ##   setInverse() -- store a computed inverse
  ##   getInverse() -- return the cached inverse (NULL until computed)
  cached_inverse <- NULL
  list(
    set = function(y) {
      ## A new matrix makes any previously cached inverse stale.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
## cacheSolve function calculates the inverse of the special "matrix" created with the above function.
## However, it first checks to see if the inverse has already been calculated.
## If so, it gets the inverse from the cache and skips the computation.
## Otherwise, it calculates the inverse of the data and sets the value of the inverse in the cache via the setInverse function.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the special "matrix" `x` built by
  ## makeCacheMatrix(). The inverse is computed (via solve(), which
  ## receives `...`) only on the first call; subsequent calls reuse the
  ## value stored in x's cache and emit a message.
  cached <- x$getInverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
## Example :
## > data_matrix <- makeCacheMatrix(matrix(c(2,7,9,11), 2, 2))
## > data_matrix$get()
## [,1] [,2]
## [1,] 2 9
## [2,] 7 11
## > data_matrix$getInverse()
## NULL
## > cacheSolve(data_matrix)
## [,1] [,2]
## [1,] -0.2682927 0.21951220
## [2,] 0.1707317 -0.04878049
## > cacheSolve(data_matrix)
## getting cached data
## [,1] [,2]
## [1,] -0.2682927 0.21951220
## [2,] 0.1707317 -0.04878049
## > data_matrix$getInverse()
## [,1] [,2]
## [1,] -0.2682927 0.21951220
## [2,] 0.1707317 -0.04878049
| /cachematrix.R | no_license | craghu4u/ProgrammingAssignment2 | R | false | false | 2,053 | r | ## Matrix inversion is usually a costly computation
## and there may be some benefit to caching the inverse of a matrix rather than compute it repeatedly.
## makeCacheMatrix function creates a special "matrix" object that can cache its inverse
## which is really a list containing a function to
## set the value of the matrix
## get the value of the matrix
## set the value of inverse of the matrix
## get the value of inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
  ## A "cache-aware matrix": four closures sharing one environment that
  ## holds the matrix `x` and its memoised inverse.
  ##   set()        -- replace the stored matrix and invalidate the cache
  ##   get()        -- return the stored matrix
  ##   setInverse() -- store a computed inverse
  ##   getInverse() -- return the cached inverse (NULL until computed)
  cached_inverse <- NULL
  list(
    set = function(y) {
      ## A new matrix makes any previously cached inverse stale.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
## cacheSolve function calculates the inverse of the special "matrix" created with the above function.
## However, it first checks to see if the inverse has already been calculated.
## If so, it gets the inverse from the cache and skips the computation.
## Otherwise, it calculates the inverse of the data and sets the value of the inverse in the cache via the setInverse function.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the special "matrix" `x` built by
  ## makeCacheMatrix(). The inverse is computed (via solve(), which
  ## receives `...`) only on the first call; subsequent calls reuse the
  ## value stored in x's cache and emit a message.
  cached <- x$getInverse()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
## Example :
## > data_matrix <- makeCacheMatrix(matrix(c(2,7,9,11), 2, 2))
## > data_matrix$get()
## [,1] [,2]
## [1,] 2 9
## [2,] 7 11
## > data_matrix$getInverse()
## NULL
## > cacheSolve(data_matrix)
## [,1] [,2]
## [1,] -0.2682927 0.21951220
## [2,] 0.1707317 -0.04878049
## > cacheSolve(data_matrix)
## getting cached data
## [,1] [,2]
## [1,] -0.2682927 0.21951220
## [2,] 0.1707317 -0.04878049
## > data_matrix$getInverse()
## [,1] [,2]
## [1,] -0.2682927 0.21951220
## [2,] 0.1707317 -0.04878049
|
# dat proc
# Builds the two objects used downstream:
#   restdat  -- one row per restoration project (date, tech, type, top, id)
#   reststat -- project locations (id, lat, lon)
library(tidyverse)
library(readxl)
library(forcats)
library(stringi)
library(here)
##
# create restdat, reststat
# format raw data
raw <- read.csv(here('data-raw', 'DRAFT_TBEP_Combined_Projects_1971-2017_06222018.csv'), stringsAsFactors = F) %>%
  rename(
    date = Completion_Date,
    type = Project_Activity,
    tech = Project_Technology,
    lat = ProjectLatitude,
    lon = ProjectLongitude
  ) %>%
  select(date, type, tech, lat, lon) %>%
  # Drop rows with placeholder coordinates or missing dates.
  # NOTE(review): lon > -1e6 looks like a sentinel filter -- confirm the raw
  # file encodes missing longitude as a very large negative number.
  filter(
    lon > -1e6 & lat > 20 & !is.na(date)
  ) %>%
  mutate(
    # random 4-character project id
    id = stri_rand_strings(nrow(.), length = 4),
    # top-level grouping: habitat (hab) vs. water-quality (wtr) projects
    top = fct_recode(type,
                     hab = 'Habitat_Enhancement',
                     hab = 'Habitat_Establishment',
                     hab = 'Habitat_Protection',
                     wtr = 'Nonpoint_Source',
                     wtr = 'Point_Source'
    ),
    # short codes for the detailed activity
    type = fct_recode(type,
                      hab_enh = 'Habitat_Enhancement',
                      hab_est = 'Habitat_Establishment',
                      hab_pro = 'Habitat_Protection',
                      non_src = 'Nonpoint_Source',
                      pnt_src = 'Point_Source'
    ),
    tech = toupper(tech),
    # strip trailing "?" from uncertain completion years, then make numeric
    date = gsub('\\?$', '', date),
    date = as.numeric(date)
  )
# restdat
restdat <- raw %>%
  select(date, tech, type, top, id)
# reststat
reststat <- raw %>%
  select(id, lat, lon)
save(restdat, file = here('data', 'restdat.RData'))
save(reststat, file = here('data', 'reststat.RData'))
| /R/dat_proc.R | no_license | tbep-tech/restore-gulf | R | false | false | 1,485 | r | # dat proc
# Builds the two objects used downstream:
#   restdat  -- one row per restoration project (date, tech, type, top, id)
#   reststat -- project locations (id, lat, lon)
library(tidyverse)
library(readxl)
library(forcats)
library(stringi)
library(here)
##
# create restdat, reststat
# format raw data
raw <- read.csv(here('data-raw', 'DRAFT_TBEP_Combined_Projects_1971-2017_06222018.csv'), stringsAsFactors = F) %>%
  rename(
    date = Completion_Date,
    type = Project_Activity,
    tech = Project_Technology,
    lat = ProjectLatitude,
    lon = ProjectLongitude
  ) %>%
  select(date, type, tech, lat, lon) %>%
  # Drop rows with placeholder coordinates or missing dates.
  # NOTE(review): lon > -1e6 looks like a sentinel filter -- confirm the raw
  # file encodes missing longitude as a very large negative number.
  filter(
    lon > -1e6 & lat > 20 & !is.na(date)
  ) %>%
  mutate(
    # random 4-character project id
    id = stri_rand_strings(nrow(.), length = 4),
    # top-level grouping: habitat (hab) vs. water-quality (wtr) projects
    top = fct_recode(type,
                     hab = 'Habitat_Enhancement',
                     hab = 'Habitat_Establishment',
                     hab = 'Habitat_Protection',
                     wtr = 'Nonpoint_Source',
                     wtr = 'Point_Source'
    ),
    # short codes for the detailed activity
    type = fct_recode(type,
                      hab_enh = 'Habitat_Enhancement',
                      hab_est = 'Habitat_Establishment',
                      hab_pro = 'Habitat_Protection',
                      non_src = 'Nonpoint_Source',
                      pnt_src = 'Point_Source'
    ),
    tech = toupper(tech),
    # strip trailing "?" from uncertain completion years, then make numeric
    date = gsub('\\?$', '', date),
    date = as.numeric(date)
  )
# restdat
restdat <- raw %>%
  select(date, tech, type, top, id)
# reststat
reststat <- raw %>%
  select(id, lat, lon)
save(restdat, file = here('data', 'restdat.RData'))
save(reststat, file = here('data', 'reststat.RData'))
|
### Function to run one simulation of this
n_rolls_until_all_equal <- function() {
  ## One simulation: roll six fair dice, then keep re-rolling by resampling
  ## (with replacement) from the current faces until every die shows the
  ## same number. Returns the total number of rolls performed.
  die <- sample(1:6, 6, replace = TRUE)
  roll_count <- 1
  repeat {
    if (length(unique(die)) == 1) {
      return(roll_count)
    }
    die <- sample(die, 6, replace = TRUE)
    roll_count <- roll_count + 1
  }
}
### number of simulations
Nsims <- 1E6
### record rolls; seed fixed for reproducibility
set.seed(123)
# Preallocate a numeric result vector: rep(NA, n) starts as a logical vector
# and is coerced on first assignment. seq_len() is also safe if Nsims were
# ever 0, where 1:Nsims would wrongly yield c(1, 0).
my_rolls <- numeric(Nsims)
for(i in seq_len(Nsims)) {
    my_rolls[i] <- n_rolls_until_all_equal()
}
### Return the mean number of rolls (Monte Carlo estimate of the expectation)
mean(my_rolls)
| /rolling_dice/rolling_dice.R | no_license | jfiksel/riddlers | R | false | false | 626 | r | ### Function to run one simulation of this
n_rolls_until_all_equal <- function() {
  ## One simulation: roll six fair dice, then keep re-rolling by resampling
  ## (with replacement) from the current faces until every die shows the
  ## same number. Returns the total number of rolls performed.
  die <- sample(1:6, 6, replace = TRUE)
  roll_count <- 1
  repeat {
    if (length(unique(die)) == 1) {
      return(roll_count)
    }
    die <- sample(die, 6, replace = TRUE)
    roll_count <- roll_count + 1
  }
}
### number of simulations
Nsims <- 1E6
### record rolls; seed fixed for reproducibility
set.seed(123)
# Preallocate a numeric result vector: rep(NA, n) starts as a logical vector
# and is coerced on first assignment. seq_len() is also safe if Nsims were
# ever 0, where 1:Nsims would wrongly yield c(1, 0).
my_rolls <- numeric(Nsims)
for(i in seq_len(Nsims)) {
    my_rolls[i] <- n_rolls_until_all_equal()
}
### Return the mean number of rolls (Monte Carlo estimate of the expectation)
mean(my_rolls)
|
# Create a sample of 50 numbers drawn from the standard normal distribution.
y <- rnorm(50)
# Give the chart file a name (PNG device; closed by dev.off() below).
png(file = "rnorm.png")
# Plot the histogram for this sample (title typo "Normal DIstribution1" fixed).
hist(y, main = "Normal Distribution")
# Save the file.
dev.off() | /R_Scripts/r_norm.R | no_license | thunderpearl/R_Code_ThunderPearl | R | false | false | 239 | r | # Create a sample of 50 numbers which are normally distributed.
y <- rnorm(50)
# Give the chart file a name (PNG device; closed by dev.off() below).
png(file = "rnorm.png")
# Plot the histogram for this sample (title typo "Normal DIstribution1" fixed).
hist(y, main = "Normal Distribution")
# Save the file.
dev.off() |
setwd("/scATAC-machineLearning-benchmarking/intra-Corces2016/output/")
# Compare predicted vs. true cell-type labels for one classifier run and
# return a list of performance measures (see the embedded description).
evaluate <- function(TrueLabelsPath, PredLabelsPath, Indices = NULL){
  "
  Script to evaluate the performance of the classifier.
  It returns multiple evaluation measures: the confusion matrix, median F1-score, F1-score for each class, accuracy, percentage of unlabeled, population size.
  The percentage of unlabeled cells is find by checking for cells that are labeled 'Unassigned', 'unassigned', 'Unknown', 'unknown', 'Nodexx', 'rand', or 'ambiguous'.
  Parameters
  ----------
  TrueLabelsPath: csv file with the true labels (format: one column, no index)
  PredLabelsPath: csv file with the predicted labels (format: one column, no index)
  Indices: which part of the csv file should be read (e.g. if more datasets are tested at the same time) (format: c(begin, end))
  Returns
  -------
  Conf: confusion matrix
  MedF1 : median F1-score
  F1 : F1-score per class
  Acc : accuracy
  PercUnl : percentage of unlabeled cells
  PopSize : number of cells per cell type
  "
  # Read labels; unlist() collapses the single-column data frames to vectors.
  true_lab <- unlist(read.csv(TrueLabelsPath))
  pred_lab <- unlist(read.csv(PredLabelsPath))
  # Optionally restrict evaluation to a subset of rows.
  if (! is.null(Indices)){
    true_lab <- true_lab[Indices]
    pred_lab <- pred_lab[Indices]
  }
  unique_true <- unlist(unique(true_lab))
  unique_pred <- unlist(unique(pred_lab))
  unique_all <- unique(c(unique_true,unique_pred))  # NOTE(review): computed but never used
  # Full confusion matrix (rows = true labels, columns = predictions).
  conf <- table(true_lab,pred_lab)
  pop_size <- rowSums(conf)  # number of cells per true class
  # Collapse hierarchical internal-node predictions like "Nodexx" to "Node".
  pred_lab = gsub('Node..','Node',pred_lab)
  # Confusion matrix over assigned cells only: all "unassigned"-style
  # prediction labels are excluded before F1/accuracy are computed.
  conf_F1 <- table(true_lab,pred_lab,exclude = c('unassigned','Unassigned','Unknown','rand','Node','ambiguous','unknown'))
  F1 <- vector()
  sum_acc <- 0  # running count of correctly assigned cells
  for (i in c(1:length(unique_true))){
    # Column of conf_F1 (if any) whose name matches the i-th true label.
    findLabel = colnames(conf_F1) == row.names(conf_F1)[i]
    if(sum(findLabel)){
      # Precision and recall for this class, from the restricted matrix.
      prec <- conf_F1[i,findLabel] / colSums(conf_F1)[findLabel]
      rec <- conf_F1[i,findLabel] / rowSums(conf_F1)[i]
      if (prec == 0 || rec == 0){
        F1[i] = 0
      } else{
        F1[i] <- (2*prec*rec) / (prec + rec)
      }
      sum_acc <- sum_acc + conf_F1[i,findLabel]
    } else {
      # The class was never predicted, so its F1 is 0.
      F1[i] = 0
    }
  }
  # Drop empty classes before attaching names.
  # NOTE(review): this assumes the number of non-empty classes equals
  # length(unique_true); verify for data where a true class has zero rows.
  pop_size <- pop_size[pop_size > 0]
  names(F1) <- names(pop_size)
  med_F1 <- median(F1)
  # Fraction of cells left unassigned (any "unknown"-style label, or a
  # collapsed "Node" label).
  total <- length(pred_lab)
  num_unlab <- sum(pred_lab == 'unassigned') + sum(pred_lab == 'Unassigned') + sum(pred_lab == 'rand') + sum(pred_lab == 'Unknown') + sum(pred_lab == 'unknown') + sum(pred_lab == 'Node') + sum(pred_lab == 'ambiguous')
  per_unlab <- num_unlab / total
  # Accuracy over assigned cells only.
  acc <- sum_acc/sum(conf_F1)
  result <- list(Conf = conf, MedF1 = med_F1, F1 = F1, Acc = acc, PercUnl = per_unlab, PopSize = pop_size)
  return(result)
}
# --- Run the evaluation for the DT (decision tree) classifier -------------
TrueLabelsPath <- "./DT_true.csv"
PredLabelsPath <- "./DT_pred.csv"
OutputDir <- "./"
ToolName <- "DT"
results <- evaluate(TrueLabelsPath, PredLabelsPath)
# One output folder per measure (dir.create warns, but does not fail, when
# the folder already exists).
dir.create(file.path(OutputDir, "Confusion"))
dir.create(file.path(OutputDir, "F1"))
dir.create(file.path(OutputDir, "PopSize"))
dir.create(file.path(OutputDir, "Summary"))
# Per-measure CSVs, one file per tool.
write.csv(results$Conf, file.path(OutputDir, "Confusion", paste0(ToolName, ".csv")))
write.csv(results$F1, file.path(OutputDir, "F1", paste0(ToolName, ".csv")))
write.csv(results$PopSize, file.path(OutputDir, "PopSize", paste0(ToolName, ".csv")))
# One-row summary of the scalar measures.
df <- data.frame(results[c("MedF1", "Acc", "PercUnl")])
write.csv(df, file.path(OutputDir, "Summary", paste0(ToolName, ".csv")))
| /intra-Corces2016/bin/3_evaluate_DT.r | no_license | mrcuizhe/scATAC-MachineLearning-benchmarking | R | false | false | 3,339 | r | setwd("/scATAC-machineLearning-benchmarking/intra-Corces2016/output/")
# Compare predicted vs. true cell-type labels for one classifier run and
# return a list of performance measures (see the embedded description).
evaluate <- function(TrueLabelsPath, PredLabelsPath, Indices = NULL){
  "
  Script to evaluate the performance of the classifier.
  It returns multiple evaluation measures: the confusion matrix, median F1-score, F1-score for each class, accuracy, percentage of unlabeled, population size.
  The percentage of unlabeled cells is find by checking for cells that are labeled 'Unassigned', 'unassigned', 'Unknown', 'unknown', 'Nodexx', 'rand', or 'ambiguous'.
  Parameters
  ----------
  TrueLabelsPath: csv file with the true labels (format: one column, no index)
  PredLabelsPath: csv file with the predicted labels (format: one column, no index)
  Indices: which part of the csv file should be read (e.g. if more datasets are tested at the same time) (format: c(begin, end))
  Returns
  -------
  Conf: confusion matrix
  MedF1 : median F1-score
  F1 : F1-score per class
  Acc : accuracy
  PercUnl : percentage of unlabeled cells
  PopSize : number of cells per cell type
  "
  # Read labels; unlist() collapses the single-column data frames to vectors.
  true_lab <- unlist(read.csv(TrueLabelsPath))
  pred_lab <- unlist(read.csv(PredLabelsPath))
  # Optionally restrict evaluation to a subset of rows.
  if (! is.null(Indices)){
    true_lab <- true_lab[Indices]
    pred_lab <- pred_lab[Indices]
  }
  unique_true <- unlist(unique(true_lab))
  unique_pred <- unlist(unique(pred_lab))
  unique_all <- unique(c(unique_true,unique_pred))  # NOTE(review): computed but never used
  # Full confusion matrix (rows = true labels, columns = predictions).
  conf <- table(true_lab,pred_lab)
  pop_size <- rowSums(conf)  # number of cells per true class
  # Collapse hierarchical internal-node predictions like "Nodexx" to "Node".
  pred_lab = gsub('Node..','Node',pred_lab)
  # Confusion matrix over assigned cells only: all "unassigned"-style
  # prediction labels are excluded before F1/accuracy are computed.
  conf_F1 <- table(true_lab,pred_lab,exclude = c('unassigned','Unassigned','Unknown','rand','Node','ambiguous','unknown'))
  F1 <- vector()
  sum_acc <- 0  # running count of correctly assigned cells
  for (i in c(1:length(unique_true))){
    # Column of conf_F1 (if any) whose name matches the i-th true label.
    findLabel = colnames(conf_F1) == row.names(conf_F1)[i]
    if(sum(findLabel)){
      # Precision and recall for this class, from the restricted matrix.
      prec <- conf_F1[i,findLabel] / colSums(conf_F1)[findLabel]
      rec <- conf_F1[i,findLabel] / rowSums(conf_F1)[i]
      if (prec == 0 || rec == 0){
        F1[i] = 0
      } else{
        F1[i] <- (2*prec*rec) / (prec + rec)
      }
      sum_acc <- sum_acc + conf_F1[i,findLabel]
    } else {
      # The class was never predicted, so its F1 is 0.
      F1[i] = 0
    }
  }
  # Drop empty classes before attaching names.
  # NOTE(review): this assumes the number of non-empty classes equals
  # length(unique_true); verify for data where a true class has zero rows.
  pop_size <- pop_size[pop_size > 0]
  names(F1) <- names(pop_size)
  med_F1 <- median(F1)
  # Fraction of cells left unassigned (any "unknown"-style label, or a
  # collapsed "Node" label).
  total <- length(pred_lab)
  num_unlab <- sum(pred_lab == 'unassigned') + sum(pred_lab == 'Unassigned') + sum(pred_lab == 'rand') + sum(pred_lab == 'Unknown') + sum(pred_lab == 'unknown') + sum(pred_lab == 'Node') + sum(pred_lab == 'ambiguous')
  per_unlab <- num_unlab / total
  # Accuracy over assigned cells only.
  acc <- sum_acc/sum(conf_F1)
  result <- list(Conf = conf, MedF1 = med_F1, F1 = F1, Acc = acc, PercUnl = per_unlab, PopSize = pop_size)
  return(result)
}
# --- Run the evaluation for the DT (decision tree) classifier -------------
TrueLabelsPath <- "./DT_true.csv"
PredLabelsPath <- "./DT_pred.csv"
OutputDir <- "./"
ToolName <- "DT"
results <- evaluate(TrueLabelsPath, PredLabelsPath)
# One output folder per measure (dir.create warns, but does not fail, when
# the folder already exists).
dir.create(file.path(OutputDir, "Confusion"))
dir.create(file.path(OutputDir, "F1"))
dir.create(file.path(OutputDir, "PopSize"))
dir.create(file.path(OutputDir, "Summary"))
# Per-measure CSVs, one file per tool.
write.csv(results$Conf, file.path(OutputDir, "Confusion", paste0(ToolName, ".csv")))
write.csv(results$F1, file.path(OutputDir, "F1", paste0(ToolName, ".csv")))
write.csv(results$PopSize, file.path(OutputDir, "PopSize", paste0(ToolName, ".csv")))
# One-row summary of the scalar measures.
df <- data.frame(results[c("MedF1", "Acc", "PercUnl")])
write.csv(df, file.path(OutputDir, "Summary", paste0(ToolName, ".csv")))
|
# Program: One_sample_ttest.R
# Programmer: Heewon Jeong
# Objective(s):
# To compare the mean of a data set with a given population mean (mu = 14)

## Clear workspace
# dev.off() errors when no graphics device is open, which aborted the script
# on a fresh R session; only close a device when one actually exists.
if (dev.cur() > 1) dev.off() # clear all plots
rm(list = ls()) # clear global Environment (NOTE: wipes everything in .GlobalEnv)
cat("\f") # clear Console

## Install required library (skip the download when already installed)
if (!requireNamespace("psych", quietly = TRUE)) {
  install.packages("psych") # for descriptive statistics
}

## Attach library
library(psych) # for descriptive statistics

## Data loading
df <- read.csv("paired-ttest.csv", header = TRUE)
data <- data.frame(df) # raw data saved as variable 'data'

## Variables assigning
# The "AFTER" measurements (the original comment described this as a
# Before/After difference, but the code reads the AFTER column).
A <- data$AFTER

## Descriptive statistics (number of data, mean, standard deviation, ...)
describe(A)

## One sample t-test: H0: mu = 14 against the two-sided alternative
One_sample <- t.test(A, mu = 14, alternative = c("two.sided"))
One_sample
| /데이터모음/Ch 04_t-test/One_sample_ttest.R | no_license | hsyliark/Environmental_Statistics_with_R | R | false | false | 818 | r | # Program: One_sample_ttest.R
# Programmer: Heewon Jeong
# Objective(s):
# To compare the mean of a data set with a given population mean (mu = 14)

## Clear workspace
# dev.off() errors when no graphics device is open, which aborted the script
# on a fresh R session; only close a device when one actually exists.
if (dev.cur() > 1) dev.off() # clear all plots
rm(list = ls()) # clear global Environment (NOTE: wipes everything in .GlobalEnv)
cat("\f") # clear Console

## Install required library (skip the download when already installed)
if (!requireNamespace("psych", quietly = TRUE)) {
  install.packages("psych") # for descriptive statistics
}

## Attach library
library(psych) # for descriptive statistics

## Data loading
df <- read.csv("paired-ttest.csv", header = TRUE)
data <- data.frame(df) # raw data saved as variable 'data'

## Variables assigning
# The "AFTER" measurements (the original comment described this as a
# Before/After difference, but the code reads the AFTER column).
A <- data$AFTER

## Descriptive statistics (number of data, mean, standard deviation, ...)
describe(A)

## One sample t-test: H0: mu = 14 against the two-sided alternative
One_sample <- t.test(A, mu = 14, alternative = c("two.sided"))
One_sample
|
241181
355422
355427
355438
362091
362921
362923
364738
365167
369928
369931
369935
369937
369939
370724
370729
370732
370735
370738
370739
371765
371767
372183
372216
372218
372222
372224
372228
372231
372652
373296
373301
373322
374605
374612
374615
380058
382312
387814
499128
499469
502061
502690
502692
502694
502697
502699
502702
502705
507752
507756
510815
| /psf/astro/zone037.r | no_license | flaviasobreira/DESWL | R | false | false | 364 | r | 241181
355422
355427
355438
362091
362921
362923
364738
365167
369928
369931
369935
369937
369939
370724
370729
370732
370735
370738
370739
371765
371767
372183
372216
372218
372222
372224
372228
372231
372652
373296
373301
373322
374605
374612
374615
380058
382312
387814
499128
499469
502061
502690
502692
502694
502697
502699
502702
502705
507752
507756
510815
|
# Standard testthat bootstrap: runs every test under tests/testthat/ for the
# 'repipe' package (executed automatically by R CMD check).
library(testthat)
library(repipe)
test_check("repipe")
| /tests/testthat.R | permissive | tsostarics/repipe | R | false | false | 56 | r | library(testthat)
library(repipe) # package under test
test_check("repipe") # run all tests in tests/testthat/
|
#' Simulated Rare Variants Data in Dense Scenario
#'
#' A simulated dataset containing 1,000 subjects and 300 rare variants.
#' 20\% of the variants are simulated to be causal/associated. Effect
#' strengths are randomly sampled from U(-0.5, 0.5) distribution.
#'
#' @docType data
#'
#' @usage data(RV_dense)
#'
#' @format A list object.
#' \describe{
#' \item{SNV}{A 1,000 by 300 matrix containing the genotypes. Each
#' component of the matrix denotes the number of minor alleles.}
#' \item{trait}{A vector of length 1,000 containing disease labels
#' (500 cases and 500 controls).}
#' \item{zero_var}{Indexes for columns with no variation. These
#' columns should be removed if SNV is further used by perm_score, wAF and
#' wAFd functions.}
#' }
"RV_dense"
#' Simulated Rare Variants Data in Sparse Scenario
#'
#' A simulated dataset containing 1,000 subjects and 300 rare variants.
#' 1\% of the variants are simulated to be causal/associated. Effect
#' strengths are randomly sampled from U(-2, 2) distribution.
#'
#' @docType data
#'
#' @usage data(RV_sparse)
#'
#' @format A list object.
#' \describe{
#' \item{SNV}{A 1,000 by 300 matrix containing the genotypes. Each
#' component of the matrix denotes the number of minor alleles.}
#' \item{trait}{A vector of length 1,000 containing disease labels
#' (500 cases and 500 controls).}
#' \item{zero_var}{Indexes for columns with no variation. These
#' columns should be removed if SNV is further used by perm_score, wAF and
#' wAFd functions.}
#' }
"RV_sparse"
#' Simulated Single Nucleotide Variants (SNVs) Data in Dense Scenario
#'
#' A simulated dataset containing 1,000 subjects and 100 SNVs. 20\% of
#' the variants are simulated to be causal/associated. Effect strengths
#' are randomly sampled from U(-0.2, 0.2) distribution.
#'
#' @docType data
#'
#' @usage data(SNV_dense)
#'
#' @format A list object.
#' \describe{
#' \item{SNV}{A 1,000 by 100 matrix containing the genotypes. Each
#' component of the matrix denotes the number of minor alleles.}
#' \item{trait}{A vector of length 1,000 containing a continuous trait.}
#' \item{zero_var}{Indexes for columns with no variation. These
#' columns should be removed if SNV is further used by perm_score, wAF and
#' wAFd functions.}
#' }
"SNV_dense"
#' Simulated Single Nucleotide Variants (SNVs) Data in Sparse Scenario
#'
#' A simulated dataset containing 1,000 subjects and 100 SNVs. 1\% of
#' the variants are simulated to be causal/associated. Effect strengths
#' are randomly sampled from U(-0.5, 0.5) distribution.
#'
#' @docType data
#'
#' @usage data(SNV_sparse)
#'
#' @format A list object.
#' \describe{
#'  \item{SNV}{A 1,000 by 100 matrix containing the genotypes. Each
#' component of the matrix denotes the number of minor alleles.}
#' \item{trait}{A vector of length 1,000 containing a continuous trait.}
#' \item{zero_var}{Indexes for columns with no variation. These
#' columns should be removed if SNV is further used by perm_score, wAF and
#' wAFd functions.}
#' }
"SNV_sparse"
| /R/data_description.R | no_license | songbiostat/wAF | R | false | false | 3,065 | r | #' Simulated Rare Variants Data in Dense Scenario
#'
#' A simulated dataset containing 1,000 subjects and 300 rare variants.
#' 20\% of the variants are simulated to be causal/associated. Effect
#' strengths are randomly sampled from U(-0.5, 0.5) distribution.
#'
#' @docType data
#'
#' @usage data(RV_dense)
#'
#' @format A list object.
#' \describe{
#' \item{SNV}{A 1,000 by 300 matrix containing the genotypes. Each
#' component of the matrix denotes the number of minor alleles.}
#' \item{trait}{A vector of length 1,000 containing disease labels
#' (500 cases and 500 controls).}
#' \item{zero_var}{Indexes for columns with no variation. These
#' columns should be removed if SNV is further used by perm_score, wAF and
#' wAFd functions.}
#' }
"RV_dense"
#' Simulated Rare Variants Data in Sparse Scenario
#'
#' A simulated dataset containing 1,000 subjects and 300 rare variants.
#' 1\% of the variants are simulated to be causal/associated. Effect
#' strengths are randomly sampled from U(-2, 2) distribution.
#'
#' @docType data
#'
#' @usage data(RV_sparse)
#'
#' @format A list object.
#' \describe{
#' \item{SNV}{A 1,000 by 300 matrix containing the genotypes. Each
#' component of the matrix denotes the number of minor alleles.}
#' \item{trait}{A vector of length 1,000 containing disease labels
#' (500 cases and 500 controls).}
#' \item{zero_var}{Indexes for columns with no variation. These
#' columns should be removed if SNV is further used by perm_score, wAF and
#' wAFd functions.}
#' }
"RV_sparse"
#' Simulated Single Nucleotide Variants (SNVs) Data in Dense Scenario
#'
#' A simulated dataset containing 1,000 subjects and 100 SNVs. 20\% of
#' the variants are simulated to be causal/associated. Effect strengths
#' are randomly sampled from U(-0.2, 0.2) distribution.
#'
#' @docType data
#'
#' @usage data(SNV_dense)
#'
#' @format A list object.
#' \describe{
#' \item{SNV}{A 1,000 by 100 matrix containing the genotypes. Each
#' component of the matrix denotes the number of minor alleles.}
#' \item{trait}{A vector of length 1,000 containing a continuous trait.}
#' \item{zero_var}{Indexes for columns with no variation. These
#' columns should be removed if SNV is further used by perm_score, wAF and
#' wAFd functions.}
#' }
"SNV_dense"
#' Simulated Single Nucleotide Variants (SNVs) Data in Sparse Scenario
#'
#' A simulated dataset containing 1,000 subjects and 100 SNVs. 1\% of
#' the variants are simulated to be causal/associated. Effect strengths
#' are randomly sampled from U(-0.5, 0.5) distribution.
#'
#' @docType data
#'
#' @usage data(SNV_sparse)
#'
#' @format A list object.
#' \describe{
#'  \item{SNV}{A 1,000 by 100 matrix containing the genotypes. Each
#' component of the matrix denotes the number of minor alleles.}
#' \item{trait}{A vector of length 1,000 containing a continuous trait.}
#' \item{zero_var}{Indexes for columns with no variation. These
#' columns should be removed if SNV is further used by perm_score, wAF and
#' wAFd functions.}
#' }
"SNV_sparse"
|
# Extracted example for bcRep::sequences.mutation.base: base-level statistics
# about silent mutations for V sequences of the bundled example data.
library(bcRep)
### Name: sequences.mutation.base
### Title: Statistics about silent mutations
### Aliases: sequences.mutation.base plotSequencesMutationBase
### ** Examples
data(mutationtab)
data(summarytab)
V.base.mut<-sequences.mutation.base(mutationtab = mutationtab, summarytab = summarytab,
	sequence = "V", nrCores = 1)
## Not run:
##D plotSequencesMutationBase(mutationBaseTab = V.base.mut, plotMutation = T)
## End(Not run)
| /data/genthat_extracted_code/bcRep/examples/sequences.mutation.base.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 447 | r | library(bcRep)
# Extracted example for bcRep::sequences.mutation.base: base-level statistics
# about silent mutations for V sequences of the bundled example data.
### Name: sequences.mutation.base
### Title: Statistics about silent mutations
### Aliases: sequences.mutation.base plotSequencesMutationBase
### ** Examples
data(mutationtab)
data(summarytab)
V.base.mut<-sequences.mutation.base(mutationtab = mutationtab, summarytab = summarytab,
	sequence = "V", nrCores = 1)
## Not run:
##D plotSequencesMutationBase(mutationBaseTab = V.base.mut, plotMutation = T)
## End(Not run)
|
\name{plot-methods}
\docType{methods}
\alias{plot-methods}
\alias{plot,boundEst,ANY-method}
\alias{plot,boundEst,missing-method}
\alias{points-methods}
\alias{points,ANY-method}
\alias{points,boundEst-method}
\title{Methods for Function \code{plot} and \code{points} in Package \pkg{binseqtest}}
\description{
Plot binary sequential boundaries for \code{"boundEst"} objects.
}
\usage{
\S4method{plot}{boundEst,missing}(x,
rcol = c(orange = "#E69F00", blue = "#56B4E9", green = "#009E73"),
rpch = c(openCircle=1, filledCircle=16, filledDiamond=18),
bplottype = "NS",
newplot = TRUE, dtext=NULL, grid=50, xlab=NULL, ylab=NULL, \dots)
\S4method{points}{boundEst}(x, \dots)
}
\arguments{
\item{x}{an object of class \code{"boundEst"} }
\item{rcol}{rejection color vector, rcol[1]=fail to reject, rcol[2]=reject, conclude theta>theta0, rcol[3]=reject, conclude theta< theta0 (see details)}
  \item{rpch}{rejection pch vector; its elements correspond to the same categories as the rcol vector}
\item{bplottype}{character, either 'NS' (default), 'FS', 'NB', 'NZ', or 'NE' (see details)}
\item{newplot}{logical, should a new plot be started? if FALSE add to existing plot (only makes sense to add to plot with the same bplottype)}
\item{dtext}{logical, add descriptive text? if NULL only adds text when newplot=TRUE (used for bplottype='NS' or 'FS')}
\item{grid}{numeric, if maximum possible total trials<=grid then add gridlines (used for bplottype='NS' or 'FS')}
\item{xlab}{title for x axis, if NULL value depends on bplottype}
\item{ylab}{title for y axis, if NULL value depends on bplottype}
\item{\dots}{other arguments to the \code{plot} function can be passed here.}
}
\section{Methods}{
\describe{
\item{\code{signature(x = "ANY", y = "ANY")}}{Generic function: see
\code{\link[graphics]{plot}}.}
\item{\code{signature(x = "boundEst", y = "missing")}}{Plot
binary sequential boundaries for \code{x}.}
\item{\code{signature(x = "ANY")}}{Generic function: see
\code{\link[graphics]{points}}.}
\item{\code{signature(x = "boundEst")}}{Add points associated with the binary sequential boundaries for \code{x} to a plot.}
}
}
\details{
The default rcol vector are good colors for distinguishing for those with color blindness. Text is printed on the unused portion of the plot, which uses the color names
taken from the rcol vector names.
There are several different types of plots, selected by the \code{bplottype} argument, where the value is a character string with 2 characters, the first representing the
x-axis and the second representing the y-axis. For example \code{bplottype}='NS' denotes N=total number of trials on the horizontal axis, and S=number of successes on the vertical
axis. Other plots are: 'FS'=failure by successes; 'NB'=total by B-values; 'NZ'=total by Z-scores; 'NE'=total by estimates and confidence intervals. The type 'NE' is only defined
if there are only 1 value for each N on the upper and 1 value for each N on the lower part of the boundary. Otherwise, the confidence intervals would overlap and be uninformative.
For 'NE' the end of the boundary is not plotted because of that overlapping.
For some examples, see the plot section of the vignette. The \code{points} method just calls \code{plot(x, newplot=FALSE, \dots)}.
}
\examples{
b<-designOBF(50,theta0=.5)
plot(b,bplottype="NE")
plot(b)
b2<-designFixed(49,theta0=.5)
points(b2,rpch=c(17,17,17))
}
\keyword{methods}
| /man/plot-methods.Rd | no_license | cran/binseqtest | R | false | false | 3,523 | rd | \name{plot-methods}
\docType{methods}
\alias{plot-methods}
\alias{plot,boundEst,ANY-method}
\alias{plot,boundEst,missing-method}
\alias{points-methods}
\alias{points,ANY-method}
\alias{points,boundEst-method}
\title{Methods for Function \code{plot} and \code{points} in Package \pkg{binseqtest}}
\description{
Plot binary sequential boundaries for \code{"boundEst"} objects.
}
\usage{
\S4method{plot}{boundEst,missing}(x,
rcol = c(orange = "#E69F00", blue = "#56B4E9", green = "#009E73"),
rpch = c(openCircle=1, filledCircle=16, filledDiamond=18),
bplottype = "NS",
newplot = TRUE, dtext=NULL, grid=50, xlab=NULL, ylab=NULL, \dots)
\S4method{points}{boundEst}(x, \dots)
}
\arguments{
\item{x}{an object of class \code{"boundEst"} }
\item{rcol}{rejection color vector, rcol[1]=fail to reject, rcol[2]=reject, conclude theta>theta0, rcol[3]=reject, conclude theta< theta0 (see details)}
\item{rpch}{rejection pch vector, correspond to same categories as rcol vector}
\item{bplottype}{character, either 'NS' (default), 'FS', 'NB', 'NZ', or 'NE' (see details)}
\item{newplot}{logical, should a new plot be started? if FALSE add to existing plot (only makes sense to add to plot with the same bplottype)}
\item{dtext}{logical, add descriptive text? if NULL only adds text when newplot=TRUE (used for bplottype='NS' or 'FS')}
\item{grid}{numeric, if maximum possible total trials<=grid then add gridlines (used for bplottype='NS' or 'FS')}
\item{xlab}{title for x axis, if NULL value depends on bplottype}
\item{ylab}{title for y axis, if NULL value depends on bplottype}
\item{\dots}{other arguments to the \code{plot} function can be passed here.}
}
\section{Methods}{
\describe{
\item{\code{signature(x = "ANY", y = "ANY")}}{Generic function: see
\code{\link[graphics]{plot}}.}
\item{\code{signature(x = "boundEst", y = "missing")}}{Plot
binary sequential boundaries for \code{x}.}
\item{\code{signature(x = "ANY")}}{Generic function: see
\code{\link[graphics]{points}}.}
\item{\code{signature(x = "boundEst")}}{Add points associated with the binary sequential boundaries for \code{x} to a plot.}
}
}
\details{
The default rcol vector are good colors for distinguishing for those with color blindness. Text is printed on the unused portion of the plot, which uses the color names
taken from the rcol vector names.
There are several different types of plots, selected by the \code{bplottype} argument, where the value is a character string with 2 characters, the first representing the
x-axis and the second representing the y-axis. For example \code{bplottype}='NS' denotes N=total number of trials on the horizontal axis, and S=number of successes on the vertical
axis. Other plots are: 'FS'=failure by successes; 'NB'=total by B-values; 'NZ'=total by Z-scores; 'NE'=total by estimates and confidence intervals. The type 'NE' is only defined
if there are only 1 value for each N on the upper and 1 value for each N on the lower part of the boundary. Otherwise, the confidence intervals would overlap and be uninformative.
For 'NE' the end of the boundary is not plotted because of that overlapping.
For some examples, see plot section of the vignette. The method points just calls \code{plot(x,newPlot=FALSE,\dots)}.
}
\examples{
b<-designOBF(50,theta0=.5)
plot(b,bplottype="NE")
plot(b)
b2<-designFixed(49,theta0=.5)
points(b2,rpch=c(17,17,17))
}
\keyword{methods}
|
#' Converting hierarchy specifications to a (signed) dummy matrix
#'
#' A matrix for mapping input codes (columns) to output codes (rows) are created.
#' The elements of the matrix specify how columns contribute to rows.
#'
#'
#' @param mapsFrom Character vector from hierarchy table
#' @param mapsTo Character vector from hierarchy table
#' @param sign Numeric vector of either 1 or -1 from hierarchy table
#' @param level Numeric vector from hierarchy table
#' @param mapsInput All codes in mapsFrom not in mapsTo (created automatically when NULL) and possibly other codes in input data.
#' @param inputInOutput When FALSE all output rows represent codes in mapsTo
#' @param keepCodes To prevent some codes to be removed when inputInOutput = FALSE
#' @param unionComplement When TRUE, sign means union and complement instead of addition or subtraction (see note)
#' @param reOrder When TRUE (FALSE is default) output codes are ordered differently, more similar to a usual model matrix ordering.
#'
#' @return
#' A sparse matrix with row and column and names
#' @export
#' @author Øyvind Langsrud
#' @import Matrix
#'
#' @note
#' With unionComplement = FALSE (default), the sign of each mapping specifies the contribution as addition or subtraction.
#' Thus, values above one and negative values in output can occur.
#' With unionComplement = TRUE, positive is treated as union and negative as complement. Then 0 and 1 are the only possible elements in the output matrix.
#'
#' @examples
#' # A hierarchy table
#' h <- SSBtoolsData("FIFA2018ABCD")
#'
#' DummyHierarchy(h$mapsFrom, h$mapsTo, h$sign, h$level)
#' DummyHierarchy(h$mapsFrom, h$mapsTo, h$sign, h$level, inputInOutput = TRUE)
#' DummyHierarchy(h$mapsFrom, h$mapsTo, h$sign, h$level, keepCodes = c("Portugal", "Spain"))
#'
#' # Extend the hierarchy table to illustrate the effect of unionComplement
#' h2 <- rbind(data.frame(mapsFrom = c("EU", "Schengen"), mapsTo = "EUandSchengen",
#' sign = 1, level = 3), h)
#'
#' DummyHierarchy(h2$mapsFrom, h2$mapsTo, h2$sign, h2$level)
#' DummyHierarchy(h2$mapsFrom, h2$mapsTo, h2$sign, h2$level, unionComplement = TRUE)
#'
#' # Extend mapsInput - leading to zero columns.
#' DummyHierarchy(h$mapsFrom, h$mapsTo, h$sign, h$level,
#' mapsInput = c(h$mapsFrom[!(h$mapsFrom %in% h$mapsTo)], "Norway", "Finland"))
#'
#' # DummyHierarchies
#' DummyHierarchies(FindHierarchies(SSBtoolsData("sprt_emp_withEU")[, c("geo", "eu", "age")]),
#' inputInOutput = c(FALSE, TRUE))
DummyHierarchy <- function(mapsFrom, mapsTo, sign, level, mapsInput = NULL, inputInOutput = FALSE, keepCodes = mapsFrom[integer(0)], unionComplement = FALSE, reOrder = FALSE) {
  mapsFrom <- as.character(mapsFrom) # Ensure character (if factor)
  mapsTo <- as.character(mapsTo) # Ensure character (if factor)
  # Default input codes: those that never appear as an aggregate (mapsTo).
  if (is.null(mapsInput)) 
    mapsInput <- mapsFrom[!(mapsFrom %in% mapsTo)]
  mapsInput <- sort(as.factor(unique(mapsInput)))
  # Start from an identity-like sparse matrix: one row per input code.
  m <- Matrix::t(fac2sparse(mapsInput))
  rownames(m) <- as.character(mapsInput) #dimnames(m)[[2]] = as.character(mapsInput)
  # Input rows are removed at the end unless inputInOutput is TRUE;
  # keepCodes protects individual input codes from that removal.
  dropInput <- rownames(m)
  if (length(keepCodes) > 0) 
    dropInput <- dropInput[!(dropInput %in% keepCodes)]
  nInput <- dim(m)[1]
  # Build the hierarchy level by level: each level maps the rows accumulated
  # so far (inputs plus lower-level aggregates) onto new aggregate codes.
  for (i in unique(sort(level))) {
    ri <- (level == i)
    mapsToi <- factor(mapsTo[ri])
    # levels = rownames(m): a mapsFrom code not produced earlier becomes NA.
    mapsFromi <- factor(mapsFrom[ri], levels = rownames(m))
    if (anyNA(mapsFromi)) {
      warning("Problematic hierarchy specification")
    }
    # Signed mapping for this level: rows = existing codes, cols = new codes.
    mNew <- Matrix(0, NROW(m), length(levels(mapsToi)), dimnames = list(levels(mapsFromi), levels(mapsToi)))
    mNew[cbind(as.integer(mapsFromi), as.integer(mapsToi))] <- sign[ri]
    # Accumulate: new rows are (signed) combinations of existing rows.
    # reOrder only changes whether new rows are prepended or appended.
    if(reOrder){
      if (unionComplement) 
        m <- rbind(CrossprodUnionComplement(mNew, m),m) # Better ordering
      else m <- rbind(Mult_crossprod(mNew, m),m) #rbind(crossprod(mNew, m),m)
    } else {
      if (unionComplement) 
        m <- rbind(m, CrossprodUnionComplement(mNew, m)) # Matrix::rBind(m, CrossprodUnionComplement(mNew,m))
      else m <- rbind(m, Mult_crossprod(mNew, m)) #rbind(m, crossprod(mNew, m)) # Matrix::rBind(m, crossprod(mNew,m))
    }
  }
  if (is.list(inputInOutput)) { # When list: Extended use of inputInOutput (hack)
    inputInOutput <- inputInOutput[[1]]
    if (is.character(inputInOutput)) {
      # A character vector selects (and orders) the output rows explicitly.
      ma <- match(inputInOutput, rownames(m))
      if (anyNA(ma)) {
        warning(paste("Output codes not found in the hierarchy result in empties:", 
                      paste(HeadEnd(inputInOutput[is.na(ma)]), collapse = ", ")))
        # Append all-zero rows for the requested-but-missing codes.
        m0 <- Matrix(0, sum(is.na(ma)), ncol(m))
        rownames(m0) <- inputInOutput[is.na(ma)]
        m <- rbind(m, m0)
        ma <- match(inputInOutput, rownames(m))
      }
      m <- m[ma, , drop = FALSE]
      return(m)
    }
  }
  # Default behaviour: drop the pure-input rows unless asked to keep them.
  if (!inputInOutput & length(dropInput) > 0) {
    keepRows <- rownames(m)[!(rownames(m) %in% dropInput)]
    m <- m[keepRows, , drop = FALSE]
  }
  m # TODO: raise a warning/error if the matrix contains anything other than 0, -1, 1?
}
#' @rdname DummyHierarchy
#' @details `DummyHierarchies` is a user-friendly wrapper for the original function `DummyHierarchy`.
#' Then, the logical input parameters are vectors (possibly recycled).
#' `mapsInput` and `keepCodes` can be supplied as attributes.
#' `mapsInput` will be generated when `data` is non-NULL.
#'
#'
#' @param hierarchies List of hierarchies
#' @param data data
#' @export
DummyHierarchies <- function(hierarchies, data = NULL, inputInOutput = FALSE, unionComplement = FALSE, reOrder = FALSE) {
  n <- length(hierarchies)
  # Recycle the per-hierarchy switches so a single value applies to all.
  inputInOutput <- rep_len(inputInOutput, n)
  unionComplement <- rep_len(unionComplement, n)
  reOrder <- rep_len(reOrder, n)
  for (i in seq_len(n)) {
    if (!is.null(data)) {
      # Derive the "mapsInput" attribute from the data; single-bracket
      # indexing keeps the element wrapped in a one-element list for
      # AddMapsInput and writes the result back in place.
      hierarchies[i] <- AddMapsInput(hierarchies[i], data)
    }
    # Replace each hierarchy table by its dummy matrix, in place.
    hierarchies[[i]] <- DummyHierarchy(mapsFrom = hierarchies[[i]]$mapsFrom, 
                                       mapsTo = hierarchies[[i]]$mapsTo, 
                                       mapsInput = attr(hierarchies[[i]], "mapsInput"), 
                                       keepCodes = attr(hierarchies[[i]], "keepCodes"), 
                                       sign = hierarchies[[i]]$sign, 
                                       level = hierarchies[[i]]$level, 
                                       inputInOutput = inputInOutput[i], 
                                       unionComplement = unionComplement[i], 
                                       reOrder = reOrder[i])
  }
  hierarchies
}
#' Converting hierarchy specifications to a (signed) dummy matrix
#'
#' A matrix for mapping input codes (columns) to output codes (rows) are created.
#' The elements of the matrix specify how columns contribute to rows.
#'
#'
#' @param mapsFrom Character vector from hierarchy table
#' @param mapsTo Character vector from hierarchy table
#' @param sign Numeric vector of either 1 or -1 from hierarchy table
#' @param level Numeric vector from hierarchy table
#' @param mapsInput All codes in mapsFrom not in mapsTo (created automatically when NULL) and possibly other codes in input data.
#' @param inputInOutput When FALSE all output rows represent codes in mapsTo
#' @param keepCodes To prevent some codes to be removed when inputInOutput = FALSE
#' @param unionComplement When TRUE, sign means union and complement instead of addition or subtraction (see note)
#' @param reOrder When TRUE (FALSE is default) output codes are ordered differently, more similar to a usual model matrix ordering.
#'
#' @return
#' A sparse matrix with row and column and names
#' @export
#' @author Øyvind Langsrud
#' @import Matrix
#'
#' @note
#' With unionComplement = FALSE (default), the sign of each mapping specifies the contribution as addition or subtraction.
#' Thus, values above one and negative values in output can occur.
#' With unionComplement = TRUE, positive is treated as union and negative as complement. Then 0 and 1 are the only possible elements in the output matrix.
#'
#' @examples
#' # A hierarchy table
#' h <- SSBtoolsData("FIFA2018ABCD")
#'
#' DummyHierarchy(h$mapsFrom, h$mapsTo, h$sign, h$level)
#' DummyHierarchy(h$mapsFrom, h$mapsTo, h$sign, h$level, inputInOutput = TRUE)
#' DummyHierarchy(h$mapsFrom, h$mapsTo, h$sign, h$level, keepCodes = c("Portugal", "Spain"))
#'
#' # Extend the hierarchy table to illustrate the effect of unionComplement
#' h2 <- rbind(data.frame(mapsFrom = c("EU", "Schengen"), mapsTo = "EUandSchengen",
#' sign = 1, level = 3), h)
#'
#' DummyHierarchy(h2$mapsFrom, h2$mapsTo, h2$sign, h2$level)
#' DummyHierarchy(h2$mapsFrom, h2$mapsTo, h2$sign, h2$level, unionComplement = TRUE)
#'
#' # Extend mapsInput - leading to zero columns.
#' DummyHierarchy(h$mapsFrom, h$mapsTo, h$sign, h$level,
#' mapsInput = c(h$mapsFrom[!(h$mapsFrom %in% h$mapsTo)], "Norway", "Finland"))
#'
#' # DummyHierarchies
#' DummyHierarchies(FindHierarchies(SSBtoolsData("sprt_emp_withEU")[, c("geo", "eu", "age")]),
#' inputInOutput = c(FALSE, TRUE))
DummyHierarchy <- function(mapsFrom, mapsTo, sign, level, mapsInput = NULL, inputInOutput = FALSE, keepCodes = mapsFrom[integer(0)], unionComplement = FALSE, reOrder = FALSE) {
mapsFrom <- as.character(mapsFrom) # Ensure character (if factor)
mapsTo <- as.character(mapsTo) # Ensure character (if factor)
if (is.null(mapsInput))
mapsInput <- mapsFrom[!(mapsFrom %in% mapsTo)]
mapsInput <- sort(as.factor(unique(mapsInput)))
m <- Matrix::t(fac2sparse(mapsInput))
rownames(m) <- as.character(mapsInput) #dimnames(m)[[2]] = as.character(mapsInput)
dropInput <- rownames(m)
if (length(keepCodes) > 0)
dropInput <- dropInput[!(dropInput %in% keepCodes)]
nInput <- dim(m)[1]
for (i in unique(sort(level))) {
ri <- (level == i)
mapsToi <- factor(mapsTo[ri])
mapsFromi <- factor(mapsFrom[ri], levels = rownames(m))
if (anyNA(mapsFromi)) {
warning("Problematic hierarchy specification")
}
mNew <- Matrix(0, NROW(m), length(levels(mapsToi)), dimnames = list(levels(mapsFromi), levels(mapsToi)))
mNew[cbind(as.integer(mapsFromi), as.integer(mapsToi))] <- sign[ri]
if(reOrder){
if (unionComplement)
m <- rbind(CrossprodUnionComplement(mNew, m),m) # Better ordering
else m <- rbind(Mult_crossprod(mNew, m),m) #rbind(crossprod(mNew, m),m)
} else {
if (unionComplement)
m <- rbind(m, CrossprodUnionComplement(mNew, m)) # Matrix::rBind(m, CrossprodUnionComplement(mNew,m))
else m <- rbind(m, Mult_crossprod(mNew, m)) #rbind(m, crossprod(mNew, m)) # Matrix::rBind(m, crossprod(mNew,m))
}
}
if (is.list(inputInOutput)) { # When list: Extended use of inputInOutput (hack)
inputInOutput <- inputInOutput[[1]]
if (is.character(inputInOutput)) {
ma <- match(inputInOutput, rownames(m))
if (anyNA(ma)) {
warning(paste("Output codes not found in the hierarchy result in empties:",
paste(HeadEnd(inputInOutput[is.na(ma)]), collapse = ", ")))
m0 <- Matrix(0, sum(is.na(ma)), ncol(m))
rownames(m0) <- inputInOutput[is.na(ma)]
m <- rbind(m, m0)
ma <- match(inputInOutput, rownames(m))
}
m <- m[ma, , drop = FALSE]
return(m)
}
}
if (!inputInOutput & length(dropInput) > 0) {
keepRows <- rownames(m)[!(rownames(m) %in% dropInput)]
m <- m[keepRows, , drop = FALSE]
}
m # Lage warnig/error om annet i matrisa enn 0, -1, 1 ?
}
#' @rdname DummyHierarchy
#' @details `DummyHierarchies` is a user-friendly wrapper for the original function `DummyHierarchy`.
#' Then, the logical input parameters are vectors (possibly recycled).
#' `mapsInput` and `keepCodes` can be supplied as attributes.
#' `mapsInput` will be generated when `data` is non-NULL.
#'
#'
#' @param hierarchies List of hierarchies
#' @param data data
#' @export
DummyHierarchies <- function(hierarchies, data = NULL, inputInOutput = FALSE, unionComplement = FALSE, reOrder = FALSE) {
n <- length(hierarchies)
inputInOutput <- rep_len(inputInOutput, n)
unionComplement <- rep_len(unionComplement, n)
reOrder <- rep_len(reOrder, n)
for (i in seq_len(n)) {
if (!is.null(data)) {
hierarchies[i] <- AddMapsInput(hierarchies[i], data)
}
hierarchies[[i]] <- DummyHierarchy(mapsFrom = hierarchies[[i]]$mapsFrom,
mapsTo = hierarchies[[i]]$mapsTo,
mapsInput = attr(hierarchies[[i]], "mapsInput"),
keepCodes = attr(hierarchies[[i]], "keepCodes"),
sign = hierarchies[[i]]$sign,
level = hierarchies[[i]]$level,
inputInOutput = inputInOutput[i],
unionComplement = unionComplement[i],
reOrder = reOrder[i])
}
hierarchies
} |
#' loglikelihood_SNF
#' @name loglikelihood_SNF
#' @aliases loglikelihood_SNF
#' @title loglikelihood_SNF
#' @param y indicator of whether i and j is connected
#' @param x1 variables of i
#' @param x2 variables of j
#' @param delta parameters
#' @param y_not complement of y
#' @return value of log likelihood
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
loglikelihood_SNF = function(y, x1, x2, delta, y_not){
  # Log-likelihood of the link indicators y under the strategic network
  # formation model: a link forms only if both sides agree, so the link
  # probability is Phi(x1 %*% delta) * Phi(x2 %*% delta).
  # y_not is the complement of y; callers may precompute it to save work.
  if (missing(y_not)) {
    y_not <- !y
  }
  link_prob <- pnorm(x1 %*% delta) * pnorm(x2 %*% delta)
  # Bernoulli log-likelihood in multiplicative form; the exponents y / y_not
  # switch each factor on or off per observation.
  loglik <- sum(log(link_prob^y * (1 - link_prob)^y_not))
  # Return a large finite penalty instead of -Inf/NaN so that numerical
  # optimizers (e.g. BFGS in maxLik) can keep iterating.
  if (!is.finite(loglik)) {
    return(-1e+20)
  }
  loglik
}
#' lik_grad_single_SNF
#' @name lik_grad_single_SNF
#' @aliases lik_grad_single_SNF
#' @title lik_grad_single_SNF
#' @param y indicator of whether i and j is connected
#' @param x1 variables of i
#' @param x2 variables of j
#' @param delta parameters
#' @param y_not complement of y
#' @return value of gradient of log likelihood
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
lik_grad_single_SNF = function(y, x1, x2, delta, y_not){
  # Gradient (score) of loglikelihood_SNF with respect to delta.
  # NOTE: y_not is accepted for interface symmetry with loglikelihood_SNF
  # but is not used in the computation.
  eta_i <- x1 %*% delta
  eta_j <- x2 %*% delta
  Phi_i <- pnorm(eta_i)
  Phi_j <- pnorm(eta_j)
  phi_i <- dnorm(eta_i)
  phi_j <- dnorm(eta_j)
  # Link probability requires agreement from both sides.
  p_link <- Phi_i * Phi_j
  # Per-observation weight (y - p) / (p (1 - p)); p of exactly 0 or 1 gives
  # NaN, which is zeroed so degenerate observations drop out of the score.
  w <- (y - p_link) / (p_link * (1 - p_link))
  w[is.nan(w)] <- 0
  # Chain rule: d p / d delta = Phi_j phi_i x1 + Phi_i phi_j x2, row-wise.
  score_rows <- as.vector(w) * (as.vector(Phi_j * phi_i) * x1 + as.vector(Phi_i * phi_j) * x2)
  as.vector(colSums(score_rows))
}
#' drawYstar
#' @name drawYstar
#' @aliases drawYstar
#' @title drawYstar
#' @param y indicator of whether i and j is connected
#' @param ystar_other latent value of j
#' @param mean x*delta
#' @param y_not complement of y
#' @param sd sd of the error
#' @return A numeric vector of newly drawn latent values, one element per observation
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
drawYstar = function(y, ystar_other, mean, y_not=!y, sd=1){
  # Gibbs draw of the latent utility ystar given the observed link y and the
  # partner's latent value ystar_other, under a (truncated) normal model.
  #
  # Three mutually exclusive cases partition the observations:
  #   1. link observed (y)                     -> ystar truncated to [0, Inf)
  #   2. no link, partner willing (other >= 0) -> ystar truncated to (-Inf, 0]
  #   3. no link, partner unwilling            -> ystar unconstrained normal
  ystar_other_positive = ystar_other>=0
  # Coerce y explicitly: using raw numeric 0/1 as a subscript would select
  # positions instead of masking rows (cases 2 and 3 already coerce).
  index_case1 = as.logical(y)
  index_case2 = as.logical(y_not * ystar_other_positive)
  index_case3 = as.logical(y_not * !ystar_other_positive)
  n1=sum(index_case1)
  n2=sum(index_case2)
  n3=sum(index_case3)
  ystar_new = rep(NA, length(y))
  # rtruncnorm recycles the vector-valued mean across the selected rows.
  if (n1>0)
    ystar_new[index_case1] = rtruncnorm(1,a=0,b=Inf, mean=mean[index_case1],sd=sd)
  if (n2>0)
    ystar_new[index_case2] =rtruncnorm(1,a=-Inf,b=0,mean=mean[index_case2],sd=sd)
  if (n3>0)
    ystar_new[index_case3] = mean[index_case3] +rnorm(n3,sd=sd)
  ystar_new
}
#' Strategy Network Formation
#' @name SNF
#' @rdname SNF
#' @aliases SNF
#' @aliases SNF.static.maxLik
#' @aliases SNF.static.mcmc
#' @aliases SNF.dynamic.mcmc
#' @title SNF
#' @param data data
#' @param method Estimation method, either "static.maxLik","static.mcmc","dynamic.mcmc". Default is "static.maxLik"
#' @param m m
#' @param last_estimation last_estimation
#' @param update_tau update_tau
#' @param tau tau
#' @param ... others argument.
#' @return SNF object
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
SNF = function(data , method=c("static.maxLik","static.mcmc","dynamic.mcmc"), ...){
  # Validate and complete the method name (partial matching is allowed);
  # an unknown method errors here, before any estimator is called.
  method = match.arg(method)
  # Dispatch to the chosen estimator. No default alternative is needed
  # because match.arg guarantees one of the three names. (A previous
  # trailing comma created an empty, missing default alternative.)
  switch(method,
    static.maxLik = SNF.static.maxLik(data, ...),
    static.mcmc = SNF.static.mcmc(data, ...),
    dynamic.mcmc = SNF.dynamic.mcmc(data, ...)
  )
}
#' @rdname SNF
#' @export
SNF.static.maxLik = function(data,...){
  tic()
  # Stack the pair-level design matrices and responses across all groups.
  self_data_matrix = do.call(rbind, lapply(data , function(z) z$self_data_matrix))
  friends_data_matrix = do.call(rbind, lapply(data , function(z) z$friends_data_matrix))
  # One response column per network.
  y = do.call(rbind, lapply(data, function(z) z$response_self))
  number_of_network = ncol(y)
  network_name = data[[1]]$network_name
  out = vector("list",number_of_network)
  summary_table = vector("list",number_of_network)
  # Estimate each network separately by maximum likelihood.
  for (i in 1:number_of_network){
    yy = y[,i]
    yy_not = !yy
    # Starting values: probit regression (no intercept) on the "self" side.
    start = glm(yy~self_data_matrix-1, family=binomial(link="probit"))$coef
    # Maximize loglikelihood_SNF with its analytic gradient via BFGS.
    out[[i]] = maxLik(function(z, ...) loglikelihood_SNF(delta=z, ...) , start=start , x1=self_data_matrix, x2=friends_data_matrix, y=yy, y_not=yy_not , grad= lik_grad_single_SNF, method="BFGS")
    summary_table[[i]] = generateSignificance(summary(out[[i]])$estimate[,1:2])
    # Prefix parameter names with the network name (custom %+% concatenation).
    rownames(summary_table[[i]]) = network_name[i] %+% "_" %+%colnames(self_data_matrix)
  }
  summary_table = do.call(rbind,summary_table)
  toc()
  out2 = list(out=out, summary_table=summary_table)
  class(out2) = "SNF.static.maxLik"
  out2
}
#' @rdname SNF
#' @export
SNF.static.mcmc = function(data, m=1000, last_estimation,...){
  # Gibbs sampler for the static strategic network formation model.
  # Each iteration draws (1) the latent utilities ystar1/ystar2 given delta
  # and Sigma, (2) delta by conjugate normal updates, and (3) the
  # cross-network correlation matrix Sigma (via rwish, normalized to unit
  # diagonal). Requires MASS::mvrnorm, rwish and the package helpers
  # find_normal_conditional_dist / drawYstar / tic / toc / %+%.
  self_data_matrix = do.call(rbind, lapply(data , function(z) z$self_data_matrix))
  friends_data_matrix = do.call(rbind, lapply(data , function(z) z$friends_data_matrix))
  y = do.call(rbind, lapply(data,"[[","response_self"))
  y_not = !y
  number_of_network = ncol(y)
  name = colnames(self_data_matrix)
  k = ncol(self_data_matrix)
  n = NROW(y)*2
  # Storage: one m-by-k draw matrix per network.
  delta_matrix = rep(list(matrix(0, nrow=m, ncol= k )), number_of_network)
  if (number_of_network>1){
    # Store the lower triangle of Sigma, labelled by network-pair index.
    number_col_Sigma_matrix = number_of_network*(number_of_network-1)/2
    Sigma_matrix = matrix(0, nrow=m, ncol = number_col_Sigma_matrix )
    sigma_name = genPairwiseIndex(number_of_network)
    sigma_name = sigma_name[,1] %+% sigma_name[,2]
    colnames(Sigma_matrix) = "Sigma_" %+% sigma_name
  } else{
    number_col_Sigma_matrix=1
    Sigma_matrix = matrix(0, nrow=m, ncol = number_col_Sigma_matrix )
    colnames(Sigma_matrix) = "Sigma_11"
  }
  network_name = data[[1]]$network_name
  for (i in 1:number_of_network){
    colnames(delta_matrix[[i]]) = network_name[[i]] %+% "_" %+% name
  }
  # Initial state; optionally warm-start from a previous run.
  ystar1 = matrix(0, nrow=nrow(y), ncol=ncol(y))
  ystar2 = matrix(0, nrow=nrow(y), ncol=ncol(y))
  Sigma = matrix(0.5,number_of_network,number_of_network)
  diag(Sigma) = 1
  delta = matrix(0, nrow=k, ncol=number_of_network)
  if (!missing(last_estimation)){
    ystar1 = last_estimation$ystar1
    ystar2 = last_estimation$ystar2
    delta = last_estimation$delta
    Sigma = last_estimation$Sigma
  }
  X = rbind(self_data_matrix, friends_data_matrix)
  XX_inv = solve(crossprod(X))
  xb1 = self_data_matrix %*% delta
  xb2 = friends_data_matrix %*% delta
  tic()
  for (i in 1:m){
    if (i %% 1000 == 0 )
      cat(i, ">\n")
    ## update ystar: truncated draws conditional on the other networks.
    for( j in 1:number_of_network){
      ystar1_demean = ystar1 - xb1
      ystar2_demean = ystar2 - xb2
      temp = find_normal_conditional_dist(a=ystar1_demean, i=j, j=-j, Sigma=Sigma)
      ystar1[,j] = drawYstar(y=y[,j] , ystar_other=ystar2[,j], mean=xb1[,j] + temp$mean, y_not= y_not[,j], sd= sqrt(temp$var) )
      temp = find_normal_conditional_dist(a= ystar2_demean, i=j, j=-j, Sigma=Sigma)
      ystar2[,j] = drawYstar(y=y[,j] , ystar_other=ystar1[,j], mean=xb2[,j] + temp$mean, y_not= y_not[,j], sd= sqrt(temp$var) )
    }
    ystar1_demean = ystar1 - xb1
    ystar2_demean = ystar2 - xb2
    ystar_demean = rbind(ystar1_demean,ystar2_demean)
    ystar = rbind(ystar1,ystar2)
    ## update delta: conjugate normal posterior per network.
    for ( j in 1:number_of_network){
      temp = find_normal_conditional_dist(a=ystar_demean, i=j, j=-j, Sigma=Sigma)
      beta_coef = XX_inv %*% crossprod(X, (ystar[,j]-temp$mean ) )
      delta[,j] = mvrnorm(n=1, mu=beta_coef, XX_inv * as.vector(temp$var) )
      # NOTE(review): this overwrites the full demeaned matrix with a single
      # column vector, so later networks in this loop condition on a vector
      # rather than the matrix -- looks suspicious; confirm intended.
      ystar_demean = ystar[,j] - X %*% delta[,j]
      delta_matrix[[j]][i,] = delta[,j]
    }
    xb1 = self_data_matrix %*% delta
    xb2 = friends_data_matrix %*% delta
    ystar1_demean = ystar1 - xb1
    ystar2_demean = ystar2 - xb2
    ystar_demean = rbind(ystar1_demean,ystar2_demean)
    ## Sigma: inverse-Wishart style draw, rescaled to a correlation matrix.
    if (number_of_network > 1 ){
      Sigma = solve( rwish(n , solve( crossprod(ystar_demean )) ) )
      normalization = diag(1/sqrt(diag(Sigma)))
      Sigma = normalization %*% Sigma %*% t(normalization)
      Sigma_matrix[i,] = Sigma[lower.tri(Sigma)]
    } else {
      Sigma = as.matrix(1)
      Sigma_matrix[i,] = 1
    }
  }
  toc()
  out = list(delta_matrix=delta_matrix, ystar1=ystar1,ystar2=ystar2, Sigma=Sigma,Sigma_matrix=Sigma_matrix, delta=delta)
  # Fix: prepend "SNF.static.mcmc" so that the S3 methods defined in this
  # file (merge/getParameterMatrix/summary.SNF.static.mcmc) actually
  # dispatch; the old class "network_formation.mcmc" is kept as a fallback
  # for any existing code that checks it.
  class(out) = c("SNF.static.mcmc", "network_formation.mcmc")
  out
}
#' merge.SNF.static.mcmc
#' @name merge.SNF.static.mcmc
#' @aliases merge.SNF.static.mcmc
#' @title merge.SNF.static.mcmc
#' @param x First object to merge with
#' @param y Second object to merge with
#' @param ... not used
#' @return A new SNF.static.mcmc object
#' @method merge SNF.static.mcmc
#' @export merge SNF.static.mcmc
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
merge.SNF.static.mcmc = function(x,y,...){
  # Concatenate the MCMC draws of two runs (x first, then y). All other
  # components (ystar, Sigma, delta, ...) are taken from the later run y.
  out = y
  if (is.list(x$delta_matrix)){
    # Multi-network fit: one draw matrix per network plus the Sigma draws.
    # seq_along() fixes the 1:length() pattern, which would iterate over
    # c(1, 0) for a (degenerate) empty list.
    for (i in seq_along(x$delta_matrix)){
      out$delta_matrix[[i]] = rbind(x$delta_matrix[[i]], y$delta_matrix[[i]])
    }
    out$Sigma_matrix = rbind(x$Sigma_matrix, y$Sigma_matrix)
  } else{
    out$delta_matrix = rbind(x$delta_matrix , y$delta_matrix)
  }
  out
}
#' Get a matrix of parameter
#' @name getParameterMatrix.SNF.static.mcmc
#' @aliases getParameterMatrix.SNF.static.mcmc
#' @title getParameterMatrix.SNF.static.mcmc
#' @param x SNF.static.mcmc
#' @param tail iterations to be used. Negative value: remove the first \code{tail} iterations. Positive value: keep the last \code{tail} iterations. If -1 < \code{tail} < 1, it represents a percentage of the iterations.
#' @param ... not used
#' @return A matrix
#' @method getParameterMatrix SNF.static.mcmc
#' @export getParameterMatrix SNF.static.mcmc
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
getParameterMatrix.SNF.static.mcmc = function(x, tail, ...){
  # Assemble a single draws-by-parameters matrix from the MCMC output.
  # Multi-network fits store one delta matrix per network plus the Sigma
  # draws; single-matrix fits are returned as-is.
  if (is.list(x$delta_matrix)) {
    params <- do.call(cbind, x$delta_matrix)
    params <- cbind(params, x$Sigma_matrix)
  } else {
    params <- x$delta_matrix
  }
  # Optionally restrict to a head/tail window of iterations (see @param tail).
  if (!missing(tail)) {
    params <- extractTail(params, tail)
  }
  params
}
#' Create a summary table
#' @name summary.SNF.static.mcmc
#' @aliases summary.SNF.static.mcmc
#' @title summary.SNF.static.mcmc
#' @param object SNF.static.mcmc object
#' @param ... tail: iterations to be used. Negative value: remove the first \code{tail} iterations. Positive value: keep the last \code{tail} iterations. If -1 < \code{tail} < 1, it represents a percentage of the iterations.
#' @return A summary table
#' @method summary SNF.static.mcmc
#' @export summary SNF.static.mcmc
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
summary.SNF.static.mcmc = function(object,...){
  # Delegates to computeSummaryTable(); "..." may carry e.g. "tail" to
  # restrict the summary to a window of iterations (see roxygen above).
  computeSummaryTable(object,...)
}
#' Create a summary table
#' @name summary.SNF.static.maxLik
#' @aliases summary.SNF.static.maxLik
#' @title summary.SNF.static.maxLik
#' @param object SNF.static.maxLik object
#' @param ... not used
#' @return A summary table
#' @method summary SNF.static.maxLik
#' @export summary SNF.static.maxLik
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
summary.SNF.static.maxLik = function(object,...){
  # The significance table was assembled at estimation time; just return it.
  object[["summary_table"]]
}
# single_network_formation_mcmc = function(data, m=1000, last_estimation){
# self_data_matrix = do.call(rbind, lapply(data , function(z) z$self_data_matrix))
# friends_data_matrix = do.call(rbind, lapply(data , function(z) z$friends_data_matrix))
# response = do.call(rbind, lapply(data, function(z) z$response_self))
# response_not = !response
# name = colnames(self_data_matrix)
# k = ncol(self_data_matrix)
# delta_matrix = matrix(0, nrow=m+1, ncol=k)
# ystar1 = rep(0, length(response))
# ystar2 = rep(0, length(response))
# network_name = data[[1]]$network_name
# if (!missing(last_estimation)){
# delta_matrix[1,] = tail(last_estimation$delta_matrix,1)
# ystar1 = last_estimation$ystar1
# ystar2 = last_estimation$ystar2
# }
# colnames(delta_matrix) = network_name %+% "_" %+% colnames(self_data_matrix)
# X = rbind(self_data_matrix, friends_data_matrix)
# XX_inv = solve(crossprod(X))
# tic()
# for (i in 1:m){
# if (i %% 1000 == 0 ){
# cat(i ,">\n")
# }
# xb1 = self_data_matrix %*% delta_matrix[i, ]
# xb2 = friends_data_matrix %*% delta_matrix[i, ]
# ystar1 = drawYstar(y=response , ystar_other=ystar2, mean=xb1, y_not= response_not)
# ystar2 = drawYstar(y=response, ystar_other=ystar1, mean=xb2, y_not= response_not)
# delta_matrix[i+1, ] = mvrnorm(n=1, mu=XX_inv %*% crossprod(X,c(ystar1,ystar2)), XX_inv)
# }
# toc()
# delta_matrix = tail(delta_matrix,-1)
# out = list(delta_matrix=delta_matrix, ystar1=ystar1, ystar2=ystar2)
# class(out) = "network_formation"
# out
# }
# lik_single_network_formation_parser = function(data, delta, network_id=1){
# loglikelihood_network_formation(
# x1=data$self_data_matrix,
# x2= data$friends_data_matrix,
# y=data$response1,
# delta
# if (network_id==1){
# return(
# )
# )
# } else if (network_id==2){
# return(
# loglikelihood_network_formation(
# x1=data$self_data_matrix,
# x2= data$friends_data_matrix,
# y=data$response2,
# delta
# )
# ) }
# })
# lik_single_network_formation_par = function(cl, delta, network_id=1, G=5){
# sum(
# parSapply(cl, 1:G,
# function(i,delta,network_id) lik_single_network_formation_parser(
# data[[i]],
# delta=delta,
# network_id=network_id
# ),
# delta=delta,
# network_id=network_id
# )
# )
# })
# lik_grad_single_network_formation_parser = function(data, delta, network_id=1){
# if (network_id==1){
# return(
# lik_grad_single_network_formation(
# x1=data$self_data_matrix,
# x2= data$friends_data_matrix,
# y=data$response1,
# delta
# )
# )
# } else if (network_id==2){
# return(
# lik_grad_single_network_formation(
# x1=data$self_data_matrix,
# x2= data$friends_data_matrix,
# y=data$response2,
# delta
# )
# ) }
# })
# lik_grad_single_network_formation_par = function(cl, delta, network_id=1, G=5){
# rowSums(
# parSapply(cl, 1:G,
# function(i,delta,network_id) lik_grad_single_network_formation_parser(
# data[[i]],
# delta=delta,
# network_id=network_id
# ),
# delta=delta,
# network_id=network_id
# )
# )
# })
# single_network_formation = function(data, network_id=1){
# tic()
# require("maxLik")
# self_data_matrix = do.call(rbind, lapply(data , function(z) z$self_data_matrix))
# friends_data_matrix = do.call(rbind, lapply(data , function(z) z$friends_data_matrix))
# if (network_id==1){
# response = unlist(lapply(data, function(z) z$response1))
# } else if (network_id==2){
# response = unlist(lapply(data, function(z) z$response2))
# }
# start = rep(0,ncol(self_data_matrix))
# system.time({
# out= maxLik(function(z, ...) loglikelihood_network_formation(delta=z, ...) , start=start , self_data_matrix=self_data_matrix, friends_data_matrix=friends_data_matrix, response=response , grad= lik_grad_single_network_formation, method="BFGS")
# })
# summary_table = generateSignificance(summary(out)$estimate[,1:2])
# rownames(summary_table) = colnames(self_data_matrix)
# toc()
# list(out, summary_table)
# })
# single_network_formation_parallel = function(data, cl, network_id=1){
# tic()
# name = colnames(data[[1]]$self_data_matrix)
# start = rep(0,length(name))
# out= maxLik(
# lik_single_network_formation_par,
# start=start ,
# cl=cl,
# G=length(data),
# network_id=network_id ,
# grad= lik_grad_single_network_formation_par,
# method="BFGS"
# )
# summary_table = summary(out)$estimate
# rownames(summary_table) = name
# toc()
# list(maxLik_object=out, summary_table=generateSignificance(summary_table[,1:2]))
# })
# single_network_formation_mcmc_v1 = function(start, tau, m, cl, network_id, G){
# k = length(start)
# delta_matrix = matrix(0, nrow=m+1, ncol=k)
# delta_matrix[1, ] = start
# update_rate=0
# for (i in 1:m){
# metro_obj =
# metropolis2(
# beta_previous=delta_matrix[i,],
# tau=tau,
# likelihoodFunction=lik_single_network_formation_par,
# cl=cl,
# network_id=network_id,
# G=G
# )
# delta_matrix[i+1,] = metro_obj$beta
# update_rate = update_rate + metro_obj$update
# }
# delta_matrix = tail(delta_matrix,-1)
# update_rate =update_rate / m
# next_tau = tau * ifelse(update_rate==0,0.1,update_rate) / 0.27
# return(list(delta_matrix = delta_matrix, update_rate =update_rate, next_parameter = tail(delta_matrix,1), tau=tau, next_tau = next_tau))
# })
# single_network_formation_mcmc_v2 = function(start, tau, m, cl, network_id, G){
# k = length(start)
# delta_matrix = matrix(0, nrow=m+1, ncol=k)
# delta_matrix[1, ] = start
# update_rate=0
# for (i in 1:m){
# metro_obj =
# metropolis(
# beta_previous=delta_matrix[i,],
# tau=tau,
# likelihoodFunction=lik_single_network_formation_par,
# cl=cl,
# network_id=network_id,
# G=G
# )
# delta_matrix[i+1,] = metro_obj$beta
# update_rate = update_rate + metro_obj$update
# }
# delta_matrix = tail(delta_matrix,-1)
# update_rate =update_rate / m
# next_tau = tau * ifelse(update_rate==0,0.1,update_rate) / 0.27
# return(list(delta_matrix = delta_matrix, update_rate =update_rate, next_parameter = tail(delta_matrix,1), tau=tau, next_tau = next_tau))
# })
## method 1 : update delta as vector
## method 2 : update delta one by one. Method 2 is more efficient, because it reduces the call to the likelihood function by half.
# single_network_formation_mcmc_RE = function(data, network_id, m=1000, last_estimation){
# self_data_matrix = do.call(rbind, lapply(data , function(z) z$self_data_matrix))
# friends_data_matrix = do.call(rbind, lapply(data , function(z) z$friends_data_matrix))
# response = as.logical(unlist(lapply(data, function(z) z$response[[network_id]])))
# response_not = !response
# n = sapply(data, function(z) length(z$y))
# n2 = sapply(data, function(z) length(z$response[[network_id]]))
# name = colnames(self_data_matrix)
# k = ncol(self_data_matrix)
# delta_matrix = matrix(0, nrow=m+1, ncol=k)
# sigma2e_matrix = matrix(1, nrow=m+1, ncol=1)
# ystar1 = rep(0, length(response))
# ystar2 = rep(0, length(response))
# e = rep(0,sum(n)) #rnorm(sum(n))
# if (!missing(last_estimation)){
# delta_matrix[1,] = tail(last_estimation$delta_matrix,1)
# sigma2e_matrix[1,] = tail(last_estimation$sigma2e_matrix,1)
# ystar1 = last_estimation$ystar1
# ystar2 = last_estimation$ystar2
# e = last_estimation$e
# }
# colnames(delta_matrix) = colnames(self_data_matrix)
# X = rbind(self_data_matrix, friends_data_matrix)
# XX_inv = solve(crossprod(X))
# full_group_index = genFullGroupIndex(data)
# full_position_index = genFullPositionIndex(data)
# full_position_matrix = genFullPositionMatrix(data)
# row_sums_full_position_matrix = rowSums(full_position_matrix)
# tic()
# for (i in 1:m){
# if (i %% 1000 == 0 ){
# cat(i ,">\n")
# }
# full_e = genFulle(e,full_group_index )
# xb1 = self_data_matrix %*% delta_matrix[i, ] + full_e$e_i
# xb2 = friends_data_matrix %*% delta_matrix[i, ] + full_e$e_j
# # update ystar
# ystar1 = drawYstar(y=response , ystar_other=ystar2, mean=xb1, y_not= response_not)
# ystar2 = drawYstar(y=response, ystar_other=ystar1, mean=xb2, y_not= response_not)
# # update delta
# mu_delta = XX_inv %*% crossprod(X,c(ystar1-full_e$e_i,ystar2-full_e$e_j))
# delta_matrix[i+1, ] = mvrnorm(n=1, mu=mu_delta, XX_inv)
# # update e
# # actually i dont need previous e, just need ystar1-xb1, ystar2-xb2 and the correct position.
# #
# residual = c(ystar1, ystar2) - X %*% delta_matrix[i+1,]
# # mean of residual by individual
# var_e = 1 / (row_sums_full_position_matrix + sigma2e_matrix[i,])
# mean_e = as.numeric( full_position_matrix %*% residual ) * var_e
# e<-rnorm(sum(n),mean_e,sqrt(var_e))
# sigma2e_matrix[i+1,]<-1/rgamma(1,length(e)/2,crossprod(e,e)/2)
# }
# toc()
# delta_matrix = tail(delta_matrix,-1)
# sigma2e_matrix= tail(sigma2e_matrix,-1)
# out = list(ystar1=ystar1, ystar2=ystar2,e=e,delta_matrix=delta_matrix, sigma2e_matrix=sigma2e_matrix)
# class(out) = "single_network_formation_RE"
# out
# })
# single_network_formation_mcmc_parallel = function(data,cl, network_id, m=1000, last_estimation){
# name = colnames(data[[1]]$self_data_matrix)
# k = ncol(data[[1]]$self_data_matrix)
# n2 = sapply(data,function(z) length(z$response1))
# G = length(data)
# delta_matrix = matrix(0, nrow=m+1, ncol=k)
# ystar1 = lapply(n2, rep,x=0 )
# ystar2 = lapply(n2, rep,x=0 )
# if (!missing(last_estimation)){
# delta_matrix[1,] = tail(last_estimation$delta_matrix,1)
# ystar1 = tail(last_estimation$ystar1)
# ystar2 = tail(last_estimation$ystar2)
# }
# colnames(delta_matrix) = name
# X = rbind(
# do.call(rbind,lapply(data,"[[",i="self_data_matrix")),
# do.call(rbind,lapply(data,"[[",i="friends_data_matrix"))
# )
# XX_inv = solve(crossprod(X))
# tic()
# for (i in 1:m){
# ystar1=
# parLapply(cl, 1:G, function(z, ystar2, network_id,delta) {
# drawYstar(
# y= data[[z]]$response[[network_id]],
# ystar_other = ystar2[[z]],
# mean = data[[z]]$self_data_matrix %*% delta
# )},
# delta = delta_matrix[i,],
# network_id = network_id,
# ystar2=ystar2
# )
# ystar2=
# parLapply(cl, 1:G, function(z, ystar1, network_id, delta) {
# drawYstar(
# y= data[[z]]$response[[network_id]],
# ystar_other = ystar1[[z]],
# mean = data[[z]]$friends_data_matrix %*% delta
# )},
# delta = delta_matrix[i,],
# network_id = network_id,
# ystar1=ystar1
# )
# delta_matrix[i+1, ] = mvrnorm(n=1, mu=XX_inv %*% crossprod(X,c(unlist(ystar1), unlist(ystar2))), XX_inv)
# }
# toc()
# delta_matrix = tail(delta_matrix,-1)
# plotmcmc(delta_matrix,remove=remove)
# print(computeSummaryTable(delta_matrix, remove=remove))
# list(delta_matrix=delta_matrix, ystar1=ystar1, ystar2=ystar2)
# })
# drawYstar_multi = function(y, ystar_other, demean_ystar_corr, mean , y_not=!y, rho){
# mean = mean + rho * (demean_ystar_corr)
# sd = sqrt(1-rho^2)
# ystar_other_positive = ystar_other>=0
# index_case1 = y
# index_case2 = as.logical(y_not * ystar_other_positive)
# index_case3 = as.logical(y_not * !ystar_other_positive)
# n = length(y)
# n1=sum(index_case1)
# n2=sum(index_case2)
# n3=sum(index_case3)
# stopifnot(n==n1+n2+n3)
# ystar_new = rep(NA, length(y))
# if (n1>0)
# ystar_new[index_case1] = rtruncnorm(1,a=0,b=Inf, mean=mean[index_case1], sd=sd)
# if (n2>0)
# ystar_new[index_case2] =rtruncnorm(1,a=-Inf,b=0,mean=mean[index_case2], sd=sd)
# if (n3>0)
# ystar_new[index_case3] = mean[index_case3] +rnorm(n3, sd=sd)
# stopifnot(!any(is.nan(ystar_new)))
# ystar_new
# })
# multi_network_formation_mcmc_RE = function(data, m=1000, last_estimation){
# self_data_matrix = do.call(rbind, lapply(data , function(z) z$self_data_matrix))
# friends_data_matrix = do.call(rbind, lapply(data , function(z) z$friends_data_matrix))
# y1 = as.logical(unlist(lapply(data, function(z) z$response1)))
# y2 = as.logical(unlist(lapply(data, function(z) z$response2)))
# y1_not = !y1
# y2_not = !y2
# name = colnames(self_data_matrix)
# k = ncol(self_data_matrix)
# n = length(y1)
# delta1_matrix = matrix(0, nrow=m+1, ncol=k)
# delta2_matrix = matrix(0, nrow=m+1, ncol=k)
# rho_matrix = matrix(0, nrow=m+1,ncol=1)
# ystar11 = rep(0, length(y1))
# ystar12 = rep(0, length(y1))
# ystar21 = rep(0, length(y2))
# ystar22 = rep(0, length(y2))
# e1 = rep(0,sum(sapply(data, "[[", "n")))
# e2 = rep(0,sum(sapply(data, "[[", "n")))
# sigma2e1_matrix = matrix(1, nrow=m+1,ncol=1)
# sigma2e2_matrix = matrix(1, nrow=m+1,ncol=1)
# colnames(delta1_matrix) = name
# colnames(delta2_matrix) = name
# if (!missing(last_estimation)){
# delta1_matrix[1,] = tail(last_estimation$delta1,1)
# delta2_matrix[1,] = tail(last_estimation$delta2,1)
# rho_matrix[1,] = tail(last_estimation$rho,1)
# ystar11 = last_estimation$ystar11
# ystar12 = last_estimation$ystar12
# ystar21 = last_estimation$ystar21
# ystar22 = last_estimation$ystar22
# e1 = last_estimation$e1
# e2 = last_estimation$e2
# }
# X = rbind(self_data_matrix, friends_data_matrix)
# XX_inv = solve(crossprod(X))
# # xb11 = self_data_matrix %*% delta1_matrix[1, ]
# # xb12 = friends_data_matrix %*% delta1_matrix[1, ]
# # xb21 = self_data_matrix %*% delta2_matrix[1, ]
# # xb22 = friends_data_matrix %*% delta2_matrix[1, ]
# full_group_index = genFullGroupIndex(data)
# full_position_index = genFullPositionIndex(data)
# full_position_matrix = genFullPositionMatrix(data)
# row_sums_full_position_matrix = rowSums(full_position_matrix)
# tic()
# for (i in 1:m){
# if (i %% 1000 == 0 )
# cat(i, ">\n")
# rho = rho_matrix[i, 1]
# full_e1 = genFulle(e1, full_group_index )
# full_e2 = genFulle(e2, full_group_index )
# xb11 = self_data_matrix %*% delta1_matrix[i, ] + full_e1$e_i
# xb12 = friends_data_matrix %*% delta1_matrix[i, ] + full_e1$e_j
# xb21 = self_data_matrix %*% delta2_matrix[i, ] + full_e2$e_i
# xb22 = friends_data_matrix %*% delta2_matrix[i, ] + full_e2$e_j
# ystar11 = drawYstar_multi(y=y1 , ystar_other=ystar12, demean_ystar_corr=ystar21-xb21 ,mean=xb11, y_not= y1_not, rho=rho)
# ystar12 = drawYstar_multi(y=y1, ystar_other=ystar11, demean_ystar_corr=ystar22-xb22, mean=xb12, y_not= y1_not, rho=rho)
# ystar21 = drawYstar_multi(y=y2 , ystar_other=ystar22, demean_ystar_corr=ystar11-xb11, mean=xb21, y_not= y2_not, rho=rho)
# ystar22 = drawYstar_multi(y=y2, ystar_other=ystar21, demean_ystar_corr=ystar12-xb12, mean=xb22, y_not= y2_not, rho=rho)
# ystar1 = c(ystar11,ystar12)
# ystar2 = c(ystar21,ystar22)
# ystar2_demean = ystar2 - c(xb21,xb22) - c(full_e2$e_i,full_e2$e_j)
# new_y1 = ystar1 - rho*ystar2_demean - c(full_e1$e_i,full_e1$e_j)
# lm1 = myFastLm(X, new_y1)
# delta1_matrix[i+1, ] = mvrnorm(n=1, mu=lm1$coef, (lm1$cov)/(lm1$s^2) * (1-rho^2) )
# xb11 = self_data_matrix %*% delta1_matrix[i+1, ]
# xb12 = friends_data_matrix %*% delta1_matrix[i+1, ]
# ystar1_demean = ystar1 - c(xb11,xb12) - c(full_e1$e_i,full_e1$e_j)
# new_y2 = ystar2 - rho*ystar1_demean - c(full_e2$e_i,full_e2$e_j)
# lm2 = myFastLm(X, new_y2 )
# delta2_matrix[i+1, ] = mvrnorm(n=1, mu=lm2$coef, (lm2$cov)/(lm2$s^2) * (1-rho^2) )
# xb21 = self_data_matrix %*% delta2_matrix[i+1, ]
# xb22 = friends_data_matrix %*% delta2_matrix[i+1, ]
# ystar2_demean= ystar2 - c(xb21,xb22)- c(full_e2$e_i,full_e2$e_j)
# # update rho
# # var_rho = (1-rho^2)/ sum((ystar1_demean)^2)
# # mean_rho = var_rho / (1-rho^2) * crossprod(ystar1_demean, ystar2_demean )
# # mean_rho = cov(ystar1_demean,ystar2_demean)
# # var_rho = (mean(ystar1_demean^2* ystar2_demean^2 ) - mean( ystar1_demean * ystar2_demean )^2) / 2/n
# mean_rho = mean(ystar1_demean * ystar2_demean)
# var_rho = var(ystar1_demean * ystar2_demean)/2/n
# rho_matrix[i+1,1 ] = rtruncnorm(1,mean= mean_rho, sd= sqrt(var_rho),a=-.999,b=.999)
# # update e1
# residual1 = ystar1 - rho*ystar2_demean - c(full_e1$e_i,full_e1$e_j) - c(xb11,xb12)
# residual2 = ystar2 - rho*ystar1_demean - c(full_e2$e_i,full_e2$e_j) - c(xb21,xb22)
# # mean of residual by individual
# var_e1 = 1 / (row_sums_full_position_matrix + sigma2e1_matrix[i,])
# mean_e1 = as.numeric( full_position_matrix %*% residual1 ) * var_e1
# e1<-rnorm(length(e1),mean_e1,sqrt(var_e1))
# sigma2e1_matrix[i+1,]<-1/rgamma(1,length(e1)/2,crossprod(e1,e1)/2)
# var_e2 = 1 / (row_sums_full_position_matrix + sigma2e2_matrix[i,])
# mean_e2 = as.numeric( full_position_matrix %*% residual2 ) * var_e2
# e2<-rnorm(length(e2),mean_e2,sqrt(var_e2))
# sigma2e2_matrix[i+1,]<-1/rgamma(1,length(e2)/2,crossprod(e2,e2)/2)
# # cat(i, "> ", rho_matrix[i+1,],"\n")
# }
# toc()
# delta1_matrix = tail(delta1_matrix,-1)
# delta2_matrix = tail(delta2_matrix,-1)
# rho_matrix = tail(rho_matrix,-1)
# sigma2e1_matrix = tail(sigma2e1_matrix,-1)
# sigma2e2_matrix = tail(sigma2e2_matrix,-1)
# out = list(delta1_matrix=delta1_matrix, delta2_matrix=delta2_matrix, rho_matrix=rho_matrix, ystar11=ystar11, ystar12=ystar12, ystar21=ystar21, ystar22=ystar22, e1=e1, e2=e2, sigma2e1_matrix=sigma2e1_matrix, sigma2e2_matrix=sigma2e2_matrix)
# class(out) = "multi_network_formation_RE"
# out
# })
# plotmcmc.single_network_formation_RE = function(x, tail=-0.2){
# data_matrix = cbind(x$delta, x$sigma2e_matrix)
# colnames(data_matrix) = c(colnames(x$delta), "Sigma2")
# plotmcmc.default(data_matrix, tail=tail)
# })
# merge.single_network_formation_RE = function(x,y,...){
# out = y
# out$delta_matrix = rbind(x$delta_matrix , y$delta_matrix)
# out$sigma2e_matrix = rbind(x$sigma2e_matrix , y$sigma2e_matrix)
# out
# })
# getParameterMatrix.multi_network_formation = function(x){
# out = do.call(cbind, x$delta_matrix)
# out = cbind(out, x$Sigma_matrix )
# out
# }
# merge.multi_network_formation = function(x,y){
# out = y
# for (i in 1:length(x$delta_matrix)){
# out$delta_matrix[[i]] = rbind(x$delta_matrix[[i]], y$delta_matrix[[i]] )
# }
# out$Sigma_matrix = rbind(x$Sigma_matrix, y$Sigma_matrix)
# out
# })
# plotmcmc.multi_network_formation_RE = function(x, tail=-0.2){
# data_matrix = cbind(x$delta1_matrix, x$delta2_matrix, x$rho_matrix, x$sigma2e1_matrix, x$sigma2e2_matrix)
# colnames(data_matrix) = c(colnames(x$delta1_matrix), colnames(x$delta2_matrix), "rho", "sigma2e1", "sigma2e2")
# plotmcmc.default(data_matrix, tail=tail)
# })
# merge.multi_network_formation_RE = function(x,y){
# out = y
# out$delta1_matrix = rbind(x$delta1_matrix , y$delta1_matrix)
# out$delta2_matrix = rbind(x$delta2_matrix , y$delta2_matrix)
# out$rho_matrix = rbind(x$rho_matrix , y$rho_matrix)
# out$sigma2e1_matrix = rbind(x$sigma2e1_matrix , y$sigma2e1_matrix)
# out$sigma2e2_matrix = rbind(x$sigma2e2_matrix , y$sigma2e2_matrix)
# out
# })
##############################################################################################################################
##############################################################################################################################
##############################################################################################################################
## Given a seq, beta, D, compute the likelihood
## Given a seq m n*(n-1)/2 x 2 matrix
## D: n by n network matrix
## U_xb : an n by n utility matrix, i,j element is the utility of i to make friends with j. (Xbeta)
## delta1 delta2
#' Draw random sample of meeting sequence
#'
#' Randomly perturbs a meeting sequence by swapping the positions of a small
#' fraction of its entries. Recurses over lists (one perturbation per element),
#' permutes elements of a vector, and permutes rows of a matrix.
#' @name DrawSeqSample
#' @aliases DrawSeqSample
#' @title DrawSeqSample
#' @param x A meeting sequence: a vector, a matrix (one meeting per row), or a
#'   list of such objects (one per network/group).
#' @param p Proportion of entries (or rows) to reshuffle; either a scalar
#'   applied to every element or, when \code{x} is a list, a vector with one
#'   proportion per element. Defaults to 0.01.
#' @return An object of the same shape as \code{x} with a random subset of its
#'   entries (vector) or rows (matrix) permuted among themselves. Inputs that
#'   are neither list, vector, nor matrix return \code{NULL} invisibly.
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
DrawSeqSample = function(x , p =0.01){
  if (is.list(x)){
    out = vector("list",length(x))
    # Recycle a scalar p across all list elements.
    if (length(p)==1)
      p = rep(p,length(x))
    for (i in 1:length(x)){
      out[[i]] = DrawSeqSample(x[[i]], p[[i]])
    }
    return(out)
  } else if (is.vector(x)){
    n = length(x)
    # At least 2 entries must move for a swap to be possible, but never more
    # than n (sample() errors if asked for more than the population size).
    pn = min(n, pmax(2,ceiling(n * p) ))
    to_change = sample(n,pn)
    reorder = sample(to_change)
    x[to_change] = x[reorder]
    return(x)
  } else if (is.matrix(x)){
    n = nrow(x)
    # Same bounds as the vector case, applied to rows.
    pn = min(n, pmax(2,ceiling(n * p) ))
    to_change = sample(n,pn)
    reorder = sample(to_change)
    x[to_change,] = x[reorder,]
    return(x)
  }
}
# repeat
# computeNetworkSummary_r = function(seq_m,D){
# n = nrow(D)
# D0 = matrix(0,ncol=n,nrow=n)
# degree = matrix(0,ncol=n,nrow=n)
# common_frds_1 = matrix(0,ncol=n,nrow=n)
# common_frds_2 = matrix(0,ncol=n,nrow=n)
# nn = n*(n-1)/2
# for ( i in 1:nn){
# index = seq_m[i,]
# index1 = index[1]
# index2 = index[2]
# if (D[index1,index2]==1) {
# D0[index1,index2] = D0[index2,index1]= 1
# }
# d1 = D0[index1,]
# d2 = D0[index2,]
# degree[index1,index2] = sum(d1)
# degree[index2,index1] = sum(d2)
# common_frds_1[index1,index2] = sum(d1*d2)
# }
# lower_tri = lower.tri(degree)
# degree1 = degree[ lower_tri ]
# degree2 = t(degree)[ lower_tri ]
# common_frds_1 = common_frds_1[ lower_tri ]
# list(self=cbind(degree1,degree1^2,common_frds_1), friends=cbind(degree2,degree2^2,common_frds_1))
# })
# src=
# '
# arma::mat seq_m2 = Rcpp::as<arma::mat>(seq_m);
# arma::mat DD = Rcpp::as<arma::mat>(D);
# int nn = DD.n_rows;
# arma::mat D00 = arma::zeros(nn,nn);
# arma::mat degreee = arma::zeros(nn,nn);
# arma::mat common_frds_11 = arma::zeros(nn,nn);
# for ( int i=0 ; i<nn*(nn-1)/2; i++ ){
# int index1 = seq_m2(i,0) -1 ;
# int index2 = seq_m2(i,1) -1;
# if (DD(index1,index2)==1) {
# D00(index1,index2) = 1;
# D00(index2,index1) = 1;
# }
# degreee(index1,index2) = sum(D00.col(index1)) ;
# degreee(index2,index1) = sum(D00.col(index2)) ;
# common_frds_11(index1,index2) = arma::as_scalar(D00.col(index1).t() * D00.col(index2)) ;
# }
# return Rcpp::List::create(Rcpp::Named("degree")=degreee, Rcpp::Named("common_frds_1")=common_frds_11);
# '
# g <- cxxfunction(signature(seq_m="integer", D="integer"),
# plugin="RcppArmadillo",
# body=src)
# computeNetworkSummary_cxx <- cxxfunction(
# signature(seq_m="integer", D="integer"),
# plugin="RcppArmadillo",
# body=
# '
# arma::mat seq_m2 = Rcpp::as<arma::mat>(seq_m);
# arma::mat DD = Rcpp::as<arma::mat>(D);
# int nn = DD.n_rows;
# arma::mat D00 = arma::zeros(nn,nn);
# arma::mat degreee = arma::zeros(nn,nn);
# arma::mat common_frds_11 = arma::zeros(nn,nn);
# for ( int i=0 ; i<nn*(nn-1)/2; i++ ){
# int index1 = seq_m2(i,0) -1 ;
# int index2 = seq_m2(i,1) -1;
# if (DD(index1,index2)==1) {
# D00(index1,index2) = 1;
# D00(index2,index1) = 1;
# }
# degreee(index1,index2) = sum(D00.col(index1)) ;
# degreee(index2,index1) = sum(D00.col(index2)) ;
# common_frds_11(index1,index2) = arma::as_scalar(D00.col(index1).t() * D00.col(index2)) ;
# }
# arma::mat out1 = arma::zeros(nn*(nn-1)/2, 3);
# arma::mat out2 = arma::zeros(nn*(nn-1)/2, 3);
# int k = 0;
# for ( int j=0 ; j < nn ; j++){
# for ( int i=j+1 ; i< nn ; i++){
# out1(k,0) = arma::as_scalar( degreee(i,j) );
# out1(k,1) = arma::as_scalar( degreee(i,j)*degreee(i,j) );
# out1(k,2) = arma::as_scalar( common_frds_11(i,j) );
# out2(k,0) = arma::as_scalar( degreee(j,i) );
# out2(k,1) = arma::as_scalar( degreee(j,i)*degreee(j,i) );
# out2(k,2) = arma::as_scalar( common_frds_11(i,j) );
# k++;
# }
# }
# return Rcpp::List::create(Rcpp::Named("self")=out1, Rcpp::Named("friends")=out2);
# '
# )
# q1=computeNetworkSummary_r(seq_m[[1]],D[[1]])
# q2=computeNetworkSummary_cxx(seq_m[[1]],D[[1]])
# all.equal(q1$self,q2$self,check.attributes=F)
# all.equal(q1$friends,q2$friends,check.attributes=F)
# benchmark(
# b={
# q2 = computeNetworkSummary_cxx(seq_m[[1]],D[[1]])
# }
# )
# all.equal(q1$self,q2$self,check.attributes=F)
# all.equal(q1$friends,q2$friends,check.attributes=F)
################################
# library(Matrix)
# load("model_data.rData")
# library(Matrix)
# library(rbenchmark)
# library(compiler)
# D = (data[[1]]$W!=0) + 0
# n=nrow(D)
# index_table = data[[1]]$group_index
# seq_m = index_table[sample(nn,nn),]
# benchmark(
# a={
# q1= f(seq_m=seq_m, D=D)
# }
# ,b={
# q2= g(seq_m=seq_m, D=D)
# }
# )
# all(q1[[1]]==q2[[1]])
# all(q1[[2]]==q2[[2]])
#' computeNetworkSummary
#'
#' Computes sequential network statistics (degree, squared degree, and number
#' of common friends) for each pair in the meeting sequence, for every group,
#' and stacks the per-group results row-wise.
#' @name computeNetworkSummary
#' @aliases computeNetworkSummary
#' @title computeNetworkSummary
#' @param seq_m A list of meeting sequences (one matrix per group), each with
#'   one pair of indices per row.
#' @param D A list of adjacency matrices, one per group, aligned with
#'   \code{seq_m}.
#' @return A list with components \code{self} and \code{friends}: matrices with
#'   columns \code{degree}, \code{degree_seq}, and \code{common_frds}, rows
#'   stacked across groups.
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
computeNetworkSummary=function(seq_m, D){
  # computeNetworkSummary_cxx returns list(self=..., friends=...), so mapply
  # yields a 2 x G matrix of lists: row 1 holds the "self" matrices, row 2 the
  # "friends" matrices.
  out = mapply(function(x,y) computeNetworkSummary_cxx(seq_m=x, D=y), x= seq_m, y=D )
  self = do.call(rbind, out[1,] )
  # BUG FIX: friends must be built from row 2 (the "friends" component);
  # previously it duplicated row 1, making friends identical to self.
  friends = do.call(rbind, out[2,] )
  colnames(self) = c("degree","degree_seq","common_frds")
  colnames(friends) = c("degree","degree_seq","common_frds")
  list(self=self,friends=friends)
}
# q1 = computeNetworkSummary(seq_m,D)
#############################################
# SNF_single_mcmc = function(m, data, last_estimation, update_tau=TRUE,tau=0.0005){
# # if (network_id==1){
# # D = lapply(data, function(z) z$W!=0 )
# # y = do.call(c, lapply(data, "[[", "response1"))
# # y_not = !y
# # } else{
# # D = lapply(data, function(z) z$W2!=0 )
# # y = do.call(c, lapply(data, "[[", "response2"))
# # y_not = !y
# # }
# D = lapply(data, function(z) z$D_list[[1]])
# y = do.call(c, lapply(data,"[[","response_self") )
# y_not = !y
# n= sapply(data,"[[","n")
# n2 = sapply(data, function(z) length(z$response_self))
# x1 = do.call(rbind, lapply(data, "[[" , "self_data_matrix") )
# x2 = do.call(rbind, lapply(data, "[[" , "friends_data_matrix") )
# number_of_network_variable = 3
# postition = mapply(seq, c(0,head(cumsum(n2),-1)) +1,cumsum(n2))
# ystar1 = rep(0,length(y))
# ystar2 = rep(0,length(y))
# seq_m = lapply(data,"[[","group_index")
# delta_matrix = matrix(0, nrow=m+1, ncol= ncol(x1) + number_of_network_variable )
# update_rate = 0
# ## initialization
# if (!missing(last_estimation) && !is.null(last_estimation) ){
# cat("Using last_estimation \n")
# ystar1 = last_estimation$ystar1
# ystar2 = last_estimation$ystar2
# seq_m = last_estimation$seq_m
# delta_matrix[1,] = as.vector(tail(last_estimation$delta,1))
# index = last_estimation$index+1
# # name = last_estimation$name
# ID = last_estimation$ID
# if (update_tau){
# tau=updateTau(last_estimation$tau, last_estimation$update_rate, lower_bound=0.2, upper_bound=0.4,optim_rate=0.3,min_rate=0.00001)
# } else{
# tau=last_estimation$tau
# }
# } else {
# index = 1
# ID = genUniqueID()
# cat("Start new instance with ID ", ID, "\n")
# }
# network_summary = computeNetworkSummary(seq_m=seq_m, D=D)
# xx1 = cbind(x1,network_summary$self)
# xx2 = cbind(x2,network_summary$friends)
# xb1 = xx1 %*% delta_matrix[1,]
# xb2 = xx2 %*% delta_matrix[1,]
# colnames(delta_matrix) = colnames(xx1)
# name = colnames(xx1)
# delta_x_index = 1:ncol(x1)
# delta_network_index = 1:number_of_network_variable + ncol(x1)
# tic()
# ## start the gibbs
# for (i in 1:m){
# ## base on the seq, compute the network summary
# ## draw ystar
# ystar1 = drawYstar(y=y , ystar_other=ystar2, mean=xb1, y_not= y_not)
# ystar2 = drawYstar(y=y, ystar_other=ystar1, mean=xb2, y_not= y_not)
# ## draw delta
# lm_fit = myFastLm(X= rbind(xx1,xx2), y = c(ystar1,ystar2))
# delta_matrix[i+1, ] = mvrnorm(n=1, mu=lm_fit$coef, lm_fit$cov/lm_fit$s^2)
# R1 = x1 %*% delta_matrix[i+1, delta_x_index]
# R2 = x2 %*% delta_matrix[i+1, delta_x_index]
# xb1 = R1 + network_summary$self %*% delta_matrix[i+1,delta_network_index]
# xb2 = R2 + network_summary$friends %*% delta_matrix[i+1,delta_network_index]
# ## update sequence
# seq_m_new = DrawSeqSample(seq_m,p=tau)
# # sapply(1:5, function(i) sum(seq_m_new[[i]]!=seq_m[[i]]) )
# network_summary_new = computeNetworkSummary(seq_m=seq_m_new, D=D)
# xb1_new = R1 + network_summary_new$self %*% delta_matrix[i+1,delta_network_index]
# xb2_new = R2 + network_summary_new$friends %*% delta_matrix[i+1,delta_network_index]
# p1 = splitBy(dnorm(ystar1 - xb1, log=TRUE),by=n2)
# p2 = splitBy(dnorm(ystar2 - xb2, log=TRUE),by=n2)
# p1_new = splitBy(dnorm(ystar1 - xb1_new, log=TRUE),by=n2)
# p2_new = splitBy(dnorm(ystar2 - xb2_new, log=TRUE),by=n2)
# p1 = sapply(p1, sum)
# p2 = sapply(p2, sum)
# p1_new = sapply(p1_new, sum)
# p2_new = sapply(p2_new, sum)
# alpha = exp( p1_new+ p2_new - p1- p2 )
# update_index = alpha > runif(5)
# seq_m[update_index] = seq_m_new[update_index]
# update_rate = update_rate + update_index
# update_position = unlist(postition[update_index])
# network_summary$self[update_position,] = network_summary_new$self[update_position,]
# network_summary$friends[update_position,] = network_summary_new$friends[update_position,]
# xb1[update_position] = xb1_new[update_position]
# xb2[update_position] = xb2_new[update_position]
# xx1[update_position,delta_network_index] = network_summary$self[update_position,]
# xx2[update_position,delta_network_index] = network_summary$friends[update_position,]
# # test
# # xx1_q = cbind(x1,network_summary$self)
# # xx2_q = cbind(x2,network_summary$friends)
# # network_summary_q = computeNetworkSummary(seq_m=seq_m, D=D)
# # xx1_q = cbind(x1,network_summary_q$self)
# # xx2_q = cbind(x2,network_summary_q$friends)
# # xb1_q = xx1_q %*% delta_matrix[i+1,]
# # xb2_q = xx2_q %*% delta_matrix[i+1,]
# # identical(xx1,xx1_q)
# # identical(xx2,xx2_q)
# # identical(xb1,xb1_q)
# # identical(xb2,xb2_q)
# }
# toc()
# update_rate = update_rate/m
# cat("Update rate : \n")
# print(update_rate)
# out = list(delta=tail(delta_matrix,-1) , seq_m=seq_m,ystar1=ystar1,ystar2=ystar2, tau=tau, update_rate=update_rate, index=index,ID=ID, name=name)
# class(out) = "SNF_single"
# out
# }
# merge.SNF_single = function(x,y,...){
# out = y
# out$delta_matrix = rbind(x$delta_matrix, y$delta_matrix)
# out
# }
# getParameterMatrix.SNF_single = function(x ){
# x$delta
# }
# ## update by network
# ## ystar1_demean
# updateSequence = function(ystar1_demean, ystar2_demean, seq_m, tau, delta_network, D){
# network_summary = computeNetworkSummary(g(seq_m=seq_m, D=D))
# seq_m_new = DrawSeqSample(seq_m,p=tau)
# network_summary_new = computeNetworkSummary(g(seq_m=seq_m, D=D))
# lik_old = sum(dnorm(ystar1_demean - network_summary$self %*% delta_network, log=TRUE)) + sum(dnorm(ystar2_demean - network_summary$friends %*% delta_network, log=TRUE))
# lik_new = sum(dnorm(ystar1_demean - network_summary_new$self %*% delta_network, log=TRUE)) + sum(dnorm(ystar2_demean - network_summary_new$friends %*% delta_network, log=TRUE))
# alpha = exp(lik_new - lik_old )
# if (alpha>runif(1)){
# return(list(seq_m=seq_m_new, update=TRUE))
# } else{
# return(list(seq_m=seq_m, update=FALSE))
# }
# })
# ######parallel
# library(Matrix)
# load("model_data.rData")
# library(Matrix)
# library(rbenchmark)
# library(compiler)
# library(parallel)
# D = lapply(data, function(z) z$W!=0 )
# n= sapply(data,"[[","n")
# x1 = do.call(rbind, lapply(data, "[[" , "self_data_matrix") )
# x2 = do.call(rbind, lapply(data, "[[" , "friends_data_matrix") )
# y = do.call(c, lapply(data, "[[", "response1"))
# y_not = !y
# n2 = sapply(data, function(z) length(z$response1))
# parameter=list()
# parameter$delta = rep(0, ncol(x1)+3)
# parameter$ystar1 = rep(0,length(y))
# parameter$ystar2 = rep(0,length(y))
# parameter$seq_m = lapply(data,"[[","group_index")
# parameter$tau = parameter$tau
# parameter$m = 100
# ystar1 = parameter$ystar1
# ystar2 = parameter$ystar2
# seq_m = parameter$seq_m
# tau = parameter$tau
# m = parameter$m
# delta_matrix = matrix(0,nrow=m+1,ncol=length(parameter$delta))
# delta_matrix[1,] = parameter$delta
# ## initialization
# network_summary = computeNetworkSummary(seq_m=seq_m, D=D)
# xx1 = cbind(x1,network_summary$self)
# xx2 = cbind(x2,network_summary$friends)
# xb1 = xx1 %*% delta_matrix[1,]
# xb2 = xx2 %*% delta_matrix[1,]
# cl=makeCluster(6)
# exportAllFunction(cl)
# clusterExport(cl,c("D","src"))
# clusterEvalQ(cl,{library(inline);library(RcppArmadillo)})
# clusterEvalQ(cl,{g <- cxxfunction(signature(seq_m="integer", D="integer"),
# plugin="RcppArmadillo",
# body=src)
# })
# tic()
# ## start the gibbs
# for (i in 1:m){
# ## base on the seq, compute the network summary
# ## draw ystar
# network_summary = computeNetworkSummary(seq_m=seq_m, D=D)
# xx1 = cbind(x1,network_summary$self)
# xx2 = cbind(x2,network_summary$friends)
# xb1 = xx1 %*% delta_matrix[1,]
# xb2 = xx2 %*% delta_matrix[1,]
# ystar1 = drawYstar(y=y , ystar_other=ystar2, mean=xb1, y_not= y_not)
# ystar2 = drawYstar(y=y, ystar_other=ystar1, mean=xb2, y_not= y_not)
# ## draw delta
# lm_fit = myFastLm(XX= rbind(xx1,xx2), yy = c(ystar1,ystar2))
# delta_matrix[i+1, ] = mvrnorm(n=1, mu=lm_fit$coef, lm_fit$cov/lm_fit$s^2)
# xb1 = xx1 %*% delta_matrix[i+1,]
# xb2 = xx2 %*% delta_matrix[i+1,]
# ystar1_demean = ystar1 - x1 %*% head(delta_matrix[i+1,],ncol(x1))
# ystar2_demean = ystar2 - x2 %*% head(delta_matrix[i+1,],ncol(x1))
# ystar1_demean_list = splitBy(ystar1_demean,n2)
# ystar2_demean_list = splitBy(ystar2_demean,n2)
# out = parLapply(cl, 1:length(D),
# function(x,ystar1_demean_list,ystar2_demean_list, seq_m, tau, delta ) {
# updateSequence(
# ystar1_demean=ystar1_demean_list[[x]],
# ystar2_demean=ystar2_demean_list[[x]],
# seq_m= seq_m[[x]],
# tau = tau ,
# delta_network=delta,
# D=D[[x]]
# )
# },
# delta=tail(delta_matrix[i+1,],-ncol(x1)),
# ystar1_demean_list = ystar1_demean_list,
# ystar2_demean_list = ystar2_demean_list,
# tau=tau,
# seq_m=seq_m
# )
# seq_m = lapply(out,"[[","seq_m" )
# update = update + out$update
# }
# toc()
##############################################################################################################
## Given a seq, beta, D, compute the likelihood
## Given a seq m n*(n-1)/2 x 2 matrix
## D: n by n network matrix
## U_xb : an n by n utility matrix, i,j element is the utility of i to make friends with j. (Xbeta)
## delta1 delta2
# DrawSeqSample = function(x , p =0.01){
# if (is.list(x)){
# out = vector("list",length(x))
# if (length(p)==1)
# p = rep(p,length(x))
# for (i in 1:length(x)){
# out[[i]] = DrawSeqSample(x[[i]], p[[i]])
# }
# return(out)
# } else if (is.vector(x)){
# n = length(x)
# pn = pmax(2,ceiling(n * p) )
# to_change = sample(n,pn)
# reorder = sample(to_change)
# x[to_change] = x[reorder]
# return(x)
# } else if (is.matrix(x)){
# n = nrow(x)
# pn = pmax(2,ceiling(n * p) )
# to_change = sample(n,pn)
# reorder = sample(to_change)
# x[to_change,] = x[reorder,]
# return(x)
# }
# })
# # repeat
# computeNetworkSummary = function(seq_m,D){
# n = nrow(D)
# D0 = matrix(0,ncol=n,nrow=n)
# degree = matrix(0,ncol=n,nrow=n)
# common_frds_1 = matrix(0,ncol=n,nrow=n)
# common_frds_2 = matrix(0,ncol=n,nrow=n)
# nn = n*(n-1)/2
# for ( i in 1:nn){
# index = seq_m[i,]
# index1 = index[1]
# index2 = index[2]
# if (D[index1,index2]==1) {
# D0[index1,index2] = D0[index2,index1]= 1
# }
# d1 = D0[index1,]
# d2 = D0[index2,]
# degree[index1,index2] = sum(d1)
# degree[index2,index1] = sum(d2)
# common_frds_1[index1,index2] = sum(d1*d2)
# }
# lower_tri = lower.tri(degree)
# degree1 = degree[ lower_tri ]
# degree2 = t(degree)[ lower_tri ]
# common_frds_1 = common_frds_1[ lower_tri ]
# list(self=cbind(degree1,degree1^2,common_frds_1), friends=cbind(degree2,degree2^2,common_frds_1))
# })
# src=
# '
# arma::mat seq_m2 = Rcpp::as<arma::mat>(seq_m);
# arma::mat DD = Rcpp::as<arma::mat>(D);
# int nn = DD.n_rows;
# arma::mat D00 = arma::zeros(nn,nn);
# arma::mat degreee = arma::zeros(nn,nn);
# arma::mat common_frds_11 = arma::zeros(nn,nn);
# for ( int i=0 ; i<nn*(nn-1)/2; i++ ){
# int index1 = seq_m2(i,0) -1 ;
# int index2 = seq_m2(i,1) -1;
# if (DD(index1,index2)==1) {
# D00(index1,index2) = 1;
# D00(index2,index1) = 1;
# }
# degreee(index1,index2) = sum(D00.col(index1)) ;
# degreee(index2,index1) = sum(D00.col(index2)) ;
# common_frds_11(index1,index2) = arma::as_scalar(D00.col(index1).t() * D00.col(index2)) ;
# }
# return Rcpp::List::create(Rcpp::Named("degree")=degreee, Rcpp::Named("common_frds_1")=common_frds_11);
# '
# g <- cxxfunction(signature(seq_m="integer", D="integer"),
# plugin="RcppArmadillo",
# body=src)
# computeNetworkSummary_cxx <- cxxfunction(
# signature(seq_m="integer", D="integer"),
# plugin="RcppArmadillo",
# body=
# '
# arma::mat seq_m2 = Rcpp::as<arma::mat>(seq_m);
# arma::mat DD = Rcpp::as<arma::mat>(D);
# int nn = DD.n_rows;
# arma::mat D00 = arma::zeros(nn,nn);
# arma::mat degreee = arma::zeros(nn,nn);
# arma::mat common_frds_11 = arma::zeros(nn,nn);
# for ( int i=0 ; i<nn*(nn-1)/2; i++ ){
# int index1 = seq_m2(i,0) -1 ;
# int index2 = seq_m2(i,1) -1;
# if (DD(index1,index2)==1) {
# D00(index1,index2) = 1;
# D00(index2,index1) = 1;
# }
# degreee(index1,index2) = sum(D00.col(index1)) ;
# degreee(index2,index1) = sum(D00.col(index2)) ;
# common_frds_11(index1,index2) = arma::as_scalar(D00.col(index1).t() * D00.col(index2)) ;
# }
# arma::mat out1 = arma::zeros(nn*(nn-1)/2, 3);
# arma::mat out2 = arma::zeros(nn*(nn-1)/2, 3);
# int k = 0;
# for ( int j=0 ; j < nn ; j++){
# for ( int i=j+1 ; i< nn ; i++){
# out1(k,0) = arma::as_scalar( degreee(i,j) );
# out1(k,1) = arma::as_scalar( degreee(i,j)*degreee(i,j) );
# out1(k,2) = arma::as_scalar( common_frds_11(i,j) );
# out2(k,0) = arma::as_scalar( degreee(j,i) );
# out2(k,1) = arma::as_scalar( degreee(j,i)*degreee(j,i) );
# out2(k,2) = arma::as_scalar( common_frds_11(i,j) );
# k++;
# }
# }
# return Rcpp::List::create(Rcpp::Named("self")=out1, Rcpp::Named("friends")=out2);
# '
# )
# q1=computeNetworkSummary(seq_m[[1]],D[[1]])
# q2=computeNetworkSummary_cxx(seq_m[[1]],D[[1]])
# all.equal(q1$self,q2$self,check.attributes=F)
# all.equal(q1$friends,q2$friends,check.attributes=F)
# benchmark(
# b={
# q2 = computeNetworkSummary_cxx(seq_m[[1]],D[[1]])
# }
# )
# all.equal(q1$self,q2$self,check.attributes=F)
# all.equal(q1$friends,q2$friends,check.attributes=F)
################################
# library(Matrix)
# load("model_data.rData")
# library(Matrix)
# library(rbenchmark)
# library(compiler)
# D = (data[[1]]$W!=0) + 0
# n=nrow(D)
# index_table = data[[1]]$group_index
# seq_m = index_table[sample(nn,nn),]
# benchmark(
# a={
# q1= f(seq_m=seq_m, D=D)
# }
# ,b={
# q2= g(seq_m=seq_m, D=D)
# }
# )
# all(q1[[1]]==q2[[1]])
# all(q1[[2]]==q2[[2]])
# computeNetworkSummary=function(seq_m=seq_m_new, D=D){
# # out = list()
# # for (i in 1:length(D)){
# # temp = computeNetworkSummary(g(seq_m=seq_m[[i]], D=D[[i]]))
# # out$self = rbind(out$self, temp$self)
# # out$friends = rbind(out$friends, temp$friends)
# # }
# # out
# out = mapply(function(x,y) computeNetworkSummary_cxx(seq_m=x, D=y), x= seq_m, y=D )
# self = do.call(rbind, out[1,] )
# friends = do.call(rbind, out[1,] )
# colnames(self) = c("degree","degree_seq","common_frds")
# colnames(friends) = c("degree","degree_seq","common_frds")
# list(self=self,friends=friends)
# })
# # q1 = computeNetworkSummary(seq_m,D)
# computeConditionalVariance = function(Sigma){
# k = nrow(Sigma)
# ols_coef = vector("list", k)
# sd_new = vector("list",k)
# for (i in 1:k){
# ols_coef[[i]] = Sigma[i,-i] %*% solve(Sigma[-i,-i])
# sd_new[[i]] = sqrt ( Sigma[i,i] - ols_coef[[i]] %*% Sigma[-i,i] )
# }
# list(sd=sd_new, ols_coef=ols_coef)
# })
# #############################################
# update_seq_multi = function(seq_m, D_list, xb1, xb2, x1_network, x2_network, delta_network_index, ystar1,ystar2, Sigma,n2,update_rate, tau){
# seq_m_new = DrawSeqSample(seq_m,p=tau)
# network_summary_new = lapply(D_list,computeNetworkSummary, seq_m=seq_m_new )
# x1_network_new = do.call(cbind, lapply(network_summary_new, "[[", "self"))
# x2_network_new = do.call(cbind, lapply(network_summary_new, "[[", "friends"))
# xb1_new = R1 + x1_network_new %*% delta[delta_network_index,]
# xb2_new = R2 + x2_network_new %*% delta[delta_network_index,]
# p1 = splitBy( dmvnorm(ystar1 - xb1, sigma=Sigma, log=TRUE),by=n2)
# p2 = splitBy( dmvnorm(ystar2 - xb2, sigma=Sigma, log=TRUE),by=n2)
# p1_new = splitBy( dmvnorm(ystar1 - xb1_new, sigma=Sigma, log=TRUE),by=n2)
# p2_new = splitBy( dmvnorm(ystar2 - xb2_new, sigma=Sigma, log=TRUE),by=n2)
# p1 = sapply(p1, sum)
# p2 = sapply(p2, sum)
# p1_new = sapply(p1_new, sum)
# p2_new = sapply(p2_new, sum)
# alpha = exp( p1_new+ p2_new - p1- p2 )
# update_index = alpha > runif(5)
# seq_m[update_index] = seq_m_new[update_index]
# update_rate = update_rate + update_index
# update_position = unlist(position [update_index])
# x1_network[update_position,] = x1_network_new[update_position,]
# x2_network[update_position,] = x2_network_new[update_position,]
# xb1[update_position] = xb1_new[update_position]
# xb2[update_position] = xb2_new[update_position]
# list(seq_m, xb1, xb2, x1_network, x2_network,update_rate)
# })
# drawYstar_multi_SNF = function(y, y_not=!y, ystar, ystar_other, xb, Sigma){
# # update ystar given ystar_other
# ystar_other_positive = ystar_other>=0
# number_of_network = ncol(y)
# n = nrow(y)
# for (i in 1:number_of_network){
# ols_coef = Sigma[i,-i] %*% solve(Sigma[-i,-i])
# sd_new = sqrt ( Sigma[i,i] - ols_coef %*% Sigma[-i,i] )
# mean_new = xb1[,i] + ols_coef %*% (ystar[,-i] - xb[,-i])
# index_case1 = y[,i]
# index_case2 = as.logical(y_not[,i] * ystar_other_positive[,i])
# index_case3 = as.logical(y_not[,i] * !ystar_other_positive[,i])
# n1=sum(index_case1)
# n2=sum(index_case2)
# n3=sum(index_case3)
# stopifnot(n==n1+n2+n3)
# if (n1>0)
# ystar[index_case1,i] = rtruncnorm(1,a=0,b=Inf, mean=mean_new[index_case1], sd=sd_new)
# if (n2>0)
# ystar[index_case2,i] =rtruncnorm(1,a=-Inf,b=0,mean=mean_new[index_case2], sd=sd_new)
# if (n3>0)
# ystar[index_case3,i] = mean_new[index_case3] +rnorm(n3, sd=sd_new)
# }
# ystar
# })
# SNF_single_mcmc = function(m, data, network_id, last_estimation, update_tau=TRUE,tau=0.005){
# if (network_id==1){
# D = lapply(data, function(z) z$W!=0 )
# y = do.call(c, lapply(data, "[[", "response1"))
# y_not = !y
# } else{
# D = lapply(data, function(z) z$W2!=0 )
# y = do.call(c, lapply(data, "[[", "response2"))
# y_not = !y
# }
# n= sapply(data,"[[","n")
# n2 = sapply(data, function(z) length(z$response1))
# x1 = do.call(rbind, lapply(data, "[[" , "self_data_matrix") )
# x2 = do.call(rbind, lapply(data, "[[" , "friends_data_matrix") )
# number_of_network_variable = 3
# position = mapply(seq, c(0,head(cumsum(n2),-1)) +1,cumsum(n2))
# ystar1 = rep(0,length(y))
# ystar2 = rep(0,length(y))
# seq_m = lapply(data,"[[","group_index")
# delta_matrix = matrix(0, nrow=m+1, ncol= ncol(x1) + number_of_network_variable )
# update_rate = 0
# if (!missing(last_estimation) && !is.null(last_estimation) ){
# cat("Using last_estimation \n")
# ystar1 = last_estimation$ystar1
# ystar2 = last_estimation$ystar2
# seq_m = last_estimation$seq_m
# delta_matrix[1,] = as.vector(tail(last_estimation$delta,1))
# if (update_tau){
# tau = last_estimation$tau
# for ( j in 1:length(tau)){
# if (any(last_estimation$update_rate[[j]] >0.5 | any(last_estimation$update_rate[[j]]< 0.2) )){
# cat("update tau-", j , "\n")
# tau[[j]] = tau[[j]] * last_estimation$update_rate[[j]] / 0.4
# tau[[j]] = ifelse(tau[[j]]==0, 0.0001, tau[[j]])
# }
# }
# }
# }
# ## initialization
# network_summary = computeNetworkSummary(seq_m=seq_m, D=D)
# xx1 = cbind(x1,network_summary$self)
# xx2 = cbind(x2,network_summary$friends)
# xb1 = xx1 %*% delta_matrix[1,]
# xb2 = xx2 %*% delta_matrix[1,]
# colnames(delta_matrix) = colnames(xx1)
# delta_x_index = 1:ncol(x1)
# delta_network_index = 1:number_of_network_variable + ncol(x1)
# tic()
# ## start the gibbs
# for (i in 1:m){
# ## base on the seq, compute the network summary
# ## draw ystar
# ystar1 = drawYstar(y=y , ystar_other=ystar2, mean=xb1, y_not= y_not)
# ystar2 = drawYstar(y=y, ystar_other=ystar1, mean=xb2, y_not= y_not)
# ## draw delta
# lm_fit = my.fastLm(XX= rbind(xx1,xx2), yy = c(ystar1,ystar2))
# delta_matrix[i+1, ] = mvrnorm(n=1, mu=lm_fit$coef, lm_fit$cov/lm_fit$s^2)
# R1 = x1 %*% delta_matrix[i+1, delta_x_index]
# R2 = x2 %*% delta_matrix[i+1, delta_x_index]
# xb1 = R1 + network_summary$self %*% delta_matrix[i+1,delta_network_index]
# xb2 = R2 + network_summary$friends %*% delta_matrix[i+1,delta_network_index]
# ## update sequence
# seq_m_new = DrawSeqSample(seq_m,p=tau)
# network_summary_new = computeNetworkSummary(seq_m=seq_m_new, D=D)
# xb1_new = R1 + network_summary_new$self %*% delta_matrix[i+1,delta_network_index]
# xb2_new = R2 + network_summary_new$friends %*% delta_matrix[i+1,delta_network_index]
# p1 = splitBy(dnorm(ystar1 - xb1, log=TRUE),by=n2)
# p2 = splitBy(dnorm(ystar2 - xb2, log=TRUE),by=n2)
# p1_new = splitBy(dnorm(ystar1 - xb1_new, log=TRUE),by=n2)
# p2_new = splitBy(dnorm(ystar2 - xb2_new, log=TRUE),by=n2)
# p1 = sapply(p1, sum)
# p2 = sapply(p2, sum)
# p1_new = sapply(p1_new, sum)
# p2_new = sapply(p2_new, sum)
# alpha = exp( p1_new+ p2_new - p1- p2 )
# update_index = alpha > runif(5)
# seq_m[update_index] = seq_m_new[update_index]
# update_rate = update_rate + update_index
# update_position = unlist(position [update_index])
# network_summary$self[update_position,] = network_summary_new$self[update_position,]
# network_summary$friends[update_position,] = network_summary_new$friends[update_position,]
# xb1[update_position] = xb1_new[update_position]
# xb2[update_position] = xb2_new[update_position]
# xx1[update_position,delta_network_index] = network_summary$self[update_position,]
# xx2[update_position,delta_network_index] = network_summary$friends[update_position,]
# }
# toc()
# update_rate = update_rate/m
# cat("Update rate : \n")
# print(update_rate)
# out = list(delta=tail(delta_matrix,-1) , seq_m=seq_m,ystar1=ystar1,ystar2=ystar2, tau=tau, update_rate=update_rate)
# class(out) = "SNF_single"
# out
# })
# merge.SNF_single = function(x,y,...){
# if (length(list(...))>0){
# list_args = list(...)
# out = list_args[[1]]
# for (i in 2:length(list_args)){
# out = merge(out, list_args[[i]])
# }
# return(out)
# }
# out =y
# out$delta = rbind(x$delta, y$delta)
# out
# })
# plotmcmc.SNF_single = function(x,remove=0.2,...){
# plotmcmc(x$delta, remove=nrow(x$delta)*remove )
# })
# getParameterMatrix.SNF_single = function(x, tail){
# out = cbind(x$delta)
# if (tail!=0) {
# out = tail(out,tail)
# }
# out
# })
#' @rdname SNF
#' @export
SNF.dynamic.mcmc = function(m, data, last_estimation, update_tau=TRUE, tau=0.005){
  ## Gibbs-within-Metropolis sampler for the dynamic strategic network
  ## formation model with multiple networks.
  ##   m               : number of MCMC iterations
  ##   data            : list of group-level data objects (D_list, response_self,
  ##                     self/friends_data_matrix, group_index, network_name, ...)
  ##   last_estimation : optional previous SNF.dynamic.mcmc object used to
  ##                     warm-start the chain
  ##   update_tau      : if TRUE, rescale the MH step sizes from the previous
  ##                     run's acceptance rates (targets ~0.4)
  ##   tau             : MH proposal step size(s) for the move-sequence update
  ## Returns an object of class "SNF.dynamic.mcmc".
  G = length(data)
  number_of_network = length(data[[1]]$D_list)
  ## Regroup adjacency matrices so that D_list[[k]] holds network k for every group.
  D_list = vector("list", number_of_network)
  for (i in 1:number_of_network){
    D_list[[i]] = lapply(data, function(z) z$D_list[[i]])
  }
  # D_list = lapply(data, "[[", "D_list")
  n = sapply(data, "[[", "n")
  n2 = sapply(data, function(z) NROW(z$response_self))
  nn = sum(n2) * 2   # rows of the stacked (self, friends) latent responses; Wishart df below
  y = do.call(rbind, lapply(data, "[[", "response_self") )
  y_not = !y
  x1 = do.call(rbind, lapply(data, "[[" , "self_data_matrix") )
  x2 = do.call(rbind, lapply(data, "[[" , "friends_data_matrix") )
  number_of_network_variable = 3   # degree, degree^2, common friends
  number_of_variable = ncol(x1) + number_of_network_variable*number_of_network
  ## store all the network matrix of different group into one vector. position is the location of them.
  position = mapply(seq, c(0,head(cumsum(n2),-1)) +1, cumsum(n2))
  ystar1 = array(0, dim=dim(y))
  ystar2 = array(0, dim=dim(y))
  seq_m = lapply(data, "[[", "group_index")
  delta_matrix = rep(list(matrix(0, nrow=m, ncol= number_of_variable )), number_of_network)
  delta = matrix(0, nrow=number_of_variable , ncol=number_of_network)
  for (i in 1:number_of_network){
    delta[,i] = delta_matrix[[i]][1,]
  }
  update_rate = 0
  Sigma = diag(number_of_network)
  if (!missing(last_estimation) && !is.null(last_estimation) ){
    cat("Using last_estimation \n")
    ystar1 = last_estimation$ystar1
    ystar2 = last_estimation$ystar2
    seq_m = last_estimation$seq_m
    delta = last_estimation$delta
    Sigma = last_estimation$Sigma
    if (update_tau){
      ## Rescale each group's step size toward a ~0.4 acceptance rate when the
      ## previous rate was outside [0.2, 0.5].
      tau = last_estimation$tau
      for ( j in 1:length(tau)){
        if (any(last_estimation$update_rate[[j]] >0.5 | any(last_estimation$update_rate[[j]]< 0.2) )){
          cat("update tau-", j , "\n")
          tau[[j]] = tau[[j]] * last_estimation$update_rate[[j]] / 0.4
          tau[[j]] = ifelse(tau[[j]]==0, 0.0001, tau[[j]])
        }
      }
    }
  }
  ## initialization: network summary statistics implied by the current sequences
  network_summary = lapply(D_list, computeNetworkSummary, seq_m=seq_m )
  x1_network = do.call(cbind, lapply(network_summary, "[[", "self"))
  x2_network = do.call(cbind, lapply(network_summary, "[[", "friends"))
  delta_x_index = 1:ncol(x1)
  delta_network_index = ncol(x1) + seq(ncol(x1_network))
  R1 = x1 %*% delta[delta_x_index,]
  R2 = x2 %*% delta[delta_x_index,]
  xb1 = R1 + x1_network %*% delta[delta_network_index,]
  xb2 = R2 + x2_network %*% delta[delta_network_index,]
  network_name = data[[1]]$network_name
  rownames(delta) = c( colnames(x1) , colnames(x1_network) )
  colname_network = colnames(x1_network)[1:3]
  colname_network = unlist( lapply(network_name, function(z) z %+% "_" %+% colname_network) )
  for (i in 1:number_of_network){
    colnames(delta_matrix[[i]]) = c(network_name[[i]] %+% "_" %+% colnames(x1) , network_name[[i]] %+% "_" %+% colname_network )
  }
  X = rbind(x1,x2)
  if (number_of_network>1){
    number_col_Sigma_matrix = number_of_network*(number_of_network-1)/2
    sigma_name = genPairwiseIndex(number_of_network)
    sigma_name = sigma_name[,1] %+% sigma_name[,2]
  } else {
    number_col_Sigma_matrix = 1
    sigma_name = 11
  }
  Sigma_matrix = matrix(0, nrow=m, ncol = number_col_Sigma_matrix )
  colnames(Sigma_matrix) = "Sigma_" %+% sigma_name
  tic()
  ## start the gibbs
  for (i in 1:m){
    ##### draw ystar, one network at a time, conditioning on the others via Sigma
    for (j in 1:number_of_network){
      ystar1_demean = ystar1 - xb1
      ystar2_demean = ystar2 - xb2
      temp = find_normal_conditional_dist(a= ystar1_demean, i=j, j=-j, Sigma=Sigma)
      ystar1[,j] = drawYstar(y=y[,j] , ystar_other=ystar2[,j], mean=xb1[,j] + temp$mean, y_not= y_not[,j], sd= sqrt(temp$var) )
      temp = find_normal_conditional_dist(a= ystar2_demean, i=j, j=-j, Sigma=Sigma)
      ystar2[,j] = drawYstar(y=y[,j] , ystar_other=ystar1[,j], mean=xb2[,j] + temp$mean, y_not= y_not[,j], sd= sqrt(temp$var) )
    }
    ystar1_demean = ystar1 - xb1
    ystar2_demean = ystar2 - xb2
    ystar_demean = rbind(ystar1_demean,ystar2_demean)
    ystar = rbind(ystar1,ystar2)
    ##### draw delta by Bayesian regression of the conditional latent response
    XX = cbind(X, rbind(x1_network,x2_network) )
    YY = rbind(ystar1,ystar2)
    for ( j in 1:number_of_network){
      temp = find_normal_conditional_dist(a=ystar_demean, i=j, j=-j, Sigma=Sigma)
      lm_fit = myFastLm(X=XX, y =YY[,j]-temp$mean)
      delta[,j] = mvrnorm(n=1, mu=lm_fit$coef, lm_fit$cov/lm_fit$s^2 * as.vector(temp$var) )
      delta_matrix[[j]][i,] = delta[,j]
    }
    R1 = x1 %*% delta[delta_x_index,]
    R2 = x2 %*% delta[delta_x_index,]
    xb1 = R1 + x1_network %*% delta[delta_network_index,]
    xb2 = R2 + x2_network %*% delta[delta_network_index,]
    ##### Metropolis update of the move sequences (one proposal per group)
    seq_m_new = DrawSeqSample(seq_m, p=tau)
    network_summary_new = lapply(D_list, computeNetworkSummary, seq_m=seq_m_new )
    x1_network_new = do.call(cbind, lapply(network_summary_new, "[[", "self"))
    x2_network_new = do.call(cbind, lapply(network_summary_new, "[[", "friends"))
    xb1_new = R1 + x1_network_new %*% delta[delta_network_index,]
    xb2_new = R2 + x2_network_new %*% delta[delta_network_index,]
    p1 = splitBy( dmvnorm(ystar1 - xb1, sigma=Sigma, log=TRUE), by=n2)
    p2 = splitBy( dmvnorm(ystar2 - xb2, sigma=Sigma, log=TRUE), by=n2)
    p1_new = splitBy( dmvnorm(ystar1 - xb1_new, sigma=Sigma, log=TRUE), by=n2)
    p2_new = splitBy( dmvnorm(ystar2 - xb2_new, sigma=Sigma, log=TRUE), by=n2)
    p1 = sapply(p1, sum)
    p2 = sapply(p2, sum)
    p1_new = sapply(p1_new, sum)
    p2_new = sapply(p2_new, sum)
    alpha = exp( p1_new + p2_new - p1 - p2 )
    ## FIX: was `runif(5)` — a hard-coded group count that silently recycles or
    ## truncates whenever the number of groups differs from 5. One uniform draw
    ## per group (length(alpha) == G) is the correct MH acceptance test.
    update_index = alpha > runif(length(alpha))
    update_rate = update_rate + update_index
    seq_m[update_index] = seq_m_new[update_index]
    update_position = unlist(position[update_index])
    x1_network[update_position,] = x1_network_new[update_position,]
    x2_network[update_position,] = x2_network_new[update_position,]
    xb1[update_position] = xb1_new[update_position]
    xb2[update_position] = xb2_new[update_position]
    ##### draw Sigma (inverse-Wishart, rescaled to a correlation matrix)
    ystar1_demean = ystar1 - xb1
    ystar2_demean = ystar2 - xb2
    ystar_demean = rbind(ystar1_demean,ystar2_demean)
    if (number_of_network > 1 ){
      Sigma = solve( rwish(nn , solve( crossprod(ystar_demean )) ) )
      normalization = diag(1/sqrt(diag(Sigma)))
      Sigma = normalization %*% Sigma %*% t(normalization)
      Sigma_matrix[i,] = Sigma[lower.tri(Sigma)]
    } else {
      Sigma = as.matrix(1)
      Sigma_matrix[i,] = 1
    }
  }
  toc()
  update_rate = update_rate/m
  cat("Update rate : \n")
  print(update_rate)
  out = list(delta_matrix=delta_matrix, delta=delta, seq_m=seq_m, ystar1=ystar1, ystar2=ystar2, tau=tau, update_rate=update_rate, Sigma=Sigma, Sigma_matrix=Sigma_matrix)
  class(out) = "SNF.dynamic.mcmc"
  out
}
#' Get a matrix of parameter
#' @name getParameterMatrix.SNF.dynamic.mcmc
#' @aliases getParameterMatrix.SNF.dynamic.mcmc
#' @title getParameterMatrix.SNF.dynamic.mcmc
#' @param x SNF.dynamic.mcmc object
#' @param tail iterations to be used. Negative value: remove the first \code{tail} iterations. Positive value: keep the last \code{tail} iterations. If -1 < \code{tail} < 1, it represents the percentage of iterations.
#' @param ... not used
#' @return A matrix
#' @method getParameterMatrix SNF.dynamic.mcmc
#' @export getParameterMatrix SNF.dynamic.mcmc
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
getParameterMatrix.SNF.dynamic.mcmc = function(x, tail, ...){
  # Flatten the per-network delta draws (plus the Sigma draws) into one
  # iterations-by-parameters matrix; optionally keep only a tail of it.
  if (!is.list(x$delta_matrix)) {
    draws = x$delta_matrix
  } else {
    draws = cbind(do.call(cbind, x$delta_matrix), x$Sigma_matrix)
  }
  if (!missing(tail)) {
    draws = extractTail(draws, tail)
  }
  draws
}
#' merge.SNF.dynamic.mcmc
#' @name merge.SNF.dynamic.mcmc
#' @aliases merge.SNF.dynamic.mcmc
#' @title merge.SNF.dynamic.mcmc
#' @param x First object to merge with
#' @param y Second object to merge with
#' @param ... not used
#' @return A new SNF.dynamic.mcmc object
#' @method merge SNF.dynamic.mcmc
#' @export merge SNF.dynamic.mcmc
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
merge.SNF.dynamic.mcmc = function(x,y,...){
  # Concatenate two chains: keep y's final state (ystar, seq_m, Sigma, tau)
  # and stack the draw histories of x before those of y.
  out = y
  # seq_along instead of 1:length(...): 1:0 would iterate c(1, 0) and fail
  # on an empty delta_matrix list.
  for (i in seq_along(y$delta_matrix)){
    out$delta_matrix[[i]] = rbind(x$delta_matrix[[i]], y$delta_matrix[[i]])
  }
  out$Sigma_matrix = rbind(x$Sigma_matrix, y$Sigma_matrix)
  out
}
#' Create a summary table
#' @name summary.SNF.dynamic.mcmc
#' @aliases summary.SNF.dynamic.mcmc
#' @title summary.SNF.dynamic.mcmc
#' @param object SNF.dynamic.mcmc object
#' @param ... tail: iterations to be used. Negative value: remove the first \code{tail} iterations. Positive value: keep the last \code{tail} iterations. If -1 < \code{tail} < 1, it represents the percentage of iterations.
#' @return A summary table
#' @method summary SNF.dynamic.mcmc
#' @export summary SNF.dynamic.mcmc
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
summary.SNF.dynamic.mcmc = function(object,...){
  # Delegate to the shared MCMC summary-table builder; `...` (e.g. tail)
  # is forwarded unchanged.
  tab = computeSummaryTable(object, ...)
  tab
}
| /simmen/R/SNF.r | no_license | ctszkin/simmen | R | false | false | 68,885 | r | #' loglikelihood_SNF
#' @name loglikelihood_SNF
#' @aliases loglikelihood_SNF
#' @title loglikelihood_SNF
#' @param y indicator of whether i and j is connected
#' @param x1 variables of i
#' @param x2 variables of j
#' @param delta parameters
#' @param y_not complement of y
#' @return value of log likelihood
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
loglikelihood_SNF = function(y, x1, x2, delta, y_not){
  # Log-likelihood of the strategic link model: a link forms iff BOTH sides
  # agree, each with an independent probit probability, so
  # P(link) = pnorm(x1 %*% delta) * pnorm(x2 %*% delta).
  if (missing(y_not)) {
    y_not = !y
  }
  prob_link = pnorm(x1 %*% delta) * pnorm(x2 %*% delta)
  loglik = sum(log(prob_link^y * (1 - prob_link)^y_not))
  # Guard the optimizer against -Inf/NaN when a probability hits 0 or 1.
  if (is.finite(loglik)) {
    return(loglik)
  }
  -1e+20
}
#' lik_grad_single_SNF
#' @name lik_grad_single_SNF
#' @aliases lik_grad_single_SNF
#' @title lik_grad_single_SNF
#' @param y indicator of whether i and j is connected
#' @param x1 variables of i
#' @param x2 variables of j
#' @param delta parameters
#' @param y_not complement of y
#' @return value of gradient of log likelihood
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
lik_grad_single_SNF = function(y, x1, x2, delta, y_not){
  # Analytic gradient of loglikelihood_SNF with respect to delta.
  # (y_not is accepted for signature compatibility with the likelihood but
  # is not needed here.)
  xb_self = x1 %*% delta
  xb_frd = x2 %*% delta
  prob_self = pnorm(xb_self)
  prob_frd = pnorm(xb_frd)
  dens_self = dnorm(xb_self)
  dens_frd = dnorm(xb_frd)
  prob_link = prob_self * prob_frd
  weight = (y - prob_link) / (prob_link * (1 - prob_link))
  # 0/0 when prob_link is exactly 0 or 1: that observation contributes nothing.
  weight[is.nan(weight)] = 0
  grad_rows = as.vector(weight) *
    (as.vector(prob_frd * dens_self) * x1 + as.vector(prob_self * dens_frd) * x2)
  as.vector(colSums(grad_rows))
}
#' drawYstar
#' @name drawYstar
#' @aliases drawYstar
#' @title drawYstar
#' @param y indicator of whether i and j is connected
#' @param ystar_other latent value of j
#' @param mean x*delta
#' @param y_not complement of y
#' @param sd sd of the error
#' @return value
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
drawYstar = function(y, ystar_other, mean, y_not=!y, sd=1){
  # Draw the latent utility ystar given the observed link y and the other
  # side's latent value, in three mutually exclusive cases:
  #   1. link observed            -> ystar truncated to (0, Inf)
  #   2. no link, other side >= 0 -> this side must refuse: truncated to (-Inf, 0)
  #   3. no link, other side <  0 -> this side unconstrained: plain normal
  other_nonneg = ystar_other >= 0
  case_linked = y
  case_refused = as.logical(y_not * other_nonneg)
  case_free = as.logical(y_not * !other_nonneg)
  n_linked = sum(case_linked)
  n_refused = sum(case_refused)
  n_free = sum(case_free)
  draw = rep(NA, length(y))
  if (n_linked > 0) {
    draw[case_linked] = rtruncnorm(1, a=0, b=Inf, mean=mean[case_linked], sd=sd)
  }
  if (n_refused > 0) {
    draw[case_refused] = rtruncnorm(1, a=-Inf, b=0, mean=mean[case_refused], sd=sd)
  }
  if (n_free > 0) {
    draw[case_free] = mean[case_free] + rnorm(n_free, sd=sd)
  }
  draw
}
#' Strategy Network Formation
#' @name SNF
#' @rdname SNF
#' @aliases SNF
#' @aliases SNF.static.maxLik
#' @aliases SNF.static.mcmc
#' @aliases SNF.dynamic.mcmc
#' @title SNF
#' @param data data
#' @param method Estimation method, either "static.maxLik","static.mcmc","dynamic.mcmc". Default is "static.maxLik"
#' @param m m
#' @param last_estimation last_estimation
#' @param update_tau update_tau
#' @param tau tau
#' @param ... others argument.
#' @return SNF object
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
SNF = function(data , method=c("static.maxLik","static.mcmc","dynamic.mcmc"), ...){
  # Dispatcher for the three SNF estimators; `...` is forwarded to the
  # chosen estimator.
  method = match.arg(method)
  # FIX: removed a trailing comma after the last switch() branch — it created
  # an empty extra argument (an empty default for switch), a latent error path.
  switch(method,
    static.maxLik = SNF.static.maxLik(data, ...),
    static.mcmc = SNF.static.mcmc(data, ...),
    dynamic.mcmc = SNF.dynamic.mcmc(data, ...)
  )
}
#' @rdname SNF
#' @export
SNF.static.maxLik = function(data,...){
  # Maximum-likelihood estimation of the static model: one BFGS probit-pair
  # fit per network, started from a plain probit on the self covariates.
  tic()
  x_self = do.call(rbind, lapply(data , function(z) z$self_data_matrix))
  x_frds = do.call(rbind, lapply(data , function(z) z$friends_data_matrix))
  resp = do.call(rbind, lapply(data, function(z) z$response_self))
  n_network = ncol(resp)
  network_name = data[[1]]$network_name
  fits = vector("list", n_network)
  tables = vector("list", n_network)
  for (i in 1:n_network){
    resp_i = resp[,i]
    resp_i_not = !resp_i
    # Single-sided probit coefficients as starting values for the joint fit.
    start = glm(resp_i ~ x_self - 1, family=binomial(link="probit"))$coef
    fits[[i]] = maxLik(function(z, ...) loglikelihood_SNF(delta=z, ...),
      start=start, x1=x_self, x2=x_frds, y=resp_i, y_not=resp_i_not,
      grad=lik_grad_single_SNF, method="BFGS")
    tables[[i]] = generateSignificance(summary(fits[[i]])$estimate[,1:2])
    rownames(tables[[i]]) = network_name[i] %+% "_" %+% colnames(x_self)
  }
  summary_table = do.call(rbind, tables)
  toc()
  result = list(out=fits, summary_table=summary_table)
  class(result) = "SNF.static.maxLik"
  result
}
#' @rdname SNF
#' @export
SNF.static.mcmc = function(data, m=1000, last_estimation,...){
  ## Gibbs sampler for the static strategic network formation model with
  ## multiple networks and a correlation matrix Sigma across networks.
  ##   data            : list of group-level data objects
  ##   m               : number of MCMC iterations
  ##   last_estimation : optional previous run used to warm-start the chain
  ## Returns an object of class "SNF.static.mcmc".
  self_data_matrix = do.call(rbind, lapply(data , function(z) z$self_data_matrix))
  friends_data_matrix = do.call(rbind, lapply(data , function(z) z$friends_data_matrix))
  y = do.call(rbind, lapply(data, "[[", "response_self"))
  y_not = !y
  number_of_network = ncol(y)
  name = colnames(self_data_matrix)
  k = ncol(self_data_matrix)
  n = NROW(y)*2   # rows of the stacked (self, friends) latent responses; Wishart df below
  delta_matrix = rep(list(matrix(0, nrow=m, ncol= k )), number_of_network)
  if (number_of_network>1){
    number_col_Sigma_matrix = number_of_network*(number_of_network-1)/2
    Sigma_matrix = matrix(0, nrow=m, ncol = number_col_Sigma_matrix )
    sigma_name = genPairwiseIndex(number_of_network)
    sigma_name = sigma_name[,1] %+% sigma_name[,2]
    colnames(Sigma_matrix) = "Sigma_" %+% sigma_name
  } else{
    number_col_Sigma_matrix = 1
    Sigma_matrix = matrix(0, nrow=m, ncol = number_col_Sigma_matrix )
    colnames(Sigma_matrix) = "Sigma_11"
  }
  network_name = data[[1]]$network_name
  for (i in 1:number_of_network){
    colnames(delta_matrix[[i]]) = network_name[[i]] %+% "_" %+% name
  }
  ystar1 = matrix(0, nrow=nrow(y), ncol=ncol(y))
  ystar2 = matrix(0, nrow=nrow(y), ncol=ncol(y))
  Sigma = matrix(0.5, number_of_network, number_of_network)
  diag(Sigma) = 1
  delta = matrix(0, nrow=k, ncol=number_of_network)
  if (!missing(last_estimation)){
    ystar1 = last_estimation$ystar1
    ystar2 = last_estimation$ystar2
    delta = last_estimation$delta
    Sigma = last_estimation$Sigma
  }
  X = rbind(self_data_matrix, friends_data_matrix)
  XX_inv = solve(crossprod(X))
  xb1 = self_data_matrix %*% delta
  xb2 = friends_data_matrix %*% delta
  tic()
  for (i in 1:m){
    if (i %% 1000 == 0 )
      cat(i, ">\n")
    ## update ystar, one network at a time, conditioning on the others via Sigma
    for( j in 1:number_of_network){
      ystar1_demean = ystar1 - xb1
      ystar2_demean = ystar2 - xb2
      temp = find_normal_conditional_dist(a=ystar1_demean, i=j, j=-j, Sigma=Sigma)
      ystar1[,j] = drawYstar(y=y[,j] , ystar_other=ystar2[,j], mean=xb1[,j] + temp$mean, y_not= y_not[,j], sd= sqrt(temp$var) )
      temp = find_normal_conditional_dist(a= ystar2_demean, i=j, j=-j, Sigma=Sigma)
      ystar2[,j] = drawYstar(y=y[,j] , ystar_other=ystar1[,j], mean=xb2[,j] + temp$mean, y_not= y_not[,j], sd= sqrt(temp$var) )
    }
    ystar1_demean = ystar1 - xb1
    ystar2_demean = ystar2 - xb2
    ystar_demean = rbind(ystar1_demean,ystar2_demean)
    ystar = rbind(ystar1,ystar2)
    ## draw delta, network by network, by Bayesian regression of the
    ## conditional latent response
    for ( j in 1:number_of_network){
      temp = find_normal_conditional_dist(a=ystar_demean, i=j, j=-j, Sigma=Sigma)
      beta_coef = XX_inv %*% crossprod(X, (ystar[,j]-temp$mean ) )
      delta[,j] = mvrnorm(n=1, mu=beta_coef, XX_inv * as.vector(temp$var) )
      ## FIX: was `ystar_demean = ystar[,j] - X %*% delta[,j]`, which replaced
      ## the whole residual MATRIX with a single column, so every subsequent
      ## network's conditional draw received a malformed `a`. Only column j's
      ## residuals change after delta[,j] is redrawn.
      ystar_demean[,j] = ystar[,j] - X %*% delta[,j]
      delta_matrix[[j]][i,] = delta[,j]
    }
    xb1 = self_data_matrix %*% delta
    xb2 = friends_data_matrix %*% delta
    ystar1_demean = ystar1 - xb1
    ystar2_demean = ystar2 - xb2
    ystar_demean = rbind(ystar1_demean,ystar2_demean)
    ## draw Sigma (inverse-Wishart, rescaled to a correlation matrix)
    if (number_of_network > 1 ){
      Sigma = solve( rwish(n , solve( crossprod(ystar_demean )) ) )
      normalization = diag(1/sqrt(diag(Sigma)))
      Sigma = normalization %*% Sigma %*% t(normalization)
      Sigma_matrix[i,] = Sigma[lower.tri(Sigma)]
    } else {
      Sigma = as.matrix(1)
      Sigma_matrix[i,] = 1
    }
  }
  toc()
  out = list(delta_matrix=delta_matrix, ystar1=ystar1, ystar2=ystar2, Sigma=Sigma, Sigma_matrix=Sigma_matrix, delta=delta)
  ## FIX: was "network_formation.mcmc", which never matched this file's own S3
  ## methods (summary/merge/getParameterMatrix.SNF.static.mcmc). Aligned with
  ## the class naming used by SNF.dynamic.mcmc.
  class(out) = "SNF.static.mcmc"
  out
}
#' merge.SNF.static.mcmc
#' @name merge.SNF.static.mcmc
#' @aliases merge.SNF.static.mcmc
#' @title merge.SNF.static.mcmc
#' @param x First object to merge with
#' @param y Second object to merge with
#' @param ... not used
#' @return A new SNF.static.mcmc object
#' @method merge SNF.static.mcmc
#' @export merge SNF.static.mcmc
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
merge.SNF.static.mcmc = function(x,y,...){
  # Concatenate two chains: keep y's final state and stack x's draw history
  # before y's. delta_matrix may be a per-network list or a single matrix.
  out = y
  if (is.list(x$delta_matrix)){
    # seq_along instead of 1:length(...): 1:0 would iterate c(1, 0) and fail
    # on an empty delta_matrix list.
    for (i in seq_along(x$delta_matrix)){
      out$delta_matrix[[i]] = rbind(x$delta_matrix[[i]], y$delta_matrix[[i]])
    }
    out$Sigma_matrix = rbind(x$Sigma_matrix, y$Sigma_matrix)
  } else{
    out$delta_matrix = rbind(x$delta_matrix , y$delta_matrix)
  }
  out
}
#' Get a matrix of parameter
#' @name getParameterMatrix.SNF.static.mcmc
#' @aliases getParameterMatrix.SNF.static.mcmc
#' @title getParameterMatrix.SNF.static.mcmc
#' @param x SNF.static.mcmc
#' @param tail iterations to be used. Negative value: remove the first \code{tail} iterations. Positive value: keep the last \code{tail} iterations. If -1 < \code{tail} < 1, it represents the percentage of iterations.
#' @param ... not used
#' @return A matrix
#' @method getParameterMatrix SNF.static.mcmc
#' @export getParameterMatrix SNF.static.mcmc
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
getParameterMatrix.SNF.static.mcmc = function(x, tail, ...){
  # Flatten the per-network delta draws (plus the Sigma draws) into one
  # iterations-by-parameters matrix; optionally keep only a tail of it.
  if (!is.list(x$delta_matrix)) {
    draws = x$delta_matrix
  } else {
    draws = cbind(do.call(cbind, x$delta_matrix), x$Sigma_matrix)
  }
  if (!missing(tail)) {
    draws = extractTail(draws, tail)
  }
  draws
}
#' Create a summary table
#' @name summary.SNF.static.mcmc
#' @aliases summary.SNF.static.mcmc
#' @title summary.SNF.static.mcmc
#' @param object SNF.static.mcmc object
#' @param ... tail: iterations to be used. Negative value: remove the first \code{tail} iterations. Positive value: keep the last \code{tail} iterations. If -1 < \code{tail} < 1, it represents the percentage of iterations.
#' @return A summary table
#' @method summary SNF.static.mcmc
#' @export summary SNF.static.mcmc
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
summary.SNF.static.mcmc = function(object,...){
  # Delegate to the shared MCMC summary-table builder; `...` (e.g. tail)
  # is forwarded unchanged.
  tab = computeSummaryTable(object, ...)
  tab
}
#' Create a summary table
#' @name summary.SNF.static.maxLik
#' @aliases summary.SNF.static.maxLik
#' @title summary.SNF.static.maxLik
#' @param object SNF.static.maxLik object
#' @param ... not used
#' @return A summary table
#' @method summary SNF.static.maxLik
#' @export summary SNF.static.maxLik
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
summary.SNF.static.maxLik = function(object,...){
  # The significance table is precomputed at estimation time; just return it.
  object[["summary_table"]]
}
# single_network_formation_mcmc = function(data, m=1000, last_estimation){
# self_data_matrix = do.call(rbind, lapply(data , function(z) z$self_data_matrix))
# friends_data_matrix = do.call(rbind, lapply(data , function(z) z$friends_data_matrix))
# response = do.call(rbind, lapply(data, function(z) z$response_self))
# response_not = !response
# name = colnames(self_data_matrix)
# k = ncol(self_data_matrix)
# delta_matrix = matrix(0, nrow=m+1, ncol=k)
# ystar1 = rep(0, length(response))
# ystar2 = rep(0, length(response))
# network_name = data[[1]]$network_name
# if (!missing(last_estimation)){
# delta_matrix[1,] = tail(last_estimation$delta_matrix,1)
# ystar1 = last_estimation$ystar1
# ystar2 = last_estimation$ystar2
# }
# colnames(delta_matrix) = network_name %+% "_" %+% colnames(self_data_matrix)
# X = rbind(self_data_matrix, friends_data_matrix)
# XX_inv = solve(crossprod(X))
# tic()
# for (i in 1:m){
# if (i %% 1000 == 0 ){
# cat(i ,">\n")
# }
# xb1 = self_data_matrix %*% delta_matrix[i, ]
# xb2 = friends_data_matrix %*% delta_matrix[i, ]
# ystar1 = drawYstar(y=response , ystar_other=ystar2, mean=xb1, y_not= response_not)
# ystar2 = drawYstar(y=response, ystar_other=ystar1, mean=xb2, y_not= response_not)
# delta_matrix[i+1, ] = mvrnorm(n=1, mu=XX_inv %*% crossprod(X,c(ystar1,ystar2)), XX_inv)
# }
# toc()
# delta_matrix = tail(delta_matrix,-1)
# out = list(delta_matrix=delta_matrix, ystar1=ystar1, ystar2=ystar2)
# class(out) = "network_formation"
# out
# }
# lik_single_network_formation_parser = function(data, delta, network_id=1){
# loglikelihood_network_formation(
# x1=data$self_data_matrix,
# x2= data$friends_data_matrix,
# y=data$response1,
# delta
# if (network_id==1){
# return(
# )
# )
# } else if (network_id==2){
# return(
# loglikelihood_network_formation(
# x1=data$self_data_matrix,
# x2= data$friends_data_matrix,
# y=data$response2,
# delta
# )
# ) }
# })
# lik_single_network_formation_par = function(cl, delta, network_id=1, G=5){
# sum(
# parSapply(cl, 1:G,
# function(i,delta,network_id) lik_single_network_formation_parser(
# data[[i]],
# delta=delta,
# network_id=network_id
# ),
# delta=delta,
# network_id=network_id
# )
# )
# })
# lik_grad_single_network_formation_parser = function(data, delta, network_id=1){
# if (network_id==1){
# return(
# lik_grad_single_network_formation(
# x1=data$self_data_matrix,
# x2= data$friends_data_matrix,
# y=data$response1,
# delta
# )
# )
# } else if (network_id==2){
# return(
# lik_grad_single_network_formation(
# x1=data$self_data_matrix,
# x2= data$friends_data_matrix,
# y=data$response2,
# delta
# )
# ) }
# })
# lik_grad_single_network_formation_par = function(cl, delta, network_id=1, G=5){
# rowSums(
# parSapply(cl, 1:G,
# function(i,delta,network_id) lik_grad_single_network_formation_parser(
# data[[i]],
# delta=delta,
# network_id=network_id
# ),
# delta=delta,
# network_id=network_id
# )
# )
# })
# single_network_formation = function(data, network_id=1){
# tic()
# require("maxLik")
# self_data_matrix = do.call(rbind, lapply(data , function(z) z$self_data_matrix))
# friends_data_matrix = do.call(rbind, lapply(data , function(z) z$friends_data_matrix))
# if (network_id==1){
# response = unlist(lapply(data, function(z) z$response1))
# } else if (network_id==2){
# response = unlist(lapply(data, function(z) z$response2))
# }
# start = rep(0,ncol(self_data_matrix))
# system.time({
# out= maxLik(function(z, ...) loglikelihood_network_formation(delta=z, ...) , start=start , self_data_matrix=self_data_matrix, friends_data_matrix=friends_data_matrix, response=response , grad= lik_grad_single_network_formation, method="BFGS")
# })
# summary_table = generateSignificance(summary(out)$estimate[,1:2])
# rownames(summary_table) = colnames(self_data_matrix)
# toc()
# list(out, summary_table)
# })
# single_network_formation_parallel = function(data, cl, network_id=1){
# tic()
# name = colnames(data[[1]]$self_data_matrix)
# start = rep(0,length(name))
# out= maxLik(
# lik_single_network_formation_par,
# start=start ,
# cl=cl,
# G=length(data),
# network_id=network_id ,
# grad= lik_grad_single_network_formation_par,
# method="BFGS"
# )
# summary_table = summary(out)$estimate
# rownames(summary_table) = name
# toc()
# list(maxLik_object=out, summary_table=generateSignificance(summary_table[,1:2]))
# })
# single_network_formation_mcmc_v1 = function(start, tau, m, cl, network_id, G){
# k = length(start)
# delta_matrix = matrix(0, nrow=m+1, ncol=k)
# delta_matrix[1, ] = start
# update_rate=0
# for (i in 1:m){
# metro_obj =
# metropolis2(
# beta_previous=delta_matrix[i,],
# tau=tau,
# likelihoodFunction=lik_single_network_formation_par,
# cl=cl,
# network_id=network_id,
# G=G
# )
# delta_matrix[i+1,] = metro_obj$beta
# update_rate = update_rate + metro_obj$update
# }
# delta_matrix = tail(delta_matrix,-1)
# update_rate =update_rate / m
# next_tau = tau * ifelse(update_rate==0,0.1,update_rate) / 0.27
# return(list(delta_matrix = delta_matrix, update_rate =update_rate, next_parameter = tail(delta_matrix,1), tau=tau, next_tau = next_tau))
# })
# single_network_formation_mcmc_v2 = function(start, tau, m, cl, network_id, G){
# k = length(start)
# delta_matrix = matrix(0, nrow=m+1, ncol=k)
# delta_matrix[1, ] = start
# update_rate=0
# for (i in 1:m){
# metro_obj =
# metropolis(
# beta_previous=delta_matrix[i,],
# tau=tau,
# likelihoodFunction=lik_single_network_formation_par,
# cl=cl,
# network_id=network_id,
# G=G
# )
# delta_matrix[i+1,] = metro_obj$beta
# update_rate = update_rate + metro_obj$update
# }
# delta_matrix = tail(delta_matrix,-1)
# update_rate =update_rate / m
# next_tau = tau * ifelse(update_rate==0,0.1,update_rate) / 0.27
# return(list(delta_matrix = delta_matrix, update_rate =update_rate, next_parameter = tail(delta_matrix,1), tau=tau, next_tau = next_tau))
# })
## method 1 : update delta as vector
## method 2 : update delta one by one. Method 2 is more efficient, because it reduces the calls to the likelihood function by half.
# single_network_formation_mcmc_RE = function(data, network_id, m=1000, last_estimation){
# self_data_matrix = do.call(rbind, lapply(data , function(z) z$self_data_matrix))
# friends_data_matrix = do.call(rbind, lapply(data , function(z) z$friends_data_matrix))
# response = as.logical(unlist(lapply(data, function(z) z$response[[network_id]])))
# response_not = !response
# n = sapply(data, function(z) length(z$y))
# n2 = sapply(data, function(z) length(z$response[[network_id]]))
# name = colnames(self_data_matrix)
# k = ncol(self_data_matrix)
# delta_matrix = matrix(0, nrow=m+1, ncol=k)
# sigma2e_matrix = matrix(1, nrow=m+1, ncol=1)
# ystar1 = rep(0, length(response))
# ystar2 = rep(0, length(response))
# e = rep(0,sum(n)) #rnorm(sum(n))
# if (!missing(last_estimation)){
# delta_matrix[1,] = tail(last_estimation$delta_matrix,1)
# sigma2e_matrix[1,] = tail(last_estimation$sigma2e_matrix,1)
# ystar1 = last_estimation$ystar1
# ystar2 = last_estimation$ystar2
# e = last_estimation$e
# }
# colnames(delta_matrix) = colnames(self_data_matrix)
# X = rbind(self_data_matrix, friends_data_matrix)
# XX_inv = solve(crossprod(X))
# full_group_index = genFullGroupIndex(data)
# full_position_index = genFullPositionIndex(data)
# full_position_matrix = genFullPositionMatrix(data)
# row_sums_full_position_matrix = rowSums(full_position_matrix)
# tic()
# for (i in 1:m){
# if (i %% 1000 == 0 ){
# cat(i ,">\n")
# }
# full_e = genFulle(e,full_group_index )
# xb1 = self_data_matrix %*% delta_matrix[i, ] + full_e$e_i
# xb2 = friends_data_matrix %*% delta_matrix[i, ] + full_e$e_j
# # update ystar
# ystar1 = drawYstar(y=response , ystar_other=ystar2, mean=xb1, y_not= response_not)
# ystar2 = drawYstar(y=response, ystar_other=ystar1, mean=xb2, y_not= response_not)
# # update delta
# mu_delta = XX_inv %*% crossprod(X,c(ystar1-full_e$e_i,ystar2-full_e$e_j))
# delta_matrix[i+1, ] = mvrnorm(n=1, mu=mu_delta, XX_inv)
# # update e
# # actually i dont need previous e, just need ystar1-xb1, ystar2-xb2 and the correct position.
# #
# residual = c(ystar1, ystar2) - X %*% delta_matrix[i+1,]
# # mean of residual by individual
# var_e = 1 / (row_sums_full_position_matrix + sigma2e_matrix[i,])
# mean_e = as.numeric( full_position_matrix %*% residual ) * var_e
# e<-rnorm(sum(n),mean_e,sqrt(var_e))
# sigma2e_matrix[i+1,]<-1/rgamma(1,length(e)/2,crossprod(e,e)/2)
# }
# toc()
# delta_matrix = tail(delta_matrix,-1)
# sigma2e_matrix= tail(sigma2e_matrix,-1)
# out = list(ystar1=ystar1, ystar2=ystar2,e=e,delta_matrix=delta_matrix, sigma2e_matrix=sigma2e_matrix)
# class(out) = "single_network_formation_RE"
# out
# })
# single_network_formation_mcmc_parallel = function(data,cl, network_id, m=1000, last_estimation){
# name = colnames(data[[1]]$self_data_matrix)
# k = ncol(data[[1]]$self_data_matrix)
# n2 = sapply(data,function(z) length(z$response1))
# G = length(data)
# delta_matrix = matrix(0, nrow=m+1, ncol=k)
# ystar1 = lapply(n2, rep,x=0 )
# ystar2 = lapply(n2, rep,x=0 )
# if (!missing(last_estimation)){
# delta_matrix[1,] = tail(last_estimation$delta_matrix,1)
# ystar1 = tail(last_estimation$ystar1)
# ystar2 = tail(last_estimation$ystar2)
# }
# colnames(delta_matrix) = name
# X = rbind(
# do.call(rbind,lapply(data,"[[",i="self_data_matrix")),
# do.call(rbind,lapply(data,"[[",i="friends_data_matrix"))
# )
# XX_inv = solve(crossprod(X))
# tic()
# for (i in 1:m){
# ystar1=
# parLapply(cl, 1:G, function(z, ystar2, network_id,delta) {
# drawYstar(
# y= data[[z]]$response[[network_id]],
# ystar_other = ystar2[[z]],
# mean = data[[z]]$self_data_matrix %*% delta
# )},
# delta = delta_matrix[i,],
# network_id = network_id,
# ystar2=ystar2
# )
# ystar2=
# parLapply(cl, 1:G, function(z, ystar1, network_id, delta) {
# drawYstar(
# y= data[[z]]$response[[network_id]],
# ystar_other = ystar1[[z]],
# mean = data[[z]]$friends_data_matrix %*% delta
# )},
# delta = delta_matrix[i,],
# network_id = network_id,
# ystar1=ystar1
# )
# delta_matrix[i+1, ] = mvrnorm(n=1, mu=XX_inv %*% crossprod(X,c(unlist(ystar1), unlist(ystar2))), XX_inv)
# }
# toc()
# delta_matrix = tail(delta_matrix,-1)
# plotmcmc(delta_matrix,remove=remove)
# print(computeSummaryTable(delta_matrix, remove=remove))
# list(delta_matrix=delta_matrix, ystar1=ystar1, ystar2=ystar2)
# })
# drawYstar_multi = function(y, ystar_other, demean_ystar_corr, mean , y_not=!y, rho){
# mean = mean + rho * (demean_ystar_corr)
# sd = sqrt(1-rho^2)
# ystar_other_positive = ystar_other>=0
# index_case1 = y
# index_case2 = as.logical(y_not * ystar_other_positive)
# index_case3 = as.logical(y_not * !ystar_other_positive)
# n = length(y)
# n1=sum(index_case1)
# n2=sum(index_case2)
# n3=sum(index_case3)
# stopifnot(n==n1+n2+n3)
# ystar_new = rep(NA, length(y))
# if (n1>0)
# ystar_new[index_case1] = rtruncnorm(1,a=0,b=Inf, mean=mean[index_case1], sd=sd)
# if (n2>0)
# ystar_new[index_case2] =rtruncnorm(1,a=-Inf,b=0,mean=mean[index_case2], sd=sd)
# if (n3>0)
# ystar_new[index_case3] = mean[index_case3] +rnorm(n3, sd=sd)
# stopifnot(!any(is.nan(ystar_new)))
# ystar_new
# })
# multi_network_formation_mcmc_RE = function(data, m=1000, last_estimation){
# self_data_matrix = do.call(rbind, lapply(data , function(z) z$self_data_matrix))
# friends_data_matrix = do.call(rbind, lapply(data , function(z) z$friends_data_matrix))
# y1 = as.logical(unlist(lapply(data, function(z) z$response1)))
# y2 = as.logical(unlist(lapply(data, function(z) z$response2)))
# y1_not = !y1
# y2_not = !y2
# name = colnames(self_data_matrix)
# k = ncol(self_data_matrix)
# n = length(y1)
# delta1_matrix = matrix(0, nrow=m+1, ncol=k)
# delta2_matrix = matrix(0, nrow=m+1, ncol=k)
# rho_matrix = matrix(0, nrow=m+1,ncol=1)
# ystar11 = rep(0, length(y1))
# ystar12 = rep(0, length(y1))
# ystar21 = rep(0, length(y2))
# ystar22 = rep(0, length(y2))
# e1 = rep(0,sum(sapply(data, "[[", "n")))
# e2 = rep(0,sum(sapply(data, "[[", "n")))
# sigma2e1_matrix = matrix(1, nrow=m+1,ncol=1)
# sigma2e2_matrix = matrix(1, nrow=m+1,ncol=1)
# colnames(delta1_matrix) = name
# colnames(delta2_matrix) = name
# if (!missing(last_estimation)){
# delta1_matrix[1,] = tail(last_estimation$delta1,1)
# delta2_matrix[1,] = tail(last_estimation$delta2,1)
# rho_matrix[1,] = tail(last_estimation$rho,1)
# ystar11 = last_estimation$ystar11
# ystar12 = last_estimation$ystar12
# ystar21 = last_estimation$ystar21
# ystar22 = last_estimation$ystar22
# e1 = last_estimation$e1
# e2 = last_estimation$e2
# }
# X = rbind(self_data_matrix, friends_data_matrix)
# XX_inv = solve(crossprod(X))
# # xb11 = self_data_matrix %*% delta1_matrix[1, ]
# # xb12 = friends_data_matrix %*% delta1_matrix[1, ]
# # xb21 = self_data_matrix %*% delta2_matrix[1, ]
# # xb22 = friends_data_matrix %*% delta2_matrix[1, ]
# full_group_index = genFullGroupIndex(data)
# full_position_index = genFullPositionIndex(data)
# full_position_matrix = genFullPositionMatrix(data)
# row_sums_full_position_matrix = rowSums(full_position_matrix)
# tic()
# for (i in 1:m){
# if (i %% 1000 == 0 )
# cat(i, ">\n")
# rho = rho_matrix[i, 1]
# full_e1 = genFulle(e1, full_group_index )
# full_e2 = genFulle(e2, full_group_index )
# xb11 = self_data_matrix %*% delta1_matrix[i, ] + full_e1$e_i
# xb12 = friends_data_matrix %*% delta1_matrix[i, ] + full_e1$e_j
# xb21 = self_data_matrix %*% delta2_matrix[i, ] + full_e2$e_i
# xb22 = friends_data_matrix %*% delta2_matrix[i, ] + full_e2$e_j
# ystar11 = drawYstar_multi(y=y1 , ystar_other=ystar12, demean_ystar_corr=ystar21-xb21 ,mean=xb11, y_not= y1_not, rho=rho)
# ystar12 = drawYstar_multi(y=y1, ystar_other=ystar11, demean_ystar_corr=ystar22-xb22, mean=xb12, y_not= y1_not, rho=rho)
# ystar21 = drawYstar_multi(y=y2 , ystar_other=ystar22, demean_ystar_corr=ystar11-xb11, mean=xb21, y_not= y2_not, rho=rho)
# ystar22 = drawYstar_multi(y=y2, ystar_other=ystar21, demean_ystar_corr=ystar12-xb12, mean=xb22, y_not= y2_not, rho=rho)
# ystar1 = c(ystar11,ystar12)
# ystar2 = c(ystar21,ystar22)
# ystar2_demean = ystar2 - c(xb21,xb22) - c(full_e2$e_i,full_e2$e_j)
# new_y1 = ystar1 - rho*ystar2_demean - c(full_e1$e_i,full_e1$e_j)
# lm1 = myFastLm(X, new_y1)
# delta1_matrix[i+1, ] = mvrnorm(n=1, mu=lm1$coef, (lm1$cov)/(lm1$s^2) * (1-rho^2) )
# xb11 = self_data_matrix %*% delta1_matrix[i+1, ]
# xb12 = friends_data_matrix %*% delta1_matrix[i+1, ]
# ystar1_demean = ystar1 - c(xb11,xb12) - c(full_e1$e_i,full_e1$e_j)
# new_y2 = ystar2 - rho*ystar1_demean - c(full_e2$e_i,full_e2$e_j)
# lm2 = myFastLm(X, new_y2 )
# delta2_matrix[i+1, ] = mvrnorm(n=1, mu=lm2$coef, (lm2$cov)/(lm2$s^2) * (1-rho^2) )
# xb21 = self_data_matrix %*% delta2_matrix[i+1, ]
# xb22 = friends_data_matrix %*% delta2_matrix[i+1, ]
# ystar2_demean= ystar2 - c(xb21,xb22)- c(full_e2$e_i,full_e2$e_j)
# # update rho
# # var_rho = (1-rho^2)/ sum((ystar1_demean)^2)
# # mean_rho = var_rho / (1-rho^2) * crossprod(ystar1_demean, ystar2_demean )
# # mean_rho = cov(ystar1_demean,ystar2_demean)
# # var_rho = (mean(ystar1_demean^2* ystar2_demean^2 ) - mean( ystar1_demean * ystar2_demean )^2) / 2/n
# mean_rho = mean(ystar1_demean * ystar2_demean)
# var_rho = var(ystar1_demean * ystar2_demean)/2/n
# rho_matrix[i+1,1 ] = rtruncnorm(1,mean= mean_rho, sd= sqrt(var_rho),a=-.999,b=.999)
# # update e1
# residual1 = ystar1 - rho*ystar2_demean - c(full_e1$e_i,full_e1$e_j) - c(xb11,xb12)
# residual2 = ystar2 - rho*ystar1_demean - c(full_e2$e_i,full_e2$e_j) - c(xb21,xb22)
# # mean of residual by individual
# var_e1 = 1 / (row_sums_full_position_matrix + sigma2e1_matrix[i,])
# mean_e1 = as.numeric( full_position_matrix %*% residual1 ) * var_e1
# e1<-rnorm(length(e1),mean_e1,sqrt(var_e1))
# sigma2e1_matrix[i+1,]<-1/rgamma(1,length(e1)/2,crossprod(e1,e1)/2)
# var_e2 = 1 / (row_sums_full_position_matrix + sigma2e2_matrix[i,])
# mean_e2 = as.numeric( full_position_matrix %*% residual2 ) * var_e2
# e2<-rnorm(length(e2),mean_e2,sqrt(var_e2))
# sigma2e2_matrix[i+1,]<-1/rgamma(1,length(e2)/2,crossprod(e2,e2)/2)
# # cat(i, "> ", rho_matrix[i+1,],"\n")
# }
# toc()
# delta1_matrix = tail(delta1_matrix,-1)
# delta2_matrix = tail(delta2_matrix,-1)
# rho_matrix = tail(rho_matrix,-1)
# sigma2e1_matrix = tail(sigma2e1_matrix,-1)
# sigma2e2_matrix = tail(sigma2e2_matrix,-1)
# out = list(delta1_matrix=delta1_matrix, delta2_matrix=delta2_matrix, rho_matrix=rho_matrix, ystar11=ystar11, ystar12=ystar12, ystar21=ystar21, ystar22=ystar22, e1=e1, e2=e2, sigma2e1_matrix=sigma2e1_matrix, sigma2e2_matrix=sigma2e2_matrix)
# class(out) = "multi_network_formation_RE"
# out
# })
# plotmcmc.single_network_formation_RE = function(x, tail=-0.2){
# data_matrix = cbind(x$delta, x$sigma2e_matrix)
# colnames(data_matrix) = c(colnames(x$delta), "Sigma2")
# plotmcmc.default(data_matrix, tail=tail)
# })
# merge.single_network_formation_RE = function(x,y,...){
# out = y
# out$delta_matrix = rbind(x$delta_matrix , y$delta_matrix)
# out$sigma2e_matrix = rbind(x$sigma2e_matrix , y$sigma2e_matrix)
# out
# })
# getParameterMatrix.multi_network_formation = function(x){
# out = do.call(cbind, x$delta_matrix)
# out = cbind(out, x$Sigma_matrix )
# out
# }
# merge.multi_network_formation = function(x,y){
# out = y
# for (i in 1:length(x$delta_matrix)){
# out$delta_matrix[[i]] = rbind(x$delta_matrix[[i]], y$delta_matrix[[i]] )
# }
# out$Sigma_matrix = rbind(x$Sigma_matrix, y$Sigma_matrix)
# out
# })
# plotmcmc.multi_network_formation_RE = function(x, tail=-0.2){
# data_matrix = cbind(x$delta1_matrix, x$delta2_matrix, x$rho_matrix, x$sigma2e1_matrix, x$sigma2e2_matrix)
# colnames(data_matrix) = c(colnames(x$delta1_matrix), colnames(x$delta2_matrix), "rho", "sigma2e1", "sigma2e2")
# plotmcmc.default(data_matrix, tail=tail)
# })
# merge.multi_network_formation_RE = function(x,y){
# out = y
# out$delta1_matrix = rbind(x$delta1_matrix , y$delta1_matrix)
# out$delta2_matrix = rbind(x$delta2_matrix , y$delta2_matrix)
# out$rho_matrix = rbind(x$rho_matrix , y$rho_matrix)
# out$sigma2e1_matrix = rbind(x$sigma2e1_matrix , y$sigma2e1_matrix)
# out$sigma2e2_matrix = rbind(x$sigma2e2_matrix , y$sigma2e2_matrix)
# out
# })
##############################################################################################################################
##############################################################################################################################
##############################################################################################################################
## Given a seq, beta, D, compute the likelihood
## Given a seq m n*(n-1)/2 x 2 matrix
## D: n by n network matrix
## U_xb : an n by n utility matrix, i,j element is the utility of i to make friends with j. (Xbeta)
## delta1 delta2
#' Draw random sample of meeting sequence
#'
#' Randomly permutes a small fraction of the entries of a meeting sequence,
#' producing a proposal for Metropolis-Hastings updating. Lists are handled
#' recursively, component by component; vectors have a random subset of
#' elements shuffled; matrices have a random subset of rows shuffled.
#' @name DrawSeqSample
#' @aliases DrawSeqSample
#' @title DrawSeqSample
#' @param x a meeting sequence: a vector, a matrix (one meeting per row), or a
#'   list of such objects
#' @param p fraction of entries (rows) to shuffle; a scalar, or when \code{x}
#'   is a list, optionally a vector with one element per list component
#' @return an object of the same shape as \code{x} with a random subset of its
#'   entries (rows) permuted; inputs with fewer than two entries are returned
#'   unchanged
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
DrawSeqSample = function(x , p =0.01){
  if (is.list(x)){
    # Recurse over components, recycling a scalar p across the list.
    if (length(p)==1)
      p = rep(p,length(x))
    out = vector("list",length(x))
    for (i in seq_along(x)){
      out[[i]] = DrawSeqSample(x[[i]], p[[i]])
    }
    return(out)
  } else if (is.vector(x)){
    n = length(x)
    # Nothing to shuffle for 0 or 1 element; previously sample(n, 2) would
    # error here because the sample size was forced to be at least 2.
    if (n < 2)
      return(x)
    # At least 2 entries move (a swap), but never more than n.
    pn = min(n, max(2, ceiling(n * p)))
    to_change = sample(n,pn)
    # sample() on a vector of length >= 2 permutes it (pn >= 2 guarantees
    # we never hit the length-1 "sample from 1:k" surprise).
    reorder = sample(to_change)
    x[to_change] = x[reorder]
    return(x)
  } else if (is.matrix(x)){
    n = nrow(x)
    if (n < 2)
      return(x)
    pn = min(n, max(2, ceiling(n * p)))
    to_change = sample(n,pn)
    reorder = sample(to_change)
    x[to_change,] = x[reorder,]
    return(x)
  }
}
# repeat
# computeNetworkSummary_r = function(seq_m,D){
# n = nrow(D)
# D0 = matrix(0,ncol=n,nrow=n)
# degree = matrix(0,ncol=n,nrow=n)
# common_frds_1 = matrix(0,ncol=n,nrow=n)
# common_frds_2 = matrix(0,ncol=n,nrow=n)
# nn = n*(n-1)/2
# for ( i in 1:nn){
# index = seq_m[i,]
# index1 = index[1]
# index2 = index[2]
# if (D[index1,index2]==1) {
# D0[index1,index2] = D0[index2,index1]= 1
# }
# d1 = D0[index1,]
# d2 = D0[index2,]
# degree[index1,index2] = sum(d1)
# degree[index2,index1] = sum(d2)
# common_frds_1[index1,index2] = sum(d1*d2)
# }
# lower_tri = lower.tri(degree)
# degree1 = degree[ lower_tri ]
# degree2 = t(degree)[ lower_tri ]
# common_frds_1 = common_frds_1[ lower_tri ]
# list(self=cbind(degree1,degree1^2,common_frds_1), friends=cbind(degree2,degree2^2,common_frds_1))
# })
# src=
# '
# arma::mat seq_m2 = Rcpp::as<arma::mat>(seq_m);
# arma::mat DD = Rcpp::as<arma::mat>(D);
# int nn = DD.n_rows;
# arma::mat D00 = arma::zeros(nn,nn);
# arma::mat degreee = arma::zeros(nn,nn);
# arma::mat common_frds_11 = arma::zeros(nn,nn);
# for ( int i=0 ; i<nn*(nn-1)/2; i++ ){
# int index1 = seq_m2(i,0) -1 ;
# int index2 = seq_m2(i,1) -1;
# if (DD(index1,index2)==1) {
# D00(index1,index2) = 1;
# D00(index2,index1) = 1;
# }
# degreee(index1,index2) = sum(D00.col(index1)) ;
# degreee(index2,index1) = sum(D00.col(index2)) ;
# common_frds_11(index1,index2) = arma::as_scalar(D00.col(index1).t() * D00.col(index2)) ;
# }
# return Rcpp::List::create(Rcpp::Named("degree")=degreee, Rcpp::Named("common_frds_1")=common_frds_11);
# '
# g <- cxxfunction(signature(seq_m="integer", D="integer"),
# plugin="RcppArmadillo",
# body=src)
# computeNetworkSummary_cxx <- cxxfunction(
# signature(seq_m="integer", D="integer"),
# plugin="RcppArmadillo",
# body=
# '
# arma::mat seq_m2 = Rcpp::as<arma::mat>(seq_m);
# arma::mat DD = Rcpp::as<arma::mat>(D);
# int nn = DD.n_rows;
# arma::mat D00 = arma::zeros(nn,nn);
# arma::mat degreee = arma::zeros(nn,nn);
# arma::mat common_frds_11 = arma::zeros(nn,nn);
# for ( int i=0 ; i<nn*(nn-1)/2; i++ ){
# int index1 = seq_m2(i,0) -1 ;
# int index2 = seq_m2(i,1) -1;
# if (DD(index1,index2)==1) {
# D00(index1,index2) = 1;
# D00(index2,index1) = 1;
# }
# degreee(index1,index2) = sum(D00.col(index1)) ;
# degreee(index2,index1) = sum(D00.col(index2)) ;
# common_frds_11(index1,index2) = arma::as_scalar(D00.col(index1).t() * D00.col(index2)) ;
# }
# arma::mat out1 = arma::zeros(nn*(nn-1)/2, 3);
# arma::mat out2 = arma::zeros(nn*(nn-1)/2, 3);
# int k = 0;
# for ( int j=0 ; j < nn ; j++){
# for ( int i=j+1 ; i< nn ; i++){
# out1(k,0) = arma::as_scalar( degreee(i,j) );
# out1(k,1) = arma::as_scalar( degreee(i,j)*degreee(i,j) );
# out1(k,2) = arma::as_scalar( common_frds_11(i,j) );
# out2(k,0) = arma::as_scalar( degreee(j,i) );
# out2(k,1) = arma::as_scalar( degreee(j,i)*degreee(j,i) );
# out2(k,2) = arma::as_scalar( common_frds_11(i,j) );
# k++;
# }
# }
# return Rcpp::List::create(Rcpp::Named("self")=out1, Rcpp::Named("friends")=out2);
# '
# )
# q1=computeNetworkSummary_r(seq_m[[1]],D[[1]])
# q2=computeNetworkSummary_cxx(seq_m[[1]],D[[1]])
# all.equal(q1$self,q2$self,check.attributes=F)
# all.equal(q1$friends,q2$friends,check.attributes=F)
# benchmark(
# b={
# q2 = computeNetworkSummary_cxx(seq_m[[1]],D[[1]])
# }
# )
# all.equal(q1$self,q2$self,check.attributes=F)
# all.equal(q1$friends,q2$friends,check.attributes=F)
################################
# library(Matrix)
# load("model_data.rData")
# library(Matrix)
# library(rbenchmark)
# library(compiler)
# D = (data[[1]]$W!=0) + 0
# n=nrow(D)
# index_table = data[[1]]$group_index
# seq_m = index_table[sample(nn,nn),]
# benchmark(
# a={
# q1= f(seq_m=seq_m, D=D)
# }
# ,b={
# q2= g(seq_m=seq_m, D=D)
# }
# )
# all(q1[[1]]==q2[[1]])
# all(q1[[2]]==q2[[2]])
#' computeNetworkSummary
#'
#' Computes network summary statistics (degree, squared degree, number of
#' common friends) for every pair under a given meeting sequence, for each
#' group, by delegating to the compiled helper \code{computeNetworkSummary_cxx}
#' and stacking the per-group results.
#' @name computeNetworkSummary
#' @aliases computeNetworkSummary
#' @title computeNetworkSummary
#' @param seq_m meeting sequence: a list of matrices, one per group
#' @param D adjacency matrix: a list of matrices, one per group
#' @return a list with components \code{self} and \code{friends}; each is a
#'   matrix with columns \code{degree}, \code{degree_seq}, \code{common_frds}
#'   stacked over all groups
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
computeNetworkSummary=function(seq_m, D){
  # Each _cxx call returns list(self = ..., friends = ...); mapply() with the
  # default SIMPLIFY = TRUE therefore yields a 2 x G list-matrix whose first
  # row holds the "self" blocks and second row the "friends" blocks.
  out = mapply(function(x,y) computeNetworkSummary_cxx(seq_m=x, D=y), x= seq_m, y=D )
  self = do.call(rbind, out[1,] )
  # BUGFIX: the friends statistics come from the SECOND row of `out`;
  # previously this duplicated row 1, so friends was a copy of self.
  friends = do.call(rbind, out[2,] )
  colnames(self) = c("degree","degree_seq","common_frds")
  colnames(friends) = c("degree","degree_seq","common_frds")
  list(self=self,friends=friends)
}
# q1 = computeNetworkSummary(seq_m,D)
#############################################
# SNF_single_mcmc = function(m, data, last_estimation, update_tau=TRUE,tau=0.0005){
# # if (network_id==1){
# # D = lapply(data, function(z) z$W!=0 )
# # y = do.call(c, lapply(data, "[[", "response1"))
# # y_not = !y
# # } else{
# # D = lapply(data, function(z) z$W2!=0 )
# # y = do.call(c, lapply(data, "[[", "response2"))
# # y_not = !y
# # }
# D = lapply(data, function(z) z$D_list[[1]])
# y = do.call(c, lapply(data,"[[","response_self") )
# y_not = !y
# n= sapply(data,"[[","n")
# n2 = sapply(data, function(z) length(z$response_self))
# x1 = do.call(rbind, lapply(data, "[[" , "self_data_matrix") )
# x2 = do.call(rbind, lapply(data, "[[" , "friends_data_matrix") )
# number_of_network_variable = 3
# postition = mapply(seq, c(0,head(cumsum(n2),-1)) +1,cumsum(n2))
# ystar1 = rep(0,length(y))
# ystar2 = rep(0,length(y))
# seq_m = lapply(data,"[[","group_index")
# delta_matrix = matrix(0, nrow=m+1, ncol= ncol(x1) + number_of_network_variable )
# update_rate = 0
# ## initialization
# if (!missing(last_estimation) && !is.null(last_estimation) ){
# cat("Using last_estimation \n")
# ystar1 = last_estimation$ystar1
# ystar2 = last_estimation$ystar2
# seq_m = last_estimation$seq_m
# delta_matrix[1,] = as.vector(tail(last_estimation$delta,1))
# index = last_estimation$index+1
# # name = last_estimation$name
# ID = last_estimation$ID
# if (update_tau){
# tau=updateTau(last_estimation$tau, last_estimation$update_rate, lower_bound=0.2, upper_bound=0.4,optim_rate=0.3,min_rate=0.00001)
# } else{
# tau=last_estimation$tau
# }
# } else {
# index = 1
# ID = genUniqueID()
# cat("Start new instance with ID ", ID, "\n")
# }
# network_summary = computeNetworkSummary(seq_m=seq_m, D=D)
# xx1 = cbind(x1,network_summary$self)
# xx2 = cbind(x2,network_summary$friends)
# xb1 = xx1 %*% delta_matrix[1,]
# xb2 = xx2 %*% delta_matrix[1,]
# colnames(delta_matrix) = colnames(xx1)
# name = colnames(xx1)
# delta_x_index = 1:ncol(x1)
# delta_network_index = 1:number_of_network_variable + ncol(x1)
# tic()
# ## start the gibbs
# for (i in 1:m){
# ## base on the seq, compute the network summary
# ## draw ystar
# ystar1 = drawYstar(y=y , ystar_other=ystar2, mean=xb1, y_not= y_not)
# ystar2 = drawYstar(y=y, ystar_other=ystar1, mean=xb2, y_not= y_not)
# ## draw delta
# lm_fit = myFastLm(X= rbind(xx1,xx2), y = c(ystar1,ystar2))
# delta_matrix[i+1, ] = mvrnorm(n=1, mu=lm_fit$coef, lm_fit$cov/lm_fit$s^2)
# R1 = x1 %*% delta_matrix[i+1, delta_x_index]
# R2 = x2 %*% delta_matrix[i+1, delta_x_index]
# xb1 = R1 + network_summary$self %*% delta_matrix[i+1,delta_network_index]
# xb2 = R2 + network_summary$friends %*% delta_matrix[i+1,delta_network_index]
# ## update sequence
# seq_m_new = DrawSeqSample(seq_m,p=tau)
# # sapply(1:5, function(i) sum(seq_m_new[[i]]!=seq_m[[i]]) )
# network_summary_new = computeNetworkSummary(seq_m=seq_m_new, D=D)
# xb1_new = R1 + network_summary_new$self %*% delta_matrix[i+1,delta_network_index]
# xb2_new = R2 + network_summary_new$friends %*% delta_matrix[i+1,delta_network_index]
# p1 = splitBy(dnorm(ystar1 - xb1, log=TRUE),by=n2)
# p2 = splitBy(dnorm(ystar2 - xb2, log=TRUE),by=n2)
# p1_new = splitBy(dnorm(ystar1 - xb1_new, log=TRUE),by=n2)
# p2_new = splitBy(dnorm(ystar2 - xb2_new, log=TRUE),by=n2)
# p1 = sapply(p1, sum)
# p2 = sapply(p2, sum)
# p1_new = sapply(p1_new, sum)
# p2_new = sapply(p2_new, sum)
# alpha = exp( p1_new+ p2_new - p1- p2 )
# update_index = alpha > runif(5)
# seq_m[update_index] = seq_m_new[update_index]
# update_rate = update_rate + update_index
# update_position = unlist(postition[update_index])
# network_summary$self[update_position,] = network_summary_new$self[update_position,]
# network_summary$friends[update_position,] = network_summary_new$friends[update_position,]
# xb1[update_position] = xb1_new[update_position]
# xb2[update_position] = xb2_new[update_position]
# xx1[update_position,delta_network_index] = network_summary$self[update_position,]
# xx2[update_position,delta_network_index] = network_summary$friends[update_position,]
# # test
# # xx1_q = cbind(x1,network_summary$self)
# # xx2_q = cbind(x2,network_summary$friends)
# # network_summary_q = computeNetworkSummary(seq_m=seq_m, D=D)
# # xx1_q = cbind(x1,network_summary_q$self)
# # xx2_q = cbind(x2,network_summary_q$friends)
# # xb1_q = xx1_q %*% delta_matrix[i+1,]
# # xb2_q = xx2_q %*% delta_matrix[i+1,]
# # identical(xx1,xx1_q)
# # identical(xx2,xx2_q)
# # identical(xb1,xb1_q)
# # identical(xb2,xb2_q)
# }
# toc()
# update_rate = update_rate/m
# cat("Update rate : \n")
# print(update_rate)
# out = list(delta=tail(delta_matrix,-1) , seq_m=seq_m,ystar1=ystar1,ystar2=ystar2, tau=tau, update_rate=update_rate, index=index,ID=ID, name=name)
# class(out) = "SNF_single"
# out
# }
# merge.SNF_single = function(x,y,...){
# out = y
# out$delta_matrix = rbind(x$delta_matrix, y$delta_matrix)
# out
# }
# getParameterMatrix.SNF_single = function(x ){
# x$delta
# }
# ## update by network
# ## ystar1_demean
# updateSequence = function(ystar1_demean, ystar2_demean, seq_m, tau, delta_network, D){
# network_summary = computeNetworkSummary(g(seq_m=seq_m, D=D))
# seq_m_new = DrawSeqSample(seq_m,p=tau)
# network_summary_new = computeNetworkSummary(g(seq_m=seq_m, D=D))
# lik_old = sum(dnorm(ystar1_demean - network_summary$self %*% delta_network, log=TRUE)) + sum(dnorm(ystar2_demean - network_summary$friends %*% delta_network, log=TRUE))
# lik_new = sum(dnorm(ystar1_demean - network_summary_new$self %*% delta_network, log=TRUE)) + sum(dnorm(ystar2_demean - network_summary_new$friends %*% delta_network, log=TRUE))
# alpha = exp(lik_new - lik_old )
# if (alpha>runif(1)){
# return(list(seq_m=seq_m_new, update=TRUE))
# } else{
# return(list(seq_m=seq_m, update=FALSE))
# }
# })
# ######parallel
# library(Matrix)
# load("model_data.rData")
# library(Matrix)
# library(rbenchmark)
# library(compiler)
# library(parallel)
# D = lapply(data, function(z) z$W!=0 )
# n= sapply(data,"[[","n")
# x1 = do.call(rbind, lapply(data, "[[" , "self_data_matrix") )
# x2 = do.call(rbind, lapply(data, "[[" , "friends_data_matrix") )
# y = do.call(c, lapply(data, "[[", "response1"))
# y_not = !y
# n2 = sapply(data, function(z) length(z$response1))
# parameter=list()
# parameter$delta = rep(0, ncol(x1)+3)
# parameter$ystar1 = rep(0,length(y))
# parameter$ystar2 = rep(0,length(y))
# parameter$seq_m = lapply(data,"[[","group_index")
# parameter$tau = parameter$tau
# parameter$m = 100
# ystar1 = parameter$ystar1
# ystar2 = parameter$ystar2
# seq_m = parameter$seq_m
# tau = parameter$tau
# m = parameter$m
# delta_matrix = matrix(0,nrow=m+1,ncol=length(parameter$delta))
# delta_matrix[1,] = parameter$delta
# ## initialization
# network_summary = computeNetworkSummary(seq_m=seq_m, D=D)
# xx1 = cbind(x1,network_summary$self)
# xx2 = cbind(x2,network_summary$friends)
# xb1 = xx1 %*% delta_matrix[1,]
# xb2 = xx2 %*% delta_matrix[1,]
# cl=makeCluster(6)
# exportAllFunction(cl)
# clusterExport(cl,c("D","src"))
# clusterEvalQ(cl,{library(inline);library(RcppArmadillo)})
# clusterEvalQ(cl,{g <- cxxfunction(signature(seq_m="integer", D="integer"),
# plugin="RcppArmadillo",
# body=src)
# })
# tic()
# ## start the gibbs
# for (i in 1:m){
# ## base on the seq, compute the network summary
# ## draw ystar
# network_summary = computeNetworkSummary(seq_m=seq_m, D=D)
# xx1 = cbind(x1,network_summary$self)
# xx2 = cbind(x2,network_summary$friends)
# xb1 = xx1 %*% delta_matrix[1,]
# xb2 = xx2 %*% delta_matrix[1,]
# ystar1 = drawYstar(y=y , ystar_other=ystar2, mean=xb1, y_not= y_not)
# ystar2 = drawYstar(y=y, ystar_other=ystar1, mean=xb2, y_not= y_not)
# ## draw delta
# lm_fit = myFastLm(XX= rbind(xx1,xx2), yy = c(ystar1,ystar2))
# delta_matrix[i+1, ] = mvrnorm(n=1, mu=lm_fit$coef, lm_fit$cov/lm_fit$s^2)
# xb1 = xx1 %*% delta_matrix[i+1,]
# xb2 = xx2 %*% delta_matrix[i+1,]
# ystar1_demean = ystar1 - x1 %*% head(delta_matrix[i+1,],ncol(x1))
# ystar2_demean = ystar2 - x2 %*% head(delta_matrix[i+1,],ncol(x1))
# ystar1_demean_list = splitBy(ystar1_demean,n2)
# ystar2_demean_list = splitBy(ystar2_demean,n2)
# out = parLapply(cl, 1:length(D),
# function(x,ystar1_demean_list,ystar2_demean_list, seq_m, tau, delta ) {
# updateSequence(
# ystar1_demean=ystar1_demean_list[[x]],
# ystar2_demean=ystar2_demean_list[[x]],
# seq_m= seq_m[[x]],
# tau = tau ,
# delta_network=delta,
# D=D[[x]]
# )
# },
# delta=tail(delta_matrix[i+1,],-ncol(x1)),
# ystar1_demean_list = ystar1_demean_list,
# ystar2_demean_list = ystar2_demean_list,
# tau=tau,
# seq_m=seq_m
# )
# seq_m = lapply(out,"[[","seq_m" )
# update = update + out$update
# }
# toc()
##############################################################################################################
## Given a seq, beta, D, compute the likelihood
## Given a seq m n*(n-1)/2 x 2 matrix
## D: n by n network matrix
## U_xb : an n by n utility matrix, i,j element is the utility of i to make friends with j. (Xbeta)
## delta1 delta2
# DrawSeqSample = function(x , p =0.01){
# if (is.list(x)){
# out = vector("list",length(x))
# if (length(p)==1)
# p = rep(p,length(x))
# for (i in 1:length(x)){
# out[[i]] = DrawSeqSample(x[[i]], p[[i]])
# }
# return(out)
# } else if (is.vector(x)){
# n = length(x)
# pn = pmax(2,ceiling(n * p) )
# to_change = sample(n,pn)
# reorder = sample(to_change)
# x[to_change] = x[reorder]
# return(x)
# } else if (is.matrix(x)){
# n = nrow(x)
# pn = pmax(2,ceiling(n * p) )
# to_change = sample(n,pn)
# reorder = sample(to_change)
# x[to_change,] = x[reorder,]
# return(x)
# }
# })
# # repeat
# computeNetworkSummary = function(seq_m,D){
# n = nrow(D)
# D0 = matrix(0,ncol=n,nrow=n)
# degree = matrix(0,ncol=n,nrow=n)
# common_frds_1 = matrix(0,ncol=n,nrow=n)
# common_frds_2 = matrix(0,ncol=n,nrow=n)
# nn = n*(n-1)/2
# for ( i in 1:nn){
# index = seq_m[i,]
# index1 = index[1]
# index2 = index[2]
# if (D[index1,index2]==1) {
# D0[index1,index2] = D0[index2,index1]= 1
# }
# d1 = D0[index1,]
# d2 = D0[index2,]
# degree[index1,index2] = sum(d1)
# degree[index2,index1] = sum(d2)
# common_frds_1[index1,index2] = sum(d1*d2)
# }
# lower_tri = lower.tri(degree)
# degree1 = degree[ lower_tri ]
# degree2 = t(degree)[ lower_tri ]
# common_frds_1 = common_frds_1[ lower_tri ]
# list(self=cbind(degree1,degree1^2,common_frds_1), friends=cbind(degree2,degree2^2,common_frds_1))
# })
# src=
# '
# arma::mat seq_m2 = Rcpp::as<arma::mat>(seq_m);
# arma::mat DD = Rcpp::as<arma::mat>(D);
# int nn = DD.n_rows;
# arma::mat D00 = arma::zeros(nn,nn);
# arma::mat degreee = arma::zeros(nn,nn);
# arma::mat common_frds_11 = arma::zeros(nn,nn);
# for ( int i=0 ; i<nn*(nn-1)/2; i++ ){
# int index1 = seq_m2(i,0) -1 ;
# int index2 = seq_m2(i,1) -1;
# if (DD(index1,index2)==1) {
# D00(index1,index2) = 1;
# D00(index2,index1) = 1;
# }
# degreee(index1,index2) = sum(D00.col(index1)) ;
# degreee(index2,index1) = sum(D00.col(index2)) ;
# common_frds_11(index1,index2) = arma::as_scalar(D00.col(index1).t() * D00.col(index2)) ;
# }
# return Rcpp::List::create(Rcpp::Named("degree")=degreee, Rcpp::Named("common_frds_1")=common_frds_11);
# '
# g <- cxxfunction(signature(seq_m="integer", D="integer"),
# plugin="RcppArmadillo",
# body=src)
# computeNetworkSummary_cxx <- cxxfunction(
# signature(seq_m="integer", D="integer"),
# plugin="RcppArmadillo",
# body=
# '
# arma::mat seq_m2 = Rcpp::as<arma::mat>(seq_m);
# arma::mat DD = Rcpp::as<arma::mat>(D);
# int nn = DD.n_rows;
# arma::mat D00 = arma::zeros(nn,nn);
# arma::mat degreee = arma::zeros(nn,nn);
# arma::mat common_frds_11 = arma::zeros(nn,nn);
# for ( int i=0 ; i<nn*(nn-1)/2; i++ ){
# int index1 = seq_m2(i,0) -1 ;
# int index2 = seq_m2(i,1) -1;
# if (DD(index1,index2)==1) {
# D00(index1,index2) = 1;
# D00(index2,index1) = 1;
# }
# degreee(index1,index2) = sum(D00.col(index1)) ;
# degreee(index2,index1) = sum(D00.col(index2)) ;
# common_frds_11(index1,index2) = arma::as_scalar(D00.col(index1).t() * D00.col(index2)) ;
# }
# arma::mat out1 = arma::zeros(nn*(nn-1)/2, 3);
# arma::mat out2 = arma::zeros(nn*(nn-1)/2, 3);
# int k = 0;
# for ( int j=0 ; j < nn ; j++){
# for ( int i=j+1 ; i< nn ; i++){
# out1(k,0) = arma::as_scalar( degreee(i,j) );
# out1(k,1) = arma::as_scalar( degreee(i,j)*degreee(i,j) );
# out1(k,2) = arma::as_scalar( common_frds_11(i,j) );
# out2(k,0) = arma::as_scalar( degreee(j,i) );
# out2(k,1) = arma::as_scalar( degreee(j,i)*degreee(j,i) );
# out2(k,2) = arma::as_scalar( common_frds_11(i,j) );
# k++;
# }
# }
# return Rcpp::List::create(Rcpp::Named("self")=out1, Rcpp::Named("friends")=out2);
# '
# )
# q1=computeNetworkSummary(seq_m[[1]],D[[1]])
# q2=computeNetworkSummary_cxx(seq_m[[1]],D[[1]])
# all.equal(q1$self,q2$self,check.attributes=F)
# all.equal(q1$friends,q2$friends,check.attributes=F)
# benchmark(
# b={
# q2 = computeNetworkSummary_cxx(seq_m[[1]],D[[1]])
# }
# )
# all.equal(q1$self,q2$self,check.attributes=F)
# all.equal(q1$friends,q2$friends,check.attributes=F)
################################
# library(Matrix)
# load("model_data.rData")
# library(Matrix)
# library(rbenchmark)
# library(compiler)
# D = (data[[1]]$W!=0) + 0
# n=nrow(D)
# index_table = data[[1]]$group_index
# seq_m = index_table[sample(nn,nn),]
# benchmark(
# a={
# q1= f(seq_m=seq_m, D=D)
# }
# ,b={
# q2= g(seq_m=seq_m, D=D)
# }
# )
# all(q1[[1]]==q2[[1]])
# all(q1[[2]]==q2[[2]])
# computeNetworkSummary=function(seq_m=seq_m_new, D=D){
# # out = list()
# # for (i in 1:length(D)){
# # temp = computeNetworkSummary(g(seq_m=seq_m[[i]], D=D[[i]]))
# # out$self = rbind(out$self, temp$self)
# # out$friends = rbind(out$friends, temp$friends)
# # }
# # out
# out = mapply(function(x,y) computeNetworkSummary_cxx(seq_m=x, D=y), x= seq_m, y=D )
# self = do.call(rbind, out[1,] )
# friends = do.call(rbind, out[1,] )
# colnames(self) = c("degree","degree_seq","common_frds")
# colnames(friends) = c("degree","degree_seq","common_frds")
# list(self=self,friends=friends)
# })
# # q1 = computeNetworkSummary(seq_m,D)
# computeConditionalVariance = function(Sigma){
# k = nrow(Sigma)
# ols_coef = vector("list", k)
# sd_new = vector("list",k)
# for (i in 1:k){
# ols_coef[[i]] = Sigma[i,-i] %*% solve(Sigma[-i,-i])
# sd_new[[i]] = sqrt ( Sigma[i,i] - ols_coef[[i]] %*% Sigma[-i,i] )
# }
# list(sd=sd_new, ols_coef=ols_coef)
# })
# #############################################
# update_seq_multi = function(seq_m, D_list, xb1, xb2, x1_network, x2_network, delta_network_index, ystar1,ystar2, Sigma,n2,update_rate, tau){
# seq_m_new = DrawSeqSample(seq_m,p=tau)
# network_summary_new = lapply(D_list,computeNetworkSummary, seq_m=seq_m_new )
# x1_network_new = do.call(cbind, lapply(network_summary_new, "[[", "self"))
# x2_network_new = do.call(cbind, lapply(network_summary_new, "[[", "friends"))
# xb1_new = R1 + x1_network_new %*% delta[delta_network_index,]
# xb2_new = R2 + x2_network_new %*% delta[delta_network_index,]
# p1 = splitBy( dmvnorm(ystar1 - xb1, sigma=Sigma, log=TRUE),by=n2)
# p2 = splitBy( dmvnorm(ystar2 - xb2, sigma=Sigma, log=TRUE),by=n2)
# p1_new = splitBy( dmvnorm(ystar1 - xb1_new, sigma=Sigma, log=TRUE),by=n2)
# p2_new = splitBy( dmvnorm(ystar2 - xb2_new, sigma=Sigma, log=TRUE),by=n2)
# p1 = sapply(p1, sum)
# p2 = sapply(p2, sum)
# p1_new = sapply(p1_new, sum)
# p2_new = sapply(p2_new, sum)
# alpha = exp( p1_new+ p2_new - p1- p2 )
# update_index = alpha > runif(5)
# seq_m[update_index] = seq_m_new[update_index]
# update_rate = update_rate + update_index
# update_position = unlist(position [update_index])
# x1_network[update_position,] = x1_network_new[update_position,]
# x2_network[update_position,] = x2_network_new[update_position,]
# xb1[update_position] = xb1_new[update_position]
# xb2[update_position] = xb2_new[update_position]
# list(seq_m, xb1, xb2, x1_network, x2_network,update_rate)
# })
# drawYstar_multi_SNF = function(y, y_not=!y, ystar, ystar_other, xb, Sigma){
# # update ystar given ystar_other
# ystar_other_positive = ystar_other>=0
# number_of_network = ncol(y)
# n = nrow(y)
# for (i in 1:number_of_network){
# ols_coef = Sigma[i,-i] %*% solve(Sigma[-i,-i])
# sd_new = sqrt ( Sigma[i,i] - ols_coef %*% Sigma[-i,i] )
# mean_new = xb1[,i] + ols_coef %*% (ystar[,-i] - xb[,-i])
# index_case1 = y[,i]
# index_case2 = as.logical(y_not[,i] * ystar_other_positive[,i])
# index_case3 = as.logical(y_not[,i] * !ystar_other_positive[,i])
# n1=sum(index_case1)
# n2=sum(index_case2)
# n3=sum(index_case3)
# stopifnot(n==n1+n2+n3)
# if (n1>0)
# ystar[index_case1,i] = rtruncnorm(1,a=0,b=Inf, mean=mean_new[index_case1], sd=sd_new)
# if (n2>0)
# ystar[index_case2,i] =rtruncnorm(1,a=-Inf,b=0,mean=mean_new[index_case2], sd=sd_new)
# if (n3>0)
# ystar[index_case3,i] = mean_new[index_case3] +rnorm(n3, sd=sd_new)
# }
# ystar
# })
# SNF_single_mcmc = function(m, data, network_id, last_estimation, update_tau=TRUE,tau=0.005){
# if (network_id==1){
# D = lapply(data, function(z) z$W!=0 )
# y = do.call(c, lapply(data, "[[", "response1"))
# y_not = !y
# } else{
# D = lapply(data, function(z) z$W2!=0 )
# y = do.call(c, lapply(data, "[[", "response2"))
# y_not = !y
# }
# n= sapply(data,"[[","n")
# n2 = sapply(data, function(z) length(z$response1))
# x1 = do.call(rbind, lapply(data, "[[" , "self_data_matrix") )
# x2 = do.call(rbind, lapply(data, "[[" , "friends_data_matrix") )
# number_of_network_variable = 3
# position = mapply(seq, c(0,head(cumsum(n2),-1)) +1,cumsum(n2))
# ystar1 = rep(0,length(y))
# ystar2 = rep(0,length(y))
# seq_m = lapply(data,"[[","group_index")
# delta_matrix = matrix(0, nrow=m+1, ncol= ncol(x1) + number_of_network_variable )
# update_rate = 0
# if (!missing(last_estimation) && !is.null(last_estimation) ){
# cat("Using last_estimation \n")
# ystar1 = last_estimation$ystar1
# ystar2 = last_estimation$ystar2
# seq_m = last_estimation$seq_m
# delta_matrix[1,] = as.vector(tail(last_estimation$delta,1))
# if (update_tau){
# tau = last_estimation$tau
# for ( j in 1:length(tau)){
# if (any(last_estimation$update_rate[[j]] >0.5 | any(last_estimation$update_rate[[j]]< 0.2) )){
# cat("update tau-", j , "\n")
# tau[[j]] = tau[[j]] * last_estimation$update_rate[[j]] / 0.4
# tau[[j]] = ifelse(tau[[j]]==0, 0.0001, tau[[j]])
# }
# }
# }
# }
# ## initialization
# network_summary = computeNetworkSummary(seq_m=seq_m, D=D)
# xx1 = cbind(x1,network_summary$self)
# xx2 = cbind(x2,network_summary$friends)
# xb1 = xx1 %*% delta_matrix[1,]
# xb2 = xx2 %*% delta_matrix[1,]
# colnames(delta_matrix) = colnames(xx1)
# delta_x_index = 1:ncol(x1)
# delta_network_index = 1:number_of_network_variable + ncol(x1)
# tic()
# ## start the gibbs
# for (i in 1:m){
# ## base on the seq, compute the network summary
# ## draw ystar
# ystar1 = drawYstar(y=y , ystar_other=ystar2, mean=xb1, y_not= y_not)
# ystar2 = drawYstar(y=y, ystar_other=ystar1, mean=xb2, y_not= y_not)
# ## draw delta
# lm_fit = my.fastLm(XX= rbind(xx1,xx2), yy = c(ystar1,ystar2))
# delta_matrix[i+1, ] = mvrnorm(n=1, mu=lm_fit$coef, lm_fit$cov/lm_fit$s^2)
# R1 = x1 %*% delta_matrix[i+1, delta_x_index]
# R2 = x2 %*% delta_matrix[i+1, delta_x_index]
# xb1 = R1 + network_summary$self %*% delta_matrix[i+1,delta_network_index]
# xb2 = R2 + network_summary$friends %*% delta_matrix[i+1,delta_network_index]
# ## update sequence
# seq_m_new = DrawSeqSample(seq_m,p=tau)
# network_summary_new = computeNetworkSummary(seq_m=seq_m_new, D=D)
# xb1_new = R1 + network_summary_new$self %*% delta_matrix[i+1,delta_network_index]
# xb2_new = R2 + network_summary_new$friends %*% delta_matrix[i+1,delta_network_index]
# p1 = splitBy(dnorm(ystar1 - xb1, log=TRUE),by=n2)
# p2 = splitBy(dnorm(ystar2 - xb2, log=TRUE),by=n2)
# p1_new = splitBy(dnorm(ystar1 - xb1_new, log=TRUE),by=n2)
# p2_new = splitBy(dnorm(ystar2 - xb2_new, log=TRUE),by=n2)
# p1 = sapply(p1, sum)
# p2 = sapply(p2, sum)
# p1_new = sapply(p1_new, sum)
# p2_new = sapply(p2_new, sum)
# alpha = exp( p1_new+ p2_new - p1- p2 )
# update_index = alpha > runif(5)
# seq_m[update_index] = seq_m_new[update_index]
# update_rate = update_rate + update_index
# update_position = unlist(position [update_index])
# network_summary$self[update_position,] = network_summary_new$self[update_position,]
# network_summary$friends[update_position,] = network_summary_new$friends[update_position,]
# xb1[update_position] = xb1_new[update_position]
# xb2[update_position] = xb2_new[update_position]
# xx1[update_position,delta_network_index] = network_summary$self[update_position,]
# xx2[update_position,delta_network_index] = network_summary$friends[update_position,]
# }
# toc()
# update_rate = update_rate/m
# cat("Update rate : \n")
# print(update_rate)
# out = list(delta=tail(delta_matrix,-1) , seq_m=seq_m,ystar1=ystar1,ystar2=ystar2, tau=tau, update_rate=update_rate)
# class(out) = "SNF_single"
# out
# })
# merge.SNF_single = function(x,y,...){
# if (length(list(...))>0){
# list_args = list(...)
# out = list_args[[1]]
# for (i in 2:length(list_args)){
# out = merge(out, list_args[[i]])
# }
# return(out)
# }
# out =y
# out$delta = rbind(x$delta, y$delta)
# out
# })
# plotmcmc.SNF_single = function(x,remove=0.2,...){
# plotmcmc(x$delta, remove=nrow(x$delta)*remove )
# })
# getParameterMatrix.SNF_single = function(x, tail){
# out = cbind(x$delta)
# if (tail!=0) {
# out = tail(out,tail)
# }
# out
# })
#' @rdname SNF
#' @export
SNF.dynamic.mcmc = function(m, data, last_estimation, update_tau=TRUE, tau=0.005){
  ## Gibbs sampler for the dynamic Social Network Formation (SNF) model with
  ## multiple networks per group.
  ##
  ## Args:
  ##   m               : number of MCMC iterations to run.
  ##   data            : list of per-group data objects; each element is read for
  ##                     D_list, n, response_self, self_data_matrix,
  ##                     friends_data_matrix, group_index and network_name.
  ##   last_estimation : optional previous "SNF.dynamic.mcmc" result used to warm
  ##                     start the chain (ystar1/ystar2, seq_m, delta, Sigma, tau).
  ##   update_tau      : if TRUE and last_estimation is supplied, rescale the
  ##                     Metropolis step size tau towards a 40% acceptance rate.
  ##   tau             : step size(s) passed to DrawSeqSample for the sequence proposal.
  ##
  ## Returns: a list of class "SNF.dynamic.mcmc" holding the stored draws
  ## (delta_matrix, Sigma_matrix) and the final chain state.
  G = length(data)                               # number of groups
  number_of_network = length(data[[1]]$D_list)
  ## Regroup the adjacency matrices: D_list[[k]] holds network k for all groups.
  D_list = vector("list", number_of_network)
  for (i in seq_len(number_of_network)){
    D_list[[i]] = lapply(data, function(z) z$D_list[[i]])
  }
  n  = sapply(data, "[[", "n")
  n2 = sapply(data, function(z) NROW(z$response_self))   # observations per group
  nn = sum(n2) * 2                               # stacked rows (self + friends)
  y = do.call(rbind, lapply(data, "[[", "response_self"))
  y_not = !y
  x1 = do.call(rbind, lapply(data, "[[", "self_data_matrix"))
  x2 = do.call(rbind, lapply(data, "[[", "friends_data_matrix"))
  number_of_network_variable = 3
  number_of_variable = ncol(x1) + number_of_network_variable * number_of_network
  ## Row positions of each group inside the stacked matrices.
  ## SIMPLIFY = FALSE keeps a list even when all groups have equal size;
  ## otherwise mapply() simplifies to a matrix and position[update_index]
  ## below would recycle the logical index over single cells instead of
  ## selecting whole groups.
  position = mapply(seq, c(0, head(cumsum(n2), -1)) + 1, cumsum(n2),
                    SIMPLIFY = FALSE)
  ystar1 = array(0, dim = dim(y))                # latent utilities (self side)
  ystar2 = array(0, dim = dim(y))                # latent utilities (friends side)
  seq_m = lapply(data, "[[", "group_index")      # current sequences per group
  delta_matrix = rep(list(matrix(0, nrow = m, ncol = number_of_variable)),
                     number_of_network)
  delta = matrix(0, nrow = number_of_variable, ncol = number_of_network)
  for (i in seq_len(number_of_network)){
    delta[, i] = delta_matrix[[i]][1, ]
  }
  update_rate = 0
  Sigma = diag(number_of_network)
  ## Warm start from a previous run; optionally retune tau so the Metropolis
  ## acceptance rate moves towards 0.4 when it left the [0.2, 0.5] band.
  if (!missing(last_estimation) && !is.null(last_estimation)){
    cat("Using last_estimation \n")
    ystar1 = last_estimation$ystar1
    ystar2 = last_estimation$ystar2
    seq_m = last_estimation$seq_m
    delta = last_estimation$delta
    Sigma = last_estimation$Sigma
    if (update_tau){
      tau = last_estimation$tau
      for (j in seq_along(tau)){
        rate_j = last_estimation$update_rate[[j]]
        if (any(rate_j > 0.5 | rate_j < 0.2)){
          cat("update tau-", j, "\n")
          tau[[j]] = tau[[j]] * rate_j / 0.4
          tau[[j]] = ifelse(tau[[j]] == 0, 0.0001, tau[[j]])
        }
      }
    }
  }
  ## Initialization: network summary statistics under the current sequences.
  network_summary = lapply(D_list, computeNetworkSummary, seq_m = seq_m)
  x1_network = do.call(cbind, lapply(network_summary, "[[", "self"))
  x2_network = do.call(cbind, lapply(network_summary, "[[", "friends"))
  delta_x_index = 1:ncol(x1)
  delta_network_index = ncol(x1) + seq(ncol(x1_network))
  R1 = x1 %*% delta[delta_x_index, ]
  R2 = x2 %*% delta[delta_x_index, ]
  xb1 = R1 + x1_network %*% delta[delta_network_index, ]
  xb2 = R2 + x2_network %*% delta[delta_network_index, ]
  network_name = data[[1]]$network_name
  rownames(delta) = c(colnames(x1), colnames(x1_network))
  colname_network = colnames(x1_network)[1:3]
  colname_network = unlist(lapply(network_name, function(z) z %+% "_" %+% colname_network))
  for (i in seq_len(number_of_network)){
    colnames(delta_matrix[[i]]) = c(network_name[[i]] %+% "_" %+% colnames(x1),
                                    network_name[[i]] %+% "_" %+% colname_network)
  }
  X = rbind(x1, x2)
  ## Storage for the free (lower-triangular) elements of Sigma.
  if (number_of_network > 1){
    number_col_Sigma_matrix = number_of_network * (number_of_network - 1) / 2
    sigma_name = genPairwiseIndex(number_of_network)
    sigma_name = sigma_name[, 1] %+% sigma_name[, 2]
  } else {
    number_col_Sigma_matrix = 1
    sigma_name = 11
  }
  Sigma_matrix = matrix(0, nrow = m, ncol = number_col_Sigma_matrix)
  colnames(Sigma_matrix) = "Sigma_" %+% sigma_name
  tic()
  ## Gibbs sampler.
  for (i in 1:m){
    ## Step 1: draw the latent ystar, one network at a time, conditioning on
    ## the other networks through Sigma.
    for (j in seq_len(number_of_network)){
      ystar1_demean = ystar1 - xb1
      ystar2_demean = ystar2 - xb2
      temp = find_normal_conditional_dist(a = ystar1_demean, i = j, j = -j, Sigma = Sigma)
      ystar1[, j] = drawYstar(y = y[, j], ystar_other = ystar2[, j],
                              mean = xb1[, j] + temp$mean, y_not = y_not[, j],
                              sd = sqrt(temp$var))
      temp = find_normal_conditional_dist(a = ystar2_demean, i = j, j = -j, Sigma = Sigma)
      ystar2[, j] = drawYstar(y = y[, j], ystar_other = ystar1[, j],
                              mean = xb2[, j] + temp$mean, y_not = y_not[, j],
                              sd = sqrt(temp$var))
    }
    ystar1_demean = ystar1 - xb1
    ystar2_demean = ystar2 - xb2
    ystar_demean = rbind(ystar1_demean, ystar2_demean)
    ## Step 2: draw the coefficients delta, network by network, from the
    ## conditional (Bayesian) linear regression.
    XX = cbind(X, rbind(x1_network, x2_network))
    YY = rbind(ystar1, ystar2)
    for (j in seq_len(number_of_network)){
      temp = find_normal_conditional_dist(a = ystar_demean, i = j, j = -j, Sigma = Sigma)
      lm_fit = myFastLm(X = XX, y = YY[, j] - temp$mean)
      delta[, j] = mvrnorm(n = 1, mu = lm_fit$coef,
                           lm_fit$cov / lm_fit$s^2 * as.vector(temp$var))
      delta_matrix[[j]][i, ] = delta[, j]
    }
    R1 = x1 %*% delta[delta_x_index, ]
    R2 = x2 %*% delta[delta_x_index, ]
    xb1 = R1 + x1_network %*% delta[delta_network_index, ]
    xb2 = R2 + x2_network %*% delta[delta_network_index, ]
    ## Step 3: Metropolis update of the meeting sequences (one proposal per group).
    seq_m_new = DrawSeqSample(seq_m, p = tau)
    network_summary_new = lapply(D_list, computeNetworkSummary, seq_m = seq_m_new)
    x1_network_new = do.call(cbind, lapply(network_summary_new, "[[", "self"))
    x2_network_new = do.call(cbind, lapply(network_summary_new, "[[", "friends"))
    xb1_new = R1 + x1_network_new %*% delta[delta_network_index, ]
    xb2_new = R2 + x2_network_new %*% delta[delta_network_index, ]
    p1 = splitBy(dmvnorm(ystar1 - xb1, sigma = Sigma, log = TRUE), by = n2)
    p2 = splitBy(dmvnorm(ystar2 - xb2, sigma = Sigma, log = TRUE), by = n2)
    p1_new = splitBy(dmvnorm(ystar1 - xb1_new, sigma = Sigma, log = TRUE), by = n2)
    p2_new = splitBy(dmvnorm(ystar2 - xb2_new, sigma = Sigma, log = TRUE), by = n2)
    p1 = sapply(p1, sum)
    p2 = sapply(p2, sum)
    p1_new = sapply(p1_new, sum)
    p2_new = sapply(p2_new, sum)
    alpha = exp(p1_new + p2_new - p1 - p2)       # acceptance ratio, one per group
    ## BUG FIX: the acceptance draw was hard-coded as runif(5); it must match
    ## the number of groups (length(alpha) == G), otherwise the logical
    ## comparison recycles for G != 5.
    update_index = alpha > runif(length(alpha))
    update_rate = update_rate + update_index
    seq_m[update_index] = seq_m_new[update_index]
    update_position = unlist(position[update_index])
    x1_network[update_position, ] = x1_network_new[update_position, ]
    x2_network[update_position, ] = x2_network_new[update_position, ]
    xb1[update_position] = xb1_new[update_position]
    xb2[update_position] = xb2_new[update_position]
    ## Step 4: draw Sigma (via rwish), then normalize it to a correlation matrix.
    ystar1_demean = ystar1 - xb1
    ystar2_demean = ystar2 - xb2
    ystar_demean = rbind(ystar1_demean, ystar2_demean)
    if (number_of_network > 1){
      Sigma = solve(rwish(nn, solve(crossprod(ystar_demean))))
      normalization = diag(1 / sqrt(diag(Sigma)))
      Sigma = normalization %*% Sigma %*% t(normalization)
      Sigma_matrix[i, ] = Sigma[lower.tri(Sigma)]
    } else {
      Sigma = as.matrix(1)
      Sigma_matrix[i, ] = 1
    }
  }
  toc()
  update_rate = update_rate / m
  cat("Update rate : \n")
  print(update_rate)
  out = list(delta_matrix = delta_matrix, delta = delta, seq_m = seq_m,
             ystar1 = ystar1, ystar2 = ystar2, tau = tau,
             update_rate = update_rate, Sigma = Sigma, Sigma_matrix = Sigma_matrix)
  class(out) = "SNF.dynamic.mcmc"
  out
}
#' Get a matrix of parameter
#' @name getParameterMatrix.SNF.dynamic.mcmc
#' @aliases getParameterMatrix.SNF.dynamic.mcmc
#' @title getParameterMatrix.SNF.dynamic.mcmc
#' @param x SNF.dynamic.mcmc object
#' @param tail iteration to be used. Negative value: remove the first \code{tail} iterations. Positive value: keep the last \code{tail} iterations. If -1 < \code{tail} < 1, it represents the percentage of iterations.
#' @param ... not used
#' @return A matrix
#' @method getParameterMatrix SNF.dynamic.mcmc
#' @export getParameterMatrix SNF.dynamic.mcmc
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
getParameterMatrix.SNF.dynamic.mcmc = function(x, tail, ...){
  # Assemble one wide matrix of posterior draws: the per-network delta draws
  # side by side, followed by the stored Sigma draws. When delta_matrix is not
  # a list it is returned as-is. An optional `tail` selects a subset of
  # iterations via extractTail().
  delta_part = x$delta_matrix
  if (is.list(delta_part)) {
    params = cbind(do.call(cbind, delta_part), x$Sigma_matrix)
  } else {
    params = delta_part
  }
  if (!missing(tail)) {
    params = extractTail(params, tail)
  }
  params
}
#' merge.SNF.dynamic.mcmc
#' @name merge.SNF.dynamic.mcmc
#' @aliases merge.SNF.dynamic.mcmc
#' @title merge.SNF.dynamic.mcmc
#' @param x First object to merge with
#' @param y Second object to merge with
#' @param ... not used
#' @return A new SNF.dynamic.mcmc object
#' @method merge SNF.dynamic.mcmc
#' @export merge SNF.dynamic.mcmc
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
merge.SNF.dynamic.mcmc = function(x,y,...){
  # Concatenate the stored draws of two SNF.dynamic.mcmc runs: row-bind each
  # per-network delta matrix and the Sigma draws. All other state (seq_m,
  # ystar, tau, ...) is taken from `y`, the later run.
  out = y
  # seq_along (not 1:length) so an empty delta_matrix list is a no-op
  # instead of iterating over c(1, 0).
  for (i in seq_along(y$delta_matrix)){
    out$delta_matrix[[i]] = rbind(x$delta_matrix[[i]], y$delta_matrix[[i]])
  }
  out$Sigma_matrix = rbind(x$Sigma_matrix, y$Sigma_matrix)
  out
}
#' Create a summary table
#' @name summary.SNF.dynamic.mcmc
#' @aliases summary.SNF.dynamic.mcmc
#' @title summary.SNF.dynamic.mcmc
#' @param object SNF.dynamic.mcmc object
#' @param ... tail: iteration to be used. Negative value: remove the first \code{tail} iterations. Positive value: keep the last \code{tail} iterations. If -1 < \code{tail} < 1, it represents the percentage of iterations.
#' @return A summary table
#' @method summary SNF.dynamic.mcmc
#' @export summary SNF.dynamic.mcmc
#' @author TszKin Julian Chan \email{ctszkin@@gmail.com}
#' @export
summary.SNF.dynamic.mcmc = function(object,...){
  # S3 summary method for SNF.dynamic.mcmc objects: a thin delegation to
  # computeSummaryTable(), forwarding any extra arguments (e.g. `tail`)
  # unchanged.
  computeSummaryTable(object,...)
}
|
ggamma.lnL.bak <- function(data, mu, sigma, lambda) {
  # Generalized gamma log-likelihood for uncensored observations,
  # following the parameterization in Lawless, p. 244.
  # `data` must be strictly positive (the log is taken).
  n_obs <- length(data)
  shape_k <- 1 / lambda^2
  std_resid <- (log(data) - mu) / sigma
  linear_term <- sum(std_resid)
  exp_term <- sum(exp(std_resid / sqrt(shape_k)))
  n_obs * (shape_k - 0.5) * log(shape_k) - n_obs * lgamma(shape_k) -
    n_obs * log(sigma) + sqrt(shape_k) * linear_term - shape_k * exp_term
}
# Generalized Gamma Log Likelihood
# For Uncensored data only
# Based on Lawless, p. 244
n <- length(data)
k <- 1/lambda^2
y <- log(data)
w <- (y - mu)/sigma
sum1 <- sum(w)
sum2 <- sum(exp(w/k^0.5))
value <- n * (k - 0.5) * log(k) - n * lgamma(k) - n * log(sigma) + (k^0.5) * sum1 - k * sum2
value
} |
# COMPARED TO EMBOSS:HMOMENT
# http://emboss.bioinformatics.nl/cgi-bin/emboss/hmoment
# SEQUENCE: FLPVLAGLTPSIVPKLVCLLTKKC
# ALPHA-HELIX 100º : 0.56
# BETA-SHEET 160º : 0.25
# ALPHA HELIX VALUE
test_that("hmoment function: output value is wrong",{
expect_equal(hmoment(seq = "FLPVLAGLTPSIVPKLVCLLTKKC",angle = 100,window = 11), 0.520,tolerance = 0.01)
})
# BETA SHEET VALUE
test_that("hmoment function: output value is wrong",{
expect_equal(hmoment(seq = "FLPVLAGLTPSIVPKLVCLLTKKC",angle = 160,window = 11), 0.271,tolerance = 0.01)
})
# CHECK OUTPUT CLASS
test_that("hmoment function: output class is wrong",{
expect_true(is.numeric(hmoment(seq = "FLPVLAGLTPSIVPKLVCLLTKKC",angle = 100)))
}) | /fuzzedpackages/Peptides/tests/testthat/test.hmoment.R | no_license | akhikolla/testpackages | R | false | false | 726 | r | # COMPARED TO EMBOSS:HMOMENT
# http://emboss.bioinformatics.nl/cgi-bin/emboss/hmoment
# SEQUENCE: FLPVLAGLTPSIVPKLVCLLTKKC
# ALPHA-HELIX 100º : 0.56
# BETA-SHEET 160º : 0.25
# ALPHA HELIX VALUE
test_that("hmoment function: output value is wrong",{
expect_equal(hmoment(seq = "FLPVLAGLTPSIVPKLVCLLTKKC",angle = 100,window = 11), 0.520,tolerance = 0.01)
})
# BETA SHEET VALUE
test_that("hmoment function: output value is wrong",{
expect_equal(hmoment(seq = "FLPVLAGLTPSIVPKLVCLLTKKC",angle = 160,window = 11), 0.271,tolerance = 0.01)
})
# CHECK OUTPUT CLASS
test_that("hmoment function: output class is wrong",{
expect_true(is.numeric(hmoment(seq = "FLPVLAGLTPSIVPKLVCLLTKKC",angle = 100)))
}) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/checkClass.R
\name{checkClass}
\alias{checkClass}
\title{Checks that an object has the same class in all studies}
\usage{
checkClass(datasources = NULL, obj = NULL)
}
\arguments{
\item{datasources}{a list of opal object(s) obtained after login in to opal servers;
these objects hold also the data assign to R, as \code{dataframe}, from opal datasources.}
\item{obj}{a string charcater, the name of the object to check for.}
}
\value{
a message or the class of the object if the object has the same class in all studies.
}
\description{
This is an internal function.
}
\details{
In DataSHIELD an object included in analysis must be of the same type in all
the collaborating studies. If that is not the case the process is stopped
}
\keyword{internal}
| /man/checkClass.Rd | no_license | datashield/dsModellingClient | R | false | true | 830 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/checkClass.R
\name{checkClass}
\alias{checkClass}
\title{Checks that an object has the same class in all studies}
\usage{
checkClass(datasources = NULL, obj = NULL)
}
\arguments{
\item{datasources}{a list of opal object(s) obtained after login in to opal servers;
these objects hold also the data assign to R, as \code{dataframe}, from opal datasources.}
\item{obj}{a string charcater, the name of the object to check for.}
}
\value{
a message or the class of the object if the object has the same class in all studies.
}
\description{
This is an internal function.
}
\details{
In DataSHIELD an object included in analysis must be of the same type in all
the collaborating studies. If that is not the case the process is stopped
}
\keyword{internal}
|
#' @title SETRED generic method
#' @description SETRED is a variant of the self-training classification method
#' (\code{\link{selfTraining}}) with a different addition mechanism.
#' The SETRED classifier is initially trained with a
#' reduced set of labeled examples. Then it is iteratively retrained with its own most
#' confident predictions over the unlabeled examples. SETRED uses an amending scheme
#' to avoid the introduction of noisy examples into the enlarged labeled set. For each
#' iteration, the mislabeled examples are identified using the local information provided
#' by the neighborhood graph.
#' @param y A vector with the labels of training instances. In this vector the
#' unlabeled instances are specified with the value \code{NA}.
#' @param D A distance matrix between all the training instances. This matrix is used to
#' construct the neighborhood graph.
#' @param gen.learner A function for training a supervised base classifier.
#' This function needs two parameters, indexes and cls, where indexes indicates
#' the instances to use and cls specifies the classes of those instances.
#' @param gen.pred A function for predicting the probabilities per classes.
#' This function must be two parameters, model and indexes, where the model
#' is a classifier trained with \code{gen.learner} function and
#' indexes indicates the instances to predict.
#' @param theta Rejection threshold to test the critical region. Default is 0.1.
#' @param max.iter Maximum number of iterations to execute the self-labeling process.
#' Default is 50.
#' @param perc.full A number between 0 and 1. If the percentage
#' of new labeled examples reaches this value the self-training process is stopped.
#' Default is 0.7.
#' @details
#' SetredG can be helpful in those cases where the method selected as
#' base classifier needs a \code{learner} and \code{pred} functions with other
#' specifications. For more information about the general setred method,
#' please see \code{\link{setred}} function. Essentially, \code{setred}
#' function is a wrapper of \code{setredG} function.
#' @return A list object of class "setredG" containing:
#' \describe{
#' \item{model}{The final base classifier trained using the enlarged labeled set.}
#' \item{instances.index}{The indexes of the training instances used to
#' train the \code{model}. These indexes include the initial labeled instances
#' and the newly labeled instances.
#' Those indexes are relative to the \code{y} argument.}
#' }
#' @example demo/SETREDG.R
#' @export
setredG <- function(
y, D, gen.learner, gen.pred,
theta = 0.1,
max.iter = 50,
perc.full = 0.7
) {
### Check parameters ###
# Check y
if (!is.factor(y)) {
if (!is.vector(y)) {
stop("Parameter y is neither a vector nor a factor.")
} else {
y = as.factor(y)
}
}
# Check distance matrix
D <- as.matrix(D)
if (!is.matrix(D)) {
stop("Parameter D is neither a matrix or a dist object.")
} else if (nrow(D) != ncol(D)) {
stop("The distance matrix D is not a square matrix.")
} else if (nrow(D) != length(y)) {
stop(sprintf(paste("The dimensions of the matrix D is %i x %i",
"and it's expected %i x %i according to the size of y."),
nrow(D), ncol(D), length(y), length(y)))
}
# Check theta
if (!(theta >= 0 && theta <= 1)) {
stop("theta must be between 0 and 1")
}
# Check max.iter
if (max.iter < 1) {
stop("Parameter max.iter is less than 1. Expected a value greater than and equal to 1.")
}
# Check perc.full
if (perc.full < 0 || perc.full > 1) {
stop("Parameter perc.full is not in the range 0 to 1.")
}
### Init variables ###
# Identify the classes
classes <- levels(y)
nclasses <- length(classes)
# Init variable to store the labels
ynew <- y
# Obtain the indexes of labeled and unlabeled instances
labeled <- which(!is.na(y))
unlabeled <- which(is.na(y))
## Check the labeled and unlabeled sets
if (length(labeled) == 0) {
# labeled is empty
stop("The labeled set is empty. All the values in y parameter are NA.")
}
if (length(unlabeled) == 0) {
# unlabeled is empty
stop("The unlabeled set is empty. None value in y parameter is NA.")
}
### SETRED algorithm ###
# Count the examples per class
cls.summary <- summary(y[labeled])
# Ratio between count per class and the initial number of labeled instances
proportion <- cls.summary / length(labeled)
# Determine the total of instances to include per iteration
cantClass <- round(cls.summary / min(cls.summary))
totalPerIter <- sum(cantClass)
# Compute count full
count.full <- length(labeled) + round(length(unlabeled) * perc.full)
iter <- 1
while ((length(labeled) < count.full) && (length(unlabeled) >= totalPerIter) && (iter <= max.iter)) {
# Train classifier
#model <- trainModel(x[labeled, ], ynew[labeled], learner, learner.pars)
model <- gen.learner(labeled, ynew[labeled])
# Predict probabilities per classes of unlabeled examples
#prob <- predProb(model, x[unlabeled, ], pred, pred.pars, classes)
prob <- gen.pred(model, unlabeled)
colnames(prob) <- classes
prob <- checkProb(prob = prob, ninstances = length(unlabeled), classes)
# Select the instances with better class probability
selection <- selectInstances(cantClass, as.matrix(prob))
# Save count of labeled set before it's modification
nlabeled.old <- length(labeled)
# Add selected instances to L
labeled.prime <- unlabeled[selection$unlabeled.idx]
sel.classes <- classes[selection$class.idx]
ynew[labeled.prime] <- sel.classes
labeled <- c(labeled, labeled.prime)
# Delete selected instances from U
unlabeled <- unlabeled[-selection$unlabeled.idx]
# Save count of labeled set after it's modification
nlabeled.new <- length(labeled)
# Build a neighborhood graph G with L U L'
'ady <- vector("list", nlabeled.new) # Adjacency list of G
for (i in (nlabeled.old + 1):nlabeled.new){
for (j in 1:(i - 1)) {
con <- TRUE
for (k in 1:nlabeled.new)
if (k != i && k != j && D[labeled[i], labeled[j]] >
max(D[labeled[i], labeled[k]], D[labeled[k], labeled[j]])) {
con <- FALSE
break
}
if (con) {
ady[[i]] <- c(ady[[i]],j)
ady[[j]] <- c(ady[[j]],i)
}
}
}'
#Call cpp loop function
ady <- setred_loop(nlabeled.new, nlabeled.old, D, as.numeric(labeled))
# Compute the bad examples and noise instances
noise.insts <- c() # instances to delete from labeled set
for (i in (nlabeled.old + 1):nlabeled.new) {
# only on L'
propi <- proportion[unclass(ynew[labeled[i]])]
# calculate Oi observation of Ji
Oi <- 0
nv <- W <- k <- 0
for (j in ady[[i]]) {
k <- k + 1
W[k] <- 1 / (1 + D[labeled[i], labeled[j]])
if (ynew[labeled[i]] != ynew[labeled[j]]) {
Oi <- Oi + W[k]
nv <- nv + 1
}
}
if (normalCriterion(theta, Oi, length(ady[[i]]), propi, W)) {
noise.insts <- c(noise.insts, i)
}
}
# Delete from labeled the noise instances
if (length(noise.insts) > 0) {
ynew[labeled[noise.insts]] <- NA
labeled <- labeled[-noise.insts]
}
iter <- iter + 1
}
### Result ###
# Train final model
#model <- trainModel(x[labeled, ], ynew[labeled], learner, learner.pars)
model <- gen.learner(labeled, ynew[labeled])
# Save result
result <- list(
model = model,
instances.index = labeled
)
class(result) <- "setredG"
result
}
#' @title General Interface for SETRED model
#' @description SETRED (SElf-TRaining with EDiting) is a variant of the self-training
#' classification method (as implemented in the function \code{\link{selfTraining}}) with a different addition mechanism.
#' The SETRED classifier is initially trained with a
#' reduced set of labeled examples. Then, it is iteratively retrained with its own most
#' confident predictions over the unlabeled examples. SETRED uses an amending scheme
#' to avoid the introduction of noisy examples into the enlarged labeled set. For each
#' iteration, the mislabeled examples are identified using the local information provided
#' by the neighborhood graph.
#' @param dist A distance function or the name of a distance available
#' in the \code{proxy} package to compute. Default is "Euclidean"
#' the distance matrix in the case that \code{D} is \code{NULL}.
#' @param learner model from parsnip package for training a supervised base classifier
#' using a set of instances. This model needs to have probability predictions
#' (or optionally a distance matrix) and its corresponding classes.
#' @param theta Rejection threshold to test the critical region. Default is 0.1.
#' @param max.iter maximum number of iterations to execute the self-labeling process.
#' Default is 50.
#' @param perc.full A number between 0 and 1. If the percentage
#' of new labeled examples reaches this value the self-training process is stopped.
#' Default is 0.7.
#' @param D A distance matrix between all the training instances. This matrix is used to
#' construct the neighborhood graph. Default is NULL, this means the
#' method create a matrix with dist param
#' @details
#' SETRED initiates the self-labeling process by training a model from the original
#' labeled set. In each iteration, the \code{learner} function detects unlabeled
#' examples for which it makes the most confident prediction and labels those examples
#' according to the \code{pred} function. The identification of mislabeled examples is
#' performed using a neighborhood graph created from the distance matrix.
#' Most examples possess the same label in a neighborhood. So if an example locates
#' in a neighborhood with too many neighbors from different classes, this example should
#' be considered problematic. The value of the \code{theta} argument controls the confidence
#' of the candidates selected to enlarge the labeled set. The lower this value is, the more
#' restrictive is the selection of the examples that are considered good.
#' For more information about the self-labeled process and the rest of the parameters, please
#' see \code{\link{selfTraining}}.
#'
#' @return (When model fit) A list object of class "setred" containing:
#' \describe{
#' \item{model}{The final base classifier trained using the enlarged labeled set.}
#' \item{instances.index}{The indexes of the training instances used to
#' train the \code{model}. These indexes include the initial labeled instances
#' and the newly labeled instances.
#' Those indexes are relative to \code{x} argument.}
#' \item{classes}{The levels of \code{y} factor.}
#' \item{pred}{The function provided in the \code{pred} argument.}
#' \item{pred.pars}{The list provided in the \code{pred.pars} argument.}
#' }
#' @references
#' Ming Li and ZhiHua Zhou.\cr
#' \emph{Setred: Self-training with editing.}\cr
#' In Advances in Knowledge Discovery and Data Mining, volume 3518 of Lecture Notes in
#' Computer Science, pages 611-621. Springer Berlin Heidelberg, 2005.
#' ISBN 978-3-540-26076-9. doi: 10.1007/11430919 71.
#' @example demo/SETRED.R
#' @importFrom magrittr %>%
#' @export
setred <- function(
  dist = "Euclidean",
  learner,
  theta = 0.1,
  max.iter = 50,
  perc.full = 0.7,
  D = NULL
) {
  # Deferred fitting function executed by the sslr framework with the
  # instance matrix `x` and the partially labeled response `y`
  # (NA marks unlabeled instances).
  train_function <- function(x, y) {
    y <- as.factor(y)
    # Instance matrix case: train the parsnip base learner on the rows
    # indexed by `training.ints` with the (possibly self-labeled) classes.
    gen.learner2 <- function(training.ints, cls) {
      m <- learner %>% parsnip::fit_xy(x = x[training.ints,], y = cls)
      return(m)
    }
    # Class-probability predictions for the rows indexed by `testing.ints`.
    gen.pred2 <- function(m, testing.ints) {
      prob <- predict(m, x[testing.ints,], type = "prob")
      return(prob)
    }
    # Build the distance matrix for the neighborhood graph unless the
    # caller supplied one through `D`.
    if (is.null(D))
      distance <- proxy::dist(x, method = dist, by_rows = TRUE, diag = TRUE, upper = TRUE)
    else
      distance <- D
    result <- setredG(
      y,
      D = distance,
      gen.learner2, gen.pred2,
      theta, max.iter, perc.full
    )
    ### Result ###
    # Metadata consumed later by predict.setred().
    result$classes <- levels(y)
    result$pred <- "predict"
    result$pred.pars <- "prob"
    result$pred.params <- c("class","raw")
    result$mode <- "classification"
    class(result) <- "setred"
    result
  }
  # NOTE(review): `D` is captured by `train_function` via its closure but is
  # not recorded in `args`; confirm this matches new_model_sslr()'s contract.
  args <- list(
    dist = dist,
    learner = learner,
    theta = theta,
    max.iter = max.iter,
    perc.full = perc.full
  )
  new_model_sslr(train_function, "setred", args)
}
#' @title Predictions of the SETRED method
#' @description Predicts the label of instances according to the \code{setred} model.
#' @details For additional help see \code{\link{setred}} examples.
#' @param object SETRED model built with the \code{\link{setred}} function.
#' @param x A object that can be coerced as matrix.
#' Depending on how was the model built, \code{x} is interpreted as a matrix
#' with the distances between the unseen instances and the selected training instances,
#' or a matrix of instances.
#' @param ... This parameter is included for compatibility reasons.
#' @param col_name is the colname from returned tibble in class type.
#' The same from parsnip and tidymodels
#' Default is .pred_class
#' @return Vector with the labels assigned.
#' @method predict setred
#' @importFrom stats predict
predict.setred <- function(object, x, col_name = ".pred_class", ...) {
  # Class probabilities from the final base classifier; predProb() is the
  # package-internal prediction helper, called with the `pred`/`pred.pars`
  # fields that setred() stored on the fitted object.
  prob <- predProb(object$model, x, object$pred, object$pred.pars)
  colnames(prob) <- object$classes
  # Validate the probability matrix and map each row to the class with the
  # highest probability.
  result <- getClass(
    checkProb(
      prob = prob,
      ninstances = nrow(x),
      object$classes
    )
  )
  # NOTE(review): `col_name` is accepted for API consistency with parsnip but
  # is never used here; the return value is a plain vector of labels -
  # confirm whether a tibble column rename was intended.
  result
}
#' @title Normal criterion
#' @details Computes the critical value using the normal distribution as the authors suggest
#' when the neighborhood is big for the instances in the RNG.
#' @return A boolean value indicating if the instance must be eliminated
#' @noRd
normalCriterion <- function(theta, Oi, vec, propi, W) {
  # Normal approximation to the null distribution of J, the weighted count
  # of neighbors whose label disagrees with the tested instance.
  # (`vec`, the neighborhood size, is part of the signature but unused.)
  mu <- (1 - propi) * sum(W)
  sigma <- sqrt(propi * (1 - propi) * sum(W ^ 2))
  # Left-tail critical value at significance level theta.
  cutoff <- stats::qnorm(theta / 2, mu, sigma)
  # Degenerate case: the approximation can yield a negative critical value;
  # an observation of exactly zero is then never flagged.
  if (cutoff < 0 && Oi == 0) {
    return(FALSE)
  }
  Oi >= cutoff
}
| /R/SETRED.R | no_license | cran/SSLR | R | false | false | 14,708 | r | #' @title SETRED generic method
#' @description SETRED is a variant of the self-training classification method
#' (\code{\link{selfTraining}}) with a different addition mechanism.
#' The SETRED classifier is initially trained with a
#' reduced set of labeled examples. Then it is iteratively retrained with its own most
#' confident predictions over the unlabeled examples. SETRED uses an amending scheme
#' to avoid the introduction of noisy examples into the enlarged labeled set. For each
#' iteration, the mislabeled examples are identified using the local information provided
#' by the neighborhood graph.
#' @param y A vector with the labels of training instances. In this vector the
#' unlabeled instances are specified with the value \code{NA}.
#' @param D A distance matrix between all the training instances. This matrix is used to
#' construct the neighborhood graph.
#' @param gen.learner A function for training a supervised base classifier.
#' This function needs two parameters, indexes and cls, where indexes indicates
#' the instances to use and cls specifies the classes of those instances.
#' @param gen.pred A function for predicting the probabilities per classes.
#' This function must be two parameters, model and indexes, where the model
#' is a classifier trained with \code{gen.learner} function and
#' indexes indicates the instances to predict.
#' @param theta Rejection threshold to test the critical region. Default is 0.1.
#' @param max.iter Maximum number of iterations to execute the self-labeling process.
#' Default is 50.
#' @param perc.full A number between 0 and 1. If the percentage
#' of new labeled examples reaches this value the self-training process is stopped.
#' Default is 0.7.
#' @details
#' SetredG can be helpful in those cases where the method selected as
#' base classifier needs a \code{learner} and \code{pred} functions with other
#' specifications. For more information about the general setred method,
#' please see \code{\link{setred}} function. Essentially, \code{setred}
#' function is a wrapper of \code{setredG} function.
#' @return A list object of class "setredG" containing:
#' \describe{
#' \item{model}{The final base classifier trained using the enlarged labeled set.}
#' \item{instances.index}{The indexes of the training instances used to
#' train the \code{model}. These indexes include the initial labeled instances
#' and the newly labeled instances.
#' Those indexes are relative to the \code{y} argument.}
#' }
#' @example demo/SETREDG.R
#' @export
setredG <- function(
  y, D, gen.learner, gen.pred,
  theta = 0.1,
  max.iter = 50,
  perc.full = 0.7
) {
  ### Check parameters ###
  # y: factor (or coercible vector) where NA marks the unlabeled instances.
  if (!is.factor(y)) {
    if (!is.vector(y)) {
      stop("Parameter y is neither a vector nor a factor.")
    } else {
      y <- as.factor(y)
    }
  }
  # D: square distance matrix whose order matches length(y).
  D <- as.matrix(D)
  if (!is.matrix(D)) {
    stop("Parameter D is neither a matrix or a dist object.")
  } else if (nrow(D) != ncol(D)) {
    stop("The distance matrix D is not a square matrix.")
  } else if (nrow(D) != length(y)) {
    stop(sprintf(paste("The dimensions of the matrix D is %i x %i",
                       "and it's expected %i x %i according to the size of y."),
                 nrow(D), ncol(D), length(y), length(y)))
  }
  # theta: significance level of the noise-rejection test.
  if (!(theta >= 0 && theta <= 1)) {
    stop("theta must be between 0 and 1")
  }
  if (max.iter < 1) {
    stop("Parameter max.iter is less than 1. Expected a value greater than and equal to 1.")
  }
  if (perc.full < 0 || perc.full > 1) {
    stop("Parameter perc.full is not in the range 0 to 1.")
  }
  ### Init variables ###
  classes <- levels(y)
  # Working copy of the labels; updated as instances are self-labeled.
  ynew <- y
  # Indexes of the initially labeled (L) and unlabeled (U) instances.
  labeled <- which(!is.na(y))
  unlabeled <- which(is.na(y))
  if (length(labeled) == 0) {
    stop("The labeled set is empty. All the values in y parameter are NA.")
  }
  if (length(unlabeled) == 0) {
    stop("The unlabeled set is empty. None value in y parameter is NA.")
  }
  ### SETRED algorithm ###
  # Per-class counts in the initial labeled set.
  cls.summary <- summary(y[labeled])
  # Class proportions, used as the prior in the noise test.
  proportion <- cls.summary / length(labeled)
  # Instances added per class (and in total) on every iteration, keeping
  # the original class ratio.
  cantClass <- round(cls.summary / min(cls.summary))
  totalPerIter <- sum(cantClass)
  # Stop once this many instances are labeled (perc.full of U consumed).
  count.full <- length(labeled) + round(length(unlabeled) * perc.full)
  iter <- 1
  while ((length(labeled) < count.full) && (length(unlabeled) >= totalPerIter) && (iter <= max.iter)) {
    # Train classifier on the current labeled set.
    model <- gen.learner(labeled, ynew[labeled])
    # Predict probabilities per classes of unlabeled examples.
    prob <- gen.pred(model, unlabeled)
    colnames(prob) <- classes
    prob <- checkProb(prob = prob, ninstances = length(unlabeled), classes)
    # Select the instances with the best class probability.
    selection <- selectInstances(cantClass, as.matrix(prob))
    # Size of the labeled set before adding the new candidates.
    nlabeled.old <- length(labeled)
    # Add selected instances (L') to L with their predicted classes.
    labeled.prime <- unlabeled[selection$unlabeled.idx]
    sel.classes <- classes[selection$class.idx]
    ynew[labeled.prime] <- sel.classes
    labeled <- c(labeled, labeled.prime)
    # Delete selected instances from U.
    unlabeled <- unlabeled[-selection$unlabeled.idx]
    nlabeled.new <- length(labeled)
    # Build a neighborhood graph G with L union L'.
    # The pure-R reference implementation (previously kept alive as a string
    # literal rebuilt on every iteration) is preserved below as comments;
    # setred_loop() is its compiled equivalent.
    # ady <- vector("list", nlabeled.new) # Adjacency list of G
    # for (i in (nlabeled.old + 1):nlabeled.new){
    #   for (j in 1:(i - 1)) {
    #     con <- TRUE
    #     for (k in 1:nlabeled.new)
    #       if (k != i && k != j && D[labeled[i], labeled[j]] >
    #           max(D[labeled[i], labeled[k]], D[labeled[k], labeled[j]])) {
    #         con <- FALSE
    #         break
    #       }
    #     if (con) {
    #       ady[[i]] <- c(ady[[i]],j)
    #       ady[[j]] <- c(ady[[j]],i)
    #     }
    #   }
    # }
    ady <- setred_loop(nlabeled.new, nlabeled.old, D, as.numeric(labeled))
    # Identify noisy self-labeled instances using the neighborhood graph.
    noise.insts <- c() # instances to delete from labeled set
    for (i in (nlabeled.old + 1):nlabeled.new) {
      # Only the newly added instances (L') are tested.
      propi <- proportion[unclass(ynew[labeled[i]])]
      # Oi: weighted count of neighbors whose label disagrees with i's;
      # each edge weight is inversely related to the distance.
      Oi <- 0
      W <- k <- 0
      for (j in ady[[i]]) {
        k <- k + 1
        W[k] <- 1 / (1 + D[labeled[i], labeled[j]])
        if (ynew[labeled[i]] != ynew[labeled[j]]) {
          Oi <- Oi + W[k]
        }
      }
      if (normalCriterion(theta, Oi, length(ady[[i]]), propi, W)) {
        noise.insts <- c(noise.insts, i)
      }
    }
    # Remove the instances flagged as noise from the labeled set.
    if (length(noise.insts) > 0) {
      ynew[labeled[noise.insts]] <- NA
      labeled <- labeled[-noise.insts]
    }
    iter <- iter + 1
  }
  ### Result ###
  # Train the final model on the enlarged labeled set.
  model <- gen.learner(labeled, ynew[labeled])
  result <- list(
    model = model,
    instances.index = labeled
  )
  class(result) <- "setredG"
  result
}
#' @title General Interface for SETRED model
#' @description SETRED (SElf-TRaining with EDiting) is a variant of the self-training
#' classification method (as implemented in the function \code{\link{selfTraining}}) with a different addition mechanism.
#' The SETRED classifier is initially trained with a
#' reduced set of labeled examples. Then, it is iteratively retrained with its own most
#' confident predictions over the unlabeled examples. SETRED uses an amending scheme
#' to avoid the introduction of noisy examples into the enlarged labeled set. For each
#' iteration, the mislabeled examples are identified using the local information provided
#' by the neighborhood graph.
#' @param dist A distance function or the name of a distance available
#' in the \code{proxy} package to compute. Default is "Euclidean"
#' the distance matrix in the case that \code{D} is \code{NULL}.
#' @param learner model from parsnip package for training a supervised base classifier
#' using a set of instances. This model needs to have probability predictions
#' (or optionally a distance matrix) and its corresponding classes.
#' @param theta Rejection threshold to test the critical region. Default is 0.1.
#' @param max.iter maximum number of iterations to execute the self-labeling process.
#' Default is 50.
#' @param perc.full A number between 0 and 1. If the percentage
#' of new labeled examples reaches this value the self-training process is stopped.
#' Default is 0.7.
#' @param D A distance matrix between all the training instances. This matrix is used to
#' construct the neighborhood graph. Default is NULL, this means the
#' method create a matrix with dist param
#' @details
#' SETRED initiates the self-labeling process by training a model from the original
#' labeled set. In each iteration, the \code{learner} function detects unlabeled
#' examples for which it makes the most confident prediction and labels those examples
#' according to the \code{pred} function. The identification of mislabeled examples is
#' performed using a neighborhood graph created from the distance matrix.
#' Most examples possess the same label in a neighborhood. So if an example locates
#' in a neighborhood with too many neighbors from different classes, this example should
#' be considered problematic. The value of the \code{theta} argument controls the confidence
#' of the candidates selected to enlarge the labeled set. The lower this value is, the more
#' restrictive is the selection of the examples that are considered good.
#' For more information about the self-labeled process and the rest of the parameters, please
#' see \code{\link{selfTraining}}.
#'
#' @return (When model fit) A list object of class "setred" containing:
#' \describe{
#' \item{model}{The final base classifier trained using the enlarged labeled set.}
#' \item{instances.index}{The indexes of the training instances used to
#' train the \code{model}. These indexes include the initial labeled instances
#' and the newly labeled instances.
#' Those indexes are relative to \code{x} argument.}
#' \item{classes}{The levels of \code{y} factor.}
#' \item{pred}{The function provided in the \code{pred} argument.}
#' \item{pred.pars}{The list provided in the \code{pred.pars} argument.}
#' }
#' @references
#' Ming Li and ZhiHua Zhou.\cr
#' \emph{Setred: Self-training with editing.}\cr
#' In Advances in Knowledge Discovery and Data Mining, volume 3518 of Lecture Notes in
#' Computer Science, pages 611-621. Springer Berlin Heidelberg, 2005.
#' ISBN 978-3-540-26076-9. doi: 10.1007/11430919 71.
#' @example demo/SETRED.R
#' @importFrom magrittr %>%
#' @export
setred <- function(
  dist = "Euclidean",
  learner,
  theta = 0.1,
  max.iter = 50,
  perc.full = 0.7,
  D = NULL
) {
  # Deferred fitting function executed by the sslr framework with the
  # instance matrix `x` and the partially labeled response `y`
  # (NA marks unlabeled instances).
  train_function <- function(x, y) {
    y <- as.factor(y)
    # Instance matrix case: train the parsnip base learner on the rows
    # indexed by `training.ints` with the (possibly self-labeled) classes.
    gen.learner2 <- function(training.ints, cls) {
      m <- learner %>% parsnip::fit_xy(x = x[training.ints,], y = cls)
      return(m)
    }
    # Class-probability predictions for the rows indexed by `testing.ints`.
    gen.pred2 <- function(m, testing.ints) {
      prob <- predict(m, x[testing.ints,], type = "prob")
      return(prob)
    }
    # Build the distance matrix for the neighborhood graph unless the
    # caller supplied one through `D`.
    if (is.null(D))
      distance <- proxy::dist(x, method = dist, by_rows = TRUE, diag = TRUE, upper = TRUE)
    else
      distance <- D
    result <- setredG(
      y,
      D = distance,
      gen.learner2, gen.pred2,
      theta, max.iter, perc.full
    )
    ### Result ###
    # Metadata consumed later by predict.setred().
    result$classes <- levels(y)
    result$pred <- "predict"
    result$pred.pars <- "prob"
    result$pred.params <- c("class","raw")
    result$mode <- "classification"
    class(result) <- "setred"
    result
  }
  # NOTE(review): `D` is captured by `train_function` via its closure but is
  # not recorded in `args`; confirm this matches new_model_sslr()'s contract.
  args <- list(
    dist = dist,
    learner = learner,
    theta = theta,
    max.iter = max.iter,
    perc.full = perc.full
  )
  new_model_sslr(train_function, "setred", args)
}
#' @title Predictions of the SETRED method
#' @description Predicts the label of instances according to the \code{setred} model.
#' @details For additional help see \code{\link{setred}} examples.
#' @param object SETRED model built with the \code{\link{setred}} function.
#' @param x A object that can be coerced as matrix.
#' Depending on how was the model built, \code{x} is interpreted as a matrix
#' with the distances between the unseen instances and the selected training instances,
#' or a matrix of instances.
#' @param ... This parameter is included for compatibility reasons.
#' @param col_name is the colname from returned tibble in class type.
#' The same from parsnip and tidymodels
#' Default is .pred_class
#' @return Vector with the labels assigned.
#' @method predict setred
#' @importFrom stats predict
predict.setred <- function(object, x, col_name = ".pred_class", ...) {
  # Class probabilities from the final base classifier; predProb() is the
  # package-internal prediction helper, called with the `pred`/`pred.pars`
  # fields that setred() stored on the fitted object.
  prob <- predProb(object$model, x, object$pred, object$pred.pars)
  colnames(prob) <- object$classes
  # Validate the probability matrix and map each row to the class with the
  # highest probability.
  result <- getClass(
    checkProb(
      prob = prob,
      ninstances = nrow(x),
      object$classes
    )
  )
  # NOTE(review): `col_name` is accepted for API consistency with parsnip but
  # is never used here; the return value is a plain vector of labels -
  # confirm whether a tibble column rename was intended.
  result
}
#' @title Normal criterion
#' @details Computes the critical value using the normal distribution as the authors suggest
#' when the neighborhood is big for the instances in the RNG.
#' @return A boolean value indicating if the instance must be eliminated
#' @noRd
normalCriterion <- function(theta, Oi, vec, propi, W) {
  # Normal approximation to the null distribution of J, the weighted count
  # of neighbors whose label disagrees with the tested instance.
  # (`vec`, the neighborhood size, is part of the signature but unused.)
  mu <- (1 - propi) * sum(W)
  sigma <- sqrt(propi * (1 - propi) * sum(W ^ 2))
  # Left-tail critical value at significance level theta.
  cutoff <- stats::qnorm(theta / 2, mu, sigma)
  # Degenerate case: the approximation can yield a negative critical value;
  # an observation of exactly zero is then never flagged.
  if (cutoff < 0 && Oi == 0) {
    return(FALSE)
  }
  Oi >= cutoff
}
|
# Fit limma-voom models for every non-intercept coefficient in `design` and
# stack the per-coefficient differential-expression tables into one long
# data.frame.
#
# Args:
#   dgelist_object: count object passed to voom() - presumably an edgeR
#     DGEList (TODO confirm against callers).
#   design: a design matrix; one topTable is extracted per non-intercept
#     column.
# Returns: a data.frame with gene_id, gene_names and group (coefficient
#   name) first, followed by the remaining topTable statistic columns.
limma_voom_longtable_maker <- function(dgelist_object, design){
  #create a longtable of DEG FC output
  fit <- eBayes(lmFit(voom(dgelist_object), design))
  out_df <- data.frame()
  # One topTable per coefficient, skipping any column whose name contains
  # "Intercept".
  # NOTE(review): out_df grows via rbind() inside the loop; fine for a few
  # coefficients, but consider list + do.call(rbind, ...) for wide designs.
  for(i in colnames(design)){
    if(!grepl('Intercept', i)){
      cur_table <- topTable(fit,
                            coef = i,
                            number = Inf,
                            genelist = fit$genes$gene_name,
                            confint = TRUE,
                            adjust='fdr')
      # Prepend the coefficient name and the rownames (gene ids) as columns.
      cur_table <- data.frame(group=i,gene_id=row.names(cur_table), cur_table)
      out_df <- rbind(out_df,cur_table)
    }
  }
  # fixing column ordering and names
  # `ID` is the gene-name column produced by topTable's `genelist` argument.
  gene_names <- out_df$ID
  group <- out_df$group
  # Reassemble with id/name/group first, then the remaining statistic
  # columns (columns 3 onward of the loop output).
  out_df <- data.frame(gene_id=out_df$gene_id,
                       gene_names=gene_names,
                       group=group,
                       out_df[,3:length(out_df)],
                       row.names=NULL)
  return(out_df)
} | /limma_voom_longtable_maker.R | no_license | lefeverde/miscellaneousR | R | false | false | 936 | r | limma_voom_longtable_maker <- function(dgelist_object, design){
  #create a longtable of DEG FC output
  # Fit the limma-voom pipeline once; one topTable is then extracted per
  # non-intercept coefficient of `design` and stacked into a long table.
  fit <- eBayes(lmFit(voom(dgelist_object), design))
  out_df <- data.frame()
  # NOTE(review): out_df grows via rbind() inside the loop; fine for a few
  # coefficients, but consider list + do.call(rbind, ...) for wide designs.
  for(i in colnames(design)){
    if(!grepl('Intercept', i)){
      cur_table <- topTable(fit,
                            coef = i,
                            number = Inf,
                            genelist = fit$genes$gene_name,
                            confint = TRUE,
                            adjust='fdr')
      # Prepend the coefficient name and the rownames (gene ids) as columns.
      cur_table <- data.frame(group=i,gene_id=row.names(cur_table), cur_table)
      out_df <- rbind(out_df,cur_table)
    }
  }
  # fixing column ordering and names
  # `ID` is the gene-name column produced by topTable's `genelist` argument.
  gene_names <- out_df$ID
  group <- out_df$group
  # Reassemble with id/name/group first, then the remaining statistic
  # columns (columns 3 onward of the loop output).
  out_df <- data.frame(gene_id=out_df$gene_id,
                       gene_names=gene_names,
                       group=group,
                       out_df[,3:length(out_df)],
                       row.names=NULL)
  return(out_df)
}
\name{One-Sample Tests}
\alias{Zinterval}
\alias{sample.size.Zinterval}
\alias{Tinterval}
\alias{AZinterval}
\alias{Chi2interval}
\alias{Propinterval}
\alias{sample.size.Propinterval}
\alias{Predinterval}
\alias{t.quantile}
\alias{Chi2.quantile}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
One-Sample Confidence Intervals
}
\description{
Compute confidence intervals on the population mean, population variance, and population proportion. In addition, it computes prediction interval for a single future observation from a normal distribution.
}
\usage{
#CI for population mean of a normal distribution when the population variance is known:
Zinterval(level,sigma,sample) # if sample available
Zinterval(level,sigma,n,barx) # if stats are provided
# Choice of sample size for estimating the population mean when error is specified
sample.size.Zinterval(level,sigma,E)
#CI for population mean when the population variance is unknown and the distribution is normal
#or when the sample size is smaller than 25:
Tinterval(level,sample) # if sample available
Tinterval(level,n,barx,s) # if stats are provided
#Large-sample CI for population mean:
AZinterval(level,sample) # if sample available
AZinterval(level,n,barx,s) # if stats are provided
#CI for population variance (or standard deviation) of a normal distribution:
Chi2interval(level,sample) # if sample available
Chi2interval(level,n,s) # if stats are provided
#Large-sample CI for a population proportion:
Propinterval(level,n,X)
# Choice of sample size for estimating a population proportion when error is specified
sample.size.Propinterval(level,ini.p,E) # using an initial guess
sample.size.Propinterval(level,ini.p=0.5,E) # using the conservative approach
# Prediction interval of a single future observation from a normal distribution:
Predinterval(level,sample) # if sample available
Predinterval(level,n,barx,s) # if stats are provided
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{level}{the confidence level}
\item{sample}{a vector of the observed sample}
\item{sigma}{the known population standard deviation}
\item{s}{the observed sample standard deviation}
\item{barx}{the observed sample mean}
\item{n}{the sample size}
\item{X}{number of observations belongs to a class of interest}
\item{E}{specified error in sample size calculation}
\item{ini.p}{An initial estimate of the population proportion. Default is 0.5, which corresponds to the conservative approach}
\item{df}{the degrees of freedom of a t or chi.square distribution}
\item{q}{a quantile value}
}
\details{
Compute CIs for the population mean, population variance, and population proportion and PI for a single future observation from a normal distribution.}
\value{
\item{interval}{As long as the function has "interval", the outcome contains a two-sided CI (or PI) and the two one-sided confidence bounds.}
\item{sample.size}{As long as the function has "sample.size", the outcome is the minimum sample size required to control the error to be no larger than E.}
\item{t.quantile}{quantile of a t distribution}
\item{Chi2.quantile}{quantile of a chi.square distribution}
}
\references{
Chapter 8 of the textbook "Applied Statistics and Probability for Engineers" 7th edition
}
\author{
Dewei Wang
}
\note{
deweiwang@stat.sc.edu
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
#Zinterval
#must include the = sign
x=c(64.1, 64.7, 64.5, 64.6, 64.5, 64.3, 64.6, 64.8, 64.2, 64.3)
Zinterval(level=0.95,sigma=1,sample=x)
Zinterval(level=0.99,sigma=1,sample=x)
sample.size.Zinterval(E=0.5,sigma=1,level=0.95)
# Using stats, must include the = sign
Zinterval(level=0.95,sigma=2,n=9,barx=98)
#Tinterval
#must include the = sign
Tinterval(level=0.95,n=10,barx=1000,s=20)
Tinterval(level=0.95,n=25,barx=1000,s=20)
Tinterval(level=0.99,n=10,barx=1000,s=20)
Tinterval(level=0.99,n=25,barx=1000,s=20)
#Large-sample Zinterval
#must include the = sign
x=scan("https://raw.githubusercontent.com/Harrindy/StatEngine/master/Data/Mercury.csv")
AZinterval(level=0.95,sample=x)
#Chi.square interval for variance/standard deviation
#must include the = sign
Chi2interval(level=0.95,n=20,s=0.01532)
#CIs for a population proportion
#must include the = sign
Propinterval(level=0.95,n=85,X=10)
sample.size.Propinterval(level=0.95,ini.p=0.12,E=0.05)
sample.size.Propinterval(level=0.95,ini.p=0.5,E=0.05)
#Prediction interval for normal distribution
#must include the = sign
x=c(19.8, 10.1, 14.9, 7.5, 15.4, 15.4, 15.4, 18.5, 7.9, 12.7, 11.9, 11.4, 11.4,
14.1, 17.6, 16.7, 15.8, 19.5, 8.8, 13.6, 11.9, 11.4)
Tinterval(level=0.95,sample=x)
Predinterval(level=0.95,sample=x)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/OneSampleCI.Rd | no_license | bgrose/StatEngine | R | false | false | 4,963 | rd | \name{One-Sample Tests}
\alias{Zinterval}
\alias{sample.size.Zinterval}
\alias{Tinterval}
\alias{AZinterval}
\alias{Chi2interval}
\alias{Propinterval}
\alias{sample.size.Propinterval}
\alias{Predinterval}
\alias{t.quantile}
\alias{Chi2.quantile}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
One-Sample Confidence Intervals
}
\description{
Compute confidence intervals on the population mean, population variance, and population proportion. In addition, it computes prediction interval for a single future observation from a normal distribution.
}
\usage{
#CI for pupulation mean of a normal distribution when the population variance is known:
Zinterval(level,sigma,sample) # if sample available
Zinterval(level,sigma,n,barx) # if stats are provided
# Choice of sample size for estimating the population mean when error is specified
sample.size.Zinterval(level,sigma,E)
#CI for pupulation mean when the population variance is unknown and the distribution is normal
#or when the sample size is smaller than 25:
Tinterval(level,sample) # if sample available
Tinterval(level,n,barx,s) # if stats are provided
#Large-sample CI for pupulation mean:
AZinterval(level,sample) # if sample available
AZinterval(level,n,barx,s) # if stats are provided
#CI for pupulation variance (or standard deviation) of a normal distribution:
Chi2interval(level,sample) # if sample available
Chi2interval(level,n,s) # if stats are provided
#Large-sample CI for a pupulation proportion:
Propinterval(level,n,X)
# Choice of sample size for estimating a population proportion when error is specified
sample.size.Propinterval(level,ini.p,E) # using an intial guess
sample.size.Propinterval(level,ini.p=0.5,E) # using the conservative apporach
# Prediction interval of a single future observation form a normal distribution:
Predinterval(level,sample) # if sample available
Predinterval(level,n,barx,s) # if stats are provided
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{level}{the confidence level}
\item{sample}{a vector of the observed sample}
\item{sigma}{the known population standard deviation}
\item{s}{the observed sample standard deviation}
\item{barx}{the observed sample mean}
\item{n}{the sample size}
\item{X}{number of observations belongs to a class of interest}
\item{E}{specified error in sample size calculation}
\item{ini.p}{A initial estimate of the populatin proportion. Default is 0.5 which corresponds to the conservative approach}
\item{df}{the degrees of freedom of a t or chi.square distribution}
\item{q}{a quantile value}
}
\details{
Compute CIs for the population mean, population variance, and population proportaion and PI for a single future observation from a normal distribution.}
\value{
\item{interval}{As long as the function has "interval", the outcome contains a two-sided CI (or PI) and the two one-sided confidence bounds.}
\item{sample.size}{As long as the function has "sample.size", the outcome is the minimum sample size required to control the error to be no larger than E.}
\item{t.quantile}{quantile of a t distribution}
\item{Chi2.quantile}{quantile of a chi.square distribution}
}
\references{
Chapter 8 of the textbook "Applied Statistics and Probability for Engineers" 7th edition
}
\author{
Dewei Wang
}
\note{
deweiwang@stat.sc.edu
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
#Zinterval
#must include the = sign
x=c(64.1, 64.7, 64.5, 64.6, 64.5, 64.3, 64.6, 64.8, 64.2, 64.3)
Zinterval(level=0.95,sigma=1,sample=x)
Zinterval(level=0.99,sigma=1,sample=x)
sample.size.Zinterval(E=0.5,sigma=1,level=0.95)
# Using stats, must include the = sign
Zinterval(level=0.95,sigma=2,n=9,barx=98)
#Tinterval
#must include the = sign
Tinterval(level=0.95,n=10,barx=1000,s=20)
Tinterval(level=0.95,n=25,barx=1000,s=20)
Tinterval(level=0.99,n=10,barx=1000,s=20)
Tinterval(level=0.99,n=25,barx=1000,s=20)
#Large-sample Zinterval
#must include the = sign
x=scan("https://raw.githubusercontent.com/Harrindy/StatEngine/master/Data/Mercury.csv")
AZinterval(level=0.95,sample=x)
#Chi.square interval for variance/standard deviation
#must include the = sign
Chi2interval(level=0.95,n=20,s=0.01532)
#CIs for a porpulation proportion
#must include the = sign
Propinterval(level=0.95,n=85,X=10)
sample.size.Propinterval(level=0.95,ini.p=0.12,E=0.05)
sample.size.Propinterval(level=0.95,ini.p=0.5,E=0.05)
#Prediction interval for normal distribution
#must include the = sign
x=c(19.8, 10.1, 14.9, 7.5, 15.4, 15.4, 15.4, 18.5, 7.9, 12.7, 11.9, 11.4, 11.4,
14.1, 17.6, 16.7, 15.8, 19.5, 8.8, 13.6, 11.9, 11.4)
Tinterval(level=0.95,sample=x)
Predinterval(level=0.95,sample=x)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/allgeneric.R, R/searchlight.R
\name{run_searchlight}
\alias{run_searchlight}
\alias{run_searchlight.mvpa_model}
\alias{run_searchlight.rsa_model}
\alias{run_searchlight.manova_model}
\title{run_searchlight}
\usage{
run_searchlight(model_spec, radius, method, niter, ...)
\method{run_searchlight}{mvpa_model}(
model_spec,
radius = 8,
method = c("randomized", "standard"),
niter = 4,
combiner = "average",
permute = FALSE,
...
)
\method{run_searchlight}{rsa_model}(
model_spec,
radius = 8,
method = c("randomized", "standard"),
niter = 4,
permute = FALSE,
distmethod = c("spearman", "pearson"),
regtype = c("pearson", "spearman", "lm", "rfit"),
...
)
\method{run_searchlight}{manova_model}(
model_spec,
radius = 8,
method = c("randomized", "standard"),
niter = 4,
permute = FALSE,
...
)
}
\arguments{
\item{model_spec}{An object of type \code{manova_model} specifying the MANOVA model to be used.}
\item{radius}{The radius of the searchlight sphere (default is 8, allowable range: 1-100).}
\item{method}{The method used for the searchlight analysis ("randomized" or "standard").}
\item{niter}{The number of iterations for randomized searchlight (default is 4).}
\item{...}{Additional arguments to be passed to the function.}
\item{combiner}{A function that combines results into an appropriate output, or one of the following strings: "pool" or "average".}
\item{permute}{Whether to permute the labels (default is FALSE).}
\item{distmethod}{The method used to compute distances between searchlight samples ("spearman" or "pearson").}
\item{regtype}{The method used to fit response and predictor distance matrices ("pearson", "spearman", "lm", or "rfit").}
}
\value{
A named list of \code{NeuroVol} objects, where each element contains a performance metric (e.g. AUC) at every voxel location.
}
\description{
Execute a searchlight analysis.
This function runs a searchlight analysis using a specified MVPA model, radius, and method.
It can be customized with a combiner function and permutation options.
This function runs a searchlight analysis using a specified RSA model, radius, and method.
It can be customized with permutation options, distance computation methods, and regression methods.
This function runs a searchlight analysis using a specified MANOVA model, radius, and method.
It can be customized with permutation options.
}
\examples{
# Example: generate a sample dataset and run a standard searchlight analysis
dataset <- gen_sample_dataset(c(4,4,4), 100, blocks=3)
cval <- blocked_cross_validation(dataset$design$block_var)
model <- load_model("sda_notune")
mspec <- mvpa_model(model, dataset$dataset, design=dataset$design, model_type="classification", crossval=cval)
res <- run_searchlight(mspec, radius=8, method="standard")
# A custom "combiner" can be used to post-process the output of the searchlight classifier for special cases.
# In the example below, the supplied "combining function" extracts the predicted probability of the correct class
# for every voxel and every trial and then stores them in a data.frame.
\dontrun{
custom_combiner <- function(mspec, good, bad) {
good \%>\% pmap(function(result, id, ...) {
data.frame(trial=1:length(result$observed), id=id, prob=prob_observed(result))
}) \%>\% bind_rows()
}
res2 <- run_searchlight(mspec, radius=8, method="standard", combiner=custom_combiner)
}
}
\references{
Bjornsdotter, M., Rylander, K., & Wessberg, J. (2011). A Monte Carlo method for locally multivariate brain mapping. Neuroimage, 56(2), 508-516.
Kriegeskorte, N., Goebel, R., & Bandettini, P. (2006). Information-based functional brain mapping. Proceedings of the National academy of Sciences of the United States of America, 103(10), 3863-3868.
}
\seealso{
\code{\link{run_searchlight.randomized}}, \code{\link{run_searchlight.standard}}
}
| /man/run_searchlight.Rd | no_license | bbuchsbaum/rMVPA | R | false | true | 3,886 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/allgeneric.R, R/searchlight.R
\name{run_searchlight}
\alias{run_searchlight}
\alias{run_searchlight.mvpa_model}
\alias{run_searchlight.rsa_model}
\alias{run_searchlight.manova_model}
\title{run_searchlight}
\usage{
run_searchlight(model_spec, radius, method, niter, ...)
\method{run_searchlight}{mvpa_model}(
model_spec,
radius = 8,
method = c("randomized", "standard"),
niter = 4,
combiner = "average",
permute = FALSE,
...
)
\method{run_searchlight}{rsa_model}(
model_spec,
radius = 8,
method = c("randomized", "standard"),
niter = 4,
permute = FALSE,
distmethod = c("spearman", "pearson"),
regtype = c("pearson", "spearman", "lm", "rfit"),
...
)
\method{run_searchlight}{manova_model}(
model_spec,
radius = 8,
method = c("randomized", "standard"),
niter = 4,
permute = FALSE,
...
)
}
\arguments{
\item{model_spec}{An object of type \code{manova_model} specifying the MANOVA model to be used.}
\item{radius}{The radius of the searchlight sphere (default is 8, allowable range: 1-100).}
\item{method}{The method used for the searchlight analysis ("randomized" or "standard").}
\item{niter}{The number of iterations for randomized searchlight (default is 4).}
\item{...}{Additional arguments to be passed to the function.}
\item{combiner}{A function that combines results into an appropriate output, or one of the following strings: "pool" or "average".}
\item{permute}{Whether to permute the labels (default is FALSE).}
\item{distmethod}{The method used to compute distances between searchlight samples ("spearman" or "pearson").}
\item{regtype}{The method used to fit response and predictor distance matrices ("pearson", "spearman", "lm", or "rfit").}
}
\value{
A named list of \code{NeuroVol} objects, where each element contains a performance metric (e.g. AUC) at every voxel location.
}
\description{
Execute a searchlight analysis.
This function runs a searchlight analysis using a specified MVPA model, radius, and method.
It can be customized with a combiner function and permutation options.
This function runs a searchlight analysis using a specified RSA model, radius, and method.
It can be customized with permutation options, distance computation methods, and regression methods.
This function runs a searchlight analysis using a specified MANOVA model, radius, and method.
It can be customized with permutation options.
}
\examples{
# Example: generate a sample dataset and run a standard searchlight analysis
dataset <- gen_sample_dataset(c(4,4,4), 100, blocks=3)
cval <- blocked_cross_validation(dataset$design$block_var)
model <- load_model("sda_notune")
mspec <- mvpa_model(model, dataset$dataset, design=dataset$design, model_type="classification", crossval=cval)
res <- run_searchlight(mspec, radius=8, method="standard")
# A custom "combiner" can be used to post-process the output of the searchlight classifier for special cases.
# In the example below, the supplied "combining function" extracts the predicted probability of the correct class
# for every voxel and every trial and then stores them in a data.frame.
\dontrun{
custom_combiner <- function(mspec, good, bad) {
good \%>\% pmap(function(result, id, ...) {
data.frame(trial=1:length(result$observed), id=id, prob=prob_observed(result))
}) \%>\% bind_rows()
}
res2 <- run_searchlight(mspec, radius=8, method="standard", combiner=custom_combiner)
}
}
\references{
Bjornsdotter, M., Rylander, K., & Wessberg, J. (2011). A Monte Carlo method for locally multivariate brain mapping. Neuroimage, 56(2), 508-516.
Kriegeskorte, N., Goebel, R., & Bandettini, P. (2006). Information-based functional brain mapping. Proceedings of the National academy of Sciences of the United States of America, 103(10), 3863-3868.
}
\seealso{
\code{\link{run_searchlight.randomized}}, \code{\link{run_searchlight.standard}}
}
|
#=================
#CalculateCongestionPolicy.R
#=================
# This module calculates the amount of congestion - automobile,
# light truck, truck, and bus vmt are allocated to freeways, arterials,
# and other roadways adjusted by policy applied for the selected scenario.
# library(visioneval)
#=============================================
#SECTION 1: ESTIMATE AND SAVE MODEL PARAMETERS
#=============================================
#Load the alternative mode trip models from GreenSTEP
# Resolve the path of the saved congestion model: prefer the in-source
# development copy under inst/extdata; otherwise fall back to the copy
# installed with the VETransportSupplyUse package.
# NOTE(review): the two branches spell the file extension differently
# ("CongModel_ls.RData" vs "CongModel_ls.Rdata"); on a case-sensitive
# filesystem only one spelling can match the actual file -- confirm which
# one the packaged data file uses.
CongModel_name <-
  if ( dir.exists("inst/extdata") ) {
    "inst/extdata/CongModel_ls.RData"
  } else {
    system.file("extdata", "CongModel_ls.Rdata", package = "VETransportSupplyUse")
  }
# Loads the object `CongModel_ls` (documented below) into the environment.
load(CongModel_name)
#' Congestion models and required parameters.
#'
#' A list of components describing congestion models and various parameters
#' required by those models.
#'
#' @format A list having 'Fwy' and 'Art' components. Each component has a
#' logistic model to indicate the level of congestion which are categorized
#' as NonePct, HvyPct, SevPct, and NonePct (NOTE: 'NonePct' appears twice in
#' this list; one entry is likely a different category -- verify against the
#' model object). This list also contains other
#' parameters that are used in the evaluation of aforementioned models.
#' @source GreenSTEP version ?.? model.
"CongModel_ls"
#================================================
#SECTION 2: DEFINE THE MODULE DATA SPECIFICATIONS
#================================================
#Define the data specifications
#------------------------------
# The specifications are interpreted by the visioneval framework:
#   RunBy - the level of geography the module is applied at
#   Get   - datasets read from the datastore before the module runs
#   Set   - datasets written to the datastore after the module runs
CalculateCongestionPolicySpecifications <- list(
  #Level of geography module is applied at
  RunBy = "Region",
  #Specify new tables to be created by Inp if any
  #Specify new tables to be created by Set if any
  #Specify input data
  #Specify data to be loaded from data store
  Get = items(
    item(
      NAME = "ITS",
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "double",
      UNITS = "proportion",
      NAVALUE = -1,
      SIZE = 0,
      PROHIBIT = c("NA", "< 0", "> 1"),
      ISELEMENTOF = ""
    ),
    item(
      NAME = "Type",
      TABLE = "Vmt",
      GROUP = "Global",
      TYPE = "character",
      UNITS = "category",
      PROHIBIT = "NA",
      ISELEMENTOF = c("BusVmt", "TruckVmt"),
      SIZE = 8
    ),
    item(
      NAME = "PropVmt",
      TABLE = "Vmt",
      GROUP = "Global",
      TYPE = "double",
      UNITS = "proportion",
      PROHIBIT = c("NA", "< 0", "> 1"),
      ISELEMENTOF = ""
    ),
    item(
      # items() (not item()) is the convention used for every other
      # multi-name NAME specification in this file; both build the same list.
      NAME = items(
        "Fwy",
        "Art",
        "Other"
      ),
      TABLE = "Vmt",
      GROUP = "Global",
      TYPE = "double",
      UNITS = "proportion",
      PROHIBIT = c("NA", "< 0", "> 1"),
      ISELEMENTOF = ""
    ),
    item(
      NAME = "BaseLtVehDvmt",
      TABLE = "Model",
      GROUP = "Global",
      TYPE = "compound",
      UNITS = "MI/DAY",
      PROHIBIT = c("NA", "< 0"),
      SIZE = 0,
      ISELEMENTOF = ""
    ),
    item(
      NAME = "BaseFwyArtProp",
      TABLE = "Model",
      GROUP = "Global",
      TYPE = "double",
      UNITS = "proportion",
      PROHIBIT = c("NA", "< 0", "> 1"),
      SIZE = 0,
      ISELEMENTOF = ""
    ),
    item(
      NAME = "Bzone",
      TABLE = "Bzone",
      GROUP = "Year",
      TYPE = "character",
      UNITS = "ID",
      PROHIBIT = "NA",
      SIZE = 8,
      ISELEMENTOF = ""
    ),
    item(
      NAME = "UrbanPop",
      TABLE = "Bzone",
      GROUP = "Year",
      TYPE = "people",
      UNITS = "PRSN",
      PROHIBIT = c("NA", "< 0"),
      SIZE = 0,
      ISELEMENTOF = ""
    ),
    item(
      NAME = "Bzone",
      TABLE = "Bzone",
      GROUP = "BaseYear",
      TYPE = "character",
      UNITS = "ID",
      PROHIBIT = "NA",
      SIZE = 8,
      ISELEMENTOF = "",
      OPTIONAL = TRUE
    ),
    item(
      NAME = "UrbanPop",
      TABLE = "Bzone",
      GROUP = "BaseYear",
      TYPE = "people",
      UNITS = "PRSN",
      PROHIBIT = c("NA", "< 0"),
      SIZE = 0,
      ISELEMENTOF = "",
      OPTIONAL = TRUE
    ),
    item(
      NAME = "DvmtPolicy",
      TABLE = "Bzone",
      GROUP = "Year",
      TYPE = "compound",
      UNITS = "MI/DAY",
      PROHIBIT = c("NA", "< 0"),
      SIZE = 0,
      ISELEMENTOF = ""
    ),
    item(
      NAME = "Marea",
      TABLE = "Marea",
      GROUP = "Year",
      TYPE = "character",
      UNITS = "ID",
      # NOTE(review): a "< 0" prohibition on a character ID field looks like
      # a copy-paste remnant from a numeric spec -- verify intent.
      PROHIBIT = c("NA", "< 0"),
      SIZE = 9,
      ISELEMENTOF = ""
    ),
    item(
      NAME = "TruckDvmtFuture",
      TABLE = "Marea",
      GROUP = "Year",
      TYPE = "compound",
      UNITS = "MI/DAY",
      PROHIBIT = c("NA", "< 0"),
      SIZE = 0,
      ISELEMENTOF = ""
    ),
    item(
      NAME = items(
        "FwyLaneMiPCFuture",
        "ArtLaneMiPCFuture",
        "TranRevMiPCFuture"),
      TABLE = "Marea",
      GROUP = "Year",
      TYPE = "compound",
      UNITS = "MI/PRSN",
      NAVALUE = -1,
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = ""
    ),
    item(
      NAME = items(
        "BusRevMiFuture",
        "RailRevMiFuture"),
      TABLE = "Marea",
      GROUP = "Year",
      TYPE = "distance",
      UNITS = "MI",
      NAVALUE = -1,
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = ""
    ),
    item(
      NAME = "TranRevMiAdjFactor",
      TABLE = "Model",
      GROUP = "Global",
      TYPE = "double",
      UNITS = "multiplier",
      PROHIBIT = c("NA", "< 0"),
      SIZE = 0,
      ISELEMENTOF = ""
    ),
    item(
      NAME = "LtVehDvmtFactor",
      TABLE = "Model",
      GROUP = "Global",
      TYPE = "double",
      UNITS = "multiplier",
      PROHIBIT = c("NA", "< 0"),
      SIZE = 0,
      ISELEMENTOF = ""
    )
  ),
  #Specify data to saved in the data store
  Set = items(
    item(
      NAME = items(
        "LtVehDvmtPolicy",
        "BusDvmtPolicy"
      ),
      TABLE = "Marea",
      GROUP = "Year",
      TYPE = "compound",
      UNITS = "MI/DAY",
      PROHIBIT = c("NA", "< 0"),
      SIZE = 0,
      ISELEMENTOF = "",
      DESCRIPTION = items(
        "Daily vehicle miles travelled by light vehicles",
        "Daily vehicle miles travelled by bus"
      )
    ),
    item(
      NAME = items(
        "MpgAdjLtVehPolicy",
        "MpgAdjBusPolicy",
        "MpgAdjTruckPolicy"
      ),
      TABLE = "Marea",
      GROUP = "Year",
      TYPE = "double",
      UNITS = "multiplier",
      PROHIBIT = c("NA", "< 0"),
      SIZE = 0,
      ISELEMENTOF = "",
      DESCRIPTION = items(
        "Fuel efficiency adjustment for light vehicles with internal combustion engine",
        "Fuel efficiency adjustment for buses with internal combustion engine",
        "Fuel efficiency adjustment for heavy trucks with internal combustion engine"
      )
    ),
    item(
      NAME = items(
        "MpKwhAdjLtVehHevPolicy",
        "MpKwhAdjLtVehEvPolicy",
        "MpKwhAdjBusPolicy",
        "MpKwhAdjTruckPolicy"
      ),
      TABLE = "Marea",
      GROUP = "Year",
      TYPE = "double",
      UNITS = "multiplier",
      PROHIBIT = c("NA", "< 0"),
      SIZE = 0,
      ISELEMENTOF = "",
      DESCRIPTION = items(
        "Power efficiency adjustment for light plugin/hybrid electric vehicles",
        "Power efficiency adjustment for light electric vehicles",
        "Power efficiency adjustment for buses with electric power train",
        "Power efficiency adjustment for heavy trucks with electric power train"
      )
    ),
    item(
      NAME = items(
        "VehHrLtVehPolicy",
        "VehHrBusPolicy",
        "VehHrTruckPolicy"
      ),
      TABLE = "Marea",
      GROUP = "Year",
      TYPE = "time",
      UNITS = "HR",
      PROHIBIT = c("NA", "< 0"),
      SIZE = 0,
      ISELEMENTOF = "",
      DESCRIPTION = items(
        "Total vehicle travel time for light vehicles",
        "Total vehicle travel time for buses",
        "Total vehicle travel time for heavy trucks"
      )
    ),
    item(
      NAME = items(
        "AveSpeedLtVehPolicy",
        "AveSpeedBusPolicy",
        "AveSpeedTruckPolicy"
      ),
      TABLE = "Marea",
      GROUP = "Year",
      TYPE = "compound",
      UNITS = "MI/HR",
      PROHIBIT = c("NA", "< 0"),
      SIZE = 0,
      ISELEMENTOF = "",
      DESCRIPTION = items(
        "Average speed for light vehicles",
        "Average speed for buses",
        "Average speed for heavy trucks"
      )
    ),
    item(
      NAME = items(
        "FfVehHrLtVehPolicy",
        "FfVehHrBusPolicy",
        "FfVehHrTruckPolicy"
      ),
      TABLE = "Marea",
      GROUP = "Year",
      TYPE = "time",
      UNITS = "HR",
      PROHIBIT = c("NA", "< 0"),
      SIZE = 0,
      ISELEMENTOF = "",
      DESCRIPTION = items(
        "Freeflow travel time for light vehicles",
        "Freeflow travel time for buses",
        "Freeflow travel time for heavy trucks"
      )
    ),
    item(
      NAME = items(
        "DelayVehHrLtVehPolicy",
        "DelayVehHrBusPolicy",
        "DelayVehHrTruckPolicy"
      ),
      TABLE = "Marea",
      GROUP = "Year",
      TYPE = "time",
      UNITS = "HR",
      PROHIBIT = c("NA", "< 0"),
      SIZE = 0,
      ISELEMENTOF = "",
      DESCRIPTION = items(
        "Total vehicle delay time for light vehicles",
        "Total vehicle delay time for buses",
        "Total vehicle delay time for heavy trucks"
      )
    ),
    item(
      NAME = "MpgAdjHhPolicy",
      TABLE = "Marea",
      GROUP = "Year",
      TYPE = "double",
      UNITS = "multiplier",
      PROHIBIT = c("NA", "< 0"),
      SIZE = 0,
      ISELEMENTOF = "",
      DESCRIPTION = "Fuel efficiency adjustment for households"
    ),
    item(
      NAME = "MpKwhAdjHevHhPolicy",
      TABLE = "Marea",
      GROUP = "Year",
      TYPE = "double",
      UNITS = "multiplier",
      PROHIBIT = c("NA", "< 0"),
      SIZE = 0,
      ISELEMENTOF = "",
      DESCRIPTION = "Power efficiency adjustment for households with HEV"
    ),
    item(
      NAME = "MpKwhAdjEvHhPolicy",
      TABLE = "Marea",
      GROUP = "Year",
      TYPE = "double",
      UNITS = "multiplier",
      PROHIBIT = c("NA", "< 0"),
      SIZE = 0,
      ISELEMENTOF = "",
      # Fixed duplicated word ("households households") in the description.
      DESCRIPTION = "Power efficiency adjustment for households with EV"
    )
  )
)
#Save the data specifications list
#---------------------------------
#' Specifications list for CalculateCongestionPolicy module
#'
#' A list containing specifications for the CalculateCongestionPolicy module.
#'
#' @format A list containing 3 components:
#' \describe{
#'  \item{RunBy}{the level of geography that the module is run at}
#'  \item{Get}{module inputs to be read from the datastore}
#'  \item{Set}{module outputs to be written to the datastore}
#' }
#' @source CalculateCongestionPolicy.R script.
"CalculateCongestionPolicySpecifications"
# Register the specifications as a package dataset so the visioneval
# framework can retrieve them when the module is run.
visioneval::savePackageDataset(CalculateCongestionPolicySpecifications, overwrite = TRUE)
#=======================================================
#SECTION 3: DEFINE FUNCTIONS THAT IMPLEMENT THE SUBMODEL
#=======================================================
#Main module function that calculates the amount of congestion adjusted to policy
#------------------------------------------------------------------
#' Function to calculate the amount of congestion after adjusting to policy.
#'
#' \code{CalculateCongestionPolicy} calculates the amount of congestion after
#' adjusting for policy.
#'
#' Auto, and light truck vmt, truck vmt, and bus vmt are allocated to freeways,
#' arterials, and other roadways. Truck and bus vmt are allocated based on
#' mode-specific data, and auto and light truck vmt are allocated based on a
#' combination of factors and a model that is sensitive to the relative
#' supplies of freeway and arterial lane miles.
#'
#' System-wide ratios of vmt to lane miles for freeways and arterials
#' are used to allocate vmt to congestion levels using congestion levels defined by
#' the Texas Transportation Institute for the Urban Mobility Report. Each freeway and
#' arterial congestion level is associated with an average trip speed for conditions that
#' do and do not include ITS treatment for incident management on the roadway. Overall average
#' speeds by congestion level are calculated based on input assumptions about the degree of
#' incident management. Speed vs. fuel efficiency relationships for light vehicles, trucks,
#' and buses are used to adjust the fleet fuel efficiency averages computed for the region.
#'
#' @param L A list containing the components listed in the Get specifications
#' for the module.
#' @return A list containing the components specified in the Set
#' specifications for the module.
#' @name CalculateCongestionPolicy
#' @import visioneval
#' @export
CalculateCongestionPolicy <- function(L) {
  #Set up
  #------
  # Helper: recursively append `suffix` to the names of every non-list
  # element of a nested list. Used to tag base-model outputs as policy
  # results. Returns NULL for non-list input (mirrors original behavior).
  AddSuffixFuture <- function(x, suffix = "Future") {
    if (!is.list(x)) return(NULL)
    if (length(x) == 0L) return(x)
    isElementList <- vapply(x, is.list, logical(1))
    # Rename the leaf (non-list) elements
    noList <- x[!isElementList]
    if (length(noList) > 0L) {
      names(noList) <- paste0(names(noList), suffix)
    }
    # Recurse into the sub-lists
    yesList <- lapply(x[isElementList], AddSuffixFuture, suffix = suffix)
    # Leaves are placed before sub-lists, as in the original implementation
    unlist(list(noList, yesList), recursive = FALSE)
  }
  # Helper: recursively strip a trailing `suffix` from the names of every
  # non-list element of a nested list.
  RemoveSuffixFuture <- function(x, suffix = "Future") {
    if (!is.list(x)) return(NULL)
    if (length(x) == 0L) return(x)
    isElementList <- vapply(x, is.list, logical(1))
    noList <- x[!isElementList]
    if (length(noList) > 0L) {
      # Anchor the pattern so only a trailing suffix is removed; the
      # previous unanchored gsub would also delete the text from mid-name
      # matches (e.g. "FutureX" -> "X").
      names(noList) <- gsub(paste0(suffix, "$"), "", names(noList))
    }
    yesList <- lapply(x[isElementList], RemoveSuffixFuture, suffix = suffix)
    unlist(list(noList, yesList), recursive = FALSE)
  }
  # Strip the scenario suffixes so the input names match the Get
  # specifications expected by the base congestion module.
  L <- RemoveSuffixFuture(L)
  L <- RemoveSuffixFuture(L, suffix = "Policy")
  #Return the results
  #------------------
  # Run the base congestion calculations on the renamed inputs
  # (the original comment incorrectly referred to CalculateTravelDemand).
  Out_ls <- CalculateCongestionBase(L)
  # Tag all outputs with the 'Policy' suffix to match the Set specifications
  Out_ls <- AddSuffixFuture(Out_ls, suffix = "Policy")
  #Return the outputs list
  return(Out_ls)
}
#================================
#Code to aid development and test
#================================
#Test code to check specifications, loading inputs, and whether datastore
#contains data needed to run module. Return input list (L) to use for developing
#module functions
#-------------------------------------------------------------------------------
# TestDat_ <- testModule(
# ModuleName = "CalculateCongestionPolicy",
# LoadDatastore = TRUE,
# SaveDatastore = TRUE,
# DoRun = FALSE
# )
# L <- TestDat_$L
#Test code to check everything including running the module and checking whether
#the outputs are consistent with the 'Set' specifications
#-------------------------------------------------------------------------------
# TestDat_ <- testModule(
# ModuleName = "CalculateCongestionPolicy",
# LoadDatastore = TRUE,
# SaveDatastore = TRUE,
# DoRun = TRUE
# )
| /sources/modules/VETransportSupplyUse/R/CalculateCongestionPolicy.R | permissive | VisionEval/VisionEval-Dev | R | false | false | 15,606 | r | #=================
#CalculateCongestionPolicy.R
#=================
# This module calculates the amount of congestion - automobile,
# light truck, truck, and bus vmt are allocated to freeways, arterials,
# and other roadways adjusted by policy applied for the selected scenario.
# library(visioneval)
#=============================================
#SECTION 1: ESTIMATE AND SAVE MODEL PARAMETERS
#=============================================
#Load the alternative mode trip models from GreenSTEP
CongModel_name <-
if ( dir.exists("inst/extdata") ) {
"inst/extdata/CongModel_ls.RData"
} else {
system.file("extdata", "CongModel_ls.Rdata", package = "VETransportSupplyUse")
}
load(CongModel_name)
#' Congestion models and required parameters.
#'
#' A list of components describing congestion models and various parameters
#' required by those models.
#'
#' @format A list having 'Fwy' and 'Art' components. Each component has a
#' logistic model to indicate the level of congestion which are categorized
#' as NonePct, HvyPct, SevPct, and NonePct. This list also contains other
#' parameters that are used in the evaluation of aforementioned models.
#' @source GreenSTEP version ?.? model.
"CongModel_ls"
#================================================
#SECTION 2: DEFINE THE MODULE DATA SPECIFICATIONS
#================================================
#Define the data specifications
#------------------------------
CalculateCongestionPolicySpecifications <- list(
#Level of geography module is applied at
RunBy = "Region",
#Specify new tables to be created by Inp if any
#Specify new tables to be created by Set if any
#Specify input data
#Specify data to be loaded from data store
Get = items(
item(
NAME = "ITS",
TABLE = "Azone",
GROUP = "Year",
TYPE = "double",
UNITS = "proportion",
NAVALUE = -1,
SIZE = 0,
PROHIBIT = c("NA", "< 0", "> 1"),
ISELEMENTOF = ""
),
item(
NAME = "Type",
TABLE = "Vmt",
GROUP = "Global",
TYPE = "character",
UNITS = "category",
PROHIBIT = "NA",
ISELEMENTOF = c("BusVmt","TruckVmt"),
SIZE = 8
),
item(
NAME = "PropVmt",
TABLE = "Vmt",
GROUP = "Global",
TYPE = "double",
UNITS = "proportion",
PROHIBIT = c("NA", "< 0", "> 1"),
ISELEMENTOF = ""
),
item(
NAME = item(
"Fwy",
"Art",
"Other"
),
TABLE = "Vmt",
GROUP = "Global",
TYPE = "double",
UNITS = "proportion",
PROHIBIT = c("NA", "< 0", "> 1"),
ISELEMENTOF = ""
),
item(
NAME = "BaseLtVehDvmt",
TABLE = "Model",
GROUP = "Global",
TYPE = "compound",
UNITS = "MI/DAY",
PROHIBIT = c('NA', '< 0'),
SIZE = 0,
ISELEMENTOF = ""
),
item(
NAME = "BaseFwyArtProp",
TABLE = "Model",
GROUP = "Global",
TYPE = "double",
UNITS = "proportion",
PROHIBIT = c('NA', '< 0', '> 1'),
SIZE = 0,
ISELEMENTOF = ""
),
item(
NAME = "Bzone",
TABLE = "Bzone",
GROUP = "Year",
TYPE = "character",
UNITS = "ID",
PROHIBIT = "NA",
SIZE = 8,
ISELEMENTOF = ""
),
item(
NAME = "UrbanPop",
TABLE = "Bzone",
GROUP = "Year",
TYPE = "people",
UNITS = "PRSN",
PROHIBIT = c('NA', '< 0'),
SIZE = 0,
ISELEMENTOF = ""
),
item(
NAME = "Bzone",
TABLE = "Bzone",
GROUP = "BaseYear",
TYPE = "character",
UNITS = "ID",
PROHIBIT = "NA",
SIZE = 8,
ISELEMENTOF = "",
OPTIONAL = TRUE
),
item(
NAME = "UrbanPop",
TABLE = "Bzone",
GROUP = "BaseYear",
TYPE = "people",
UNITS = "PRSN",
PROHIBIT = c('NA', '< 0'),
SIZE = 0,
ISELEMENTOF = "",
OPTIONAL = TRUE
),
item(
NAME = "DvmtPolicy",
TABLE = "Bzone",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/DAY",
PROHIBIT = c('NA', '< 0'),
SIZE = 0,
ISELEMENTOF = ""
),
item(
NAME = "Marea",
TABLE = "Marea",
GROUP = "Year",
TYPE = "character",
UNITS = "ID",
PROHIBIT = c('NA', '< 0'),
SIZE = 9,
ISELEMENTOF = ""
),
item(
NAME = "TruckDvmtFuture",
TABLE = "Marea",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/DAY",
PROHIBIT = c('NA', '< 0'),
SIZE = 0,
ISELEMENTOF = ""
),
item(
NAME = items(
"FwyLaneMiPCFuture",
"ArtLaneMiPCFuture",
"TranRevMiPCFuture"),
TABLE = "Marea",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/PRSN",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = items(
"BusRevMiFuture",
"RailRevMiFuture"),
TABLE = "Marea",
GROUP = "Year",
TYPE = "distance",
UNITS = "MI",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "TranRevMiAdjFactor",
TABLE = "Model",
GROUP = "Global",
TYPE = "double",
UNITS = "multiplier",
PROHIBIT = c('NA', '< 0'),
SIZE = 0,
ISELEMENTOF = ""
),
item(
NAME = "LtVehDvmtFactor",
TABLE = "Model",
GROUP = "Global",
TYPE = "double",
UNITS = "multiplier",
PROHIBIT = c('NA', '< 0'),
SIZE = 0,
ISELEMENTOF = ""
)
),
#Specify data to saved in the data store
Set = items(
item(
NAME = items(
"LtVehDvmtPolicy",
"BusDvmtPolicy"
),
TABLE = "Marea",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/DAY",
PROHIBIT = c("NA", "< 0"),
SIZE = 0,
ISELEMENTOF = "",
DESCRIPTION = items(
"Daily vehicle miles travelled by light vehicles",
"Daily vehicle miles travelled by bus"
)
),
item(
NAME = items(
"MpgAdjLtVehPolicy",
"MpgAdjBusPolicy",
"MpgAdjTruckPolicy"
),
TABLE = "Marea",
GROUP = "Year",
TYPE = "double",
UNITS = "multiplier",
PROHIBIT = c("NA", "< 0"),
SIZE = 0,
ISELEMENTOF = "",
DESCRIPTION = items(
"Fuel efficiency adjustment for light vehicles with internal combustion engine",
"Fuel efficiency adjustment for buses with internal combustion engine",
"Fuel efficiency adjustment for heavy trucks with internal combustion engine"
)
),
item(
NAME = items(
"MpKwhAdjLtVehHevPolicy",
"MpKwhAdjLtVehEvPolicy",
"MpKwhAdjBusPolicy",
"MpKwhAdjTruckPolicy"
),
TABLE = "Marea",
GROUP = "Year",
TYPE = "double",
UNITS = "multiplier",
PROHIBIT = c("NA", "< 0"),
SIZE = 0,
ISELEMENTOF = "",
DESCRIPTION = items(
"Power efficiency adjustment for light plugin/hybrid electric vehicles",
"Power efficiency adjustment for light electric vehicles",
"Power efficiency adjustment for buses with electric power train",
"Power efficiency adjustment for heavy trucks with electric power train"
)
),
item(
NAME = items(
"VehHrLtVehPolicy",
"VehHrBusPolicy",
"VehHrTruckPolicy"
),
TABLE = "Marea",
GROUP = "Year",
TYPE = "time",
UNITS = "HR",
PROHIBIT = c("NA", "< 0"),
SIZE = 0,
ISELEMENTOF = "",
DESCRIPTION = items(
"Total vehicle travel time for light vehicles",
"Total vehicle travel time for buses",
"Total vehicle travel time for heavy trucks"
)
),
item(
NAME = items(
"AveSpeedLtVehPolicy",
"AveSpeedBusPolicy",
"AveSpeedTruckPolicy"
),
TABLE = "Marea",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/HR",
PROHIBIT = c("NA", "< 0"),
SIZE = 0,
ISELEMENTOF = "",
DESCRIPTION = items(
"Average speed for light vehicles",
"Average speed for buses",
"Average speed for heavy trucks"
)
),
item(
NAME = items(
"FfVehHrLtVehPolicy",
"FfVehHrBusPolicy",
"FfVehHrTruckPolicy"
),
TABLE = "Marea",
GROUP = "Year",
TYPE = "time",
UNITS = "HR",
PROHIBIT = c("NA", "< 0"),
SIZE = 0,
ISELEMENTOF = "",
DESCRIPTION = items(
"Freeflow travel time for light vehicles",
"Freeflow travel time for buses",
"Freeflow travel time for heavy trucks"
)
),
item(
NAME = items(
"DelayVehHrLtVehPolicy",
"DelayVehHrBusPolicy",
"DelayVehHrTruckPolicy"
),
TABLE = "Marea",
GROUP = "Year",
TYPE = "time",
UNITS = "HR",
PROHIBIT = c("NA", "< 0"),
SIZE = 0,
ISELEMENTOF = "",
DESCRIPTION = items(
"Total vehicle delay time for light vehicles",
"Total vehicle delay time for buses",
"Total vehicle delay time for heavy trucks"
)
),
item(
NAME = "MpgAdjHhPolicy",
TABLE = "Marea",
GROUP = "Year",
TYPE = "double",
UNITS = "multiplier",
PROHIBIT = c('NA', '< 0'),
SIZE = 0,
ISELEMENTOF = "",
DESCRIPTION = "Fuel efficiency adjustment for households"
),
item(
NAME = "MpKwhAdjHevHhPolicy",
TABLE = "Marea",
GROUP = "Year",
TYPE = "double",
UNITS = "multiplier",
PROHIBIT = c('NA', '< 0'),
SIZE = 0,
ISELEMENTOF = "",
DESCRIPTION = "Power efficiency adjustment for households with HEV"
),
item(
NAME = "MpKwhAdjEvHhPolicy",
TABLE = "Marea",
GROUP = "Year",
TYPE = "double",
UNITS = "multiplier",
PROHIBIT = c('NA', '< 0'),
SIZE = 0,
ISELEMENTOF = "",
DESCRIPTION = "Power efficiency adjustment for households households with EV"
)
)
)
#Save the data specifications list
#---------------------------------
#' Specifications list for CalculateCongestionPolicy module
#'
#' A list containing specifications for the CalculateCongestionPolicy module.
#'
#' @format A list containing 4 components:
#' \describe{
#' \item{RunBy}{the level of geography that the module is run at}
#' \item{Inp}{scenario input data to be loaded into the datastore for this
#' module}
#' \item{Get}{module inputs to be read from the datastore}
#' \item{Set}{module outputs to be written to the datastore}
#' }
#' @source CalculateCongestionPolicy.R script.
"CalculateCongestionPolicySpecifications"
visioneval::savePackageDataset(CalculateCongestionPolicySpecifications, overwrite = TRUE)
#=======================================================
#SECTION 3: DEFINE FUNCTIONS THAT IMPLEMENT THE SUBMODEL
#=======================================================
#Main module function that calculates the amount of congestion adjusted to policy
#------------------------------------------------------------------
#' Function to calculate the amount of congestion after adjusting to policy.
#'
#' \code{CalculateCongestionPolicy} calculates the amount of congestion after
#' adjusting for policy.
#'
#' Auto, and light truck vmt, truck vmt, and bus vmt are allocated to freeways,
#' arterials, and other roadways. Truck and bus vmt are allocated based on
#' mode-specific data, and auto and light truck vmt are allocated based on a
#' combination of factors and a model that is sensitive to the relative
#' supplies of freeway and arterial lane miles.
#'
#' System-wide ratios of vmt to lane miles for freeways and arterials
#' are used to allocate vmt to congestion levels using congestion levels defined by
#' the Texas Transportation Institute for the Urban Mobility Report. Each freeway and
#' arterial congestion level is associated with an average trip speed for conditions that
#' do and do not include ITS treatment for incident management on the roadway. Overall average
#' speeds by congestion level are calculated based on input assumptions about the degree of
#' incident management. Speed vs. fuel efficiency relationships for light vehicles, trucks,
#' and buses are used to adjust the fleet fuel efficiency averages computed for the region.
#'
#' @param L A list containing the components listed in the Get specifications
#' for the module.
#' @return A list containing the components specified in the Set
#' specifications for the module.
#' @name CalculateCongestionPolicy
#' @import visioneval
#' @export
CalculateCongestionPolicy <- function(L) {
  #Set up
  #------
  # Helper: recursively append `suffix` to the names of every non-list
  # element of a nested list. Used to tag base-model outputs as policy
  # results. Returns NULL for non-list input (mirrors original behavior).
  AddSuffixFuture <- function(x, suffix = "Future") {
    if (!is.list(x)) return(NULL)
    if (length(x) == 0L) return(x)
    isElementList <- vapply(x, is.list, logical(1))
    # Rename the leaf (non-list) elements
    noList <- x[!isElementList]
    if (length(noList) > 0L) {
      names(noList) <- paste0(names(noList), suffix)
    }
    # Recurse into the sub-lists
    yesList <- lapply(x[isElementList], AddSuffixFuture, suffix = suffix)
    # Leaves are placed before sub-lists, as in the original implementation
    unlist(list(noList, yesList), recursive = FALSE)
  }
  # Helper: recursively strip a trailing `suffix` from the names of every
  # non-list element of a nested list.
  RemoveSuffixFuture <- function(x, suffix = "Future") {
    if (!is.list(x)) return(NULL)
    if (length(x) == 0L) return(x)
    isElementList <- vapply(x, is.list, logical(1))
    noList <- x[!isElementList]
    if (length(noList) > 0L) {
      # Anchor the pattern so only a trailing suffix is removed; the
      # previous unanchored gsub would also delete the text from mid-name
      # matches (e.g. "FutureX" -> "X").
      names(noList) <- gsub(paste0(suffix, "$"), "", names(noList))
    }
    yesList <- lapply(x[isElementList], RemoveSuffixFuture, suffix = suffix)
    unlist(list(noList, yesList), recursive = FALSE)
  }
  # Strip the scenario suffixes so the input names match the Get
  # specifications expected by the base congestion module.
  L <- RemoveSuffixFuture(L)
  L <- RemoveSuffixFuture(L, suffix = "Policy")
  #Return the results
  #------------------
  # Run the base congestion calculations on the renamed inputs
  # (the original comment incorrectly referred to CalculateTravelDemand).
  Out_ls <- CalculateCongestionBase(L)
  # Tag all outputs with the 'Policy' suffix to match the Set specifications
  Out_ls <- AddSuffixFuture(Out_ls, suffix = "Policy")
  #Return the outputs list
  return(Out_ls)
}
#================================
#Code to aid development and test
#================================
#Test code to check specifications, loading inputs, and whether datastore
#contains data needed to run module. Return input list (L) to use for developing
#module functions
#-------------------------------------------------------------------------------
# TestDat_ <- testModule(
# ModuleName = "CalculateCongestionPolicy",
# LoadDatastore = TRUE,
# SaveDatastore = TRUE,
# DoRun = FALSE
# )
# L <- TestDat_$L
#Test code to check everything including running the module and checking whether
#the outputs are consistent with the 'Set' specifications
#-------------------------------------------------------------------------------
# TestDat_ <- testModule(
# ModuleName = "CalculateCongestionPolicy",
# LoadDatastore = TRUE,
# SaveDatastore = TRUE,
# DoRun = TRUE
# )
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/DataDoc.R
\name{NMcas}
\alias{NMcas}
\title{New Mexico Brain Cancer example-- cases}
\format{A data frame with 1175 observations and 5 variables}
\source{
Distributed with SaTScan software: \url{http://www.satscan.org}
}
\description{
A data set from New Mexico. The variables are as follows:
}
\details{
\itemize{
\item county: County name
\item cases: Number of cases
\item year: Year of case
\item agegroup: Age group of case
\item sex: Sex of case
}
}
| /man/NMcas.Rd | no_license | skhan8/rsatscan | R | false | false | 577 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/DataDoc.R
\name{NMcas}
\alias{NMcas}
\title{New Mexico Brain Cancer example-- cases}
\format{A data frame with 1175 observations and 5 variables}
\source{
Distributed with SaTScan software: \url{http://www.satscan.org}
}
\description{
A data set from New Mexico. The variables are as follows:
}
\details{
\itemize{
\item county: County name
\item cases: Number of cases
\item year: Year of case
\item agegroup: Age group of case
\item sex: Sex of case
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/redist_map.R
\name{get_target}
\alias{get_target}
\title{Extract the target district population from a \code{redist_map} object}
\usage{
get_target(x)
}
\arguments{
\item{x}{the \code{redist_map} object}
}
\value{
a single numeric value, the target population
}
\description{
Extract the target district population from a \code{redist_map} object
}
\concept{prepare}
| /man/get_target.Rd | no_license | cran/redist | R | false | true | 463 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/redist_map.R
\name{get_target}
\alias{get_target}
\title{Extract the target district population from a \code{redist_map} object}
\usage{
get_target(x)
}
\arguments{
\item{x}{the \code{redist_map} object}
}
\value{
a single numeric value, the target population
}
\description{
Extract the target district population from a \code{redist_map} object
}
\concept{prepare}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/p3_dshbrd_charts.R
\name{renderP3_dshbrd_pie_chart}
\alias{renderP3_dshbrd_pie_chart}
\title{C3 pie chart Widget render function for use in Shiny}
\usage{
renderP3_dshbrd_pie_chart(expr, env = parent.frame(), quoted = FALSE)
}
\description{
C3 pie chart Widget render function for use in Shiny
}
| /man/renderP3_dshbrd_pie_chart.Rd | no_license | pantheracorp/PantheraWidgets | R | false | true | 366 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/p3_dshbrd_charts.R
\name{renderP3_dshbrd_pie_chart}
\alias{renderP3_dshbrd_pie_chart}
\title{C3 pie chart Widget render function for use in Shiny}
\usage{
renderP3_dshbrd_pie_chart(expr, env = parent.frame(), quoted = FALSE)
}
\description{
C3 pie chart Widget render function for use in Shiny
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tr_masked.R
\name{masked_config}
\alias{masked_config}
\title{Returns the configuration of a masked model}
\usage{
masked_config(
model = getOption("pangoling.masked.default"),
config_model = NULL
)
}
\arguments{
\item{model}{Name of a pre-trained model or folder.}
\item{config_model}{List with other arguments that control how the
model from Hugging Face is accessed.}
}
\value{
A list with the configuration of the model.
}
\description{
Returns the configuration of a masked model.
}
\details{
A masked language model (also called BERT-like, or encoder model) is a type
of large language model that can be used to predict the content of a mask
in a sentence.
If not specified, the masked model that will be used is the one set in
specified in the global option \code{pangoling.masked.default}, this can be
accessed via \code{getOption("pangoling.masked.default")} (by default
"bert-base-uncased"). To change the default option
use \code{options(pangoling.masked.default = "newmaskedmodel")}.
A list of possible masked models can be found in
\href{https://huggingface.co/models?pipeline_tag=fill-mask}{Hugging Face website}.
Using the \code{config_model} and \code{config_tokenizer} arguments, it's possible to
control how the model and tokenizer from Hugging Face is accessed, see the
python method
\href{https://huggingface.co/docs/transformers/v4.25.1/en/model_doc/auto#transformers.AutoProcessor.from_pretrained}{\code{from_pretrained}} for details. In case of errors
check the status of
\url{https://status.huggingface.co/}
}
\examples{
\dontshow{if (interactive()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
masked_config(model = "bert-base-uncased")
\dontshow{\}) # examplesIf}
}
\seealso{
Other masked model functions:
\code{\link{masked_lp}()},
\code{\link{masked_preload}()},
\code{\link{masked_tokens_tbl}()}
}
\concept{masked model functions}
| /man/masked_config.Rd | permissive | bnicenboim/pangoling | R | false | true | 1,969 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tr_masked.R
\name{masked_config}
\alias{masked_config}
\title{Returns the configuration of a masked model}
\usage{
masked_config(
model = getOption("pangoling.masked.default"),
config_model = NULL
)
}
\arguments{
\item{model}{Name of a pre-trained model or folder.}
\item{config_model}{List with other arguments that control how the
model from Hugging Face is accessed.}
}
\value{
A list with the configuration of the model.
}
\description{
Returns the configuration of a masked model.
}
\details{
A masked language model (also called BERT-like, or encoder model) is a type
of large language model that can be used to predict the content of a mask
in a sentence.
If not specified, the masked model that will be used is the one set in
specified in the global option \code{pangoling.masked.default}, this can be
accessed via \code{getOption("pangoling.masked.default")} (by default
"bert-base-uncased"). To change the default option
use \code{options(pangoling.masked.default = "newmaskedmodel")}.
A list of possible masked models can be found in
\href{https://huggingface.co/models?pipeline_tag=fill-mask}{Hugging Face website}.
Using the \code{config_model} and \code{config_tokenizer} arguments, it's possible to
control how the model and tokenizer from Hugging Face is accessed, see the
python method
\href{https://huggingface.co/docs/transformers/v4.25.1/en/model_doc/auto#transformers.AutoProcessor.from_pretrained}{\code{from_pretrained}} for details. In case of errors
check the status of
\url{https://status.huggingface.co/}
}
\examples{
\dontshow{if (interactive()) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
masked_config(model = "bert-base-uncased")
\dontshow{\}) # examplesIf}
}
\seealso{
Other masked model functions:
\code{\link{masked_lp}()},
\code{\link{masked_preload}()},
\code{\link{masked_tokens_tbl}()}
}
\concept{masked model functions}
|
## This first line will likely take a few seconds.
# Load dplyr explicitly: the pipeline below uses %>%, group_by() and
# summarize(), none of which are available in base R.
library(dplyr)

# Read Data from Assignment Week 4
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
head(NEI)

# Summarise total PM2.5 emissions by year
Yearly_Emissions_Summary <- NEI %>%
  group_by(year) %>%
  summarize(Sum_Emissions = sum(Emissions, na.rm = TRUE))
Yearly_Emissions_Summary

## Create the line plot of total yearly emissions and save it to plot1.png
png(filename = "plot1.png")
plot(Yearly_Emissions_Summary$year, Yearly_Emissions_Summary$Sum_Emissions,
     type = "l",
     main = "Total Annual Emissions of PM2.5 in the US per Year",
     ylab = "Total Emissions of PM2.5 (tons)",
     xlab = "Year")
dev.off()

# Difference in total emissions between 1999 and 2008 (negative => decrease)
Emissions2008 <- Yearly_Emissions_Summary[Yearly_Emissions_Summary$year == 2008, 2]
Emissions1999 <- Yearly_Emissions_Summary[Yearly_Emissions_Summary$year == 1999, 2]
Diff_Emissions2008_1999 <- Emissions2008 - Emissions1999
| /Plot1.R | no_license | jogre78/Coursera-Exploratory-Data-Analysis-Week-4-Assignment | R | false | false | 936 | r | ## This first line will likely take a few seconds.
# Read Data from Assignment Week 4
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
head(NEI)
# Summarise Emissions by Year
Yearly_Emissions_Summary <- NEI %>%
group_by(year) %>%
summarize(Sum_Emissions = sum(Emissions, na.rm = TRUE))
Yearly_Emissions_Summary
## create the plot of Yearly Emissions
png(filename = "plot1.png")
plot(Yearly_Emissions_Summary$year, Yearly_Emissions_Summary$Sum_Emissions,
type = "l",
main = "Total Annual Emissions of PM2.5 in the US per Year",
ylab = "Total Emissions of PM2.5 (tons)",
xlab = "Year")
dev.off()
# Difference in Emissions From 1999 to 2008
Emissions2008 <- Yearly_Emissions_Summary[Yearly_Emissions_Summary$year == 2008, 2]
Emissions1999 <- Yearly_Emissions_Summary[Yearly_Emissions_Summary$year == 1999, 2]
Diff_Emissions2008_1999 <- Emissions2008 - Emissions1999
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataHandling.R
\name{mergeQuotesSameTimestamp}
\alias{mergeQuotesSameTimestamp}
\title{Merge multiple quote entries with the same time stamp}
\usage{
mergeQuotesSameTimestamp(qData, selection = "median")
}
\arguments{
\item{qData}{an \code{xts} object or \code{data.table} containing the time series data, with
at least two columns named \code{BID} and \code{OFR} indicating the bid and ask price
as well as two columns \code{BIDSIZ}, \code{OFRSIZ} indicating the number of round lots available at these
prices. For \code{data.table} an additional column \code{DT} is necessary that stores the date/time information.}
\item{selection}{indicates how the bid and ask price for a certain time stamp
should be calculated in case of multiple observation for a certain time
stamp. By default, \code{selection = "median"}, and the median price is taken. Alternatively:
\itemize{
\item \code{selection = "max.volume"}: use the (bid/ask) price of the entry with
largest (bid/ask) volume.
\item \code{selection = "weighted.average"}: take the weighted average of all bid (ask) prices,
weighted by "BIDSIZ" ("OFRSIZ").
}}
}
\value{
Depending on the input data type, we return either a \code{data.table} or an \code{xts} object containing the quote data which has been cleaned.
}
\description{
Merge quote entries that have the same time stamp to a single one and returns an \code{xts} or a \code{data.table} object
with unique time stamps only.
}
\author{
Jonathan Cornelissen, Kris Boudt, Onno Kleen, and Emil Sjoerup.
}
\keyword{cleaning}
| /man/mergeQuotesSameTimestamp.Rd | no_license | jonathancornelissen/highfrequency | R | false | true | 1,613 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataHandling.R
\name{mergeQuotesSameTimestamp}
\alias{mergeQuotesSameTimestamp}
\title{Merge multiple quote entries with the same time stamp}
\usage{
mergeQuotesSameTimestamp(qData, selection = "median")
}
\arguments{
\item{qData}{an \code{xts} object or \code{data.table} containing the time series data, with
at least two columns named \code{BID} and \code{OFR} indicating the bid and ask price
as well as two columns \code{BIDSIZ}, \code{OFRSIZ} indicating the number of round lots available at these
prices. For \code{data.table} an additional column \code{DT} is necessary that stores the date/time information.}
\item{selection}{indicates how the bid and ask price for a certain time stamp
should be calculated in case of multiple observation for a certain time
stamp. By default, \code{selection = "median"}, and the median price is taken. Alternatively:
\itemize{
\item \code{selection = "max.volume"}: use the (bid/ask) price of the entry with
largest (bid/ask) volume.
\item \code{selection = "weighted.average"}: take the weighted average of all bid (ask) prices,
weighted by "BIDSIZ" ("OFRSIZ").
}}
}
\value{
Depending on the input data type, we return either a \code{data.table} or an \code{xts} object containing the quote data which has been cleaned.
}
\description{
Merge quote entries that have the same time stamp to a single one and returns an \code{xts} or a \code{data.table} object
with unique time stamps only.
}
\author{
Jonathan Cornelissen, Kris Boudt, Onno Kleen, and Emil Sjoerup.
}
\keyword{cleaning}
|
#' @param scores dataframe of quality metric scores with columns Dataset_id (identifier for each dataset),
#' Traj_type (type of trajectory, provided with the ground truth information),
#' Dataset_source (whether gold, silver or simulated),
#' Method_id (method used to compute the kNN graph used for the TI task)
#' @return scores ready to be plotted (normalized, aggregated, averaged)
#' @example df <- overallscore_aggregated(scores)
### Load libraries
library(dplyr)
### Function to compare & aggregate quality scores
# Score normalization
# Normalise every "Met_*" metric column within each dataset.
# For each metric column a "<col>_normed" companion column is added,
# obtained by normalising the raw scores and mapping them through the
# standard normal CDF (pnorm), putting all metrics on a comparable 0-1 scale.
# NOTE(review): `normalize()` is not defined in this file and dplyr does not
# provide it -- confirm which package is expected to supply it.
score_norm <- function(scores) {
# Group by dataset so normalisation happens within each Dataset_id
tmp = scores %>% group_by(Dataset_id)
for (col in grep("Met_",colnames(tmp), value = T)) {
# Name of the new normalised column for this metric
col_norm = paste0(col,"_normed")
# !!col_norm := creates the dynamically-named column; get(col) reads
# the current metric column by name
tmp = tmp %>% mutate(!!col_norm:=pnorm(normalize(get(col))))
}
return(tmp)
}
# Score aggregation
# Aggregate the normalised scores upward through three levels:
#  1) mean within (Traj_type, Dataset_source, Method_id) -> *_norm2 columns
#  2) mean within (Traj_type, Method_id)                 -> *_norm3 columns
#  3) mean within Method_id                              -> *_norm4 columns
# and return one row per method containing only the *_norm4 columns.
score_aggregation <- function(scores) {
tmp = score_norm(scores)
# Level-1 key: trajectory type x dataset source x method
tmp = tmp %>% mutate(comp1=paste(Traj_type,Dataset_source,Method_id))
for (col in grep("_normed", colnames(tmp), value = T)) {
col_norm = paste0(col,"_norm2")
tmp = tmp %>%
group_by(comp1) %>%
mutate(!!col_norm:=mean(get(col)))
}
# Collapse to one row per level-1 key, then build the level-2 key
tmp = tmp %>% filter(!duplicated(comp1)) %>% mutate(comp2=paste(Traj_type,Method_id))
for (col in grep("_norm2",colnames(tmp), value = T)) {
# Rename *_norm2 -> *_norm3 for the next aggregation level
col_norm = gsub("2","3",col)
tmp = tmp %>%
group_by(comp2) %>%
mutate(!!col_norm:=mean(get(col)))
}
# Collapse to one row per level-2 key, then aggregate per method
tmp = tmp %>% filter(!duplicated(comp2)) %>% group_by(Method_id)
for (col in grep("_norm3",colnames(tmp), value = T)) {
# Rename *_norm3 -> *_norm4 for the final aggregation level
col_norm = gsub("3","4",col)
tmp = tmp %>%
mutate(!!col_norm:=mean(get(col)))
}
# One row per method, keeping only the fully-aggregated *_norm4 columns
tmp = tmp %>% filter(!duplicated(Method_id))
tmp = tmp[,c("Method_id",grep("_norm4",colnames(tmp),value=T))]
return(tmp)
}
# Overall aggregated score
# Compute the overall score per method: the row-wise mean of all *_norm4
# (fully aggregated) metric columns, stored in a new Overall_score column.
overallscore_aggregated <- function(scores) {
tmp = score_aggregation(scores)
# For each method (row), average its aggregated metric columns
tmp$Overall_score = apply(data.frame(tmp), 1, function(x) {
cols = grep("_norm4",colnames(tmp));
# apply() passes a character row when types are mixed, hence the
# explicit coercion back to numeric before averaging
vals = as.numeric(unname(unlist(x[cols])));
mean(vals)})
return(tmp)
} | /Scripts/R/TI_dynverse/2_dynverse_score_aggregation.R | no_license | EliseAld/schubness | R | false | false | 2,050 | r | #' @param scores dataframe of quality metric scores with columns Dataset_id (identifier for each dataset),
#' Traj_type (type of trajectory, provided with the ground truth information),
#' Dataset_source (whether gold, silver or simulated),
#' Method_id (method used to compute the kNN graph used for the TI task)
#' @return scores ready to be plotted (normalized, aggregated, averaged)
#' @example df <- overallscore_aggregated(scores)
### Load libraries
library(dplyr)
### Function to compare & aggregate quality scores
# Score normalization
score_norm <- function(scores) {
tmp = scores %>% group_by(Dataset_id)
for (col in grep("Met_",colnames(tmp), value = T)) {
col_norm = paste0(col,"_normed")
tmp = tmp %>% mutate(!!col_norm:=pnorm(normalize(get(col))))
}
return(tmp)
}
# Score aggregation
score_aggregation <- function(scores) {
tmp = score_norm(scores)
tmp = tmp %>% mutate(comp1=paste(Traj_type,Dataset_source,Method_id))
for (col in grep("_normed", colnames(tmp), value = T)) {
col_norm = paste0(col,"_norm2")
tmp = tmp %>%
group_by(comp1) %>%
mutate(!!col_norm:=mean(get(col)))
}
tmp = tmp %>% filter(!duplicated(comp1)) %>% mutate(comp2=paste(Traj_type,Method_id))
for (col in grep("_norm2",colnames(tmp), value = T)) {
col_norm = gsub("2","3",col)
tmp = tmp %>%
group_by(comp2) %>%
mutate(!!col_norm:=mean(get(col)))
}
tmp = tmp %>% filter(!duplicated(comp2)) %>% group_by(Method_id)
for (col in grep("_norm3",colnames(tmp), value = T)) {
col_norm = gsub("3","4",col)
tmp = tmp %>%
mutate(!!col_norm:=mean(get(col)))
}
tmp = tmp %>% filter(!duplicated(Method_id))
tmp = tmp[,c("Method_id",grep("_norm4",colnames(tmp),value=T))]
return(tmp)
}
# Overall aggregated score
overallscore_aggregated <- function(scores) {
tmp = score_aggregation(scores)
tmp$Overall_score = apply(data.frame(tmp), 1, function(x) {
cols = grep("_norm4",colnames(tmp));
vals = as.numeric(unname(unlist(x[cols])));
mean(vals)})
return(tmp)
} |
# Load the unit-testing framework and the package under test.
library(testthat)
library(tidycells)

# Run the full tidycells test suite and keep the collected results.
test_results <- test_check("tidycells")

# get extra information: one row per test with pass/fail/skip counts and timing
result_columns <- c("file", "test", "failed", "passed", "skipped", "error", "warning", "real")
as.data.frame(test_results)[result_columns]
| /tests/testthat.R | permissive | dondealban/tidycells | R | false | false | 210 | r | library(testthat)
library(tidycells)
test_results <- test_check("tidycells")
# get extra information
as.data.frame(test_results)[c("file", "test", "failed", "passed", "skipped", "error", "warning", "real")]
|
# Analyse when high-scoring reddit comments are posted.
# Add required libraries and packages
library(RSQLite)
library(dplyr)
library(ggplot2)

# Open the SQLite database of reddit comments (lazy connection; rows are
# fetched only on collect()). NOTE(review): src_sqlite() is deprecated in
# recent dplyr -- kept here to preserve the original environment's behaviour.
db <- src_sqlite('data.sqlite', create=F)
sub <- "politics"

# Keep only comments from the chosen subreddit with a score above 300
dbsub <- db %>%
  tbl('January2015') %>%
  filter(subreddit==sub, score > 300)
df <- collect(dbsub)

# High-scoring comments (body longer than 30 chars) by day of the week.
# strftime "%u" is the ISO weekday number (1 = Monday ... 7 = Sunday).
# The result is named posttimes because the plots below refer to it by that
# name (the original assigned to postday, leaving posttimes undefined).
posttimes <- filter(df, nchar(body) > 30) %>%
  select(created_utc) %>%
  mutate(created_utc = as.POSIXct(created_utc, origin = "1970-01-01"), day = as.numeric(strftime(created_utc, "%u")))
# Polar histogram of high-score activity over the days of the week
ggplot(posttimes, aes(x=day)) + geom_histogram(binwidth=0.20) + scale_x_continuous(breaks = 0:7) + coord_polar() + ggtitle(sub) + theme_bw()

# High-scoring comments by hour of the day.
# strftime "%H" is the hour 00-23; the original "%h" is the abbreviated
# month name and would have produced NAs after as.numeric().
posttimes <- filter(df, nchar(body) > 30) %>%
  select(created_utc) %>%
  mutate(created_utc = as.POSIXct(created_utc, origin = "1970-01-01"), hour = as.numeric(strftime(created_utc, "%H")))
# Polar histogram of high-score activity over the hours of the day
ggplot(posttimes, aes(x=hour)) + geom_histogram(binwidth=0.20) + scale_x_continuous(breaks = 0:24) + coord_polar() + ggtitle(sub) + theme_bw() | /analysis/activity.R | permissive | ansin218/reddit-comments-data-analysis | R | false | false | 1,258 | r | # Add required libraries and packages
library(RSQLite)
library(dplyr)
library(ggplot2)

# Open the SQLite database of reddit comments (lazy connection; rows are
# fetched only on collect()). NOTE(review): src_sqlite() is deprecated in
# recent dplyr -- kept here to preserve the original environment's behaviour.
db <- src_sqlite('data.sqlite', create=F)
sub <- "politics"

# Keep only comments from the chosen subreddit with a score above 300
dbsub <- db %>%
  tbl('January2015') %>%
  filter(subreddit==sub, score > 300)
df <- collect(dbsub)

# High-scoring comments (body longer than 30 chars) by day of the week.
# strftime "%u" is the ISO weekday number (1 = Monday ... 7 = Sunday).
# The result is named posttimes because the plots below refer to it by that
# name (the original assigned to postday, leaving posttimes undefined).
posttimes <- filter(df, nchar(body) > 30) %>%
  select(created_utc) %>%
  mutate(created_utc = as.POSIXct(created_utc, origin = "1970-01-01"), day = as.numeric(strftime(created_utc, "%u")))
# Polar histogram of high-score activity over the days of the week
ggplot(posttimes, aes(x=day)) + geom_histogram(binwidth=0.20) + scale_x_continuous(breaks = 0:7) + coord_polar() + ggtitle(sub) + theme_bw()

# High-scoring comments by hour of the day.
# strftime "%H" is the hour 00-23; the original "%h" is the abbreviated
# month name and would have produced NAs after as.numeric().
posttimes <- filter(df, nchar(body) > 30) %>%
  select(created_utc) %>%
  mutate(created_utc = as.POSIXct(created_utc, origin = "1970-01-01"), hour = as.numeric(strftime(created_utc, "%H")))
# Polar histogram of high-score activity over the hours of the day
ggplot(posttimes, aes(x=hour)) + geom_histogram(binwidth=0.20) + scale_x_continuous(breaks = 0:24) + coord_polar() + ggtitle(sub) + theme_bw() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aftgee_pkg.R
\docType{package}
\name{aftgee-package}
\alias{aftgee-package}
\alias{_PACKAGE}
\alias{aftgee-packages}
\title{aftgee: Accelerated Failure Time with Generalized Estimating Equation}
\description{
A package that uses Generalized Estimating Equations (GEE) to estimate
Multivariate Accelerated Failure Time Model (AFT).
This package implements recently developed inference procedures for
AFT models with both the rank-based approach and the least squares approach.
For the rank-based approach, the package allows various weight choices
and uses an induced smoothing procedure that leads to much more
efficient computation than the linear programming method.
With the rank-based estimator as an initial value, the generalized
estimating equation approach is used as an extension of the least
squares approach to the multivariate case.
Additional sampling weights are incorporated to handle missing data
needed as in case-cohort studies or general sampling schemes.
}
\references{
Chiou, S., Kim, J. and Yan, J. (2014) Marginal Semiparametric Multivariate
Accelerated Failure Time Model with Generalized Estimating Equation.
\emph{Life Time Data}, \bold{20}(4): 599--618.
Chiou, S., Kang, S. and Yan, J. (2014) Fast Accelerated Failure Time Modeling
for Case-Cohort Data. \emph{Statistics and Computing}, \bold{24}(4): 559--568.
Chiou, S., Kang, S. and Yan, J. (2014) Fitting Accelerated Failure Time Model
in Routine Survival Analysis with {R} Package \pkg{aftgee}.
\emph{Journal of Statistical Software}, \bold{61}(11): 1--23.
Huang, Y. (2002) Calibration Regression of Censored Lifetime Medical Cost.
\emph{Journal of American Statistical Association}, \bold{97}, 318--327.
Jin, Z. and Lin, D. Y. and Ying, Z. (2006)
On Least-squares Regression with Censored Data. \emph{Biometrika}, \bold{90}, 341--353.
Johnson, L. M. and Strawderman, R. L. (2009)
Induced Smoothing for the Semiparametric Accelerated Failure Time Model:
Asymptotic and Extensions to Clustered Data. \emph{Biometrika}, \bold{96}, 577 -- 590.
Zeng, D. and Lin, D. Y. (2008)
Efficient Resampling Methods for Nonsmooth Estimating Functions.
\emph{Biostatistics}, \bold{9}, 355--363
}
\seealso{
Useful links:
\itemize{
\item \url{http://github.com/stc04003/aftgee}
\item Report bugs at \url{http://github.com/stc04003/aftgee/issues}
}
}
\author{
\strong{Maintainer}: Sy Han Chiou \email{schiou@utdallas.edu}
Authors:
\itemize{
\item Sangwook Kang
\item Jun Yan
}
}
| /man/aftgee-package.Rd | no_license | pegeler/aftgee | R | false | true | 2,536 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aftgee_pkg.R
\docType{package}
\name{aftgee-package}
\alias{aftgee-package}
\alias{_PACKAGE}
\alias{aftgee-packages}
\title{aftgee: Accelerated Failure Time with Generalized Estimating Equation}
\description{
A package that uses Generalized Estimating Equations (GEE) to estimate
Multivariate Accelerated Failure Time Model (AFT).
This package implements recently developed inference procedures for
AFT models with both the rank-based approach and the least squares approach.
For the rank-based approach, the package allows various weight choices
and uses an induced smoothing procedure that leads to much more
efficient computation than the linear programming method.
With the rank-based estimator as an initial value, the generalized
estimating equation approach is used as an extension of the least
squares approach to the multivariate case.
Additional sampling weights are incorporated to handle missing data
needed as in case-cohort studies or general sampling schemes.
}
\references{
Chiou, S., Kim, J. and Yan, J. (2014) Marginal Semiparametric Multivariate
Accelerated Failure Time Model with Generalized Estimating Equation.
\emph{Life Time Data}, \bold{20}(4): 599--618.
Chiou, S., Kang, S. and Yan, J. (2014) Fast Accelerated Failure Time Modeling
for Case-Cohort Data. \emph{Statistics and Computing}, \bold{24}(4): 559--568.
Chiou, S., Kang, S. and Yan, J. (2014) Fitting Accelerated Failure Time Model
in Routine Survival Analysis with {R} Package \pkg{aftgee}.
\emph{Journal of Statistical Software}, \bold{61}(11): 1--23.
Huang, Y. (2002) Calibration Regression of Censored Lifetime Medical Cost.
\emph{Journal of American Statistical Association}, \bold{97}, 318--327.
Jin, Z. and Lin, D. Y. and Ying, Z. (2006)
On Least-squares Regression with Censored Data. \emph{Biometrika}, \bold{90}, 341--353.
Johnson, L. M. and Strawderman, R. L. (2009)
Induced Smoothing for the Semiparametric Accelerated Failure Time Model:
Asymptotic and Extensions to Clustered Data. \emph{Biometrika}, \bold{96}, 577 -- 590.
Zeng, D. and Lin, D. Y. (2008)
Efficient Resampling Methods for Nonsmooth Estimating Functions.
\emph{Biostatistics}, \bold{9}, 355--363
}
\seealso{
Useful links:
\itemize{
\item \url{http://github.com/stc04003/aftgee}
\item Report bugs at \url{http://github.com/stc04003/aftgee/issues}
}
}
\author{
\strong{Maintainer}: Sy Han Chiou \email{schiou@utdallas.edu}
Authors:
\itemize{
\item Sangwook Kang
\item Jun Yan
}
}
|
#Function for the simulation of a Mittag-Leffler random variable.
#In input:
#n is the number of ML random variable one wants to simulate;
#nu is the fractional order of the random variable;
#lambda is the parameter of the random variable.
#In output: a vector of length n containing the simulated ML random variables.
#The script needs as source the function "rstable" contained in the library "stabledist"
MLgen<-function(n,nu,lambda){
  # Simulate n exponential random variables with rate lambda
  Z1<-rexp(n,lambda)
  # Scale parameter for the one-sided stable random variable
  gamma<-(cos(pi*nu/2))^(1/nu)
  # Simulate n positive stable random variables of order nu
  # (rstable comes from the "stabledist" package; pm=1 parametrisation)
  Z2<-rstable(n,nu,1,gamma,0,pm=1)
  # Combine: Z = Z1^(1/nu) * Z2 is Mittag-Leffler distributed.
  # Vectorised arithmetic replaces the original element-by-element loop
  # (which also preallocated a *logical* vector via vector(length=n)).
  Z <- (Z1^(1/nu))*Z2
  return(Z)
} | /MLgen.R | permissive | GiAscione/FractionalErlangQueue | R | false | false | 865 | r | #Function for the simulation of a Mittag-Leffler random variable.
#In input:
#n is the number of ML random variable one wants to simulate;
#nu is the fractional order of the random variable;
#lambda is the parameter of the random variable.
#In output: a vector of length n containing the simulated ML random variables.
#The script needs as source the function "rstable" contained in the library "stabledist"
MLgen<-function(n,nu,lambda){
  # Simulate n exponential random variables with rate lambda
  Z1<-rexp(n,lambda)
  # Scale parameter for the one-sided stable random variable
  gamma<-(cos(pi*nu/2))^(1/nu)
  # Simulate n positive stable random variables of order nu
  # (rstable comes from the "stabledist" package; pm=1 parametrisation)
  Z2<-rstable(n,nu,1,gamma,0,pm=1)
  # Combine: Z = Z1^(1/nu) * Z2 is Mittag-Leffler distributed.
  # Vectorised arithmetic replaces the original element-by-element loop
  # (which also preallocated a *logical* vector via vector(length=n)).
  Z <- (Z1^(1/nu))*Z2
  return(Z)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\docType{class}
\name{SCTAssay-class}
\alias{SCTAssay-class}
\alias{SCTModel}
\alias{SCTAssay}
\alias{levels.SCTAssay}
\alias{levels<-.SCTAssay}
\title{The SCTModel Class}
\usage{
\method{levels}{SCTAssay}(x)
\method{levels}{SCTAssay}(x) <- value
}
\arguments{
\item{x}{An \code{SCTAssay} object}
\item{value}{New levels, must be in the same order as the levels present}
}
\value{
\code{levels}: SCT model names
\code{levels<-}: \code{x} with updated SCT model names
}
\description{
The SCTModel object is a model and parameters storage from SCTransform.
It can be used to calculate Pearson residuals for new genes.
The SCTAssay object contains all the information found in an \code{\link{Assay}}
object, with extra information from the results of \code{\link{SCTransform}}
}
\section{Slots}{
\describe{
\item{\code{feature.attributes}}{A data.frame with feature attributes in SCTransform}
\item{\code{cell.attributes}}{A data.frame with cell attributes in SCTransform}
\item{\code{clips}}{A list of two numeric of length two specifying the min and max
values the Pearson residual will be clipped to. One for vst and one for
SCTransform}
\item{\code{umi.assay}}{Name of the assay of the seurat object containing UMI matrix
and the default is RNA}
\item{\code{model}}{A formula used in SCTransform}
\item{\code{arguments}}{other information used in SCTransform}
\item{\code{median_umi}}{Median UMI (or scale factor) used to calculate corrected counts}
\item{\code{SCTModel.list}}{A list containing SCT models}
}}
\section{Get and set SCT model names}{
SCT results are named by initial run of \code{\link{SCTransform}} in order
to keep SCT parameters straight between runs. When working with merged
\code{SCTAssay} objects, these model names are important. \code{levels}
allows querying the models present. \code{levels<-} allows the changing of
the names of the models present, useful when merging \code{SCTAssay} objects.
Note: unlike normal \code{\link[base]{levels<-}}, \code{levels<-.SCTAssay}
allows complete changing of model names, not reordering.
}
\section{Creating an \code{SCTAssay} from an \code{Assay}}{
Conversion from an \code{Assay} object to an \code{SCTAssay} object by
is done by adding the additional slots to the object. If \code{from} has
results generated by \code{\link{SCTransform}} from Seurat v3.0.0 to v3.1.1,
the conversion will automagically fill the new slots with the data
}
\examples{
\dontrun{
# SCTAssay objects are generated from SCTransform
pbmc_small <- SCTransform(pbmc_small)
}
# SCTAssay objects are generated from SCTransform
pbmc_small <- SCTransform(pbmc_small)
pbmc_small[["SCT"]]
\dontrun{
# Query and change SCT model names
levels(pbmc_small[['SCT']])
levels(pbmc_small[['SCT']]) <- '3'
levels(pbmc_small[['SCT']])
}
}
\seealso{
\code{\link{Assay}}
}
\concept{objects}
| /man/SCTAssay-class.Rd | permissive | satijalab/seurat | R | false | true | 2,944 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects.R
\docType{class}
\name{SCTAssay-class}
\alias{SCTAssay-class}
\alias{SCTModel}
\alias{SCTAssay}
\alias{levels.SCTAssay}
\alias{levels<-.SCTAssay}
\title{The SCTModel Class}
\usage{
\method{levels}{SCTAssay}(x)
\method{levels}{SCTAssay}(x) <- value
}
\arguments{
\item{x}{An \code{SCTAssay} object}
\item{value}{New levels, must be in the same order as the levels present}
}
\value{
\code{levels}: SCT model names
\code{levels<-}: \code{x} with updated SCT model names
}
\description{
The SCTModel object is a model and parameters storage from SCTransform.
It can be used to calculate Pearson residuals for new genes.
The SCTAssay object contains all the information found in an \code{\link{Assay}}
object, with extra information from the results of \code{\link{SCTransform}}
}
\section{Slots}{
\describe{
\item{\code{feature.attributes}}{A data.frame with feature attributes in SCTransform}
\item{\code{cell.attributes}}{A data.frame with cell attributes in SCTransform}
\item{\code{clips}}{A list of two numeric of length two specifying the min and max
values the Pearson residual will be clipped to. One for vst and one for
SCTransform}
\item{\code{umi.assay}}{Name of the assay of the seurat object containing UMI matrix
and the default is RNA}
\item{\code{model}}{A formula used in SCTransform}
\item{\code{arguments}}{other information used in SCTransform}
\item{\code{median_umi}}{Median UMI (or scale factor) used to calculate corrected counts}
\item{\code{SCTModel.list}}{A list containing SCT models}
}}
\section{Get and set SCT model names}{
SCT results are named by initial run of \code{\link{SCTransform}} in order
to keep SCT parameters straight between runs. When working with merged
\code{SCTAssay} objects, these model names are important. \code{levels}
allows querying the models present. \code{levels<-} allows the changing of
the names of the models present, useful when merging \code{SCTAssay} objects.
Note: unlike normal \code{\link[base]{levels<-}}, \code{levels<-.SCTAssay}
allows complete changing of model names, not reordering.
}
\section{Creating an \code{SCTAssay} from an \code{Assay}}{
Conversion from an \code{Assay} object to an \code{SCTAssay} object by
is done by adding the additional slots to the object. If \code{from} has
results generated by \code{\link{SCTransform}} from Seurat v3.0.0 to v3.1.1,
the conversion will automagically fill the new slots with the data
}
\examples{
\dontrun{
# SCTAssay objects are generated from SCTransform
pbmc_small <- SCTransform(pbmc_small)
}
# SCTAssay objects are generated from SCTransform
pbmc_small <- SCTransform(pbmc_small)
pbmc_small[["SCT"]]
\dontrun{
# Query and change SCT model names
levels(pbmc_small[['SCT']])
levels(pbmc_small[['SCT']]) <- '3'
levels(pbmc_small[['SCT']])
}
}
\seealso{
\code{\link{Assay}}
}
\concept{objects}
|
# BigData application performance prediction and cluster recommendation.
# Loads per-operation Hadoop benchmark measurements (read/write/shuffle/
# collect/spill/merge plus whole map/reduce phases), plots each operation's
# duration against its main driver, and fits a simple linear model to each.
# Date : 28.05.18
library(ggplot2)
library(dplyr)
library(gridExtra)
# Close any open graphics device; guarded so a fresh session does not error.
if (dev.cur() > 1) dev.off()
options(scipen = 0)
setwd("/home/sc306/Dropbox/SA/ClusterBenchMarking/hadoop/ClusterBenchmarking")
# ---- Load benchmark data ----------------------------------------------------
readOps.data <- read.csv(file = "readData.csv", header = TRUE)
#readOps.data <- filter(readOps.data, readOps.data$Duration <= 1500)
writeOps.data <- read.csv(file = "writeData.csv", header = TRUE)
# Shuffled data per node: DataSize * selectivity * mappers, divided by 8
# (assumes an 8-way split -- TODO confirm cluster size).
writeOps.data$shuffleData <- as.integer((writeOps.data$DataSize*(writeOps.data$MapSelectivity/100)*writeOps.data$Mappers)/8)
#shuffleOps.data <- filter(shuffleOps.data, shuffleOps.data$Duration <= 12000)
shuffleOps.data <- read.csv(file = "shuffleData.csv", header = TRUE)
shuffleOps.data$shuffleData <- as.integer((shuffleOps.data$DataSize*(shuffleOps.data$MapSelectivity/100)*shuffleOps.data$Mappers)/8)
shuffleOps.data <- filter(shuffleOps.data, shuffleOps.data$Duration <= 12000)  # drop extreme durations
collectOps.data <- read.csv(file = "collectData.csv", header = TRUE)
# Selectivity recovered from map output records (assumes 1048576 input
# records -- TODO confirm).
collectOps.data$MapSelectivityData <- as.integer(collectOps.data$MapOutputRec*100/1048576)
spillOps.data <- read.csv(file = "spillData.csv", header = TRUE)
spillOps.data$MapSelectivityData <- as.integer(spillOps.data$MapOutputRec*100/1048576)
#spillOps.data <- filter(spillOps.data, spillOps.data$Duration <= 15000)
mergeOps.data <- read.csv(file = "mergeData.csv", header = TRUE)
mergeOps.data$MapSelectivityData <- as.integer(mergeOps.data$MapOutputRec*100/1048576)
mergeOps.data <- filter(mergeOps.data, mergeOps.data$Duration <= 150)  # drop extreme durations
mapPhase.data <- read.csv(file = "mapdata/allOut.csv", header = TRUE)
summary(mapPhase.data)
reducePhase.data <- read.csv(file = "reducedata/out/allOut.csv", header = TRUE)
summary(reducePhase.data)
#head(readOps.data)
# ---- Scatter plots with fitted linear trends --------------------------------
# Bare column names are used inside aes() (never data$col) so ggplot2
# resolves them within the supplied data frame.
p1 <- ggplot(readOps.data, aes(x = MapSelectivity, y = Duration)) + geom_point() + geom_smooth(method = lm) + labs(x = "Map Selectivity(%)", y = "Time(ms)") + ggtitle("Read Operation")
p4 <- ggplot(collectOps.data, aes(x = MapSelectivityData, y = Duration)) + geom_point() + geom_smooth(method = lm) + labs(x = "Map Selectivity(%)", y = "Time(ms)") + ggtitle("Collect Operation")
p5 <- ggplot(spillOps.data, aes(x = MapSelectivityData, y = Duration)) + geom_point() + geom_smooth(method = lm) + labs(x = "Map Selectivity(%)", y = "Time(ms)") + ggtitle("Spill Operation")
p6 <- ggplot(mergeOps.data, aes(x = MapSelectivityData, y = Duration)) + geom_point() + geom_smooth(method = lm) + labs(x = "Map Selectivity(%)", y = "Time(ms)") + ggtitle("Merge Operation")
# p2/p3 plot shuffled data volume on x, so the axis is labelled accordingly
# (the original "Map Selectivity(%)" label did not match the plotted variable).
p2 <- ggplot(writeOps.data, aes(x = shuffleData, y = Duration)) + geom_point() + geom_smooth(method = lm) + labs(x = "Shuffle Data", y = "Time(ms)") + ggtitle("Write Operation")
p3 <- ggplot(shuffleOps.data, aes(x = shuffleData, y = Duration)) + geom_point() + geom_smooth(method = lm) + labs(x = "Shuffle Data", y = "Time(ms)") + ggtitle("Shuffle Operation")
p7 <- ggplot(mapPhase.data, aes(x = MapSelectivity, y = Duration)) + geom_point() + geom_smooth(method = lm) + labs(x = "Map Selectivity(%)", y = "Time(ms)") + ggtitle("Map Phase")
p8 <- ggplot(reducePhase.data, aes(x = MapSelectivity, y = Duration)) + geom_point() + geom_smooth(method = lm) + labs(x = "Map Selectivity(%)", y = "Time(ms)") + ggtitle("Reduce Phase")
# Both approaches work; the multiplot() implementation is defined below.
#multiplot(p1, p2, p3, p4,p5,p6, cols=2)
grid.arrange(p1, p4, p5, p6, p3, p2, p7, p8, ncol = 2, top = "Different Operations")
grid.arrange(p1, p4, p5, p6, p3, p2, ncol = 2, top = "Different Operations")
# ---- Simple linear models per operation -------------------------------------
lmRead <- lm(readOps.data$Duration ~ readOps.data$DataSize)
summary(lmRead)
lmCollect <- lm(collectOps.data$Duration ~ collectOps.data$MapSelectivity + collectOps.data$Mappers)
summary(lmCollect)
lmSpill <- lm(spillOps.data$Duration ~ spillOps.data$MapSelectivity + spillOps.data$Mappers)
summary(lmSpill)
lmMerge <- lm(mergeOps.data$Duration ~ mergeOps.data$MapSelectivity + mergeOps.data$Mappers)
summary(lmMerge)
lmShuffle <- lm(shuffleOps.data$Duration ~ shuffleOps.data$MapSelectivity + shuffleOps.data$Mappers + shuffleOps.data$DataSize)
summary(lmShuffle)
lmWrite <- lm(writeOps.data$Duration ~ writeOps.data$MapSelectivity + writeOps.data$Mappers)
summary(lmWrite)
lmMap <- lm(mapPhase.data$Duration ~ mapPhase.data$MapSelectivity)
summary(lmMap)
lmReduce <- lm(reducePhase.data$Duration ~ reducePhase.data$MapSelectivity + reducePhase.data$DataSize)
summary(lmReduce)
# Multiplot: arrange several grid-based plots (e.g. ggplot objects) on a page.
#
# Plot objects can be passed in ..., or via 'plotlist' (a list of plots).
# - cols:   number of columns in the layout (used only when 'layout' is NULL)
# - layout: a matrix specifying the layout; if present, 'cols' is ignored.
# - file:   unused; retained so existing calls that pass it keep working.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 goes in the upper left, 2 in the upper right, and 3 spans
# the whole bottom row.
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  library(grid)
  # Merge the ... arguments and plotlist into a single list of plots.
  plots <- c(list(...), plotlist)
  numPlots <- length(plots)
  if (numPlots == 0) {
    stop("multiplot: no plots supplied", call. = FALSE)
  }
  # If no explicit layout was given, fill a cols-wide grid, column by column.
  # seq_len() (not seq(1, n)) so a degenerate size cannot yield c(1, 0).
  if (is.null(layout)) {
    layout <- matrix(seq_len(cols * ceiling(numPlots/cols)),
                     ncol = cols, nrow = ceiling(numPlots/cols))
  }
  if (numPlots == 1) {
    print(plots[[1]])
  } else {
    # Set up the page as a grid of viewports matching 'layout'.
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Draw each plot in the cell(s) of 'layout' that carry its index.
    for (i in seq_len(numPlots)) {
      matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
      print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                      layout.pos.col = matchidx$col))
    }
  }
}
| /analysis.R | no_license | sneceesay77/mr-modelling | R | false | false | 6,033 | r | #BigData Application Performance Pridiction and Cluster Recommendation
#Date : 28.05.18
library(ggplot2)
library(dplyr)
library(gridExtra)
dev.off()
options(scipen=0)
setwd("/home/sc306/Dropbox/SA/ClusterBenchMarking/hadoop/ClusterBenchmarking")
readOps.data <- read.csv(file = "readData.csv", header = TRUE)
#readOps.data <- filter(readOps.data, readOps.data$Duration <= 1500)
writeOps.data <- read.csv(file = "writeData.csv", header = TRUE)
writeOps.data$shuffleData = as.integer((writeOps.data$DataSize*(writeOps.data$MapSelectivity/100)*writeOps.data$Mappers)/8)
#shuffleOps.data <- filter(shuffleOps.data, shuffleOps.data$Duration <= 12000)
shuffleOps.data <- read.csv(file = "shuffleData.csv", header = TRUE)
shuffleOps.data$shuffleData = as.integer((shuffleOps.data$DataSize*(shuffleOps.data$MapSelectivity/100)*shuffleOps.data$Mappers)/8)
shuffleOps.data <- filter(shuffleOps.data, shuffleOps.data$Duration <= 12000)
collectOps.data <- read.csv(file = "collectData.csv", header = TRUE)
collectOps.data$MapSelectivityData <- as.integer(collectOps.data$MapOutputRec*100/1048576)
spillOps.data <- read.csv(file = "spillData.csv", header = TRUE)
spillOps.data$MapSelectivityData <- as.integer(spillOps.data$MapOutputRec*100/1048576)
#spillOps.data <- filter(spillOps.data, spillOps.data$Duration <= 15000)
mergeOps.data <- read.csv(file = "mergeData.csv", header = TRUE)
mergeOps.data$MapSelectivityData <- as.integer(mergeOps.data$MapOutputRec*100/1048576)
mergeOps.data <- filter(mergeOps.data, mergeOps.data$Duration <= 150)
mapPhase.data <- read.csv(file = "mapdata/allOut.csv", header = TRUE)
summary(mapPhase.data)
reducePhase.data <- read.csv(file = "reducedata/out/allOut.csv", header = TRUE)
summary(reducePhase.data)
#head(readOps.data)
p1<-ggplot(readOps.data, aes(x=readOps.data$MapSelectivity, y=readOps.data$Duration)) + geom_point()+geom_smooth(method=lm)+labs(x="Map Selectivity(%)",y="Time(ms)")+ggtitle("Read Operation")
p4<-ggplot(collectOps.data, aes(x=collectOps.data$MapSelectivityData, y=collectOps.data$Duration)) + geom_point()+geom_smooth(method=lm)+labs(x="Map Selectivity(%)",y="Time(ms)")+ggtitle("Collect Operation")
p5<-ggplot(spillOps.data, aes(x=spillOps.data$MapSelectivityData, y=spillOps.data$Duration)) + geom_point()+geom_smooth(method=lm)+labs(x="Map Selectivity(%)",y="Time(ms)")+ggtitle("Spill Operation")
p6<-ggplot(mergeOps.data, aes(x=mergeOps.data$MapSelectivityData, y=mergeOps.data$Duration)) + geom_point()+geom_smooth(method=lm)+labs(x="Map Selectivity(%)",y="Time(ms)")+ggtitle("Merge Operation")
p2<-ggplot(writeOps.data, aes(x=writeOps.data$shuffleData, y=writeOps.data$Duration)) + geom_point()+geom_smooth(method=lm)+labs(x="Map Selectivity(%)",y="Time(ms)")+ggtitle("Write Operation")
p3<-ggplot(shuffleOps.data, aes(x=shuffleOps.data$shuffleData, y=shuffleOps.data$Duration)) + geom_point()+geom_smooth(method=lm)+labs(x="Map Selectivity(%)",y="Time(ms)")+ggtitle("Shuffle Operation")
p7<-ggplot(mapPhase.data, aes(x=mapPhase.data$MapSelectivity, y=mapPhase.data$Duration)) + geom_point()+geom_smooth(method=lm)+labs(x="Map Selectivity(%)",y="Time(ms)")+ggtitle("Map Phase")
p8<-ggplot(reducePhase.data, aes(x=reducePhase.data$MapSelectivity, y=reducePhase.data$Duration)) + geom_point()+geom_smooth(method=lm)+labs(x="Map Selectivity(%)",y="Time(ms)")+ggtitle("Reduce Phase")
#Both approaces works, however multiplot function implementation shoud be copied and pasted from
#multiplot(p1, p2, p3, p4,p5,p6, cols=2)
grid.arrange(p1, p4, p5, p6,p3,p2,p7,p8, ncol=2, top="Different Operations")
grid.arrange(p1, p4, p5, p6,p3,p2, ncol=2, top="Different Operations")
lmRead <- lm(readOps.data$Duration~readOps.data$DataSize)
summary(lmRead)
lmCollect <- lm(collectOps.data$Duration~collectOps.data$MapSelectivity+collectOps.data$Mappers)
summary(lmCollect)
lmSpill <- lm(spillOps.data$Duration~spillOps.data$MapSelectivity+spillOps.data$Mappers)
summary(lmSpill)
lmMerge <- lm(mergeOps.data$Duration~mergeOps.data$MapSelectivity+mergeOps.data$Mappers)
summary(lmMerge)
lmShuffle <- lm(shuffleOps.data$Duration~shuffleOps.data$MapSelectivity+shuffleOps.data$Mappers+shuffleOps.data$DataSize)
summary(lmShuffle)
lmWrite <- lm(writeOps.data$Duration~writeOps.data$MapSelectivity+writeOps.data$Mappers)
summary(lmWrite)
lmMap <- lm(mapPhase.data$Duration~mapPhase.data$MapSelectivity)
summary(lmMap)
lmReduce <- lm(reducePhase.data$Duration~reducePhase.data$MapSelectivity+reducePhase.data$DataSize)
summary(lmReduce)
# Multiple plot function
#
# ggplot objects can be passed in ..., or to plotlist (as a list of ggplot objects)
# - cols: Number of columns in layout
# - layout: A matrix specifying the layout. If present, 'cols' is ignored.
#
# If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
# then plot 1 will go in the upper left, 2 will go in the upper right, and
# 3 will go all the way across the bottom.
#
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
library(grid)
# Make a list from the ... arguments and plotlist
plots <- c(list(...), plotlist)
numPlots = length(plots)
# If layout is NULL, then use 'cols' to determine layout
if (is.null(layout)) {
# Make the panel
# ncol: Number of columns of plots
# nrow: Number of rows needed, calculated from # of cols
layout <- matrix(seq(1, cols * ceiling(numPlots/cols)),
ncol = cols, nrow = ceiling(numPlots/cols))
}
if (numPlots==1) {
print(plots[[1]])
} else {
# Set up the page
grid.newpage()
pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
# Make each plot, in the correct location
for (i in 1:numPlots) {
# Get the i,j matrix positions of the regions that contain this subplot
matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
print(plots[[i]], vp = viewport(layout.pos.row = matchidx$row,
layout.pos.col = matchidx$col))
}
}
}
|
# Unroot a phylogenetic tree with the 'ape' package: read a Newick tree,
# remove its root with unroot(), and write the unrooted tree back out.
library(ape)
testtree <- read.tree("10345_1.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="10345_1_unrooted.txt") | /codeml_files/newick_trees_processed/10345_1/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 137 | r | library(ape)
# (duplicate text-column copy of the same three statements follows)
testtree <- read.tree("10345_1.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="10345_1_unrooted.txt") |
# And here is where I realize that by including a time trend, my p-value for
# Temp has skyrocketed and I am better off leaving it out of my model. This,
# obviously, is counterproductive to my investigation so I am looking into
# alternative methods of improving my model
# It is worth noting that Grdic and Nizic used an exponential model without
# a time trend to predict number of tourists based off air temperature
# in Croatia. The following analysis follows their example.
# At this point I have largely decided that using Visit.Rate with a time trend
# term is redundant. I'm looking for a reason to keep using it, if I don't
# find one, I'm switching back to Visitors.
setwd("D:/Files/R Directory/Project")
# Annual series: temperature, visitor counts, and visit rate, joined on Year.
temp<-read.csv("temp.csv")
visitors<-read.csv("visitors.csv")
vrate<-read.csv("vrate.csv")
rnt<-merge(vrate, temp, by="Year")
tnv<-merge(temp, visitors, by="Year")
# Re-base the year so 1980 becomes 0 (the time-trend term starts at zero).
rnt$Year<-(rnt$Year-1980)
tnv$Year<-(tnv$Year-1980)
# Linear models with a time-trend term alongside temperature.
time.rxt<-lm(Visit.Rate ~ Temp + Year, data=rnt)
summary(time.rxt)
time.vxt<-lm(Visitors ~ Temp + Year, data=tnv)
summary(time.vxt)
# I'm switching back to Visitors away from Visit.Rate
# Exponential and log-linear specifications without a time trend.
exp.vxt<-lm(Visitors ~ exp(Temp), data=tnv)
ln.vxt<-lm(log(Visitors) ~ Temp, data=tnv)
summary(exp.vxt)
summary(ln.vxt)
# exp.vxt yields a higher r.squared, gives me workable p-values on all my
# coefficients, and more closely follows the model used by Grdic and Nizic.
# My only qualm is that the model itself makes little sense. When graphed
# it forms a right angle that hugs quadrant 3, and with a negative
# intercept estimate it always predicts negative tourist numbers.
# CONCLUSION: Thrown out time trend term and exponential model. Looking to
# forecast.
# Next I plan on looking into Vector Autoregression
| /Script3.R | no_license | Devlin1834/Econometrics-Project-1 | R | false | false | 1,796 | r | # And here is where I realize that by including a time trend, my p-value for
# Temp has skyrocketed and I am better off leaving it out of my model. This,
# obviously, is counterproductive to my investigation so I am looking into
# alternative methods of improving my model
# It is worth noting that Grdic and Nizic used an exponential model without
# a time trend to predict number of tourists based off air temperature
# in Croatia. The following analysis follows their example.
# At this point I have largely decided that using Visit.Rate with a time trend
# term is redundant. I'm looking for a reason to keep using it, if I don't
# find one, I'm switching back to Visitors.
setwd("D:/Files/R Directory/Project")
temp<-read.csv("temp.csv")
visitors<-read.csv("visitors.csv")
vrate<-read.csv("vrate.csv")
rnt<-merge(vrate, temp, by="Year")
tnv<-merge(temp, visitors, by="Year")
rnt$Year<-(rnt$Year-1980)
tnv$Year<-(tnv$Year-1980)
time.rxt<-lm(Visit.Rate ~ Temp + Year, data=rnt)
summary(time.rxt)
time.vxt<-lm(Visitors ~ Temp + Year, data=tnv)
summary(time.vxt)
# I'm siwtching back to Visitors away from Vsit.Rate
exp.vxt<-lm(Visitors ~ exp(Temp), data=tnv)
ln.vxt<-lm(log(Visitors) ~ Temp, data=tnv)
summary(exp.vxt)
summary(ln.vxt)
# exp.vxt yeilds a higher r.squared, gives me workable p-values on all my
# coefficients, and more closely follows the model used by Grdic and Nizic.
# My only qualm is that the model itself makes little sense. When graphed
# it forms a right angle that hugs quadrant 3, and with a negative
# intercept estimate it always predicts negative tourist numbers.
# CONCLUSION: Thrown out time trend term and exponential model. Looking to
# forecast.
# Next I plan on looking into Vector Autoregression
|
pollutantmean <- function(directory, pollutant, id = 1:332) {
  ## Compute the mean of a pollutant pooled across a set of monitor files.
  ##
  ## 'directory': character(1), name of the folder (relative to the current
  ##              working directory) holding one CSV file per monitor.
  ## 'pollutant': character(1), column to average ("sulfate" or "nitrate").
  ## 'id':        integer vector selecting monitor files by their position
  ##              in the alphabetically sorted directory listing.
  ##
  ## Returns the mean of the pollutant over all selected monitors, ignoring
  ## NA values.  Usage: pollutantmean("specdata", "sulfate", 1:10)
  ##
  ## Builds full paths instead of setwd()-ing into the folder, so the
  ## working directory is never left changed (the original did not restore
  ## it on error).
  files <- list.files(file.path(getwd(), directory), full.names = TRUE)[id]
  # Read each file once, keeping only non-missing pollutant values;
  # collecting into a list avoids growing a vector inside the loop.
  values <- lapply(files, function(f) {
    column <- read.csv(f)[[pollutant]]
    column[!is.na(column)]
  })
  mean(unlist(values))
}
# | /R programming Coursera/Assignment 1 Air Pollution/pollutantmean.R | no_license | VictorPelaez/Courses | R | false | false | 1,079 | r | pollutantmean <- function(directory, pollutant, id=1:332) {
## 'directory' is a character vector of length 1 indicating
## the location of the CSV files
## source("pollutantmean.R")
## pollutantmean("specdata", "sulfate", 1:10)
## 'pollutant' is a character vector of length 1 indicating
## the name of the pollutant for which we will calculate the
## mean; either "sulfate" or "nitrate".
## 'id' is an integer vector indicating the monitor ID numbers
## to be used
## Return the mean of the pollutant across all monitors list
## in the 'id' vector (ignoring NA values)
# Remember the current working directory so it can be restored afterwards.
# NOTE(review): it is not restored if an error occurs before setwd(dir).
dir<-getwd();
dir_new<-paste(dir,directory,sep='/');
setwd(dir_new)
# Select monitor files by position in the (alphabetically sorted) listing.
tf=dir()[id]
var_ac=vector();
numfiles<-length(id);
# Pool the non-NA pollutant values from every selected file.
for (l in c(1:numfiles)){
data <- read.table(tf[l],header=T,sep=",")[ ,pollutant]
var<-na.exclude(data);
# NOTE(review): growing var_ac with c() copies it on every iteration.
var_ac<-c(var_ac,var)
}
setwd(dir)
return(mean(var_ac))
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/02_decision_model_functions.R
\name{check_transition_probability}
\alias{check_transition_probability}
\title{Check if transition array is valid}
\usage{
check_transition_probability(a_P, err_stop = FALSE, verbose = FALSE)
}
\arguments{
\item{a_P}{A transition probability array.}
\item{err_stop}{Logical variable to stop model run if set up as TRUE. Default = FALSE.}
\item{verbose}{Logical variable to indicate print out of messages.
Default = FALSE}
}
\value{
This function stops if the transition probability array is not valid and
shows which entries are not valid
}
\description{
\code{check_transition_probability} checks if transition probabilities are in [0, 1].
}
| /man/check_transition_probability.Rd | permissive | fthielen/ce16_modelling_course | R | false | true | 762 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/02_decision_model_functions.R
\name{check_transition_probability}
\alias{check_transition_probability}
\title{Check if transition array is valid}
\usage{
check_transition_probability(a_P, err_stop = FALSE, verbose = FALSE)
}
\arguments{
\item{a_P}{A transition probability array.}
\item{err_stop}{Logical variable to stop model run if set up as TRUE. Default = FALSE.}
\item{verbose}{Logical variable to indicate print out of messages.
Default = FALSE}
}
\value{
This function stops if transition probability array is not valid and shows
what are the entries that are not valid
}
\description{
\code{check_transition_probability} checks if transition probabilities are in [0, 1].
}
|
library('rjags')
#################################
## Beta-binomial model: uncertainty about the probability that one item is
## infected, updated both analytically (conjugacy) and by MCMC with JAGS.
## Specify prior: Beta(s*t, s*(1-t)), parameterised by size s and mean t
prior_param <- list(s = 2, t = 0.5)
## Specify data: X infected out of k sampled items, K items in total,
## N2 posterior-predictive replicates
data_to_model <- list(a = prior_param$s*prior_param$t, b = prior_param$s*(1-prior_param$t), X = 0, k = 4, K = 88, N2 = 1000)
#data_to_model = list(a=prior_param$s*prior_param$t, b=prior_param$s*(1-prior_param$t), X=1, k=2, K = 88, N2 = 1000)
## Update with conjugate properties: Beta prior + binomial likelihood
## gives Beta(a + X, b + k - X) exactly
p_post_conj <- list(a = data_to_model$a + data_to_model$X,
b = data_to_model$b + data_to_model$k - data_to_model$X)
## Number of MCMC iterations per chain. (This was undefined in the original
## script, which made both coda.samples() calls fail.)
N <- 10^4
## Update with MCMC sampling
ms <- "
model {
p ~ dbeta(a, b)
X ~ dbinom(p,k)
} "
m <- jags.model(textConnection(ms), data = data_to_model, n.adapt = 10^6, n.chains = 3)
sam <- coda.samples(m, c('p'), n.iter = N, thin = 1)
mat <- as.matrix(sam)
p_post <- mat[,grep('p',colnames(mat))]
plot(density(p_post,from=0,to=1),xlab = 'p', main = 'prior and posterior of \n probability of infection in one item')
hist(p_post,add = TRUE,probability = TRUE, col = 'gray')
## Overlay the exact conjugate posterior (blue) and the prior (red);
## the MCMC density should match the blue curve.
lines((1:999)/1000,dbeta((1:999)/1000,p_post_conj$a, p_post_conj$b),col = 'blue',lwd = 2)
lines((1:999)/1000,dbeta((1:999)/1000,prior_param$s*prior_param$t,prior_param$s*(1-prior_param$t)),col = 'red')
###########################################################
## Add prediction
##
## Update with MCMC sampling: same model plus N2 posterior-predictive draws
## of the number of infected items (Xall) among K items
ms <- "
model {
p ~ dbeta(a,b)
X ~ dbinom(p,k)
for(i in 1:N2){
Xall[i] ~ dbinom(p,K)
}
} "
m <- jags.model(textConnection(ms), data = data_to_model, n.adapt = 10^6, n.chains = 3)
sam <- coda.samples(m, c('Xall'), n.iter = N, thin = 1)
mat <- as.matrix(sam)
## Derive uncertainty in the probability that more than 10% of the items contain an infection
pred_post <- rowMeans(mat > (data_to_model$K*0.1))
hist(pred_post)
mean(pred_post)
(pred_post_int <- HPDinterval(as.mcmc(pred_post), prob = 0.95))
| /import_risk_analysis.R | no_license | Ullrika/quantifying_uncertainty_by_probability | R | false | false | 1,856 | r | library('rjags')
#################################
## Beta-binomial model
## Specify prior
prior_param = list(s=2, t=0.5)
## Specify data
data_to_model = list(a=prior_param$s*prior_param$t, b=prior_param$s*(1-prior_param$t), X=0, k=4, K = 88, N2 = 1000)
#data_to_model = list(a=prior_param$s*prior_param$t, b=prior_param$s*(1-prior_param$t), X=1, k=2, K = 88, N2 = 1000)
## Update with conjugate properties
p_post_conj = list(a = data_to_model$a + data_to_model$X,
b = data_to_model$b + data_to_model$k - data_to_model$X)
## Update with MCMC sampling
ms = "
model {
p ~ dbeta(a, b)
X ~ dbinom(p,k)
} "
m = jags.model(textConnection(ms), data=data_to_model, n.adapt=10^6, n.chains=3)
sam = coda.samples(m, c('p'), n.iter=N, thin=1)
mat = as.matrix(sam)
p_post = mat[,grep('p',colnames(mat))]
plot(density(p_post,from=0,to=1),xlab = 'p', main = 'prior and posterior of \n probability of infection in one item')
hist(p_post,add = TRUE,probability = TRUE, col = 'gray')
lines((1:999)/1000,dbeta((1:999)/1000,p_post_conj$a, p_post_conj$b),col = 'blue',lwd = 2)
lines((1:999)/1000,dbeta((1:999)/1000,prior_param$s*prior_param$t,prior_param$s*(1-prior_param$t)),col = 'red')
###########################################################
## Add prediction
##
## Update with MCMC sampling
ms = "
model {
p ~ dbeta(a,b)
X ~ dbinom(p,k)
for(i in 1:N2){
Xall[i] ~ dbinom(p,K)
}
} "
m = jags.model(textConnection(ms), data=data_to_model, n.adapt=10^6, n.chains=3)
sam = coda.samples(m, c('Xall'), n.iter=N, thin=1)
mat = as.matrix(sam)
## Derive uncertianty in the probability that more than 10% of the items contains an infection
pred_post = rowMeans(mat>(data_to_model$K*0.1))
hist(pred_post)
mean(pred_post)
(pred_post_int = HPDinterval(as.mcmc(pred_post), prob = 0.95))
|
# Quarterly soft-drink sales: decompose the series and compare three
# exponential-smoothing models (simple, Holt, Holt-Winters).
setwd("......")  # placeholder path -- set to the folder holding the CSV
sales <- read.csv("Soft Drink Sales.csv", header = TRUE, stringsAsFactors = TRUE)
head(sales)
# Quarterly series starting 1997 Q1
sales_ts <- ts(sales$Sales, start = c(1997, 1), frequency = 4)
sales_ts
library(forecast)
#par(mfrow = c(1,1))
# Seasonal-trend decomposition (periodic seasonal component)
fit_stl <- stl(sales_ts, s.window = "period")
plot(fit_stl)
######### SIMPLE EXPONENTIAL SMOOTHING ###########
fit <- ets(sales_ts, model = "ANN")
fit
pred <- forecast(fit, 4)
pred
# Labels now describe the soft-drink data (the original carried
# "New Heaven Annual Mean Temperature" labels copied from another example).
plot(pred, xlab = "Year", ylab = "Sales", main = "Quarterly Soft Drink Sales (SES forecast)")
accuracy(fit)
########## Visualizing Fit #################
plot(sales_ts)
lines(fit$fitted, col = "red", lty = 2)
points(pred$mean, col = "blue", pch = 16)
######### HOLT's EXPONENTIAL SMOOTHING ###########
fit_H <- ets(sales_ts, model = "AAN")
fit_H
pred_H <- forecast(fit_H, 4)
pred_H
plot(pred_H, xlab = "Year", ylab = "Sales", main = "Quarterly Soft Drink Sales (Holt forecast)")
accuracy(fit_H)
########## Visualizing Fit (HOLT's) #################
plot(sales_ts)
lines(fit_H$fitted, col = "red", lty = 2)
points(pred_H$mean, col = "blue", pch = 16)
######### WINTER EXPONENTIAL SMOOTHING ###########
fit_W <- ets(sales_ts, model = "AAA")
fit_W
pred_W <- forecast(fit_W, 4)
pred_W
plot(pred_W, xlab = "Year", ylab = "Sales", main = "Quarterly Soft Drink Sales (Holt-Winters forecast)")
accuracy(fit_W)
########## Visualizing Fit #################
plot(sales_ts)
lines(fit_W$fitted, col = "red", lty = 2)
points(pred_W$mean, col = "blue", pch = 16)
########################################
# 2x2 comparison panel; each overlay now pairs a model's fitted values with
# that same model's forecast (the original reused the last 'pred' for all).
par(mfrow = c(2,2))
plot(sales_ts)
plot(sales_ts)
lines(fit$fitted, col = "red", lty = 2)
points(pred$mean, col = "blue", pch = 16)
plot(sales_ts)
lines(fit_H$fitted, col = "red", lty = 2)
points(pred_H$mean, col = "blue", pch = 16)
plot(sales_ts)
lines(fit_W$fitted, col = "red", lty = 2)
points(pred_W$mean, col = "blue", pch = 16)
# Clears the workspace when the script finishes; drop this line to inspect
# the fitted models interactively afterwards.
rm(list = ls())
| /Forecast_soft.R | no_license | shikhilnangia/forecasting | R | false | false | 1,951 | r | setwd("......")
sales <- read.csv("Soft Drink Sales.csv", header = TRUE, stringsAsFactors = TRUE)
head(sales)
sales_ts <- ts(sales$Sales, start = c(1997, 1), frequency = 4)
sales_ts
library(forecast)
#par(mfrow = c(1,1))
fit <- stl(sales_ts, s.window = "period")
plot(fit)
######### SIMPLE EXPONENTIAL SMOOTHING ###########
fit <- ets(sales_ts, model = "ANN")
fit
pred <- forecast(fit, 4)
pred
plot(pred, xlab = "Year", ylab = "Temperature (F)", main = "New Heaven Annual Mean Temperature")
accuracy(fit)
########## Visualizing Fit #################
plot(sales_ts)
lines(fit$fitted, col = "red", lty = 2)
points(pred$mean, col = "blue", pch = 16)
######### HOLT's EXPONENTIAL SMOOTHING ###########
fit_H <- ets(sales_ts, model = "AAN")
fit_H
pred <- forecast(fit_H, 4)
pred
plot(pred, xlab = "Year", ylab = "Temperature (F)", main = "New Heaven Annual Mean Temperature")
accuracy(fit_H)
########## Visualizing Fit (HOLT's) #################
plot(sales_ts)
lines(fit_H$fitted, col = "red", lty = 2)
points(pred$mean, col = "blue", pch = 16)
######### WINTER EXPONENTIAL SMOOTHING ###########
fit_W <- ets(sales_ts, model = "AAA")
fit_W
pred <- forecast(fit_W, 4)
pred
plot(pred, xlab = "Year", ylab = "Temperature (F)", main = "New Heaven Annual Mean Temperature")
accuracy(fit_W)
########## Visualizing Fit #################
plot(sales_ts)
lines(fit_W$fitted, col = "red", lty = 2)
points(pred$mean, col = "blue", pch = 16)
########################################
par(mfrow = c(2,2))
plot(sales_ts)
plot(sales_ts)
lines(fit$fitted, col = "red", lty = 2)
points(pred$mean, col = "blue", pch = 16)
plot(sales_ts)
lines(fit_H$fitted, col = "red", lty = 2)
points(pred$mean, col = "blue", pch = 16)
plot(sales_ts)
lines(fit_W$fitted, col = "red", lty = 2)
points(pred$mean, col = "blue", pch = 16)
rm(list = ls())
|
# Example: hash a variety of R values -- strings, numbers, logicals,
# functions, calls, and lists.  hash() and functionCall() are presumably
# provided by the surrounding package (not defined here) -- see its docs.
my.function <- function (x, y) x+y
# a list of values to hash
values <- list(
"Hello world!",
101,
3.142,
TRUE,
my.function,
(function (x, y) x+y),
functionCall(my.function, call("my.function", 10, 10)),
list(a=1, b=2, c="hello")
)
# hash the values in the list
(hashes <- lapply(values, hash))
# Note that functions with the same body will have the same hash
hashes[[5]] == hashes[[6]] | /R/examples/hash/example.hash.R | no_license | rwetherall/memofunc | R | false | false | 405 | r | my.function <- function (x, y) x+y
# a list of values to hash
values <- list(
"Hello world!",
101,
3.142,
TRUE,
my.function,
(function (x, y) x+y),
functionCall(my.function, call("my.function", 10, 10)),
list(a=1, b=2, c="hello")
)
# hash the values in the list
(hashes <- lapply(values, hash))
# Note that functions with the same body will have the same hash
hashes[[5]] == hashes[[6]] |
\name{soilwaterptf-package}
\alias{soilwaterptf-package}
\alias{soilwaterptf}
\docType{package}
\title{
\packageTitle{soilwaterptf}
}
\description{
\packageDescription{soilwaterptf}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{soilwaterptf}
\packageIndices{soilwaterptf}
}
\author{
\packageAuthor{soilwaterptf}
Maintainer: \packageMaintainer{soilwaterptf}
}
\keyword{ package }
| /pkg/soilwaterptf/man/soilwaterptf-package.Rd | no_license | TillF/soilwater | R | false | false | 389 | rd | \name{soilwaterptf-package}
\alias{soilwaterptf-package}
\alias{soilwaterptf}
\docType{package}
\title{
\packageTitle{soilwaterptf}
}
\description{
\packageDescription{soilwaterptf}
}
\details{
The DESCRIPTION file:
\packageDESCRIPTION{soilwaterptf}
\packageIndices{soilwaterptf}
}
\author{
\packageAuthor{soilwaterptf}
Maintainer: \packageMaintainer{soilwaterptf}
}
\keyword{ package }
|
`FitAR` <-
function(z, p, lag.max = "default", ARModel = "ARz", ...) {
  ## Thin dispatcher for fitting an AR model to the series 'z' of order
  ## specification 'p': ARModel = "ARz" selects FitARz(); any other value
  ## selects FitARp().  'lag.max' and '...' are forwarded unchanged, and
  ## the selected fitter's result is returned.  (FitARz/FitARp are defined
  ## elsewhere in the package.)
  if (ARModel == "ARz") {
    FitARz(z, p, lag.max = lag.max, ...)
  } else {
    FitARp(z, p, lag.max = lag.max, ...)
  }
}
| /FitAR/R/FitAR.R | no_license | ingted/R-Examples | R | false | false | 198 | r | `FitAR` <-
function(z,p,lag.max="default", ARModel="ARz", ...){
# Dispatcher (duplicate text-column copy of FitAR): selects the fitting
# backend by ARModel -- FitARz for "ARz", FitARp otherwise -- and forwards
# all arguments unchanged.  Both fitters are defined elsewhere.
if (ARModel=="ARz")
out<-FitARz(z,p,lag.max=lag.max, ...)
else
out <- FitARp(z,p,lag.max=lag.max, ...)
out
}
|
# Auto-generated fuzzing regression input (AFL/valgrind) for the internal
# multivariance:::match_rows: a 5x7 matrix of extreme doubles plus a 1x1
# matrix, invoked via do.call with the list elements as arguments.
testlist <- list(A = structure(c(2.32784507357645e-308, 9.54011667951623e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613108177-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 344 | r | testlist <- list(A = structure(c(2.32784507357645e-308, 9.54011667951623e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
## https://github.com/eliaskrainski/ektutorials
## Spatial model for one day of US maximum temperature: builds a Matern
## (SPDE) random field on a mesh, fits it with inlabru, maps the fitted
## surface, and inspects leave-group-out cross-validation neighborhoods.
### load script to build the mesh
system.time(source('us_mesh.R'))
### load script to get the data
system.time(source('us_get_tmax1day.R'))
### summary of the data
summary(tmax1day)
sd(tmax1day$tmax, na.rm=TRUE)
### Construct latent model components
# PC priors: P(sigma > 5) = 0.01 and P(range < 25) = 0.01
matern <- inla.spde2.pcmatern(
mesh=mesh, alpha=2,
prior.sigma = c(5, 0.01),
prior.range = c(25, 0.01))
### load inlabru (easier to code the model)
library(inlabru)
### define the model
model <- tmax ~ Intercept(1) +
field(main=coordinates, model = matern)
### prior for the likelihood parameter
lik.prec <- list(prec=list(prior='pc.prec', param=c(5, 0.01)))
### set some INLA parameters
inla.setOption(
inla.mode='experimental',
num.threads='4:-1',
smtp='pardiso',
pardiso.license='~/.pardiso.lic')
### fit the model using inlabru
fit <- bru(
model, tmax1day, family='gaussian',
options=list(
verbose=TRUE,
control.family=list(hyper=lik.prec)))
### some summary
fit$summary.fix
fit$summary.hyperpar
### consider the posterior mean of the random field
s.mean <- fit$summary.ran$field$mean
### project it into a grid (for plotting)
y.m <- inla.mesh.project(grid.proj, field=s.mean)
# mask grid cells outside the region (id.grid.out comes from the sourced
# scripts -- presumably us_mesh.R)
y.m[id.grid.out] <- NA
library(fields)
### visualize the random field + b0
par(mfrow=c(1,1), mar=c(0,0,0,0))
image.plot(
x=grid.proj$x,
y=grid.proj$y,
z=y.m+fit$summary.fix$mean[1], asp=1)
points(tmax1day, cex=0.05, pch=8)
plot(map.moll, add=TRUE, border=gray(0.3,0.5))
### group cross validation
# NOTE(review): the comment below mentions m=10 but 20 is passed here -- confirm.
system.time(gcpo <- inla.group.cv(fit, 20))
### selected locations to visualize
isel <- c(867, 1349, 1914, 2114, 2618, 3055, 3658, 4608, 4666, 5060, 5348)
### number of neighbors (with m=10) at the selected data locations
nnb <- sapply(gcpo$groups[isel], function(x) length(x$idx)-1)
nnb
### plot the neighbors for some data points
locs <- coordinates(tmax1day)
png('figz.png', 3000, 2000, res=100)
par(mfrow=c(1,1), mar=c(0,0,0,0))
image.plot(
x=grid.proj$x,
y=grid.proj$y,
z=y.m+fit$summary.fix$mean[1], asp=1)
plot(map.moll, add=TRUE, border=gray(0.3,0.5))
points(tmax1day, cex=0.5, pch=8)
# draw a segment from each selected point to each of its CV-group neighbors
for(i in isel) {
jj <- gcpo$groups[[i]]$idx[-1]
segments(locs[i, 1], locs[i, 2], locs[jj, 1], locs[jj, 2])
points(locs[jj, ], pch=19, cex=1, col='white')
}
points(locs[isel, ], pch=19, cex=3, col='white')
text(locs[isel, 1], locs[isel, 2], paste(nnb), col='blue3', cex=.8)
dev.off()
# interactive helper (disabled): click map points to pick 'isel' indices
if(FALSE) {
ll <- locator()
isel <- sapply(1:length(ll[[1]]), function(i)
which.min(sqrt((locs[,1]-ll$x[i])^2 +
(locs[,2]-ll$y[i])^2)))
isel <- sort(isel)
isel
}
| /us_tmax1day_spatial.R | no_license | eliaskrainski/ektutorials | R | false | false | 2,677 | r | ## https://github.com/eliaskrainski/ektutorials
### ---------------------------------------------------------------------
### Spatial analysis of 1-day maximum temperature (tmax) over the US.
### Workflow: (1) source helper scripts that build the mesh and load the
### data, (2) fit tmax ~ intercept + Matern SPDE random field with
### INLA/inlabru, (3) map the posterior mean of the field, (4) run the
### grouped cross-validation (inla.group.cv) and plot each selected
### point's CV neighbour set.
### NOTE(review): relies on globals created elsewhere (mesh, tmax1day,
### grid.proj, id.grid.out, map.moll) -- presumably by the two sourced
### scripts below; confirm before running standalone.
### ---------------------------------------------------------------------
### load script to build the mesh
system.time(source('us_mesh.R'))
### load script to get the data
system.time(source('us_get_tmax1day.R'))
### summary of the data
summary(tmax1day)
sd(tmax1day$tmax, na.rm=TRUE)
### Construct latent model components
### PC priors: prior.sigma = c(5, 0.01) means P(sigma > 5) = 0.01 and
### prior.range = c(25, 0.01) means P(range < 25) = 0.01 (per
### inla.spde2.pcmatern; units assumed to match the projection -- confirm).
matern <- inla.spde2.pcmatern(
    mesh=mesh, alpha=2,
    prior.sigma = c(5, 0.01),
    prior.range = c(25, 0.01))
### load inlabru (easier to code the model)
library(inlabru)
### define the model
model <- tmax ~ Intercept(1) +
    field(main=coordinates, model = matern)
### prior for the likelihood parameter
### (pc.prec prior on the Gaussian observation precision)
lik.prec <- list(prec=list(prior='pc.prec', param=c(5, 0.01)))
### set some INLA parameters
### NOTE(review): the PARDISO solver needs a valid licence file at the
### path below; drop smtp/pardiso.license to use the default solver.
inla.setOption(
    inla.mode='experimental',
    num.threads='4:-1',
    smtp='pardiso',
    pardiso.license='~/.pardiso.lic')
### fit the model using inlabru
fit <- bru(
    model, tmax1day, family='gaussian',
    options=list(
        verbose=TRUE,
        control.family=list(hyper=lik.prec)))
### some summary
fit$summary.fix
fit$summary.hyperpar
### consider the posterior mean of the random field
s.mean <- fit$summary.ran$field$mean
### project it into a grid (for plotting)
y.m <- inla.mesh.project(grid.proj, field=s.mean)
### mask grid pixels that fall outside the map outline
y.m[id.grid.out] <- NA
library(fields)
### visualize the random field + b0
par(mfrow=c(1,1), mar=c(0,0,0,0))
image.plot(
    x=grid.proj$x,
    y=grid.proj$y,
    z=y.m+fit$summary.fix$mean[1], asp=1)
points(tmax1day, cex=0.05, pch=8)
plot(map.moll, add=TRUE, border=gray(0.3,0.5))
### group cross validation
### NOTE(review): the comment below says m=10 but the call uses 20 --
### confirm which group size is intended.
system.time(gcpo <- inla.group.cv(fit, 20))
### selected locations to visualize
isel <- c(867, 1349, 1914, 2114, 2618, 3055, 3658, 4608, 4666, 5060, 5348)
### number of neighbors (with m=10) at the selected data locations
### (idx appears to include the point itself, hence the "-1" -- the loop
### below also drops idx[1] when drawing neighbours; confirm.)
nnb <- sapply(gcpo$groups[isel], function(x) length(x$idx)-1)
nnb
### plot the neighbors for some data points
locs <- coordinates(tmax1day)
png('figz.png', 3000, 2000, res=100)
par(mfrow=c(1,1), mar=c(0,0,0,0))
image.plot(
    x=grid.proj$x,
    y=grid.proj$y,
    z=y.m+fit$summary.fix$mean[1], asp=1)
plot(map.moll, add=TRUE, border=gray(0.3,0.5))
points(tmax1day, cex=0.5, pch=8)
### draw a segment from each selected point to each of its CV neighbours
for(i in isel) {
    jj <- gcpo$groups[[i]]$idx[-1]
    segments(locs[i, 1], locs[i, 2], locs[jj, 1], locs[jj, 2])
    points(locs[jj, ], pch=19, cex=1, col='white')
}
points(locs[isel, ], pch=19, cex=3, col='white')
text(locs[isel, 1], locs[isel, 2], paste(nnb), col='blue3', cex=.8)
dev.off()
### interactive helper (disabled): click on the open plot to pick
### locations, then snap each click to the nearest data point
if(FALSE) {
    ll <- locator()
    isel <- sapply(1:length(ll[[1]]), function(i)
        which.min(sqrt((locs[,1]-ll$x[i])^2 +
                       (locs[,2]-ll$y[i])^2)))
    isel <- sort(isel)
    isel
}
|
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -4.33931358612312e-153, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
## Replay the (presumably AFL-generated) fuzz input against the compiled
## cpp_bootMatrix routine via its internal (:::) entry point.
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result) | /myTAI/inst/testfiles/cpp_bootMatrix/AFL_cpp_bootMatrix/cpp_bootMatrix_valgrind_files/1615767125-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 1,803 | r | testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -4.33931358612312e-153, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
## Replay the fuzz input against the compiled cpp_bootMatrix routine via
## its internal (:::) entry point, then inspect whatever it returned.
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result)
## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix: creates a special "matrix" object that can cache its
## inverse.  Returns a list of four accessor functions:
##   setmat(mat): store a new matrix (and drop any stale cached inverse)
##   getmat():    return the stored matrix
##   setinv(inv): cache the computed inverse
##   getinv():    return the cached inverse, or NULL if not computed yet
makeCacheMatrix <- function(m = matrix()) {
  inv <- NULL  # cached inverse; NULL until cacheSolve() stores it
  setmat <- function(mat) {
    m <<- mat
    inv <<- NULL  # matrix changed, so the old inverse is no longer valid
  }
  getmat <- function() m
  setinv <- function(inverse) inv <<- inverse
  getinv <- function() inv
  ## house the four accessor functions in a named list
  list(setmat = setmat, getmat = getmat,
       setinv = setinv, getinv = getinv)
}
## cacheSolve: computes the inverse of the special "matrix" returned by
## makeCacheMatrix above.  If the inverse has already been calculated
## (and the matrix has not changed), the cached inverse is returned
## instead of being recomputed.
##
##   m   : object created by makeCacheMatrix()
##   ... : further arguments passed on to solve()
cacheSolve <- function(m, ...) {
  ## Return a matrix that is the inverse of the matrix stored in 'm'
  inv <- m$getinv()
  if (!is.null(inv)) {  # cache hit: reuse the previously computed inverse
    message("getting cached data")
    return(inv)
  }
  mat <- m$getmat()       # cache miss: fetch the stored matrix itself
  inv <- solve(mat, ...)  # compute the inverse
  m$setinv(inv)           # store it so later calls hit the cache
  inv
}
| /cachematrix.R | no_license | sblaesi/ProgrammingAssignment2 | R | false | false | 1,668 | r | ## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix: creates a special "matrix" object that can cache its
## inverse.  Returns a list of four accessor functions:
##   setmat(mat): store a new matrix (and drop any stale cached inverse)
##   getmat():    return the stored matrix
##   setinv(inv): cache the computed inverse
##   getinv():    return the cached inverse, or NULL if not computed yet
makeCacheMatrix <- function(m = matrix()) {
  inv <- NULL  # cached inverse; NULL until cacheSolve() stores it
  setmat <- function(mat) {
    m <<- mat
    inv <<- NULL  # matrix changed, so the old inverse is no longer valid
  }
  getmat <- function() m
  setinv <- function(inverse) inv <<- inverse
  getinv <- function() inv
  ## house the four accessor functions in a named list
  list(setmat = setmat, getmat = getmat,
       setinv = setinv, getinv = getinv)
}
## cacheSolve: computes the inverse of the special "matrix" returned by
## makeCacheMatrix above.  If the inverse has already been calculated
## (and the matrix has not changed), the cached inverse is returned
## instead of being recomputed.
##
##   m   : object created by makeCacheMatrix()
##   ... : further arguments passed on to solve()
cacheSolve <- function(m, ...) {
  ## Return a matrix that is the inverse of the matrix stored in 'm'
  inv <- m$getinv()
  if (!is.null(inv)) {  # cache hit: reuse the previously computed inverse
    message("getting cached data")
    return(inv)
  }
  mat <- m$getmat()       # cache miss: fetch the stored matrix itself
  inv <- solve(mat, ...)  # compute the inverse
  m$setinv(inv)           # store it so later calls hit the cache
  inv
}
|
## plot1: histogram of household Global Active Power for 2007-02-01/02.
## NOTE(review): assumes `data_table` (the power-consumption data with a
## d/mm/yyyy Date column) already exists in the workspace -- confirm.
png("plot1.png", width = 480, height = 480)
## keep the two target days; "2/02/2017" was a typo for "2/02/2007"
dates <- subset(data_table, Date == "1/02/2007" | Date == "2/02/2007")
## "killowatts" misspelling in the axis label also corrected
hist(dates$Global_active_power,
     xlab = "Global Active Power (kilowatts)", ylab = "Frequency",
     col = "red", main = "Global Active Power")
dev.off() | /plot1.R | no_license | HCooper-babylon/ExData_Plotting1 | R | false | false | 245 | r | png("plot1.png",width=480, height=480)
## keep the two target days; "2/02/2017" was a typo for "2/02/2007"
## (assumes `data_table` already exists in the workspace -- confirm)
dates <- subset(data_table, Date == "1/02/2007" | Date == "2/02/2007")
## "killowatts" misspelling in the axis label also corrected
hist(dates$Global_active_power,
     xlab = "Global Active Power (kilowatts)", ylab = "Frequency",
     col = "red", main = "Global Active Power")
dev.off()  # close the PNG device opened earlier
### ---------------------------------------------------------------------
### Flatten award notices into one supplier/lot record per row.
### For each notice id (i over list_webid_unique) the inner loop walks the
### notice's lots (nbr_lots from `df`, defaulting to 1 when NA) and pulls
### the winning supplier's details out of the nested structure returned by
### make_df().  Every extraction is wrapped in tryCatch(..., NA) because
### fields are frequently absent in the source data.
### NOTE(review): grows plain vectors element-by-element (O(n^2) copying)
### and re-calls make_df(i) on every inner iteration -- works, but slow.
### ---------------------------------------------------------------------
suppl_denomination=c()  # supplier name
suppl_adresse=c()  # supplier street address
suppl_cp=c()  # supplier postal code
suppl_ville=c()  # supplier city
suppl_codenuts=c()  # supplier NUTS region code
suppl_nboffrerecu=c()  # number of offers received
suppl_siret=c()  # supplier national id (codeidentnational)
lot_intitule=c()  # lot title
lot_cpv=c()  # lot CPV classification code (principal)
suppl_dateattribution=c()  # award date, converted from epoch millis below
id=c()  # notice web id (idweb)
k=1  # running output row index across all notices/lots
for(i in 1:length(list_webid_unique)){
  for(j in 1:ifelse(is.na(df[i,]$nbr_lots),1,df[i,]$nbr_lots)){running_case = make_df(i)
  suppl_denomination[k]<-tryCatch(running_case$donnees$attribution$decision$titulaireandRENSEIGNEMENT[[j]][1,]$denomination, error = function(e) NA)
  suppl_adresse[k]<-tryCatch(running_case$donnees$attribution$decision$titulaireandRENSEIGNEMENT[[j]][1,]$adresse, error = function(e) NA)
  suppl_cp[k]<-tryCatch(running_case$donnees$attribution$decision$titulaireandRENSEIGNEMENT[[j]][1,]$cp, error = function(e) NA)
  suppl_ville[k]<-tryCatch(running_case$donnees$attribution$decision$titulaireandRENSEIGNEMENT[[j]][1,]$ville, error = function(e) NA)
  suppl_codenuts[k]<-tryCatch(running_case$donnees$attribution$decision$titulaireandRENSEIGNEMENT[[j]][1,]$codenuts, error = function(e) NA)
  suppl_nboffrerecu[k]<-tryCatch(running_case$donnees$attribution$decision$titulaireandRENSEIGNEMENT[[j]][1,]$nboffrerecu, error = function(e) NA)
  lot_cpv[k]<-tryCatch(as.data.frame(running_case$donnees$objet$lots[1]$lot)$cpv[[j]]$principal, error = function(e) NA)
  lot_intitule[k]<-tryCatch(running_case$donnees$attribution$decision$intitule[[j]], error = function(e) NA)
  suppl_siret[k]<-tryCatch(running_case$donnees$attribution$decision$titulaireandRENSEIGNEMENT[[j]]$codeidentnational[1], error = function(e) NA)
  # dateattribution[2] is divided by 1000, so it is presumably epoch
  # milliseconds -- TODO confirm against the source feed
  suppl_dateattribution[k]<-tryCatch(str_c(as.Date(as.POSIXct(running_case$donnees$attribution$decision$titulaireandRENSEIGNEMENT[[j]]$dateattribution[2]/1000, origin = "1970-01-01")))
                                 , error = function(e) NA)
  id[k] <- ifelse(is.null(running_case$gestion$reference$idweb), NA, running_case$gestion$reference$idweb)
  k=k+1
  }
}
## NOTE(review): the next two lines look like leftover interactive
## debugging (`test` is not defined in this script) -- consider removing.
df[47,]$nbr_lots
test$donnees$attribution$decision$titulaireandRENSEIGNEMENT[[2]][1,]$denomination
## assemble the collected vectors into the final supplier data frame
## (cbind of atomic vectors yields a character matrix before coercion)
suppl_df = cbind(id, lot_intitule, suppl_denomination, suppl_siret, suppl_adresse,suppl_ville,suppl_cp,suppl_codenuts,suppl_nboffrerecu, lot_cpv,suppl_dateattribution)
suppl_df<-as.data.frame(suppl_df) | /Untitled.R | no_license | phmorand/DeCoMaP | R | false | false | 2,215 | r | suppl_denomination=c()
suppl_adresse=c()
suppl_cp=c()
suppl_ville=c()
suppl_codenuts=c()
suppl_nboffrerecu=c()
suppl_siret=c()
lot_intitule=c()
lot_cpv=c()
suppl_dateattribution=c()
id=c()
k=1
for(i in 1:length(list_webid_unique)){
for(j in 1:ifelse(is.na(df[i,]$nbr_lots),1,df[i,]$nbr_lots)){running_case = make_df(i)
suppl_denomination[k]<-tryCatch(running_case$donnees$attribution$decision$titulaireandRENSEIGNEMENT[[j]][1,]$denomination, error = function(e) NA)
suppl_adresse[k]<-tryCatch(running_case$donnees$attribution$decision$titulaireandRENSEIGNEMENT[[j]][1,]$adresse, error = function(e) NA)
suppl_cp[k]<-tryCatch(running_case$donnees$attribution$decision$titulaireandRENSEIGNEMENT[[j]][1,]$cp, error = function(e) NA)
suppl_ville[k]<-tryCatch(running_case$donnees$attribution$decision$titulaireandRENSEIGNEMENT[[j]][1,]$ville, error = function(e) NA)
suppl_codenuts[k]<-tryCatch(running_case$donnees$attribution$decision$titulaireandRENSEIGNEMENT[[j]][1,]$codenuts, error = function(e) NA)
suppl_nboffrerecu[k]<-tryCatch(running_case$donnees$attribution$decision$titulaireandRENSEIGNEMENT[[j]][1,]$nboffrerecu, error = function(e) NA)
lot_cpv[k]<-tryCatch(as.data.frame(running_case$donnees$objet$lots[1]$lot)$cpv[[j]]$principal, error = function(e) NA)
lot_intitule[k]<-tryCatch(running_case$donnees$attribution$decision$intitule[[j]], error = function(e) NA)
suppl_siret[k]<-tryCatch(running_case$donnees$attribution$decision$titulaireandRENSEIGNEMENT[[j]]$codeidentnational[1], error = function(e) NA)
suppl_dateattribution[k]<-tryCatch(str_c(as.Date(as.POSIXct(running_case$donnees$attribution$decision$titulaireandRENSEIGNEMENT[[j]]$dateattribution[2]/1000, origin = "1970-01-01")))
, error = function(e) NA)
id[k] <- ifelse(is.null(running_case$gestion$reference$idweb), NA, running_case$gestion$reference$idweb)
k=k+1
}
}
df[47,]$nbr_lots
test$donnees$attribution$decision$titulaireandRENSEIGNEMENT[[2]][1,]$denomination
suppl_df = cbind(id, lot_intitule, suppl_denomination, suppl_siret, suppl_adresse,suppl_ville,suppl_cp,suppl_codenuts,suppl_nboffrerecu, lot_cpv,suppl_dateattribution)
suppl_df<-as.data.frame(suppl_df) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.