content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/cert.R
\name{certinfo}
\alias{certinfo}
\alias{certs}
\alias{verify_cert}
\title{Certificates}
\usage{
certinfo(cert)
verify_cert(cert, root = system.file("cacert.pem", package = "openssl"))
}
\arguments{
\item{cert}{a certificate}
\item{root}{a root certificate or path to CA bundle}
}
\description{
Stuff for certificates
}
|
/man/certs.Rd
|
no_license
|
rOpenSec/openssl
|
R
| false
| false
| 415
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/cert.R
\name{certinfo}
\alias{certinfo}
\alias{certs}
\alias{verify_cert}
\title{Certificates}
\usage{
certinfo(cert)
verify_cert(cert, root = system.file("cacert.pem", package = "openssl"))
}
\arguments{
\item{cert}{a certificate}
\item{root}{a root certificate or path to CA bundle}
}
\description{
Stuff for certificates
}
|
# Enumerate the (row, column) grid cells belonging to cross-validation fold k.
# A cell (i, j) is in fold k when ((k + 2) %% 5 + i + 2 * j) is divisible by 5,
# so the five folds tile the matrix in a diagonal pattern.
# Returns a list of length-2 vectors c(i, j), with j varying fastest
# (same order as a nested i-outer / j-inner loop).
cvPoints.nc <- function(k, mini, maxi, maxj) {
  shift <- (k + 2) %% 5
  # expand.grid varies its first factor fastest, matching the loop order.
  grid <- expand.grid(j = 1:maxj, i = mini:maxi)
  keep <- (shift + grid$i + 2 * grid$j) %% 5 == 0
  Map(c, grid$i[keep], grid$j[keep])
}
cvPoints <- compiler::cmpfun(cvPoints.nc)
# Sum of squared differences between data1 and data2 over the given cells.
# `points` is a list of c(row, col) index pairs, as produced by cvPoints().
partialSSE.nc <- function(data1, data2, points) {
  sq_err <- vapply(
    points,
    function(pt) (data1[pt[1], pt[2]] - data2[pt[1], pt[2]])^2,
    numeric(1)
  )
  sum(sq_err)
}
partialSSE <- compiler::cmpfun(partialSSE.nc)
# Sum of absolute differences between data1 and data2 over the given cells.
# `points` is a list of c(row, col) index pairs, as produced by cvPoints().
partialSAE.nc <- function(data1, data2, points) {
  abs_err <- vapply(
    points,
    # as.numeric: the running total in the original was double (0 + x),
    # and vapply's numeric(1) template is strict about type.
    function(pt) as.numeric(abs(data1[pt[1], pt[2]] - data2[pt[1], pt[2]])),
    numeric(1)
  )
  sum(abs_err)
}
partialSAE <- compiler::cmpfun(partialSAE.nc)
# Write the residuals (data2 - data1) into `res` at the given cells.
# `res` may be NULL on the first call, in which case a zero matrix of
# data1's shape is created. Returns the updated residual matrix.
updateResiduals <- function(res, data1, data2, points) {
  if (is.null(res)) {
    res <- data1 * 0
  }
  if (length(points) > 0) {
    # Two-column matrix indexing: one (row, col) pair per row of `idx`.
    idx <- do.call(rbind, points)
    res[idx] <- data2[idx] - data1[idx]
  }
  res
}
# Cross-validate a smoothing function on a 2-d data matrix.
#
# For each fold k, the cells selected by cvPoints() (restricted to rows
# fromAgeInd..upToAgeInd) are set to NA, smoothFun is asked to reconstruct
# the matrix, and the squared/absolute errors at the masked cells are
# accumulated.  Cells that were already NA in `data` are excluded from the
# fold (see fltPoints below).
#
# smoothFun: function(matrix, ...) returning a matrix of the same shape.
# folds:     fold identifiers passed to cvPoints() (all five by default).
#
# Returns list(MSE, MAE, cvResiduals); the residual matrix holds
# (smoothed - observed) at every cross-validated cell.
smoothCv.nc = function(smoothFun, data, upToAgeInd = dim(data)[1], fromAgeInd = 1, folds = 0:4, ...)
{
nAges = dim(data)[1]
nYears = dim(data)[2]
SSE = SAE = l = 0
r = NULL
for(k in folds) {
lmWithNAs = data
naPoints = cvPoints(k, max(1, fromAgeInd), min(nAges, upToAgeInd), nYears)
# Keep only cells that actually hold data; the error at a cell that was
# already NA would be undefined.
fltPoints = list()
n = 1
for(i in naPoints) {
if(!is.na(lmWithNAs[i[1],i[2]])) {
lmWithNAs[i[1],i[2]] = NA
fltPoints[[n]] = i
n = n + 1
}
}
result = smoothFun(lmWithNAs, ...)
SSE = SSE + partialSSE(data, result, fltPoints)
SAE = SAE + partialSAE(data, result, fltPoints)
r = updateResiduals(r, data, result, fltPoints)
l = l + length(fltPoints)
}
# Averages are taken over the total number of cross-validated cells.
return(list(MSE = SSE/l, MAE = SAE/l, cvResiduals = r))
}
smoothCv = compiler::cmpfun(smoothCv.nc)
|
/R/cv.R
|
no_license
|
cran/smoothAPC
|
R
| false
| false
| 1,786
|
r
|
cvPoints.nc = function(k, mini, maxi, maxj)
{
k = (k+2) %% 5
result = list()
n = 1
for(i in mini:maxi)
for(j in 1:maxj)
if((k + i + 2*j) %% 5 == 0)
{
result[[n]] = c(i,j)
n = n + 1
}
return(result)
}
cvPoints = compiler::cmpfun(cvPoints.nc)
partialSSE.nc = function(data1, data2, points)
{
SSE = 0
for(p in points) {
SSE = SSE + (data1[p[1],p[2]] - data2[p[1],p[2]])^2
}
return(SSE)
}
partialSSE = compiler::cmpfun(partialSSE.nc)
partialSAE.nc = function(data1, data2, points)
{
SAE = 0
for(p in points) {
SAE = SAE + abs(data1[p[1],p[2]] - data2[p[1],p[2]])
}
return(SAE)
}
partialSAE = compiler::cmpfun(partialSAE.nc)
updateResiduals = function(res, data1, data2, points)
{
if(is.null(res)) res = data1*0
for(p in points) {
res[p[1],p[2]] = data2[p[1],p[2]] - data1[p[1],p[2]]
}
return(res)
}
smoothCv.nc = function(smoothFun, data, upToAgeInd = dim(data)[1], fromAgeInd = 1, folds = 0:4, ...)
{
nAges = dim(data)[1]
nYears = dim(data)[2]
SSE = SAE = l = 0
r = NULL
for(k in folds) {
lmWithNAs = data
naPoints = cvPoints(k, max(1, fromAgeInd), min(nAges, upToAgeInd), nYears)
fltPoints = list()
n = 1
for(i in naPoints) {
if(!is.na(lmWithNAs[i[1],i[2]])) {
lmWithNAs[i[1],i[2]] = NA
fltPoints[[n]] = i
n = n + 1
}
}
result = smoothFun(lmWithNAs, ...)
SSE = SSE + partialSSE(data, result, fltPoints)
SAE = SAE + partialSAE(data, result, fltPoints)
r = updateResiduals(r, data, result, fltPoints)
l = l + length(fltPoints)
}
return(list(MSE = SSE/l, MAE = SAE/l, cvResiduals = r))
}
smoothCv = compiler::cmpfun(smoothCv.nc)
|
# Point-pattern analysis of public vs. private establishments in Recife:
# kernel density maps and inhomogeneous L-function envelopes via spatstat.
#
# NOTE(review): install.packages() inside a script re-installs on every run;
# consider guarding with requireNamespace() or removing these lines.
install.packages('spatstat')
install.packages('spatstat.local')
install.packages('rgdal')
install.packages('sp')
library(spatstat)
library(spatstat.local)
# LongLatToUTM
source('functions/LongLatToUTM.R')
library(dplyr)
# NOTE(review): points_in_recife and recife_geo are assumed to already exist
# in the workspace, and st_coordinates() comes from sf, which is not loaded
# here — confirm this script runs after the one that creates them.
publico<-points_in_recife %>%
filter (grupo_nat_juridica == 'PUBLICO')
privado<-points_in_recife %>%
filter (grupo_nat_juridica == 'PRIVADO')
# Project longitude/latitude to UTM zone 23 (metre units) for spatstat.
poly_utm<-LongLatToUTM(st_coordinates(recife_geo)[,1], st_coordinates(recife_geo)[,2], 23)
pts_publico<-LongLatToUTM(st_coordinates(publico)[,1], st_coordinates(publico)[,2], 23)
pts_privado<-LongLatToUTM(st_coordinates(privado)[,1], st_coordinates(privado)[,2], 23)
poly_utm
pts_publico
pts_privado
# Define the polygon of the study area in spatstat's owin format
# (rev() presumably to get the vertex orientation owin requires — confirm).
w<-owin(poly=data.frame(x=rev(poly_utm$X), y=rev(poly_utm$Y)))
plot(w)
# Define the spatial coordinates as spatstat ppp point patterns
publico.pts<-ppp(pts_publico$X,pts_publico$Y, window=w)
plot(publico.pts, pch=20)
privado.pts<-ppp(pts_privado$X,pts_privado$Y, window=w)
plot(privado.pts, pch=20)
# Kernel density maps with the observed points overlaid.
plot(density(publico.pts))
plot(w, add=T)
points(publico.pts, pch=20, cex=.5, col="black")
plot(density(privado.pts))
plot(w, add=T)
points(privado.pts, pch=20, cex=.5, col="black")
# Side-by-side density maps on a common white-to-black palette.
norm_palette <- colorRampPalette(c("white","black"))
pal_trans <- norm_palette(5)
par(mfrow=c(1,2))
plot(density(publico.pts), main="Publico", col=pal_trans)
plot(w, add=T)
points(publico.pts, pch=20, cex=.2, col="red")
plot(density(privado.pts), main="Privado", col=pal_trans)
plot(w, add=T)
points(privado.pts, pch=20, cex=.2, col="red")
# Test for homogeneity of the spatial distribution of the points
homtest(publico.pts, nsim = 19)
homtest(privado.pts, nsim = 19)
# Simulation envelopes of the inhomogeneous L-function; nsim = 19 is the
# conventional choice for a 5% Monte Carlo envelope test.
EL.inhom.publico <- envelope(publico.pts,
Linhom, nsim=19,
correction="best")
plot(EL.inhom.publico, . - r ~ r,
ylab=expression(hat("L")), legend=F,
xlab="Distância (m)")
EL.inhom.privado <- envelope(privado.pts,
Linhom, nsim=19,
correction="best")
plot(EL.inhom.privado, . - r ~ r,
ylab=expression(hat("L")), legend=F,
xlab="Distância (m)")
|
/spatial_analysis.R
|
no_license
|
higuchip/workshop_UFPE
|
R
| false
| false
| 2,194
|
r
|
install.packages('spatstat')
install.packages('spatstat.local')
install.packages('rgdal')
install.packages('sp')
library(spatstat)
library(spatstat.local)
# LongLatToUTM
source('functions/LongLatToUTM.R')
library(dplyr)
publico<-points_in_recife %>%
filter (grupo_nat_juridica == 'PUBLICO')
privado<-points_in_recife %>%
filter (grupo_nat_juridica == 'PRIVADO')
poly_utm<-LongLatToUTM(st_coordinates(recife_geo)[,1], st_coordinates(recife_geo)[,2], 23)
pts_publico<-LongLatToUTM(st_coordinates(publico)[,1], st_coordinates(publico)[,2], 23)
pts_privado<-LongLatToUTM(st_coordinates(privado)[,1], st_coordinates(privado)[,2], 23)
poly_utm
pts_publico
pts_privado
#Define o poligono da área estudada no formato do spatstat
w<-owin(poly=data.frame(x=rev(poly_utm$X), y=rev(poly_utm$Y)))
plot(w)
#Definir coordenadas espaciais no formato específico do spatstat
publico.pts<-ppp(pts_publico$X,pts_publico$Y, window=w)
plot(publico.pts, pch=20)
privado.pts<-ppp(pts_privado$X,pts_privado$Y, window=w)
plot(privado.pts, pch=20)
plot(density(publico.pts))
plot(w, add=T)
points(publico.pts, pch=20, cex=.5, col="black")
plot(density(privado.pts))
plot(w, add=T)
points(privado.pts, pch=20, cex=.5, col="black")
norm_palette <- colorRampPalette(c("white","black"))
pal_trans <- norm_palette(5)
par(mfrow=c(1,2))
plot(density(publico.pts), main="Publico", col=pal_trans)
plot(w, add=T)
points(publico.pts, pch=20, cex=.2, col="red")
plot(density(privado.pts), main="Privado", col=pal_trans)
plot(w, add=T)
points(privado.pts, pch=20, cex=.2, col="red")
# Teste para verificar a homogeneidade na distribuicao dos dados
homtest(publico.pts, nsim = 19)
homtest(privado.pts, nsim = 19)
EL.inhom.publico <- envelope(publico.pts,
Linhom, nsim=19,
correction="best")
plot(EL.inhom.publico, . - r ~ r,
ylab=expression(hat("L")), legend=F,
xlab="Distância (m)")
EL.inhom.privado <- envelope(privado.pts,
Linhom, nsim=19,
correction="best")
plot(EL.inhom.privado, . - r ~ r,
ylab=expression(hat("L")), legend=F,
xlab="Distância (m)")
|
#' RDN: Reliability Density Neighborhood for Applicability Domain characterization.
#'
#' The RDN package provides a straightforward way of computing a QSAR model's applicability domain (AD),
#' being currently only applicable for classification models.
#' This method scans the chemical space, starting from the locations of training instances,
#' taking into account local density, and local bias and precision. After the chemical space
#' has been mapped, the established RDN AD can be used to sort new (external) predictions
#' according to their reliability.
#' Even though the RDN mapping is calculated using \code{getRDN}, the different tasks that this entails
#' are separately available through the remaining functions in the package, which are listed below.
#' However, despite being individually available, these functions should ideally not be called
#' in isolation; the user should use \code{getRDN} directly instead.
#'
#' The AD will be established according to the following workflow:
#' \itemize{
#' \item STEP #1: Calculation of a Euclidean Distance matrix of the training set through \code{getEDmatrix}.
#' This matrix will contain the distance between each training instance and each of its training neighbours, sorted
#' in ascending order of distance.
#' \item STEP #2: Calculation of individual average distance to the k-th nearest neighbours through \code{getThreshold}.
#' This distance will be used as coverage threshold around each training instance.
#' \item STEP #3: Place new queries onto the established coverage map using \code{TestInTrain}. If an instance is
#' located within the radius of coverage around any training instances, it will be deemed as covered by the AD.
#' }
#'
#' This workflow is fully automated in \code{getRDN}, which runs these steps iteratively for a range of k values, which allows
#' scanning chemical space from the near vicinity around training instances outwards.
#' The full details on the theoretical background of this algorithm are available in the literature.[1]
#'
#' @references [1] N Aniceto, AA Freitas, et al. A Novel Applicability Domain Technique for Mapping Predictive Reliability Across the Chemical
#' Space of a QSAR: Reliability-Density Neighbourhood. J Cheminf. 2016. Submitted.
#'
#' @docType package
#' @name RDN-package
#' @aliases RDN
#' @importFrom randomForest randomForest
NULL
|
/R/RDN.R
|
no_license
|
machLearnNA/RDN
|
R
| false
| false
| 2,411
|
r
|
#' RDN: Reliability Density Neighborhood for Applicability Domain characterization.
#'
#' The RDN package provides a straightforward way of computing a QSAR model's applicability domain (AD),
#' being currently only applicable for classification models.
#' This method scans the chemical space, starting from the locations of training instances,
#' taking into account local density, and local bias and precision. After the chemical space
#' has been mapped, the established RDN AD can be used to sort new (external) predictions
#' according to their reliability.
#' Even though the RDN mapping is calculated using \code{getRDN}, the different tasks that this entails
#' are separately available through the remaining functions in the package, which are listed below. However,
#' Despite being available for use functions should ideally not be called isolated, and the user should use
#' \code{getRDN} directly instead.
#'
#' The AD will be established according to the following workflow:
#' \itemize{
#' \item STEP #1: Calculation of an Euclidean Distance matrix of the training set through \code{getEDmatrix}.
#' This matrix will contain the distance between each training instance and each of its training neighbours, sorted
#' in ascending order of distance.
#' \item STEP #2: Calculation of individual average distance to the k-th nearest neighbours through \code{getThreshold}.
#' This distance will be used as coverage threshold around each training instance.
#' \item STEP #3: Place new queries onto the established coverage map using \code{TestInTrain}. If an instance is
#' located within the radius of coverage around any training instances, it will be deemed as covered by the AD.
#' }
#'
#' This workflow is fully automated in \code{getRDN} which runs these steps iteratively for a range of k values, which allows
#' scanning chemical space from the near vicinity around training instances outwards.
#' The full details on the theoretical background of this algorithm are available in the literature.[1]
#'
#' @references [1] N Aniceto, AA Freitas, et al. A Novel Applicability Domain Technique for Mapping Predictive Reliability Accross the Chemical
#' Space of a QSAR: Reliability-Density Neighbourhood. J Cheminf. 2016. Submitted.
#'
#' @docType package
#' @name RDN-package
#' @aliases RDN
#' @importFrom randomForest randomForest
NULL
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paths.R
\name{path.find}
\alias{path.find}
\title{Find all path graphs originated from a given root.}
\usage{
path.find(index, map)
}
\arguments{
\item{index}{Index of a root node (a node whose index never appears in
\code{map[, 2]}).}
\item{map}{Matrix of \code{n.edges}-by-\code{2} dimension, where \code{n.edges}
is the number of directed edges in DAG. The first column has indices of
nodes that edges directing from, whereas the second column gives the indices
of nodes the corresponding edges directing towards.}
}
\value{
Returns a list of path graphs originated from root \code{index}, for
which the \code{i}th element of the returned list is a vector of indices of
nodes in the \code{i}th path graph.
}
\description{
Recursively find all possible path graphs originated from a given root in
DAG.
}
|
/man/path.find.Rd
|
no_license
|
cran/hsm
|
R
| false
| true
| 885
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paths.R
\name{path.find}
\alias{path.find}
\title{Find all path graphs originated from a given root.}
\usage{
path.find(index, map)
}
\arguments{
\item{index}{Index of a root node (a node whose index never appears in
\code{map[, 2]}).}
\item{map}{Matrix of \code{n.edges}-by-\code{2} dimension, where \code{n.edges}
is the number of directed edges in DAG. The first column has indices of
nodes that edges directing from, whereas the second column gives the indices
of nodes the corresponding edges directing towards.}
}
\value{
Returns a list of path graphs originated from root \code{index}, for
which the \code{i}th element of the returned list is a vector of indices of
nodes in the \code{i}th path graph.
}
\description{
Recursively find all possible path graphs originated from a given root in
DAG.
}
|
#' Tidying methods for ARIMA modeling of time series
#'
#' These methods tidy the coefficients of ARIMA models of univariate time
#' series.
#'
#' @param x An object of class "Arima"
#'
#' @details `augment` is not currently implemented, as it is not clear
#' whether ARIMA predictions can or should be merged with the original
#' data frame.
#'
#' @template boilerplate
#'
#' @seealso \link{arima}
#'
#' @examples
#'
#' fit <- arima(lh, order = c(1, 0, 0))
#' tidy(fit)
#' glance(fit)
#'
#' @name Arima_tidiers
NULL
#' @rdname Arima_tidiers
#'
#' @param conf.int whether to include a confidence interval
#' @param conf.level confidence level of the interval, used only if
#'   `conf.int=TRUE`
#'
#' @return `tidy` returns one row for each coefficient in the model,
#' with three columns (five when `conf.int = TRUE`):
#' \item{term}{The term in the ARIMA model being estimated}
#' \item{estimate}{The estimated coefficient}
#' \item{std.error}{The standard error of the coefficient}
#'
#' If `conf.int = TRUE`, also returns
#' \item{conf.low}{low end of confidence interval}
#' \item{conf.high}{high end of confidence interval}
#'
#' @export
tidy.Arima <- function(x, conf.int=FALSE, conf.level=.95, ...) {
coefs <- stats::coef(x)
# standard errors are computed as in stats:::print.Arima;
# coefficients fixed during fitting (x$mask == FALSE) keep std.error = 0
ses <- rep.int(0, length(coefs))
ses[x$mask] <- sqrt(diag(x$var.coef))
# unrowname() is a package-internal helper that drops row names
ret <- unrowname(data.frame(
term = names(coefs),
estimate = coefs,
std.error = ses
))
if (conf.int) {
# NOTE(review): conf.level is accepted but never forwarded here, so the
# interval uses confint_tidy()'s default level — confirm and pass it on.
ret <- cbind(ret, confint_tidy(x))
}
tibble::as_tibble(ret)
}
#' @rdname Arima_tidiers
#'
#' @param ... extra arguments (not used)
#'
#' @return `glance` returns one row with the columns
#' \item{sigma}{the square root of the estimated residual variance}
#' \item{logLik}{the data's log-likelihood under the model}
#' \item{AIC}{the Akaike Information Criterion}
#' \item{BIC}{the Bayesian Information Criterion}
#'
#' @export
glance.Arima <- function(x, ...) {
# sigma2 is the innovation-variance estimate stored on the Arima fit
ret <- unrowname(data.frame(sigma = sqrt(x$sigma2)))
# finish_glance() is a package-internal helper; per the @return fields
# above it presumably appends logLik/AIC/BIC when available
tibble::as_tibble(finish_glance(ret, x))
}
|
/R/arima_tidiers.R
|
no_license
|
talgalili/broom
|
R
| false
| false
| 2,068
|
r
|
#' Tidying methods for ARIMA modeling of time series
#'
#' These methods tidy the coefficients of ARIMA models of univariate time
#' series.
#'
#' @param x An object of class "Arima"
#'
#' @details `augment` is not currently implemented, as it is not clear
#' whether ARIMA predictions can or should be merged with the original
#' data frame.
#'
#' @template boilerplate
#'
#' @seealso \link{arima}
#'
#' @examples
#'
#' fit <- arima(lh, order = c(1, 0, 0))
#' tidy(fit)
#' glance(fit)
#'
#' @name Arima_tidiers
NULL
#' @rdname Arima_tidiers
#'
#' @param conf.int whether to include a confidence interval
#' @param conf.level confidence level of the interval, used only if
#' `conf.int=TRUE`
#'
#' @return `tidy` returns one row for each coefficient in the model,
#' with five columns:
#' \item{term}{The term in the nonlinear model being estimated and tested}
#' \item{estimate}{The estimated coefficient}
#' \item{std.error}{The standard error from the linear model}
#'
#' If `conf.int = TRUE`, also returns
#' \item{conf.low}{low end of confidence interval}
#' \item{conf.high}{high end of confidence interval}
#'
#' @export
tidy.Arima <- function(x, conf.int=FALSE, conf.level=.95, ...) {
coefs <- stats::coef(x)
# standard errors are computed as in stats:::print.Arima
ses <- rep.int(0, length(coefs))
ses[x$mask] <- sqrt(diag(x$var.coef))
ret <- unrowname(data.frame(
term = names(coefs),
estimate = coefs,
std.error = ses
))
if (conf.int) {
ret <- cbind(ret, confint_tidy(x))
}
tibble::as_tibble(ret)
}
#' @rdname Arima_tidiers
#'
#' @param ... extra arguments (not used)
#'
#' @return `glance` returns one row with the columns
#' \item{sigma}{the square root of the estimated residual variance}
#' \item{logLik}{the data's log-likelihood under the model}
#' \item{AIC}{the Akaike Information Criterion}
#' \item{BIC}{the Bayesian Information Criterion}
#'
#' @export
glance.Arima <- function(x, ...) {
ret <- unrowname(data.frame(sigma = sqrt(x$sigma2)))
tibble::as_tibble(finish_glance(ret, x))
}
|
# Organization of the data ------------------------------------------------
# Five S&P tickers from each of ten sectors (50 stocks total).
require(tseries, quietly = TRUE)
ConsDiscr <- c("AAP", "AMZN", "DRI", "BBY", "CMCSA")
Energy <- c("APC", "ANDV", "APA", "BHGE", "COG")
Financial <- c("AMG", "AFL", "ALL", "AXP", "AIG")
ConsStaples <- c("MO", "ADM", "CPB", "CHD", "CLX")
TelecomServ <- c("T", "CTL", "VZ", "FTR", "BCE")
HealCare <- c("ABT", "BAX", "AET", "A", "ALXN")
Indus <- c("PWR", "RTN", "RSG", "RHI", "ROK")
InfoTecn <- c("ACN", "ATVI", "ADBE", "AMD", "AKAM")
Materials <- c("APD", "ALB", "AVY", "BLL", "DWDP")
Utilities <- c("AES", "LNT", "AEE", "AEP", "EIX")
d <- c(ConsDiscr,Energy, Financial, ConsStaples, TelecomServ, HealCare, Indus, InfoTecn, Materials, Utilities)
# Daily closing prices, 2003-01-01 to 2008-01-01, one column per ticker.
# NOTE(review): 1258 rows are hard-coded; if Yahoo returns a different
# number of observations for any ticker this assignment will fail.
ddata<-matrix(NA, 1258, 50)
for (i in 1:length(d)){
ddata[,i] <- suppressWarnings(
get.hist.quote(instrument=d[i], start="2003-01-01", end="2008-01-01",
quote= "Close", provider="yahoo", drop=TRUE)
)
}
colnames(ddata)<-d
# Daily log-returns: 1257 rows = 1258 prices differenced once.
data_mat<-matrix(NA, 1257, 50)
for (i in 1:50){
data_mat[,i]=diff(log(ddata[,i]))
}
# Pearson correlation -----------------------------------------------------
library(igraph)
library(manipulate)
# Correlation matrix (Pearson) and bootstrap
cor_mat=cor(data_mat)
colnames(cor_mat)<-d
rownames(cor_mat)<-d
# Nonparametric bootstrap of the sup-norm statistic
#   delta = sqrt(n) * max_ij | cor(R*)_ij - cor(R)_ij |,
# whose ECDF F_n is used below to build simultaneous confidence intervals
# for all correlation entries at once.
B = 1000
b_vec = rep(NA, B)
for (i in 1:B){
idx <- sample(1:1257, replace = T)
R_star = data_mat[idx, ]
delta = sqrt(1257)*max(abs(cor(R_star)-cor(data_mat)))
b_vec[i] = delta
}
F_n = ecdf(b_vec)
plot(F_n, col = "blue", xlab = "Bootstrap vector of Delta", ylab = "ECDF", main = "Ecdf of the bootstrap vector")
# Adjacency Matrix and graph creation with dynamic plot
# Builds the correlation graph: edge (i, j) is drawn when the simultaneous
# confidence interval for cor(i, j) lies entirely outside [-epsi, epsi].
# alpha is Bonferroni-corrected over all choose(n, 2) pairs.
# NOTE(review): relies on the globals F_n (bootstrap ECDF), d (ticker names)
# and the hard-coded sample size 1257 defined earlier in this script.
AdjacencyMatrix_Graph <- function(alpha, matr, epsi){
n = nrow(matr)
m = ncol(matr)
ad_mat = matrix(NA, n, m)
t_alpha = quantile(F_n, 1-(alpha/choose(n, 2)))
for (i in 1:n){
for (j in 1:m){
inf_CI = matr[i, j] - t_alpha*(1/sqrt(1257))
sup_CI = matr[i, j] + t_alpha*(1/sqrt(1257))
if (i==j){ad_mat[i, j]=0}
else {
# No edge when the CI straddles either +epsi or -epsi.
if ((inf_CI<=epsi & sup_CI>=epsi) || (inf_CI<= -epsi & sup_CI >=-epsi)){ad_mat[i, j]=0}
else {ad_mat[i, j]=1}
}
}
}
colnames(ad_mat)<-d
rownames(ad_mat)<-d
G <- igraph::graph.adjacency(ad_mat, mode = "undirected", diag = F)
# Colour vertices by sector: five consecutive tickers per sector.
igraph::V(G)$color[1:5] <- "dodgerblue3" # Consumer Discretionary
igraph::V(G)$color[6:10] <- "gold2" # Energy
igraph::V(G)$color[11:15] <- "forestgreen" # Financials
igraph::V(G)$color[16:20] <- "lightblue2" # Consumer Staples
igraph::V(G)$color[21:25] <- "lightgray" # Telecommunications Services
igraph::V(G)$color[26:30] <- "indianred1" # Health Care
igraph::V(G)$color[31:35] <- "lightsalmon1" # Industrials
igraph::V(G)$color[36:40] <- "moccasin" # Information Technology
igraph::V(G)$color[41:45] <- "midnightblue" # Materials
igraph::V(G)$color[46:50] <- "chocolate1" # Utilities
return(plot(G, vertex.size = 10, vertex.label.cex = 0.50, vertex.label.color = "black"))
}
# Interactive exploration of the graph over alpha and epsi.
manipulate(
AdjacencyMatrix_Graph(alpha = a, matr = cor_mat, epsi = e),
a = slider(.00000001, 0.5, .00000001, "alpha", .00000001),
e = slider(.0, 0.8, .0, "epsi", .001)
)
# Spearman correlation -----------------------------------------------------
library(igraph)
library(manipulate)
# Correlation matrix (Spearman) and bootstrap of the same sup-norm
# statistic as in the Pearson section, now on rank correlations.
corr_mat2 <- cor(data_mat, method = "spearman")
colnames(corr_mat2)<-d
rownames(corr_mat2)<-d
B = 1000
b_vec2 = rep(NA, B)
for (i in 1:B){
idx <- sample(1:1257, replace = T)
R_star = data_mat[idx, ]
delta = sqrt(1257)*max(abs(cor(R_star, method = "spearman")-corr_mat2))
b_vec2[i] = delta
}
F_n2 = ecdf(b_vec2)
plot(F_n2, col = "blue", xlab = "Bootstrap vector of Delta", ylab = "ECDF", main = "Ecdf of the bootstrap vector")
# Adjacency Matrix and graph creation with dynamic plot
# FIX: this function used to be named AdjacencyMatrix_Graph, silently
# overwriting the Pearson version defined above — any later redraw of the
# Pearson manipulate() widget would then have used the Spearman ECDF F_n2.
# Renamed so the two interactive widgets stay independent.
AdjacencyMatrix_Graph_spearman <- function(alpha, matr, epsi){
n = nrow(matr)
m = ncol(matr)
ad_mat = matrix(NA, n, m)
t_alpha = quantile(F_n2, 1-(alpha/choose(n, 2)))
for (i in 1:n){
for (j in 1:m){
inf_CI = matr[i, j] - t_alpha*(1/sqrt(1257))
sup_CI = matr[i, j] + t_alpha*(1/sqrt(1257))
if (i==j){ad_mat[i, j]=0}
else {
# No edge when the CI straddles either +epsi or -epsi.
if ((inf_CI<=epsi & sup_CI>=epsi) || (inf_CI<= -epsi & sup_CI >=-epsi)){ad_mat[i, j]=0}
else {ad_mat[i, j]=1}
}
}
}
colnames(ad_mat)<-d
rownames(ad_mat)<-d
G <- igraph::graph.adjacency(ad_mat, mode = "undirected", diag = F)
# Colour vertices by sector: five consecutive tickers per sector.
igraph::V(G)$color[1:5] <- "dodgerblue3" # Consumer Discretionary
igraph::V(G)$color[6:10] <- "gold2" # Energy
igraph::V(G)$color[11:15] <- "forestgreen" # Financials
igraph::V(G)$color[16:20] <- "lightblue2" # Consumer Staples
igraph::V(G)$color[21:25] <- "lightgray" # Telecommunications Services
igraph::V(G)$color[26:30] <- "indianred1" # Health Care
igraph::V(G)$color[31:35] <- "lightsalmon1" # Industrials
igraph::V(G)$color[36:40] <- "moccasin" # Information Technology
igraph::V(G)$color[41:45] <- "midnightblue" # Materials
igraph::V(G)$color[46:50] <- "chocolate1" # Utilities
return(plot(G, vertex.size = 10, vertex.label.cex = 0.50, vertex.label.color = "black"))
}
# Interactive exploration of the Spearman graph over alpha and epsi.
manipulate(
AdjacencyMatrix_Graph_spearman(alpha = a, matr = corr_mat2, epsi = e),
a = slider(.00000001, 0.5, .00000001, "alpha", .00000001),
e = slider(.0, 0.8, .0, "epsi", .001)
)
|
/HW3/DynamicPlots.R
|
no_license
|
eugeniobonifazi/Statistical-Methods-for-Data-Science-I
|
R
| false
| false
| 5,515
|
r
|
# Organization of the data ------------------------------------------------
require(tseries, quietly = TRUE)
ConsDiscr <- c("AAP", "AMZN", "DRI", "BBY", "CMCSA")
Energy <- c("APC", "ANDV", "APA", "BHGE", "COG")
Financial <- c("AMG", "AFL", "ALL", "AXP", "AIG")
ConsStaples <- c("MO", "ADM", "CPB", "CHD", "CLX")
TelecomServ <- c("T", "CTL", "VZ", "FTR", "BCE")
HealCare <- c("ABT", "BAX", "AET", "A", "ALXN")
Indus <- c("PWR", "RTN", "RSG", "RHI", "ROK")
InfoTecn <- c("ACN", "ATVI", "ADBE", "AMD", "AKAM")
Materials <- c("APD", "ALB", "AVY", "BLL", "DWDP")
Utilities <- c("AES", "LNT", "AEE", "AEP", "EIX")
d <- c(ConsDiscr,Energy, Financial, ConsStaples, TelecomServ, HealCare, Indus, InfoTecn, Materials, Utilities)
ddata<-matrix(NA, 1258, 50)
for (i in 1:length(d)){
ddata[,i] <- suppressWarnings(
get.hist.quote(instrument=d[i], start="2003-01-01", end="2008-01-01",
quote= "Close", provider="yahoo", drop=TRUE)
)
}
colnames(ddata)<-d
# here the matrix of data
data_mat<-matrix(NA, 1257, 50)
for (i in 1:50){
data_mat[,i]=diff(log(ddata[,i]))
}
# Pearson correlation -----------------------------------------------------
library(igraph)
library(manipulate)
# Correlation matrix (Pearson) and bootstrap
cor_mat=cor(data_mat)
colnames(cor_mat)<-d
rownames(cor_mat)<-d
B = 1000
b_vec = rep(NA, B)
for (i in 1:B){
idx <- sample(1:1257, replace = T)
R_star = data_mat[idx, ]
delta = sqrt(1257)*max(abs(cor(R_star)-cor(data_mat)))
b_vec[i] = delta
}
F_n = ecdf(b_vec)
plot(F_n, col = "blue", xlab = "Bootstrap vector of Delta", ylab = "ECDF", main = "Ecdf of the bootstrap vector")
# Adjacency Matrix and graph creation with dynamic plot
AdjacencyMatrix_Graph <- function(alpha, matr, epsi){
n = nrow(matr)
m = ncol(matr)
ad_mat = matrix(NA, n, m)
t_alpha = quantile(F_n, 1-(alpha/choose(n, 2)))
for (i in 1:n){
for (j in 1:m){
inf_CI = matr[i, j] - t_alpha*(1/sqrt(1257))
sup_CI = matr[i, j] + t_alpha*(1/sqrt(1257))
if (i==j){ad_mat[i, j]=0}
else {
if ((inf_CI<=epsi & sup_CI>=epsi) || (inf_CI<= -epsi & sup_CI >=-epsi)){ad_mat[i, j]=0}
else {ad_mat[i, j]=1}
}
}
}
colnames(ad_mat)<-d
rownames(ad_mat)<-d
G <- igraph::graph.adjacency(ad_mat, mode = "undirected", diag = F)
igraph::V(G)$color[1:5] <- "dodgerblue3" # Consumer Discretionary
igraph::V(G)$color[6:10] <- "gold2" # Energy
igraph::V(G)$color[11:15] <- "forestgreen" # Financials
igraph::V(G)$color[16:20] <- "lightblue2" # Consumer Staples
igraph::V(G)$color[21:25] <- "lightgray" # Telecommunications Services
igraph::V(G)$color[26:30] <- "indianred1" # Health Care
igraph::V(G)$color[31:35] <- "lightsalmon1" # Industrials
igraph::V(G)$color[36:40] <- "moccasin" # Information Technology
igraph::V(G)$color[41:45] <- "midnightblue" # Materials
igraph::V(G)$color[46:50] <- "chocolate1" # Utilities
return(plot(G, vertex.size = 10, vertex.label.cex = 0.50, vertex.label.color = "black"))
}
manipulate(
AdjacencyMatrix_Graph(alpha = a, matr = cor_mat, epsi = e),
a = slider(.00000001, 0.5, .00000001, "alpha", .00000001),
e = slider(.0, 0.8, .0, "epsi", .001)
)
# Spearman correlation -----------------------------------------------------
library(igraph)
library(manipulate)
# Correlation matrix (Spearman) and bootstrap
corr_mat2 <- cor(data_mat, method = "spearman")
colnames(corr_mat2)<-d
rownames(corr_mat2)<-d
B = 1000
b_vec2 = rep(NA, B)
for (i in 1:B){
idx <- sample(1:1257, replace = T)
R_star = data_mat[idx, ]
delta = sqrt(1257)*max(abs(cor(R_star, method = "spearman")-corr_mat2))
b_vec2[i] = delta
}
F_n2 = ecdf(b_vec2)
plot(F_n2, col = "blue", xlab = "Bootstrap vector of Delta", ylab = "ECDF", main = "Ecdf of the bootstrap vector")
# Adjacency Matrix and graph creation with dynamic plot
AdjacencyMatrix_Graph <- function(alpha, matr, epsi){
n = nrow(matr)
m = ncol(matr)
ad_mat = matrix(NA, n, m)
t_alpha = quantile(F_n2, 1-(alpha/choose(n, 2)))
for (i in 1:n){
for (j in 1:m){
inf_CI = matr[i, j] - t_alpha*(1/sqrt(1257))
sup_CI = matr[i, j] + t_alpha*(1/sqrt(1257))
if (i==j){ad_mat[i, j]=0}
else {
if ((inf_CI<=epsi & sup_CI>=epsi) || (inf_CI<= -epsi & sup_CI >=-epsi)){ad_mat[i, j]=0}
else {ad_mat[i, j]=1}
}
}
}
colnames(ad_mat)<-d
rownames(ad_mat)<-d
G <- igraph::graph.adjacency(ad_mat, mode = "undirected", diag = F)
igraph::V(G)$color[1:5] <- "dodgerblue3" # Consumer Discretionary
igraph::V(G)$color[6:10] <- "gold2" # Energy
igraph::V(G)$color[11:15] <- "forestgreen" # Financials
igraph::V(G)$color[16:20] <- "lightblue2" # Consumer Staples
igraph::V(G)$color[21:25] <- "lightgray" # Telecommunications Services
igraph::V(G)$color[26:30] <- "indianred1" # Health Care
igraph::V(G)$color[31:35] <- "lightsalmon1" # Industrials
igraph::V(G)$color[36:40] <- "moccasin" # Information Technology
igraph::V(G)$color[41:45] <- "midnightblue" # Materials
igraph::V(G)$color[46:50] <- "chocolate1" # Utilities
return(plot(G, vertex.size = 10, vertex.label.cex = 0.50, vertex.label.color = "black"))
}
manipulate(
AdjacencyMatrix_Graph(alpha = a, matr = corr_mat2, epsi = e),
a = slider(.00000001, 0.5, .00000001, "alpha", .00000001),
e = slider(.0, 0.8, .0, "epsi", .001)
)
|
# Fit an elastic-net model (10-fold CV, MAE loss) on the breast data set
# and append the fitted path to a text log.
library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Lasso/breast.csv",head=T,sep=",")
# Response is column 1; the predictor matrix starts at column 4.
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
# NOTE(review): alpha = 0.7 but the output file is named *_075 — confirm
# whether alpha should be 0.75 or the file name is stale.
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.7,family="gaussian",standardize=TRUE)
sink('./breast_075.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
/Model/EN/Lasso/breast/breast_075.R
|
no_license
|
esbgkannan/QSMART
|
R
| false
| false
| 343
|
r
|
library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Lasso/breast.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.7,family="gaussian",standardize=TRUE)
sink('./breast_075.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
# Poststratified predictions by congressional district: draw posterior
# predictions from fitted multilevel models over ACS demographic cells,
# then population-weight them into one MRP estimate per district per draw.
library(tidyverse)
library(foreach)
library(brms)
library(glue)
library(fs)
source("00_functions.R")
# data ---
# ACS gender x age x education counts per congressional district, 2017 only.
cd_strat_raw <- read_rds("data/output/by-cd_ACS_gender-age-education.Rds") %>%
transform_vars() %>%
filter(year == 2017)
# model ---
outcomes <- c("ahca", "budg", "immr", "visa", "tcja", "sanc", "turn")
for (sd in c("hanretty", "01", "default")) {
cellfiles <- dir_ls(glue("data/output/reg/stan_glmer/sd-{sd}"), recurse = TRUE)
# NOTE(review): outcomes_s is computed but never used; the inner loop is
# hard-coded to "turn" — confirm whether it should iterate over outcomes_s.
outcomes_s <- unique(str_extract(cellfiles, str_c("(", str_c(outcomes, collapse = "|"), ")")))
for (y in "turn") {
var_name <- glue("n_{y}")
fit <- read_rds(glue("data/output/reg/stan_glmer/sd-{sd}/by-cd_{y}_g-a-e-t_glmer.Rds"))
# predicted ----
# Keep populated cells only; the cell count becomes the weight column n_<y>.
all_strat <- cd_strat_raw %>%
filter(count > 0) %>%
mutate(!!sym(var_name) := count) %>%
select(cd, male, age, educ, matches("n_"))
# wide predictions by CD
cds_loop <- unique(all_strat$cd)
for (cd_i in cds_loop) {
cd_strat <- filter(all_strat, cd == cd_i)
# no longer works with stan_glmer
# (original author's note above — presumably about the summary/transform
# arguments; confirm against the class of the fitted object)
p_draws <- posterior_linpred(fit,
newdata = cd_strat,
transform = TRUE,
allow_new_levels = TRUE,
summary = FALSE) %>%
t() %>%
as_tibble() %>%
mutate(cell = 1:n()) %>%
bind_cols(cd_strat, .) %>%
pivot_longer(cols = matches("^V"), names_to = "iter") %>%
mutate(iter = parse_number(iter))
# Population-weighted mean over cells = poststratified estimate per draw.
cd_est <- group_by(p_draws, cd, iter) %>%
summarize(p_mrp = sum(value*.data[[var_name]]) / sum(.data[[var_name]]))
write_rds(cd_est,
glue("data/output/CDs/stan_glmer/sd-{sd}/{y}-vshare/{cd_i}_gae_glmer-preds.Rds"))
}
}
}
|
/11_predict-regs.R
|
no_license
|
kuriwaki/MRP-target
|
R
| false
| false
| 1,757
|
r
|
# Generate MRP (multilevel regression + poststratification) posterior draws
# by congressional district (CD) from previously fitted models, writing one
# .Rds of per-iteration estimates per CD.
library(tidyverse)
library(foreach)
library(brms)
library(glue)
library(fs)
source("00_functions.R")
# data ---
# Poststratification table: population counts by CD x gender x age x education.
cd_strat_raw <- read_rds("data/output/by-cd_ACS_gender-age-education.Rds") %>%
  transform_vars() %>%
  filter(year == 2017)
# model ---
outcomes <- c("ahca", "budg", "immr", "visa", "tcja", "sanc", "turn")
# Loop over the random-effect SD prior settings used when fitting upstream.
for (sd in c("hanretty", "01", "default")) {
  cellfiles <- dir_ls(glue("data/output/reg/stan_glmer/sd-{sd}"), recurse = TRUE)
  outcomes_s <- unique(str_extract(cellfiles, str_c("(", str_c(outcomes, collapse = "|"), ")")))
  # NOTE(review): only "turn" is processed even though outcomes_s is computed
  # above -- presumably a deliberate restriction of this run; confirm.
  for (y in "turn") {
    var_name <- glue("n_{y}")
    fit <- read_rds(glue("data/output/reg/stan_glmer/sd-{sd}/by-cd_{y}_g-a-e-t_glmer.Rds"))
    # prediced ----
    # Keep populated cells and copy `count` into an outcome-named column.
    all_strat <- cd_strat_raw %>%
      filter(count > 0) %>%
      mutate(!!sym(var_name) := count) %>%
      select(cd, male, age, educ, matches("n_"))
    # wide predictions by CD
    cds_loop <- unique(all_strat$cd)
    for (cd_i in cds_loop) {
      cd_strat <- filter(all_strat, cd == cd_i)
      # no longer works with stan_glmer
      # Cell-level posterior probabilities (draws x cells), transposed to one
      # row per cell, reshaped long to one row per cell x iteration.
      p_draws <- posterior_linpred(fit,
                                   newdata = cd_strat,
                                   transform = TRUE,
                                   allow_new_levels = TRUE,
                                   summary = FALSE) %>%
        t() %>%
        as_tibble() %>%
        mutate(cell = 1:n()) %>%
        bind_cols(cd_strat, .) %>%
        pivot_longer(cols = matches("^V"), names_to = "iter") %>%
        mutate(iter = parse_number(iter))
      # Population-weighted mean per draw = one MRP estimate per iteration.
      cd_est <- group_by(p_draws, cd, iter) %>%
        summarize(p_mrp = sum(value*.data[[var_name]]) / sum(.data[[var_name]]))
      write_rds(cd_est,
                glue("data/output/CDs/stan_glmer/sd-{sd}/{y}-vshare/{cd_i}_gae_glmer-preds.Rds"))
    }
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.shapes.R
\name{shapes.coords2points}
\alias{shapes.coords2points}
\title{shapes.coords2points}
\usage{
shapes.coords2points(DT, proj.env.name = NULL)
}
\arguments{
\item{DT}{data.table$long, data.table$lat}
\item{proj.env.name}{Projection environment name (Example: denver)}
}
\description{
Convert data table with lats and longs to spatial points.
}
\keyword{consolidate}
\keyword{shapes}
|
/man/shapes.coords2points.Rd
|
no_license
|
erikbjohn/methods.shapes
|
R
| false
| true
| 477
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.shapes.R
\name{shapes.coords2points}
\alias{shapes.coords2points}
\title{shapes.coords2points}
\usage{
shapes.coords2points(DT, proj.env.name = NULL)
}
\arguments{
\item{DT}{data.table$long, data.table$lat}
\item{proj.env.name}{Projection environment name (Example: denver)}
}
\description{
Convert data table with lats and longs to spatial points.
}
\keyword{consolidate}
\keyword{shapes}
|
#' Apply recurring proportional dividend drops to a simulated price path.
#'
#' Starting at index `dt` and repeating every `period` steps, all prices
#' from the dividend date onward are reduced by `D` times the price at the
#' dividend date.
#'
#' @param proc   Numeric vector: the simulated price process.
#' @param D      Dividend as a fraction of the price at the dividend date.
#' @param dt     Index of the first dividend date.
#' @param period Spacing (in steps) between dividend dates. Defaults to 60,
#'               the value previously hard-coded, so existing callers are
#'               unaffected.
#' @return Adjusted copy of `proc`; the input is not modified.
adjust_for_dividend <- function(proc, D, dt, period = 60) {
  stopifnot(is.numeric(proc), is.numeric(D), is.numeric(dt),
            is.numeric(period), period > 0)
  n <- length(proc)
  dat_gbm <- proc
  counter <- dt
  while (counter < n) {
    # NOTE(review): dat_gbm[counter] is re-read on every iteration, so after
    # i == counter the remaining elements are reduced by D times the
    # already-adjusted price. Preserved as-is -- confirm this is intended
    # rather than a single pre-dividend reference price.
    for (i in counter:n) {
      dat_gbm[i] <- dat_gbm[i] - D * dat_gbm[counter]
    }
    counter <- counter + period
  }
  dat_gbm
}
|
/adjust_for_dividend.R
|
no_license
|
KeimaCheck/dividend_simulation
|
R
| false
| false
| 228
|
r
|
# Return a copy of the price path `proc` with a proportional dividend drop
# of size D applied at index `dt` and every 60 steps thereafter. From each
# dividend date onward, prices are reduced by D times the (current) value
# at that date. The input vector is left untouched.
adjust_for_dividend <- function(proc, D, dt) {
  adjusted <- proc
  len <- length(adjusted)
  ex_date <- dt
  while (ex_date < len) {
    for (idx in ex_date:len) {
      adjusted[idx] <- adjusted[idx] - D * adjusted[ex_date]
    }
    ex_date <- ex_date + 60
  }
  adjusted
}
|
#' ---
#' title: "Prior probabilities in the interpretation of 'some': analysis of model predictions and empirical data"
#' author: "Judith Degen"
#' date: "November 28, 2014"
#' ---
# Analysis script: merges model predictions with slider-based prior
# measurements and empirical posteriors, computes goodness-of-fit across
# SpeakerOptimality settings, and writes diagnostic plots.
library(ggplot2)
theme_set(theme_bw(18))
setwd("/Users/titlis/cogsci/projects/stanford/projects/thegricean_sinking-marbles/models/complex_prior/smoothed_unbinned15/results/")
source("rscripts/helpers.r")
#' get model predictions
# NOTE(review): this load is immediately overwritten by the read.table
# below; `mp` is rebuilt from the TSV and re-saved further down.
load("data/mp-sliderpriors.RData")
mp = read.table("data/parsed_priorslider_results.tsv", quote="", sep="\t", header=T)
nrow(mp)
head(mp)
summary(mp)
mp$Item = as.factor(gsub("_"," ",mp$Item))
# get prior expectations
priorexpectations = read.table(file="~/cogsci/projects/stanford/projects/thegricean_sinking-marbles/experiments/12_sinking-marbles-prior15/results/data/expectations.txt",sep="\t", header=T, quote="")
row.names(priorexpectations) = paste(priorexpectations$effect,priorexpectations$object)
head(priorexpectations)
mp$PriorExpectation_number = priorexpectations[as.character(mp$Item),]$expectation
priorprobs = read.table(file="~/cogsci/projects/stanford/projects/thegricean_sinking-marbles/experiments/12_sinking-marbles-prior15/results/data/smoothed_15marbles_priors_withnames.txt",sep="\t", header=T, quote="")
head(priorprobs)
row.names(priorprobs) = paste(priorprobs$effect,priorprobs$object)
# NOTE(review): melt() and ddply() come from reshape2/plyr, presumably
# loaded via helpers.r -- confirm.
mpriorprobs = melt(priorprobs, id.vars=c("effect", "object"))
head(mpriorprobs)
row.names(mpriorprobs) = paste(mpriorprobs$effect,mpriorprobs$object,mpriorprobs$variable)
mp$PriorProbability_number = mpriorprobs[paste(as.character(mp$Item)," X",mp$State,sep=""),]$value
mp$AllPriorProbability_number = priorprobs[paste(as.character(mp$Item)),]$X15
head(mp)
# Slider-based priors: per-state normalized responses and expectations.
load("~/cogsci/projects/stanford/projects/thegricean_sinking-marbles/experiments/23_sinking-marbles-prior-sliders-exactly/results/data/agr-normresponses.RData")
row.names(agr) = paste(agr$slider_id, agr$Item)
expectations = ddply(agr, .(Item), summarise, expectation = sum(slider_id*normresponse))
row.names(expectations) = expectations$Item
mp$PriorProbability_slider = agr[paste(mp$State, mp$Item),]$normresponse
mp$PriorProbability_slider_ymin = agr[paste(mp$State, mp$Item),]$YMin
mp$PriorProbability_slider_ymax = agr[paste(mp$State, mp$Item),]$YMax
mp$PriorExpectation_slider = expectations[as.character(mp$Item),]$expectation
save(mp, file="data/mp-sliderpriors.RData")
# Plot model posterior vs. slider prior for the all-15 ("upper bound") state.
ub = droplevels(subset(mp, State == 15))
agr = aggregate(PosteriorProbability ~ Item + SpeakerOptimality + PriorProbability_slider + PriorProbability_slider_ymin + PriorProbability_slider_ymax, FUN=mean, data=ub)
agr$CILow = aggregate(PosteriorProbability ~ Item + SpeakerOptimality, FUN=ci.low, data=ub)$PosteriorProbability
agr$CIHigh = aggregate(PosteriorProbability ~ Item + SpeakerOptimality, FUN=ci.high, data=ub)$PosteriorProbability
agr$YMin = agr$PosteriorProbability - agr$CILow
agr$YMax = agr$PosteriorProbability + agr$CIHigh
ggplot(agr, aes(x=PriorProbability_slider, y=PosteriorProbability, color=as.factor(SpeakerOptimality))) +
  geom_point() +
  #geom_line() +
  geom_errorbar(aes(ymin=YMin,ymax=YMax)) +
  #geom_errorbarh(aes(xmin=PriorProbability_slider_ymin,xmax=PriorProbability_slider_ymax)) +
  facet_wrap(~SpeakerOptimality)
ggsave("graphs/mp-priorsliders.pdf",height=7)
######################
# get empirical state posteriors:
load("/Users/titlis/cogsci/projects/stanford/projects/thegricean_sinking-marbles/experiments/16_sinking-marbles-sliders-certain/results/data/r.RData")
head(r)
# because posteriors come in 4 bins, make Bin variable for model prediction dataset:
mp$Proportion = as.factor(ifelse(mp$State == 0, "0", ifelse(mp$State == 15, "100", ifelse(mp$State < 8, "1-50", "51-99"))))
some = droplevels(subset(r, quantifier == "Some"))
agr = aggregate(normresponse ~ Item + Proportion,data=r,FUN=mean)
agr$CILow = aggregate(normresponse ~ Item + Proportion,data=r, FUN=ci.low)$normresponse
agr$CIHigh = aggregate(normresponse ~ Item + Proportion,data=r,FUN=ci.high)$normresponse
agr$YMin = agr$normresponse - agr$CILow
agr$YMax = agr$normresponse + agr$CIHigh
row.names(agr) = paste(agr$Item, agr$Proportion)
mp$PosteriorProbability_empirical = agr[paste(mp$Item,mp$Proportion),]$normresponse
mp$PosteriorProbability_empirical_ymin = agr[paste(mp$Item,mp$Proportion),]$YMin
mp$PosteriorProbability_empirical_ymax = agr[paste(mp$Item,mp$Proportion),]$YMax
# Predicted vs. empirical posterior probability (State == 15 only).
ub = droplevels(subset(mp, State == 15))
agr = aggregate(PosteriorProbability ~ Item + SpeakerOptimality + PosteriorProbability_empirical + PosteriorProbability_empirical_ymin + PosteriorProbability_empirical_ymax, FUN=mean, data=ub)
agr$CILow = aggregate(PosteriorProbability ~ Item + SpeakerOptimality, FUN=ci.low, data=ub)$PosteriorProbability
agr$CIHigh = aggregate(PosteriorProbability ~ Item + SpeakerOptimality, FUN=ci.high, data=ub)$PosteriorProbability
agr$YMin = agr$PosteriorProbability - agr$CILow
agr$YMax = agr$PosteriorProbability + agr$CIHigh
ggplot(agr, aes(x=PosteriorProbability, y=PosteriorProbability_empirical, color=as.factor(SpeakerOptimality))) +
  geom_point() +
  #geom_line() +
  geom_errorbarh(aes(xmin=YMin,xmax=YMax)) +
  geom_errorbar(aes(ymin=PosteriorProbability_empirical_ymin,ymax=PosteriorProbability_empirical_ymax)) +
  geom_abline(intercept=0,slope=1,color="gray60") +
  facet_wrap(~SpeakerOptimality)
ggsave("graphs/mp-empirical-priorsliders.pdf",height=7)
# Goodness of fit (MSE, r, R2) per SpeakerOptimality, sorted three ways.
library(hydroGOF)
test = ddply(agr, .(SpeakerOptimality), summarise, mse=gof(PosteriorProbability, PosteriorProbability_empirical)["MSE",],r=gof(PosteriorProbability, PosteriorProbability_empirical)["r",],R2=gof(PosteriorProbability, PosteriorProbability_empirical)["R2",])
test = test[order(test[,c("mse")]),]
head(test,10)
test = test[order(test[,c("r")],decreasing=T),]
head(test,10)
test = test[order(test[,c("R2")],decreasing=T),]
head(test,10)
head(some)
#plot empirical against predicted expectations for "some"
load("/Users/titlis/cogsci/projects/stanford/projects/thegricean_sinking-marbles/experiments/13_sinking-marbles-priordv-15/results/data/r.RData")
summary(r)
r$Item = as.factor(paste(r$effect, r$object))
agr = aggregate(ProportionResponse ~ Item + quantifier, data=r, FUN=mean)
agr$Quantifier = as.factor(tolower(agr$quantifier))
row.names(agr) = paste(agr$Item, agr$Quantifier)
mp$PosteriorExpectation_empirical = agr[paste(mp$Item,"some"),]$ProportionResponse*15
agr = aggregate(PosteriorProbability ~ Item + State + SpeakerOptimality + PriorExpectation_slider + PosteriorExpectation_empirical, FUN=mean, data=mp)
# Predicted posterior expectation = sum over states of state * probability.
pexpectations = ddply(agr, .(Item,SpeakerOptimality,PriorExpectation_slider,PosteriorExpectation_empirical), summarise, PosteriorExpectation_predicted=sum(State*PosteriorProbability))
head(pexpectations)
some=pexpectations
library(hydroGOF)
test = ddply(some, .(SpeakerOptimality), summarise, mse=gof(PosteriorExpectation_predicted, PosteriorExpectation_empirical)["MSE",],r=gof(PosteriorExpectation_predicted, PosteriorExpectation_empirical)["r",],R2=gof(PosteriorExpectation_predicted, PosteriorExpectation_empirical)["R2",])
test = test[order(test[,c("mse")]),]
head(test,10)
test = test[order(test[,c("r")],decreasing=T),]
head(test,10)
test = test[order(test[,c("R2")],decreasing=T),]
head(test,10)
head(some)
ggplot(some, aes(x=PriorExpectation_slider, y=PosteriorExpectation_empirical, color=as.factor(SpeakerOptimality))) +
  geom_point() +
  geom_smooth(method='lm') +
  #geom_line() +
  #geom_errorbarh(aes(xmin=YMin,xmax=YMax)) +
  #geom_errorbar(aes(ymin=PosteriorProbability_empirical_ymin,ymax=PosteriorProbability_empirical_ymax)) +
  geom_abline(intercept=0,slope=1,color="gray60") +
  facet_wrap(~SpeakerOptimality)
ggsave("graphs/mp-exps-priorsliders.pdf",height=7)
ggplot(some, aes(x=PosteriorExpectation_predicted, y=PosteriorExpectation_empirical, color=as.factor(SpeakerOptimality))) +
  geom_point() +
  geom_smooth(method='lm') +
  #geom_line() +
  #geom_errorbarh(aes(xmin=YMin,xmax=YMax)) +
  #geom_errorbar(aes(ymin=PosteriorProbability_empirical_ymin,ymax=PosteriorProbability_empirical_ymax)) +
  geom_abline(intercept=0,slope=1,color="gray60") +
  facet_wrap(~SpeakerOptimality)
ggsave("graphs/mp-empirical-exps-priorsliders.pdf",height=7)
# NOTE(review): this final ggsave reuses the filename of the previous plot,
# overwriting it -- confirm whether a distinct filename was intended.
ggplot(some[some$SpeakerOptimality == 3,], aes(x=PriorExpectation_slider, y=PosteriorExpectation_empirical, color=as.factor(SpeakerOptimality))) +
  geom_point() +
  geom_smooth() +
  scale_color_manual(values=c("darkred")) +
  scale_x_continuous(limits=c(0,15), breaks=seq(1,15,by=2), name="Prior expectation") +
  scale_y_continuous(limits=c(0,15), breaks=seq(1,15,by=2), name="Posterior expectation")
ggsave("graphs/mp-empirical-exps-priorsliders.pdf",height=7)
|
/models/complex_prior/smoothed_unbinned15/results/rscripts/model-predictions-priorsliders.r
|
permissive
|
thegricean/sinking-marbles
|
R
| false
| false
| 8,645
|
r
|
#' ---
#' title: "Prior probabilities in the interpretation of 'some': analysis of model predictions and empirical data"
#' author: "Judith Degen"
#' date: "November 28, 2014"
#' ---
# Analysis script: merges model predictions with slider-based prior
# measurements and empirical posteriors, computes goodness-of-fit across
# SpeakerOptimality settings, and writes diagnostic plots.
library(ggplot2)
theme_set(theme_bw(18))
setwd("/Users/titlis/cogsci/projects/stanford/projects/thegricean_sinking-marbles/models/complex_prior/smoothed_unbinned15/results/")
source("rscripts/helpers.r")
#' get model predictions
# NOTE(review): this load is immediately overwritten by the read.table
# below; `mp` is rebuilt from the TSV and re-saved further down.
load("data/mp-sliderpriors.RData")
mp = read.table("data/parsed_priorslider_results.tsv", quote="", sep="\t", header=T)
nrow(mp)
head(mp)
summary(mp)
mp$Item = as.factor(gsub("_"," ",mp$Item))
# get prior expectations
priorexpectations = read.table(file="~/cogsci/projects/stanford/projects/thegricean_sinking-marbles/experiments/12_sinking-marbles-prior15/results/data/expectations.txt",sep="\t", header=T, quote="")
row.names(priorexpectations) = paste(priorexpectations$effect,priorexpectations$object)
head(priorexpectations)
mp$PriorExpectation_number = priorexpectations[as.character(mp$Item),]$expectation
priorprobs = read.table(file="~/cogsci/projects/stanford/projects/thegricean_sinking-marbles/experiments/12_sinking-marbles-prior15/results/data/smoothed_15marbles_priors_withnames.txt",sep="\t", header=T, quote="")
head(priorprobs)
row.names(priorprobs) = paste(priorprobs$effect,priorprobs$object)
# NOTE(review): melt() and ddply() come from reshape2/plyr, presumably
# loaded via helpers.r -- confirm.
mpriorprobs = melt(priorprobs, id.vars=c("effect", "object"))
head(mpriorprobs)
row.names(mpriorprobs) = paste(mpriorprobs$effect,mpriorprobs$object,mpriorprobs$variable)
mp$PriorProbability_number = mpriorprobs[paste(as.character(mp$Item)," X",mp$State,sep=""),]$value
mp$AllPriorProbability_number = priorprobs[paste(as.character(mp$Item)),]$X15
head(mp)
# Slider-based priors: per-state normalized responses and expectations.
load("~/cogsci/projects/stanford/projects/thegricean_sinking-marbles/experiments/23_sinking-marbles-prior-sliders-exactly/results/data/agr-normresponses.RData")
row.names(agr) = paste(agr$slider_id, agr$Item)
expectations = ddply(agr, .(Item), summarise, expectation = sum(slider_id*normresponse))
row.names(expectations) = expectations$Item
mp$PriorProbability_slider = agr[paste(mp$State, mp$Item),]$normresponse
mp$PriorProbability_slider_ymin = agr[paste(mp$State, mp$Item),]$YMin
mp$PriorProbability_slider_ymax = agr[paste(mp$State, mp$Item),]$YMax
mp$PriorExpectation_slider = expectations[as.character(mp$Item),]$expectation
save(mp, file="data/mp-sliderpriors.RData")
# Plot model posterior vs. slider prior for the all-15 ("upper bound") state.
ub = droplevels(subset(mp, State == 15))
agr = aggregate(PosteriorProbability ~ Item + SpeakerOptimality + PriorProbability_slider + PriorProbability_slider_ymin + PriorProbability_slider_ymax, FUN=mean, data=ub)
agr$CILow = aggregate(PosteriorProbability ~ Item + SpeakerOptimality, FUN=ci.low, data=ub)$PosteriorProbability
agr$CIHigh = aggregate(PosteriorProbability ~ Item + SpeakerOptimality, FUN=ci.high, data=ub)$PosteriorProbability
agr$YMin = agr$PosteriorProbability - agr$CILow
agr$YMax = agr$PosteriorProbability + agr$CIHigh
ggplot(agr, aes(x=PriorProbability_slider, y=PosteriorProbability, color=as.factor(SpeakerOptimality))) +
  geom_point() +
  #geom_line() +
  geom_errorbar(aes(ymin=YMin,ymax=YMax)) +
  #geom_errorbarh(aes(xmin=PriorProbability_slider_ymin,xmax=PriorProbability_slider_ymax)) +
  facet_wrap(~SpeakerOptimality)
ggsave("graphs/mp-priorsliders.pdf",height=7)
######################
# get empirical state posteriors:
load("/Users/titlis/cogsci/projects/stanford/projects/thegricean_sinking-marbles/experiments/16_sinking-marbles-sliders-certain/results/data/r.RData")
head(r)
# because posteriors come in 4 bins, make Bin variable for model prediction dataset:
mp$Proportion = as.factor(ifelse(mp$State == 0, "0", ifelse(mp$State == 15, "100", ifelse(mp$State < 8, "1-50", "51-99"))))
some = droplevels(subset(r, quantifier == "Some"))
agr = aggregate(normresponse ~ Item + Proportion,data=r,FUN=mean)
agr$CILow = aggregate(normresponse ~ Item + Proportion,data=r, FUN=ci.low)$normresponse
agr$CIHigh = aggregate(normresponse ~ Item + Proportion,data=r,FUN=ci.high)$normresponse
agr$YMin = agr$normresponse - agr$CILow
agr$YMax = agr$normresponse + agr$CIHigh
row.names(agr) = paste(agr$Item, agr$Proportion)
mp$PosteriorProbability_empirical = agr[paste(mp$Item,mp$Proportion),]$normresponse
mp$PosteriorProbability_empirical_ymin = agr[paste(mp$Item,mp$Proportion),]$YMin
mp$PosteriorProbability_empirical_ymax = agr[paste(mp$Item,mp$Proportion),]$YMax
# Predicted vs. empirical posterior probability (State == 15 only).
ub = droplevels(subset(mp, State == 15))
agr = aggregate(PosteriorProbability ~ Item + SpeakerOptimality + PosteriorProbability_empirical + PosteriorProbability_empirical_ymin + PosteriorProbability_empirical_ymax, FUN=mean, data=ub)
agr$CILow = aggregate(PosteriorProbability ~ Item + SpeakerOptimality, FUN=ci.low, data=ub)$PosteriorProbability
agr$CIHigh = aggregate(PosteriorProbability ~ Item + SpeakerOptimality, FUN=ci.high, data=ub)$PosteriorProbability
agr$YMin = agr$PosteriorProbability - agr$CILow
agr$YMax = agr$PosteriorProbability + agr$CIHigh
ggplot(agr, aes(x=PosteriorProbability, y=PosteriorProbability_empirical, color=as.factor(SpeakerOptimality))) +
  geom_point() +
  #geom_line() +
  geom_errorbarh(aes(xmin=YMin,xmax=YMax)) +
  geom_errorbar(aes(ymin=PosteriorProbability_empirical_ymin,ymax=PosteriorProbability_empirical_ymax)) +
  geom_abline(intercept=0,slope=1,color="gray60") +
  facet_wrap(~SpeakerOptimality)
ggsave("graphs/mp-empirical-priorsliders.pdf",height=7)
# Goodness of fit (MSE, r, R2) per SpeakerOptimality, sorted three ways.
library(hydroGOF)
test = ddply(agr, .(SpeakerOptimality), summarise, mse=gof(PosteriorProbability, PosteriorProbability_empirical)["MSE",],r=gof(PosteriorProbability, PosteriorProbability_empirical)["r",],R2=gof(PosteriorProbability, PosteriorProbability_empirical)["R2",])
test = test[order(test[,c("mse")]),]
head(test,10)
test = test[order(test[,c("r")],decreasing=T),]
head(test,10)
test = test[order(test[,c("R2")],decreasing=T),]
head(test,10)
head(some)
#plot empirical against predicted expectations for "some"
load("/Users/titlis/cogsci/projects/stanford/projects/thegricean_sinking-marbles/experiments/13_sinking-marbles-priordv-15/results/data/r.RData")
summary(r)
r$Item = as.factor(paste(r$effect, r$object))
agr = aggregate(ProportionResponse ~ Item + quantifier, data=r, FUN=mean)
agr$Quantifier = as.factor(tolower(agr$quantifier))
row.names(agr) = paste(agr$Item, agr$Quantifier)
mp$PosteriorExpectation_empirical = agr[paste(mp$Item,"some"),]$ProportionResponse*15
agr = aggregate(PosteriorProbability ~ Item + State + SpeakerOptimality + PriorExpectation_slider + PosteriorExpectation_empirical, FUN=mean, data=mp)
# Predicted posterior expectation = sum over states of state * probability.
pexpectations = ddply(agr, .(Item,SpeakerOptimality,PriorExpectation_slider,PosteriorExpectation_empirical), summarise, PosteriorExpectation_predicted=sum(State*PosteriorProbability))
head(pexpectations)
some=pexpectations
library(hydroGOF)
test = ddply(some, .(SpeakerOptimality), summarise, mse=gof(PosteriorExpectation_predicted, PosteriorExpectation_empirical)["MSE",],r=gof(PosteriorExpectation_predicted, PosteriorExpectation_empirical)["r",],R2=gof(PosteriorExpectation_predicted, PosteriorExpectation_empirical)["R2",])
test = test[order(test[,c("mse")]),]
head(test,10)
test = test[order(test[,c("r")],decreasing=T),]
head(test,10)
test = test[order(test[,c("R2")],decreasing=T),]
head(test,10)
head(some)
ggplot(some, aes(x=PriorExpectation_slider, y=PosteriorExpectation_empirical, color=as.factor(SpeakerOptimality))) +
  geom_point() +
  geom_smooth(method='lm') +
  #geom_line() +
  #geom_errorbarh(aes(xmin=YMin,xmax=YMax)) +
  #geom_errorbar(aes(ymin=PosteriorProbability_empirical_ymin,ymax=PosteriorProbability_empirical_ymax)) +
  geom_abline(intercept=0,slope=1,color="gray60") +
  facet_wrap(~SpeakerOptimality)
ggsave("graphs/mp-exps-priorsliders.pdf",height=7)
ggplot(some, aes(x=PosteriorExpectation_predicted, y=PosteriorExpectation_empirical, color=as.factor(SpeakerOptimality))) +
  geom_point() +
  geom_smooth(method='lm') +
  #geom_line() +
  #geom_errorbarh(aes(xmin=YMin,xmax=YMax)) +
  #geom_errorbar(aes(ymin=PosteriorProbability_empirical_ymin,ymax=PosteriorProbability_empirical_ymax)) +
  geom_abline(intercept=0,slope=1,color="gray60") +
  facet_wrap(~SpeakerOptimality)
ggsave("graphs/mp-empirical-exps-priorsliders.pdf",height=7)
# NOTE(review): this final ggsave reuses the filename of the previous plot,
# overwriting it -- confirm whether a distinct filename was intended.
ggplot(some[some$SpeakerOptimality == 3,], aes(x=PriorExpectation_slider, y=PosteriorExpectation_empirical, color=as.factor(SpeakerOptimality))) +
  geom_point() +
  geom_smooth() +
  scale_color_manual(values=c("darkred")) +
  scale_x_continuous(limits=c(0,15), breaks=seq(1,15,by=2), name="Prior expectation") +
  scale_y_continuous(limits=c(0,15), breaks=seq(1,15,by=2), name="Posterior expectation")
ggsave("graphs/mp-empirical-exps-priorsliders.pdf",height=7)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IGAPI.R
\name{IG_create_open_pos}
\alias{IG_create_open_pos}
\title{IG API Create one or more OTC positions}
\usage{
IG_create_open_pos(headers,
url = "https://demo-api.ig.com/gateway/deal/positions/otc",
dealReference = "", currency_code = "AUD", direction = "BUY", epic,
expiry = "-", force_open = "true", guaranteed_stop = "false",
level = "", limit_distance = "", limit_level = "",
order_type = "MARKET", size, stop_distance = "", stop_level = "",
trailingStop = "false", trailingStopIncrement = "",
timeInForce = "FILL_OR_KILL", timeo = 5)
}
\arguments{
\item{headers}{Object returned from \code{IG_Auth}}
\item{url}{API URL}
\item{dealReference}{A user-defined reference identifying the submission of the order}
\item{currency_code}{Currency. Restricted to available instrument currencies}
\item{direction}{Deal direction ('BUY' or 'SELL')}
\item{epic}{Instrument epic identifier}
\item{expiry}{Instrument expiry}
\item{force_open}{True if force open is required}
\item{guaranteed_stop}{True if a guaranteed stop is required}
\item{level}{Closing deal level}
\item{limit_distance}{Limit distance}
\item{limit_level}{Limit level}
\item{order_type}{'LIMIT', 'MARKET', 'QUOTE'}
\item{size}{Deal size}
\item{stop_distance}{Stop distance}
\item{stop_level}{Stop level}
\item{trailingStop}{Whether the stop has to be moved towards the current level in case of a favourable trade}
\item{trailingStopIncrement}{increment step in pips for the trailing stop}
\item{timeInForce}{'EXECUTE_AND_ELIMINATE' or 'FILL_OR_KILL'}
\item{timeo}{number of tries}
}
\value{
A \code{data.frame} containing the deal reference of the transaction
}
\description{
Create one or more OTC positions
}
\examples{
HEADERS = IG_Auth(" ","APIdemo1", " ")
order = IG_create_open_pos(headers = HEADERS, url ="https://demo-api.ig.com/gateway/deal/positions/otc",
dealReference = 'audcad001', currency_code = 'AUD', direction = 'BUY', epic = 'CS.D.AUDUSD.CFD.IP',
expiry = '-', force_open = 'true', guaranteed_stop = 'false', level = '', limit_distance = '', limit_level = '',
order_type = 'MARKET', size = 3,
stop_distance = 10, stop_level = '', trailingStop = 'false', trailingStopIncrement = '',
timeInForce = 'FILL_OR_KILL', timeo=5)
}
|
/man/IG_create_open_pos.Rd
|
permissive
|
ivanliu1989/RQuantAPI
|
R
| false
| true
| 2,306
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/IGAPI.R
\name{IG_create_open_pos}
\alias{IG_create_open_pos}
\title{IG API Create one or more OTC positions}
\usage{
IG_create_open_pos(headers,
url = "https://demo-api.ig.com/gateway/deal/positions/otc",
dealReference = "", currency_code = "AUD", direction = "BUY", epic,
expiry = "-", force_open = "true", guaranteed_stop = "false",
level = "", limit_distance = "", limit_level = "",
order_type = "MARKET", size, stop_distance = "", stop_level = "",
trailingStop = "false", trailingStopIncrement = "",
timeInForce = "FILL_OR_KILL", timeo = 5)
}
\arguments{
\item{headers}{Object returned from \code{IG_Auth}}
\item{url}{API URL}
\item{dealReference}{A user-defined reference identifying the submission of the order}
\item{currency_code}{Currency. Restricted to available instrument currencies}
\item{direction}{Deal direction ('BUY' or 'SELL')}
\item{epic}{Instrument epic identifier}
\item{expiry}{Instrument expiry}
\item{force_open}{True if force open is required}
\item{guaranteed_stop}{True if a guaranteed stop is required}
\item{level}{Closing deal level}
\item{limit_distance}{Limit distance}
\item{limit_level}{Limit level}
\item{order_type}{'LIMIT', 'MARKET', 'QUOTE'}
\item{size}{Deal size}
\item{stop_distance}{Stop distance}
\item{stop_level}{Stop level}
\item{trailingStop}{Whether the stop has to be moved towards the current level in case of a favourable trade}
\item{trailingStopIncrement}{increment step in pips for the trailing stop}
\item{timeInForce}{'EXECUTE_AND_ELIMINATE' or 'FILL_OR_KILL'}
\item{timeo}{number of tries}
}
\value{
A \code{data.frame} containing the deal reference of the transaction
}
\description{
Create one or more OTC positions
}
\examples{
HEADERS = IG_Auth(" ","APIdemo1", " ")
order = IG_create_open_pos(headers = HEADERS, url ="https://demo-api.ig.com/gateway/deal/positions/otc",
dealReference = 'audcad001', currency_code = 'AUD', direction = 'BUY', epic = 'CS.D.AUDUSD.CFD.IP',
expiry = '-', force_open = 'true', guaranteed_stop = 'false', level = '', limit_distance = '', limit_level = '',
order_type = 'MARKET', size = 3,
stop_distance = 10, stop_level = '', trailingStop = 'false', trailingStopIncrement = '',
timeInForce = 'FILL_OR_KILL', timeo=5)
}
|
#!/usr/bin/env Rscript
#
# plot-roc.R <stats TSV> <destination image file> [<comma-separated "aligner" names to include> [title]]
#
# plots a pseudo-ROC that allows the comparison of different alignment methods and their mapping quality calculations
# the format is clarified in the map-sim script, and should be a table (tab separated) of:
# correct mq score aligner
# where "correct" is 0 or 1 depending on whether the alignment is correct or not and "aligner" labels the mapping method
#
# This is not a true ROC because we are not purely plotting the binary classification performance of
# each of the methods' mapping quality calculation over the same set of candidate alignments.
# Rather, we are mixing both the alignment sensitivity of the method with the MQ classification performance.
# As such we do not ever achieve 100% sensitivity, as we have effectively scaled the y axis (TPR) by the total
# sensitivity of each mapper.
# Install any missing dependencies, then load them.
list.of.packages <- c("tidyverse", "ggrepel", "svglite")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
require("tidyverse")
require("ggrepel")
require("scales") # For squish
# Read in the combined toil-vg stats.tsv, listing:
# correct, mapq, aligner (really graph name), read name, count
dat <- read.table(commandArgs(TRUE)[1], header=T)
if (! ("count" %in% names(dat))) {
  # If the count column is not present, add it
  dat$count <- rep(1, nrow(dat))
}
if (length(commandArgs(TRUE)) > 2) {
  # A set of aligners to plot is specified. Parse it.
  aligner.set <- unlist(strsplit(commandArgs(TRUE)[3], ","))
  # Subset the data to those aligners
  dat <- dat[dat$aligner %in% aligner.set,]
  # And restrict the aligner factor levels to just the ones in the set
  dat$aligner <- factor(dat$aligner, levels=aligner.set)
}
# Determine title
title <- ''
if (length(commandArgs(TRUE)) > 3) {
  title <- commandArgs(TRUE)[4]
}
# Determine the order of aligners, based on sorting in a dash-separated tag aware manner
aligner.names <- levels(dat$aligner)
name.lists <- aligner.names %>% (function(name) map(name, (function(x) as.list(unlist(strsplit(x, "-"))))))
# Transpose name fragments into a list of vectors for each position, with NAs when tag lists end early
max.parts <- max(sapply(name.lists, length))
name.cols <- list()
for (i in 1:max.parts) {
  name.cols[[i]] <- sapply(name.lists, function(x) if (length(x) >= i) { x[[i]] } else { NA })
}
name.order <- do.call(order,name.cols)
aligner.names <- aligner.names[name.order]
dat$aligner <- factor(dat$aligner, levels=aligner.names)
name.lists <- name.lists[name.order]
# Determine colors for aligners
bold.colors <- c( "#e31a1c", "#6600cc", "#f8b901", "#d2e703", "#73c604", "#31c606", "#08c65d", "#09c49d", "#0bacc4", "#0c6dc5")
light.colors <- c( "#fb9a99","#e5ccff", "#fedb76", "#f1fd79", "#c5fc7c", "#9bfb7f", "#84fab9", "#86f9e1", "#89eaf8", "#8cc5f8")
# We have to go through both lists together when assigning colors, because pe and non-pe versions of a condition need corresponding colors.
cursor <- 1
# This will map from non-pe condition name string to color index.
colors <- c()
for (i in 1:length(name.lists)) {
  # For each name
  name.parts <- unlist(name.lists[[i]])
  if (name.parts[length(name.parts)] == "pe") {
    # Drop the pe tag if present
    name.parts <- name.parts[-c(length(name.parts))]
  }
  if (name.parts[length(name.parts)] == "se") {
    # Drop the se tag if present
    name.parts <- name.parts[-c(length(name.parts))]
  }
  # Join up to a string again
  name <- paste(name.parts, collapse='-')
  if (! name %in% names(colors)) {
    # No colors assigned for this pair of conditions, so assign them.
    if (cursor > length(bold.colors)) {
      write(colors, stderr())
      write(aligner.names, stderr())
      stop('Ran out of colors! Too many conditions!')
    }
    # We always assign pe and non-pe colors in lockstep, whichever we see first.
    # We need two entries for -se and no tag which are the same.
    new.colors <- c(bold.colors[cursor], light.colors[cursor], light.colors[cursor])
    names(new.colors) <- c(paste(name, 'pe', sep='-'), paste(name, 'se', sep='-'), name)
    colors <- c(colors, new.colors)
    cursor <- cursor + 1
  }
}
# Make colors a vector in the same order as the actually-used aligner names
colors <- colors[aligner.names]
# NOTE(review): dat$bin is computed but not referenced below -- confirm
# whether it is still needed.
dat$bin <- cut(dat$mq, c(-Inf,seq(0,60,1),Inf))
# Cumulative TPR/FPR per aligner, sweeping the MQ threshold from high to low.
dat.roc <- dat %>%
  mutate(Positive = (correct == 1) * count, Negative = (correct == 0) * count) %>%
  group_by(aligner, mq) %>%
  summarise(Positive = sum(Positive), Negative = sum(Negative)) %>%
  arrange(-mq) %>%
  mutate(Total=sum(Positive+Negative)) %>%
  mutate(TPR = cumsum(Positive) / Total, FPR = cumsum(Negative) / Total)
# We want smart scales that know how tiny a rate of things we can care about
total.reads <- max(dat.roc$Total)
min.log10 <- floor(log10(1/total.reads))
max.log10 <- 0
# Work out a set of bounds to draw the plot on
range.log10 <- min.log10 : max.log10
range.unlogged = 10^range.log10
dat.plot <- ggplot(dat.roc, aes( x= FPR, y = TPR, color = aligner, label=mq)) +
  geom_line() + geom_text_repel(data = subset(dat.roc, mq %% 60 == 0), size=3.5, point.padding=unit(0.7, "lines"), segment.alpha=I(1/2.5), show.legend=FALSE) +
  geom_point(aes(size=Positive+Negative)) +
  scale_color_manual(values=colors, guide=guide_legend(title=NULL, ncol=2)) +
  scale_size_continuous("number", guide=guide_legend(title=NULL, ncol=4)) +
  scale_x_log10(limits=c(range.unlogged[1],range.unlogged[length(range.unlogged)]), breaks=range.unlogged, oob=squish) +
  geom_vline(xintercept=1/total.reads) + # vertical line at one wrong read
  theme_bw() +
  ggtitle(title)
# NOTE(review): the title is already applied via ggtitle(title) above, so
# this conditional re-application looks redundant -- confirm intent.
if (title != '') {
  # And a title
  dat.plot + ggtitle(title)
}
filename <- commandArgs(TRUE)[2]
ggsave(filename, height=4, width=7)
|
/scripts/plotting/plot-roc-gbwts.R
|
no_license
|
clairemerot/giraffe-sv-paper
|
R
| false
| false
| 6,070
|
r
|
#!/usr/bin/env Rscript
#
# plot-roc.R <stats TSV> <destination image file> [<comma-separated "aligner" names to include> [title]]
#
# plots a pseudo-ROC that allows the comparison of different alignment methods and their mapping quality calculations
# the format is clarified in the map-sim script, and should be a table (tab separated) of:
# correct mq score aligner
# where "correct" is 0 or 1 depending on whether the alignnment is correct or not and "aligner" labels the mapping method
#
# This is not a true ROC because we are not purely plotting the binary classification performance of
# each of the methods' mapping quality calculation over the same set of candidate alignments.
# Rather, we are mixing both the alignment sensitivity of the method with the MQ classification performance.
# As such we do not ever achieve 100% sensitivity, as we have effectively scaled the y axis (TPR) by the total
# sensitivity of each mapper.
# Install any dependencies that are missing, then attach them.
list.of.packages <- c("tidyverse", "ggrepel", "svglite")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if (length(new.packages) > 0) install.packages(new.packages)
# Use library() rather than require(): require() only returns FALSE on
# failure, so a missing package would otherwise surface later as a
# confusing "could not find function" error.
library(tidyverse)
library(ggrepel)
library(scales) # For squish
# Read in the combined toil-vg stats.tsv, listing:
# correct, mapq, aligner (really graph name), read name, count
dat <- read.table(commandArgs(TRUE)[1], header=TRUE)
if (! ("count" %in% names(dat))) {
  # If the count column is not present, add it (each row counts once)
  dat$count <- rep(1, nrow(dat))
}
if (length(commandArgs(TRUE)) > 2) {
  # Optional third argument: comma-separated list of aligner names to keep.
  aligner.set <- unlist(strsplit(commandArgs(TRUE)[3], ","))
  # Keep only rows for those aligners, and restrict the factor levels to
  # exactly the requested set (in the requested order).
  dat <- dat[dat$aligner %in% aligner.set, ]
  dat$aligner <- factor(dat$aligner, levels = aligner.set)
}
# Optional fourth argument: plot title (empty string when absent).
title <- if (length(commandArgs(TRUE)) > 3) commandArgs(TRUE)[4] else ''
# Determine the order of aligners, based on sorting in a dash-separated tag aware manner
aligner.names <- levels(dat$aligner)
# Split each aligner name on "-" into a list of tag fragments.
name.lists <- aligner.names %>% (function(name) map(name, (function(x) as.list(unlist(strsplit(x, "-"))))))
# Transpose name fragments into a list of vectors for each position, with NAs when tag lists end early
max.parts <- max(sapply(name.lists, length))
name.cols <- list()
for (i in 1:max.parts) {
  name.cols[[i]] <- sapply(name.lists, function(x) if (length(x) >= i) { x[[i]] } else { NA })
}
# Sort lexicographically by fragment position (order() puts NA last), then
# apply the same permutation to the names, the factor levels, and the
# fragment lists, so all three stay aligned for the color assignment below.
name.order <- do.call(order,name.cols)
aligner.names <- aligner.names[name.order]
dat$aligner <- factor(dat$aligner, levels=aligner.names)
name.lists <- name.lists[name.order]
# Determine colors for aligners.
# Each base condition consumes one slot from both palettes: bold for the
# "-pe" variant and light for the "-se"/untagged variants.
bold.colors <- c( "#e31a1c", "#6600cc", "#f8b901", "#d2e703", "#73c604", "#31c606", "#08c65d", "#09c49d", "#0bacc4", "#0c6dc5")
light.colors <- c( "#fb9a99","#e5ccff", "#fedb76", "#f1fd79", "#c5fc7c", "#9bfb7f", "#84fab9", "#86f9e1", "#89eaf8", "#8cc5f8")
# We have to go through both lists together when assigning colors, because pe and non-pe versions of a condition need corresponding colors.
cursor <- 1
# This will map from non-pe condition name string to color index.
colors <- c()
for (i in 1:length(name.lists)) {
  # For each name
  name.parts <- unlist(name.lists[[i]])
  if (name.parts[length(name.parts)] == "pe") {
    # Drop the pe tag if present
    name.parts <- name.parts[-c(length(name.parts))]
  }
  if (name.parts[length(name.parts)] == "se") {
    # Drop the se tag if present
    name.parts <- name.parts[-c(length(name.parts))]
  }
  # Join up to a string again
  name <- paste(name.parts, collapse='-')
  if (! name %in% names(colors)) {
    # No colors assigned for this pair of conditions, so assign them.
    if (cursor > length(bold.colors)) {
      # Dump state before aborting so the failure is diagnosable.
      write(colors, stderr())
      write(aligner.names, stderr())
      stop('Ran out of colors! Too many conditions!')
    }
    # We always assign pe and non-pe colors in lockstep, whichever we see first.
    # We need two entries for -se and no tag which are the same.
    new.colors <- c(bold.colors[cursor], light.colors[cursor], light.colors[cursor])
    names(new.colors) <- c(paste(name, 'pe', sep='-'), paste(name, 'se', sep='-'), name)
    colors <- c(colors, new.colors)
    cursor <- cursor + 1
  }
}
# Make colors a vector in the same order as the actually-used aligner names
colors <- colors[aligner.names]
# Bucket mapping quality into unit-wide bins (plus open-ended tails).
# NOTE(review): `bin` is not referenced again in this file — appears unused.
dat$bin <- cut(dat$mq, c(-Inf,seq(0,60,1),Inf))
# Build the pseudo-ROC table: per (aligner, mq) counts of correct (Positive)
# and incorrect (Negative) reads, then cumulative TPR/FPR scanning from the
# highest mapping quality downward (arrange(-mq)). After group_by(aligner, mq)
# and one summarise(), the frame remains grouped by aligner, so Total and the
# cumsum() calls are per aligner.
dat.roc <- dat %>%
  mutate(Positive = (correct == 1) * count, Negative = (correct == 0) * count) %>%
  group_by(aligner, mq) %>%
  summarise(Positive = sum(Positive), Negative = sum(Negative)) %>%
  arrange(-mq) %>%
  mutate(Total=sum(Positive+Negative)) %>%
  mutate(TPR = cumsum(Positive) / Total, FPR = cumsum(Negative) / Total)
# We want smart scales that know how tiny a rate of things we can care about
total.reads <- max(dat.roc$Total)
min.log10 <- floor(log10(1/total.reads))
max.log10 <- 0
# Work out a set of bounds to draw the plot on
range.log10 <- min.log10 : max.log10
range.unlogged = 10^range.log10
# Pseudo-ROC: FPR vs TPR per aligner, log-scaled x axis, point size showing
# the number of reads at each MQ value, labels only at MQ multiples of 60.
dat.plot <- ggplot(dat.roc, aes( x= FPR, y = TPR, color = aligner, label=mq)) +
  geom_line() + geom_text_repel(data = subset(dat.roc, mq %% 60 == 0), size=3.5, point.padding=unit(0.7, "lines"), segment.alpha=I(1/2.5), show.legend=FALSE) +
  geom_point(aes(size=Positive+Negative)) +
  scale_color_manual(values=colors, guide=guide_legend(title=NULL, ncol=2)) +
  scale_size_continuous("number", guide=guide_legend(title=NULL, ncol=4)) +
  scale_x_log10(limits=c(range.unlogged[1],range.unlogged[length(range.unlogged)]), breaks=range.unlogged, oob=squish) +
  geom_vline(xintercept=1/total.reads) + # vertical line at one wrong read
  theme_bw() +
  ggtitle(title)
# NOTE(review): the title is already applied above; this conditional re-add
# is redundant — its auto-printed value just becomes the "last plot" that
# ggsave() saves. TODO confirm intent.
if (title != '') {
  # And a title
  dat.plot + ggtitle(title)
}
# Second positional argument: destination image file; ggsave() with no plot
# argument saves the last plot displayed.
filename <- commandArgs(TRUE)[2]
ggsave(filename, height=4, width=7)
|
context("Get clinical data as a table")

# 'tableClinData' is further exercised through the other plot
# functionalities, and via the tests for getClinDT in clinUtils,
# so coverage here is deliberately minimal.

test_that("A table is successfully created for clinical data", {
  dataPatients <- data.frame(USUBJID = c("ID1", "ID2", "ID3", "ID4"))
  res <- tableClinData(data = dataPatients)
  expect_s3_class(res, "datatables")
})

test_that("A warning is generated if the variable for the patient profile path is not available", {
  dataPatients <- data.frame(USUBJID = c("ID1", "ID2", "ID3", "ID4"))
  expect_warning(
    tableClinData(data = dataPatients, pathVar = "varName"),
    "Variable with path to subject profile: .* is not available"
  )
})

test_that("The variable for the patient profile path is successfully included in a clinical data table", {
  dataPatients <- data.frame(
    USUBJID = c("ID1", "ID2", "ID3", "ID4"),
    path = sprintf("<a href=\"./path-to-report-%d\">label</a>", 1:4),
    stringsAsFactors = FALSE
  )
  res <- tableClinData(data = dataPatients, pathVar = "path")
  expect_s3_class(res, "datatables")
})

test_that("The variable for the patient profile path is successfully specified as expandable in a clinical data table", {
  dataPatients <- data.frame(
    USUBJID = c("ID1", "ID2", "ID3", "ID4"),
    path = sprintf("<a href=\"./path-to-report-%d\">label</a>", 1:4),
    stringsAsFactors = FALSE
  )
  res <- tableClinData(data = dataPatients, pathVar = "path", pathExpand = TRUE)
  expect_s3_class(res, "datatables")
})
|
/package/clinDataReview/tests/testthat/test_tableClinData.R
|
no_license
|
Lion666/clinDataReview
|
R
| false
| false
| 1,568
|
r
|
context("Get clinical data as a table")

# 'tableClinData' is further exercised through the other plot
# functionalities, and via the tests for getClinDT in clinUtils,
# so coverage here is deliberately minimal.

test_that("A table is successfully created for clinical data", {
  dataPatients <- data.frame(USUBJID = c("ID1", "ID2", "ID3", "ID4"))
  res <- tableClinData(data = dataPatients)
  expect_s3_class(res, "datatables")
})

test_that("A warning is generated if the variable for the patient profile path is not available", {
  dataPatients <- data.frame(USUBJID = c("ID1", "ID2", "ID3", "ID4"))
  expect_warning(
    tableClinData(data = dataPatients, pathVar = "varName"),
    "Variable with path to subject profile: .* is not available"
  )
})

test_that("The variable for the patient profile path is successfully included in a clinical data table", {
  dataPatients <- data.frame(
    USUBJID = c("ID1", "ID2", "ID3", "ID4"),
    path = sprintf("<a href=\"./path-to-report-%d\">label</a>", 1:4),
    stringsAsFactors = FALSE
  )
  res <- tableClinData(data = dataPatients, pathVar = "path")
  expect_s3_class(res, "datatables")
})

test_that("The variable for the patient profile path is successfully specified as expandable in a clinical data table", {
  dataPatients <- data.frame(
    USUBJID = c("ID1", "ID2", "ID3", "ID4"),
    path = sprintf("<a href=\"./path-to-report-%d\">label</a>", 1:4),
    stringsAsFactors = FALSE
  )
  res <- tableClinData(data = dataPatients, pathVar = "path", pathExpand = TRUE)
  expect_s3_class(res, "datatables")
})
|
################## DataObserver : SERVER ################
library(shiny)
library(ggplot2)
library(ggthemes)
library(doBy)
library(dplyr)
library(plyr)
#
# shinyServer(func=function(input, output) {
# load(paste("Risk.all",input$Date,".RData", sep=""))
# # There may be some variables to rename
# try(risk.all$year <- risk.all$years)
# try(risk.all$region <- risk.all$region.x)
#
# attach(risk.all)
# #load("Risk.all.RData")
# Tx.Complete <- sum(complete.cases(risk.all)/nrow(risk.all))*100
# })
#
# Define server logic for random distribution application
# Server for the DataObserver Shiny app: loads the panel data set saved in
# "data.work.RData" and renders exploratory plots (points, jitter, boxplots,
# spaghetti plots, facetted variants, missing-value rates) for the variable
# chosen in input$Y, highlighting the region chosen in input$R. input$A sets
# the point/line alpha; input$range clips the y axis (as % of the data range).
shinyServer(function(input, output, session) {
  # Command to activate interaction between ui and the file to load
  # NOTE(review): load() restores an object named `data.work` into this
  # reactive's local environment; the reactive returns that loaded object.
  data.work <- reactive({
    load("data.work.RData")
    data.work
  })
  # NOTE(review): assigning output$* inside observe() works, but outputs are
  # conventionally defined at the top level of the server function.
  observe({
    # Keep the variable selector in sync with the data set's columns.
    updateSelectInput(session, "Y",
                      choices = colnames(data.work())
    )
    # Text summary of the whole data set.
    output$summary <- renderPrint({
      summary(data.work())
    })
    # Show the first "n" observations
    output$view <- renderTable({
      head(data.work(), n = 10)
    })
    # output$MyTable <- renderDataTable({
    # load(paste("Risk.all",input$Date,".RData", sep=""))
    # # There may be some variables to rename
    # #try(risk.all$year <- risk.all$years)
    # #try(risk.all$region <- risk.all$region.x)
    #
    # print(head(risk.all))
    # })
    # Overplotted scatter of input$Y by year; selected region4 in pink.
    output$PointPlot <- renderPlot({
      R <- input$R
      A <- input$A
      #load(paste("Risk.all",input$Date,".RData", sep=""))
      #risk.all <- data.work()
      # There may be some variables to rename
      #try(risk.all$year <- risk.all$years)
      #try(risk.all$region <- risk.all$region.x)
      # y limits as percentages of the selected variable's observed range.
      # Many thanks to Thibault for those lines
      minval <- -input$range[1]*min(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
      maxval <- input$range[2]*max(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
      # Point Plot
      Plot.Point <- ggplot(data.work(), aes_string(x="year", y=input$Y)) +
        geom_point(color = "grey", alpha=A) +
        geom_point(dat= subset(data.work(), region4==R), alpha=0.50, color="pink") +
        coord_cartesian(ylim = c(minval,maxval)) + guides(colour=FALSE)+
        ggtitle(paste("Overplotted points (region4",R, "higlighted)")) +
        theme_classic()
      Plot.Point
    })
    # Same scatter with jitter to reduce overplotting.
    output$JitterPlot <- renderPlot({
      R <- input$R
      A <- input$A
      # Many thanks to Thibault for those lines
      minval <- -input$range[1]*min(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
      maxval <- input$range[2]*max(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
      # Point Plot with Jitter
      Plot.Jitter <- ggplot(data.work(), aes_string(x="year", y=input$Y)) +
        geom_jitter(color = "grey", alpha=A) +
        geom_jitter(dat= subset(data.work(), region4==R), alpha=0.50, color="pink") +
        coord_cartesian(ylim = c(minval,maxval)) +
        guides(colour=FALSE) +
        ggtitle(paste("Jittered points (region4",R, "higlighted)")) +
        theme_classic()
      Plot.Jitter
    })
    # Yearly boxplots; selected region4 overlaid in pink.
    output$BoxPlot <- renderPlot({
      R <- input$R
      # Many thanks to Thibault for those lines
      minval <- -input$range[1]*min(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
      maxval <- input$range[2]*max(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
      #BoxPlot
      Plot.Box <- ggplot(data = data.work(), aes_string(x="year", y=input$Y)) +
        geom_boxplot(outlier.colour= "grey", color= "darkgrey", fill="grey") +
        geom_boxplot(data = subset(data.work(), region4 == R), outlier.colour= "pink", color= "darkgrey", fill="pink") +
        coord_cartesian(ylim = c(minval,maxval)) +
        guides(colour=FALSE, fill=FALSE)+
        ggtitle(paste("Boxplots")) +
        theme_classic()
      Plot.Box
    })
    # Per-individual trajectories ("spaghetti"); selected region4 in pink.
    output$ParaPlot <- renderPlot({
      R <- input$R
      A <- input$A
      # Many thanks to Thibault for those lines
      minval <- -input$range[1]*min(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
      maxval <- input$range[2]*max(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
      #Parallel plot
      Plot.Tot <- ggplot() +
        geom_line(dat= data.work(), alpha=A, color="black",
                  aes_string(x="year", y=input$Y, group="factor(ident)" )) +
        geom_line(dat= subset(data.work(), region4==R), alpha=0.05, color="pink",
                  aes_string(x="year", y=input$Y, group="factor(ident)" )) +
        guides(colour=FALSE) +
        coord_cartesian(ylim = c(minval,maxval)) +
        ggtitle(paste("Parallel Spaghetti Plot (region4",R, "higlighted)")) +
        theme_classic()
      Plot.Tot
    })
    # Spaghetti plot facetted by region4.
    output$ParaMulti <- renderPlot({
      A <- input$A
      # Many thanks to Thibault for those lines
      minval <- -input$range[1]*min(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
      maxval <- input$range[2]*max(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
      #Multiple parallel plot
      Plot.Multi <- ggplot() +
        geom_line(dat= data.work(), alpha=A, color="black", aes_string(x="year", y=input$Y, group="factor(ident)" )) +
        guides(colour=FALSE) +
        coord_cartesian(ylim = c(minval,maxval))
      Plot.Multi + facet_wrap(~region4) +
        ggtitle(paste("Multiple Parallel Spaghetti Plot")) +
        theme_classic()
    })
    # Boxplots facetted by region4.
    output$BoxMulti <- renderPlot({
      A <- input$A
      # Many thanks to Thibault for those lines
      minval <- -input$range[1]*min(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
      maxval <- input$range[2]*max(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
      #Multiple Box-plot plot
      Box.Multi <- ggplot(data = data.work(), aes_string(x="year", y=input$Y)) +
        geom_boxplot(outlier.colour= "grey", color= "darkgrey", fill="grey") +
        coord_cartesian(ylim = c(minval,maxval)) +
        guides(colour=FALSE, fill=FALSE)
      #
      # #adding number of obs + mean per region4
      # data.stat <- ddply(data = data.work(), .(region4),
      #                    summarize,
      #                    n=paste("n =", nrow(data.work)))
      #
      Box.Multi + facet_wrap(~region4) +
        ggtitle(paste("Multiple Box Plot")) +
        # + geom_text(data = data.stat, aes(x = 1.8, y = 5, label = n),
        #             colour = "black", inherit.aes =FALSE, parse = FALSE) +
        theme_classic()
    })
    #Missing values
    # Per-year rate of missing values for a fixed set of risk variables;
    # the curve shown is the one matching input$Y (column "<Y>.tx").
    output$Missing <- renderPlot({
      minval <- input$range[1]
      maxval <- input$range[2]
      risk.sum <- summaryBy(AR+theta+theta1+theta2+SigmaProf+Profit+AR+RP+RP.pc~year, data = data.work(),
                            FUN = function(x) { c(miss = sum(is.na(x)),
                                                  tx = round(sum(is.na(x))/length(x), digits=3)) } )
      # We need an intermediate variable name
      Mamissvar <- paste(input$Y,".tx", sep="")
      Plot.miss <- ggplot(risk.sum, aes_string(x="year", y=Mamissvar, group=1)) +
        geom_point(color ="black") +
        geom_line(color= "grey") +
        coord_cartesian(ylim = c(minval,maxval)) +
        ggtitle("Missing Values rate (in % of the sample)") +
        theme_classic()
      Plot.miss
    })
    # #Line plot and quantile plots require the list of all the variables
    # output$LinePlot <- renderPlot({
    #
    #   # Many thanks to Thibault for those lines
    #   minval <- -input$range[1]*min(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
    #   maxval <- input$range[2]*max(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
    #
    #   risk.sum <- summaryBy(sau+sfp+quotalait+eqvltotal+laitproduit+concvlr+prixconcvlr+
    #                           prixlaiteriemoyen+charpot+hasfpirri+Tyear+ETPyear+DELTAyear+
    #                           AR+theta+theta1+theta2+Profit+AR~year, data = data.work(),
    #                         FUN = function(x) { c(med = median(x, na.rm=TRUE), mean = mean(x)) } )
    #
    #
    #   # We need an intermediate variable name
    #   Mavar <- paste(input$Y,".med", sep="")
    #
    #   Plot.Line <- ggplot(risk.sum, aes_string(x="year", y=Mavar, group=1)) +
    #     geom_point(color ="black", size=1) +
    #     geom_line(color= "grey", size=1) +
    #     coord_cartesian(ylim = c(minval,maxval)) +
    #     ggtitle(paste("Median Values, ( x% of complete cases)")) +
    #     # ggtitle(paste("Median Values, (", round(Tx.Complete, digits = 2)," % of complete cases)")) +
    #     theme_classic()
    #   Plot.Line
    #
    # })
    #
    # output$QuantilePlot <- renderPlot({
    #
    #   # Many thanks to Thibault for those lines
    #   minval <- -input$range[1]*min(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
    #   maxval <- input$range[2]*max(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
    #
    #   #Function that computes median and mean values !
    #   risk.sum <- summaryBy(sau+sfp+quotalait+eqvltotal+laitproduit+concvlr+prixconcvlr+
    #                           prixlaiteriemoyen+charpot+hasfpirri+Tyear+ETPyear+DELTAyear+
    #                           AR+theta+theta1+theta2+Profit+AR~year, data = data.work(),
    #                         FUN = function(x) { c(med = median(x, na.rm=TRUE),
    #                                               mean = mean(x),
    #                                               sd= quantile(x,probs= c(0.05,0.95), names= FALSE, na.rm=TRUE)) } )
    #   # We need an intermediate variable name
    #   Mavar <- paste(input$Y,".med", sep="")
    #
    #   Plot.Line <- ggplot(risk.sum, aes_string(x="year", y=Mavar, group=1)) +
    #     geom_point(color ="black") +
    #     geom_line(color= "grey") +
    #     coord_cartesian(ylim = c(minval,maxval)) +
    #     ggtitle("Median Values") +
    #     theme_classic()
    #
    #   # We need intermediate variables names
    #   Mavar1 <- paste(input$Y,".sd1", sep="")
    #   Mavar2<- paste(input$Y,".sd2", sep="")
    #
    #   Plot.quantile <- Plot.Line + geom_pointrange(data = risk.sum, aes_string(ymin=Mavar1, ymax = Mavar2),
    #                                                color = "grey", size=1) +
    #     ggtitle("Median Values + quantiles") +
    #     theme_classic()
    #
    #   Plot.quantile
    # })
    #
  })
})
|
/Shiny/DataObserver/server.R
|
no_license
|
XtopheB/ProgsOptilait
|
R
| false
| false
| 10,417
|
r
|
################## DataObserver : SERVER ################
library(shiny)
library(ggplot2)
library(ggthemes)
library(doBy)
library(dplyr)
library(plyr)
#
# shinyServer(func=function(input, output) {
# load(paste("Risk.all",input$Date,".RData", sep=""))
# # There may be some variables to rename
# try(risk.all$year <- risk.all$years)
# try(risk.all$region <- risk.all$region.x)
#
# attach(risk.all)
# #load("Risk.all.RData")
# Tx.Complete <- sum(complete.cases(risk.all)/nrow(risk.all))*100
# })
#
# Define server logic for random distribution application
# Server for the DataObserver Shiny app: loads the panel data set saved in
# "data.work.RData" and renders exploratory plots (points, jitter, boxplots,
# spaghetti plots, facetted variants, missing-value rates) for the variable
# chosen in input$Y, highlighting the region chosen in input$R. input$A sets
# the point/line alpha; input$range clips the y axis (as % of the data range).
shinyServer(function(input, output, session) {
  # Command to activate interaction between ui and the file to load
  # NOTE(review): load() restores an object named `data.work` into this
  # reactive's local environment; the reactive returns that loaded object.
  data.work <- reactive({
    load("data.work.RData")
    data.work
  })
  # NOTE(review): assigning output$* inside observe() works, but outputs are
  # conventionally defined at the top level of the server function.
  observe({
    # Keep the variable selector in sync with the data set's columns.
    updateSelectInput(session, "Y",
                      choices = colnames(data.work())
    )
    # Text summary of the whole data set.
    output$summary <- renderPrint({
      summary(data.work())
    })
    # Show the first "n" observations
    output$view <- renderTable({
      head(data.work(), n = 10)
    })
    # output$MyTable <- renderDataTable({
    # load(paste("Risk.all",input$Date,".RData", sep=""))
    # # There may be some variables to rename
    # #try(risk.all$year <- risk.all$years)
    # #try(risk.all$region <- risk.all$region.x)
    #
    # print(head(risk.all))
    # })
    # Overplotted scatter of input$Y by year; selected region4 in pink.
    output$PointPlot <- renderPlot({
      R <- input$R
      A <- input$A
      #load(paste("Risk.all",input$Date,".RData", sep=""))
      #risk.all <- data.work()
      # There may be some variables to rename
      #try(risk.all$year <- risk.all$years)
      #try(risk.all$region <- risk.all$region.x)
      # y limits as percentages of the selected variable's observed range.
      # Many thanks to Thibault for those lines
      minval <- -input$range[1]*min(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
      maxval <- input$range[2]*max(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
      # Point Plot
      Plot.Point <- ggplot(data.work(), aes_string(x="year", y=input$Y)) +
        geom_point(color = "grey", alpha=A) +
        geom_point(dat= subset(data.work(), region4==R), alpha=0.50, color="pink") +
        coord_cartesian(ylim = c(minval,maxval)) + guides(colour=FALSE)+
        ggtitle(paste("Overplotted points (region4",R, "higlighted)")) +
        theme_classic()
      Plot.Point
    })
    # Same scatter with jitter to reduce overplotting.
    output$JitterPlot <- renderPlot({
      R <- input$R
      A <- input$A
      # Many thanks to Thibault for those lines
      minval <- -input$range[1]*min(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
      maxval <- input$range[2]*max(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
      # Point Plot with Jitter
      Plot.Jitter <- ggplot(data.work(), aes_string(x="year", y=input$Y)) +
        geom_jitter(color = "grey", alpha=A) +
        geom_jitter(dat= subset(data.work(), region4==R), alpha=0.50, color="pink") +
        coord_cartesian(ylim = c(minval,maxval)) +
        guides(colour=FALSE) +
        ggtitle(paste("Jittered points (region4",R, "higlighted)")) +
        theme_classic()
      Plot.Jitter
    })
    # Yearly boxplots; selected region4 overlaid in pink.
    output$BoxPlot <- renderPlot({
      R <- input$R
      # Many thanks to Thibault for those lines
      minval <- -input$range[1]*min(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
      maxval <- input$range[2]*max(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
      #BoxPlot
      Plot.Box <- ggplot(data = data.work(), aes_string(x="year", y=input$Y)) +
        geom_boxplot(outlier.colour= "grey", color= "darkgrey", fill="grey") +
        geom_boxplot(data = subset(data.work(), region4 == R), outlier.colour= "pink", color= "darkgrey", fill="pink") +
        coord_cartesian(ylim = c(minval,maxval)) +
        guides(colour=FALSE, fill=FALSE)+
        ggtitle(paste("Boxplots")) +
        theme_classic()
      Plot.Box
    })
    # Per-individual trajectories ("spaghetti"); selected region4 in pink.
    output$ParaPlot <- renderPlot({
      R <- input$R
      A <- input$A
      # Many thanks to Thibault for those lines
      minval <- -input$range[1]*min(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
      maxval <- input$range[2]*max(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
      #Parallel plot
      Plot.Tot <- ggplot() +
        geom_line(dat= data.work(), alpha=A, color="black",
                  aes_string(x="year", y=input$Y, group="factor(ident)" )) +
        geom_line(dat= subset(data.work(), region4==R), alpha=0.05, color="pink",
                  aes_string(x="year", y=input$Y, group="factor(ident)" )) +
        guides(colour=FALSE) +
        coord_cartesian(ylim = c(minval,maxval)) +
        ggtitle(paste("Parallel Spaghetti Plot (region4",R, "higlighted)")) +
        theme_classic()
      Plot.Tot
    })
    # Spaghetti plot facetted by region4.
    output$ParaMulti <- renderPlot({
      A <- input$A
      # Many thanks to Thibault for those lines
      minval <- -input$range[1]*min(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
      maxval <- input$range[2]*max(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
      #Multiple parallel plot
      Plot.Multi <- ggplot() +
        geom_line(dat= data.work(), alpha=A, color="black", aes_string(x="year", y=input$Y, group="factor(ident)" )) +
        guides(colour=FALSE) +
        coord_cartesian(ylim = c(minval,maxval))
      Plot.Multi + facet_wrap(~region4) +
        ggtitle(paste("Multiple Parallel Spaghetti Plot")) +
        theme_classic()
    })
    # Boxplots facetted by region4.
    output$BoxMulti <- renderPlot({
      A <- input$A
      # Many thanks to Thibault for those lines
      minval <- -input$range[1]*min(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
      maxval <- input$range[2]*max(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
      #Multiple Box-plot plot
      Box.Multi <- ggplot(data = data.work(), aes_string(x="year", y=input$Y)) +
        geom_boxplot(outlier.colour= "grey", color= "darkgrey", fill="grey") +
        coord_cartesian(ylim = c(minval,maxval)) +
        guides(colour=FALSE, fill=FALSE)
      #
      # #adding number of obs + mean per region4
      # data.stat <- ddply(data = data.work(), .(region4),
      #                    summarize,
      #                    n=paste("n =", nrow(data.work)))
      #
      Box.Multi + facet_wrap(~region4) +
        ggtitle(paste("Multiple Box Plot")) +
        # + geom_text(data = data.stat, aes(x = 1.8, y = 5, label = n),
        #             colour = "black", inherit.aes =FALSE, parse = FALSE) +
        theme_classic()
    })
    #Missing values
    # Per-year rate of missing values for a fixed set of risk variables;
    # the curve shown is the one matching input$Y (column "<Y>.tx").
    output$Missing <- renderPlot({
      minval <- input$range[1]
      maxval <- input$range[2]
      risk.sum <- summaryBy(AR+theta+theta1+theta2+SigmaProf+Profit+AR+RP+RP.pc~year, data = data.work(),
                            FUN = function(x) { c(miss = sum(is.na(x)),
                                                  tx = round(sum(is.na(x))/length(x), digits=3)) } )
      # We need an intermediate variable name
      Mamissvar <- paste(input$Y,".tx", sep="")
      Plot.miss <- ggplot(risk.sum, aes_string(x="year", y=Mamissvar, group=1)) +
        geom_point(color ="black") +
        geom_line(color= "grey") +
        coord_cartesian(ylim = c(minval,maxval)) +
        ggtitle("Missing Values rate (in % of the sample)") +
        theme_classic()
      Plot.miss
    })
    # #Line plot and quantile plots require the list of all the variables
    # output$LinePlot <- renderPlot({
    #
    #   # Many thanks to Thibault for those lines
    #   minval <- -input$range[1]*min(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
    #   maxval <- input$range[2]*max(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
    #
    #   risk.sum <- summaryBy(sau+sfp+quotalait+eqvltotal+laitproduit+concvlr+prixconcvlr+
    #                           prixlaiteriemoyen+charpot+hasfpirri+Tyear+ETPyear+DELTAyear+
    #                           AR+theta+theta1+theta2+Profit+AR~year, data = data.work(),
    #                         FUN = function(x) { c(med = median(x, na.rm=TRUE), mean = mean(x)) } )
    #
    #
    #   # We need an intermediate variable name
    #   Mavar <- paste(input$Y,".med", sep="")
    #
    #   Plot.Line <- ggplot(risk.sum, aes_string(x="year", y=Mavar, group=1)) +
    #     geom_point(color ="black", size=1) +
    #     geom_line(color= "grey", size=1) +
    #     coord_cartesian(ylim = c(minval,maxval)) +
    #     ggtitle(paste("Median Values, ( x% of complete cases)")) +
    #     # ggtitle(paste("Median Values, (", round(Tx.Complete, digits = 2)," % of complete cases)")) +
    #     theme_classic()
    #   Plot.Line
    #
    # })
    #
    # output$QuantilePlot <- renderPlot({
    #
    #   # Many thanks to Thibault for those lines
    #   minval <- -input$range[1]*min(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
    #   maxval <- input$range[2]*max(data.work()[,names(data.work())%in%as.character(input$Y)], na.rm=T)/100
    #
    #   #Function that computes median and mean values !
    #   risk.sum <- summaryBy(sau+sfp+quotalait+eqvltotal+laitproduit+concvlr+prixconcvlr+
    #                           prixlaiteriemoyen+charpot+hasfpirri+Tyear+ETPyear+DELTAyear+
    #                           AR+theta+theta1+theta2+Profit+AR~year, data = data.work(),
    #                         FUN = function(x) { c(med = median(x, na.rm=TRUE),
    #                                               mean = mean(x),
    #                                               sd= quantile(x,probs= c(0.05,0.95), names= FALSE, na.rm=TRUE)) } )
    #   # We need an intermediate variable name
    #   Mavar <- paste(input$Y,".med", sep="")
    #
    #   Plot.Line <- ggplot(risk.sum, aes_string(x="year", y=Mavar, group=1)) +
    #     geom_point(color ="black") +
    #     geom_line(color= "grey") +
    #     coord_cartesian(ylim = c(minval,maxval)) +
    #     ggtitle("Median Values") +
    #     theme_classic()
    #
    #   # We need intermediate variables names
    #   Mavar1 <- paste(input$Y,".sd1", sep="")
    #   Mavar2<- paste(input$Y,".sd2", sep="")
    #
    #   Plot.quantile <- Plot.Line + geom_pointrange(data = risk.sum, aes_string(ymin=Mavar1, ymax = Mavar2),
    #                                                color = "grey", size=1) +
    #     ggtitle("Median Values + quantiles") +
    #     theme_classic()
    #
    #   Plot.quantile
    # })
    #
  })
})
|
library(pdftools)
library(rvest)
library(stringr)

# Scrape the ACT (TCCS) draft tree species list: collect every species PDF
# linked from the category pages, parse each PDF into trait_name/value rows,
# and write the combined long-format table to TCCScanberraraw.csv.
url = "https://www.tccs.act.gov.au/city-living/trees/design-standards-23-draft-tree-species-list/"
url_list = c("native-15m", "native-10-15m", "native-less-than-10m", "introduced-15m", "introduced-10-15m", "introduced-less-than-10m", "conifers")
list_of_search_pages = str_c(url, url_list)

# Collect the href of every element with class "pdf" on each category page.
plant_url_all = character()
for (i in list_of_search_pages){
  webpage = read_html(i)
  plant_url_html = html_nodes(webpage, ".pdf")
  plant_url = html_attr(plant_url_html, name = "href")
  plant_url_all = append(plant_url_all, plant_url)
}

# Different format and jams the code
plant_url_all = plant_url_all[-125]

# Accumulator for the parsed trait rows of all species.
df = data.frame(trait_name = character(), value = character(), species = character())

for (Q in plant_url_all){
  # Local file name: everything after the ".../NNNNNN/" part of the URL.
  # BUG FIX: the original used plant_url_all[Q], i.e. indexed the unnamed URL
  # vector by the URL string itself, which yields NA; use the loop value Q.
  N = sub("(.*[0-9]{6})[/]", "", Q)
  # download the pdf
  download.file(Q, N, mode = "wb")
  # turn the pdf into text
  x = pdf_text(N)
  # take a new element every time R sees a "\r\n"
  x <- strsplit(x, "\r\n")
  # unlist the pdf
  x = x[[1]]
  # Drop everything from the run of spaces onward on each line (layout
  # padding). NOTE(review): the pattern may have lost repeated spaces when
  # the file was copied around — confirm against the original source.
  x = gsub(" .*", "", x)
  # Now take out the lines that = ""
  x1 = x[!(x=="")]
  # Paste the lines that start with (signified by ^) " " onto the previous line.
  for (i in c(1:length(x1))){
    # if the line starts with a large space,
    if (grepl("^ ", x1[i]) == T){
      # paste it onto the previous line
      x1[(i-1)] = paste0(x1[(i-1)], x1[i])
    }
  }
  # remove the large space starting lines
  x2 = x1[!(grepl("^ ", x1) == T)]
  x2 = str_squish(x2)
  # Split the string for botanical name and common name. This creates a matrix.
  x3 = as.data.frame(str_split(x2, ":", simplify = T), stringsAsFactors = F)
  # Rows whose first non-alphanumeric character is not a space hold trait
  # values rather than trait names.
  traits = which((str_match(x3$V1, "[^[:alnum:]]")!= " "))
  # Now we want to do three things:
  #1 Delete the pesky first character of each of these positions
  #2 Copy the element over to the second column
  #3 Replace the first column value with the value preceding it to get the trait right.
  for (i in traits){
    # 1
    x3$V1[i] = as.character(substring(x3$V1[i], 3))
    # 2
    x3$V2[i] = x3$V1[i]
    # 3
    x3$V1[i] = x3$V1[(i-1)]
  }
  # Get the number for the height over as well
  for (i in which(str_match(x3$V1, "[^[:digit:]]")== " ")){
    x3$V2[i] = x3$V1[i]
    # 3
    x3$V1[i] = x3$V1[(i-1)]
  }
  # Finally, remove the rows with nothing in the second column
  x4 = x3[which(x3$V2 != ""),]
  # And create a species column: row 1, column 2 holds the species name.
  x4$species = x3[1,2]
  names(x4) = c("trait_name", "value", "species")
  df = rbind(df, x4)
}

df$species = str_trim(df$species)
# Remove any five characters followed by ")" — a layout artefact.
# NOTE(review): the trailing unmatched ")" in the pattern is treated as a
# literal by R's TRE engine; confirm this is the intended pattern.
df$species = gsub("(....(.)))","", df$species)
df$species[which(df$species == "")] = "Prunus yedoensis"
df$study = "ACTplanting"
# BUG FIX: select() is a dplyr verb but dplyr is never loaded in this script;
# use base column selection to produce the same column order.
df = df[, c("study", "species", "trait_name", "value")]
write.csv(df, "TCCScanberraraw.csv", row.names = F)
|
/Scraped data/TCCS/TCSSpdfs.R
|
no_license
|
dcol2804/Traits-Database
|
R
| false
| false
| 3,228
|
r
|
library(pdftools)
library(rvest)
library(stringr)

# Scrape the ACT (TCCS) draft tree species list: collect every species PDF
# linked from the category pages, parse each PDF into trait_name/value rows,
# and write the combined long-format table to TCCScanberraraw.csv.
url = "https://www.tccs.act.gov.au/city-living/trees/design-standards-23-draft-tree-species-list/"
url_list = c("native-15m", "native-10-15m", "native-less-than-10m", "introduced-15m", "introduced-10-15m", "introduced-less-than-10m", "conifers")
list_of_search_pages = str_c(url, url_list)

# Collect the href of every element with class "pdf" on each category page.
plant_url_all = character()
for (i in list_of_search_pages){
  webpage = read_html(i)
  plant_url_html = html_nodes(webpage, ".pdf")
  plant_url = html_attr(plant_url_html, name = "href")
  plant_url_all = append(plant_url_all, plant_url)
}

# Different format and jams the code
plant_url_all = plant_url_all[-125]

# Accumulator for the parsed trait rows of all species.
df = data.frame(trait_name = character(), value = character(), species = character())

for (Q in plant_url_all){
  # Local file name: everything after the ".../NNNNNN/" part of the URL.
  # BUG FIX: the original used plant_url_all[Q], i.e. indexed the unnamed URL
  # vector by the URL string itself, which yields NA; use the loop value Q.
  N = sub("(.*[0-9]{6})[/]", "", Q)
  # download the pdf
  download.file(Q, N, mode = "wb")
  # turn the pdf into text
  x = pdf_text(N)
  # take a new element every time R sees a "\r\n"
  x <- strsplit(x, "\r\n")
  # unlist the pdf
  x = x[[1]]
  # Drop everything from the run of spaces onward on each line (layout
  # padding). NOTE(review): the pattern may have lost repeated spaces when
  # the file was copied around — confirm against the original source.
  x = gsub(" .*", "", x)
  # Now take out the lines that = ""
  x1 = x[!(x=="")]
  # Paste the lines that start with (signified by ^) " " onto the previous line.
  for (i in c(1:length(x1))){
    # if the line starts with a large space,
    if (grepl("^ ", x1[i]) == T){
      # paste it onto the previous line
      x1[(i-1)] = paste0(x1[(i-1)], x1[i])
    }
  }
  # remove the large space starting lines
  x2 = x1[!(grepl("^ ", x1) == T)]
  x2 = str_squish(x2)
  # Split the string for botanical name and common name. This creates a matrix.
  x3 = as.data.frame(str_split(x2, ":", simplify = T), stringsAsFactors = F)
  # Rows whose first non-alphanumeric character is not a space hold trait
  # values rather than trait names.
  traits = which((str_match(x3$V1, "[^[:alnum:]]")!= " "))
  # Now we want to do three things:
  #1 Delete the pesky first character of each of these positions
  #2 Copy the element over to the second column
  #3 Replace the first column value with the value preceding it to get the trait right.
  for (i in traits){
    # 1
    x3$V1[i] = as.character(substring(x3$V1[i], 3))
    # 2
    x3$V2[i] = x3$V1[i]
    # 3
    x3$V1[i] = x3$V1[(i-1)]
  }
  # Get the number for the height over as well
  for (i in which(str_match(x3$V1, "[^[:digit:]]")== " ")){
    x3$V2[i] = x3$V1[i]
    # 3
    x3$V1[i] = x3$V1[(i-1)]
  }
  # Finally, remove the rows with nothing in the second column
  x4 = x3[which(x3$V2 != ""),]
  # And create a species column: row 1, column 2 holds the species name.
  x4$species = x3[1,2]
  names(x4) = c("trait_name", "value", "species")
  df = rbind(df, x4)
}

df$species = str_trim(df$species)
# Remove any five characters followed by ")" — a layout artefact.
# NOTE(review): the trailing unmatched ")" in the pattern is treated as a
# literal by R's TRE engine; confirm this is the intended pattern.
df$species = gsub("(....(.)))","", df$species)
df$species[which(df$species == "")] = "Prunus yedoensis"
df$study = "ACTplanting"
# BUG FIX: select() is a dplyr verb but dplyr is never loaded in this script;
# use base column selection to produce the same column order.
df = df[, c("study", "species", "trait_name", "value")]
write.csv(df, "TCCScanberraraw.csv", row.names = F)
|
# Package-level environment used to share configuration between this driver
# and the helper functions it calls (load_developers_activity, etc.).
dt_env = new.env()
# Driver: records database/output configuration and per-project metadata in
# dt_env, then loads developer activity and computes ownership/quality
# correlations.
#   database_host, database_name : connection settings stored for the helpers
#   working_dir                  : directory for computed outputs (created here)
#   web_working_dir              : directory for web-facing outputs; defaults
#                                  to working_dir
developer_ownership = function(database_host, database_name, working_dir, web_working_dir = working_dir) {
  # Plotting / reporting packages used by the downstream helpers.
  library(reshape2)
  library(ggplot2)
  library(gplots)
  library(RColorBrewer)
  library(gdata)
  library(grid)
  library(gridExtra)
  library(htmlTable)
  dt_env$database_host = database_host
  dt_env$database_name = database_name
  dt_env$working_dir = working_dir
  dt_env$web_working_dir = web_working_dir
  # Human-readable project names, keyed by repository URL.
  projects_names = new.env(hash=T, parent=emptyenv())
  projects_names[["https://github.com/matthieu-foucault/jquery.git"]] = "JQuery"
  projects_names[["https://github.com/rails/rails.git"]] = "Rails"
  projects_names[["https://github.com/jenkinsci/jenkins.git"]] = "Jenkins"
  projects_names[["https://github.com/ansible/ansible.git"]] = "Ansible"
  projects_names[["https://github.com/angular/angular.js.git"]] = "Angular.JS"
  projects_names[["https://github.com/mono/mono.git"]] = "Mono"
  projects_names[["https://github.com/sebastianbergmann/phpunit.git"]] = "PHPUnit"
  dt_env$projects_names = projects_names
  # Release duration per project, keyed by repository URL (units not stated
  # here -- presumably weeks or months; confirm against the study protocol).
  release_duration = new.env(hash=T, parent=emptyenv())
  release_duration[["https://github.com/matthieu-foucault/jquery.git"]] = 9
  release_duration[["https://github.com/rails/rails.git"]] = 6
  release_duration[["https://github.com/jenkinsci/jenkins.git"]] = 7
  release_duration[["https://github.com/ansible/ansible.git"]] = 3
  release_duration[["https://github.com/angular/angular.js.git"]] = 9
  release_duration[["https://github.com/mono/mono.git"]] = 5
  release_duration[["https://github.com/sebastianbergmann/phpunit.git"]] = 11
  dt_env$release_duration = release_duration
  # Starting commit (SHA-1) per repository.
  # NOTE(review): S0 is populated but, unlike projects_names and
  # release_duration, never stored in dt_env -- confirm whether a
  # "dt_env$S0 = S0" assignment is missing or S0 is obtained elsewhere.
  S0 = new.env(hash=T, parent=emptyenv())
  S0[["https://github.com/jenkinsci/jenkins.git"]] = "3991cd04fd13aa086c25820bdfaa9460f0810284"
  S0[["https://github.com/rails/rails.git"]] = "73fc42cc0b5e94541480032c2941a50edd4080c2"
  S0[["https://github.com/matthieu-foucault/jquery.git"]] = "95559f5117c8a21c1b8cc99f4badc320fd3dcbda"
  S0[["https://github.com/ansible/ansible.git"]] = "6221a2740f5c3023c817d13e4a564f301ed3bc73"
  S0[["https://github.com/angular/angular.js.git"]] = "519bef4f3d1cdac497c782f77457fd2f67184601"
  S0[["https://github.com/sebastianbergmann/phpunit.git"]] = "6ae460aa82080dccca52995c260f4fe40a97deb7"
  S0[["https://github.com/mono/mono.git"]] = "675dc5b693495cb50c3004499a1d1f137722b988"
  # Ensure output directories exist (no warning if they already do).
  dir.create(working_dir, showWarnings =F)
  dir.create(web_working_dir, showWarnings =F)
  cat("loading from database... "); flush.console()
  load_developers_activity()
  cat(" DONE\n"); flush.console()
  cat("computing metrics and correlation with quality... "); flush.console()
  compute_ownership_correlations()
  cat("DONE\n"); flush.console()
}
|
/R/developer_ownership.R
|
no_license
|
matthieu-foucault/RdeveloperTurnover
|
R
| false
| false
| 2,715
|
r
|
# Package-level environment shared between this driver and its helpers.
dt_env = new.env()
# Driver: stores configuration and per-project metadata in dt_env, then loads
# developer activity and computes ownership/quality correlations.
developer_ownership = function(database_host, database_name, working_dir,
                               web_working_dir = working_dir) {
  # Attach the plotting / reporting packages the downstream helpers rely on.
  for (pkg in c("reshape2", "ggplot2", "gplots", "RColorBrewer",
                "gdata", "grid", "gridExtra", "htmlTable")) {
    library(pkg, character.only = TRUE)
  }
  dt_env$database_host = database_host
  dt_env$database_name = database_name
  dt_env$working_dir = working_dir
  dt_env$web_working_dir = web_working_dir
  # Copy a named vector into a fresh hashed environment keyed by its names.
  as_env = function(values) {
    e = new.env(hash = TRUE, parent = emptyenv())
    for (key in names(values)) {
      e[[key]] = values[[key]]
    }
    e
  }
  # Repository URLs studied; the per-project tables below follow this order.
  repos = c("https://github.com/matthieu-foucault/jquery.git",
            "https://github.com/rails/rails.git",
            "https://github.com/jenkinsci/jenkins.git",
            "https://github.com/ansible/ansible.git",
            "https://github.com/angular/angular.js.git",
            "https://github.com/mono/mono.git",
            "https://github.com/sebastianbergmann/phpunit.git")
  # Human-readable project names, keyed by repository URL.
  dt_env$projects_names = as_env(setNames(
    c("JQuery", "Rails", "Jenkins", "Ansible", "Angular.JS", "Mono", "PHPUnit"),
    repos))
  # Release duration per project, keyed by repository URL.
  dt_env$release_duration = as_env(setNames(
    c(9, 6, 7, 3, 9, 5, 11),
    repos))
  # Starting commit (SHA-1) per repository. Kept as a local variable (never
  # stored in dt_env), mirroring the original behavior.
  S0 = as_env(setNames(
    c("95559f5117c8a21c1b8cc99f4badc320fd3dcbda",
      "73fc42cc0b5e94541480032c2941a50edd4080c2",
      "3991cd04fd13aa086c25820bdfaa9460f0810284",
      "6221a2740f5c3023c817d13e4a564f301ed3bc73",
      "519bef4f3d1cdac497c782f77457fd2f67184601",
      "675dc5b693495cb50c3004499a1d1f137722b988",
      "6ae460aa82080dccca52995c260f4fe40a97deb7"),
    repos))
  # Ensure output directories exist without warning when already present.
  dir.create(working_dir, showWarnings = FALSE)
  dir.create(web_working_dir, showWarnings = FALSE)
  cat("loading from database... "); flush.console()
  load_developers_activity()
  cat(" DONE\n"); flush.console()
  cat("computing metrics and correlation with quality... "); flush.console()
  compute_ownership_correlations()
  cat("DONE\n"); flush.console()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/termkey.R
\name{validate_termkey}
\alias{validate_termkey}
\title{Determine if a termkey is valid}
\usage{
validate_termkey(termkey, allow_seasonkeys = FALSE)
}
\arguments{
\item{termkey}{TermKey for record pulled from SQL database}
}
\value{
either the valid termkey, or \code{NA_integer_} if \code{termkey} is not a valid termkey
}
\description{
For use with Indiana CHE \code{TermKey}s.
Takes into account the change in reporting method in summer 2016.
}
\examples{
validate_termkey(20081) # Valid, summer 2, 2007
validate_termkey(20082) # Valid, Fall 2007
validate_termkey(20083) # Valid, Spring 2008
validate_termkey(20084) # Valid, Summer 1, 2008
validate_termkey(20085) # Not Valid
validate_termkey(20181) # Not Valid
validate_termkey(20182) # Valid, Fall 2017
validate_termkey(20183) # Valid, Spring 2018
validate_termkey(20184) # Not Valid
validate_termkey(20185) # Valid, Trailing Summer 2018
}
|
/man/validate_termkey.Rd
|
no_license
|
IndianaCHE/IndianaCHEmisc
|
R
| false
| true
| 966
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/termkey.R
\name{validate_termkey}
\alias{validate_termkey}
\title{Determine if a termkey is valid}
\usage{
validate_termkey(termkey, allow_seasonkeys = FALSE)
}
\arguments{
\item{termkey}{TermKey for record pulled from SQL database}
}
\value{
either the valid termkey, or \code{NA_integer_} if \code{termkey} is not a valid termkey
}
\description{
For use with Indiana CHE \code{TermKey}s.
Takes into account the change in reporting method in summer 2016.
}
\examples{
validate_termkey(20081) # Valid, summer 2, 2007
validate_termkey(20082) # Valid, Fall 2007
validate_termkey(20083) # Valid, Spring 2008
validate_termkey(20084) # Valid, Summer 1, 2008
validate_termkey(20085) # Not Valid
validate_termkey(20181) # Not Valid
validate_termkey(20182) # Valid, Fall 2017
validate_termkey(20183) # Valid, Spring 2018
validate_termkey(20184) # Not Valid
validate_termkey(20185) # Valid, Trailing Summer 2018
}
|
\name{pseudoR2}
\alias{pseudoR2}
\alias{pseudoR2.ppm}
\alias{pseudoR2.slrm}
\title{
Calculate Pseudo-R-Squared for Point Process Model
}
\description{
Given a fitted point process model, calculate
the pseudo-R-squared value, which measures the
fraction of variation in the data that is explained
by the model.
}
\usage{
pseudoR2(object, \dots)
\method{pseudoR2}{ppm}(object, \dots, keepoffset=TRUE)
\method{pseudoR2}{slrm}(object, \dots, keepoffset=TRUE)
}
\arguments{
\item{object}{
Fitted point process model.
An object of class \code{"ppm"} or \code{"slrm"}.
}
\item{keepoffset}{
Logical value indicating whether to retain offset terms in the model
when computing the deviance difference. See Details.
}
\item{\dots}{
Additional arguments passed to
\code{\link{deviance.ppm}} or \code{\link{deviance.slrm}}.
}
}
\details{
The function \code{pseudoR2} is generic, with methods
for fitted point process models
of class \code{"ppm"} and \code{"slrm"}.
This function computes McFadden's pseudo-R-squared
\deqn{
R^2 = 1 - \frac{D}{D_0}
}{
R^2 = 1 - D/D0
}
where \eqn{D} is the deviance of the fitted model \code{object},
and \eqn{D_0}{D0} is the deviance of the null model.
Deviance is defined as twice the negative log-likelihood
or log-pseudolikelihood.
The null model is usually obtained by re-fitting the model
using the trend formula \code{~1}.
However if the original model formula included \code{offset} terms,
and if \code{keepoffset=TRUE} (the default),
then the null model formula consists of these offset terms. This
ensures that the \code{pseudoR2} value is non-negative.
}
\value{
A single numeric value.
}
\author{
\spatstatAuthors.
}
\seealso{
\code{\link{deviance.ppm}},
\code{\link{deviance.slrm}}.
}
\examples{
fit <- ppm(swedishpines ~ x+y)
pseudoR2(fit)
xcoord <- as.im(function(x,y) x, Window(swedishpines))
fut <- ppm(swedishpines ~ offset(xcoord/200) + y)
pseudoR2(fut)
}
\keyword{spatial}
\keyword{models}
|
/man/pseudoR2.Rd
|
no_license
|
spatstat/spatstat.core
|
R
| false
| false
| 2,052
|
rd
|
\name{pseudoR2}
\alias{pseudoR2}
\alias{pseudoR2.ppm}
\alias{pseudoR2.slrm}
\title{
Calculate Pseudo-R-Squared for Point Process Model
}
\description{
Given a fitted point process model, calculate
the pseudo-R-squared value, which measures the
fraction of variation in the data that is explained
by the model.
}
\usage{
pseudoR2(object, \dots)
\method{pseudoR2}{ppm}(object, \dots, keepoffset=TRUE)
\method{pseudoR2}{slrm}(object, \dots, keepoffset=TRUE)
}
\arguments{
\item{object}{
Fitted point process model.
An object of class \code{"ppm"} or \code{"slrm"}.
}
\item{keepoffset}{
Logical value indicating whether to retain offset terms in the model
when computing the deviance difference. See Details.
}
\item{\dots}{
Additional arguments passed to
\code{\link{deviance.ppm}} or \code{\link{deviance.slrm}}.
}
}
\details{
The function \code{pseudoR2} is generic, with methods
for fitted point process models
of class \code{"ppm"} and \code{"slrm"}.
This function computes McFadden's pseudo-R-squared
\deqn{
R^2 = 1 - \frac{D}{D_0}
}{
R^2 = 1 - D/D0
}
where \eqn{D} is the deviance of the fitted model \code{object},
and \eqn{D_0}{D0} is the deviance of the null model.
Deviance is defined as twice the negative log-likelihood
or log-pseudolikelihood.
The null model is usually obtained by re-fitting the model
using the trend formula \code{~1}.
However if the original model formula included \code{offset} terms,
and if \code{keepoffset=TRUE} (the default),
then the null model formula consists of these offset terms. This
ensures that the \code{pseudoR2} value is non-negative.
}
\value{
A single numeric value.
}
\author{
\spatstatAuthors.
}
\seealso{
\code{\link{deviance.ppm}},
\code{\link{deviance.slrm}}.
}
\examples{
fit <- ppm(swedishpines ~ x+y)
pseudoR2(fit)
xcoord <- as.im(function(x,y) x, Window(swedishpines))
fut <- ppm(swedishpines ~ offset(xcoord/200) + y)
pseudoR2(fut)
}
\keyword{spatial}
\keyword{models}
|
# Recode the adult census data: collapse categorical levels, convert to
# factors, scale numerics, and bin capital gain/loss.
# NOTE(review): read_csv comes from the readr package -- assumed attached
# elsewhere; confirm before sourcing this section standalone.
data <- read_csv("data.csv")
data$type_employer = as.character(data$type_employer)
data$occupation = as.character(data$occupation)
data$country = as.character(data$country)
data$race = as.character(data$race)
data$marital = as.character(data$marital)
# Collapse marital status into four coarse levels.
data$marital[data$marital=="Never-married"] = "Never-Married"
data$marital[data$marital=="Married-AF-spouse"] = "Married"
data$marital[data$marital=="Married-civ-spouse"] = "Married"
data$marital[data$marital=="Married-spouse-absent"] = "Not-Married"
data$marital[data$marital=="Separated"] = "Not-Married"
data$marital[data$marital=="Divorced"] = "Not-Married"
data$marital[data$marital=="Widowed"] = "Widowed"
data$country[data$country=="Cambodia"] = "SE-Asia" # blocking Country of Origin
data$country[data$country=="Canada"] = "British-Commonwealth"
data$country[data$country=="China"] = "China"
data$country[data$country=="Columbia"] = "South-America"
data$country[data$country=="Cuba"] = "Other"
data$country[data$country=="Dominican-Republic"] = "Latin-America"
data$country[data$country=="Ecuador"] = "South-America"
data$country[data$country=="El-Salvador"] = "South-America"
data$country[data$country=="England"] = "British-Commonwealth"
data$country[data$country=="France"] = "Euro_1"
data$country[data$country=="Germany"] = "Euro_1"
data$country[data$country=="Greece"] = "Euro_2"
data$country[data$country=="Guatemala"] = "Latin-America"
data$country[data$country=="Haiti"] = "Latin-America"
data$country[data$country=="Holand-Netherlands"] = "Euro_1"
data$country[data$country=="Honduras"] = "Latin-America"
data$country[data$country=="Hong"] = "China"
data$country[data$country=="Hungary"] = "Euro_2"
data$country[data$country=="India"] = "British-Commonwealth"
data$country[data$country=="Iran"] = "Other"
data$country[data$country=="Ireland"] = "British-Commonwealth"
data$country[data$country=="Italy"] = "Euro_1"
data$country[data$country=="Jamaica"] = "Latin-America"
data$country[data$country=="Japan"] = "Other"
data$country[data$country=="Laos"] = "SE-Asia"
data$country[data$country=="Mexico"] = "Latin-America"
data$country[data$country=="Nicaragua"] = "Latin-America"
data$country[data$country=="Outlying-US(Guam-USVI-etc)"] = "Latin-America"
data$country[data$country=="Peru"] = "South-America"
data$country[data$country=="Philippines"] = "SE-Asia"
data$country[data$country=="Poland"] = "Euro_2"
data$country[data$country=="Portugal"] = "Euro_2"
data$country[data$country=="Puerto-Rico"] = "Latin-America"
data$country[data$country=="Scotland"] = "British-Commonwealth"
data$country[data$country=="South"] = "Euro_2"
data$country[data$country=="Taiwan"] = "China"
data$country[data$country=="Thailand"] = "SE-Asia"
data$country[data$country=="Trinadad&Tobago"] = "Latin-America"
data$country[data$country=="United-States"] = "United-States"
data$country[data$country=="Vietnam"] = "SE-Asia"
data$country[data$country=="Yugoslavia"] = "Euro_2"
# Collapse employer type into broad categories.
data$type_employer = gsub("^Federal-gov","Federal-Govt",data$type_employer)
data$type_employer = gsub("^Local-gov","Other-Govt",data$type_employer)
data$type_employer = gsub("^State-gov","Other-Govt",data$type_employer)
data$type_employer = gsub("^Private","Private",data$type_employer)
data$type_employer = gsub("^Self-emp-inc","Self-Employed",data$type_employer)
data$type_employer = gsub("^Self-emp-not-inc","Self-Employed",data$type_employer)
data$type_employer = gsub("^Without-pay","Not-Working",data$type_employer)
data$type_employer = gsub("^Never-worked","Not-Working",data$type_employer)
# Collapse occupations into broad categories.
data$occupation = gsub("^Adm-clerical","Admin",data$occupation)
data$occupation = gsub("^Armed-Forces","Military",data$occupation)
data$occupation = gsub("^Craft-repair","Blue-Collar",data$occupation)
data$occupation = gsub("^Exec-managerial","White-Collar",data$occupation)
data$occupation = gsub("^Farming-fishing","Blue-Collar",data$occupation)
data$occupation = gsub("^Handlers-cleaners","Blue-Collar",data$occupation)
data$occupation = gsub("^Machine-op-inspct","Blue-Collar",data$occupation)
data$occupation = gsub("^Other-service","Service",data$occupation)
data$occupation = gsub("^Priv-house-serv","Service",data$occupation)
data$occupation = gsub("^Prof-specialty","Professional",data$occupation)
data$occupation = gsub("^Protective-serv","Other-Occupations",data$occupation)
data$occupation = gsub("^Sales","Sales",data$occupation)
data$occupation = gsub("^Tech-support","Other-Occupations",data$occupation)
data$occupation = gsub("^Transport-moving","Blue-Collar",data$occupation)
data$race[data$race=="White"] = "White"
data$race[data$race=="Black"] = "Black"
data$race[data$race=="Amer-Indian-Eskimo"] = "Amer-Indian"
data$race[data$race=="Asian-Pac-Islander"] = "Asian"
data$race[data$race=="Other"] = "Other"
data[sapply(data, is.character)] <- lapply(data[sapply(data, is.character)], as.factor)
# BUG FIX: the original line subset by is.character on the left-hand side but
# is.numeric on the right, so the two column sets could not match (and after
# the factor conversion above, no character columns remain). Scale the
# numeric columns in place instead.
data[sapply(data, is.numeric)] <- lapply(data[sapply(data, is.numeric)], scale)
# NOTE(review): capital_gain/capital_loss are scaled above before being cut,
# so the 0 breakpoint below no longer marks "no gain/loss" -- confirm whether
# the cuts should instead run on the raw values.
data[["capital_gain"]] <- ordered(cut(data$capital_gain,c(-Inf, 0, median(data[["capital_gain"]][data[["capital_gain"]] >0]), Inf)), labels = c("None", "Low", "High"))
data[["capital_loss"]] <- ordered(cut(data$capital_loss,c(-Inf, 0, median(data[["capital_loss"]][data[["capital_loss"]] >0]), Inf)), labels = c("None", "Low", "High"))
summary(data)
head(data)
library(nnet)
# NOTE(review): `train` and `data$val` are not defined in this script --
# presumably created by a train/validation split elsewhere; confirm.
a = nnet(income~., data=train,size=10,maxit=150,decay=.001)
plot.nnet(a)
table(data$val$income,predict(a,newdata=data$val,type="class"))
# Alternative recoding: group native country into regions and bin capital
# gain/loss into ordered Low/Medium/High factors.
data <- read_csv("data.csv")
db.adult <- data
library(dplyr)
# Region lookup vectors. Note the leading space in each level: the raw data
# apparently has a space before every categorical value.
Asia_East <- c(" Cambodia", " China", " Hong", " Laos", " Thailand",
               " Japan", " Taiwan", " Vietnam")
Asia_Central <- c(" India", " Iran")
Central_America <- c(" Cuba", " Guatemala", " Jamaica", " Nicaragua",
                     " Puerto-Rico", " Dominican-Republic", " El-Salvador",
                     " Haiti", " Honduras", " Mexico", " Trinadad&Tobago")
South_America <- c(" Ecuador", " Peru", " Columbia")
Europe_West <- c(" England", " Germany", " Holand-Netherlands", " Ireland",
                 " France", " Greece", " Italy", " Portugal", " Scotland")
Europe_East <- c(" Poland", " Yugoslavia", " Hungary")
# BUG FIX: a stray dangling "db.adult$" line stood here; R's parser merged it
# with the next statement into "db.adult$db.adult <- mutate(...)", nesting
# the mutated data frame inside a column instead of replacing db.adult.
db.adult <- mutate(db.adult,
                   native_region = ifelse(country %in% Asia_East, " East-Asia",
                                   ifelse(country %in% Asia_Central, " Central-Asia",
                                   ifelse(country %in% Central_America, " Central-America",
                                   ifelse(country %in% South_America, " South-America",
                                   ifelse(country %in% Europe_West, " Europe-West",
                                   ifelse(country %in% Europe_East, " Europe-East",
                                   ifelse(country == " United-States", " United-States",
                                          " Outlying-US" ))))))))
# Bin capital gain into Low / Medium / High (thresholds presumably the
# tertile boundaries of the positive values -- confirm).
db.adult <- mutate(db.adult,
                   cap_gain = ifelse(db.adult$capital_gain < 3464, " Low",
                              ifelse(db.adult$capital_gain >= 3464 &
                                     db.adult$capital_gain <= 14080, " Medium", " High")))
db.adult$cap_gain <- factor(db.adult$cap_gain,
                            ordered = TRUE,
                            levels = c(" Low", " Medium", " High"))
# Bin capital loss the same way.
db.adult <- mutate(db.adult,
                   cap_loss = ifelse(db.adult$capital_loss < 1672, " Low",
                              ifelse(db.adult$capital_loss >= 1672 &
                                     db.adult$capital_loss <= 1977, " Medium", " High")))
db.adult$cap_loss <- factor(db.adult$cap_loss,
                            ordered = TRUE,
                            levels = c(" Low", " Medium", " High"))
# Discretize the continuous columns into small ordinal codes and export.
a <- data
# Age quartile-style bins (17-90).
a$age[a$age>=17 & a$age<29] <- 1
a$age[a$age>=29 & a$age<38] <- 2
a$age[a$age>=38 & a$age<48] <- 3
a$age[a$age>=48 & a$age<=90] <- 4
# Hours worked per week.
a$hrperweek[a$hrperweek>=0 & a$hrperweek <40] <- 1
a$hrperweek[a$hrperweek>=40 & a$hrperweek <45] <- 2
a$hrperweek[a$hrperweek>=45 & a$hrperweek <60] <- 3
a$hrperweek[a$hrperweek>=60 & a$hrperweek <80] <- 4
a$hrperweek[a$hrperweek>=80 & a$hrperweek <100] <- 5
# Capital gain bins.
a$capitalgain[a$capitalgain>=0 & a$capitalgain<=114] <- 1
a$capitalgain[a$capitalgain>114 & a$capitalgain<=3464] <- 2
a$capitalgain[a$capitalgain>3464 & a$capitalgain<=7298] <- 3
a$capitalgain[a$capitalgain>7298 & a$capitalgain<=14084] <- 4
a$capitalgain[a$capitalgain>14084 & a$capitalgain<=99999] <- 5
# Capital loss bins.
a$capitalloss[a$capitalloss>=0 & a$capitalloss<=155] <- 1
a$capitalloss[a$capitalloss>155 & a$capitalloss<=1672] <- 2
a$capitalloss[a$capitalloss>1672 & a$capitalloss<=1887] <- 3
a$capitalloss[a$capitalloss>1887 & a$capitalloss<=1977] <- 4
a$capitalloss[a$capitalloss>1977 & a$capitalloss<=4356] <- 5
# Years of education bins.
a$educ[a$educ>0 & a$educ<=8] <- 1
a$educ[a$educ>8 & a$educ<=10] <- 2
a$educ[a$educ>10 & a$educ<=13] <- 3
a$educ[a$educ>13 & a$educ<=16] <- 4
summary(a)
# BUG FIX: the original call had a trailing comma, which R rejects as an
# empty argument in a function call.
write.csv(a, file = "output.csv")
# ?write.csv   # interactive help lookup; not needed when sourcing the script
|
/Temp/preprop.R
|
no_license
|
ksrikanthcnc/Data-Mining
|
R
| false
| false
| 9,159
|
r
|
# Recode the adult census data: collapse categorical levels, convert to
# factors, scale numerics, and bin capital gain/loss.
# NOTE(review): read_csv comes from the readr package -- assumed attached
# elsewhere; confirm before sourcing this section standalone.
data <- read_csv("data.csv")
data$type_employer = as.character(data$type_employer)
data$occupation = as.character(data$occupation)
data$country = as.character(data$country)
data$race = as.character(data$race)
data$marital = as.character(data$marital)
# Collapse marital status into four coarse levels.
data$marital[data$marital=="Never-married"] = "Never-Married"
data$marital[data$marital=="Married-AF-spouse"] = "Married"
data$marital[data$marital=="Married-civ-spouse"] = "Married"
data$marital[data$marital=="Married-spouse-absent"] = "Not-Married"
data$marital[data$marital=="Separated"] = "Not-Married"
data$marital[data$marital=="Divorced"] = "Not-Married"
data$marital[data$marital=="Widowed"] = "Widowed"
data$country[data$country=="Cambodia"] = "SE-Asia" # blocking Country of Origin
data$country[data$country=="Canada"] = "British-Commonwealth"
data$country[data$country=="China"] = "China"
data$country[data$country=="Columbia"] = "South-America"
data$country[data$country=="Cuba"] = "Other"
data$country[data$country=="Dominican-Republic"] = "Latin-America"
data$country[data$country=="Ecuador"] = "South-America"
data$country[data$country=="El-Salvador"] = "South-America"
data$country[data$country=="England"] = "British-Commonwealth"
data$country[data$country=="France"] = "Euro_1"
data$country[data$country=="Germany"] = "Euro_1"
data$country[data$country=="Greece"] = "Euro_2"
data$country[data$country=="Guatemala"] = "Latin-America"
data$country[data$country=="Haiti"] = "Latin-America"
data$country[data$country=="Holand-Netherlands"] = "Euro_1"
data$country[data$country=="Honduras"] = "Latin-America"
data$country[data$country=="Hong"] = "China"
data$country[data$country=="Hungary"] = "Euro_2"
data$country[data$country=="India"] = "British-Commonwealth"
data$country[data$country=="Iran"] = "Other"
data$country[data$country=="Ireland"] = "British-Commonwealth"
data$country[data$country=="Italy"] = "Euro_1"
data$country[data$country=="Jamaica"] = "Latin-America"
data$country[data$country=="Japan"] = "Other"
data$country[data$country=="Laos"] = "SE-Asia"
data$country[data$country=="Mexico"] = "Latin-America"
data$country[data$country=="Nicaragua"] = "Latin-America"
data$country[data$country=="Outlying-US(Guam-USVI-etc)"] = "Latin-America"
data$country[data$country=="Peru"] = "South-America"
data$country[data$country=="Philippines"] = "SE-Asia"
data$country[data$country=="Poland"] = "Euro_2"
data$country[data$country=="Portugal"] = "Euro_2"
data$country[data$country=="Puerto-Rico"] = "Latin-America"
data$country[data$country=="Scotland"] = "British-Commonwealth"
data$country[data$country=="South"] = "Euro_2"
data$country[data$country=="Taiwan"] = "China"
data$country[data$country=="Thailand"] = "SE-Asia"
data$country[data$country=="Trinadad&Tobago"] = "Latin-America"
data$country[data$country=="United-States"] = "United-States"
data$country[data$country=="Vietnam"] = "SE-Asia"
data$country[data$country=="Yugoslavia"] = "Euro_2"
# Collapse employer type into broad categories.
data$type_employer = gsub("^Federal-gov","Federal-Govt",data$type_employer)
data$type_employer = gsub("^Local-gov","Other-Govt",data$type_employer)
data$type_employer = gsub("^State-gov","Other-Govt",data$type_employer)
data$type_employer = gsub("^Private","Private",data$type_employer)
data$type_employer = gsub("^Self-emp-inc","Self-Employed",data$type_employer)
data$type_employer = gsub("^Self-emp-not-inc","Self-Employed",data$type_employer)
data$type_employer = gsub("^Without-pay","Not-Working",data$type_employer)
data$type_employer = gsub("^Never-worked","Not-Working",data$type_employer)
# Collapse occupations into broad categories.
data$occupation = gsub("^Adm-clerical","Admin",data$occupation)
data$occupation = gsub("^Armed-Forces","Military",data$occupation)
data$occupation = gsub("^Craft-repair","Blue-Collar",data$occupation)
data$occupation = gsub("^Exec-managerial","White-Collar",data$occupation)
data$occupation = gsub("^Farming-fishing","Blue-Collar",data$occupation)
data$occupation = gsub("^Handlers-cleaners","Blue-Collar",data$occupation)
data$occupation = gsub("^Machine-op-inspct","Blue-Collar",data$occupation)
data$occupation = gsub("^Other-service","Service",data$occupation)
data$occupation = gsub("^Priv-house-serv","Service",data$occupation)
data$occupation = gsub("^Prof-specialty","Professional",data$occupation)
data$occupation = gsub("^Protective-serv","Other-Occupations",data$occupation)
data$occupation = gsub("^Sales","Sales",data$occupation)
data$occupation = gsub("^Tech-support","Other-Occupations",data$occupation)
data$occupation = gsub("^Transport-moving","Blue-Collar",data$occupation)
data$race[data$race=="White"] = "White"
data$race[data$race=="Black"] = "Black"
data$race[data$race=="Amer-Indian-Eskimo"] = "Amer-Indian"
data$race[data$race=="Asian-Pac-Islander"] = "Asian"
data$race[data$race=="Other"] = "Other"
data[sapply(data, is.character)] <- lapply(data[sapply(data, is.character)], as.factor)
# BUG FIX: the original line subset by is.character on the left-hand side but
# is.numeric on the right, so the two column sets could not match (and after
# the factor conversion above, no character columns remain). Scale the
# numeric columns in place instead.
data[sapply(data, is.numeric)] <- lapply(data[sapply(data, is.numeric)], scale)
# NOTE(review): capital_gain/capital_loss are scaled above before being cut,
# so the 0 breakpoint below no longer marks "no gain/loss" -- confirm whether
# the cuts should instead run on the raw values.
data[["capital_gain"]] <- ordered(cut(data$capital_gain,c(-Inf, 0, median(data[["capital_gain"]][data[["capital_gain"]] >0]), Inf)), labels = c("None", "Low", "High"))
data[["capital_loss"]] <- ordered(cut(data$capital_loss,c(-Inf, 0, median(data[["capital_loss"]][data[["capital_loss"]] >0]), Inf)), labels = c("None", "Low", "High"))
summary(data)
head(data)
library(nnet)
# NOTE(review): `train` and `data$val` are not defined in this script --
# presumably created by a train/validation split elsewhere; confirm.
a = nnet(income~., data=train,size=10,maxit=150,decay=.001)
plot.nnet(a)
table(data$val$income,predict(a,newdata=data$val,type="class"))
# Alternative recoding: group native country into regions and bin capital
# gain/loss into ordered Low/Medium/High factors.
data <- read_csv("data.csv")
db.adult <- data
library(dplyr)
# Region lookup vectors. Note the leading space in each level: the raw data
# apparently has a space before every categorical value.
Asia_East <- c(" Cambodia", " China", " Hong", " Laos", " Thailand",
               " Japan", " Taiwan", " Vietnam")
Asia_Central <- c(" India", " Iran")
Central_America <- c(" Cuba", " Guatemala", " Jamaica", " Nicaragua",
                     " Puerto-Rico", " Dominican-Republic", " El-Salvador",
                     " Haiti", " Honduras", " Mexico", " Trinadad&Tobago")
South_America <- c(" Ecuador", " Peru", " Columbia")
Europe_West <- c(" England", " Germany", " Holand-Netherlands", " Ireland",
                 " France", " Greece", " Italy", " Portugal", " Scotland")
Europe_East <- c(" Poland", " Yugoslavia", " Hungary")
# BUG FIX: a stray dangling "db.adult$" line stood here; R's parser merged it
# with the next statement into "db.adult$db.adult <- mutate(...)", nesting
# the mutated data frame inside a column instead of replacing db.adult.
db.adult <- mutate(db.adult,
                   native_region = ifelse(country %in% Asia_East, " East-Asia",
                                   ifelse(country %in% Asia_Central, " Central-Asia",
                                   ifelse(country %in% Central_America, " Central-America",
                                   ifelse(country %in% South_America, " South-America",
                                   ifelse(country %in% Europe_West, " Europe-West",
                                   ifelse(country %in% Europe_East, " Europe-East",
                                   ifelse(country == " United-States", " United-States",
                                          " Outlying-US" ))))))))
# Bin capital gain into Low / Medium / High (thresholds presumably the
# tertile boundaries of the positive values -- confirm).
db.adult <- mutate(db.adult,
                   cap_gain = ifelse(db.adult$capital_gain < 3464, " Low",
                              ifelse(db.adult$capital_gain >= 3464 &
                                     db.adult$capital_gain <= 14080, " Medium", " High")))
db.adult$cap_gain <- factor(db.adult$cap_gain,
                            ordered = TRUE,
                            levels = c(" Low", " Medium", " High"))
# Bin capital loss the same way.
db.adult <- mutate(db.adult,
                   cap_loss = ifelse(db.adult$capital_loss < 1672, " Low",
                              ifelse(db.adult$capital_loss >= 1672 &
                                     db.adult$capital_loss <= 1977, " Medium", " High")))
db.adult$cap_loss <- factor(db.adult$cap_loss,
                            ordered = TRUE,
                            levels = c(" Low", " Medium", " High"))
# Discretize the continuous columns into small ordinal codes and export.
a <- data
# Age quartile-style bins (17-90).
a$age[a$age>=17 & a$age<29] <- 1
a$age[a$age>=29 & a$age<38] <- 2
a$age[a$age>=38 & a$age<48] <- 3
a$age[a$age>=48 & a$age<=90] <- 4
# Hours worked per week.
a$hrperweek[a$hrperweek>=0 & a$hrperweek <40] <- 1
a$hrperweek[a$hrperweek>=40 & a$hrperweek <45] <- 2
a$hrperweek[a$hrperweek>=45 & a$hrperweek <60] <- 3
a$hrperweek[a$hrperweek>=60 & a$hrperweek <80] <- 4
a$hrperweek[a$hrperweek>=80 & a$hrperweek <100] <- 5
# Capital gain bins.
a$capitalgain[a$capitalgain>=0 & a$capitalgain<=114] <- 1
a$capitalgain[a$capitalgain>114 & a$capitalgain<=3464] <- 2
a$capitalgain[a$capitalgain>3464 & a$capitalgain<=7298] <- 3
a$capitalgain[a$capitalgain>7298 & a$capitalgain<=14084] <- 4
a$capitalgain[a$capitalgain>14084 & a$capitalgain<=99999] <- 5
# Capital loss bins.
a$capitalloss[a$capitalloss>=0 & a$capitalloss<=155] <- 1
a$capitalloss[a$capitalloss>155 & a$capitalloss<=1672] <- 2
a$capitalloss[a$capitalloss>1672 & a$capitalloss<=1887] <- 3
a$capitalloss[a$capitalloss>1887 & a$capitalloss<=1977] <- 4
a$capitalloss[a$capitalloss>1977 & a$capitalloss<=4356] <- 5
# Years of education bins.
a$educ[a$educ>0 & a$educ<=8] <- 1
a$educ[a$educ>8 & a$educ<=10] <- 2
a$educ[a$educ>10 & a$educ<=13] <- 3
a$educ[a$educ>13 & a$educ<=16] <- 4
summary(a)
# BUG FIX: the original call had a trailing comma, which R rejects as an
# empty argument in a function call.
write.csv(a, file = "output.csv")
# ?write.csv   # interactive help lookup; not needed when sourcing the script
|
\name{virtualArrayComBat}
\alias{virtualArrayComBat}
\alias{virtualArrayComBat,ExpressionSet-method}
\alias{virtualArrayComBat,data.frame-method}
\alias{virtualArrayComBat,character-method}
\title{
Removes batch effects from microarray derived expression matrices. Modified version.
}
\description{
This is a modified version of the R script "ComBat.R" (see references). It is used to adjust for batch effects in microarray data. The modification is restricted to make the script accept expression matrices and data.frames instead of plain text files.
}
\usage{
virtualArrayComBat(expression_xls, sample_info_file, type = "txt", write = FALSE, covariates = "Batch", par.prior = TRUE, filter = FALSE, skip = 0, prior.plots = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{expression_xls}{
%% The expression matrix to adjust.
The expression matrix to adjust.
}
\item{sample_info_file}{
%% The sample information data.frame regarding batch contribution and possibly covariates.
The sample information data.frame regarding batch contribution and possibly covariates.
}
\item{type}{
The type of input; Defaults to "txt".
}
\item{write}{
Write output to external file or provide new expression matrix.
}
\item{covariates}{
Describe which Covariates to use in the process and which to dismiss. The default is to use only "Batch".
}
\item{par.prior}{
Logical; set prior parameters or not; Use prespecified values for the variables ("TRUE") or start a priori ("FALSE").
}
\item{filter}{
Filter for genes not present in a given percentage of the samples. Requires present/absent calls in the data. Can be either "FALSE" or a numeric between "0" and "1". Recommended is "0.8" or "FALSE".
}
\item{skip}{
Columns to skip in the input "expression_xls" matrix.
}
\item{prior.plots}{
Create quantile-quantile and kernel density plots including prior estimates to assess the quality of the estimation.
}
}
% \details{% ~~ If necessary, more details than the description above ~~}
\value{
%% Returns a matrix holding adjusted expression values.
Returns a matrix holding adjusted expression values.
}
\references{
%%
Johnson, WE, Rabinovic, A, and Li, C (2007). Adjusting batch effects in microarray expression data using Empirical Bayes methods. Biostatistics 8(1):118-127.
}
\author{
%%
Original author: Johnson, WE, Rabinovic, A, and Li, C (2007)
Modified by: Andreas Heider (2011)
}
\note{
%% ~~further notes~~
Original code by Johnson, WE, Rabinovic, A, and Li, C, made available in this package by Andreas Heider
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
virtualArray-package, virtualArray.ExpressionSet, virtualArrayCompile
}
\examples{
## EMPTY
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ batch effects }
\keyword{ batch }% __ONLY ONE__ keyword per line
|
/man/virtualArrayComBat.Rd
|
no_license
|
scfurl/virtualArray
|
R
| false
| false
| 2,970
|
rd
|
\name{virtualArrayComBat}
\alias{virtualArrayComBat}
\alias{virtualArrayComBat,ExpressionSet-method}
\alias{virtualArrayComBat,data.frame-method}
\alias{virtualArrayComBat,character-method}
\title{
Removes batch effects from microarray derived expression matrices. Modified version.
}
\description{
This is a modified version of the R script "ComBat.R" (see references). It is used to adjust for batch effects in microarray data. The modification is restricted to make the script accept expression matrices and data.frames instead of plain text files.
}
\usage{
virtualArrayComBat(expression_xls, sample_info_file, type = "txt", write = FALSE, covariates = "Batch", par.prior = TRUE, filter = FALSE, skip = 0, prior.plots = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{expression_xls}{
%% The expression matrix to adjust.
The expression matrix to adjust.
}
\item{sample_info_file}{
%% The sample information data.frame regarding batch contribution and possibly covariates.
The sample information data.frame regarding batch contribution and possibly covariates.
}
\item{type}{
The type of input; Defaults to "txt".
}
\item{write}{
Write output to external file or provide new expression matrix.
}
\item{covariates}{
Describe which Covariates to use in the process and which to dismiss. The default is to use only "Batch".
}
\item{par.prior}{
Logical; set prior parameters or not; Use prespecified values for the variables ("TRUE") or start a priori ("FALSE").
}
\item{filter}{
Filter for genes not present in a given percentage of the samples. Requires present/absent calls in the data. Can be either "FALSE" or a numeric between "0" and "1". Recommended is "0.8" or "FALSE".
}
\item{skip}{
Columns to skip in the input "expression_xls" matrix.
}
\item{prior.plots}{
Create quantile-quantile and kernel density plots including prior estimates to assess the quality of the estimation.
}
}
% \details{% ~~ If necessary, more details than the description above ~~}
\value{
%% Returns a matrix holding adjusted expression values.
Returns a matrix holding adjusted expression values.
}
\references{
%%
Johnson, WE, Rabinovic, A, and Li, C (2007). Adjusting batch effects in microarray expression data using Empirical Bayes methods. Biostatistics 8(1):118-127.
}
\author{
%%
Original author: Johnson, WE, Rabinovic, A, and Li, C (2007)
Modified by: Andreas Heider (2011)
}
\note{
%% ~~further notes~~
Original code by Johnson, WE, Rabinovic, A, and Li, C, made available in this package by Andreas Heider
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
virtualArray-package, virtualArray.ExpressionSet, virtualArrayCompile
}
\examples{
## EMPTY
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ batch effects }
\keyword{ batch }% __ONLY ONE__ keyword per line
|
#- New ExpandYear function ----
# Expand an SWS long-format data.table so that every (element, area, item)
# series has one row per year, optionally extended through `newYears`
# (the years newYears, newYears - 1 and newYears - 2 are added).
#
# Rows created by the expansion are initialised as missing via fillRecord(),
# and series whose last available observation carries the flag combination
# "M;-" are propagated forward with Value = NA and flags "M"/"-" so that
# downstream imputation leaves them blocked.
#
# Args:
#   data         data.table holding the dataset (standard SWS columns).
#   areaVar, elementVar, itemVar, yearVar, valueVar, obsflagVar, methFlagVar
#                column names for the dimensions, the value and the flags.
#   newYears     last year the series should be expanded to, or NULL to keep
#                only the years already present in `data`.
# Returns: the expanded data.table with the same columns as `data`.
# NOTE(review): depends on helper fillRecord() defined elsewhere; assumes
# flag semantics "M" = missing, "u" = unfilled -- confirm against SWS docs.
expandYear <- function (data, areaVar = "geographicAreaM49", elementVar = "measuredElement",
                        itemVar = "measuredItemCPC", yearVar = "timePointYears",
                        valueVar = "Value", obsflagVar = "flagObservationStatus",
                        methFlagVar = "flagMethod", newYears = NULL)
{
  # Unique key combinations present in the data.
  # NOTE(review): get(key) with a length-3 `key` orders by the first key
  # column only; harmless here because the rows are only de-duplicated and
  # merged afterwards, but worth confirming intent.
  key = c(elementVar, areaVar, itemVar)
  keyDataFrame = data[, key, with = FALSE]
  keyDataFrame = keyDataFrame[with(keyDataFrame, order(get(key)))]
  keyDataFrame = keyDataFrame[!duplicated(keyDataFrame)]
  # All years to cover: observed years plus (optionally) the three most
  # recent years up to newYears.
  yearDataFrame = unique(data[, get(yearVar)])
  if (!is.null(newYears)) {
    yearDataFrame = unique(c(yearDataFrame, newYears, newYears -
                               1, newYears - 2))
  }
  yearDataFrame = data.table(yearVar = yearDataFrame)
  colnames(yearDataFrame) = yearVar
  # Cartesian product of key combinations and years, then left-join the
  # original observations onto it.
  completeBasis = data.table(merge.data.frame(keyDataFrame,
                                              yearDataFrame))
  expandedData = merge(completeBasis, data, by = colnames(completeBasis),
                       all.x = TRUE)
  # Mark the newly created (empty) records as missing.
  expandedData = fillRecord(expandedData, areaVar = areaVar,
                            itemVar = itemVar, yearVar = yearVar,
                            flagObsVar = obsflagVar,
                            flagMethodVar = methFlagVar)
  # Identify series whose last real (non-"u") observation is flagged "M;-":
  # these must stay blocked (not imputed) in the added years.
  seriesToBlock = expandedData[(get(methFlagVar) != "u"), ]
  seriesToBlock[, `:=`(lastYearAvailable, max(get(yearVar))),
                by = key]
  seriesToBlock[, `:=`(flagComb, paste(get(obsflagVar), get(methFlagVar),
                                       sep = ";"))]
  seriesToBlock = seriesToBlock[get(yearVar) == lastYearAvailable &
                                  flagComb == "M;-"]
  if (nrow(seriesToBlock) > 0) {
    # Build one "M"/"-" row per blocked series for each year after its
    # last available year, up to newYears.
    seriesToBlock = seriesToBlock[, {
      max_year = max(as.integer(.SD[, timePointYears]))
      data.table(timePointYears = seq.int(max_year + 1,
                                          newYears), Value = NA_real_, flagObservationStatus = "M",
                 flagMethod = "-")[max_year < newYears]
    }, by = key]
    # Overwrite the method flag of the matching expanded rows with "-".
    expandedData = merge(expandedData, seriesToBlock, by = c(areaVar,
                                                             elementVar, itemVar, yearVar), all.x = TRUE, suffixes = c("",
                                                                                                                       "_MDash"))
    expandedData[!is.na(flagMethod_MDash), `:=`(flagMethod,
                                                flagMethod_MDash)]
    expandedData = expandedData[, colnames(data), with = FALSE]
  }
  expandedData
}
# Impute missing observations in `data` (rows whose observation flag is "M"
# and method flag is "u") using the ensemble imputation machinery configured
# through `imputationParameters`.
#
# Steps:
#   1. Validate inputs once via ensureImputationInputs() (guarded by the
#      global `ensuredImputationData` flag).
#   2. Resolve the output columns: either overwrite the configured
#      value/flag columns or write to new "*_<newImputationColumn>" columns.
#   3. Run imputeSingleObservation(), then ensembleImpute(), and copy the
#      ensemble estimates into the still-missing cells.
#   4. Flag every cell that actually received a value with the configured
#      observation/method flags.
# Returns the data.table (also modified by reference via `:=`).
# NOTE(review): depends on project helpers ensureImputationInputs(),
# imputeSingleObservation() and ensembleImpute(); reads a global flag --
# confirm the intended session-level caching behaviour.
imputeVariable <- function(data, imputationParameters){
  if (!exists("ensuredImputationData") || !ensuredImputationData)
    ensureImputationInputs(data = data, imputationParameters = imputationParameters)
  # Decide where imputed values and flags are written.
  if (imputationParameters$newImputationColumn == "") {
    newValueColumn = imputationParameters$imputationValueColumn
    newObsFlagColumn = imputationParameters$imputationFlagColumn
    newMethodFlagColumn = imputationParameters$imputationMethodColumn
  }
  else {
    newValueColumn = paste0("Value_", imputationParameters$newImputationColumn)
    newObsFlagColumn = paste0("flagObservationStatus_",
                              imputationParameters$newImputationColumn)
    newMethodFlagColumn = paste0("flagMethod_", imputationParameters$newImputationColumn)
  }
  imputeSingleObservation(data, imputationParameters)
  # Cells still missing after the single-observation pass.
  missingIndex = data[[imputationParameters$imputationFlagColumn]] ==
    "M" & data[[imputationParameters$imputationMethodColumn]] ==
    "u"
  ensemble = ensembleImpute(data = data, imputationParameters = imputationParameters)
  if(!is.null(nrow(ensemble))) {
    # Copy ensemble estimates into the missing cells, then drop the
    # temporary `ensemble` column again.
    data = cbind(data, ensemble)
    data[missingIndex & !is.na(ensemble), `:=`(c(newValueColumn), ensemble)]
    data = data[, `:=`(ensemble, NULL)]
  }
  # Flag everything that actually received an imputed value.
  imputedIndex = missingIndex & !is.na(data[[newValueColumn]])
  invisible(data[imputedIndex, `:=`(c(newObsFlagColumn, newMethodFlagColumn),
                                    list(imputationParameters$imputationFlag, imputationParameters$newMethodFlag))])
  return(data)
}
|
/shinyProducerPrices3/modified_functions.R
|
no_license
|
SWS-Methodology/faoswsProducerPrices
|
R
| false
| false
| 4,155
|
r
|
#- New ExpandYear function ----
# Expand an SWS long-format data.table so that every (element, area, item)
# series has one row per year, optionally extended through `newYears`
# (the years newYears, newYears - 1 and newYears - 2 are added).
#
# Rows created by the expansion are initialised as missing via fillRecord(),
# and series whose last available observation carries the flag combination
# "M;-" are propagated forward with Value = NA and flags "M"/"-" so that
# downstream imputation leaves them blocked.
#
# Args:
#   data         data.table holding the dataset (standard SWS columns).
#   areaVar, elementVar, itemVar, yearVar, valueVar, obsflagVar, methFlagVar
#                column names for the dimensions, the value and the flags.
#   newYears     last year the series should be expanded to, or NULL to keep
#                only the years already present in `data`.
# Returns: the expanded data.table with the same columns as `data`.
# NOTE(review): depends on helper fillRecord() defined elsewhere; assumes
# flag semantics "M" = missing, "u" = unfilled -- confirm against SWS docs.
expandYear <- function (data, areaVar = "geographicAreaM49", elementVar = "measuredElement",
                        itemVar = "measuredItemCPC", yearVar = "timePointYears",
                        valueVar = "Value", obsflagVar = "flagObservationStatus",
                        methFlagVar = "flagMethod", newYears = NULL)
{
    # Unique key combinations present in the data.
    # NOTE(review): get(key) with a length-3 `key` orders by the first key
    # column only; harmless here because the rows are only de-duplicated and
    # merged afterwards, but worth confirming intent.
    key = c(elementVar, areaVar, itemVar)
    keyDataFrame = data[, key, with = FALSE]
    keyDataFrame = keyDataFrame[with(keyDataFrame, order(get(key)))]
    keyDataFrame = keyDataFrame[!duplicated(keyDataFrame)]
    # All years to cover: observed years plus (optionally) the three most
    # recent years up to newYears.
    yearDataFrame = unique(data[, get(yearVar)])
    if (!is.null(newYears)) {
        yearDataFrame = unique(c(yearDataFrame, newYears, newYears -
            1, newYears - 2))
    }
    yearDataFrame = data.table(yearVar = yearDataFrame)
    colnames(yearDataFrame) = yearVar
    # Cartesian product of key combinations and years, then left-join the
    # original observations onto it.
    completeBasis = data.table(merge.data.frame(keyDataFrame,
        yearDataFrame))
    expandedData = merge(completeBasis, data, by = colnames(completeBasis),
        all.x = TRUE)
    # Mark the newly created (empty) records as missing.
    expandedData = fillRecord(expandedData, areaVar = areaVar,
                              itemVar = itemVar, yearVar = yearVar,
                              flagObsVar = obsflagVar,
                              flagMethodVar = methFlagVar)
    # Identify series whose last real (non-"u") observation is flagged "M;-":
    # these must stay blocked (not imputed) in the added years.
    seriesToBlock = expandedData[(get(methFlagVar) != "u"), ]
    seriesToBlock[, `:=`(lastYearAvailable, max(get(yearVar))),
        by = key]
    seriesToBlock[, `:=`(flagComb, paste(get(obsflagVar), get(methFlagVar),
        sep = ";"))]
    seriesToBlock = seriesToBlock[get(yearVar) == lastYearAvailable &
        flagComb == "M;-"]
    if (nrow(seriesToBlock) > 0) {
        # Build one "M"/"-" row per blocked series for each year after its
        # last available year, up to newYears.
        seriesToBlock = seriesToBlock[, {
            max_year = max(as.integer(.SD[, timePointYears]))
            data.table(timePointYears = seq.int(max_year + 1,
                newYears), Value = NA_real_, flagObservationStatus = "M",
                flagMethod = "-")[max_year < newYears]
        }, by = key]
        # Overwrite the method flag of the matching expanded rows with "-".
        expandedData = merge(expandedData, seriesToBlock, by = c(areaVar,
            elementVar, itemVar, yearVar), all.x = TRUE, suffixes = c("",
            "_MDash"))
        expandedData[!is.na(flagMethod_MDash), `:=`(flagMethod,
            flagMethod_MDash)]
        expandedData = expandedData[, colnames(data), with = FALSE]
    }
    expandedData
}
# Impute missing observations in `data` (rows whose observation flag is "M"
# and method flag is "u") using the ensemble imputation machinery configured
# through `imputationParameters`.
#
# Steps:
#   1. Validate inputs once via ensureImputationInputs() (guarded by the
#      global `ensuredImputationData` flag).
#   2. Resolve the output columns: either overwrite the configured
#      value/flag columns or write to new "*_<newImputationColumn>" columns.
#   3. Run imputeSingleObservation(), then ensembleImpute(), and copy the
#      ensemble estimates into the still-missing cells.
#   4. Flag every cell that actually received a value with the configured
#      observation/method flags.
# Returns the data.table (also modified by reference via `:=`).
# NOTE(review): depends on project helpers ensureImputationInputs(),
# imputeSingleObservation() and ensembleImpute(); reads a global flag --
# confirm the intended session-level caching behaviour.
imputeVariable <- function(data, imputationParameters){
  if (!exists("ensuredImputationData") || !ensuredImputationData)
    ensureImputationInputs(data = data, imputationParameters = imputationParameters)
  # Decide where imputed values and flags are written.
  if (imputationParameters$newImputationColumn == "") {
    newValueColumn = imputationParameters$imputationValueColumn
    newObsFlagColumn = imputationParameters$imputationFlagColumn
    newMethodFlagColumn = imputationParameters$imputationMethodColumn
  }
  else {
    newValueColumn = paste0("Value_", imputationParameters$newImputationColumn)
    newObsFlagColumn = paste0("flagObservationStatus_",
                              imputationParameters$newImputationColumn)
    newMethodFlagColumn = paste0("flagMethod_", imputationParameters$newImputationColumn)
  }
  imputeSingleObservation(data, imputationParameters)
  # Cells still missing after the single-observation pass.
  missingIndex = data[[imputationParameters$imputationFlagColumn]] ==
    "M" & data[[imputationParameters$imputationMethodColumn]] ==
    "u"
  ensemble = ensembleImpute(data = data, imputationParameters = imputationParameters)
  if(!is.null(nrow(ensemble))) {
    # Copy ensemble estimates into the missing cells, then drop the
    # temporary `ensemble` column again.
    data = cbind(data, ensemble)
    data[missingIndex & !is.na(ensemble), `:=`(c(newValueColumn), ensemble)]
    data = data[, `:=`(ensemble, NULL)]
  }
  # Flag everything that actually received an imputed value.
  imputedIndex = missingIndex & !is.na(data[[newValueColumn]])
  invisible(data[imputedIndex, `:=`(c(newObsFlagColumn, newMethodFlagColumn),
                                    list(imputationParameters$imputationFlag, imputationParameters$newMethodFlag))])
  return(data)
}
|
## Speculation-driven volatility analysis for BTC / ETH / XRP.
## Per currency: fit an AR(1)-X + GARCH(1,1) model on returns, regress the
## fitted conditional volatility on a lagged speculation measure, and run
## Granger causality tests between the speculation-driven volatility series.
library(forecast)
library(quantmod)
library(timeSeries)
library(tseries)
library(xts)
library(lmtest)
library(rugarch)
source('funcs.R')
# 1. Prepare overall data
df=read.csv('datasets_created_python/merged_all.csv')
df$date=as.POSIXct(as.Date(df$date))
# Drop the first 50 rows (warm-up period of the merged series).
df=df[seq(51,dim(df)[1],1),]
summary(df)
crypto_abr=c('BTC','ETH','XRP')
fits_of_garch=list()          # speculation-fitted volatility, by position
fits_of_garch_better=list()   # same series, named by currency
cor(df[,-1])
models_all=list()             # fitted ugarchfit objects, named by currency
# 2. Loop over all currencies and calculate volatility, that was associated with speculative processes
# BUG FIX: the original script called tsdisplay(y_here) here, but y_here is
# only created inside the loop below, so a clean run aborted with
# "object 'y_here' not found". The premature call has been removed; run
# tsdisplay() interactively after the loop if the diagnostic plot is needed.
for (cryptos in crypto_abr){
  # cryptos='BTC'
  print(cryptos)
  # One single window covering the whole sample (steping = n - 1).
  steping=dim(df)[1]-1
  for (i in seq(1,dim(df)[1]-steping,steping)){
    # XRP uses the component sGARCH variant; BTC/ETH use plain sGARCH.
    if (cryptos=='XRP'){
      garch_mdel=list(model = "csGARCH",# external.regressors = as.matrix(ext_regressor_here),
                      garchOrder = c(1,1))
    }
    else{
      garch_mdel=list(model = "sGARCH", #external.regressors = as.matrix(ext_regressor_here),
                      garchOrder = c(1,1))
    }
    df_new=df[seq(i,i+steping,1),]
    dates=df_new[,grepl('date', colnames(df_new))]
    # 2.1 Prepare dep.variable y, that will be used in ARMAX-GARCH model
    y_here=df_new[,grepl(paste('R_',cryptos,sep=''), colnames(df_new)) | grepl('date', colnames(df_new)) ]
    y_here <- xts(y_here[,-1], order.by=as.POSIXct(y_here$date))
    # 2.2 Prepare exogenious variable, that will be used in ARMAX part of ARMAX-GARCH model
    ext_regressor_here=df_new[,grepl(paste('RV_',cryptos,sep=''), colnames(df_new))]
    # ext_regressor_here=abs(ext_regressor_here)*abs(ext_regressor_here)
    # 2.3 Describe ARMAX(1,1)-GARCH(1,1) model
    g1=ugarchspec(variance.model = garch_mdel,
                  mean.model = list(armaOrder = c(1,0), external.regressors = as.matrix(ext_regressor_here),
                                    include.mean = TRUE),
                  # mean.model = list(external.regressors = as.matrix(df_new[,c(2)])),
                  distribution.model = "std")
    # 2.4 Fit model with appropriate solvers
    g1fit=ugarchfit(g1,data=y_here,solver='hybrid')
    models_all[[cryptos]]<-list(g1fit)
    # 2.5 Prepare dataset for GARCH regression
    df_to_reg=cbind(g1fit@fit$sigma,ext_regressor_here)
    colnames(df_to_reg)=c(paste('sigma_',cryptos,sep=''),paste('RV_',cryptos,sep=''))
    df_to_reg=as.data.frame(df_to_reg)
    # 2.6 Fit regression model GARCH(1,1)~b0+b1*Speculation , where Speculation is the measure of speculation
    # as described in 'Blau M. Price dynamics and speculative trading in bitcoinBenjamin,2017'
    # and is based on 'Guillermo L. Dynamic Volume-Return Relation of Individual Stocks,2000'
    # The regressor is the speculation measure lagged by one period
    # (a leading 0 is prepended to align lengths).
    m1<-lm(df_to_reg[,1]~c(0,df_to_reg[-dim(df_to_reg)[1],2]),data = df_to_reg)
    # windows()
    # plot(df_to_reg[,1])
    print(summary(m1))
    # 2.7 Save volatility of a given cryptocyrrency, that is associated (caused by) with speculation
    fits_of_garch=append(fits_of_garch,list(m1$fitted.values))
    # fits_of_garch=append(fits_of_garch,list(g1fit@fit$sigma))
    fits_of_garch_better[[cryptos]]<-list(m1$fitted.values)
  }
}
# save(g1fit, file = paste('saved_models/',paste(cryptos,'GARCH_model.rds',sep='_'),sep=''))
save(models_all, file = paste('saved_models/','GARCH_model.rds',sep=''))
save(fits_of_garch_better, file = paste('saved_models/','fits_of_garch_better.rds',sep=''))
# 3 . Conduct Granger casuality test to test the H0, which is as follows:
# Volatility, associated with speculative processes on cryptocurrency X cause ( based on granger test)
# speculative volatility on cryptocurrency Y, where X and Y are currencies from c('BTC','ETH','XRP')
# List positions follow crypto_abr: 1 = BTC, 2 = ETH, 3 = XRP.
# 3.1. BTC -> ETH
grangertest(unlist(fits_of_garch[2]) ~ unlist(fits_of_garch[1]), order = 3) #0.194 H0 rejected #0.16
# 3.2. ETH -> BTC
grangertest(unlist(fits_of_garch[1]) ~ unlist(fits_of_garch[2]), order = 3) #0.001692 ** H0 not rejected 0.001936 **
grangertest(unlist(fits_of_garch[1]) ~ unlist(fits_of_garch[2]), order = 1) #0.001692 ** H0 not rejected 0.001936 **
# 3.3. BTC -> XRP
grangertest(unlist(fits_of_garch[3]) ~ unlist(fits_of_garch[1]), order = 5) #0.8227 H0 rejected
# 3.4. XRP -> BTC
grangertest(unlist(fits_of_garch[1]) ~ unlist(fits_of_garch[3]), order = 3) #0.8551 H0 rejected
# 3.3. ETH -> XRP
grangertest(unlist(fits_of_garch[3]) ~ unlist(fits_of_garch[2]), order = 3) #0.03617 * H0 not rejected
# 3.4. XRP -> ETH
grangertest(unlist(fits_of_garch[2]) ~ unlist(fits_of_garch[3]), order = 1) # 0.6793 H0 rejected
|
/masters_work.R
|
no_license
|
ssh352/Speculation-and-volatility-of-cryptocurrencies
|
R
| false
| false
| 4,459
|
r
|
## Speculation-driven volatility analysis for BTC / ETH / XRP.
## Per currency: fit an AR(1)-X + GARCH(1,1) model on returns, regress the
## fitted conditional volatility on a lagged speculation measure, and run
## Granger causality tests between the speculation-driven volatility series.
library(forecast)
library(quantmod)
library(timeSeries)
library(tseries)
library(xts)
library(lmtest)
library(rugarch)
source('funcs.R')
# 1. Prepare overall data
df=read.csv('datasets_created_python/merged_all.csv')
df$date=as.POSIXct(as.Date(df$date))
# Drop the first 50 rows (warm-up period of the merged series).
df=df[seq(51,dim(df)[1],1),]
summary(df)
crypto_abr=c('BTC','ETH','XRP')
fits_of_garch=list()          # speculation-fitted volatility, by position
fits_of_garch_better=list()   # same series, named by currency
cor(df[,-1])
models_all=list()             # fitted ugarchfit objects, named by currency
# 2. Loop over all currencies and calculate volatility, that was associated with speculative processes
# NOTE(review): y_here is only created inside the loop below, so on a clean
# session this call fails with "object 'y_here' not found" -- it looks like
# leftover interactive code and should be removed or moved into the loop.
tsdisplay(y_here)
for (cryptos in crypto_abr){
  # cryptos='BTC'
  print(cryptos)
  # One single window covering the whole sample (steping = n - 1).
  steping=dim(df)[1]-1
  for (i in seq(1,dim(df)[1]-steping,steping)){
    # XRP uses the component sGARCH variant; BTC/ETH use plain sGARCH.
    if (cryptos=='XRP'){
      garch_mdel=list(model = "csGARCH",# external.regressors = as.matrix(ext_regressor_here),
                      garchOrder = c(1,1))
    }
    else{
      garch_mdel=list(model = "sGARCH", #external.regressors = as.matrix(ext_regressor_here),
                      garchOrder = c(1,1))
    }
    df_new=df[seq(i,i+steping,1),]
    dates=df_new[,grepl('date', colnames(df_new))]
    # 2.1 Prepare dep.variable y, that will be used in ARMAX-GARCH model
    y_here=df_new[,grepl(paste('R_',cryptos,sep=''), colnames(df_new)) | grepl('date', colnames(df_new)) ]
    y_here <- xts(y_here[,-1], order.by=as.POSIXct(y_here$date))
    # 2.2 Prepare exogenious variable, that will be used in ARMAX part of ARMAX-GARCH model
    ext_regressor_here=df_new[,grepl(paste('RV_',cryptos,sep=''), colnames(df_new))]
    # ext_regressor_here=abs(ext_regressor_here)*abs(ext_regressor_here)
    # 2.3 Describe ARMAX(1,1)-GARCH(1,1) model
    g1=ugarchspec(variance.model = garch_mdel,
                  mean.model = list(armaOrder = c(1,0), external.regressors = as.matrix(ext_regressor_here),
                                    include.mean = TRUE),
                  # mean.model = list(external.regressors = as.matrix(df_new[,c(2)])),
                  distribution.model = "std")
    # 2.4 Fit model with appropriate solvers
    g1fit=ugarchfit(g1,data=y_here,solver='hybrid')
    models_all[[cryptos]]<-list(g1fit)
    # 2.5 Prepare dataset for GARCH regression
    df_to_reg=cbind(g1fit@fit$sigma,ext_regressor_here)
    colnames(df_to_reg)=c(paste('sigma_',cryptos,sep=''),paste('RV_',cryptos,sep=''))
    df_to_reg=as.data.frame(df_to_reg)
    # 2.6 Fit regression model GARCH(1,1)~b0+b1*Speculation , where Speculation is the measure of speculation
    # as described in 'Blau M. Price dynamics and speculative trading in bitcoinBenjamin,2017'
    # and is based on 'Guillermo L. Dynamic Volume-Return Relation of Individual Stocks,2000'
    # The regressor is the speculation measure lagged by one period
    # (a leading 0 is prepended to align lengths).
    m1<-lm(df_to_reg[,1]~c(0,df_to_reg[-dim(df_to_reg)[1],2]),data = df_to_reg)
    # windows()
    # plot(df_to_reg[,1])
    print(summary(m1))
    # 2.7 Save volatility of a given cryptocyrrency, that is associated (caused by) with speculation
    fits_of_garch=append(fits_of_garch,list(m1$fitted.values))
    # fits_of_garch=append(fits_of_garch,list(g1fit@fit$sigma))
    fits_of_garch_better[[cryptos]]<-list(m1$fitted.values)
  }
}
# save(g1fit, file = paste('saved_models/',paste(cryptos,'GARCH_model.rds',sep='_'),sep=''))
save(models_all, file = paste('saved_models/','GARCH_model.rds',sep=''))
save(fits_of_garch_better, file = paste('saved_models/','fits_of_garch_better.rds',sep=''))
# 3 . Conduct Granger casuality test to test the H0, which is as follows:
# Volatility, associated with speculative processes on cryptocurrency X cause ( based on granger test)
# speculative volatility on cryptocurrency Y, where X and Y are currencies from c('BTC','ETH','XRP')
# List positions follow crypto_abr: 1 = BTC, 2 = ETH, 3 = XRP.
# 3.1. BTC -> ETH
grangertest(unlist(fits_of_garch[2]) ~ unlist(fits_of_garch[1]), order = 3) #0.194 H0 rejected #0.16
# 3.2. ETH -> BTC
grangertest(unlist(fits_of_garch[1]) ~ unlist(fits_of_garch[2]), order = 3) #0.001692 ** H0 not rejected 0.001936 **
grangertest(unlist(fits_of_garch[1]) ~ unlist(fits_of_garch[2]), order = 1) #0.001692 ** H0 not rejected 0.001936 **
# 3.3. BTC -> XRP
grangertest(unlist(fits_of_garch[3]) ~ unlist(fits_of_garch[1]), order = 5) #0.8227 H0 rejected
# 3.4. XRP -> BTC
grangertest(unlist(fits_of_garch[1]) ~ unlist(fits_of_garch[3]), order = 3) #0.8551 H0 rejected
# 3.3. ETH -> XRP
grangertest(unlist(fits_of_garch[3]) ~ unlist(fits_of_garch[2]), order = 3) #0.03617 * H0 not rejected
# 3.4. XRP -> ETH
grangertest(unlist(fits_of_garch[2]) ~ unlist(fits_of_garch[3]), order = 1) # 0.6793 H0 rejected
|
## This script creates a "legoplot" similar to those produced by the Broad Institute
## The plot shows the relative abundance of each of the 6 possible mutations in the
## 16 sequence contexts
## Load packages
library(rgl)
#### START OF FUNCTIONS
## Functions modified from the "demo(hist3d)" examples in the rgl package:
# library(rgl)
# demo(hist3d)
## Note; would it have killed the original author to comment their code?
## Draws a single "column" or "stack".
## X and Y coordinates determine the area of the column
## The Z coordinate determines the height of the column
## We include "lit=FALSE" arguments to remove the nasty shiny surfaces caused by lighting
# Draw one 3D column of a lego/bar plot on the active rgl device.
#
# Args:
#   x, y     length-2 vectors giving the column's footprint corners.
#   z        heights: in mode 'm2' a length-2 vector (bottom, top); in mode
#            'm5' a length-5 vector (min, q1, mean, q3, max) -- in 'm5' the
#            solid box is drawn from min to mean, with q1/q3 marked as edge
#            endpoints.
#   alpha    transparency of the side faces.
#   topcol   colour of the top (and bottom) face.
#   sidecol  colour of the side faces.
#   mode     'm2' or 'm5' (see `z`).
# Side effects only (rgl primitives); no return value.
# NOTE(review): the hard-coded bg.* point clouds in 'm5' draw a fixed
# background grid -- coordinates are tied to the barplot3d() layout below.
stackplot.3d<-function(x,y,z,alpha=1,topcol="#078E53",sidecol="#aaaaaa",mode='m5'){
  # Unpack the height vector according to the mode.
  if(mode=='m2'){
    z.bot = z[1]
    z.top = z[2]
  }else if(mode=='m5'){
    z.bot = z[1]
    z.q1 = z[2]
    z.mean=z[3]
    z.q3=z[4]
    z.top = z[5]
  }
  ## These lines allow the active rgl device to be updated with multiple changes
  ## This is necessary to draw the sides and ends of the column separately
  save <- par3d(skipRedraw=TRUE)
  on.exit(par3d(save))
  if(mode=='m2'){
    ## Determine the coordinates of each surface of the column and its edges
    # (x1, z1, y1) are quad vertices for the 5 faces; (x2, z2, y2) are the
    # segment endpoints for the 12 black edges.
    x1=c(rep(c(x[1],x[2],x[2],x[1]),3),rep(x[1],4),rep(x[2],4))
    z1=c(rep(z.bot,4),rep(c(z.bot,z.bot,z.top,z.top),4))
    y1=c(y[1],y[1],y[2],y[2],rep(y[1],4),rep(y[2],4),rep(c(y[1],y[2],y[2],y[1]),2))
    x2=c(rep(c(x[1],x[1],x[2],x[2]),2),rep(c(x[1],x[2],rep(x[1],3),rep(x[2],3)),2))
    z2=c(rep(c(z.bot,z.top),4),rep(z.bot,8),rep(z.top,8) )
    y2=c(rep(y[1],4),rep(y[2],4),rep(c(rep(y[1],3),rep(y[2],3),y[1],y[2]),2) )
    ## These lines create the sides of the column and its coloured top surface
    rgl.quads(x1,z1,y1,col=rep(sidecol,each=4),alpha=alpha,lit=FALSE)
    rgl.quads(c(x[1],x[2],x[2],x[1]),rep(z.top,4),c(y[1],y[1],y[2],y[2]),
              col=rep(topcol,each=4),alpha=1,lit=FALSE)
    rgl.quads(c(x[1],x[2],x[2],x[1]),rep(z.bot,4),c(y[1],y[1],y[2],y[2]),
              col=rep(topcol,each=4),alpha=1,lit=FALSE)
    ## This line adds black edges to the column
    rgl.lines(x2,z2,y2,col="#000000",lit=FALSE)
  }else if(mode=='m5'){
    ## Determine the coordinates of each surface of the column and its edges
    # The solid box spans z.bot..z.mean; x2/z2/y2 additionally carry a
    # vertical whisker segment from z.q1 to z.q3 at the footprint centre.
    x1=c(rep(c(x[1],x[2],x[2],x[1]),3),rep(x[1],4),rep(x[2],4))
    z1=c(rep(z.bot,4),rep(c(z.bot,z.bot,z.mean,z.mean),4))
    y1=c(y[1],y[1],y[2],y[2],rep(y[1],4),rep(y[2],4),rep(c(y[1],y[2],y[2],y[1]),2))
    x2=c(rep(c(x[1],x[1],x[2],x[2]),2),rep(c(x[1],x[2],rep(x[1],3),rep(x[2],3)),2),
         rep((x[1]+x[2])/2,2))
    z2=c(rep(c(z.bot,z.mean),4),rep(z.bot,8),rep(z.mean,8), z.q1,z.q3)
    y2=c(rep(y[1],4),rep(y[2],4),rep(c(rep(y[1],3),rep(y[2],3),y[1],y[2]),2),
         rep((y[1]+y[2])/2,2))
    ## These lines create the sides of the column and its coloured top surface
    ## Side surfaces of the main box
    rgl.quads(x1,z1,y1,col=rep(sidecol,each=4),alpha=alpha,lit=FALSE)
    ## Top and bottom surfaces of the main box
    rgl.quads(c(x[1],x[2],x[2],x[1]),rep(z.mean,4),c(y[1],y[1],y[2],y[2]),
              col=rep(topcol,each=4),alpha=1,lit=FALSE)
    # rgl.quads(c(x[1],x[2],x[2],x[1]),rep(z.q1,4),c(y[1],y[1],y[2],y[2]),
    #           col=rep(topcol,each=4),alpha=1,lit=FALSE)
    ## Max and min surfaces
    # rgl.quads(c(x[1],x[2],x[2],x[1]),rep(z.top,4),c(y[1],y[1],y[2],y[2]),
    #           col=rep(topcol,each=4),alpha=.2,lit=FALSE)
    # rgl.quads(c(x[1],x[2],x[2],x[1]),rep(z.bot,4),c(y[1],y[1],y[2],y[2]),
    #           col=rep(topcol,each=4),alpha=.2,lit=FALSE)
    ## This line adds black edges to the column
    rgl.lines(x2,z2,y2,col="#000000",lit=FALSE)
    # bg.x = c(rep(c(10,10),4),rep(c(10,70),4))
    # bg.y = c(rep(rep(c(80,120,160,200),each = 2),2))
    # bg.z = c(rep(c(-10,-100),4),rep(c(-100,-100),4))
    # rgl.lines(bg.x,bg.y,bg.z,col="#000000",lit=FALSE)
    # Dotted background grid (fixed coordinates matching the plot layout).
    bg.x = rep(c(rep(10,56),seq(10,70,2)),4)
    bg.y = rep(c(80,120,160,200),each=87)
    bg.z = rep(c(seq(-10,-120,-2),rep(-120,31)),4)
    rgl.points(bg.x,bg.y,bg.z,col="#000000",lit=FALSE,size = 0.3)
  }
}
# Example:
# stackplot.3d(c(0,1),c(0,1),3,alpha=0.6)
## Calls stackplot.3d repeatedly to create a barplot
## z.top is the heights of the columns and must be an appropriately named vector
# Draw a 3D "lego" barplot from a matrix of column heights, laid out on a
# fixed 9 x 6 grid, by calling stackplot.3d() once per cell.
#
# Args:
#   z            numeric matrix with one row per column to draw; must have
#                exactly 2 columns in mode 'm2' (bottom, top) or 5 columns
#                in mode 'm5' (min, q1, mean, q3, max).
#   alpha        transparency passed through to stackplot.3d().
#   scalexy      horizontal scale factor for cell size and spacing.
#   scalez       vertical scale factor applied to the heights.
#   gap          gap between columns, as a fraction of scalexy.
#   mode         'm2' or 'm5' (see stackplot.3d()).
#   gap.sce.mode if TRUE (mode 'm5' only), insert an extra gap after every
#                3 rows to visually group "scenarios".
# Side effects only: renders into the active rgl device and draws axes with
# hard-coded tick positions/labels (psi, sigma_phi, Abundance).
# NOTE(review): rows are coloured in 3 groups of 18 -- assumes nrow(z) <= 54.
barplot3d<-function(z,alpha=1,scalexy=10,scalez=1,gap=0.2,mode='m5',gap.sce.mode=TRUE){
  ## These lines allow the active rgl device to be updated with multiple changes
  ## This is necessary to add each column sequentially
  # Validate the shape of z against the requested mode.
  if(mode=='m2'){
    if(dim(z)[2] != 2){
      return(print('2 columns are expected!'))
    }
    z=z[,c(1,2)]
  }else if(mode=='m5'){
    if(dim(z)[2]!=5){
      return(print('5 columns are expected!'))
    }
    z=z[,c(1:5)]
  }else{
    return(print('Pls specify mode!'))
  }
  save <- par3d(skipRedraw=TRUE)
  on.exit(par3d(save))
  # ## Recreate Broad order
  # types=c("Low",'Intermediate','High')
  # contexts=c("jc1p1","jc1p2","jc1p3","jc1p4","jc1p5","jc1p6",
  #            "jc2p1","jc2p2","jc2p3","jc2p4","jc2p5","jc2p6",
  #            "jc3p1","jc3p2","jc3p3","jc3p4","jc3p5","jc3p6")
  # typeorder=c()
  # for(type in types){
  #   typeorder=c(typeorder,paste(type,contexts,sep="_"))
  # }
  # names(z.top)=typeorder
  # names(z.bot)=typeorder
  ## Reorder data into 6 regions
  # Identity ordering: rows of z are drawn in their given order.
  neworder=c(1:nrow(z))
  ## Define dimensions of the plot
  dimensions=c(9,6)
  ## Scale column area and the gap between columns
  y=seq(1,dimensions[1]+2)*scalexy
  x=seq(1,dimensions[2])*scalexy
  gap=gap*scalexy
  z = z*scalez
  ## Set up colour palette
  # Three colours, one per block of 18 rows.
  broadcolors=c("#8CD790","#EFDC05","#30A9DE")
  colors=as.vector(sapply(broadcolors,rep,18))
  ## Scale z.top coordinate
  if(mode=='m2'){
    ## Plot each of the columns
    for(i in 1:dimensions[1]){
      for(j in 1:dimensions[2]){
        # Variable to work out which column to plot
        it=(i-1)*dimensions[2]+j
        stackplot.3d(c(gap+x[j],x[j]+scalexy),
                     c(-gap-y[i],-y[i]-scalexy),
                     z[neworder[it],],
                     alpha=alpha,
                     topcol=colors[neworder[it]],
                     sidecol=colors[neworder[it]],
                     mode=mode)
      }
    }
  }else if(mode=='m5'){
    ## Plot each of the columns
    for(i in 1:dimensions[1]){
      for(j in 1:dimensions[2]){
        it=(i-1)*dimensions[2]+j # Variable to work out which column to plot; counts from 1:96
        # Extra spacing between scenario groups of 3 rows.
        if(gap.sce.mode==TRUE)gap.sce = (i-1)%/%3*scalexy
        else gap.sce=0
        stackplot.3d(c(gap+x[j],x[j]+scalexy),
                     c(-gap-y[i]-gap.sce,-y[i]-scalexy-gap.sce),
                     z[neworder[it],],
                     alpha=alpha,
                     topcol=colors[neworder[it]],
                     sidecol=colors[neworder[it]],
                     mode=mode)
      }
    }
  }
  ## Set the viewpoint and add axes and labels
  ## theta: the horizontal angle phi: the vertical angle
  rgl.viewpoint(theta=70,phi=35,fov=30)
  axes3d("y-+",labels=TRUE,at=seq(80,200,40),nticks=4,lwd=2)
  # axis for phi
  zlabels <- c('0','0.5','1')
  axes3d("z+-", labels=zlabels,nticks=3,at=seq(-15,-35,-10),lwd=2)
  # axis for sigma_phi
  xlabels <- c('0','1e2','1e4','1e6','1e8','Inf')
  axis3d("x-+",nticks=6,at=seq(15,65,10),labels=xlabels,lwd=2)
  text3d(matrix(c(0,105,40,180,80,80,-40,-25,20),ncol=3),
         texts=c('Abundance',expression(psi), expression(sigma[phi]) ),
         cex = 2)
}
|
/Pro3/R_p3/barplot3d.R
|
no_license
|
xl0418/Code
|
R
| false
| false
| 7,498
|
r
|
## This script creates a "legoplot" similar to those produced by the Broad Institute
## The plot shows the relative abundance of each of the 6 possible mutations in the
## 16 sequence contexts
## Load packages
library(rgl)
#### START OF FUNCTIONS
## Functions modified from the "demo(hist3d)" examples in the rgl package:
# library(rgl)
# demo(hist3d)
## Note; would it have killed the original author to comment their code?
## Draws a single "column" or "stack".
## X and Y coordinates determine the area of the column
## The Z coordinate determines the height of the column
## We include "lit=FALSE" arguments to remove the nasty shiny surfaces caused by lighting
# Draw one 3D column of a lego/bar plot on the active rgl device.
#
# Args:
#   x, y     length-2 vectors giving the column's footprint corners.
#   z        heights: in mode 'm2' a length-2 vector (bottom, top); in mode
#            'm5' a length-5 vector (min, q1, mean, q3, max) -- in 'm5' the
#            solid box is drawn from min to mean, with q1/q3 marked as edge
#            endpoints.
#   alpha    transparency of the side faces.
#   topcol   colour of the top (and bottom) face.
#   sidecol  colour of the side faces.
#   mode     'm2' or 'm5' (see `z`).
# Side effects only (rgl primitives); no return value.
# NOTE(review): the hard-coded bg.* point clouds in 'm5' draw a fixed
# background grid -- coordinates are tied to the barplot3d() layout below.
stackplot.3d<-function(x,y,z,alpha=1,topcol="#078E53",sidecol="#aaaaaa",mode='m5'){
  # Unpack the height vector according to the mode.
  if(mode=='m2'){
    z.bot = z[1]
    z.top = z[2]
  }else if(mode=='m5'){
    z.bot = z[1]
    z.q1 = z[2]
    z.mean=z[3]
    z.q3=z[4]
    z.top = z[5]
  }
  ## These lines allow the active rgl device to be updated with multiple changes
  ## This is necessary to draw the sides and ends of the column separately
  save <- par3d(skipRedraw=TRUE)
  on.exit(par3d(save))
  if(mode=='m2'){
    ## Determine the coordinates of each surface of the column and its edges
    # (x1, z1, y1) are quad vertices for the 5 faces; (x2, z2, y2) are the
    # segment endpoints for the 12 black edges.
    x1=c(rep(c(x[1],x[2],x[2],x[1]),3),rep(x[1],4),rep(x[2],4))
    z1=c(rep(z.bot,4),rep(c(z.bot,z.bot,z.top,z.top),4))
    y1=c(y[1],y[1],y[2],y[2],rep(y[1],4),rep(y[2],4),rep(c(y[1],y[2],y[2],y[1]),2))
    x2=c(rep(c(x[1],x[1],x[2],x[2]),2),rep(c(x[1],x[2],rep(x[1],3),rep(x[2],3)),2))
    z2=c(rep(c(z.bot,z.top),4),rep(z.bot,8),rep(z.top,8) )
    y2=c(rep(y[1],4),rep(y[2],4),rep(c(rep(y[1],3),rep(y[2],3),y[1],y[2]),2) )
    ## These lines create the sides of the column and its coloured top surface
    rgl.quads(x1,z1,y1,col=rep(sidecol,each=4),alpha=alpha,lit=FALSE)
    rgl.quads(c(x[1],x[2],x[2],x[1]),rep(z.top,4),c(y[1],y[1],y[2],y[2]),
              col=rep(topcol,each=4),alpha=1,lit=FALSE)
    rgl.quads(c(x[1],x[2],x[2],x[1]),rep(z.bot,4),c(y[1],y[1],y[2],y[2]),
              col=rep(topcol,each=4),alpha=1,lit=FALSE)
    ## This line adds black edges to the column
    rgl.lines(x2,z2,y2,col="#000000",lit=FALSE)
  }else if(mode=='m5'){
    ## Determine the coordinates of each surface of the column and its edges
    # The solid box spans z.bot..z.mean; x2/z2/y2 additionally carry a
    # vertical whisker segment from z.q1 to z.q3 at the footprint centre.
    x1=c(rep(c(x[1],x[2],x[2],x[1]),3),rep(x[1],4),rep(x[2],4))
    z1=c(rep(z.bot,4),rep(c(z.bot,z.bot,z.mean,z.mean),4))
    y1=c(y[1],y[1],y[2],y[2],rep(y[1],4),rep(y[2],4),rep(c(y[1],y[2],y[2],y[1]),2))
    x2=c(rep(c(x[1],x[1],x[2],x[2]),2),rep(c(x[1],x[2],rep(x[1],3),rep(x[2],3)),2),
         rep((x[1]+x[2])/2,2))
    z2=c(rep(c(z.bot,z.mean),4),rep(z.bot,8),rep(z.mean,8), z.q1,z.q3)
    y2=c(rep(y[1],4),rep(y[2],4),rep(c(rep(y[1],3),rep(y[2],3),y[1],y[2]),2),
         rep((y[1]+y[2])/2,2))
    ## These lines create the sides of the column and its coloured top surface
    ## Side surfaces of the main box
    rgl.quads(x1,z1,y1,col=rep(sidecol,each=4),alpha=alpha,lit=FALSE)
    ## Top and bottom surfaces of the main box
    rgl.quads(c(x[1],x[2],x[2],x[1]),rep(z.mean,4),c(y[1],y[1],y[2],y[2]),
              col=rep(topcol,each=4),alpha=1,lit=FALSE)
    # rgl.quads(c(x[1],x[2],x[2],x[1]),rep(z.q1,4),c(y[1],y[1],y[2],y[2]),
    #           col=rep(topcol,each=4),alpha=1,lit=FALSE)
    ## Max and min surfaces
    # rgl.quads(c(x[1],x[2],x[2],x[1]),rep(z.top,4),c(y[1],y[1],y[2],y[2]),
    #           col=rep(topcol,each=4),alpha=.2,lit=FALSE)
    # rgl.quads(c(x[1],x[2],x[2],x[1]),rep(z.bot,4),c(y[1],y[1],y[2],y[2]),
    #           col=rep(topcol,each=4),alpha=.2,lit=FALSE)
    ## This line adds black edges to the column
    rgl.lines(x2,z2,y2,col="#000000",lit=FALSE)
    # bg.x = c(rep(c(10,10),4),rep(c(10,70),4))
    # bg.y = c(rep(rep(c(80,120,160,200),each = 2),2))
    # bg.z = c(rep(c(-10,-100),4),rep(c(-100,-100),4))
    # rgl.lines(bg.x,bg.y,bg.z,col="#000000",lit=FALSE)
    # Dotted background grid (fixed coordinates matching the plot layout).
    bg.x = rep(c(rep(10,56),seq(10,70,2)),4)
    bg.y = rep(c(80,120,160,200),each=87)
    bg.z = rep(c(seq(-10,-120,-2),rep(-120,31)),4)
    rgl.points(bg.x,bg.y,bg.z,col="#000000",lit=FALSE,size = 0.3)
  }
}
# Example:
# stackplot.3d(c(0,1),c(0,1),3,alpha=0.6)
## Calls stackplot.3d repeatedly to create a barplot
## z.top is the heights of the columns and must be an appropriately named vector
# Draw a 3D "lego" barplot from a matrix of column heights, laid out on a
# fixed 9 x 6 grid, by calling stackplot.3d() once per cell.
#
# Args:
#   z            numeric matrix with one row per column to draw; must have
#                exactly 2 columns in mode 'm2' (bottom, top) or 5 columns
#                in mode 'm5' (min, q1, mean, q3, max).
#   alpha        transparency passed through to stackplot.3d().
#   scalexy      horizontal scale factor for cell size and spacing.
#   scalez       vertical scale factor applied to the heights.
#   gap          gap between columns, as a fraction of scalexy.
#   mode         'm2' or 'm5' (see stackplot.3d()).
#   gap.sce.mode if TRUE (mode 'm5' only), insert an extra gap after every
#                3 rows to visually group "scenarios".
# Side effects only: renders into the active rgl device and draws axes with
# hard-coded tick positions/labels (psi, sigma_phi, Abundance).
# NOTE(review): rows are coloured in 3 groups of 18 -- assumes nrow(z) <= 54.
barplot3d<-function(z,alpha=1,scalexy=10,scalez=1,gap=0.2,mode='m5',gap.sce.mode=TRUE){
  ## These lines allow the active rgl device to be updated with multiple changes
  ## This is necessary to add each column sequentially
  # Validate the shape of z against the requested mode.
  if(mode=='m2'){
    if(dim(z)[2] != 2){
      return(print('2 columns are expected!'))
    }
    z=z[,c(1,2)]
  }else if(mode=='m5'){
    if(dim(z)[2]!=5){
      return(print('5 columns are expected!'))
    }
    z=z[,c(1:5)]
  }else{
    return(print('Pls specify mode!'))
  }
  save <- par3d(skipRedraw=TRUE)
  on.exit(par3d(save))
  # ## Recreate Broad order
  # types=c("Low",'Intermediate','High')
  # contexts=c("jc1p1","jc1p2","jc1p3","jc1p4","jc1p5","jc1p6",
  #            "jc2p1","jc2p2","jc2p3","jc2p4","jc2p5","jc2p6",
  #            "jc3p1","jc3p2","jc3p3","jc3p4","jc3p5","jc3p6")
  # typeorder=c()
  # for(type in types){
  #   typeorder=c(typeorder,paste(type,contexts,sep="_"))
  # }
  # names(z.top)=typeorder
  # names(z.bot)=typeorder
  ## Reorder data into 6 regions
  # Identity ordering: rows of z are drawn in their given order.
  neworder=c(1:nrow(z))
  ## Define dimensions of the plot
  dimensions=c(9,6)
  ## Scale column area and the gap between columns
  y=seq(1,dimensions[1]+2)*scalexy
  x=seq(1,dimensions[2])*scalexy
  gap=gap*scalexy
  z = z*scalez
  ## Set up colour palette
  # Three colours, one per block of 18 rows.
  broadcolors=c("#8CD790","#EFDC05","#30A9DE")
  colors=as.vector(sapply(broadcolors,rep,18))
  ## Scale z.top coordinate
  if(mode=='m2'){
    ## Plot each of the columns
    for(i in 1:dimensions[1]){
      for(j in 1:dimensions[2]){
        # Variable to work out which column to plot
        it=(i-1)*dimensions[2]+j
        stackplot.3d(c(gap+x[j],x[j]+scalexy),
                     c(-gap-y[i],-y[i]-scalexy),
                     z[neworder[it],],
                     alpha=alpha,
                     topcol=colors[neworder[it]],
                     sidecol=colors[neworder[it]],
                     mode=mode)
      }
    }
  }else if(mode=='m5'){
    ## Plot each of the columns
    for(i in 1:dimensions[1]){
      for(j in 1:dimensions[2]){
        it=(i-1)*dimensions[2]+j # Variable to work out which column to plot; counts from 1:96
        # Extra spacing between scenario groups of 3 rows.
        if(gap.sce.mode==TRUE)gap.sce = (i-1)%/%3*scalexy
        else gap.sce=0
        stackplot.3d(c(gap+x[j],x[j]+scalexy),
                     c(-gap-y[i]-gap.sce,-y[i]-scalexy-gap.sce),
                     z[neworder[it],],
                     alpha=alpha,
                     topcol=colors[neworder[it]],
                     sidecol=colors[neworder[it]],
                     mode=mode)
      }
    }
  }
  ## Set the viewpoint and add axes and labels
  ## theta: the horizontal angle phi: the vertical angle
  rgl.viewpoint(theta=70,phi=35,fov=30)
  axes3d("y-+",labels=TRUE,at=seq(80,200,40),nticks=4,lwd=2)
  # axis for phi
  zlabels <- c('0','0.5','1')
  axes3d("z+-", labels=zlabels,nticks=3,at=seq(-15,-35,-10),lwd=2)
  # axis for sigma_phi
  xlabels <- c('0','1e2','1e4','1e6','1e8','Inf')
  axis3d("x-+",nticks=6,at=seq(15,65,10),labels=xlabels,lwd=2)
  text3d(matrix(c(0,105,40,180,80,80,-40,-25,20),ncol=3),
         texts=c('Abundance',expression(psi), expression(sigma[phi]) ),
         cex = 2)
}
|
\name{SNPsm} % DESCRIPTION OF FUNCTION SNPsm, 23.10.2012
\alias{SNPsm}
\alias{SNPsm.default}
\alias{plot.SNPsm}
\alias{SNPsm2}
\title{
The spatial and temporal model of succession in the Swiss National Park
}
\description{
A dynamic model of succession on alp Stabelchod in the Swiss National Park using differential equations and numerical integration. 6 species guilds are considered. Space is conceived as a grid of 30 times 40 cells. Typical simulation time is around 500yr.
}
\usage{
SNPsm(trange,tsl,diff,r6,...)
SNPsm2(trange=100,tsl=5.0,diff=0.001,r6=NULL)
\method{SNPsm}{default}(trange, tsl, diff, r6, ...)
\method{plot}{SNPsm}(x, ...,out.seq=1,col=FALSE)
}
\arguments{
\item{trange}{
Time range of simulation in yr
}
\item{tsl}{
Time step length used for numerical integration, in yr
}
\item{out.seq}{
Time interval (yr) at which maps of the state are printed
}
\item{diff}{
A diffusion coefficient driving random spatial propagation
}
\item{r6}{
Growth rates of 6 guilds involved, increase in cover percentage per yr
}
\item{\dots}{
Parameter out.seq, the plotting interval
}
\item{x}{
An object of class "SNPsm"
}
\item{col}{
A logical variable to suppress color printing
}
}
\value{
An object of class "SNPsm" with at least the following items:
\item{n.time.steps }{Number of time steps used for numerical integration}
\item{imax }{Vertical grid count}
\item{jmax }{Horizontal grid count}
\item{time.step.length }{The time step length in yr}
\item{veg.types }{The names of the vegetation types, i.e., the species}
\item{vegdef }{A nspecies x nspecies matrix defining composition of vegetation types}
\item{growth.rates }{The growth rates given upon input}
\item{sim.data}{Simulated scores of all species (guilds) during simulation time}
\item{tmap}{The 30 x 40 grid map of types used as initial condition}
\item{igmap}{The same as tmap}
\item{frame}{A 30 x 40 grid showing initial forest edges, used for printing}
}
\references{
Wildi, O. 2002. Modeling succession from pasture to forest in time and space. Community Ecology 3: 181--189.
Wildi, O. 2017. Data Analysis in Vegetation Ecology. 3rd ed. CABI, Oxfordshire, Boston.
}
\author{
Otto Wildi
}
\examples{
r6=NULL # imposes default growth rates
o.stSNP<- SNPsm(trange=100,tsl=10.0,diff=0.001,r6)
plot(o.stSNP,out.seq=50)
}
\keyword{ models }
\keyword{ multivariate }
|
/man/SNPsm.Rd
|
no_license
|
cran/dave
|
R
| false
| false
| 2,349
|
rd
|
\name{SNPsm} % DESCRIPTION OF FUNCTION SNPsm, 23.10.2012
\alias{SNPsm}
\alias{SNPsm.default}
\alias{plot.SNPsm}
\alias{SNPsm2}
\title{
The spatial and temporal model of succession in the Swiss National Park
}
\description{
A dynamic model of succession on alp Stabelchod in the Swiss National Park using differential equations and numerical integration. 6 species guilds are considered. Space is conceived as a grid of 30 times 40 cells. Typical simulation time is around 500yr.
}
\usage{
SNPsm(trange,tsl,diff,r6,...)
SNPsm2(trange=100,tsl=5.0,diff=0.001,r6=NULL)
\method{SNPsm}{default}(trange, tsl, diff, r6, ...)
\method{plot}{SNPsm}(x, ...,out.seq=1,col=FALSE)
}
\arguments{
\item{trange}{
Time range of simulation in yr
}
\item{tsl}{
Time step length used for numerical integration, in yr
}
\item{out.seq}{
Time interval (yr) at which maps of the state are printed
}
\item{diff}{
A diffusion coefficient driving random spatial propagation
}
\item{r6}{
Growth rates of 6 guilds involved, increase in cover percentage per yr
}
\item{\dots}{
Parameter out.seq, the plotting interval
}
\item{x}{
An object of class "SNPsm"
}
\item{col}{
A logical variable to suppress color printing
}
}
\value{
An object of class "SNPsm" with at least the following items:
\item{n.time.steps }{Number of time steps used for numerical integration}
\item{imax }{Vertical grid count}
\item{jmax }{Horizontal grid count}
\item{time.step.length }{The time step length in yr}
\item{veg.types }{The names of the vegetation types, i.e., the species}
\item{vegdef }{A nspecies x nspecies matrix defining composition of vegetation types}
\item{growth.rates }{The growth rates given upon input}
\item{sim.data}{Simulated scores of all species (guilds) during simulation time}
\item{tmap}{The 30 x 40 grid map of types used as initial condition}
\item{igmap}{The same as tmap}
\item{frame}{A 30 x 40 grid showing initial forest edges, used for printing}
}
\references{
Wildi, O. 2002. Modeling succession from pasture to forest in time and space. Community Ecology 3: 181--189.
Wildi, O. 2017. Data Analysis in Vegetation Ecology. 3rd ed. CABI, Oxfordshire, Boston.
}
\author{
Otto Wildi
}
\examples{
r6=NULL # imposes default growth rates
o.stSNP<- SNPsm(trange=100,tsl=10.0,diff=0.001,r6)
plot(o.stSNP,out.seq=50)
}
\keyword{ models }
\keyword{ multivariate }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all_class.R
\docType{class}
\name{NIMLSurfaceDataMetaInfo-class}
\alias{NIMLSurfaceDataMetaInfo-class}
\title{NIMLSurfaceDataMetaInfo}
\description{
This class contains meta information for surface-based data for the NIML data format
}
\section{Slots}{
\describe{
\item{\code{data}}{the numeric data matrix of surface values (rows = nodes, columns=surface vectors)}
\item{\code{node_indices}}{the indices of the nodes for mapping to associated surface geometry.}
}}
|
/man/NIMLSurfaceDataMetaInfo-class.Rd
|
no_license
|
bbuchsbaum/neurosurf
|
R
| false
| true
| 547
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all_class.R
\docType{class}
\name{NIMLSurfaceDataMetaInfo-class}
\alias{NIMLSurfaceDataMetaInfo-class}
\title{NIMLSurfaceDataMetaInfo}
\description{
This class contains meta information for surface-based data for the NIML data format
}
\section{Slots}{
\describe{
\item{\code{data}}{the numeric data matrix of surface values (rows = nodes, columns=surface vectors)}
\item{\code{node_indices}}{the indices of the nodes for mapping to associated surface geometry.}
}}
|
## Entropy profile of the SILVA EUK alignment and primer-binding ranges.
## Reads the alignment, computes per-column entropy, maps each primer
## pair onto the ungapped sequences and plots entropy with primer spans.
library(BioPhysConnectoR)
library(ggplot2)
library(viridis)
library(parallel)
library(DECIPHER)
## primerF/primerR (and PrimTax, used further below) are defined by the
## sourced script
if(!exists("primerF")){
source("R/1_generalAA.R")
}
aln <- read.fasta("/SAN/db/RDP/Silva_123/silva.nr_v123_EUK.align")
## drop alignment columns consisting only of gap characters ("-" or ".")
keep <- !apply(aln$ali, 2, function (x) all(x %in% c("-", ".")) )
aln <- aln$ali[,keep]
ent <- get.entropy(aln, gapchar = "-")
ent <- data.frame(entropy=ent, plain.length=1:length(ent))
## get the length of non gap characters for each sequence in the
## alignment
non.gap.lenght <- t(apply(aln, 1, function (x) {
cumsum(x != "-")
}))
mean.no.gap.length <- colMeans(non.gap.lenght)
ent$no.gap.l <- mean.no.gap.length
## percentage of gap characters in each alignment column
ent$perc.gap <- apply(aln, 2,
function(x) sum(x=="-")/length(x) *100)
## keep entropy only for columns with < 50% gaps, then median-smooth
## (window 21) over the retained positions
ent$trimmed.ent <- ifelse(ent$perc.gap < 50, ent$entropy, NA)
ent$smooth.ent <- ent$trimmed.ent
ent$smooth.ent[!is.na(ent$trimmed.ent)] <-
runmed(ent$trimmed.ent[!is.na(ent$trimmed.ent)], 21)
## mapping the primers against sequence
## (fixed=FALSE lets IUPAC ambiguity codes in the primers match)
matches <- mclapply(seq_along(primerF), function (i){
pmatches <- apply(aln, 1, function (x) {
nogap <- x[x != "-"]
seq <- DNAString(paste(nogap, collapse=""))
hitF <- matchPattern(primerF[[i]],
seq, fixed=FALSE)
hitR <- matchPattern(reverseComplement(DNAString(primerR[[i]])),
seq, fixed=FALSE)
list(start(hitF), end(hitR))
})
return(pmatches)
}, mc.cores = 20)
names(matches) <- paste(names(primerF), names(primerR), sep=".")
mat.list <- lapply(matches, function (x) do.call(rbind, x))
## something like this for getting the tax scope of forward and
## reverse primer binding
## foo <- lapply(mat.list, function(x) !isEmpty(x[, 1]) & !isEmpty(x[,
## 2]))
## for now only the start and end points
meanStart <- unlist(lapply(mat.list,
function(x) mean(unlist(x[, 1]), na.rm=TRUE)))
meanEnd <- unlist(lapply(mat.list,
function(x) mean(unlist(x[, 2]), na.rm=TRUE)))
Pranges <- as.data.frame(cbind(meanStart, meanEnd))
## DODGY FIX for EukB... watch out for this primer!
Pranges[grepl(".EukB", rownames(Pranges)), ]$meanEnd <-
max(ent$no.gap.l)
## DODGY FIX for Medin... watch out for this primer!
Pranges[grepl("Medin.", rownames(Pranges)), ]$meanStart <-
min(ent$no.gap.l)
Pranges <- Pranges[order(Pranges$meanStart, Pranges$meanEnd), ]
## stack the primer bars at distinct y positions for plotting
Pranges$y.pos <- seq(2.5, 100, by = 0.1)[1:nrow(Pranges)]
Pranges <- merge(Pranges, PrimTax, by=0)
pdf("figures/entropy_primers_norm.pdf", width=10, height=6)
ggplot(ent, aes(no.gap.l, trimmed.ent)) +
geom_hex(binwidth=c(31, 0.1)) +
scale_fill_viridis(option = "viridis") +
geom_segment(mapping=aes(x = meanStart, y = y.pos, xend = meanEnd,
yend = y.pos, color=log10(num.reads)),
size=2,
data=Pranges)+
scale_color_viridis(option = "plasma")+
geom_text(aes(x = meanStart, y = y.pos+0.04, label = Row.names), Pranges, size=2) +
scale_x_continuous("mean non-gapped alignement length")+
theme_bw()
dev.off()
## EukB Primers are at wrong location!
devtools::source_gist("524eade46135f6348140",
filename = "ggplot_smooth_func.R")
pdf("figures/size_vs_num.pdf", width=8, height=6)
ggplot(Pranges, aes(meanEnd-meanStart, num.reads)) +
geom_point(aes(size=Genus, color=Phylum))+
scale_color_viridis(option = "plasma")+
scale_x_continuous("lenght of amplicon") +
scale_y_log10("number of sequencing reads")+
stat_smooth_func(geom="text", method="lm", hjust = -1.5, parse=TRUE) +
stat_smooth(method="lm", se=FALSE) +
annotation_logticks(sides="l") +
theme_bw() +
theme(panel.grid.minor = element_blank())
dev.off()
|
/R/4_entropy.R
|
no_license
|
derele/AA_Metabarcoding
|
R
| false
| false
| 3,764
|
r
|
## Entropy profile of the SILVA EUK alignment and primer-binding ranges.
## Reads the alignment, computes per-column entropy, maps each primer
## pair onto the ungapped sequences and plots entropy with primer spans.
library(BioPhysConnectoR)
library(ggplot2)
library(viridis)
library(parallel)
library(DECIPHER)
## primerF/primerR (and PrimTax, used further below) are defined by the
## sourced script
if(!exists("primerF")){
source("R/1_generalAA.R")
}
aln <- read.fasta("/SAN/db/RDP/Silva_123/silva.nr_v123_EUK.align")
## drop alignment columns consisting only of gap characters ("-" or ".")
keep <- !apply(aln$ali, 2, function (x) all(x %in% c("-", ".")) )
aln <- aln$ali[,keep]
ent <- get.entropy(aln, gapchar = "-")
ent <- data.frame(entropy=ent, plain.length=1:length(ent))
## get the length of non gap characters for each sequence in the
## alignment
non.gap.lenght <- t(apply(aln, 1, function (x) {
cumsum(x != "-")
}))
mean.no.gap.length <- colMeans(non.gap.lenght)
ent$no.gap.l <- mean.no.gap.length
## percentage of gap characters in each alignment column
ent$perc.gap <- apply(aln, 2,
function(x) sum(x=="-")/length(x) *100)
## keep entropy only for columns with < 50% gaps, then median-smooth
## (window 21) over the retained positions
ent$trimmed.ent <- ifelse(ent$perc.gap < 50, ent$entropy, NA)
ent$smooth.ent <- ent$trimmed.ent
ent$smooth.ent[!is.na(ent$trimmed.ent)] <-
runmed(ent$trimmed.ent[!is.na(ent$trimmed.ent)], 21)
## mapping the primers against sequence
## (fixed=FALSE lets IUPAC ambiguity codes in the primers match)
matches <- mclapply(seq_along(primerF), function (i){
pmatches <- apply(aln, 1, function (x) {
nogap <- x[x != "-"]
seq <- DNAString(paste(nogap, collapse=""))
hitF <- matchPattern(primerF[[i]],
seq, fixed=FALSE)
hitR <- matchPattern(reverseComplement(DNAString(primerR[[i]])),
seq, fixed=FALSE)
list(start(hitF), end(hitR))
})
return(pmatches)
}, mc.cores = 20)
names(matches) <- paste(names(primerF), names(primerR), sep=".")
mat.list <- lapply(matches, function (x) do.call(rbind, x))
## something like this for getting the tax scope of forward and
## reverse primer binding
## foo <- lapply(mat.list, function(x) !isEmpty(x[, 1]) & !isEmpty(x[,
## 2]))
## for now only the start and end points
meanStart <- unlist(lapply(mat.list,
function(x) mean(unlist(x[, 1]), na.rm=TRUE)))
meanEnd <- unlist(lapply(mat.list,
function(x) mean(unlist(x[, 2]), na.rm=TRUE)))
Pranges <- as.data.frame(cbind(meanStart, meanEnd))
## DODGY FIX for EukB... watch out for this primer!
Pranges[grepl(".EukB", rownames(Pranges)), ]$meanEnd <-
max(ent$no.gap.l)
## DODGY FIX for Medin... watch out for this primer!
Pranges[grepl("Medin.", rownames(Pranges)), ]$meanStart <-
min(ent$no.gap.l)
Pranges <- Pranges[order(Pranges$meanStart, Pranges$meanEnd), ]
## stack the primer bars at distinct y positions for plotting
Pranges$y.pos <- seq(2.5, 100, by = 0.1)[1:nrow(Pranges)]
Pranges <- merge(Pranges, PrimTax, by=0)
pdf("figures/entropy_primers_norm.pdf", width=10, height=6)
ggplot(ent, aes(no.gap.l, trimmed.ent)) +
geom_hex(binwidth=c(31, 0.1)) +
scale_fill_viridis(option = "viridis") +
geom_segment(mapping=aes(x = meanStart, y = y.pos, xend = meanEnd,
yend = y.pos, color=log10(num.reads)),
size=2,
data=Pranges)+
scale_color_viridis(option = "plasma")+
geom_text(aes(x = meanStart, y = y.pos+0.04, label = Row.names), Pranges, size=2) +
scale_x_continuous("mean non-gapped alignement length")+
theme_bw()
dev.off()
## EukB Primers are at wrong location!
devtools::source_gist("524eade46135f6348140",
filename = "ggplot_smooth_func.R")
pdf("figures/size_vs_num.pdf", width=8, height=6)
ggplot(Pranges, aes(meanEnd-meanStart, num.reads)) +
geom_point(aes(size=Genus, color=Phylum))+
scale_color_viridis(option = "plasma")+
scale_x_continuous("lenght of amplicon") +
scale_y_log10("number of sequencing reads")+
stat_smooth_func(geom="text", method="lm", hjust = -1.5, parse=TRUE) +
stat_smooth(method="lm", se=FALSE) +
annotation_logticks(sides="l") +
theme_bw() +
theme(panel.grid.minor = element_blank())
dev.off()
|
#Data Table - Learning how to work with it
#DT[i, j, by]
## R: i j by
## SQL: where | order by select | update group by
#Take DT, subset/reorder rows using i, then calculate j, grouped by by.
#Source=https://cran.r-project.org/web/packages/data.table/vignettes/datatable-intro.html
#
#
library(data.table)
#
## use the local copy of the flights data when present, otherwise download it
input <- if (file.exists("flights14.csv")) {
"flights14.csv"
} else {
"https://raw.githubusercontent.com/Rdatatable/data.table/master/vignettes/flights14.csv"
}
flights <- fread(input)
###Subsetting rows
#Get all the flights with "JFK" as the origin airport in the month of June.
ans <- flights[origin == "JFK" & month == 6]
#Get the rows 4&5
ans <- flights[4:5,,]
ans <- flights[4:5]
#Sort flights first by column origin in ascending order, and then by dest in descending order
#(note: the code actually uses -distance as the descending second key, not -dest)
ans <- flights[order(origin,-distance)]
###Subsetting columns
#Select arr_delay column, but return it as a vector
ans <- flights[,arr_delay]
head(ans)
#Select both arr_delay and dep_delay columns
ans <- flights[,list(arr_delay,dep_delay)]
ans
#Select both arr_delay and dep_delay columns and rename them to delay_arr and delay_dep
ans <- flights[,list(delay_arr=arr_delay,delay_dep=dep_delay)]
head(ans)
#Doing in the same way that the data.frame way: select both arr_delay and dep_delay columns
ans <- flights[, c("arr_delay", "dep_delay")]
head(ans)
###Doing computations
#How many trips have had total delay < 0?
ans <- flights[,sum((arr_delay + dep_delay)<0)]
ans
#Calculate the average arrival and departure delay for all flights with "JFK" as
#the origin airport in the month of June.
#(fixed: the original was missing the closing "]" of the data.table call)
ans <- flights[origin == "JFK" & month == 6,
list(m.arr= mean(arr_delay),m.dep= mean(dep_delay))]
ans
#How many trips have been made in 2014 from "JFK" airport in the month of June?
ans <- flights[origin == "JFK" & year == 2014 & month == 6,length(dest)]
#length determines the length of the vector, giving the number of rows
ans
ans <- flights[origin == "JFK" & year == 2014 & month == 6,.N] #.N gives the number of rows
###Aggregations
#How can we get the number of trips corresponding to each origin airport?
ans <- flights[,.N,by=.(origin)]
ans
#How can we calculate the number of trips for each origin airport for carrier code "AA"?
ans <- flights[carrier == "AA",.N,by=origin]
ans
#How can we get the total number of trips for each origin, dest pair for carrier code "AA"?
ans <- flights[carrier == "AA",.N,by=.(origin,dest)]
ans
#How can we get the average arrival and departure delay for each orig,dest pair
#for each month for carrier code "AA"?
ans <- flights[carrier == "AA",
.(m.arr= mean(arr_delay),m.dep= mean(dep_delay)),
by=.(origin,dest,month)]
ans
#So how can we directly order by all the grouping variables?
#(keyby sorts by the grouping columns and sets them as the key)
ans <- flights[carrier == "AA",
.(m.arr= mean(arr_delay),m.dep= mean(dep_delay)),
keyby=.(origin,dest,month)]
ans
#Doing cumulative sums, according to some specific order and by specific key
#NOTE(review): dt.hp is not created anywhere in this script -- this line only
#runs if dt.hp has been defined elsewhere before this point
dt.hp <- dt.hp [order(CountryExp,DateRep),
Total.Cases.cumsum := cumsum(NewConfCases), by=.(CountryExp)]
#Chaining expressions
#So how can we directly order by all the grouping variables?
ans <- flights[carrier == "AA", .N, by = .(origin, dest)]
ans
#Sort by origin/dest
ans <- flights[carrier == "AA", .N, by = .(origin, dest)][order(origin, -dest)]
ans
#Expressions in by (not only columns)
#Number of flights that started late but arrived early (or on time), started and arrived late
ans <- flights[, .N, by =.(dep_delay>0, arr_delay>0,carrier)]
ans
#(fixed: "OR" is not an R operator; the vectorized "|" is required in i)
ans <- flights[(dep_delay>0 | arr_delay>0), .N, by =.(carrier)]
ans
#Special symbol .SD:
#It stands for "Subset of Data". It is a data.table by itself
#that holds the data for the current group defined using by.
#(lapply applies the function to each column of .SD within each group)
flights[carrier == "AA", ## Only on trips with carrier "AA"
lapply(.SD, mean), ## compute the mean
by = .(origin, dest, month), ## for every 'origin,dest,month'
.SDcols = c("arr_delay", "dep_delay", "air_time")] ## for just those specified in .SDcols
#How can we return the first two rows for each month?
ans <- flights[, head(.SD, 2), by = month]
head(ans)
#How can we specify just the columns we would like to compute the mean?
flights[carrier == "AA",lapply(.SD, mean),
by = .(origin, dest, month),
.SDcols = c("arr_delay", "dep_delay")]
|
/Introduction_data_table.R
|
no_license
|
secun/Learning_R_Examples
|
R
| false
| false
| 4,439
|
r
|
#Data Table - Learning how to work with it
#DT[i, j, by]
## R: i j by
## SQL: where | order by select | update group by
#Take DT, subset/reorder rows using i, then calculate j, grouped by by.
#Source=https://cran.r-project.org/web/packages/data.table/vignettes/datatable-intro.html
#
#
library(data.table)
#
## use the local copy of the flights data when present, otherwise download it
input <- if (file.exists("flights14.csv")) {
"flights14.csv"
} else {
"https://raw.githubusercontent.com/Rdatatable/data.table/master/vignettes/flights14.csv"
}
flights <- fread(input)
###Subsetting rows
#Get all the flights with "JFK" as the origin airport in the month of June.
ans <- flights[origin == "JFK" & month == 6]
#Get the rows 4&5
ans <- flights[4:5,,]
ans <- flights[4:5]
#Sort flights first by column origin in ascending order, and then by dest in descending order
#(note: the code actually uses -distance as the descending second key, not -dest)
ans <- flights[order(origin,-distance)]
###Subsetting columns
#Select arr_delay column, but return it as a vector
ans <- flights[,arr_delay]
head(ans)
#Select both arr_delay and dep_delay columns
ans <- flights[,list(arr_delay,dep_delay)]
ans
#Select both arr_delay and dep_delay columns and rename them to delay_arr and delay_dep
ans <- flights[,list(delay_arr=arr_delay,delay_dep=dep_delay)]
head(ans)
#Doing in the same way that the data.frame way: select both arr_delay and dep_delay columns
ans <- flights[, c("arr_delay", "dep_delay")]
head(ans)
###Doing computations
#How many trips have had total delay < 0?
ans <- flights[,sum((arr_delay + dep_delay)<0)]
ans
#Calculate the average arrival and departure delay for all flights with "JFK" as
#the origin airport in the month of June.
#(fixed: the original was missing the closing "]" of the data.table call)
ans <- flights[origin == "JFK" & month == 6,
list(m.arr= mean(arr_delay),m.dep= mean(dep_delay))]
ans
#How many trips have been made in 2014 from "JFK" airport in the month of June?
ans <- flights[origin == "JFK" & year == 2014 & month == 6,length(dest)]
#length determines the length of the vector, giving the number of rows
ans
ans <- flights[origin == "JFK" & year == 2014 & month == 6,.N] #.N gives the number of rows
###Aggregations
#How can we get the number of trips corresponding to each origin airport?
ans <- flights[,.N,by=.(origin)]
ans
#How can we calculate the number of trips for each origin airport for carrier code "AA"?
ans <- flights[carrier == "AA",.N,by=origin]
ans
#How can we get the total number of trips for each origin, dest pair for carrier code "AA"?
ans <- flights[carrier == "AA",.N,by=.(origin,dest)]
ans
#How can we get the average arrival and departure delay for each orig,dest pair
#for each month for carrier code "AA"?
ans <- flights[carrier == "AA",
.(m.arr= mean(arr_delay),m.dep= mean(dep_delay)),
by=.(origin,dest,month)]
ans
#So how can we directly order by all the grouping variables?
#(keyby sorts by the grouping columns and sets them as the key)
ans <- flights[carrier == "AA",
.(m.arr= mean(arr_delay),m.dep= mean(dep_delay)),
keyby=.(origin,dest,month)]
ans
#Doing cumulative sums, according to some specific order and by specific key
#NOTE(review): dt.hp is not created anywhere in this script -- this line only
#runs if dt.hp has been defined elsewhere before this point
dt.hp <- dt.hp [order(CountryExp,DateRep),
Total.Cases.cumsum := cumsum(NewConfCases), by=.(CountryExp)]
#Chaining expressions
#So how can we directly order by all the grouping variables?
ans <- flights[carrier == "AA", .N, by = .(origin, dest)]
ans
#Sort by origin/dest
ans <- flights[carrier == "AA", .N, by = .(origin, dest)][order(origin, -dest)]
ans
#Expressions in by (not only columns)
#Number of flights that started late but arrived early (or on time), started and arrived late
ans <- flights[, .N, by =.(dep_delay>0, arr_delay>0,carrier)]
ans
#(fixed: "OR" is not an R operator; the vectorized "|" is required in i)
ans <- flights[(dep_delay>0 | arr_delay>0), .N, by =.(carrier)]
ans
#Special symbol .SD:
#It stands for "Subset of Data". It is a data.table by itself
#that holds the data for the current group defined using by.
#(lapply applies the function to each column of .SD within each group)
flights[carrier == "AA", ## Only on trips with carrier "AA"
lapply(.SD, mean), ## compute the mean
by = .(origin, dest, month), ## for every 'origin,dest,month'
.SDcols = c("arr_delay", "dep_delay", "air_time")] ## for just those specified in .SDcols
#How can we return the first two rows for each month?
ans <- flights[, head(.SD, 2), by = month]
head(ans)
#How can we specify just the columns we would like to compute the mean?
flights[carrier == "AA",lapply(.SD, mean),
by = .(origin, dest, month),
.SDcols = c("arr_delay", "dep_delay")]
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/subsetbydaterange.R
\name{subset_by_date_range}
\alias{subset_by_date_range}
\title{subset_by_date_range}
\usage{
subset_by_date_range(data_set, date_col = "detected_at", start_date,
end_date, na.rm = FALSE)
}
\arguments{
\item{data_set}{The data set to get data from}
\item{date_col}{The column in the data set storing the date
of observation}
\item{start_date}{Enter as character string "mm-dd-yyyy"}
\item{end_date}{Enter as character string "mm-dd-yyyy"}
\item{na.rm}{Logical; presumably controls whether observations with missing dates are dropped (undocumented in the source -- confirm against R/subsetbydaterange.R)}
}
\value{
A data frame subsetted to observations only inclusively
within specified range, but including all original columns
}
\description{
subset_by_date_range
}
|
/man/subset_by_date_range.Rd
|
no_license
|
Keegan-Evans/pitDataR
|
R
| false
| true
| 732
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/subsetbydaterange.R
\name{subset_by_date_range}
\alias{subset_by_date_range}
\title{subset_by_date_range}
\usage{
subset_by_date_range(data_set, date_col = "detected_at", start_date,
end_date, na.rm = FALSE)
}
\arguments{
\item{data_set}{The data set to get data from}
\item{date_col}{The column in the data set storing the date
of observation}
\item{start_date}{Enter as character string "mm-dd-yyyy"}
\item{end_date}{Enter as character string "mm-dd-yyyy"}
\item{na.rm}{Logical; presumably controls whether observations with missing dates are dropped (undocumented in the source -- confirm against R/subsetbydaterange.R)}
}
\value{
A data frame subsetted to observations only inclusively
within specified range, but including all original columns
}
\description{
subset_by_date_range
}
|
## Cross-reference SUPERCIAS 2020 financial statements (EEFF) with
## registered postal operators and export candidate operator lists.
library(readxl)
library(dplyr)
setwd("C:/Users/lenovo/Documents/analisis/postales")
## Reading 2020 EEFF from SUPERCIAS (tab-separated, UTF-16, decimal comma;
## RUC kept as character so leading zeros survive)
postales<-read.csv("bal2020.txt", sep="\t", header=TRUE, dec=",", colClasses = c("RUC"="character"), fileEncoding="UTF-16", skipNul = TRUE, fill=TRUE)
## EEFF of companies with registered postal staff (TH)
postalTH<-read_excel("registrados.xlsx")
postalTH<-postalTH %>% select(-c(5:13))
colnames(postalTH)[2]<-"RUC"
postal<-postales
postal<-merge(postal, postalTH, by="RUC")
write.table(postal, "EEFFTH.txt", sep="\t", col.names=TRUE, row.names = FALSE)
## Possible postal operators: same CIIU codes as registered ones,
## excluding companies already registered
CIIU<-postal$CIIU
CIIU<-CIIU[!duplicated(CIIU)]
Npostal<-postales[(postales$CIIU %in% CIIU),]
Npostal<-Npostal[!(Npostal$RUC %in% postal$RUC),]
write.table(Npostal, "posibles.txt", sep="\t", col.names=TRUE, row.names = FALSE)
## Possible operators whose CIIU contains "H53"
NpostalH53<-Npostal[grep("H53", Npostal$CIIU),]
## fixed: the object is NpostalH53 (capital H); "Npostalh53" was undefined
## and made this write.table() call error out
write.table(NpostalH53, "posiblesH53.txt", sep="\t", col.names=TRUE, row.names = FALSE)
|
/script.R
|
no_license
|
mminita8/postales
|
R
| false
| false
| 1,006
|
r
|
## Cross-reference SUPERCIAS 2020 financial statements (EEFF) with
## registered postal operators and export candidate operator lists.
library(readxl)
library(dplyr)
setwd("C:/Users/lenovo/Documents/analisis/postales")
## Reading 2020 EEFF from SUPERCIAS (tab-separated, UTF-16, decimal comma;
## RUC kept as character so leading zeros survive)
postales<-read.csv("bal2020.txt", sep="\t", header=TRUE, dec=",", colClasses = c("RUC"="character"), fileEncoding="UTF-16", skipNul = TRUE, fill=TRUE)
## EEFF of companies with registered postal staff (TH)
postalTH<-read_excel("registrados.xlsx")
postalTH<-postalTH %>% select(-c(5:13))
colnames(postalTH)[2]<-"RUC"
postal<-postales
postal<-merge(postal, postalTH, by="RUC")
write.table(postal, "EEFFTH.txt", sep="\t", col.names=TRUE, row.names = FALSE)
## Possible postal operators: same CIIU codes as registered ones,
## excluding companies already registered
CIIU<-postal$CIIU
CIIU<-CIIU[!duplicated(CIIU)]
Npostal<-postales[(postales$CIIU %in% CIIU),]
Npostal<-Npostal[!(Npostal$RUC %in% postal$RUC),]
write.table(Npostal, "posibles.txt", sep="\t", col.names=TRUE, row.names = FALSE)
## Possible operators whose CIIU contains "H53"
NpostalH53<-Npostal[grep("H53", Npostal$CIIU),]
## fixed: the object is NpostalH53 (capital H); "Npostalh53" was undefined
## and made this write.table() call error out
write.table(NpostalH53, "posiblesH53.txt", sep="\t", col.names=TRUE, row.names = FALSE)
|
## Dependency loading; library() errors immediately if a package is
## missing, whereas require() only returns FALSE and lets the script
## fail later with a confusing message.
library(quantmod)
library(PerformanceAnalytics)
library(DEoptim)
library(parallel)
set.seed(1)
# Step 1: Get the data (loads the PH price series into the workspace)
getSymbols("PH")
# Step 2: Create your indicator
dvi <- DVI(Cl(PH))
## Objective function for DEoptim: x[1] is the DVI threshold of the
## long/short rule. Returns max drawdown + 1/cumulative-return over the
## evaluation window (penalised when the return is negative), to be
## minimised. Reads globals dvi and PH defined above.
func <- function(x) {
  # Step 3: Construct your trading rule (long when DVI is below threshold)
  sig <- Lag(ifelse(dvi$dvi < x[1], 1, -1))
  # Step 4: The trading rules/equity curve
  ret <- ROC(Cl(PH))*sig
  ret <- ret['2012-01-01/2013-04-01']
  # (removed: unused equity-curve computation exp(cumsum(ret)))
  dd <- maxDrawdown(ret)
  rc <- Return.cumulative(ret)
  # heavily penalise strategies that lose money overall
  if(rc<0) rc = 1e6
  ff <- dd + 1/rc
  return(ff)
}
## Time the DEoptim search over the threshold range [0, 1]; the serial
## variant (parallelType=0) is kept commented for comparison.
## (fixed: spell out FALSE instead of the reassignable shorthand F)
#opt1 <- system.time(DEoptim(func,0,1,control=list(NP = 100, itermax = 100, trace = FALSE,parallelType=0)))
optP <- system.time(DEoptim(func,0,1,control=list(NP = 10, itermax = 500, trace = FALSE,parallelType=1)))
#(opt1)
(optP)
|
/simple_backtest_opt.R
|
no_license
|
githubfun/omitt
|
R
| false
| false
| 811
|
r
|
## Dependency loading; library() errors immediately if a package is
## missing, whereas require() only returns FALSE and lets the script
## fail later with a confusing message.
library(quantmod)
library(PerformanceAnalytics)
library(DEoptim)
library(parallel)
set.seed(1)
# Step 1: Get the data (loads the PH price series into the workspace)
getSymbols("PH")
# Step 2: Create your indicator
dvi <- DVI(Cl(PH))
## Objective function for DEoptim: x[1] is the DVI threshold of the
## long/short rule. Returns max drawdown + 1/cumulative-return over the
## evaluation window (penalised when the return is negative), to be
## minimised. Reads globals dvi and PH defined above.
func <- function(x) {
  # Step 3: Construct your trading rule (long when DVI is below threshold)
  sig <- Lag(ifelse(dvi$dvi < x[1], 1, -1))
  # Step 4: The trading rules/equity curve
  ret <- ROC(Cl(PH))*sig
  ret <- ret['2012-01-01/2013-04-01']
  # (removed: unused equity-curve computation exp(cumsum(ret)))
  dd <- maxDrawdown(ret)
  rc <- Return.cumulative(ret)
  # heavily penalise strategies that lose money overall
  if(rc<0) rc = 1e6
  ff <- dd + 1/rc
  return(ff)
}
## Time the DEoptim search over the threshold range [0, 1]; the serial
## variant (parallelType=0) is kept commented for comparison.
## (fixed: spell out FALSE instead of the reassignable shorthand F)
#opt1 <- system.time(DEoptim(func,0,1,control=list(NP = 100, itermax = 100, trace = FALSE,parallelType=0)))
optP <- system.time(DEoptim(func,0,1,control=list(NP = 10, itermax = 500, trace = FALSE,parallelType=1)))
#(opt1)
(optP)
|
\name{CCcheck}
\alias{CCcheck}
\title{Counter Clockwise check}
\description{Check for counter-clockwise orientation
for polygons. Positive is counterclockwise.
}
\usage{
CCcheck(Z)
}
\arguments{
\item{Z}{list(x,y) }
}
\details{ Uses sign of the area of the polygon to determine
polarity.
}
\value{
\item{j}{sign of area}
}
\author{Jonathan M. Lees<jonathan.lees@unc.edu>}
\note{
Based on the sign of the calculated area of the polygon.
}
\examples{
Y=list()
Y$x=c(170,175,184,191,194,190,177,166,162,164)
Y$y=c(-54,-60,-60,-50,-26,8,34,37,10,-15)
plot(c(160, 200),c(-85, 85), type='n')
points(Y)
lines(Y)
CCcheck(Y)
Z = list(x=rev(Y$x), y=rev(Y$y))
CCcheck(Z)
}
\keyword{misc}
|
/man/CCcheck.Rd
|
no_license
|
cran/GEOmap
|
R
| false
| false
| 691
|
rd
|
\name{CCcheck}
\alias{CCcheck}
\title{Counter Clockwise check}
\description{Check for counter-clockwise orientation
for polygons. Positive is counterclockwise.
}
\usage{
CCcheck(Z)
}
\arguments{
\item{Z}{list(x,y) }
}
\details{ Uses sign of the area of the polygon to determine
polarity.
}
\value{
\item{j}{sign of area}
}
\author{Jonathan M. Lees<jonathan.lees@unc.edu>}
\note{
Based on the sign of the calculated area of the polygon.
}
\examples{
Y=list()
Y$x=c(170,175,184,191,194,190,177,166,162,164)
Y$y=c(-54,-60,-60,-50,-26,8,34,37,10,-15)
plot(c(160, 200),c(-85, 85), type='n')
points(Y)
lines(Y)
CCcheck(Y)
Z = list(x=rev(Y$x), y=rev(Y$y))
CCcheck(Z)
}
\keyword{misc}
|
## Build the merged game-level dataset: join scores, team age, night/city
## info onto the base table and label each game with its season.
## NOTE(review): rm(list=ls()) wipes the user's workspace; kept because
## the script is run standalone.
rm(list=ls())
yes=read.csv('YES.csv')
jjj12=read.csv('joined12.csv')
jjj13=read.csv('joined13.csv')
jjj14=read.csv('joined.csv')
jjj15=read.csv('joined15 new.csv')
ALLIDS=rbind(jjj12,jjj13,jjj14,jjj15)
myvars=c('game_id','home_team_pts','away_team_pts')
ALLIDS2=ALLIDS[myvars]
new=merge(yes,ALLIDS2,by='game_id')
n=nrow(new)
## season labels by row range
new$season=NA
new$season[1:1074]='2012-2013'
new$season[1075:2148]='2013-2014'
new$season[2149:3220]='2014-2015'
## fixed off-by-one: row 3220 belongs to 2014-2015 (see line above);
## the last season starts at 3221, so row 3220 is no longer overwritten
new$season[3221:n]='2015-2016'
age=read.csv('age.csv')
night=read.csv('m.csv')
help=read.csv('ids away.csv')
age2=merge(help, age, by=c("awayteam"))
age2$season=as.character(age2$season)
new=merge(new, age2, by=c("awayteam_id","season"))
new=merge(new,night,by='city.x')
new$night=as.factor(new$night)
new$cat=as.factor(new$cat)
new$total_points=new$home_team_pts+new$away_team_pts
new <- new[order(new$game_id),]
write.csv(new,'trying stuff here.csv')
## attach referee ids (as factors, for the random forest) to each game
ref=read.csv('ref.csv')
new=merge(new,ref,by='game_id')
new$ref_one_id=as.factor(new$ref_one_id)
new$ref_two_id=as.factor(new$ref_two_id)
new$ref_three_id=as.factor(new$ref_three_id)
## betting lines for the 2015-16 slice, joined on game date and home city
lines=read.csv('lines2015.csv')
new2015=new[3285:3485,]
new2015$date.x=as.Date(new2015$date.x,format='%a %b %d %Y')
lines$date.x=as.Date(lines$date.x,format='%a %b %d %Y')
newtest2015=merge(new2015,lines,by=c('date.x','city.x'))
new2=new[1:3284,]
write.csv(new2,'new2.csv')
new2=read.csv('new2.csv')
## train/test split by row position
new1=new[1:3517,]
newtest=new[3518:3530,]
newtest2015$ref_three_id=as.numeric(newtest2015$ref_three_id)
new2$ref_three_id=as.numeric(new2$ref_three_id)
## referee ids present in training data but absent from the 2015 lines set
## NOTE(review): the subscript vector comes from new2 while the subsetted
## vector comes from newtest2015; their lengths can differ, so recycling
## may occur -- confirm this matches the intent
x=newtest2015$ref_two_id[!new2$ref_two_id %in% newtest2015$ref_two_id]
x=unique(x)
x
set.seed(415)
fit <- randomForest(total_points ~ hometeam_offrtg+hometeam_defrtg+hometeam_pace+hometeam_fg3m + hometeam_fg3mSD
+ awayteam_offrtg+awayteam_defrtg+awayteam_pace+awayteam_fg3m +awayteam_fg3mSD
+ hometeam_travelinpastfive+ hometeam_travelinpastten+ hometeam_gamesinpastfive
+ hometeam_gamesinpastten
+ awayteam_travelinpastfive+ awayteam_travelinpastten+ awayteam_gamesinpastfive
+ awayteam_gamesinpastten
+ hometeam_fg3m_opp + hometeam_fg3m_oppSD
+ awayteam_fg3m_opp + awayteam_fg3m_oppSD
+ref_one_id +ref_two_id +ref_three_id
,data=new2,mtry=10,ntree=2000)
fit2 <- lm(total_points ~ hometeam_offrtg+hometeam_defrtg+hometeam_pace+hometeam_fg3m + hometeam_fg3mSD
+ awayteam_offrtg+awayteam_defrtg+awayteam_pace+awayteam_fg3m +awayteam_fg3mSD
+ hometeam_travelinpastfive+ hometeam_travelinpastten+ hometeam_gamesinpastfive
+ hometeam_gamesinpastten
+ awayteam_travelinpastfive+ awayteam_travelinpastten+ awayteam_gamesinpastfive
+ awayteam_gamesinpastten
+ hometeam_fg3m_opp + hometeam_fg3m_oppSD
+ awayteam_fg3m_opp + awayteam_fg3m_oppSD
,data=new1)
newtest$prediction=predict(fit, newtest,type='response')
newtest$prediction2=predict(fit2, newtest,type='response')
newtest2015$pred=predict(fit,newtest2015,type='response')
write.csv(newtest2015,'looking at lines.csv')
c=newtest[,1]
a=newtest[,8:9]
rf=newtest[,327]
lmmod=newtest[,328]
j=cbind(c,a,rf,lmmod)
write.csv(j,'jola 1226.csv')
summary(fit)
smallertest$prediction=predict(fit, smallertest,type='response')
todaytest$prediction=predict(fit, todaytest,type='response')
a=smallertest[,1]
a=as.character(a)
c=smallertest[,11]
c=as.character(c)
b=smallertest[,326:327]
j2=cbind(a,c,b)
write.csv(j2,'jola2 smaller dos.csv')
saveRDS(fit, "my-fitted-rf for betting.rds")
fit <- readRDS("my-fitted-rf.rds")
|
/nba RF.R
|
no_license
|
garretthill/NBA
|
R
| false
| false
| 4,518
|
r
|
## Build the merged game-level dataset: join scores, team age, night/city
## info onto the base table and label each game with its season.
## NOTE(review): rm(list=ls()) wipes the user's workspace; kept because
## the script is run standalone.
rm(list=ls())
yes=read.csv('YES.csv')
jjj12=read.csv('joined12.csv')
jjj13=read.csv('joined13.csv')
jjj14=read.csv('joined.csv')
jjj15=read.csv('joined15 new.csv')
ALLIDS=rbind(jjj12,jjj13,jjj14,jjj15)
myvars=c('game_id','home_team_pts','away_team_pts')
ALLIDS2=ALLIDS[myvars]
new=merge(yes,ALLIDS2,by='game_id')
n=nrow(new)
## season labels by row range
new$season=NA
new$season[1:1074]='2012-2013'
new$season[1075:2148]='2013-2014'
new$season[2149:3220]='2014-2015'
## fixed off-by-one: row 3220 belongs to 2014-2015 (see line above);
## the last season starts at 3221, so row 3220 is no longer overwritten
new$season[3221:n]='2015-2016'
age=read.csv('age.csv')
night=read.csv('m.csv')
help=read.csv('ids away.csv')
age2=merge(help, age, by=c("awayteam"))
age2$season=as.character(age2$season)
new=merge(new, age2, by=c("awayteam_id","season"))
new=merge(new,night,by='city.x')
new$night=as.factor(new$night)
new$cat=as.factor(new$cat)
new$total_points=new$home_team_pts+new$away_team_pts
new <- new[order(new$game_id),]
write.csv(new,'trying stuff here.csv')
## attach referee ids (as factors, for the random forest) to each game
ref=read.csv('ref.csv')
new=merge(new,ref,by='game_id')
new$ref_one_id=as.factor(new$ref_one_id)
new$ref_two_id=as.factor(new$ref_two_id)
new$ref_three_id=as.factor(new$ref_three_id)
## betting lines for the 2015-16 slice, joined on game date and home city
lines=read.csv('lines2015.csv')
new2015=new[3285:3485,]
new2015$date.x=as.Date(new2015$date.x,format='%a %b %d %Y')
lines$date.x=as.Date(lines$date.x,format='%a %b %d %Y')
newtest2015=merge(new2015,lines,by=c('date.x','city.x'))
new2=new[1:3284,]
write.csv(new2,'new2.csv')
new2=read.csv('new2.csv')
## train/test split by row position
new1=new[1:3517,]
newtest=new[3518:3530,]
newtest2015$ref_three_id=as.numeric(newtest2015$ref_three_id)
new2$ref_three_id=as.numeric(new2$ref_three_id)
## referee ids present in training data but absent from the 2015 lines set
## NOTE(review): the subscript vector comes from new2 while the subsetted
## vector comes from newtest2015; their lengths can differ, so recycling
## may occur -- confirm this matches the intent
x=newtest2015$ref_two_id[!new2$ref_two_id %in% newtest2015$ref_two_id]
x=unique(x)
x
set.seed(415)
fit <- randomForest(total_points ~ hometeam_offrtg+hometeam_defrtg+hometeam_pace+hometeam_fg3m + hometeam_fg3mSD
+ awayteam_offrtg+awayteam_defrtg+awayteam_pace+awayteam_fg3m +awayteam_fg3mSD
+ hometeam_travelinpastfive+ hometeam_travelinpastten+ hometeam_gamesinpastfive
+ hometeam_gamesinpastten
+ awayteam_travelinpastfive+ awayteam_travelinpastten+ awayteam_gamesinpastfive
+ awayteam_gamesinpastten
+ hometeam_fg3m_opp + hometeam_fg3m_oppSD
+ awayteam_fg3m_opp + awayteam_fg3m_oppSD
+ref_one_id +ref_two_id +ref_three_id
,data=new2,mtry=10,ntree=2000)
fit2 <- lm(total_points ~ hometeam_offrtg+hometeam_defrtg+hometeam_pace+hometeam_fg3m + hometeam_fg3mSD
+ awayteam_offrtg+awayteam_defrtg+awayteam_pace+awayteam_fg3m +awayteam_fg3mSD
+ hometeam_travelinpastfive+ hometeam_travelinpastten+ hometeam_gamesinpastfive
+ hometeam_gamesinpastten
+ awayteam_travelinpastfive+ awayteam_travelinpastten+ awayteam_gamesinpastfive
+ awayteam_gamesinpastten
+ hometeam_fg3m_opp + hometeam_fg3m_oppSD
+ awayteam_fg3m_opp + awayteam_fg3m_oppSD
,data=new1)
newtest$prediction=predict(fit, newtest,type='response')
newtest$prediction2=predict(fit2, newtest,type='response')
newtest2015$pred=predict(fit,newtest2015,type='response')
write.csv(newtest2015,'looking at lines.csv')
c=newtest[,1]
a=newtest[,8:9]
rf=newtest[,327]
lmmod=newtest[,328]
j=cbind(c,a,rf,lmmod)
write.csv(j,'jola 1226.csv')
summary(fit)
smallertest$prediction=predict(fit, smallertest,type='response')
todaytest$prediction=predict(fit, todaytest,type='response')
a=smallertest[,1]
a=as.character(a)
c=smallertest[,11]
c=as.character(c)
b=smallertest[,326:327]
j2=cbind(a,c,b)
write.csv(j2,'jola2 smaller dos.csv')
saveRDS(fit, "my-fitted-rf for betting.rds")
fit <- readRDS("my-fitted-rf.rds")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/autoscaling_operations.R
\name{autoscaling_create_launch_configuration}
\alias{autoscaling_create_launch_configuration}
\title{Creates a launch configuration}
\usage{
autoscaling_create_launch_configuration(LaunchConfigurationName,
ImageId, KeyName, SecurityGroups, ClassicLinkVPCId,
ClassicLinkVPCSecurityGroups, UserData, InstanceId, InstanceType,
KernelId, RamdiskId, BlockDeviceMappings, InstanceMonitoring, SpotPrice,
IamInstanceProfile, EbsOptimized, AssociatePublicIpAddress,
PlacementTenancy)
}
\arguments{
\item{LaunchConfigurationName}{[required] The name of the launch configuration. This name must be unique within
the scope of your AWS account.}
\item{ImageId}{The ID of the Amazon Machine Image (AMI) to use to launch your EC2
instances.
If you do not specify \code{InstanceId}, you must specify \code{ImageId}.
For more information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html}{Finding an AMI}
in the \emph{Amazon EC2 User Guide for Linux Instances}.}
\item{KeyName}{The name of the key pair. For more information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html}{Amazon EC2 Key Pairs}
in the \emph{Amazon EC2 User Guide for Linux Instances}.}
\item{SecurityGroups}{One or more security groups with which to associate the instances.
If your instances are launched in EC2-Classic, you can either specify
security group names or the security group IDs. For more information,
see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html}{Amazon EC2 Security Groups}
in the \emph{Amazon EC2 User Guide for Linux Instances}.
If your instances are launched into a VPC, specify security group IDs.
For more information, see \href{https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html}{Security Groups for Your VPC}
in the \emph{Amazon Virtual Private Cloud User Guide}.}
\item{ClassicLinkVPCId}{The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances
to. This parameter is supported only if you are launching EC2-Classic
instances. For more information, see
\href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html}{ClassicLink}
in the \emph{Amazon EC2 User Guide for Linux Instances} and \href{https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html#as-ClassicLink}{Linking EC2-Classic Instances to a VPC}
in the \emph{Amazon EC2 Auto Scaling User Guide}.}
\item{ClassicLinkVPCSecurityGroups}{The IDs of one or more security groups for the specified
ClassicLink-enabled VPC. For more information, see
\href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html}{ClassicLink}
in the \emph{Amazon EC2 User Guide for Linux Instances} and \href{https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html#as-ClassicLink}{Linking EC2-Classic Instances to a VPC}
in the \emph{Amazon EC2 Auto Scaling User Guide}.
Conditional: This parameter is required if you specify a
ClassicLink-enabled VPC, and is not supported otherwise.}
\item{UserData}{The user data to make available to the launched EC2 instances. For more
information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html}{Instance Metadata and User Data}
in the \emph{Amazon EC2 User Guide for Linux Instances}.}
\item{InstanceId}{The ID of the instance to use to create the launch configuration. The
new launch configuration derives attributes from the instance, except
for the block device mapping.
If you do not specify \code{InstanceId}, you must specify both \code{ImageId} and
\code{InstanceType}.
To create a launch configuration with a block device mapping or override
any other instance attributes, specify them as part of the same request.
For more information, see \href{https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-lc-with-instanceID.html}{Create a Launch Configuration Using an EC2 Instance}
in the \emph{Amazon EC2 Auto Scaling User Guide}.}
\item{InstanceType}{The instance type of the EC2 instance.
If you do not specify \code{InstanceId}, you must specify \code{InstanceType}.
For information about available instance types, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#AvailableInstanceTypes}{Available Instance Types}
in the \emph{Amazon EC2 User Guide for Linux Instances.}}
\item{KernelId}{The ID of the kernel associated with the AMI.}
\item{RamdiskId}{The ID of the RAM disk associated with the AMI.}
\item{BlockDeviceMappings}{One or more mappings that specify how block devices are exposed to the
instance. For more information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html}{Block Device Mapping}
in the \emph{Amazon EC2 User Guide for Linux Instances}.}
\item{InstanceMonitoring}{Enables detailed monitoring (\code{true}) or basic monitoring (\code{false}) for
the Auto Scaling instances. The default value is \code{true}.}
\item{SpotPrice}{The maximum hourly price to be paid for any Spot Instance launched to
fulfill the request. Spot Instances are launched when the price you
specify exceeds the current Spot market price. For more information, see
\href{https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-launch-spot-instances.html}{Launching Spot Instances in Your Auto Scaling Group}
in the \emph{Amazon EC2 Auto Scaling User Guide}.}
\item{IamInstanceProfile}{The name or the Amazon Resource Name (ARN) of the instance profile
associated with the IAM role for the instance.
EC2 instances launched with an IAM role automatically have AWS security
credentials available. You can use IAM roles with Amazon EC2 Auto
Scaling to automatically enable applications running on your EC2
instances to securely access other AWS resources. For more information,
see \href{https://docs.aws.amazon.com/autoscaling/ec2/userguide/us-iam-role.html}{Use an IAM Role for Applications That Run on Amazon EC2 Instances}
in the \emph{Amazon EC2 Auto Scaling User Guide}.}
\item{EbsOptimized}{Indicates whether the instance is optimized for Amazon EBS I/O. By
default, the instance is not optimized for EBS I/O. The optimization
provides dedicated throughput to Amazon EBS and an optimized
configuration stack to provide optimal I/O performance. This
optimization is not available with all instance types. Additional usage
charges apply. For more information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html}{Amazon EBS-Optimized Instances}
in the \emph{Amazon EC2 User Guide for Linux Instances}.}
\item{AssociatePublicIpAddress}{Used for groups that launch instances into a virtual private cloud
(VPC). Specifies whether to assign a public IP address to each instance.
For more information, see \href{https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html}{Launching Auto Scaling Instances in a VPC}
in the \emph{Amazon EC2 Auto Scaling User Guide}.
If you specify this parameter, be sure to specify at least one subnet
when you create your group.
Default: If the instance is launched into a default subnet, the default
is to assign a public IP address. If the instance is launched into a
nondefault subnet, the default is not to assign a public IP address.}
\item{PlacementTenancy}{The tenancy of the instance. An instance with a tenancy of \code{dedicated}
runs on single-tenant hardware and can only be launched into a VPC.
To launch Dedicated Instances into a shared tenancy VPC (a VPC with the
instance placement tenancy attribute set to \code{default}), you must set the
value of this parameter to \code{dedicated}.
If you specify this parameter, be sure to specify at least one subnet
when you create your group.
For more information, see \href{https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html}{Launching Auto Scaling Instances in a VPC}
in the \emph{Amazon EC2 Auto Scaling User Guide}.
Valid values: \code{default} \| \code{dedicated}}
}
\description{
Creates a launch configuration.
}
\details{
If you exceed your maximum limit of launch configurations, the call
fails. For information about viewing this limit, see
DescribeAccountLimits. For information about updating this limit, see
\href{https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-account-limits.html}{Amazon EC2 Auto Scaling Limits}
in the \emph{Amazon EC2 Auto Scaling User Guide}.
For more information, see \href{https://docs.aws.amazon.com/autoscaling/ec2/userguide/LaunchConfiguration.html}{Launch Configurations}
in the \emph{Amazon EC2 Auto Scaling User Guide}.
}
\section{Request syntax}{
\preformatted{svc$create_launch_configuration(
LaunchConfigurationName = "string",
ImageId = "string",
KeyName = "string",
SecurityGroups = list(
"string"
),
ClassicLinkVPCId = "string",
ClassicLinkVPCSecurityGroups = list(
"string"
),
UserData = "string",
InstanceId = "string",
InstanceType = "string",
KernelId = "string",
RamdiskId = "string",
BlockDeviceMappings = list(
list(
VirtualName = "string",
DeviceName = "string",
Ebs = list(
SnapshotId = "string",
VolumeSize = 123,
VolumeType = "string",
DeleteOnTermination = TRUE|FALSE,
Iops = 123,
Encrypted = TRUE|FALSE
),
NoDevice = TRUE|FALSE
)
),
InstanceMonitoring = list(
Enabled = TRUE|FALSE
),
SpotPrice = "string",
IamInstanceProfile = "string",
EbsOptimized = TRUE|FALSE,
AssociatePublicIpAddress = TRUE|FALSE,
PlacementTenancy = "string"
)
}
}
\examples{
# This example creates a launch configuration.
\donttest{svc$create_launch_configuration(
IamInstanceProfile = "my-iam-role",
ImageId = "ami-12345678",
InstanceType = "m3.medium",
LaunchConfigurationName = "my-launch-config",
SecurityGroups = list(
"sg-eb2af88e"
)
)}
}
\keyword{internal}
|
/paws/man/autoscaling_create_launch_configuration.Rd
|
permissive
|
peoplecure/paws
|
R
| false
| true
| 9,962
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/autoscaling_operations.R
\name{autoscaling_create_launch_configuration}
\alias{autoscaling_create_launch_configuration}
\title{Creates a launch configuration}
\usage{
autoscaling_create_launch_configuration(LaunchConfigurationName,
ImageId, KeyName, SecurityGroups, ClassicLinkVPCId,
ClassicLinkVPCSecurityGroups, UserData, InstanceId, InstanceType,
KernelId, RamdiskId, BlockDeviceMappings, InstanceMonitoring, SpotPrice,
IamInstanceProfile, EbsOptimized, AssociatePublicIpAddress,
PlacementTenancy)
}
\arguments{
\item{LaunchConfigurationName}{[required] The name of the launch configuration. This name must be unique within
the scope of your AWS account.}
\item{ImageId}{The ID of the Amazon Machine Image (AMI) to use to launch your EC2
instances.
If you do not specify \code{InstanceId}, you must specify \code{ImageId}.
For more information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/finding-an-ami.html}{Finding an AMI}
in the \emph{Amazon EC2 User Guide for Linux Instances}.}
\item{KeyName}{The name of the key pair. For more information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html}{Amazon EC2 Key Pairs}
in the \emph{Amazon EC2 User Guide for Linux Instances}.}
\item{SecurityGroups}{One or more security groups with which to associate the instances.
If your instances are launched in EC2-Classic, you can either specify
security group names or the security group IDs. For more information,
see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html}{Amazon EC2 Security Groups}
in the \emph{Amazon EC2 User Guide for Linux Instances}.
If your instances are launched into a VPC, specify security group IDs.
For more information, see \href{https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html}{Security Groups for Your VPC}
in the \emph{Amazon Virtual Private Cloud User Guide}.}
\item{ClassicLinkVPCId}{The ID of a ClassicLink-enabled VPC to link your EC2-Classic instances
to. This parameter is supported only if you are launching EC2-Classic
instances. For more information, see
\href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html}{ClassicLink}
in the \emph{Amazon EC2 User Guide for Linux Instances} and \href{https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html#as-ClassicLink}{Linking EC2-Classic Instances to a VPC}
in the \emph{Amazon EC2 Auto Scaling User Guide}.}
\item{ClassicLinkVPCSecurityGroups}{The IDs of one or more security groups for the specified
ClassicLink-enabled VPC. For more information, see
\href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html}{ClassicLink}
in the \emph{Amazon EC2 User Guide for Linux Instances} and \href{https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html#as-ClassicLink}{Linking EC2-Classic Instances to a VPC}
in the \emph{Amazon EC2 Auto Scaling User Guide}.
Conditional: This parameter is required if you specify a
ClassicLink-enabled VPC, and is not supported otherwise.}
\item{UserData}{The user data to make available to the launched EC2 instances. For more
information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html}{Instance Metadata and User Data}
in the \emph{Amazon EC2 User Guide for Linux Instances}.}
\item{InstanceId}{The ID of the instance to use to create the launch configuration. The
new launch configuration derives attributes from the instance, except
for the block device mapping.
If you do not specify \code{InstanceId}, you must specify both \code{ImageId} and
\code{InstanceType}.
To create a launch configuration with a block device mapping or override
any other instance attributes, specify them as part of the same request.
For more information, see \href{https://docs.aws.amazon.com/autoscaling/ec2/userguide/create-lc-with-instanceID.html}{Create a Launch Configuration Using an EC2 Instance}
in the \emph{Amazon EC2 Auto Scaling User Guide}.}
\item{InstanceType}{The instance type of the EC2 instance.
If you do not specify \code{InstanceId}, you must specify \code{InstanceType}.
For information about available instance types, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-types.html#AvailableInstanceTypes}{Available Instance Types}
in the \emph{Amazon EC2 User Guide for Linux Instances.}}
\item{KernelId}{The ID of the kernel associated with the AMI.}
\item{RamdiskId}{The ID of the RAM disk associated with the AMI.}
\item{BlockDeviceMappings}{One or more mappings that specify how block devices are exposed to the
instance. For more information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html}{Block Device Mapping}
in the \emph{Amazon EC2 User Guide for Linux Instances}.}
\item{InstanceMonitoring}{Enables detailed monitoring (\code{true}) or basic monitoring (\code{false}) for
the Auto Scaling instances. The default value is \code{true}.}
\item{SpotPrice}{The maximum hourly price to be paid for any Spot Instance launched to
fulfill the request. Spot Instances are launched when the price you
specify exceeds the current Spot market price. For more information, see
\href{https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-launch-spot-instances.html}{Launching Spot Instances in Your Auto Scaling Group}
in the \emph{Amazon EC2 Auto Scaling User Guide}.}
\item{IamInstanceProfile}{The name or the Amazon Resource Name (ARN) of the instance profile
associated with the IAM role for the instance.
EC2 instances launched with an IAM role automatically have AWS security
credentials available. You can use IAM roles with Amazon EC2 Auto
Scaling to automatically enable applications running on your EC2
instances to securely access other AWS resources. For more information,
see \href{https://docs.aws.amazon.com/autoscaling/ec2/userguide/us-iam-role.html}{Use an IAM Role for Applications That Run on Amazon EC2 Instances}
in the \emph{Amazon EC2 Auto Scaling User Guide}.}
\item{EbsOptimized}{Indicates whether the instance is optimized for Amazon EBS I/O. By
default, the instance is not optimized for EBS I/O. The optimization
provides dedicated throughput to Amazon EBS and an optimized
configuration stack to provide optimal I/O performance. This
optimization is not available with all instance types. Additional usage
charges apply. For more information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html}{Amazon EBS-Optimized Instances}
in the \emph{Amazon EC2 User Guide for Linux Instances}.}
\item{AssociatePublicIpAddress}{Used for groups that launch instances into a virtual private cloud
(VPC). Specifies whether to assign a public IP address to each instance.
For more information, see \href{https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html}{Launching Auto Scaling Instances in a VPC}
in the \emph{Amazon EC2 Auto Scaling User Guide}.
If you specify this parameter, be sure to specify at least one subnet
when you create your group.
Default: If the instance is launched into a default subnet, the default
is to assign a public IP address. If the instance is launched into a
nondefault subnet, the default is not to assign a public IP address.}
\item{PlacementTenancy}{The tenancy of the instance. An instance with a tenancy of \code{dedicated}
runs on single-tenant hardware and can only be launched into a VPC.
To launch Dedicated Instances into a shared tenancy VPC (a VPC with the
instance placement tenancy attribute set to \code{default}), you must set the
value of this parameter to \code{dedicated}.
If you specify this parameter, be sure to specify at least one subnet
when you create your group.
For more information, see \href{https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-in-vpc.html}{Launching Auto Scaling Instances in a VPC}
in the \emph{Amazon EC2 Auto Scaling User Guide}.
Valid values: \code{default} \| \code{dedicated}}
}
\description{
Creates a launch configuration.
}
\details{
If you exceed your maximum limit of launch configurations, the call
fails. For information about viewing this limit, see
DescribeAccountLimits. For information about updating this limit, see
\href{https://docs.aws.amazon.com/autoscaling/ec2/userguide/as-account-limits.html}{Amazon EC2 Auto Scaling Limits}
in the \emph{Amazon EC2 Auto Scaling User Guide}.
For more information, see \href{https://docs.aws.amazon.com/autoscaling/ec2/userguide/LaunchConfiguration.html}{Launch Configurations}
in the \emph{Amazon EC2 Auto Scaling User Guide}.
}
\section{Request syntax}{
\preformatted{svc$create_launch_configuration(
LaunchConfigurationName = "string",
ImageId = "string",
KeyName = "string",
SecurityGroups = list(
"string"
),
ClassicLinkVPCId = "string",
ClassicLinkVPCSecurityGroups = list(
"string"
),
UserData = "string",
InstanceId = "string",
InstanceType = "string",
KernelId = "string",
RamdiskId = "string",
BlockDeviceMappings = list(
list(
VirtualName = "string",
DeviceName = "string",
Ebs = list(
SnapshotId = "string",
VolumeSize = 123,
VolumeType = "string",
DeleteOnTermination = TRUE|FALSE,
Iops = 123,
Encrypted = TRUE|FALSE
),
NoDevice = TRUE|FALSE
)
),
InstanceMonitoring = list(
Enabled = TRUE|FALSE
),
SpotPrice = "string",
IamInstanceProfile = "string",
EbsOptimized = TRUE|FALSE,
AssociatePublicIpAddress = TRUE|FALSE,
PlacementTenancy = "string"
)
}
}
\examples{
# This example creates a launch configuration.
\donttest{svc$create_launch_configuration(
IamInstanceProfile = "my-iam-role",
ImageId = "ami-12345678",
InstanceType = "m3.medium",
LaunchConfigurationName = "my-launch-config",
SecurityGroups = list(
"sg-eb2af88e"
)
)}
}
\keyword{internal}
|
extractplate = function(datbefore, datafter, plate, replicate){
  # Extract one quadrant ("plate" 1-4) from a pair of interleaved 24-column
  # plate readings, taken from the chosen replicate.
  #
  # Args:
  #   datbefore, datafter: lists of plate matrices/data frames (one per
  #     replicate) with 24 columns each; rows/columns interleave 4 plates.
  #   plate: which quadrant to extract (1-4); any other value is an error.
  #   replicate: index into the datbefore/datafter lists.
  # Returns: list(datbefore=..., datafter=...) with the extracted quadrant.
  datbefore = datbefore[[replicate]]
  datafter = datafter[[replicate]]
  if (!(plate %in% 1:4))
    stop ("unknown plate.")
  # Quadrant layout: plates 1/2 start at row 1, plates 3/4 at row 2;
  # plates 1/3 start at column 1, plates 2/4 at column 2. This replaces
  # the original four near-identical if/else branches with a lookup.
  row_start = c(1, 1, 2, 2)[plate]
  col_start = c(1, 2, 1, 2)[plate]
  datbefore = datbefore[seq(row_start, nrow(datbefore), 2), seq(col_start, 24, 2)]
  datafter = datafter[seq(row_start, nrow(datafter), 2), seq(col_start, 24, 2)]
  datall = list(datbefore=datbefore, datafter=datafter)
  return(datall)
}
|
/highSCREEN/R/extractplate.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 1,084
|
r
|
extractplate = function(datbefore, datafter, plate, replicate){
  # Pull one quadrant (plate 1-4) out of the chosen replicate of the
  # before/after plate lists. Rows and 24 columns interleave four plates:
  # plates 1/2 sit on odd rows, 3/4 on even rows; plates 1/3 on odd
  # columns, 2/4 on even columns.
  b <- datbefore[[replicate]]
  a <- datafter[[replicate]]
  # Two-step subsetting (rows first, then columns), as in the original.
  quadrant <- function(d, rstart, cstart) {
    d <- d[seq(rstart, nrow(d), 2), ]
    d[, seq(cstart, 24, 2)]
  }
  if (plate == 1) {
    b <- quadrant(b, 1, 1)
    a <- quadrant(a, 1, 1)
  } else if (plate == 2) {
    b <- quadrant(b, 1, 2)
    a <- quadrant(a, 1, 2)
  } else if (plate == 3) {
    b <- quadrant(b, 2, 1)
    a <- quadrant(a, 2, 1)
  } else if (plate == 4) {
    b <- quadrant(b, 2, 2)
    a <- quadrant(a, 2, 2)
  } else {
    stop ("unknown plate.")
  }
  return(list(datbefore = b, datafter = a))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/buildRecord.R
\name{getAnalyticalInfo}
\alias{getAnalyticalInfo}
\alias{gatherCompound}
\alias{gatherSpectrum}
\title{Compose data block of MassBank record}
\usage{
gatherCompound(spec, aggregated, additionalPeaks = NULL, retrieval="standard")
gatherSpectrum(spec, msmsdata, ac_ms, ac_lc, aggregated,
additionalPeaks = NULL, retrieval="standard")
}
\arguments{
\item{spec}{A \code{RmbSpectraSet} object, representing a compound with multiple spectra.}
\item{aggregated}{An aggregate peak table where the peaks are extracted from.}
\item{msmsdata}{A \code{RmbSpectrum2} object from the \code{spec} spectra set, representing a single spectrum to give a record.}
\item{ac_ms, ac_lc}{Information for the AC\$MASS_SPECTROMETRY and
AC\$CHROMATOGRAPHY fields in the MassBank record, created by
\code{gatherCompound} and then fed into \code{gatherSpectrum}.}
\item{additionalPeaks}{If present, a table with additional peaks to add into the spectra.
As loaded with \code{\link{addPeaks}}.}
\item{retrieval}{A value that determines whether the files should be handled either as "standard",
if the compoundlist is complete, "tentative", if at least a formula is present or "unknown"
if the only known thing is the m/z}
}
\value{
\code{gatherCompound} returns a list of tree-like MassBank data
blocks. \code{gatherSpectrum} returns one single MassBank data block or
\code{NA} if no useful peak is in the spectrum.
}
\description{
\code{gatherCompound} composes the data blocks (the "lower half") of all
MassBank records for a compound, using the annotation data in the RMassBank
options, spectrum info data from the \code{analyzedSpec}-type record and the
peaks from the reanalyzed, multiplicity-filtered peak table. It calls
\code{gatherSpectrum} for each child spectrum.
}
\details{
The returned data blocks are in format \code{list( "AC\$MASS_SPECTROMETRY" =
list('FRAGMENTATION_MODE' = 'CID', ...), ...)} etc.
}
\note{
Note that the global table \code{additionalPeaks} is also used as an
additional source of peaks.
}
\examples{
\dontrun{
myspectrum <- w@spectra[[1]]
massbankdata <- gatherCompound(myspectrum, w@aggregated)
# Note: ac_lc and ac_ms are data blocks usually generated in gatherCompound and
# passed on from there. The call below gives a relatively useless result :)
ac_lc_dummy <- list()
ac_ms_dummy <- list()
justOneSpectrum <- gatherSpectrum(myspectrum, myspectrum@child[[2]],
ac_ms_dummy, ac_lc_dummy, w@aggregated)
}
}
\references{
MassBank record format:
\url{http://www.massbank.jp/manuals/MassBankRecord_en.pdf}
}
\seealso{
\code{\link{mbWorkflow}}, \code{\link{compileRecord}}
}
\author{
Michael Stravs
}
|
/man/getAnalyticalInfo.Rd
|
no_license
|
sneumann/RMassBank
|
R
| false
| true
| 2,732
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/buildRecord.R
\name{getAnalyticalInfo}
\alias{getAnalyticalInfo}
\alias{gatherCompound}
\alias{gatherSpectrum}
\title{Compose data block of MassBank record}
\usage{
gatherCompound(spec, aggregated, additionalPeaks = NULL, retrieval="standard")
gatherSpectrum(spec, msmsdata, ac_ms, ac_lc, aggregated,
additionalPeaks = NULL, retrieval="standard")
}
\arguments{
\item{spec}{A \code{RmbSpectraSet} object, representing a compound with multiple spectra.}
\item{aggregated}{An aggregate peak table where the peaks are extracted from.}
\item{msmsdata}{A \code{RmbSpectrum2} object from the \code{spec} spectra set, representing a single spectrum to give a record.}
\item{ac_ms, ac_lc}{Information for the AC\$MASS_SPECTROMETRY and
AC\$CHROMATOGRAPHY fields in the MassBank record, created by
\code{gatherCompound} and then fed into \code{gatherSpectrum}.}
\item{additionalPeaks}{If present, a table with additional peaks to add into the spectra.
As loaded with \code{\link{addPeaks}}.}
\item{retrieval}{A value that determines whether the files should be handled either as "standard",
if the compoundlist is complete, "tentative", if at least a formula is present or "unknown"
if the only known thing is the m/z}
}
\value{
\code{gatherCompound} returns a list of tree-like MassBank data
blocks. \code{gatherSpectrum} returns one single MassBank data block or
\code{NA} if no useful peak is in the spectrum.
}
\description{
\code{gatherCompound} composes the data blocks (the "lower half") of all
MassBank records for a compound, using the annotation data in the RMassBank
options, spectrum info data from the \code{analyzedSpec}-type record and the
peaks from the reanalyzed, multiplicity-filtered peak table. It calls
\code{gatherSpectrum} for each child spectrum.
}
\details{
The returned data blocks are in format \code{list( "AC\$MASS_SPECTROMETRY" =
list('FRAGMENTATION_MODE' = 'CID', ...), ...)} etc.
}
\note{
Note that the global table \code{additionalPeaks} is also used as an
additional source of peaks.
}
\examples{
\dontrun{
myspectrum <- w@spectra[[1]]
massbankdata <- gatherCompound(myspectrum, w@aggregated)
# Note: ac_lc and ac_ms are data blocks usually generated in gatherCompound and
# passed on from there. The call below gives a relatively useless result :)
ac_lc_dummy <- list()
ac_ms_dummy <- list()
justOneSpectrum <- gatherSpectrum(myspectrum, myspectrum@child[[2]],
ac_ms_dummy, ac_lc_dummy, w@aggregated)
}
}
\references{
MassBank record format:
\url{http://www.massbank.jp/manuals/MassBankRecord_en.pdf}
}
\seealso{
\code{\link{mbWorkflow}}, \code{\link{compileRecord}}
}
\author{
Michael Stravs
}
|
# Forward a simulate-RTGS-records request to the FNA backend.
# Captures the call as made by the user (function name + supplied
# arguments only), evaluates each argument, and passes the reconstructed
# call to FNA::check / FNA::exec_command.
simulate.rtgs.records <- function(table,date_column='date',time_column='time',sender_column='sender',receiver_column='receiver',value_column='value',priority_column='priority',date_format='As defined in default format settings.',time_format='As defined in default format settings.',decimal_separator='As defined in default format settings.',save_to='None',debug='false'){
  # l[[1]] is the function name; l[-1] are the argument expressions the
  # caller actually supplied (defaults for omitted args are NOT included).
  l <- as.list(match.call())
  l2 <- list()
  # Evaluate each supplied argument by its parameter name in this frame.
  # NOTE(review): eval(dplyr::sym(i)) forces the argument's promise via
  # its name; confirm this matches what FNA::check expects to receive.
  for (i in names(l[-1])) {
    l2 <- c(l2, eval(dplyr::sym(i)))
  }
  names(l2) <- names(l[-1])
  # Rebuild: function name first, then the evaluated arguments.
  l3 <- c(l[1], l2)
  FNA::exec_command(FNA::check(l3))
}
|
/R/simulate.rtgs.records.R
|
no_license
|
lubospernis/FNA_package
|
R
| false
| false
| 578
|
r
|
# Dispatch a simulate-RTGS-records command through the FNA package.
# The matched call (name plus only the arguments the caller supplied) is
# turned into a named list of evaluated values and handed to FNA.
simulate.rtgs.records <- function(table,date_column='date',time_column='time',sender_column='sender',receiver_column='receiver',value_column='value',priority_column='priority',date_format='As defined in default format settings.',time_format='As defined in default format settings.',decimal_separator='As defined in default format settings.',save_to='None',debug='false'){
  # match.call(): element 1 is the function name, the rest are the
  # unevaluated argument expressions as supplied by the caller.
  l <- as.list(match.call())
  l2 <- list()
  # Force each supplied argument by looking its parameter name up in the
  # current frame. NOTE(review): relies on promise evaluation via
  # eval(dplyr::sym(i)) -- verify against FNA::check's expected input.
  for (i in names(l[-1])) {
    l2 <- c(l2, eval(dplyr::sym(i)))
  }
  names(l2) <- names(l[-1])
  l3 <- c(l[1], l2)
  # Validation and execution are both delegated to the FNA backend.
  FNA::exec_command(FNA::check(l3))
}
|
# Convert saved RUV+funnorm result objects into ranked DMP tables, one
# .Rda file per dataset (discovery/validation splits plus two standalone
# datasets).
# NOTE(review): the original header comment ("create a RGSet ...") looked
# copied from a different script and did not describe this code.
funnormDir <- "/amber1/archive/sgseq/workspace/hansen_lab1/funnorm_repro"
rawDir <- paste0(funnormDir,"/raw_datasets")
disValDir <- paste0(funnormDir,"/dis_val_datasets")
designDir <- paste0(funnormDir,"/designs")
normDir <- paste0(funnormDir,"/norm_datasets")
scriptDir <- paste0(funnormDir,"/scripts")
svaDir <- paste0(funnormDir,"/sva_results")
ruvFunnormDir <- paste0(funnormDir,"/ruv_funnorm_results")
# Each base dataset has a discovery ("dis_") and validation ("val_")
# split; "aml" and "ontario_gender" are standalone.
dataset_names <- c("ontario_ebv","ontario_blood","kirc")
dataset_names <- c(paste0("dis_",dataset_names), paste0("val_",dataset_names))
dataset_names <- c(dataset_names,"aml","ontario_gender")
# Number of unwanted-variation factors (k) per dataset, same order as
# dataset_names.
k_vector <- c(25,3,0,6,1,3,0,18)
stopifnot(length(k_vector) == length(dataset_names))
setwd(ruvFunnormDir)
# seq_along() keeps the loop in sync with dataset_names (the original
# hard-coded 1:8).
for (i in seq_along(dataset_names)){
  k <- k_vector[i]
  data.file <- paste0("ruv_funnorm_results_",dataset_names[i],"_k_",k,".Rda")
  # load() brings `ruv.results` into the workspace.
  load(data.file)
  object <- ruv.results
  p <- t(object$p)
  # Pair each probe's test statistic with its p-value and rank by
  # significance.
  dmps <- cbind(t(object$t),p)
  dmps <- as.data.frame(dmps)
  colnames(dmps) <- c("f","p.val")
  dmps <- dmps[order(dmps$p.val),]
  dmps <- list(ruv=dmps)
  save(dmps, file=paste0("ruv_funnorm_dmps_",dataset_names[i],".Rda"))
  print(i)
}
|
/ruv_funnorm_results/create.ruv.funnorm.dmps.R
|
no_license
|
Jfortin1/funnorm_repro
|
R
| false
| false
| 1,167
|
r
|
# This script will create a RGSet for the discovery cohort and a RGSet for the validation cohort
# NOTE(review): the header above appears copied from another script; the
# code below actually turns saved RUV+funnorm results into ranked DMP
# tables, one .Rda per dataset.
funnormDir <- "/amber1/archive/sgseq/workspace/hansen_lab1/funnorm_repro"
rawDir <- paste0(funnormDir,"/raw_datasets")
disValDir <- paste0(funnormDir,"/dis_val_datasets")
designDir <- paste0(funnormDir,"/designs")
normDir <- paste0(funnormDir,"/norm_datasets")
scriptDir <- paste0(funnormDir,"/scripts")
svaDir <- paste0(funnormDir,"/sva_results")
ruvFunnormDir <- paste0(funnormDir,"/ruv_funnorm_results")
# Discovery ("dis_") and validation ("val_") splits of each base dataset,
# plus two standalone datasets.
dataset_names <- c("ontario_ebv","ontario_blood","kirc")
dataset_names <- c(paste0("dis_",dataset_names), paste0("val_",dataset_names))
dataset_names <- c(dataset_names,"aml","ontario_gender")
# k (number of unwanted-variation factors) per dataset, in dataset_names
# order; 1:8 below assumes exactly 8 datasets.
k_vector <- c(25,3,0,6,1,3,0,18)
setwd(ruvFunnormDir)
for (i in 1:8){
  k <- k_vector[i]
  data.file <- paste0("ruv_funnorm_results_",dataset_names[i],"_k_",k,".Rda")
  # load() brings `ruv.results` into the workspace.
  load(data.file)
  object <- ruv.results
  p <- t(object$p)
  # Pair each probe's test statistic with its p-value, rank by p-value.
  dmps <- cbind(t(object$t),p)
  dmps <- as.data.frame(dmps)
  colnames(dmps) <- c("f","p.val")
  dmps <- dmps[order(dmps$p.val),]
  dmps <- list(ruv=dmps)
  save(dmps, file=paste0("ruv_funnorm_dmps_",dataset_names[i],".Rda"))
  print(i)
}
|
library(ggplot2)
# Collapse a tRNA abundance table to anticodon level.
#
# trnas:   data frame whose row names end in a 3-letter anticodon;
#          columns are samples.
# anticod: character vector of anticodons defining the output rows.
# Returns a data frame (rows = anticod, cols = colnames(trnas)) where each
# cell is the mean over matching tRNA rows, or 0 when no row matches.
extract_cod <- function(trnas, anticod) {
  result <- data.frame(row.names = anticod)
  # The last three characters of every row name are its anticodon.
  acods <- sapply(rownames(trnas),
                  function(nm) substr(nm, nchar(nm) - 2, nchar(nm)))
  for (smp in colnames(trnas)) {
    result[, smp] <- sapply(anticod, function(ac) {
      hit <- acods == ac
      if (any(hit)) mean(trnas[hit, smp]) else 0
    })
  }
  return(result)
}
# Transform a codon/tRNA count table, keeping only rows whose names start
# with an "AaaNNN" amino-acid + codon pattern ("iAaaNNN" also allowed).
#
# data:   data frame of counts (rows = "<AA><codon>", cols = samples).
# transf: one of "log", "arcsinh", "sqrt" or "rel"; any other value
#         returns the row-filtered data unchanged.
# Returns the transformed table with the original (filtered) row names.
transformdata <- function(data,transf){
  # Keep only rows whose name matches the pattern at position 1.
  aa_idx = regexpr("i?[A-Z][a-z]{2}[A-Z]{3}",rownames(data))==1
  data = data[aa_idx,]
  if (transf=="log"){
    # sapply over columns returns a matrix; restore row names afterwards.
    outdata = sapply(data,log)
    # Remove inf values
    # log(0) == -Inf; mark those cells as NaN instead.
    outdata[outdata==-Inf] = NaN
    rownames(outdata)=rownames(data)
  }else if (transf=="arcsinh"){
    outdata = sapply(data,asinh)
    rownames(outdata)=rownames(data)
  }else if (transf=="sqrt"){
    outdata = sapply(data,sqrt)
    rownames(outdata)=rownames(data)
  }else if (transf=="rel"){
    # Compute relative data
    # Each codon count is divided, per sample, by the summed count of all
    # codons sharing its amino acid (row name minus the last 3 characters).
    outdata = data.frame(matrix(ncol = ncol(data), nrow = nrow(data)),row.names = rownames(data))
    colnames(outdata)= colnames(data)
    aa = sapply(rownames(outdata),function(x) substr(x,1,nchar(x)-3))
    uniqueaa = unique(aa)
    for (n in uniqueaa){
      idx = (aa %in% n)
      idx_data = matrix(as.matrix(data[idx,]), ncol = ncol(data), nrow = sum(idx))
      total = colSums(idx_data)
      outdata[idx,] = t(apply(idx_data,1,function(x) x/total))
      # Samples whose total is zero would yield NaN above; overwrite them
      # with a uniform distribution over this amino acid's codons.
      iszero = (total %in% 0)
      if (any(iszero)){
        outdata[idx,iszero] = 1.0/sum(idx)
      }
    }
  }else{
    outdata=data
  }
  return(outdata)
}
# Codon table
# Maps each codon to its amino acid ("AA" column); used to prefix row names.
codons = read.csv("data/codons_table.tab", sep="\t", row.names = 1)
## CALCULATE CU OF STRUCTURAL PROTEINS
# Human CU
# CoCoPUT codon-usage table for human viruses; per-codon counts start at
# column 14 (earlier columns are annotation -- TODO confirm column layout).
codus = read.csv("data/refseq_humanvirus_CoCoPUT.tsv",sep="\t")
# Keep only entries whose annotation marks structural ("Xs") proteins.
is_str = grep("Xs",codus$annotation)
codus=codus[is_str,]
codus_clean = t(codus[,14:ncol(codus)])
# Compute the RCU
# Rename rows to "<AA><codon>" so transformdata() can group codons by AA.
rownames(codus_clean) = sapply(rownames(codus_clean),function(x) paste(codons[x,"AA"],x,sep=""))
codon = transformdata(codus_clean,"rel")
# Compute average of each pathway
# One output row per virus species: the mean RCU across that species'
# entries (a single entry is taken as-is; the sapply returns nothing for
# species with zero entries -- presumably cannot happen here, verify).
species = data.frame(row.names=as.character(unique(codus$Species)))
species[,rownames(codon)] = t(sapply(rownames(species),
                                     function(x) if (sum(codus$Species %in% x)>1){rowMeans(codon[,codus$Species %in% x],na.rm=T)}
                                     else if (sum(codus$Species %in% x)==1){codon[,codus$Species %in% x]}))
# Save output
write.csv(species,"results/virus_Xs_RCUs.csv")
|
/5-2_subsets_CU.R
|
no_license
|
hexavier/tRNA_viruses
|
R
| false
| false
| 2,473
|
r
|
library(ggplot2)
# Collapse a tRNA abundance table to anticodon level.
#
# trnas:   data frame whose row names end in a 3-letter anticodon;
#          columns are samples.
# anticod: character vector of anticodons defining the output rows.
# Returns a data frame (rows = anticod, cols = colnames(trnas)) where each
# cell is the mean over matching tRNA rows, or 0 when no row matches.
extract_cod <- function(trnas, anticod) {
  result <- data.frame(row.names = anticod)
  # The last three characters of every row name are its anticodon.
  acods <- sapply(rownames(trnas),
                  function(nm) substr(nm, nchar(nm) - 2, nchar(nm)))
  for (smp in colnames(trnas)) {
    result[, smp] <- sapply(anticod, function(ac) {
      hit <- acods == ac
      if (any(hit)) mean(trnas[hit, smp]) else 0
    })
  }
  return(result)
}
# Transform a codon/tRNA count table, keeping only rows whose names start
# with an "AaaNNN" amino-acid + codon pattern ("iAaaNNN" also allowed).
#
# data:   data frame of counts (rows = "<AA><codon>", cols = samples).
# transf: one of "log", "arcsinh", "sqrt" or "rel"; any other value
#         returns the row-filtered data unchanged.
# Returns the transformed table with the original (filtered) row names.
transformdata <- function(data,transf){
  # Keep only rows whose name matches the pattern at position 1.
  aa_idx = regexpr("i?[A-Z][a-z]{2}[A-Z]{3}",rownames(data))==1
  data = data[aa_idx,]
  if (transf=="log"){
    # sapply over columns returns a matrix; restore row names afterwards.
    outdata = sapply(data,log)
    # Remove inf values
    # log(0) == -Inf; mark those cells as NaN instead.
    outdata[outdata==-Inf] = NaN
    rownames(outdata)=rownames(data)
  }else if (transf=="arcsinh"){
    outdata = sapply(data,asinh)
    rownames(outdata)=rownames(data)
  }else if (transf=="sqrt"){
    outdata = sapply(data,sqrt)
    rownames(outdata)=rownames(data)
  }else if (transf=="rel"){
    # Compute relative data
    # Each codon count is divided, per sample, by the summed count of all
    # codons sharing its amino acid (row name minus the last 3 characters).
    outdata = data.frame(matrix(ncol = ncol(data), nrow = nrow(data)),row.names = rownames(data))
    colnames(outdata)= colnames(data)
    aa = sapply(rownames(outdata),function(x) substr(x,1,nchar(x)-3))
    uniqueaa = unique(aa)
    for (n in uniqueaa){
      idx = (aa %in% n)
      idx_data = matrix(as.matrix(data[idx,]), ncol = ncol(data), nrow = sum(idx))
      total = colSums(idx_data)
      outdata[idx,] = t(apply(idx_data,1,function(x) x/total))
      # Samples whose total is zero would yield NaN above; overwrite them
      # with a uniform distribution over this amino acid's codons.
      iszero = (total %in% 0)
      if (any(iszero)){
        outdata[idx,iszero] = 1.0/sum(idx)
      }
    }
  }else{
    outdata=data
  }
  return(outdata)
}
# Codon table
# Maps each codon to its amino acid ("AA" column); used to prefix row names.
codons = read.csv("data/codons_table.tab", sep="\t", row.names = 1)
## CALCULATE CU OF STRUCTURAL PROTEINS
# Human CU
# CoCoPUT codon-usage table for human viruses; per-codon counts start at
# column 14 (earlier columns are annotation -- TODO confirm column layout).
codus = read.csv("data/refseq_humanvirus_CoCoPUT.tsv",sep="\t")
# Keep only entries whose annotation marks structural ("Xs") proteins.
is_str = grep("Xs",codus$annotation)
codus=codus[is_str,]
codus_clean = t(codus[,14:ncol(codus)])
# Compute the RCU
# Rename rows to "<AA><codon>" so transformdata() can group codons by AA.
rownames(codus_clean) = sapply(rownames(codus_clean),function(x) paste(codons[x,"AA"],x,sep=""))
codon = transformdata(codus_clean,"rel")
# Compute average of each pathway
# One output row per virus species: the mean RCU across that species'
# entries (a single entry is taken as-is; the sapply returns nothing for
# species with zero entries -- presumably cannot happen here, verify).
species = data.frame(row.names=as.character(unique(codus$Species)))
species[,rownames(codon)] = t(sapply(rownames(species),
                                     function(x) if (sum(codus$Species %in% x)>1){rowMeans(codon[,codus$Species %in% x],na.rm=T)}
                                     else if (sum(codus$Species %in% x)==1){codon[,codus$Species %in% x]}))
# Save output
write.csv(species,"results/virus_Xs_RCUs.csv")
|
# Read a single labelled block from a Gadget "format 2" binary snapshot.
#
# Format-2 snapshots wrap each block in Fortran-style size markers, preceded
# by a small block holding a 4-character LABEL and the byte count of the
# block that follows. The 256-byte HEAD block is parsed field-by-field below.
#
# Arguments:
#   file  - path to the snapshot file.
#   what  - label of the block to return; "HEAD" (default) returns the
#           header fields as a named list.
#   ndim  - values per particle (3 => result reshaped to x/y/z columns).
#   type  - storage mode passed to readBin for the payload. When both ndim
#           and type are missing they are taken from snap.select.type.2(what)
#           (defined elsewhere in this package).
#   debug - > 0: print every block label encountered while scanning.
#   gas   - > 0: keep only the leading particles of the returned block.
#           NOTE(review): the 3-dim branch truncates to Npart[1] while the
#           1-dim branch uses Npart[gas] -- confirm this asymmetry is intended.
#   thin  - keep every thin-th particle (forwarded to .readBinThin).
#
# Returns a list of header fields for what == "HEAD"; otherwise the payload
# of the requested block (vector, or x/y/z data.frame when ndim == 3).
snap.read.2 = function(file, what, ndim, type, debug, gas, thin=1){
  if(missing(what)) what="HEAD"
  what=gsub("^\\s+|\\s+$", "", what)
  if(missing(debug)) debug=0
  if(missing(ndim) && missing(type)){
    tmp=snap.select.type.2(what)
    ndim=tmp$ndim
    type=tmp$type
  }else{
    if(missing(ndim)) ndim=1
    if(missing(type)) type=numeric()
  }#from here on, there is always a type and ndim
  #if(missing(which)) which=c(TRUE,TRUE,TRUE,TRUE,TRUE,TRUE)
  if(missing(gas)) gas=0
  if(gas > 0) cat("Reading with GAS = ",gas,"\n")
  # Open the snapshot as a binary connection; closed before every return path.
  data = file(file,'rb')
  #first LABEL block
  skip=readBin(data,'integer',n=1)
  label=readChar(data,4,useBytes=TRUE)
  label=gsub("^\\s+|\\s+$", "", label)
  block=readBin(data,'integer',n=1)
  skip=readBin(data,'integer',n=1)
  cat("Reading LABEL= ", label, " of ",block,'\n')
  #first header block
  # The header is a fixed 256-byte record; read each field in order.
  skip=readBin(data,'integer',n=1)
  Npart=readBin(data,'integer',n=6)
  Massarr=readBin(data,'numeric',n=6,size=8)
  Time=readBin(data,'numeric',n=1,size=8)
  z=readBin(data,'numeric',n=1,size=8)
  FlagSfr=readBin(data,'integer',n=1)
  FlagFeedback=readBin(data,'integer',n=1)
  Nall=readBin(data,'integer',n=6)
  FlagCooling=readBin(data,'integer',n=1)
  NumFiles=readBin(data,'integer',n=1)
  BoxSize=readBin(data,'numeric',n=1,size=8)
  OmegaM=readBin(data,'numeric',n=1,size=8)
  OmegaL=readBin(data,'numeric',n=1,size=8)
  h=readBin(data,'numeric',n=1,size=8)
  FlagAge=readBin(data,'integer',n=1)
  FlagMetals=readBin(data,'integer',n=1)
  NallHW=readBin(data,'integer',n=6)
  flag_entr_ics=readBin(data,'integer',n=1)
  # Consume the unused padding that rounds the header up to 256 bytes.
  readBin(data,'integer',n=256-241)
  #last head block
  skip=readBin(data,'integer',n=1)
  # Trailing size marker must agree with the announced block size (+8 for
  # the two markers); a mismatch means the file is corrupt or misaligned.
  if((block - skip - 8) != 0) {
    close(data)
    stop("Something wrong!")
  }
  # Scan the remaining blocks: read each LABEL, then either read the payload
  # (when the label matches `what`) or seek past it. readBin returns a
  # zero-length result at EOF, which terminates the loop.
  skip=readBin(data,integer(),n=1)
  while(length(skip)>0){
    label=readChar(data,4,useBytes=TRUE)
    label=gsub("^\\s+|\\s+$", "", label)
    block=readBin(data,integer(),n=1)
    skip=readBin(data,integer(),n=1)
    if(debug > 0)
      cat("Reading LABEL= ", label, " of ",block,'\n')
    skip=readBin(data,integer(),n=1)
    if(what==label){
      blo=.readBinThin(data,type,n=skip/4,size=4,thin=thin,ndim=ndim)
    }else{
      seek(data,where=block-8,origin='current')
    }
    skip=readBin(data,integer(),n=1) #this ends the block
    # NOTE(review): unlike the header check above, a size mismatch here only
    # prints a message instead of stopping -- confirm this leniency is intended.
    if((block - skip - 8) != 0) print("Something wrong!")
    skip=readBin(data,integer(),n=1) #starts new block
  }
  close(data)
  if(ndim == 3 ){
    # Payload is interleaved x,y,z triplets; `extract` indexes the x values.
    extract=((1:sum(Npart))*3)-2
    blo=data.frame( x=blo[extract],y=blo[extract+1],z=blo[extract+2])
  }
  if(gas > 0 && what != 'HEAD'){
    # Restrict to the leading particle species (see NOTE in the header).
    if(ndim == 3 ){
      blo=data.frame(x=blo$x[1:Npart[1]], y=blo$y[1:Npart[1]], z=blo$z[1:Npart[1]] )
    } else {
      blo=blo[1:Npart[gas]]
    }
  }
  if(what=="HEAD")
    return(list(Npart = Npart, Massarr= Massarr, Time= Time, z= z, FlagSfr= FlagSfr, FlagFeedback= FlagFeedback,
                Nall= Nall, FlagCooling= FlagCooling, NumFiles= NumFiles, BoxSize= BoxSize, OmegaM= OmegaM,
                OmegaL= OmegaL, h=h , FlagAge= FlagAge, FlagMetals= FlagMetals, NallHW= NallHW,
                flag_entr_ics=flag_entr_ics))
  else
    return(blo)
}
|
/R/snap.read.2.R
|
no_license
|
asgr/snapshot
|
R
| false
| false
| 3,271
|
r
|
# Read a single labelled block from a Gadget "format 2" binary snapshot.
#
# Format-2 snapshots wrap each block in Fortran-style size markers, preceded
# by a small block holding a 4-character LABEL and the byte count of the
# block that follows. The 256-byte HEAD block is parsed field-by-field below.
#
# Arguments:
#   file  - path to the snapshot file.
#   what  - label of the block to return; "HEAD" (default) returns the
#           header fields as a named list.
#   ndim  - values per particle (3 => result reshaped to x/y/z columns).
#   type  - storage mode passed to readBin for the payload. When both ndim
#           and type are missing they are taken from snap.select.type.2(what)
#           (defined elsewhere in this package).
#   debug - > 0: print every block label encountered while scanning.
#   gas   - > 0: keep only the leading particles of the returned block.
#           NOTE(review): the 3-dim branch truncates to Npart[1] while the
#           1-dim branch uses Npart[gas] -- confirm this asymmetry is intended.
#   thin  - keep every thin-th particle (forwarded to .readBinThin).
#
# Returns a list of header fields for what == "HEAD"; otherwise the payload
# of the requested block (vector, or x/y/z data.frame when ndim == 3).
snap.read.2 = function(file, what, ndim, type, debug, gas, thin=1){
  if(missing(what)) what="HEAD"
  what=gsub("^\\s+|\\s+$", "", what)
  if(missing(debug)) debug=0
  if(missing(ndim) && missing(type)){
    tmp=snap.select.type.2(what)
    ndim=tmp$ndim
    type=tmp$type
  }else{
    if(missing(ndim)) ndim=1
    if(missing(type)) type=numeric()
  }#from here on, there is always a type and ndim
  #if(missing(which)) which=c(TRUE,TRUE,TRUE,TRUE,TRUE,TRUE)
  if(missing(gas)) gas=0
  if(gas > 0) cat("Reading with GAS = ",gas,"\n")
  # Open the snapshot as a binary connection; closed before every return path.
  data = file(file,'rb')
  #first LABEL block
  skip=readBin(data,'integer',n=1)
  label=readChar(data,4,useBytes=TRUE)
  label=gsub("^\\s+|\\s+$", "", label)
  block=readBin(data,'integer',n=1)
  skip=readBin(data,'integer',n=1)
  cat("Reading LABEL= ", label, " of ",block,'\n')
  #first header block
  # The header is a fixed 256-byte record; read each field in order.
  skip=readBin(data,'integer',n=1)
  Npart=readBin(data,'integer',n=6)
  Massarr=readBin(data,'numeric',n=6,size=8)
  Time=readBin(data,'numeric',n=1,size=8)
  z=readBin(data,'numeric',n=1,size=8)
  FlagSfr=readBin(data,'integer',n=1)
  FlagFeedback=readBin(data,'integer',n=1)
  Nall=readBin(data,'integer',n=6)
  FlagCooling=readBin(data,'integer',n=1)
  NumFiles=readBin(data,'integer',n=1)
  BoxSize=readBin(data,'numeric',n=1,size=8)
  OmegaM=readBin(data,'numeric',n=1,size=8)
  OmegaL=readBin(data,'numeric',n=1,size=8)
  h=readBin(data,'numeric',n=1,size=8)
  FlagAge=readBin(data,'integer',n=1)
  FlagMetals=readBin(data,'integer',n=1)
  NallHW=readBin(data,'integer',n=6)
  flag_entr_ics=readBin(data,'integer',n=1)
  # Consume the unused padding that rounds the header up to 256 bytes.
  readBin(data,'integer',n=256-241)
  #last head block
  skip=readBin(data,'integer',n=1)
  # Trailing size marker must agree with the announced block size (+8 for
  # the two markers); a mismatch means the file is corrupt or misaligned.
  if((block - skip - 8) != 0) {
    close(data)
    stop("Something wrong!")
  }
  # Scan the remaining blocks: read each LABEL, then either read the payload
  # (when the label matches `what`) or seek past it. readBin returns a
  # zero-length result at EOF, which terminates the loop.
  skip=readBin(data,integer(),n=1)
  while(length(skip)>0){
    label=readChar(data,4,useBytes=TRUE)
    label=gsub("^\\s+|\\s+$", "", label)
    block=readBin(data,integer(),n=1)
    skip=readBin(data,integer(),n=1)
    if(debug > 0)
      cat("Reading LABEL= ", label, " of ",block,'\n')
    skip=readBin(data,integer(),n=1)
    if(what==label){
      blo=.readBinThin(data,type,n=skip/4,size=4,thin=thin,ndim=ndim)
    }else{
      seek(data,where=block-8,origin='current')
    }
    skip=readBin(data,integer(),n=1) #this ends the block
    # NOTE(review): unlike the header check above, a size mismatch here only
    # prints a message instead of stopping -- confirm this leniency is intended.
    if((block - skip - 8) != 0) print("Something wrong!")
    skip=readBin(data,integer(),n=1) #starts new block
  }
  close(data)
  if(ndim == 3 ){
    # Payload is interleaved x,y,z triplets; `extract` indexes the x values.
    extract=((1:sum(Npart))*3)-2
    blo=data.frame( x=blo[extract],y=blo[extract+1],z=blo[extract+2])
  }
  if(gas > 0 && what != 'HEAD'){
    # Restrict to the leading particle species (see NOTE in the header).
    if(ndim == 3 ){
      blo=data.frame(x=blo$x[1:Npart[1]], y=blo$y[1:Npart[1]], z=blo$z[1:Npart[1]] )
    } else {
      blo=blo[1:Npart[gas]]
    }
  }
  if(what=="HEAD")
    return(list(Npart = Npart, Massarr= Massarr, Time= Time, z= z, FlagSfr= FlagSfr, FlagFeedback= FlagFeedback,
                Nall= Nall, FlagCooling= FlagCooling, NumFiles= NumFiles, BoxSize= BoxSize, OmegaM= OmegaM,
                OmegaL= OmegaL, h=h , FlagAge= FlagAge, FlagMetals= FlagMetals, NallHW= NallHW,
                flag_entr_ics=flag_entr_ics))
  else
    return(blo)
}
|
# Boxplots and ANOVA of cumulative N2O fluxes (1st event, day 44).
# NOTE(review): assumes `output_path` is defined and data.table/ggplot2 are
# attached by an upstream script -- confirm before running this standalone.
folder_out <-paste0(output_path, "/presentation_plots")
dir.create(folder_out)
folder.out2 <-paste0(output_path, "/cum_fluxes_14")
cum.flux.1st.14 <- paste0(folder.out2, "/1st_event_cum14_fluxes.dat")
cum.flux.2nd.14 <- paste0(folder.out2, "/2nd_event_cum14_fluxes.dat")
cum.flux.3rd.14 <- paste0(folder.out2, "/3rd_event_cum14_fluxes.dat")
cum.flux.1st.29 <- paste0(folder.out2, "/1st_event_cum29_fluxes.dat")
cum.flux.1st.44 <- paste0(folder.out2, "/1st_event_cum44_fluxes.dat")
data <- fread(input = cum.flux.1st.44)
################################################################################
# The input holds per-incubation columns (aNO, bN2O, ...). The four blocks
# below copy the table once per incubation (A-D), promote that incubation's
# columns to generic names (NO, N2O, CO2, CH4) and tag each copy, so the
# copies can be stacked into long format.
### A incubation
cum.A <- copy(data)
cum.A[, NO:= aNO]
cum.A[, N2O:= aN2O]
cum.A[, CO2:= aCO2]
cum.A[, CH4:= aCH4]
# cum.A[, todelete:=NULL, with=FALSE]
cum.A[, incubation:= "A"]
### B incubation
cum.B <- copy(data)
cum.B[, NO:= bNO]
cum.B[, N2O:= bN2O]
cum.B[, CO2:= bCO2]
cum.B[, CH4:= bCH4]
# cum.B[, todelete:=NULL, with=FALSE]
cum.B[, incubation:= "B"]
# cum.B[,days:= days+0.0007] #add 1miniute, so that it is not overplot
### C incubation
cum.C <- copy(data)
cum.C[, NO:= cNO]
cum.C[, N2O:= cN2O]
cum.C[, CO2:= cCO2]
cum.C[, CH4:= cCH4]
# cum.C[, todelete:=NULL, with=FALSE]
cum.C[, incubation:= "C"]
# cum.C[,days:= days+0.0007] #add 1miniute, so that it is not overplot
### D incubation
cum.D <- copy(data)
cum.D[, NO:= dNO]
cum.D[, N2O:= dN2O]
cum.D[, CO2:= dCO2]
cum.D[, CH4:= dCH4]
# cum.D[, todelete:=NULL, with=FALSE]
cum.D[, incubation:= "D"]
# cum.D[,days:= days+0.0007] #add 1miniute, so that it is not overplot
### all incubation binding
data <- rbind(cum.A, cum.B, cum.C, cum.D)
################################################################################
################################################################################
# label<X> encodes the combination of the two factors OTHER than X, and is
# used as the facet variable when plotting factor X below.
data[, labelT:= paste(fertilizer, precipitation, sep="-")]
data[, labelP:= paste(tillage, fertilizer, sep="-")]
data[, labelF:= paste(tillage, precipitation, sep="-")]
data[tillage=="NT", legendT:= "No"]
data[tillage=="TT", legendT:= "traditional"]
# Convert to a plain data frame with ordered, human-readable factor levels
# so ggplot2/aov use a deterministic ordering.
mydata <- data.frame(data)
mydata$tillage <- factor(mydata$tillage, levels = c("NT", "TT"), labels=c("no", "traditional"))
mydata$precipitation <- factor(mydata$precipitation, levels = c("c", "i", "d"), labels=c("constant", "increasing", "decreasing"))
mydata$fertilizer <- factor(mydata$fertilizer, levels = c(0, 50, 100), labels=c("0 kg-N/ha", "50", "100"))
mydata$labelT <- factor(mydata$labelT, levels = c("0-c", "0-i", "0-d",
                                                  "50-c", "50-i", "50-d",
                                                  "100-c", "100-i", "100-d"))
mydata$labelP <- factor(mydata$labelP, levels = c("NT-0", "NT-50", "NT-100",
                                                  "TT-0", "TT-50", "TT-100"))
mydata$labelF <- factor(mydata$labelF, levels = c("NT-c", "NT-i", "NT-d",
                                                  "TT-c", "TT-i", "TT-d"))
str(mydata)
table(mydata$tillage, mydata$precipitation, mydata$fertilizer)
################################################################################
# Additive three-factor ANOVA on cumulative N2O, followed by Tukey HSD.
fit <- aov(N2O ~ fertilizer + precipitation + tillage , data = mydata)
# fit <- Anova(aov(N2O ~ fertilizer + precipitation*tillage, data = mydata))
summary(fit)
TukeyHSD(fit)
par(las=2)
par(mar=c(5,8,4,2))
plot(TukeyHSD(fit, which= c("precipitation")))
# plot(TukeyHSD(fit, which= c("fertilizer")))
# plot(TukeyHSD(fit, which= c("tillage")))
################################################################################
### tillage boxplot
# Two figures per factor: one faceted by the other two factors' combinations,
# one pooled across all treatments. NOTE(review): the top-level `p + ...`
# expressions rely on auto-printing; wrap them in print() if this file is
# ever run via source().
# by treatment
myplot <- paste0(folder_out, "/N2O_boxplot_tillage_bytreatment.png")
png(filename = myplot, width = 1600, height = 1200, units = "px")
# `means` is computed but not used in the plot (black dots come from
# stat_summary) -- presumably kept for interactive inspection.
means <- aggregate(N2O ~ tillage*precipitation*fertilizer, mydata, mean)
p <- ggplot(data=mydata, aes(x = tillage, y=N2O, fill=tillage))
p +
    theme_bw() +
    geom_boxplot() +
    facet_wrap(~ labelT, ncol = 9) +
    scale_fill_manual(values = c("grey", "red")) +
    # scale_colour_manual(values = c("grey","red")) +
    stat_summary(fun.y=mean, colour="black", geom="point",
                 shape=20, size=5,show_guide = FALSE) +
    theme( panel.grid.major.x = element_blank() ) + # remove the vertical grid lines
    theme(strip.text = element_text(size = 35)) +
    theme(axis.text.y = element_text(size = 35)) +
    theme(legend.text = element_text(size = 35)) +
    theme(legend.title = element_text(size = 50, face = 'bold')) +
    theme(axis.title.x = element_blank(), axis.ticks = element_blank(), axis.text.x = element_blank()) +
    theme(axis.title.y = element_blank()) +
    ggtitle("Tillage effect on cumulative N2O emission \n [mg-N / m2]") +
    theme(plot.title = element_text(size = 50, lineheight=0.8, face="bold", vjust=2)) +
    theme(legend.key.height = unit(5, "cm"))
dev.off()
# all treatments together
myplot <- paste0(folder_out, "/N2O_boxplot_tillage.png")
png(filename = myplot, width = 320, height = 1200, units = "px")
means <- aggregate(N2O ~ tillage, mydata, mean)
p <- ggplot(data=mydata, aes(x = tillage, y=N2O, fill=tillage))
p +
    theme_bw() +
    geom_boxplot() +
    # facet_wrap(~ tillage, ncol = 1) +
    scale_fill_manual(values = c("grey", "red")) +
    # scale_colour_manual(values = c("grey","red")) +
    stat_summary(fun.y=mean, colour="black", geom="point",
                 shape=20, size=5,show_guide = FALSE) +
    theme( panel.grid.major.x = element_blank() ) + # remove the vertical grid lines
    theme(strip.text = element_text(size = 35)) +
    theme(axis.text.y = element_text(size = 35)) +
    theme(legend.text = element_text(size = 35)) +
    theme(axis.title.x = element_blank(), axis.ticks = element_blank(), axis.text.x = element_blank()) +
    theme(axis.title.y = element_blank(), axis.text.y = element_blank()) +
    theme(legend.position="none")
dev.off()
################################################################################
### precipitation boxplot
# by treatment
myplot <- paste0(folder_out, "/N2O_boxplot_precipitation_bytreatment.png")
png(filename = myplot, width = 1600, height = 1200, units = "px")
means <- aggregate(N2O ~ tillage*precipitation*fertilizer, mydata, mean)
p <- ggplot(data=mydata, aes(x = precipitation, y=N2O, fill=precipitation))
p +
    theme_bw() +
    geom_boxplot() +
    facet_wrap(~ labelP, ncol = 6) +
    scale_fill_manual(values = c("white", "deepskyblue","dodgerblue4"), name="Rain pattern") +
    # scale_colour_manual(values = c("grey","red")) +
    stat_summary(fun.y=mean, colour="black", geom="point",
                 shape=20, size=5,show_guide = FALSE) +
    theme( panel.grid.major.x = element_blank() ) + # remove the vertical grid lines
    theme(strip.text = element_text(size = 35)) +
    theme(axis.text.y = element_text(size = 35)) +
    theme(legend.text = element_text(size = 35)) +
    theme(legend.title = element_text(size = 50, face = 'bold')) +
    theme(axis.title.x = element_blank(), axis.ticks = element_blank(), axis.text.x = element_blank()) +
    theme(axis.title.y = element_blank()) +
    ggtitle("Rain-pattern effect on cumulative N2O emission \n [mg-N / m2]") +
    theme(plot.title = element_text(size = 50, lineheight=0.8, face="bold", vjust=2)) +
    theme(legend.key.height = unit(5, "cm"))
dev.off()
# all treatments together
myplot <- paste0(folder_out, "/N2O_boxplot_precipitation.png")
png(filename = myplot, width = 320, height = 1200, units = "px")
means <- aggregate(N2O ~ precipitation, mydata, mean)
p <- ggplot(data=mydata, aes(x = precipitation, y=N2O, fill=precipitation))
p +
    theme_bw() +
    geom_boxplot() +
    # facet_wrap(~ tillage, ncol = 1) +
    scale_fill_manual(values = c("white", "deepskyblue","dodgerblue4"), name="Rain pattern") +
    # scale_colour_manual(values = c("grey","red")) +
    stat_summary(fun.y=mean, colour="black", geom="point",
                 shape=20, size=5,show_guide = FALSE) +
    theme( panel.grid.major.x = element_blank() ) + # remove the vertical grid lines
    theme(strip.text = element_text(size = 35)) +
    theme(axis.text.y = element_text(size = 35)) +
    theme(legend.text = element_text(size = 35)) +
    theme(axis.title.x = element_blank(), axis.ticks = element_blank(), axis.text.x = element_blank()) +
    theme(axis.title.y = element_blank(), axis.text.y = element_blank()) +
    theme(legend.position="none")
dev.off()
################################################################################
### fertilizer boxplot
# Same two-figure pattern as the tillage/precipitation sections.
# by treatment
myplot <- paste0(folder_out, "/N2O_boxplot_fertilizer_bytreatment.png")
png(filename = myplot, width = 1600, height = 1200, units = "px")
# `means` is computed but not used in the plot (black dots come from
# stat_summary) -- kept for interactive inspection, as in earlier sections.
means <- aggregate(N2O ~ tillage*precipitation*fertilizer, mydata, mean)
p <- ggplot(data=mydata, aes(x = fertilizer, y=N2O, fill=fertilizer))
p +
    theme_bw() +
    geom_boxplot() +
    facet_wrap(~ labelF, ncol = 6) +
    scale_fill_manual(values = c("white","olivedrab2", "olivedrab4"), name="Fertilizer \n load") +
    # scale_colour_manual(values = c("grey","red")) +
    stat_summary(fun.y=mean, colour="black", geom="point",
                 shape=20, size=5,show_guide = FALSE) +
    theme( panel.grid.major.x = element_blank() ) + # remove the vertical grid lines
    theme(strip.text = element_text(size = 35)) +
    theme(axis.text.y = element_text(size = 35)) +
    theme(legend.text = element_text(size = 35)) +
    theme(legend.title = element_text(size = 50, face = 'bold')) +
    theme(axis.title.x = element_blank(), axis.ticks = element_blank(), axis.text.x = element_blank()) +
    theme(axis.title.y = element_blank()) +
    ggtitle("Fertilizer-load effect on cumulative N2O emission \n [mg-N / m2]") +
    theme(plot.title = element_text(size = 50, lineheight=0.8, face="bold", vjust=2)) +
    theme(legend.key.height = unit(5, "cm"))
dev.off()
# Bug fix: a second, stray dev.off() used to follow here. With only one PNG
# device open it would close whichever other graphics device was active (or
# error when none remained), sending subsequent plots to the wrong device.
# all treatments together
myplot <- paste0(folder_out, "/N2O_boxplot_fertilizer.png")
png(filename = myplot, width = 320, height = 1200, units = "px")
means <- aggregate(N2O ~ fertilizer, mydata, mean)
p <- ggplot(data=mydata, aes(x = fertilizer, y=N2O, fill=fertilizer))
p +
    theme_bw() +
    geom_boxplot() +
    # facet_wrap(~ tillage, ncol = 1) +
    scale_fill_manual(values = c("white","olivedrab2", "olivedrab4")) +
    # scale_colour_manual(values = c("grey","red")) +
    stat_summary(fun.y=mean, colour="black", geom="point",
                 shape=20, size=5,show_guide = FALSE) +
    theme( panel.grid.major.x = element_blank() ) + # remove the vertical grid lines
    theme(strip.text = element_text(size = 35)) +
    theme(axis.text.y = element_text(size = 35)) +
    theme(legend.text = element_text(size = 35)) +
    theme(axis.title.x = element_blank(), axis.ticks = element_blank(), axis.text.x = element_blank()) +
    theme(axis.title.y = element_blank(), axis.text.y = element_blank()) +
    theme(legend.position="none")
dev.off()
####
# Pairwise comparisons using t tests with pooled SD
# These run on the stacked data.table `data` (not the factor-labelled
# `mydata`). NOTE(review): `data$treatment` is never created in this script,
# so it is presumably a column of the input file -- verify it exists.
pairwise.t.test(data$N2O,data$treatment,p.adjust.method = "holm")
pairwise.t.test(data$N2O,data$labelF,p.adjust.method = "holm")
pairwise.t.test(data$N2O,data$labelT,p.adjust.method = "holm")
pairwise.t.test(data$N2O,data$labelP,p.adjust.method = "holm")
pairwise.t.test(data$N2O,data$tillage,p.adjust.method = "holm")
pairwise.t.test(data$N2O,data$precipitation,p.adjust.method = "holm")
pairwise.t.test(data$N2O,data$fertilizer,p.adjust.method = "holm")
|
/boxplots_N2O.R
|
no_license
|
pz10/all_incubations
|
R
| false
| false
| 12,160
|
r
|
# Boxplots and ANOVA of cumulative N2O fluxes (1st event, day 44).
# NOTE(review): assumes `output_path` is defined and data.table/ggplot2 are
# attached by an upstream script -- confirm before running this standalone.
folder_out <-paste0(output_path, "/presentation_plots")
dir.create(folder_out)
folder.out2 <-paste0(output_path, "/cum_fluxes_14")
cum.flux.1st.14 <- paste0(folder.out2, "/1st_event_cum14_fluxes.dat")
cum.flux.2nd.14 <- paste0(folder.out2, "/2nd_event_cum14_fluxes.dat")
cum.flux.3rd.14 <- paste0(folder.out2, "/3rd_event_cum14_fluxes.dat")
cum.flux.1st.29 <- paste0(folder.out2, "/1st_event_cum29_fluxes.dat")
cum.flux.1st.44 <- paste0(folder.out2, "/1st_event_cum44_fluxes.dat")
data <- fread(input = cum.flux.1st.44)
################################################################################
# The input holds per-incubation columns (aNO, bN2O, ...). The four blocks
# below copy the table once per incubation (A-D), promote that incubation's
# columns to generic names (NO, N2O, CO2, CH4) and tag each copy, so the
# copies can be stacked into long format.
### A incubation
cum.A <- copy(data)
cum.A[, NO:= aNO]
cum.A[, N2O:= aN2O]
cum.A[, CO2:= aCO2]
cum.A[, CH4:= aCH4]
# cum.A[, todelete:=NULL, with=FALSE]
cum.A[, incubation:= "A"]
### B incubation
cum.B <- copy(data)
cum.B[, NO:= bNO]
cum.B[, N2O:= bN2O]
cum.B[, CO2:= bCO2]
cum.B[, CH4:= bCH4]
# cum.B[, todelete:=NULL, with=FALSE]
cum.B[, incubation:= "B"]
# cum.B[,days:= days+0.0007] #add 1miniute, so that it is not overplot
### C incubation
cum.C <- copy(data)
cum.C[, NO:= cNO]
cum.C[, N2O:= cN2O]
cum.C[, CO2:= cCO2]
cum.C[, CH4:= cCH4]
# cum.C[, todelete:=NULL, with=FALSE]
cum.C[, incubation:= "C"]
# cum.C[,days:= days+0.0007] #add 1miniute, so that it is not overplot
### D incubation
cum.D <- copy(data)
cum.D[, NO:= dNO]
cum.D[, N2O:= dN2O]
cum.D[, CO2:= dCO2]
cum.D[, CH4:= dCH4]
# cum.D[, todelete:=NULL, with=FALSE]
cum.D[, incubation:= "D"]
# cum.D[,days:= days+0.0007] #add 1miniute, so that it is not overplot
### all incubation binding
data <- rbind(cum.A, cum.B, cum.C, cum.D)
################################################################################
################################################################################
# label<X> encodes the combination of the two factors OTHER than X, and is
# used as the facet variable when plotting factor X below.
data[, labelT:= paste(fertilizer, precipitation, sep="-")]
data[, labelP:= paste(tillage, fertilizer, sep="-")]
data[, labelF:= paste(tillage, precipitation, sep="-")]
data[tillage=="NT", legendT:= "No"]
data[tillage=="TT", legendT:= "traditional"]
# Convert to a plain data frame with ordered, human-readable factor levels
# so ggplot2/aov use a deterministic ordering.
mydata <- data.frame(data)
mydata$tillage <- factor(mydata$tillage, levels = c("NT", "TT"), labels=c("no", "traditional"))
mydata$precipitation <- factor(mydata$precipitation, levels = c("c", "i", "d"), labels=c("constant", "increasing", "decreasing"))
mydata$fertilizer <- factor(mydata$fertilizer, levels = c(0, 50, 100), labels=c("0 kg-N/ha", "50", "100"))
mydata$labelT <- factor(mydata$labelT, levels = c("0-c", "0-i", "0-d",
                                                  "50-c", "50-i", "50-d",
                                                  "100-c", "100-i", "100-d"))
mydata$labelP <- factor(mydata$labelP, levels = c("NT-0", "NT-50", "NT-100",
                                                  "TT-0", "TT-50", "TT-100"))
mydata$labelF <- factor(mydata$labelF, levels = c("NT-c", "NT-i", "NT-d",
                                                  "TT-c", "TT-i", "TT-d"))
str(mydata)
table(mydata$tillage, mydata$precipitation, mydata$fertilizer)
################################################################################
# Additive three-factor ANOVA on cumulative N2O, followed by Tukey HSD.
fit <- aov(N2O ~ fertilizer + precipitation + tillage , data = mydata)
# fit <- Anova(aov(N2O ~ fertilizer + precipitation*tillage, data = mydata))
summary(fit)
TukeyHSD(fit)
par(las=2)
par(mar=c(5,8,4,2))
plot(TukeyHSD(fit, which= c("precipitation")))
# plot(TukeyHSD(fit, which= c("fertilizer")))
# plot(TukeyHSD(fit, which= c("tillage")))
################################################################################
### tillage boxplot
# Two figures per factor: one faceted by the other two factors' combinations,
# one pooled across all treatments. NOTE(review): the top-level `p + ...`
# expressions rely on auto-printing; wrap them in print() if this file is
# ever run via source().
# by treatment
myplot <- paste0(folder_out, "/N2O_boxplot_tillage_bytreatment.png")
png(filename = myplot, width = 1600, height = 1200, units = "px")
# `means` is computed but not used in the plot (black dots come from
# stat_summary) -- presumably kept for interactive inspection.
means <- aggregate(N2O ~ tillage*precipitation*fertilizer, mydata, mean)
p <- ggplot(data=mydata, aes(x = tillage, y=N2O, fill=tillage))
p +
    theme_bw() +
    geom_boxplot() +
    facet_wrap(~ labelT, ncol = 9) +
    scale_fill_manual(values = c("grey", "red")) +
    # scale_colour_manual(values = c("grey","red")) +
    stat_summary(fun.y=mean, colour="black", geom="point",
                 shape=20, size=5,show_guide = FALSE) +
    theme( panel.grid.major.x = element_blank() ) + # remove the vertical grid lines
    theme(strip.text = element_text(size = 35)) +
    theme(axis.text.y = element_text(size = 35)) +
    theme(legend.text = element_text(size = 35)) +
    theme(legend.title = element_text(size = 50, face = 'bold')) +
    theme(axis.title.x = element_blank(), axis.ticks = element_blank(), axis.text.x = element_blank()) +
    theme(axis.title.y = element_blank()) +
    ggtitle("Tillage effect on cumulative N2O emission \n [mg-N / m2]") +
    theme(plot.title = element_text(size = 50, lineheight=0.8, face="bold", vjust=2)) +
    theme(legend.key.height = unit(5, "cm"))
dev.off()
# all treatments together
myplot <- paste0(folder_out, "/N2O_boxplot_tillage.png")
png(filename = myplot, width = 320, height = 1200, units = "px")
means <- aggregate(N2O ~ tillage, mydata, mean)
p <- ggplot(data=mydata, aes(x = tillage, y=N2O, fill=tillage))
p +
    theme_bw() +
    geom_boxplot() +
    # facet_wrap(~ tillage, ncol = 1) +
    scale_fill_manual(values = c("grey", "red")) +
    # scale_colour_manual(values = c("grey","red")) +
    stat_summary(fun.y=mean, colour="black", geom="point",
                 shape=20, size=5,show_guide = FALSE) +
    theme( panel.grid.major.x = element_blank() ) + # remove the vertical grid lines
    theme(strip.text = element_text(size = 35)) +
    theme(axis.text.y = element_text(size = 35)) +
    theme(legend.text = element_text(size = 35)) +
    theme(axis.title.x = element_blank(), axis.ticks = element_blank(), axis.text.x = element_blank()) +
    theme(axis.title.y = element_blank(), axis.text.y = element_blank()) +
    theme(legend.position="none")
dev.off()
################################################################################
### precipitation boxplot
# by treatment
myplot <- paste0(folder_out, "/N2O_boxplot_precipitation_bytreatment.png")
png(filename = myplot, width = 1600, height = 1200, units = "px")
means <- aggregate(N2O ~ tillage*precipitation*fertilizer, mydata, mean)
p <- ggplot(data=mydata, aes(x = precipitation, y=N2O, fill=precipitation))
p +
    theme_bw() +
    geom_boxplot() +
    facet_wrap(~ labelP, ncol = 6) +
    scale_fill_manual(values = c("white", "deepskyblue","dodgerblue4"), name="Rain pattern") +
    # scale_colour_manual(values = c("grey","red")) +
    stat_summary(fun.y=mean, colour="black", geom="point",
                 shape=20, size=5,show_guide = FALSE) +
    theme( panel.grid.major.x = element_blank() ) + # remove the vertical grid lines
    theme(strip.text = element_text(size = 35)) +
    theme(axis.text.y = element_text(size = 35)) +
    theme(legend.text = element_text(size = 35)) +
    theme(legend.title = element_text(size = 50, face = 'bold')) +
    theme(axis.title.x = element_blank(), axis.ticks = element_blank(), axis.text.x = element_blank()) +
    theme(axis.title.y = element_blank()) +
    ggtitle("Rain-pattern effect on cumulative N2O emission \n [mg-N / m2]") +
    theme(plot.title = element_text(size = 50, lineheight=0.8, face="bold", vjust=2)) +
    theme(legend.key.height = unit(5, "cm"))
dev.off()
# all treatments together
myplot <- paste0(folder_out, "/N2O_boxplot_precipitation.png")
png(filename = myplot, width = 320, height = 1200, units = "px")
means <- aggregate(N2O ~ precipitation, mydata, mean)
p <- ggplot(data=mydata, aes(x = precipitation, y=N2O, fill=precipitation))
p +
    theme_bw() +
    geom_boxplot() +
    # facet_wrap(~ tillage, ncol = 1) +
    scale_fill_manual(values = c("white", "deepskyblue","dodgerblue4"), name="Rain pattern") +
    # scale_colour_manual(values = c("grey","red")) +
    stat_summary(fun.y=mean, colour="black", geom="point",
                 shape=20, size=5,show_guide = FALSE) +
    theme( panel.grid.major.x = element_blank() ) + # remove the vertical grid lines
    theme(strip.text = element_text(size = 35)) +
    theme(axis.text.y = element_text(size = 35)) +
    theme(legend.text = element_text(size = 35)) +
    theme(axis.title.x = element_blank(), axis.ticks = element_blank(), axis.text.x = element_blank()) +
    theme(axis.title.y = element_blank(), axis.text.y = element_blank()) +
    theme(legend.position="none")
dev.off()
################################################################################
### fertilizer boxplot
# Same two-figure pattern as the tillage/precipitation sections.
# by treatment
myplot <- paste0(folder_out, "/N2O_boxplot_fertilizer_bytreatment.png")
png(filename = myplot, width = 1600, height = 1200, units = "px")
# `means` is computed but not used in the plot (black dots come from
# stat_summary) -- kept for interactive inspection, as in earlier sections.
means <- aggregate(N2O ~ tillage*precipitation*fertilizer, mydata, mean)
p <- ggplot(data=mydata, aes(x = fertilizer, y=N2O, fill=fertilizer))
p +
    theme_bw() +
    geom_boxplot() +
    facet_wrap(~ labelF, ncol = 6) +
    scale_fill_manual(values = c("white","olivedrab2", "olivedrab4"), name="Fertilizer \n load") +
    # scale_colour_manual(values = c("grey","red")) +
    stat_summary(fun.y=mean, colour="black", geom="point",
                 shape=20, size=5,show_guide = FALSE) +
    theme( panel.grid.major.x = element_blank() ) + # remove the vertical grid lines
    theme(strip.text = element_text(size = 35)) +
    theme(axis.text.y = element_text(size = 35)) +
    theme(legend.text = element_text(size = 35)) +
    theme(legend.title = element_text(size = 50, face = 'bold')) +
    theme(axis.title.x = element_blank(), axis.ticks = element_blank(), axis.text.x = element_blank()) +
    theme(axis.title.y = element_blank()) +
    ggtitle("Fertilizer-load effect on cumulative N2O emission \n [mg-N / m2]") +
    theme(plot.title = element_text(size = 50, lineheight=0.8, face="bold", vjust=2)) +
    theme(legend.key.height = unit(5, "cm"))
dev.off()
# Bug fix: a second, stray dev.off() used to follow here. With only one PNG
# device open it would close whichever other graphics device was active (or
# error when none remained), sending subsequent plots to the wrong device.
# all treatments together
myplot <- paste0(folder_out, "/N2O_boxplot_fertilizer.png")
png(filename = myplot, width = 320, height = 1200, units = "px")
means <- aggregate(N2O ~ fertilizer, mydata, mean)
p <- ggplot(data=mydata, aes(x = fertilizer, y=N2O, fill=fertilizer))
p +
    theme_bw() +
    geom_boxplot() +
    # facet_wrap(~ tillage, ncol = 1) +
    scale_fill_manual(values = c("white","olivedrab2", "olivedrab4")) +
    # scale_colour_manual(values = c("grey","red")) +
    stat_summary(fun.y=mean, colour="black", geom="point",
                 shape=20, size=5,show_guide = FALSE) +
    theme( panel.grid.major.x = element_blank() ) + # remove the vertical grid lines
    theme(strip.text = element_text(size = 35)) +
    theme(axis.text.y = element_text(size = 35)) +
    theme(legend.text = element_text(size = 35)) +
    theme(axis.title.x = element_blank(), axis.ticks = element_blank(), axis.text.x = element_blank()) +
    theme(axis.title.y = element_blank(), axis.text.y = element_blank()) +
    theme(legend.position="none")
dev.off()
####
# Pairwise comparisons using t tests with pooled SD
# These run on the stacked data.table `data` (not the factor-labelled
# `mydata`). NOTE(review): `data$treatment` is never created in this script,
# so it is presumably a column of the input file -- verify it exists.
pairwise.t.test(data$N2O,data$treatment,p.adjust.method = "holm")
pairwise.t.test(data$N2O,data$labelF,p.adjust.method = "holm")
pairwise.t.test(data$N2O,data$labelT,p.adjust.method = "holm")
pairwise.t.test(data$N2O,data$labelP,p.adjust.method = "holm")
pairwise.t.test(data$N2O,data$tillage,p.adjust.method = "holm")
pairwise.t.test(data$N2O,data$precipitation,p.adjust.method = "holm")
pairwise.t.test(data$N2O,data$fertilizer,p.adjust.method = "holm")
|
6db405d7de3ff92c7279f85a2946e070 ttt_5x5-shape-4-GTTT-2-1-torus-1.qdimacs 2154 9289
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/MayerEichberger-Saffidine/PositionalGames_gttt/ttt_5x5-shape-4-GTTT-2-1-torus-1/ttt_5x5-shape-4-GTTT-2-1-torus-1.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 83
|
r
|
6db405d7de3ff92c7279f85a2946e070 ttt_5x5-shape-4-GTTT-2-1-torus-1.qdimacs 2154 9289
|
#' Process sample contamination checks
#'
#' @description
#' Takes *selfSM reports generated by VerifyBamID during alignment, and returns a vector of freemix scores.
#' The freemix score is a sequence only estimate of sample contamination that ranges from 0 to 1.
#'
#' Note: Targeted panels are often too small for this step to work properly.
#'
#' @inheritParams get.coverage.by.sample.statistics
#'
#' @return freemix.scores Data frame giving sample contamination (column freemix) score per sample.
#'
#' @references \url{https://genome.sph.umich.edu/wiki/VerifyBamID}
process.sample.contamination.checks <- function(project.directory) {
    # Locate the VerifyBamID *selfSM reports under the project directory and
    # pair each report with its sample ID (parsed from the filename), so the
    # two vectors stay aligned by position.
    check.paths <- system.ls(pattern = "*/*selfSM", directory = project.directory, error = TRUE);
    check.ids <- extract.sample.ids(check.paths, from.filename = TRUE);
    # Read each report and keep only the FREEMIX contamination estimate.
    # Collecting into a list keyed by sample ID preserves the behaviour that a
    # duplicated ID silently keeps only its last report.
    per.sample <- list();
    for (idx in seq_along(check.paths)) {
        # Each *selfSM file is a single-row tab-delimited table; the
        # contamination estimate lives in the FREEMIX column. See
        # https://genome.sph.umich.edu/wiki/VerifyBamID#Column_information_in_the_output_files
        report <- utils::read.delim(
            check.paths[idx],
            sep = "\t",
            as.is = TRUE,
            header = TRUE,
            stringsAsFactors = FALSE
            );
        per.sample[[ check.ids[idx] ]] <- data.frame(
            "sample.id" = check.ids[idx],
            "freemix" = report[1, "FREEMIX"]
            );
        }
    # Stack the one-row frames; the list names become the result's row names.
    return(do.call(rbind, per.sample));
    }
|
/R/process.sample.contamination.checks.R
|
no_license
|
cran/varitas
|
R
| false
| false
| 1,821
|
r
|
#' Process sample contamination checks
#'
#' @description
#' Takes *selfSM reports generated by VerifyBamID during alignment, and returns a data frame of freemix scores.
#' The freemix score is a sequence only estimate of sample contamination that ranges from 0 to 1.
#'
#' Note: Targeted panels are often too small for this step to work properly.
#'
#' @inheritParams get.coverage.by.sample.statistics
#'
#' @return freemix.scores Data frame giving sample contamination (column freemix) score per sample.
#'
#' @references \url{https://genome.sph.umich.edu/wiki/VerifyBamID}
process.sample.contamination.checks <- function(project.directory) {
    sample.contamination.check.paths <- system.ls(pattern = "*/*selfSM", directory = project.directory, error = TRUE);
    # IDs are parsed from the report filenames, so paths and IDs align by position.
    sample.ids <- extract.sample.ids(sample.contamination.check.paths, from.filename = TRUE);
    freemix.scores <- list();
    for(i in seq_along(sample.contamination.check.paths)) {
        path <- sample.contamination.check.paths[i];
        sample.id <- sample.ids[i];
        # Single row data frame, where header gives variable and the row gives value
        # The sample contamination score is stored in the column called FREEMIX.
        # For more information, see https://genome.sph.umich.edu/wiki/VerifyBamID#Column_information_in_the_output_files
        contamination.check <- utils::read.delim(
            path,
            sep = "\t",
            as.is = TRUE,
            header = TRUE,
            stringsAsFactors = FALSE
            );
        # One single-row data frame per sample, keyed by sample ID; note that a
        # duplicated sample ID would silently keep only its last report.
        freemix.scores[[ sample.id ]] <- data.frame(
            "sample.id" = sample.id,
            "freemix" = contamination.check[1, "FREEMIX"]
            );
        }
    # Stack the per-sample rows; the list names become the result's row names.
    freemix.scores <- do.call(rbind, freemix.scores);
    return(freemix.scores);
    }
|
test_that("Checking anlz_tbnimet, tbni metrics only", {
# raw metric data
dat <- anlz_tbnimet(fimdata)
# get last row of data
result <- dat[nrow(dat), ]
expect_equal(result, structure(list(Reference = "TBM2019121309", Year = 2019, Month = 12,
Season = "Winter", bay_segment = "MTB", NumTaxa = 5, Shannon = 0.727591345714938,
TaxaSelect = 0, NumGuilds = 2, BenthicTaxa = 5), row.names = c(NA,
-1L), class = c("tbl_df", "tbl", "data.frame"))
)
})
test_that("Checking anlz_tbnimet, all metrics", {
# raw metric data
dat <- anlz_tbnimet(fimdata, all = T)
# get last row of data
result <- dat[nrow(dat), ]
expect_equal(result, structure(list(Reference = "TBM2019121309", Year = 2019, Month = 12,
Season = "Winter", bay_segment = "MTB", NumTaxa = 5, NumIndiv = 76,
Shannon = 0.727591345714938, Simpson = 1.61974200785193,
Pielou = 0.452077921175931, TaxaSelect = 0, NumGuilds = 2,
TSTaxa = 5, TGTaxa = 0, BenthicTaxa = 5, PelagicTaxa = 0,
OblTaxa = 5, MSTaxa = 2, ESTaxa = 3, SelectIndiv = 0, Taxa90 = 2,
TSAbund = 76, TGAbund = 0, BenthicAbund = 76, PelagicAbund = 0,
OblAbund = 76, ESAbund = 73, MSAbund = 3, Num_LR = 0, PropTG = 0,
PropTS = 1, PropBenthic = 1, PropPelagic = 0, PropObl = 1,
PropMS = 0.0394736842105263, PropES = 0.960526315789474,
PropSelect = 0), row.names = c(NA, -1L), class = c("tbl_df",
"tbl", "data.frame"))
)
})
|
/tests/testthat/test-anlz_tbnimet.R
|
permissive
|
mikewessel/tbeptools
|
R
| false
| false
| 2,011
|
r
|
test_that("Checking anlz_tbnimet, tbni metrics only", {
# raw metric data
dat <- anlz_tbnimet(fimdata)
# get last row of data
result <- dat[nrow(dat), ]
expect_equal(result, structure(list(Reference = "TBM2019121309", Year = 2019, Month = 12,
Season = "Winter", bay_segment = "MTB", NumTaxa = 5, Shannon = 0.727591345714938,
TaxaSelect = 0, NumGuilds = 2, BenthicTaxa = 5), row.names = c(NA,
-1L), class = c("tbl_df", "tbl", "data.frame"))
)
})
test_that("Checking anlz_tbnimet, all metrics", {
# raw metric data
dat <- anlz_tbnimet(fimdata, all = T)
# get last row of data
result <- dat[nrow(dat), ]
expect_equal(result, structure(list(Reference = "TBM2019121309", Year = 2019, Month = 12,
Season = "Winter", bay_segment = "MTB", NumTaxa = 5, NumIndiv = 76,
Shannon = 0.727591345714938, Simpson = 1.61974200785193,
Pielou = 0.452077921175931, TaxaSelect = 0, NumGuilds = 2,
TSTaxa = 5, TGTaxa = 0, BenthicTaxa = 5, PelagicTaxa = 0,
OblTaxa = 5, MSTaxa = 2, ESTaxa = 3, SelectIndiv = 0, Taxa90 = 2,
TSAbund = 76, TGAbund = 0, BenthicAbund = 76, PelagicAbund = 0,
OblAbund = 76, ESAbund = 73, MSAbund = 3, Num_LR = 0, PropTG = 0,
PropTS = 1, PropBenthic = 1, PropPelagic = 0, PropObl = 1,
PropMS = 0.0394736842105263, PropES = 0.960526315789474,
PropSelect = 0), row.names = c(NA, -1L), class = c("tbl_df",
"tbl", "data.frame"))
)
})
|
load("data/diagnozaOsoby2011.RData")
variablesOriginalNamesYear2000 <- c("ap83_1", "ap83_2", "ap83_3", "ap84", "ap85", "ap86", "ap100", "ac8",
"wiek2000", "wiek6_2000", "status9_2000", "eduk4_2000", "PLEC", "bp107") # We cannot take bp107 - these results are not from year 2000.
variablesOriginalNamesYear2011 <- c("fp44", "fp45", "fp46", "fp72", "fp73", "fp74", "fp88", "fC11", "wiek2011", "wiek6_2011", "status9_2011", "eduk4_2011", "PLEC", "fp65")
variablesDescriptionPolish2000 <- c( "Czy pali papierosy",
"Ile przecietnie papierosow dziennie wypala",
"Czy kiedykolwiek palil papierosy",
"Korzystalem z porad psychologa (psychiatry)",
"Pilem za duzo alkoholu",
"Probowalem narkotykow",
"Oskarzono mnie o dokonanie czynu karalnego",
"Stan cywilny",
"Wiek",
"Kategoria wiekowa",
"Grupa zawodowa",
"Poziom wyksztalcenia",
"Plec",
"Dochod miesieczny",
"Kategorie palaczy")
variablesDescriptionPolish2011 <- c( "Czy pali papierosy",
"Ile przecietnie papierosow dziennie wypala",
"Czy kiedykolwiek palil papierosy",
"Korzystalem z porad psychologa (psychiatry)",
"Pilem za duzo alkoholu",
"Probowalem narkotykow/dopalaczy",
"Zostalem oskarzony w sprawie cywilnej",
"Stan cywilny",
"Wiek",
"Kategoria wiekowa",
"Grupa zawodowa",
"Poziom wyksztalcenia",
"Plec",
"Osobisty dochod miesieczny netto - srednia z ostatnich trzech miesiecy",
"Kategorie palaczy")
variablesDescriptionEnglish <- c( "smokes",
"daily smoked cigarettes", # Check >= 0 if not NA.
"ever smoked", # Maybe frow out obs where smokes=yes and ever_smoked = no.
"psychiatric treatment",
"former alcohol addict",
"tried drugs",
"accused of offence",
"marital status", #translation: http://en.wikipedia.org/wiki/Marital_status
"age",
"age group",
"employment status",
"education",
"gender",
"monthly income",
"smoker group"
)
variablesNames <- c("Smokes",
"Daily_Smokes",
"Ever_Smoked",
"Psychiatric",
"Alcohol",
"Drugs",
"Criminal",
"Marital_Status",
"Age",
"Age_Group",
"Employment",
"Education",
"Gender",
"Income",
"Smoker_Group")
Data <- diagnozaOsoby2011[,variablesOriginalNamesYear2011]
rm( diagnozaOsoby2011,
variablesOriginalNamesYear2000,
variablesOriginalNamesYear2011,
variablesDescriptionPolish2000,
variablesDescriptionPolish2011,
variablesDescriptionEnglish)
colnames(Data) <- variablesNames[1:14]
# removes those rows in which all of the relevant columns are NA
# Prepares a data.frame filled with TRUE and NA.
temp <- data.frame(ifelse(is.na(Data[c(1:7,11,12,14)]), T, NA))
temp[,'indeks'] = 1:nrow(Data)
Data <- Data[-na.omit(temp)[,'indeks'],]
rm(temp)
fixLevels <- function(lvls, d=NULL, var=NULL, order=NULL, skip=0) {
  # Relabel factor column(s): replace the levels of column(s) `var` in data
  # frame `d` with `lvls`, optionally keeping the first `skip` original levels
  # in front of the new labels, and optionally returning an ordered factor.
  #
  # lvls:  character vector of new level labels.
  # d:     data.frame to modify; if NULL, the worker function itself is
  #        returned (so fixLevels(lvls) can be used as a relabelling closure).
  # var:   column name(s) to relabel; defaults to all columns of `d`.
  # order: NULL or FALSE for an unordered factor; TRUE for an ordered factor
  #        with levels in the order given by `lvls`; or an integer permutation
  #        of seq_along(lvls) for an ordered factor with reordered levels.
  # skip:  number of leading original levels to keep in front of `lvls`
  #        (typically 1, to skip a "no answer" level). Note that skipped
  #        levels are subsequently dropped by the factor() call below, so
  #        values carrying them become NA.
  #
  # Returns the modified data.frame (or the worker function when d is NULL).
  f <- function(d1, var1) {
    if (skip > 0) {
      levels(d1[,var1]) <- c(levels(d1[,var1])[seq_len(skip)], lvls)
    } else {
      levels(d1[,var1]) <- lvls
    }
    # Decide orderedness and the final level order. isTRUE()/isFALSE() are
    # used instead of `order == F`, which raises a length > 1 condition error
    # when `order` is a permutation vector, and which previously mapped
    # order = FALSE to an empty level set (turning every value into NA).
    if (is.null(order) || isFALSE(order)) {
      o <- FALSE
      lvl.order <- lvls
    } else if (isTRUE(order)) {
      o <- TRUE
      lvl.order <- lvls
    } else {
      o <- TRUE
      lvl.order <- lvls[order]
    }
    d1[,var1] <- factor(d1[,var1], levels = lvl.order, ordered = o)
    d1
  }
  if (is.null(d)) {
    return(f)
  }
  if (is.null(var)) { var <- colnames(d) }
  # A plain loop covers both the single- and multi-column cases.
  for (x in var) {
    d <- f(d, x)
  }
  d
}
Data <- fixLevels(c("yes","no"), Data, c("Smokes", "Ever_Smoked","Psychiatric", "Drugs", "Criminal", "Alcohol"),skip=1)
#Data$Daily_Smokes[is.na(Data$Daily_Smokes)] <- -1
#Data$Daily_Smokes[is.na(Data$Daily_Smokes)] <- 0 # To chyba zbyt słabe.
Data <- fixLevels(c("single","married","widowed","divorced","separated","unknown"), Data, "Marital_Status",skip=2)
Data <- fixLevels(c("0-24","25-34","35-44","45-59","60-64","65+"),Data, "Age_Group",skip=1, order=T)
Data <- fixLevels(c("civil servant", "private sector", "entrepreneur",
"farmer", "pensioner","retiree", "pupil or student",
"unemployed", "other non-active"), Data, "Employment", skip=1)
Data <- fixLevels(c("primary or less", "technical", "secondary", "beyond secondary"), Data, "Education", skip=1, order=T)
Data <- fixLevels(c("male","female"),Data,"Gender", skip=1)
rm(fixLevels)
###########################################################################################################
# Removing inconsistencies and making small repairs. Works for any data set - we can change years as well.
source("./scripts/inputControl.R")
###########################################################################################################
smokerLevels <- c('never smoked', 'former smoker', 'up to half a pack', 'up to one pack', 'more than one pack')
Data[,"Smoker_Group"] <- factor(ifelse(Data$Smokes == "no" & Data$Ever_Smoked == "no", smokerLevels[1],
ifelse(Data$Smokes == "no" & Data$Ever_Smoked == "yes", smokerLevels[2],
ifelse(Data$Smokes == "yes" & Data$Daily_Smokes <= 10, smokerLevels[3],
ifelse(Data$Smokes == "yes" & Data$Daily_Smokes > 10 &Data$Daily_Smokes <= 20, smokerLevels[4],
smokerLevels[5])))),
levels = smokerLevels, ordered = T)
rm(smokerLevels)
###########################################################################################################
#save(Data, file="data/Data.RData")
|
/scripts/Cleanup.R
|
no_license
|
MatteoLacki/projectFive
|
R
| false
| false
| 7,205
|
r
|
load("data/diagnozaOsoby2011.RData")
variablesOriginalNamesYear2000 <- c("ap83_1", "ap83_2", "ap83_3", "ap84", "ap85", "ap86", "ap100", "ac8",
"wiek2000", "wiek6_2000", "status9_2000", "eduk4_2000", "PLEC", "bp107") # We cannot take bp107 - these results are not from year 2000.
variablesOriginalNamesYear2011 <- c("fp44", "fp45", "fp46", "fp72", "fp73", "fp74", "fp88", "fC11", "wiek2011", "wiek6_2011", "status9_2011", "eduk4_2011", "PLEC", "fp65")
variablesDescriptionPolish2000 <- c( "Czy pali papierosy",
"Ile przecietnie papierosow dziennie wypala",
"Czy kiedykolwiek palil papierosy",
"Korzystalem z porad psychologa (psychiatry)",
"Pilem za duzo alkoholu",
"Probowalem narkotykow",
"Oskarzono mnie o dokonanie czynu karalnego",
"Stan cywilny",
"Wiek",
"Kategoria wiekowa",
"Grupa zawodowa",
"Poziom wyksztalcenia",
"Plec",
"Dochod miesieczny",
"Kategorie palaczy")
variablesDescriptionPolish2011 <- c( "Czy pali papierosy",
"Ile przecietnie papierosow dziennie wypala",
"Czy kiedykolwiek palil papierosy",
"Korzystalem z porad psychologa (psychiatry)",
"Pilem za duzo alkoholu",
"Probowalem narkotykow/dopalaczy",
"Zostalem oskarzony w sprawie cywilnej",
"Stan cywilny",
"Wiek",
"Kategoria wiekowa",
"Grupa zawodowa",
"Poziom wyksztalcenia",
"Plec",
"Osobisty dochod miesieczny netto - srednia z ostatnich trzech miesiecy",
"Kategorie palaczy")
variablesDescriptionEnglish <- c( "smokes",
"daily smoked cigarettes", # Check >= 0 if not NA.
"ever smoked", # Maybe frow out obs where smokes=yes and ever_smoked = no.
"psychiatric treatment",
"former alcohol addict",
"tried drugs",
"accused of offence",
"marital status", #translation: http://en.wikipedia.org/wiki/Marital_status
"age",
"age group",
"employment status",
"education",
"gender",
"monthly income",
"smoker group"
)
variablesNames <- c("Smokes",
"Daily_Smokes",
"Ever_Smoked",
"Psychiatric",
"Alcohol",
"Drugs",
"Criminal",
"Marital_Status",
"Age",
"Age_Group",
"Employment",
"Education",
"Gender",
"Income",
"Smoker_Group")
Data <- diagnozaOsoby2011[,variablesOriginalNamesYear2011]
rm( diagnozaOsoby2011,
variablesOriginalNamesYear2000,
variablesOriginalNamesYear2011,
variablesDescriptionPolish2000,
variablesDescriptionPolish2011,
variablesDescriptionEnglish)
colnames(Data) <- variablesNames[1:14]
# removes those rows in which all of the relevant columns are NA
# Prepares a data.frame filled with TRUE and NA.
temp <- data.frame(ifelse(is.na(Data[c(1:7,11,12,14)]), T, NA))
temp[,'indeks'] = 1:nrow(Data)
Data <- Data[-na.omit(temp)[,'indeks'],]
rm(temp)
fixLevels <- function(lvls, d=NULL, var=NULL, order=NULL, skip=0) {
  # Replace the levels of factor column(s) `var` in `d` with `lvls`,
  # optionally keeping the first `skip` original levels and optionally
  # marking the result as an ordered factor (order = TRUE, or an integer
  # permutation of the new levels). When `d` is NULL the per-column worker
  # function is returned instead of a data frame.
  relabel <- function(frame, column) {
    if (skip > 0) {
      levels(frame[,column]) <- c(levels(frame[,column])[1:skip], lvls)
    } else {
      levels(frame[,column]) <- lvls
    }
    # An ordered factor is produced unless `order` is NULL or FALSE; when
    # NULL, the identity permutation keeps `lvls` in the given order.
    is.ord <- TRUE
    if (is.null(order)) {
      is.ord <- FALSE
      order <- 1:length(lvls)
    } else if (order == FALSE) {
      is.ord <- FALSE
    }
    frame[,column] <- factor(frame[,column],
                             levels = sapply(order, function(k) { lvls[k] }),
                             ordered = is.ord)
    frame
  }
  if (is.null(d)) {
    return(relabel)
  }
  if (is.null(var)) { var <- colnames(d) }
  for (column in var) {
    d <- relabel(d, column)
  }
  d
}
Data <- fixLevels(c("yes","no"), Data, c("Smokes", "Ever_Smoked","Psychiatric", "Drugs", "Criminal", "Alcohol"),skip=1)
#Data$Daily_Smokes[is.na(Data$Daily_Smokes)] <- -1
#Data$Daily_Smokes[is.na(Data$Daily_Smokes)] <- 0 # To chyba zbyt słabe.
Data <- fixLevels(c("single","married","widowed","divorced","separated","unknown"), Data, "Marital_Status",skip=2)
Data <- fixLevels(c("0-24","25-34","35-44","45-59","60-64","65+"),Data, "Age_Group",skip=1, order=T)
Data <- fixLevels(c("civil servant", "private sector", "entrepreneur",
"farmer", "pensioner","retiree", "pupil or student",
"unemployed", "other non-active"), Data, "Employment", skip=1)
Data <- fixLevels(c("primary or less", "technical", "secondary", "beyond secondary"), Data, "Education", skip=1, order=T)
Data <- fixLevels(c("male","female"),Data,"Gender", skip=1)
rm(fixLevels)
###########################################################################################################
# Removing inconsistencies and making small repairs. Works for any data set - we can change years as well.
source("./scripts/inputControl.R")
###########################################################################################################
smokerLevels <- c('never smoked', 'former smoker', 'up to half a pack', 'up to one pack', 'more than one pack')
Data[,"Smoker_Group"] <- factor(ifelse(Data$Smokes == "no" & Data$Ever_Smoked == "no", smokerLevels[1],
ifelse(Data$Smokes == "no" & Data$Ever_Smoked == "yes", smokerLevels[2],
ifelse(Data$Smokes == "yes" & Data$Daily_Smokes <= 10, smokerLevels[3],
ifelse(Data$Smokes == "yes" & Data$Daily_Smokes > 10 &Data$Daily_Smokes <= 20, smokerLevels[4],
smokerLevels[5])))),
levels = smokerLevels, ordered = T)
rm(smokerLevels)
###########################################################################################################
#save(Data, file="data/Data.RData")
|
# Make an example table
# NOTE(review): rnorm(n=100) draws only 100 values, which are then recycled
# (with a warning) to fill each 100x100 matrix -- presumably rnorm(100 * 100)
# was intended; confirm before relying on these example values.
a <- matrix(rnorm(n=100), nrow=100, ncol=100)
b <- matrix(rnorm(n=100), nrow=100, ncol=100)
c <- matrix(rnorm(n=100), nrow=100, ncol=100)
# Get values in upper triangle
# NOTE(review): these calls precede the function definitions below, so a
# fresh source() of this file fails here unless the functions already exist
# in the session -- confirm the intended run order.
values <- getUpperTriangle(a)
output <- getUpperTriangleOfMatrices(a, b, c)
#############
# FUNCTIONS #
#############
getUpperTriangleOfMatrices <- function(genetic, spatial, temporal){
  # Flatten the strict upper triangles of three equally-shaped matrices into
  # one data frame, one row per (i, j) pair with i < j, in row-major order
  # (matching the original nested-loop traversal).
  #
  # genetic, spatial, temporal: matrices of identical dimensions.
  # Returns a data frame with columns Genetic, Spatial, Temporal. When the
  # matrix has no strict upper triangle (e.g. 1x1), a single all-NA row is
  # returned, matching the previous implementation's initial state.
  idx <- which(upper.tri(genetic), arr.ind = TRUE)
  if (nrow(idx) == 0) {
    return(data.frame("Genetic" = NA, "Spatial" = NA, "Temporal" = NA))
  }
  # which() reports indices column-major; reorder to row-major so output
  # order is unchanged from the loop-based version.
  idx <- idx[order(idx[, 1], idx[, 2]), , drop = FALSE]
  # Two-column index matrices extract all three value vectors at once,
  # replacing the O(n^2) row-by-row data frame growth of the original.
  data.frame(
    "Genetic" = genetic[idx],
    "Spatial" = spatial[idx],
    "Temporal" = temporal[idx]
  )
}
getUpperTriangle <- function(matrix){
  # Flatten the strict upper triangle of `matrix` into a vector, in row-major
  # order (matching the original nested-loop traversal).
  #
  # matrix: a matrix (the parameter name shadows base::matrix, kept for
  #         interface compatibility).
  # Returns a vector of the entries with row < column, or NULL when there are
  # none (e.g. a 1x1 matrix), matching the previous implementation.
  idx <- which(upper.tri(matrix), arr.ind = TRUE)
  if (nrow(idx) == 0) {
    return(c())
  }
  # which() reports indices column-major; reorder to row-major so output
  # order is unchanged from the loop-based version.
  idx <- idx[order(idx[, 1], idx[, 2]), , drop = FALSE]
  # Two-column index extraction replaces the O(n^2) vector growth.
  matrix[idx]
}
|
/FlattenMatrix_Adrian_13-03-18.R
|
no_license
|
AdrianAllen1977/R-code
|
R
| false
| false
| 1,830
|
r
|
# Make an example table
a <- matrix(rnorm(n=100), nrow=100, ncol=100)
b <- matrix(rnorm(n=100), nrow=100, ncol=100)
c <- matrix(rnorm(n=100), nrow=100, ncol=100)
# Get values in upper triangle
values <- getUpperTriangle(a)
output <- getUpperTriangleOfMatrices(a, b, c)
#############
# FUNCTIONS #
#############
getUpperTriangleOfMatrices <- function(genetic, spatial, temporal){
  # Collect the matching entries from the strict upper triangles of three
  # equally-shaped matrices into one data frame, one row per (i, j) pair with
  # i < j, visited in row-major order.
  flat <- data.frame("Genetic" = NA, "Spatial" = NA, "Temporal" = NA)
  entry <- 0
  for (r in seq_len(nrow(genetic))) {
    for (cc in seq_len(ncol(genetic))) {
      # Only the strict upper triangle: row index below column index.
      if (r < cc) {
        entry <- entry + 1
        # Assigning past the current last row grows the frame in place.
        flat[entry, "Genetic"] <- genetic[r, cc]
        flat[entry, "Spatial"] <- spatial[r, cc]
        flat[entry, "Temporal"] <- temporal[r, cc]
      }
    }
  }
  flat
}
getUpperTriangle <- function(matrix){
  # Flatten the strict upper triangle of `matrix` (entries with row < column)
  # into a plain vector, visited in row-major order. Returns NULL when the
  # matrix has no strict upper triangle.
  flat <- c()
  for (r in seq_len(nrow(matrix))) {
    for (cc in seq_len(ncol(matrix))) {
      # Only keep entries strictly above the diagonal.
      if (r < cc) {
        flat <- c(flat, matrix[r, cc])
      }
    }
  }
  flat
}
|
#####
## FOR (EVENTUALLY) RUNNING IN BATCH MODE ON AWS WITH ARGUMENTS DESCRIBED BELOW
#####
## SOURCE IN SHARED .Rprofile WHICH CONTAINS SYNAPSE LOGIN HOOK,
## SETS COMMON SYNAPSE CACHE FOR ALL WORKERS, AND SETS COMMON LIBPATH
source("/shared/code/R/.Rprofile")
#####
## TAKES FOR ARGUMENTS (PASSED FROM sgeKickoff.R)
#####
## dataset: dataset to analyze
#####
myArgs <- commandArgs(trailingOnly=T)
ds <- myArgs[1]
# ds <- "tcga_rnaseqAll"
group <- "cms4"
options(stringsAsFactors=F)
require(synapseClient)
require(rGithubClient)
require(affy)
require(limma)
require(hgu133plus2.db)
require(hgu133a2.db)
require(org.Hs.eg.db)
## GENE SET METHODS TO BE USED
require(GSA)
# password will be request after calling this
# synapseLogin()
## SOURCE IN BACKGROUND FUNCTIONS FROM JG
crcRepo <- getRepo("Sage-Bionetworks/crcsc")
sourceRepoFile(crcRepo, "groups/G/pipeline/JGLibrary.R")
code1 <- getPermlink(crcRepo, "groups/G/pipeline/JGLibrary.R")
sourceRepoFile(crcRepo, "groups/G/pipeline/subtypePipelineFuncs.R")
code2 <- getPermlink(crcRepo, "groups/G/pipeline/subtypePipelineFuncs.R")
sourceRepoFile(crcRepo, "evals/evalFuncs.R")
code3 <- getPermlink(crcRepo, "evals/evalFuncs.R")
## SOURCE CODE TO READ IN DATA
sourceRepoFile(crcRepo, "evals/getDataFuncs.R")
code4 <- getPermlink(crcRepo, "evals/getDataFuncs.R")
## THIS SCRIPT
thisCode <- getPermlink(crcRepo, "evals/evalGenesetsConsensus.R")
#####
## GET ALL NECESSARY DATA TO RUN GENESET ANALYSIS FOR THIS GROUP AND DATASET
#####
## GET CONSENSUS RESULTS
grpResId <- "syn2469968"
c <- synGet(grpResId)
cms <- read.csv(getFileLocation(c), as.is=T)
d <- sapply(strsplit(cms$dataset.sample, ".", fixed=T), "[", 1)
cms$dataset <- d
cms <- cms[cms$dataset != "tcga_rnaseq", ]
samp <- sapply(strsplit(cms$dataset.sample, ".", fixed=T), "[", 2)
cms$sample <- samp
rownames(cms) <- samp
cms <- cms[ cms$dataset == ds, ]
theseCfs <- names(table(cms$cms4))
tmp <- lapply(as.list(theseCfs), function(x){
as.numeric(cms$cms4 == x)
})
st <- do.call(cbind, tmp)
colnames(st) <- theseCfs
rownames(st) <- rownames(cms)
nSubtypes <- ncol(st)
## GET THE EXPRESSION DATA FOR THIS DATASET
## SUBSET TO AND ORDER LIKE THE SAMPLES IN THE SUBTYPE MATRIX
d <- getExprSet(ds)
sampleNames(d) <- clean.names(sampleNames(d))
d <- d[, as.character(rownames(st)) ]
d <- d[apply(exprs(d), 1, sd) != 0, ]
## GET THE GENESETS
genesets <- load.gmt.data(getFileLocation(synGet("syn2321865")))
genesets <- lapply(genesets, function(x){
x <- x[ x != "" ]
x <- unlist(symbolMap(x))
x <- x[ !is.na(x) ]
intersect(x, featureNames(d))
})
#####
## FIRST JUST RUN LMFIT ON EXPRESSION DATA FOR EACH SUBTYPE
#####
diffExprResults <- sapply(as.list(1:nSubtypes), function(i){
resp <- st[, i]
fit <- lmFit(d, design=model.matrix(~ factor(resp)))
fit <- eBayes(fit)
})
diffExprPvalues <- sapply(diffExprResults, function(x){
x$p.value[, "factor(resp)1"]
})
rownames(diffExprPvalues) <- featureNames(d)
colnames(diffExprPvalues) <- colnames(st)
diffExprFCs <- sapply(diffExprResults, function(x){
2^x$coefficients[, "factor(resp)1"]
})
rownames(diffExprFCs) <- featureNames(d)
colnames(diffExprFCs) <- colnames(st)
pvalFile <- file.path(tempdir(), paste("diffExprPvalues-", group, "-", ds, ".tsv", sep=""))
write.table(diffExprPvalues, file=pvalFile, quote=F, sep="\t", col.names=NA)
pvalSyn <- synStore(File(path=pvalFile, parentId="syn2476109", group=group, dataset=ds, method="eBayes", stat="pvalue", evalDate=as.character(Sys.Date())),
activity=Activity(name="differential expression",
used=list(
list(name=basename(code1), url=code1, wasExecuted=F),
list(name=basename(code2), url=code2, wasExecuted=F),
list(name=basename(code3), url=code3, wasExecuted=F),
list(name=basename(code4), url=code4, wasExecuted=F),
list(entity=synGet(allDatasets[[ds]]$exprSynId, downloadFile=F), wasExecuted=F),
list(entity=synGet(grpResId, downloadFile=F), wasExecuted=F),
list(name=basename(thisCode), url=thisCode, wasExecuted=T)
)))
fcFile <- file.path(tempdir(), paste("diffExprFCs-", group, "-", ds, ".tsv", sep=""))
write.table(diffExprFCs, file=fcFile, quote=F, sep="\t", col.names=NA)
fcSyn <- synStore(File(path=fcFile, parentId="syn2476109", group=group, dataset=ds, method="eBayes", stat="fc", evalDate=as.character(Sys.Date())),
activity=Activity(name="differential expression",
used=list(
list(name=basename(code1), url=code1, wasExecuted=F),
list(name=basename(code2), url=code2, wasExecuted=F),
list(name=basename(code3), url=code3, wasExecuted=F),
list(name=basename(code4), url=code4, wasExecuted=F),
list(entity=synGet(allDatasets[[ds]]$exprSynId, downloadFile=F), wasExecuted=F),
list(entity=synGet(grpResId, downloadFile=F), wasExecuted=F),
list(name=basename(thisCode), url=thisCode, wasExecuted=T)
)))
#####
## RUN GENESET EVALUATION
#####
## GSA
## RESULTS AVAILABLE FOR BOTH HI AND LO
gsaResults <- lapply(as.list(1:nSubtypes), function(i){
## GSA REQUIRES 1 AND 2 INSTEAD OF 0 AND 1
resp <- st[, i] + 1
op <- GSA(x=exprs(d), y=resp, genesets=genesets, genenames=featureNames(d), resp.type="Two class unpaired", nperms=10000, minsize=3)
op
})
gsaHiResults <- sapply(gsaResults, function(r){
r$pvalues.hi
})
rownames(gsaHiResults) <- names(genesets)
colnames(gsaHiResults) <- colnames(st)
gsaFile <- file.path(tempdir(), paste("gsa-", group, "-", ds, ".tsv", sep=""))
write.table(gsaHiResults, file=gsaFile, quote=F, sep="\t", col.names=NA)
gsaSyn <- synStore(File(path=gsaFile, parentId="syn2476109", group=group, dataset=ds, method="gsa", evalDate=as.character(Sys.Date())),
activity=Activity(name="geneset evaluation",
used=list(
list(name=basename(code1), url=code1, wasExecuted=F),
list(name=basename(code2), url=code2, wasExecuted=F),
list(name=basename(code3), url=code3, wasExecuted=F),
list(name=basename(code4), url=code4, wasExecuted=F),
list(entity=synGet(allDatasets[[ds]]$exprSynId, downloadFile=F), wasExecuted=F),
list(entity=synGet("syn2321865", downloadFile=F), wasExecuted=F),
list(entity=synGet(grpResId, downloadFile=F), wasExecuted=F),
list(name=basename(thisCode), url=thisCode, wasExecuted=T)
)))
|
/evals/evalGenesetsConsensus.R
|
no_license
|
laderast/crcsc
|
R
| false
| false
| 7,234
|
r
|
#####
## FOR (EVENTUALLY) RUNNING IN BATCH MODE ON AWS WITH ARGUMENTS DESCRIBED BELOW
#####
## SOURCE IN SHARED .Rprofile WHICH CONTAINS SYNAPSE LOGIN HOOK,
## SETS COMMON SYNAPSE CACHE FOR ALL WORKERS, AND SETS COMMON LIBPATH
source("/shared/code/R/.Rprofile")
#####
## TAKES FOR ARGUMENTS (PASSED FROM sgeKickoff.R)
#####
## dataset: dataset to analyze
#####
myArgs <- commandArgs(trailingOnly=T)
ds <- myArgs[1]
# ds <- "tcga_rnaseqAll"
group <- "cms4"
options(stringsAsFactors=F)
require(synapseClient)
require(rGithubClient)
require(affy)
require(limma)
require(hgu133plus2.db)
require(hgu133a2.db)
require(org.Hs.eg.db)
## GENE SET METHODS TO BE USED
require(GSA)
# password will be request after calling this
# synapseLogin()
## SOURCE IN BACKGROUND FUNCTIONS FROM JG
crcRepo <- getRepo("Sage-Bionetworks/crcsc")
sourceRepoFile(crcRepo, "groups/G/pipeline/JGLibrary.R")
code1 <- getPermlink(crcRepo, "groups/G/pipeline/JGLibrary.R")
sourceRepoFile(crcRepo, "groups/G/pipeline/subtypePipelineFuncs.R")
code2 <- getPermlink(crcRepo, "groups/G/pipeline/subtypePipelineFuncs.R")
sourceRepoFile(crcRepo, "evals/evalFuncs.R")
code3 <- getPermlink(crcRepo, "evals/evalFuncs.R")
## SOURCE CODE TO READ IN DATA
sourceRepoFile(crcRepo, "evals/getDataFuncs.R")
code4 <- getPermlink(crcRepo, "evals/getDataFuncs.R")
## THIS SCRIPT
thisCode <- getPermlink(crcRepo, "evals/evalGenesetsConsensus.R")
#####
## GET ALL NECESSARY DATA TO RUN GENESET ANALYSIS FOR THIS GROUP AND DATASET
#####
## GET CONSENSUS RESULTS
grpResId <- "syn2469968"
c <- synGet(grpResId)
cms <- read.csv(getFileLocation(c), as.is=T)
d <- sapply(strsplit(cms$dataset.sample, ".", fixed=T), "[", 1)
cms$dataset <- d
cms <- cms[cms$dataset != "tcga_rnaseq", ]
samp <- sapply(strsplit(cms$dataset.sample, ".", fixed=T), "[", 2)
cms$sample <- samp
rownames(cms) <- samp
cms <- cms[ cms$dataset == ds, ]
## Build a 0/1 sample-by-subtype indicator matrix from the consensus calls:
## one column per observed cms4 label, one row per sample.
theseCfs <- names(table(cms$cms4))
## Each column flags membership of the sample in that subtype (1) or not (0).
tmp <- lapply(as.list(theseCfs), function(x){
  as.numeric(cms$cms4 == x)
})
st <- do.call(cbind, tmp)
colnames(st) <- theseCfs
rownames(st) <- rownames(cms)
nSubtypes <- ncol(st)
## GET THE EXPRESSION DATA FOR THIS DATASET
## SUBSET TO AND ORDER LIKE THE SAMPLES IN THE SUBTYPE MATRIX
d <- getExprSet(ds)
sampleNames(d) <- clean.names(sampleNames(d))
d <- d[, as.character(rownames(st)) ]
d <- d[apply(exprs(d), 1, sd) != 0, ]
## GET THE GENESETS
genesets <- load.gmt.data(getFileLocation(synGet("syn2321865")))
genesets <- lapply(genesets, function(x){
x <- x[ x != "" ]
x <- unlist(symbolMap(x))
x <- x[ !is.na(x) ]
intersect(x, featureNames(d))
})
#####
## FIRST JUST RUN LMFIT ON EXPRESSION DATA FOR EACH SUBTYPE
#####
diffExprResults <- sapply(as.list(1:nSubtypes), function(i){
resp <- st[, i]
fit <- lmFit(d, design=model.matrix(~ factor(resp)))
fit <- eBayes(fit)
})
diffExprPvalues <- sapply(diffExprResults, function(x){
x$p.value[, "factor(resp)1"]
})
rownames(diffExprPvalues) <- featureNames(d)
colnames(diffExprPvalues) <- colnames(st)
diffExprFCs <- sapply(diffExprResults, function(x){
2^x$coefficients[, "factor(resp)1"]
})
rownames(diffExprFCs) <- featureNames(d)
colnames(diffExprFCs) <- colnames(st)
pvalFile <- file.path(tempdir(), paste("diffExprPvalues-", group, "-", ds, ".tsv", sep=""))
write.table(diffExprPvalues, file=pvalFile, quote=F, sep="\t", col.names=NA)
pvalSyn <- synStore(File(path=pvalFile, parentId="syn2476109", group=group, dataset=ds, method="eBayes", stat="pvalue", evalDate=as.character(Sys.Date())),
activity=Activity(name="differential expression",
used=list(
list(name=basename(code1), url=code1, wasExecuted=F),
list(name=basename(code2), url=code2, wasExecuted=F),
list(name=basename(code3), url=code3, wasExecuted=F),
list(name=basename(code4), url=code4, wasExecuted=F),
list(entity=synGet(allDatasets[[ds]]$exprSynId, downloadFile=F), wasExecuted=F),
list(entity=synGet(grpResId, downloadFile=F), wasExecuted=F),
list(name=basename(thisCode), url=thisCode, wasExecuted=T)
)))
fcFile <- file.path(tempdir(), paste("diffExprFCs-", group, "-", ds, ".tsv", sep=""))
write.table(diffExprFCs, file=fcFile, quote=F, sep="\t", col.names=NA)
fcSyn <- synStore(File(path=fcFile, parentId="syn2476109", group=group, dataset=ds, method="eBayes", stat="fc", evalDate=as.character(Sys.Date())),
activity=Activity(name="differential expression",
used=list(
list(name=basename(code1), url=code1, wasExecuted=F),
list(name=basename(code2), url=code2, wasExecuted=F),
list(name=basename(code3), url=code3, wasExecuted=F),
list(name=basename(code4), url=code4, wasExecuted=F),
list(entity=synGet(allDatasets[[ds]]$exprSynId, downloadFile=F), wasExecuted=F),
list(entity=synGet(grpResId, downloadFile=F), wasExecuted=F),
list(name=basename(thisCode), url=thisCode, wasExecuted=T)
)))
#####
## RUN GENESET EVALUATION
#####
## GSA
## RESULTS AVAILABLE FOR BOTH HI AND LO
gsaResults <- lapply(as.list(1:nSubtypes), function(i){
## GSA REQUIRES 1 AND 2 INSTEAD OF 0 AND 1
resp <- st[, i] + 1
op <- GSA(x=exprs(d), y=resp, genesets=genesets, genenames=featureNames(d), resp.type="Two class unpaired", nperms=10000, minsize=3)
op
})
gsaHiResults <- sapply(gsaResults, function(r){
r$pvalues.hi
})
rownames(gsaHiResults) <- names(genesets)
colnames(gsaHiResults) <- colnames(st)
gsaFile <- file.path(tempdir(), paste("gsa-", group, "-", ds, ".tsv", sep=""))
write.table(gsaHiResults, file=gsaFile, quote=F, sep="\t", col.names=NA)
gsaSyn <- synStore(File(path=gsaFile, parentId="syn2476109", group=group, dataset=ds, method="gsa", evalDate=as.character(Sys.Date())),
activity=Activity(name="geneset evaluation",
used=list(
list(name=basename(code1), url=code1, wasExecuted=F),
list(name=basename(code2), url=code2, wasExecuted=F),
list(name=basename(code3), url=code3, wasExecuted=F),
list(name=basename(code4), url=code4, wasExecuted=F),
list(entity=synGet(allDatasets[[ds]]$exprSynId, downloadFile=F), wasExecuted=F),
list(entity=synGet("syn2321865", downloadFile=F), wasExecuted=F),
list(entity=synGet(grpResId, downloadFile=F), wasExecuted=F),
list(name=basename(thisCode), url=thisCode, wasExecuted=T)
)))
|
b47addcb02c5a49eb36fee58c0f7a436 ctrl.e#1.a#3.E#132.A#48.c#.w#5.s#54.asp.qdimacs 5459 15838
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#132.A#48.c#.w#5.s#54.asp/ctrl.e#1.a#3.E#132.A#48.c#.w#5.s#54.asp.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 91
|
r
|
b47addcb02c5a49eb36fee58c0f7a436 ctrl.e#1.a#3.E#132.A#48.c#.w#5.s#54.asp.qdimacs 5459 15838
|
# Test runner: execute the full testthat suite for the newsfreq package.
library(testthat)
test_check("newsfreq")
|
/tests/test-all.R
|
no_license
|
hrbrmstr/newsfreq
|
R
| false
| false
| 41
|
r
|
# Test runner: execute the full testthat suite for the newsfreq package.
library(testthat)
test_check("newsfreq")
|
##' Function to add leading zeroes to maintain fixed width.
##' @description This function ensures that fixed width data is the right
##' length by padding zeroes to the front of values. This is a common problem
##' with fixed width data after importing into R as non-character type.
##' @param x a vector of numeric data that should be fixed width but is
##' missing leading zeroes.
##' @param digits an integer representing the desired width of \code{x}
##' @return A character vector of length \code{digits}
##' @details If x contains negative values then the width specified by digits
##' will include one space taken up for the negative sign. The function does not
##' trim values that are longer than digits, so the vector produced will not
##' have a uniform width if \code{nchar(x) > d}
##' @author Jason P. Becker
##' @author Jared E. Knowles
##' @export
##' @examples
##' a <- seq(1,10)
##' a <- leading_zero(a, digits = 3)
##' a
leading_zero <- function(x, digits = 2){
  # Input must be numeric or integer.
  stopifnot(any(c("numeric", "integer") %in% class(x)))
  # Reserve one extra character for the minus sign when negatives occur, so
  # the digit part keeps its requested width.
  if(any(x < 0)){
    digits <- digits + 1
  }
  if(digits < 0){
    warning("Digits < 0 does not make sense, defaulting to 0")
    digits <- 0
  }
  # Zero-pad each value to a fixed total width of `digits` characters.
  # Fix: the previous call passed `digits = digits - 1` to formatC(), which
  # sets the printf *precision* rather than the field width, so values were
  # not padded to the documented width.  `width = digits` with flag "0"
  # yields the fixed-width, zero-padded result the roxygen docs promise.
  return(formatC(x, width = digits, format = "d", flag = "0"))
}
|
/R/leading_zero.R
|
no_license
|
cran/eeptools
|
R
| false
| false
| 1,290
|
r
|
##' Function to add leading zeroes to maintain fixed width.
##' @description This function ensures that fixed width data is the right
##' length by padding zeroes to the front of values. This is a common problem
##' with fixed width data after importing into R as non-character type.
##' @param x a vector of numeric data that should be fixed width but is
##' missing leading zeroes.
##' @param digits an integer representing the desired width of \code{x}
##' @return A character vector of length \code{digits}
##' @details If x contains negative values then the width specified by digits
##' will include one space taken up for the negative sign. The function does not
##' trim values that are longer than digits, so the vector produced will not
##' have a uniform width if \code{nchar(x) > d}
##' @author Jason P. Becker
##' @author Jared E. Knowles
##' @export
##' @examples
##' a <- seq(1,10)
##' a <- leading_zero(a, digits = 3)
##' a
leading_zero <- function(x, digits = 2){
  # Input must be numeric or integer.
  stopifnot(any(c("numeric", "integer") %in% class(x)))
  # Reserve one extra character for the minus sign when negatives occur, so
  # the digit part keeps its requested width.
  if(any(x < 0)){
    digits <- digits + 1
  }
  if(digits < 0){
    warning("Digits < 0 does not make sense, defaulting to 0")
    digits <- 0
  }
  # Zero-pad each value to a fixed total width of `digits` characters.
  # Fix: the previous call passed `digits = digits - 1` to formatC(), which
  # sets the printf *precision* rather than the field width, so values were
  # not padded to the documented width.  `width = digits` with flag "0"
  # yields the fixed-width, zero-padded result the roxygen docs promise.
  return(formatC(x, width = digits, format = "d", flag = "0"))
}
|
## Plot 4: 2x2 panel of Global Active Power, Voltage, energy sub metering and
## Global Reactive Power for 2007-02-01 and 2007-02-02.
## Read the ';'-separated power file; skip=1 drops the header row, so the
## column names are re-assigned by hand below.
consumption <- read.table("household_power_consumption.txt", skip = 1, sep = ";")
names(consumption) <- c("Date", "Time", "Global_active_power",
                        "Global_reactive_power", "Voltage", "Global_intensity",
                        "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
## Keep only the two days of interest (dates are still d/m/Y strings here).
feb <- subset(consumption, consumption$Date == "1/2/2007" | consumption$Date == "2/2/2007")
## Convert Date and Time from character into Date / POSIXlt objects.
feb$Date <- as.Date(feb$Date, format = "%d/%m/%Y")
feb$Time <- strptime(feb$Time, format = "%H:%M:%S")
## Re-stamp the times with their calendar day (1440 minute rows per day).
feb[1:1440, "Time"] <- format(feb[1:1440, "Time"], "2007-02-01 %H:%M:%S")
feb[1441:2880, "Time"] <- format(feb[1441:2880, "Time"], "2007-02-02 %H:%M:%S")
## Lay the four graphs out on a 2x2 grid.
par(mfrow = c(2, 2))
## Draw the four panels of the composite figure.
with(feb, {
  plot(feb$Time, as.numeric(as.character(feb$Global_active_power)),
       type = "l", xlab = "", ylab = "Global Active Power")
  plot(feb$Time, as.numeric(as.character(feb$Voltage)),
       type = "l", xlab = "datetime", ylab = "Voltage")
  plot(feb$Time, feb$Sub_metering_1, type = "n", xlab = "",
       ylab = "Energy sub metering")
  with(feb, lines(Time, as.numeric(as.character(Sub_metering_1))))
  with(feb, lines(Time, as.numeric(as.character(Sub_metering_2)), col = "red"))
  with(feb, lines(Time, as.numeric(as.character(Sub_metering_3)), col = "blue"))
  legend("topright", lty = 1, col = c("black", "red", "blue"),
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         cex = 0.6)
  plot(feb$Time, as.numeric(as.character(feb$Global_reactive_power)),
       type = "l", xlab = "datetime", ylab = "Global_reactive_power")
})
|
/Plot4.R
|
no_license
|
sbaga90/Exploratory-Data-Analysis-project1
|
R
| false
| false
| 1,779
|
r
|
## Plot 4: 2x2 panel of Global Active Power, Voltage, energy sub metering and
## Global Reactive Power for 2007-02-01 and 2007-02-02.
## Read the ';'-separated power file; skip=1 drops the header row, so the
## column names are re-assigned by hand below.
consumption <- read.table("household_power_consumption.txt", skip = 1, sep = ";")
names(consumption) <- c("Date", "Time", "Global_active_power",
                        "Global_reactive_power", "Voltage", "Global_intensity",
                        "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
## Keep only the two days of interest (dates are still d/m/Y strings here).
feb <- subset(consumption, consumption$Date == "1/2/2007" | consumption$Date == "2/2/2007")
## Convert Date and Time from character into Date / POSIXlt objects.
feb$Date <- as.Date(feb$Date, format = "%d/%m/%Y")
feb$Time <- strptime(feb$Time, format = "%H:%M:%S")
## Re-stamp the times with their calendar day (1440 minute rows per day).
feb[1:1440, "Time"] <- format(feb[1:1440, "Time"], "2007-02-01 %H:%M:%S")
feb[1441:2880, "Time"] <- format(feb[1441:2880, "Time"], "2007-02-02 %H:%M:%S")
## Lay the four graphs out on a 2x2 grid.
par(mfrow = c(2, 2))
## Draw the four panels of the composite figure.
with(feb, {
  plot(feb$Time, as.numeric(as.character(feb$Global_active_power)),
       type = "l", xlab = "", ylab = "Global Active Power")
  plot(feb$Time, as.numeric(as.character(feb$Voltage)),
       type = "l", xlab = "datetime", ylab = "Voltage")
  plot(feb$Time, feb$Sub_metering_1, type = "n", xlab = "",
       ylab = "Energy sub metering")
  with(feb, lines(Time, as.numeric(as.character(Sub_metering_1))))
  with(feb, lines(Time, as.numeric(as.character(Sub_metering_2)), col = "red"))
  with(feb, lines(Time, as.numeric(as.character(Sub_metering_3)), col = "blue"))
  legend("topright", lty = 1, col = c("black", "red", "blue"),
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         cex = 0.6)
  plot(feb$Time, as.numeric(as.character(feb$Global_reactive_power)),
       type = "l", xlab = "datetime", ylab = "Global_reactive_power")
})
|
# Parameters
waypoints = seq (2, 10)
runs = seq (1, 100)
#### Doorways. Experiment ?: (93, 56) --> (62, 148)
# Load data
df <- read.csv ("/home/krell/Downloads/experimentTable_Doorways.csv", header = FALSE)
# Separate out the obstacle table
obstacles <- df[df$V5 == -1,]
df <- df[df$V5 != -1, ]
getByWaypoint <- function (odf, path, waypoint) {
  # Subset `odf` to the rows whose waypoint-count column (V3) equals
  # `waypoint`.  NOTE(review): `path` is accepted but unused in this body.
  is_this_waypoint <- odf$V3 == waypoint
  odf[is_this_waypoint, ]
}
# Table to store feasability percentage
feasability <- data.frame(waypoint=character(),
feasableCount=integer(),
feasableProportion=integer(),
stringsAsFactors=FALSE)
colnames (feasability) <- c ("Waypoints", "NumFeasible", "PropFeasible")
for (i in waypoints) {
o <- getByWaypoint (obstacles, 1, i)
numFeasible <- length (which (o$V6 == 0))
propFeasible <- numFeasible / length (runs)
if (is.infinite(propFeasible)) {
propFeasible <- 0
}
feasability[nrow(feasability) + 1,] = list(i, numFeasible, propFeasible)
}
library(forcats) # Used to enforce order of bar chart X axis
# Inside bars
ggplot(data=feasability, aes(x=Waypoints, y=PropFeasible)) +
geom_bar(stat="identity", fill="steelblue")+
geom_text(aes(label=PropFeasible), vjust=1.6, color="white", size=3.5)+
theme_minimal() + aes(x = fct_inorder(Waypoints)) + labs (y = "Proportion of Collision-Free Paths", x = "Waypoints")
# Separate into feasible/infeasible data frames
##############
## Feasible ##
##############
getFeasibleIdx <- function (df, path, waypoint){
  # Indices (within this waypoint's runs) whose final obstacle-collision
  # count V6 is 0, i.e. the collision-free ("feasible") runs.
  # Fix: use the `df` argument instead of silently reading the global
  # `obstacles`; the only visible call site passes `obstacles` as `df`
  # (L7841), so behaviour at existing calls is unchanged.
  o <- df[df$V3 == waypoint, ]
  which(o$V6 == 0)
}
feasibleRunIdxs <- list ()
for (w in waypoints) {
feasibleRunIdxs[[w]] <- getFeasibleIdx (obstacles, 1, w)
}
dfFeasible <- data.frame (V1=character(), V2=character(), V3=character(), V4=character(), V5=character(), V6=character())
for (w in waypoints){
dfw <- df[df$V3 == w, ]
dfwF <- dfw[dfw$V4 %in% feasibleRunIdxs[[w]], ]
if (length (dfwF) != 0){
dfFeasible <- rbind (dfFeasible, dfwF)
}
}
# Get a specific run's data
extractRun <- function (df, path, waypoint, run) {
  # Fitness trace (column V6) for a single (waypoint, run) combination.
  # The waypoint filter is inlined here; it mirrors getByWaypoint().
  at_waypoint <- df[df$V3 == waypoint, ]
  one_run <- at_waypoint[at_waypoint$V4 == run, ]
  # NOTE(review): length() on a data frame counts columns, so this guard
  # is TRUE even for zero matching rows; kept to match the original.
  if (length(one_run) > 0){
    as.numeric(one_run$V6)
  }
}
# Get a list where each elem is a table of runs and steps for each waypoint
convergence = vector("list", length (waypoints))
for (way in waypoints) {
convergence[[way]] <- sapply (X = feasibleRunIdxs[[way]], FUN = function (x) {extractRun (dfFeasible, 1, way, x)})
}
# Get the avg run for a waypoint
avgRun <- function (cdf, waypoint){
  # Per-step (row-wise) mean fitness across all runs (columns) stored for
  # this waypoint count; returns NULL when no runs are stored.
  run_matrix <- cdf[[waypoint]]
  if (length(run_matrix) > 0) {
    apply(run_matrix, MARGIN = 1, FUN = mean)
  }
}
convergence_avg = vector("list", length (waypoints))
for (way in waypoints) {
convergence_avg[[way]] <- avgRun (convergence, way)
}
# the the best run for a waypoint
bestRun <- function (cdf, waypoint) {
  # Trace of the best run for this waypoint count: among the first 50 runs
  # (columns), pick the one whose final (last-row) fitness is smallest.
  # NOTE(review): 50 is hard-coded -- this errors with fewer than 50 runs
  # and silently ignores runs beyond 50; confirm intent.
  traces <- cdf[[waypoint]]
  if (length(traces) > 0){
    final_fitness <- traces[nrow(traces), 1:50]
    traces[, which.min(final_fitness)]
  }
}
convergence_best = vector ("list", length (waypoints))
for (way in waypoints) {
convergence_best[[way]] <- bestRun (convergence, way)
}
# the worst run for a waypoint
worstRun <- function (cdf, waypoint) {
  # Trace of the worst run for this waypoint count: among the first 50 runs
  # (columns), pick the one whose final (last-row) fitness is largest.
  # (Original local was named `bestIdx` -- a copy/paste leftover.)
  # NOTE(review): 50 is hard-coded, as in bestRun(); confirm intent.
  traces <- cdf[[waypoint]]
  if (length(traces) > 0){
    final_fitness <- traces[nrow(traces), 1:50]
    traces[, which.max(final_fitness)]
  }
}
convergence_worst = vector ("list", length (waypoints))
for (way in waypoints) {
convergence_worst[[way]] <- worstRun (convergence, way)
}
# Get number of steps
extractSteps <- function (df, path, waypoint, run) {
  # Step counters (column V5) recorded for one (waypoint, run) combination.
  # The waypoint filter is inlined here; it mirrors getByWaypoint().
  at_waypoint <- df[df$V3 == waypoint, ]
  one_run <- at_waypoint[at_waypoint$V4 == run, ]
  as.numeric(one_run$V5)
}
steps <- extractSteps (df, 1, 1, 1)
# Plot
addlinetoplot <- function(dataset, varx, vary, color) {
  # Return a one-element list holding an extra geom_line layer so another
  # series (columns of `dataset` named by the strings varx/vary) can be
  # `+`-ed onto an existing ggplot.
  # NOTE(review): aes_string() is deprecated in recent ggplot2 releases;
  # consider aes(.data[[varx]], .data[[vary]]) when upgrading -- verify.
  extra_layer <- geom_line(data = dataset, aes_string(x = varx, y = vary),
                           colour = color)
  list(extra_layer)
}
library(gridExtra)
p <- list ()
for (way in waypoints){
titleA = paste0 ("waypoints = ", way)
titleB = paste0 ("PSO dimensions = ", way * 2)
title = paste (titleA, titleB, sep = "\n")
d <- data.frame(fitness = unlist(convergence_avg[[way]]),
iteration = steps)
p[[way-1]] <-ggplot(d,aes(x = iteration, y = fitness)) +
geom_line(colour="#009E73") + xlim(0, 50000) + ylim(100, 500) +
labs(title=title) + theme(axis.text.x=element_text(angle = 90, vjust = 0.5))
d2 <- data.frame (fitness = unlist (convergence_best[[way]]),
iteration = steps)
p[[way-1]] <- p[[way-1]] + addlinetoplot(d2, varx = "iteration", vary = "fitness", color = "#CC79A7")
d3 <- data.frame (fitness = unlist (convergence_worst[[way]]),
iteration = steps)
p[[way-1]] <- p[[way-1]] + addlinetoplot(d3, varx = "iteration", vary = "fitness", color = "#D55E00")
}
do.call (grid.arrange, p)
#############
# Scratch pad
#############
# N = 3
p[[2]] + xlim (0, 1500) + ylim (100, 450)
s <- 1500 / 10
ca_3 <- convergence_avg[[2]][1:s]
cb_3 <- convergence_best[[2]][1:s]
cw_3 <- convergence_worst[[2]][1:s]
c_3 <- rbind.data.frame(ca_3, cb_3, cw_3)
colnames (c_3) <- seq (from = 1, to = 1500, by = 10)
# N = 4
s <- 15000 / 10
p[[3]] + xlim (0, 15000) + ylim (100, 450)
ca_4 <- convergence_avg[[3]][1:s]
cb_4 <- convergence_best[[3]][1:s]
cw_4 <- convergence_worst[[3]][1:s]
c_4 <- rbind.data.frame (ca_4, cb_4, cw_4)
colnames (c_4) <- seq (from = 1, to = 15000, by = 10)
# N = 5
s <- 50000 / 10
p[[4]] + xlim (0, 50000) + ylim (100, 450)
ca_5 <- convergence_avg[[4]][1:s]
cb_5 <- convergence_best[[4]][1:s]
cw_5 <- convergence_worst[[4]][1:s]
c_5 <- rbind.data.frame (ca_5, cb_5, cw_5)
colnames (c_5) <- seq (from = 1, to = 50000, by = 10)
################
## Infeasible ##
################
infeasibleRunIdxs <- list ()
for (way in waypoints) {
infeasibleRunIdxs[[way]] <- setdiff (runs, feasibleRunIdxs[[way]])
}
dfNFeasible <- data.frame (V1=character(), V2=character(), V3=character(), V4=character(), V5=character(), V6=character())
for (w in waypoints){
dfw <- df[df$V3 == w, ]
dfwF <- dfw[dfw$V4 %in% infeasibleRunIdxs[[w]], ]
if (length (dfwF) != 0){
dfNFeasible <- rbind (dfNFeasible, dfwF)
}
else{
NULL
}
}
# Get a list where each elem is a table of runs and steps for each waypoint
convergence = vector("list", length (waypoints))
for (way in waypoints) {
convergence[[way]] <- sapply (X = infeasibleRunIdxs[[way]], FUN = function (x) {extractRun (dfNFeasible, 1, way, x)})
}
convergence_avg = vector("list", length (waypoints))
for (way in waypoints[3:9]) {
convergence_avg[[way]] <- avgRun (convergence, way)
}
convergence_best = vector ("list", length (waypoints))
for (way in waypoints[3:9]) {
convergence_best[[way]] <- bestRun (convergence, way)
}
convergence_worst = vector ("list", length (waypoints))
for (way in waypoints[3:9]) {
convergence_worst[[way]] <- worstRun (convergence, way)
}
p <- list ()
for (way in waypoints[3:9]){
titleA = paste0 ("waypoints = ", way)
titleB = paste0 ("PSO dimensions = ", way * 2)
title = paste (titleA, titleB, sep = "\n")
d <- data.frame(fitness = unlist(convergence_avg[[way]]),
iteration = steps)
p[[way-3]] <-ggplot(d,aes(x = iteration, y = fitness)) +
geom_line() + xlim(0, 50000) + ylim(100, 500) +
labs(title=title) + theme(axis.text.x=element_text(angle = 90, vjust = 0.5))
d2 <- data.frame (fitness = unlist (convergence_best[[way]]),
iteration = steps)
p[[way-3]] <- p[[way-3]] + addlinetoplot(d2, varx = "iteration", vary = "fitness", color = "#CC79A7")
d3 <- data.frame (fitness = unlist (convergence_worst[[way]]),
iteration = steps)
p[[way-3]] <- p[[way-3]] + addlinetoplot(d3, varx = "iteration", vary = "fitness", color = "#D55E00")
}
do.call (grid.arrange, p)
|
/navigation/pso/evaluation.R
|
no_license
|
vanshgoyal/rotf-software
|
R
| false
| false
| 7,704
|
r
|
# Parameters
waypoints = seq (2, 10)
runs = seq (1, 100)
#### Doorways. Experiment ?: (93, 56) --> (62, 148)
# Load data
df <- read.csv ("/home/krell/Downloads/experimentTable_Doorways.csv", header = FALSE)
# Separate out the obstacle table
obstacles <- df[df$V5 == -1,]
df <- df[df$V5 != -1, ]
getByWaypoint <- function (odf, path, waypoint) {
  # Subset `odf` to the rows whose waypoint-count column (V3) equals
  # `waypoint`.  NOTE(review): `path` is accepted but unused in this body.
  is_this_waypoint <- odf$V3 == waypoint
  odf[is_this_waypoint, ]
}
# Table to store feasability percentage
feasability <- data.frame(waypoint=character(),
feasableCount=integer(),
feasableProportion=integer(),
stringsAsFactors=FALSE)
colnames (feasability) <- c ("Waypoints", "NumFeasible", "PropFeasible")
for (i in waypoints) {
o <- getByWaypoint (obstacles, 1, i)
numFeasible <- length (which (o$V6 == 0))
propFeasible <- numFeasible / length (runs)
if (is.infinite(propFeasible)) {
propFeasible <- 0
}
feasability[nrow(feasability) + 1,] = list(i, numFeasible, propFeasible)
}
library(forcats) # Used to enforce order of bar chart X axis
# Inside bars
ggplot(data=feasability, aes(x=Waypoints, y=PropFeasible)) +
geom_bar(stat="identity", fill="steelblue")+
geom_text(aes(label=PropFeasible), vjust=1.6, color="white", size=3.5)+
theme_minimal() + aes(x = fct_inorder(Waypoints)) + labs (y = "Proportion of Collision-Free Paths", x = "Waypoints")
# Separate into feasible/infeasible data frames
##############
## Feasible ##
##############
getFeasibleIdx <- function (df, path, waypoint){
  # Indices (within this waypoint's runs) whose final obstacle-collision
  # count V6 is 0, i.e. the collision-free ("feasible") runs.
  # Fix: use the `df` argument instead of silently reading the global
  # `obstacles`; the only visible call site passes `obstacles` as `df`,
  # so behaviour at existing calls is unchanged.
  o <- df[df$V3 == waypoint, ]
  which(o$V6 == 0)
}
feasibleRunIdxs <- list ()
for (w in waypoints) {
feasibleRunIdxs[[w]] <- getFeasibleIdx (obstacles, 1, w)
}
dfFeasible <- data.frame (V1=character(), V2=character(), V3=character(), V4=character(), V5=character(), V6=character())
for (w in waypoints){
dfw <- df[df$V3 == w, ]
dfwF <- dfw[dfw$V4 %in% feasibleRunIdxs[[w]], ]
if (length (dfwF) != 0){
dfFeasible <- rbind (dfFeasible, dfwF)
}
}
# Get a specific run's data
extractRun <- function (df, path, waypoint, run) {
  # Fitness trace (column V6) for a single (waypoint, run) combination.
  # The waypoint filter is inlined here; it mirrors getByWaypoint().
  at_waypoint <- df[df$V3 == waypoint, ]
  one_run <- at_waypoint[at_waypoint$V4 == run, ]
  # NOTE(review): length() on a data frame counts columns, so this guard
  # is TRUE even for zero matching rows; kept to match the original.
  if (length(one_run) > 0){
    as.numeric(one_run$V6)
  }
}
# Get a list where each elem is a table of runs and steps for each waypoint
convergence = vector("list", length (waypoints))
for (way in waypoints) {
convergence[[way]] <- sapply (X = feasibleRunIdxs[[way]], FUN = function (x) {extractRun (dfFeasible, 1, way, x)})
}
# Get the avg run for a waypoint
avgRun <- function (cdf, waypoint){
  # Per-step (row-wise) mean fitness across all runs (columns) stored for
  # this waypoint count; returns NULL when no runs are stored.
  run_matrix <- cdf[[waypoint]]
  if (length(run_matrix) > 0) {
    apply(run_matrix, MARGIN = 1, FUN = mean)
  }
}
convergence_avg = vector("list", length (waypoints))
for (way in waypoints) {
convergence_avg[[way]] <- avgRun (convergence, way)
}
# the the best run for a waypoint
bestRun <- function (cdf, waypoint) {
  # Trace of the best run for this waypoint count: among the first 50 runs
  # (columns), pick the one whose final (last-row) fitness is smallest.
  # NOTE(review): 50 is hard-coded -- this errors with fewer than 50 runs
  # and silently ignores runs beyond 50; confirm intent.
  traces <- cdf[[waypoint]]
  if (length(traces) > 0){
    final_fitness <- traces[nrow(traces), 1:50]
    traces[, which.min(final_fitness)]
  }
}
convergence_best = vector ("list", length (waypoints))
for (way in waypoints) {
convergence_best[[way]] <- bestRun (convergence, way)
}
# the worst run for a waypoint
worstRun <- function (cdf, waypoint) {
  # Trace of the worst run for this waypoint count: among the first 50 runs
  # (columns), pick the one whose final (last-row) fitness is largest.
  # (Original local was named `bestIdx` -- a copy/paste leftover.)
  # NOTE(review): 50 is hard-coded, as in bestRun(); confirm intent.
  traces <- cdf[[waypoint]]
  if (length(traces) > 0){
    final_fitness <- traces[nrow(traces), 1:50]
    traces[, which.max(final_fitness)]
  }
}
convergence_worst = vector ("list", length (waypoints))
for (way in waypoints) {
convergence_worst[[way]] <- worstRun (convergence, way)
}
# Get number of steps
extractSteps <- function (df, path, waypoint, run) {
  # Step counters (column V5) recorded for one (waypoint, run) combination.
  # The waypoint filter is inlined here; it mirrors getByWaypoint().
  at_waypoint <- df[df$V3 == waypoint, ]
  one_run <- at_waypoint[at_waypoint$V4 == run, ]
  as.numeric(one_run$V5)
}
steps <- extractSteps (df, 1, 1, 1)
# Plot
addlinetoplot <- function(dataset, varx, vary, color) {
  # Return a one-element list holding an extra geom_line layer so another
  # series (columns of `dataset` named by the strings varx/vary) can be
  # `+`-ed onto an existing ggplot.
  # NOTE(review): aes_string() is deprecated in recent ggplot2 releases;
  # consider aes(.data[[varx]], .data[[vary]]) when upgrading -- verify.
  extra_layer <- geom_line(data = dataset, aes_string(x = varx, y = vary),
                           colour = color)
  list(extra_layer)
}
library(gridExtra)
p <- list ()
for (way in waypoints){
titleA = paste0 ("waypoints = ", way)
titleB = paste0 ("PSO dimensions = ", way * 2)
title = paste (titleA, titleB, sep = "\n")
d <- data.frame(fitness = unlist(convergence_avg[[way]]),
iteration = steps)
p[[way-1]] <-ggplot(d,aes(x = iteration, y = fitness)) +
geom_line(colour="#009E73") + xlim(0, 50000) + ylim(100, 500) +
labs(title=title) + theme(axis.text.x=element_text(angle = 90, vjust = 0.5))
d2 <- data.frame (fitness = unlist (convergence_best[[way]]),
iteration = steps)
p[[way-1]] <- p[[way-1]] + addlinetoplot(d2, varx = "iteration", vary = "fitness", color = "#CC79A7")
d3 <- data.frame (fitness = unlist (convergence_worst[[way]]),
iteration = steps)
p[[way-1]] <- p[[way-1]] + addlinetoplot(d3, varx = "iteration", vary = "fitness", color = "#D55E00")
}
do.call (grid.arrange, p)
#############
# Scratch pad
#############
# N = 3
p[[2]] + xlim (0, 1500) + ylim (100, 450)
s <- 1500 / 10
ca_3 <- convergence_avg[[2]][1:s]
cb_3 <- convergence_best[[2]][1:s]
cw_3 <- convergence_worst[[2]][1:s]
c_3 <- rbind.data.frame(ca_3, cb_3, cw_3)
colnames (c_3) <- seq (from = 1, to = 1500, by = 10)
# N = 4
s <- 15000 / 10
p[[3]] + xlim (0, 15000) + ylim (100, 450)
ca_4 <- convergence_avg[[3]][1:s]
cb_4 <- convergence_best[[3]][1:s]
cw_4 <- convergence_worst[[3]][1:s]
c_4 <- rbind.data.frame (ca_4, cb_4, cw_4)
colnames (c_4) <- seq (from = 1, to = 15000, by = 10)
# N = 5
s <- 50000 / 10
p[[4]] + xlim (0, 50000) + ylim (100, 450)
ca_5 <- convergence_avg[[4]][1:s]
cb_5 <- convergence_best[[4]][1:s]
cw_5 <- convergence_worst[[4]][1:s]
c_5 <- rbind.data.frame (ca_5, cb_5, cw_5)
colnames (c_5) <- seq (from = 1, to = 50000, by = 10)
################
## Infeasible ##
################
infeasibleRunIdxs <- list ()
for (way in waypoints) {
infeasibleRunIdxs[[way]] <- setdiff (runs, feasibleRunIdxs[[way]])
}
dfNFeasible <- data.frame (V1=character(), V2=character(), V3=character(), V4=character(), V5=character(), V6=character())
for (w in waypoints){
dfw <- df[df$V3 == w, ]
dfwF <- dfw[dfw$V4 %in% infeasibleRunIdxs[[w]], ]
if (length (dfwF) != 0){
dfNFeasible <- rbind (dfNFeasible, dfwF)
}
else{
NULL
}
}
# Get a list where each elem is a table of runs and steps for each waypoint
convergence = vector("list", length (waypoints))
for (way in waypoints) {
convergence[[way]] <- sapply (X = infeasibleRunIdxs[[way]], FUN = function (x) {extractRun (dfNFeasible, 1, way, x)})
}
convergence_avg = vector("list", length (waypoints))
for (way in waypoints[3:9]) {
convergence_avg[[way]] <- avgRun (convergence, way)
}
convergence_best = vector ("list", length (waypoints))
for (way in waypoints[3:9]) {
convergence_best[[way]] <- bestRun (convergence, way)
}
convergence_worst = vector ("list", length (waypoints))
for (way in waypoints[3:9]) {
convergence_worst[[way]] <- worstRun (convergence, way)
}
p <- list ()
for (way in waypoints[3:9]){
titleA = paste0 ("waypoints = ", way)
titleB = paste0 ("PSO dimensions = ", way * 2)
title = paste (titleA, titleB, sep = "\n")
d <- data.frame(fitness = unlist(convergence_avg[[way]]),
iteration = steps)
p[[way-3]] <-ggplot(d,aes(x = iteration, y = fitness)) +
geom_line() + xlim(0, 50000) + ylim(100, 500) +
labs(title=title) + theme(axis.text.x=element_text(angle = 90, vjust = 0.5))
d2 <- data.frame (fitness = unlist (convergence_best[[way]]),
iteration = steps)
p[[way-3]] <- p[[way-3]] + addlinetoplot(d2, varx = "iteration", vary = "fitness", color = "#CC79A7")
d3 <- data.frame (fitness = unlist (convergence_worst[[way]]),
iteration = steps)
p[[way-3]] <- p[[way-3]] + addlinetoplot(d3, varx = "iteration", vary = "fitness", color = "#D55E00")
}
do.call (grid.arrange, p)
|
###################################
### Create Node and Edge Frames ###
###################################
Derive_Edge_Weights <- function(Node_Frame, Edge_Frame){
  # Keep only edges whose endpoints both appear in the node table, weight
  # them (2 = endpoints in the same cluster, 0.1 = cross-cluster), colour
  # them black, and return list(edge table, weight vector, node table
  # with row names set, unique cluster labels).
  node_ids <- Node_Frame[,1]
  rownames(Node_Frame) <- node_ids
  both_known <- (Edge_Frame[,'from'] %in% node_ids) & (Edge_Frame[,'to'] %in% node_ids)
  kept_edges <- Edge_Frame[both_known,]
  # Cluster membership of each endpoint, looked up by row name.
  from_cluster <- Node_Frame[kept_edges[,1], 3]
  to_cluster   <- Node_Frame[kept_edges[,2], 3]
  weights <- rep(.1, nrow(kept_edges))
  weights[from_cluster == to_cluster] <- 2
  kept_edges[,3] <- weights
  kept_edges <- cbind(kept_edges, color='black')
  list(kept_edges, unlist(kept_edges[,3]), Node_Frame, unique(unlist(Node_Frame[,3])))
}
|
/execution/JUMPn_Helpers/JUMPn_functions/Network_Analysis.R
|
no_license
|
VanderwallDavid/JUMPn_1.0.0
|
R
| false
| false
| 749
|
r
|
###################################
### Create Node and Edge Frames ###
###################################
Derive_Edge_Weights <- function(Node_Frame, Edge_Frame){
  # Keep only edges whose endpoints both appear in the node table, weight
  # them (2 = endpoints in the same cluster, 0.1 = cross-cluster), colour
  # them black, and return list(edge table, weight vector, node table
  # with row names set, unique cluster labels).
  node_ids <- Node_Frame[,1]
  rownames(Node_Frame) <- node_ids
  both_known <- (Edge_Frame[,'from'] %in% node_ids) & (Edge_Frame[,'to'] %in% node_ids)
  kept_edges <- Edge_Frame[both_known,]
  # Cluster membership of each endpoint, looked up by row name.
  from_cluster <- Node_Frame[kept_edges[,1], 3]
  to_cluster   <- Node_Frame[kept_edges[,2], 3]
  weights <- rep(.1, nrow(kept_edges))
  weights[from_cluster == to_cluster] <- 2
  kept_edges[,3] <- weights
  kept_edges <- cbind(kept_edges, color='black')
  list(kept_edges, unlist(kept_edges[,3]), Node_Frame, unique(unlist(Node_Frame[,3])))
}
|
# Auto-extracted documentation-example stub for symbolicDA::generate.SO
# (generation of an artificial symbolic data table with a given cluster
# structure).  The upstream help page ships no runnable example yet.
library(symbolicDA)
### Name: generate.SO
### Title: generation of artifficial symbolic data table with given cluster
###   structure
### Aliases: generate.SO
### Keywords: symbolic,SDA
### ** Examples
# Example will be available in next version of package, thank You for your patience :-)
|
/data/genthat_extracted_code/symbolicDA/examples/generate.SO.rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 297
|
r
|
# Auto-extracted documentation-example stub for symbolicDA::generate.SO
# (generation of an artificial symbolic data table with a given cluster
# structure).  The upstream help page ships no runnable example yet.
library(symbolicDA)
### Name: generate.SO
### Title: generation of artifficial symbolic data table with given cluster
###   structure
### Aliases: generate.SO
### Keywords: symbolic,SDA
### ** Examples
# Example will be available in next version of package, thank You for your patience :-)
|
#' Plot the ROC Curves
#'
#'
#' @author Elías Alegría <elias.alegria@ug.uchile.cl>
#' @param models list of h2o models class H2OBinomialModel
#' @param newdata dataframe class H2OFrame
#' @param xval if TRUE plot the ROC Curves on cross validation
#'
#' @return ggplot graph
#' @export
#'
#' @seealso h2o.plotLift(), h2o.plotVarImp()
#' @examples
#' # Initialize h2o
#' h2o.init(min_mem_size = '1G', max_mem_size = '4G')
#'
#' # Read the data
#' prostate <- h2o.uploadFile(path = system.file("extdata", "prostate.csv", package = "h2o"),
#' destination_frame = "prostate.hex")
#'
#' # Rename target for binomial clasification
#' prostate[,"CAPSULE"] <- h2o.ifelse(prostate[,"CAPSULE"] == 1, 'TRUE', 'FALSE')
#'
#' # Split the data
#' split_h2o <- h2o.splitFrame(prostate, ratios = .7, destination_frames = c('train','test'))
#' train <- split_h2o[[1]]
#' test <- split_h2o[[2]]
#'
#' # Train models
#' y = "CAPSULE"
#' x = c("AGE", "RACE", "PSA", "VOL", "GLEASON")
#'
#' drf <- h2o.randomForest(y = y, x = x, training_frame = train)
#' glm <- h2o.glm(y = y, x = x, training_frame = train, family = "binomial")
#' gbm <- h2o.gbm(y = y, x = x, training_frame = train)
#'
#' # List of models
#' models <- list(GLM = glm, DRF = drf, GBM = gbm)
#'
#' # Let's Plot ROC Curves
#' h2plots::h2o.plotROC(models, test)
#'
#' # Finish H2O
#' h2o.shutdown()
h2o.plotROC <- function(models, newdata = NULL, xval = FALSE) {
  require(h2o)
  require(dplyr)
  require(ggplot2)
  ## --- input validation -----------------------------------------------------
  ## && short-circuits on scalars (the original scalar `&` worked but is not
  ## idiomatic for `if` conditions).
  if (xval && !is.null(newdata)) {
    stop('If the xval argument is TRUE, newdata must be NULL')
  }
  ## inherits() is safer than class(x) != "...": class() can return a vector,
  ## which errors in an if() condition on modern R.
  if (!is.null(newdata) && !inherits(newdata, 'H2OFrame')) {
    stop('The newdata argument must be class H2OFrame')
  }
  if (is.list(models)) {
    ## Multiple models: one thresholds/metrics table per model, labelled by
    ## the list names (or "model i" when the list is unnamed).
    n_models <- length(models)
    data <- NULL
    if (is.null(names(models))) {
      names <- paste0('model ', seq_len(n_models))
    } else {
      names <- names(models)
    }
    for (i in seq_len(n_models)) {
      # data type validation
      if (!inherits(models[[i]], 'H2OBinomialModel')) {
        stop('The models list must be class H2OBinomialModel')
      }
      # Metrics source: cross-validation, training, or the supplied frame.
      if (xval) {
        performance <- h2o.performance(models[[i]], xval = TRUE)
      } else if (is.null(newdata)) {
        performance <- h2o.performance(models[[i]])
      } else {
        performance <- h2o.performance(models[[i]], newdata = newdata)
      }
      roc_data <- performance@metrics$thresholds_and_metric_scores %>% tbl_df %>% mutate(model = names[i])
      data <- bind_rows(data, roc_data)
    }
    g <- ggplot(data, aes(fpr, tpr, color = model)) + geom_line(size = 1, alpha = .8)
  } else {
    # data type validation
    if (!inherits(models, 'H2OBinomialModel')) {
      stop('The model must be class H2OBinomialModel')
    }
    # Fix: honour xval for a single model too (it was silently ignored in
    # this branch before).
    if (xval) {
      performance <- h2o.performance(models, xval = TRUE)
    } else {
      performance <- h2o.performance(models, newdata = newdata)
    }
    data <- performance@metrics$thresholds_and_metric_scores %>% tbl_df
    g <- ggplot(data, aes(fpr, tpr)) + geom_line(size = 1, alpha = .7, color = '#2655ff')
  }
  ## Add the chance diagonal and axis labels.
  g <- g + geom_line(aes(fpr, fpr), data = data, color = 'grey', linetype = 'dashed') +
    ggtitle('ROC Curve') + xlab('False Positive Rate') + ylab('True Positive Rate')
  return(g)
}
|
/R/h2o.plotROC.R
|
no_license
|
huasin/h2plots
|
R
| false
| false
| 3,164
|
r
|
#' Plot the ROC Curves
#'
#'
#' @author Elías Alegría <elias.alegria@ug.uchile.cl>
#' @param models list of h2o models class H2OBinomialModel
#' @param newdata dataframe class H2OFrame
#' @param xval if TRUE plot the ROC Curves on cross validation
#'
#' @return ggplot graph
#' @export
#'
#' @seealso h2o.plotLift(), h2o.plotVarImp()
#' @examples
#' # Initialize h2o
#' h2o.init(min_mem_size = '1G', max_mem_size = '4G')
#'
#' # Read the data
#' prostate <- h2o.uploadFile(path = system.file("extdata", "prostate.csv", package = "h2o"),
#' destination_frame = "prostate.hex")
#'
#' # Rename target for binomial clasification
#' prostate[,"CAPSULE"] <- h2o.ifelse(prostate[,"CAPSULE"] == 1, 'TRUE', 'FALSE')
#'
#' # Split the data
#' split_h2o <- h2o.splitFrame(prostate, ratios = .7, destination_frames = c('train','test'))
#' train <- split_h2o[[1]]
#' test <- split_h2o[[2]]
#'
#' # Train models
#' y = "CAPSULE"
#' x = c("AGE", "RACE", "PSA", "VOL", "GLEASON")
#'
#' drf <- h2o.randomForest(y = y, x = x, training_frame = train)
#' glm <- h2o.glm(y = y, x = x, training_frame = train, family = "binomial")
#' gbm <- h2o.gbm(y = y, x = x, training_frame = train)
#'
#' # List of models
#' models <- list(GLM = glm, DRF = drf, GBM = gbm)
#'
#' # Let's Plot ROC Curves
#' h2plots::h2o.plotROC(models, test)
#'
#' # Finish H2O
#' h2o.shutdown()
h2o.plotROC <- function(models, newdata = NULL, xval = FALSE) {
  require(h2o)
  require(dplyr)
  require(ggplot2)
  ## --- input validation -----------------------------------------------------
  ## && short-circuits on scalars (the original scalar `&` worked but is not
  ## idiomatic for `if` conditions).
  if (xval && !is.null(newdata)) {
    stop('If the xval argument is TRUE, newdata must be NULL')
  }
  ## inherits() is safer than class(x) != "...": class() can return a vector,
  ## which errors in an if() condition on modern R.
  if (!is.null(newdata) && !inherits(newdata, 'H2OFrame')) {
    stop('The newdata argument must be class H2OFrame')
  }
  if (is.list(models)) {
    ## Multiple models: one thresholds/metrics table per model, labelled by
    ## the list names (or "model i" when the list is unnamed).
    n_models <- length(models)
    data <- NULL
    if (is.null(names(models))) {
      names <- paste0('model ', seq_len(n_models))
    } else {
      names <- names(models)
    }
    for (i in seq_len(n_models)) {
      # data type validation
      if (!inherits(models[[i]], 'H2OBinomialModel')) {
        stop('The models list must be class H2OBinomialModel')
      }
      # Metrics source: cross-validation, training, or the supplied frame.
      if (xval) {
        performance <- h2o.performance(models[[i]], xval = TRUE)
      } else if (is.null(newdata)) {
        performance <- h2o.performance(models[[i]])
      } else {
        performance <- h2o.performance(models[[i]], newdata = newdata)
      }
      roc_data <- performance@metrics$thresholds_and_metric_scores %>% tbl_df %>% mutate(model = names[i])
      data <- bind_rows(data, roc_data)
    }
    g <- ggplot(data, aes(fpr, tpr, color = model)) + geom_line(size = 1, alpha = .8)
  } else {
    # data type validation
    if (!inherits(models, 'H2OBinomialModel')) {
      stop('The model must be class H2OBinomialModel')
    }
    # Fix: honour xval for a single model too (it was silently ignored in
    # this branch before).
    if (xval) {
      performance <- h2o.performance(models, xval = TRUE)
    } else {
      performance <- h2o.performance(models, newdata = newdata)
    }
    data <- performance@metrics$thresholds_and_metric_scores %>% tbl_df
    g <- ggplot(data, aes(fpr, tpr)) + geom_line(size = 1, alpha = .7, color = '#2655ff')
  }
  ## Add the chance diagonal and axis labels.
  g <- g + geom_line(aes(fpr, fpr), data = data, color = 'grey', linetype = 'dashed') +
    ggtitle('ROC Curve') + xlab('False Positive Rate') + ylab('True Positive Rate')
  return(g)
}
|
## Regression tests for blackboost(): compare against gbm on BostonHousing,
## then exercise predict()/update()/survFit() and prediction aggregation.
require("mboost")
if (require("partykit")) {
  set.seed(290875)
  tst <- try(data("BostonHousing", package = "mlbench"))
  if (!inherits(tst, "try-error")) {
    ## Fit boosted stumps (depth-2 conditional trees) on BostonHousing.
    system.time(a <- blackboost(medv ~ ., data = BostonHousing,
      tree_controls = ctree_control(teststat = "max",
                                    testtype = "Teststatistic",
                                    mincriterion = 0,
                                    maxdepth = 2),
      control = boost_control(mstop = 500)))
    print(ae <- mean((predict(a) - BostonHousing$medv)^2))
    ## Refitting with the model's own weights must reproduce the predictions.
    pdiffs <- max(abs(predict(update(a, model.weights(a))) - predict(a)))
    stopifnot(pdiffs < sqrt(.Machine$double.eps))
    ### attach `gbm', quietly
    sink("tmpfile")
    if (require("gbm")) cat()
    sink()
    file.remove("tmpfile")
    if (require("gbm")) {
      system.time(b <- gbm(medv ~ ., data = BostonHousing,
        n.trees = 500, interaction = 2, distribution = "gaussian",
        shrinkage = 0.1, bag = 1))
      print(be <- mean((predict(b, newdata = BostonHousing, n.trees = 500) -
                        BostonHousing$medv)^2))
      plot(BostonHousing$medv, predict(a), col = "red", pch = "+")
      points(BostonHousing$medv,
             predict(b, newdata = BostonHousing, n.trees = 500),
             col = "blue", pch = "+")
      ## blackboost should achieve a lower training error than gbm here.
      stopifnot(ae < be)
    }
  }
  ### with by-argument, a certain type of interaction
  tctrl <- ctree_control(teststat = "max",
                         testtype = "Teststatistic",
                         mincriterion = 0,
                         maxdepth = 2)
  bb <- mboost(medv ~ btree(crim, zn, indus, nox, age) +
                 btree(crim, zn, indus, nox, age, by = chas),
               data = BostonHousing)
  stopifnot(isTRUE(all.equal(fitted(bb)[1:10],
                             c(predict(bb, newdata = BostonHousing[1:10,])),
                             check.attributes = FALSE)))
  ## The `by = chas` learner must contribute 0 and leave the base learner
  ## unchanged regardless of the chas level.
  nd <- BostonHousing[1:10,]
  nd$chas[] <- "0"
  p0 <- predict(bb, newdata = nd, which = 1)
  stopifnot(isTRUE(all.equal(c(unique(predict(bb, newdata = nd, which = 2))), 0,
                             check.attributes = FALSE)))
  nd$chas[] <- "1"
  p1 <- predict(bb, newdata = nd, which = 1)
  stopifnot(isTRUE(all.equal(p0, p1)))
  print(predict(bb, newdata = nd, which = 2))
  print(table(selected(bb)))
  print(table(selected(bb[50])))
  ### check different interfaces
  x <- as.matrix(BostonHousing[, colnames(BostonHousing) != "medv"])
  y <- BostonHousing$medv
  p2 <- predict(blackboost(medv ~ ., data = BostonHousing, family = Laplace()),
                newdata = BostonHousing)
  ## Cox model
  library("survival")
  fit2 <- blackboost(Surv(futime, fustat) ~ age + resid.ds + rx + ecog.ps,
                     data = ovarian, family = CoxPH(),
                     control = boost_control(mstop = 1000))
  A2 <- survFit(fit2)
  print(A2)
  newdata <- ovarian[c(1, 3, 12),]
  A2 <- survFit(fit2, newdata = newdata)
  print(A2)
  ### predictions:
  set.seed(1907)
  x1 <- rnorm(100)
  x2 <- rnorm(100)
  x3 <- rnorm(100)
  y <- rnorm(100, mean = 3 * x1, sd = 2)
  DF <- data.frame(y = y, x1 = x1, x2 = x2, x3 = x3)
  amod <- blackboost(y ~ -1 + x1 + x2, data = DF)
  ## Cross-check the aggregate= and which= combinations of predict().
  agg <- c("none", "sum", "cumsum")
  whi <- list(NULL, 1)
  for (i in 1:2) {
    pred <- vector("list", length = 3)
    for (j in 1:3) {
      pred[[j]] <- predict(amod, aggregate = agg[j], which = whi[[i]])
    }
    if (i == 1) {
      stopifnot(max(abs(pred[[2]] - pred[[3]][, ncol(pred[[3]])])) < sqrt(.Machine$double.eps))
      if ((pred[[2]] - rowSums(pred[[1]]))[1] - amod$offset < sqrt(.Machine$double.eps))
        warning(sQuote("aggregate = sum"), " adds the offset, ",
                sQuote("aggregate = none"), " doesn't.")
      stopifnot(max(abs(pred[[2]] - rowSums(pred[[1]]) - amod$offset)) < sqrt(.Machine$double.eps))
    } else {
      stopifnot(max(abs(pred[[2]] - sapply(pred[[3]], function(obj) obj[, ncol(obj)]))) < sqrt(.Machine$double.eps))
      stopifnot(max(abs(pred[[2]] - sapply(pred[[1]], function(obj) rowSums(obj)))) < sqrt(.Machine$double.eps))
    }
  }
  stopifnot(all(predict(amod, which = 1) + amod$offset - predict(amod) < sqrt(.Machine$double.eps)))
  # check type argument
  set.seed(1907)
  x1 <- rnorm(100)
  p <- 1 / (1 + exp(-3 * x1))
  y <- as.factor(runif(100) < p)
  DF <- data.frame(y = y, x1 = x1)
  mod <- blackboost(y ~ x1, family = Binomial(),
                    data = DF, control = boost_control(mstop = 5000))
  pr <- predict(mod)
  pr <- predict(mod, type = "class")
  ## The training data must be classified perfectly after 5000 iterations.
  foo <- table(pr, y)
  stopifnot(foo[1, 2] + foo[2, 1] == 0)
  pr <- predict(mod, type = "response")
  # <FIXME> How do we check "correctness" of results?</FIXME>
}
|
/tests/regtest-blackboost.R
|
no_license
|
boost-R/mboost
|
R
| false
| false
| 4,494
|
r
|
# Regression test for mboost::blackboost() (tree-based gradient boosting).
# Covers: accuracy vs. gbm on BostonHousing, weight-refit reproducibility,
# btree() interactions with a `by` factor, prediction aggregation modes,
# CoxPH survival fits, and classification-type predictions.
require("mboost")
if (require("partykit")) {
set.seed(290875)
tst <- try(data("BostonHousing", package = "mlbench"))
if (!inherits(tst, "try-error")) {
system.time(a <- blackboost(medv ~ ., data = BostonHousing,
tree_controls = ctree_control(teststat = "max",
testtype = "Teststatistic",
mincriterion = 0,
maxdepth = 2),
control = boost_control(mstop = 500)))
print(ae <- mean((predict(a) - BostonHousing$medv)^2))
# Refitting with the model's own weights must reproduce predictions exactly.
pdiffs <- max(abs(predict(update(a, model.weights(a))) - predict(a)))
stopifnot(pdiffs < sqrt(.Machine$double.eps))
### attach `gbm', quietly
sink("tmpfile")
if (require("gbm")) cat()
sink()
file.remove("tmpfile")
if (require("gbm")) {
system.time(b <- gbm(medv ~ ., data = BostonHousing,
n.trees = 500, interaction = 2, distribution = "gaussian",
shrinkage = 0.1, bag = 1))
print(be <- mean((predict(b, newdata = BostonHousing, n.trees = 500) -
BostonHousing$medv)^2))
plot(BostonHousing$medv, predict(a), col = "red", pch = "+")
points(BostonHousing$medv,
predict(b, newdata = BostonHousing, n.trees = 500),
col = "blue", pch = "+")
# blackboost's training error must beat gbm's under comparable settings.
stopifnot(ae < be)
}
}
### with by-argument, a certain type of interaction
tctrl <- ctree_control(teststat = "max",
testtype = "Teststatistic",
mincriterion = 0,
maxdepth = 2)
bb <- mboost(medv ~ btree(crim, zn, indus, nox, age) +
btree(crim, zn, indus, nox, age, by = chas),
data = BostonHousing)
stopifnot(isTRUE(all.equal(fitted(bb)[1:10],
c(predict(bb, newdata = BostonHousing[1:10,])),
check.attributes = FALSE)))
# The chas-interaction learner (which = 2) must contribute 0 when chas == "0",
# and the main-effect learner (which = 1) must not depend on chas at all.
nd <- BostonHousing[1:10,]
nd$chas[] <- "0"
p0 <- predict(bb, newdata = nd, which = 1)
stopifnot(isTRUE(all.equal(c(unique(predict(bb, newdata = nd, which = 2))), 0,
check.attributes = FALSE)))
nd$chas[] <- "1"
p1 <- predict(bb, newdata = nd, which = 1)
stopifnot(isTRUE(all.equal(p0, p1)))
print(predict(bb, newdata = nd, which = 2))
print(table(selected(bb)))
print(table(selected(bb[50])))
### check different interfaces
x <- as.matrix(BostonHousing[,colnames(BostonHousing) != "medv"])
y <- BostonHousing$medv
p2 <- predict(blackboost(medv ~ ., data = BostonHousing, family = Laplace()),
newdata = BostonHousing)
## Cox model
library("survival")
fit2 <- blackboost(Surv(futime,fustat) ~ age + resid.ds + rx + ecog.ps,
data = ovarian, family = CoxPH(), control = boost_control(mstop = 1000))
A2 <- survFit(fit2)
print(A2)
newdata <- ovarian[c(1,3,12),]
A2 <- survFit(fit2, newdata = newdata)
print(A2)
### predictions:
set.seed(1907)
x1 <- rnorm(100)
x2 <- rnorm(100)
x3 <- rnorm(100)
y <- rnorm(100, mean = 3 * x1, sd = 2)
DF <- data.frame(y = y, x1 = x1, x2 = x2, x3 = x3)
amod <- blackboost(y ~ -1 + x1 + x2, data = DF)
# Cross-check predict() aggregation modes: "sum" must equal the last column
# of "cumsum", and (offset aside) the row sums of "none".
agg <- c("none", "sum", "cumsum")
whi <- list(NULL, 1)
for (i in 1:2){
pred <- vector("list", length=3)
for (j in 1:3){
pred[[j]] <- predict(amod, aggregate=agg[j], which = whi[[i]])
}
if (i == 1){
stopifnot(max(abs(pred[[2]] - pred[[3]][,ncol(pred[[3]])])) < sqrt(.Machine$double.eps))
# NOTE(review): this condition lacks abs() -- a negative difference always
# passes a positive-tolerance comparison, so the warning fires almost
# unconditionally; presumably abs() was intended. Confirm before relying on it.
if ((pred[[2]] - rowSums(pred[[1]]))[1] - amod$offset < sqrt(.Machine$double.eps))
warning(sQuote("aggregate = sum"), " adds the offset, ", sQuote("aggregate = none"), " doesn't.")
stopifnot(max(abs(pred[[2]] - rowSums(pred[[1]]) - amod$offset)) < sqrt(.Machine$double.eps))
} else {
stopifnot(max(abs(pred[[2]] - sapply(pred[[3]], function(obj) obj[,ncol(obj)]))) < sqrt(.Machine$double.eps))
stopifnot(max(abs(pred[[2]] - sapply(pred[[1]], function(obj) rowSums(obj)))) < sqrt(.Machine$double.eps))
}
}
stopifnot(all(predict(amod, which=1) + amod$offset - predict(amod) < sqrt(.Machine$double.eps)))
# check type argument
set.seed(1907)
x1 <- rnorm(100)
p <- 1/(1 + exp(- 3 * x1))
y <- as.factor(runif(100) < p)
DF <- data.frame(y = y, x1 = x1)
mod <- blackboost(y ~ x1, family = Binomial(),
data = DF, control=boost_control(mstop=5000))
pr <- predict(mod)
pr <- predict(mod, type="class")
# After 5000 iterations on a strong signal, training data must be
# classified without error (off-diagonal cells of the table are 0).
foo <- table(pr, y)
stopifnot(foo[1,2] + foo[2,1] == 0)
pr <- predict(mod, type="response")
# <FIXME> How do we check "correctness" of results?</FIXME>
}
|
# plot4.R: draws a 2x2 panel of household power-consumption graphs into
# plot4.png. Expects readdata.R to define a data frame `data` with a
# DateTime column plus the power/voltage/sub-metering columns used below.
# Shared code to download and read in the necessary data
source("readdata.R")
# Set locale to English, so that the labels on the x axis are in English;
# the original locale is restored at the end of the script.
loc <- Sys.getlocale("LC_TIME")
Sys.setlocale("LC_TIME", "English")
png(filename="plot4.png")
# Make a plot with four graphs (2 rows x 2 columns, filled row-wise)
par(mfrow=c(2,2))
# Same as in plot2.R
with(data, plot(Global_active_power ~ DateTime, type="l", ylab = "Global Active Power", xlab=""))
# New plot: voltage over time (default axis labels)
with(data, plot(Voltage ~ DateTime, type = "l"))
# Same as in plot3.R
with(data, {
plot(Sub_metering_1 ~ DateTime, type="l", ylab = "Energy sub metering", xlab="")
points(Sub_metering_2 ~ DateTime, type="l", col="red")
points(Sub_metering_3 ~ DateTime, type="l", col="blue")
})
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col=c("black", "red", "blue"), lty=1, lwd=1, bty="n")
# New plot: global reactive power over time
with(data, plot(Global_reactive_power ~ DateTime, type = "l"))
dev.off()
Sys.setlocale("LC_TIME", loc)
|
/plot4.R
|
no_license
|
alexkops/ExData_Plotting1
|
R
| false
| false
| 968
|
r
|
# plot4.R: render a 2x2 panel of power-consumption graphs into plot4.png.
# Shared code to download and read in the necessary data (defines `data`)
source("readdata.R")

# Force English weekday/month labels on the time axis; restore the caller's
# locale once the device is closed.
old_locale <- Sys.getlocale("LC_TIME")
Sys.setlocale("LC_TIME", "English")

png(filename="plot4.png")
par(mfrow=c(2,2))

# Top-left: global active power (same as plot2.R)
plot(Global_active_power ~ DateTime, data = data, type="l",
     ylab = "Global Active Power", xlab="")

# Top-right: voltage (default "Voltage"/"DateTime" axis labels)
plot(Voltage ~ DateTime, data = data, type = "l")

# Bottom-left: the three sub-metering series (same as plot3.R)
plot(Sub_metering_1 ~ DateTime, data = data, type="l",
     ylab = "Energy sub metering", xlab="")
points(Sub_metering_2 ~ DateTime, data = data, type="l", col="red")
points(Sub_metering_3 ~ DateTime, data = data, type="l", col="blue")
legend("topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col=c("black", "red", "blue"), lty=1, lwd=1, bty="n")

# Bottom-right: global reactive power (default axis labels)
plot(Global_reactive_power ~ DateTime, data = data, type = "l")

dev.off()
Sys.setlocale("LC_TIME", old_locale)
|
# Global flag read by petoc(): 1 = report-generation mode (interactive
# pauses are skipped); set to 0 for step-by-step interactive runs.
report_mode <- 1
# If 1, we are generating a report!
# "Press Enter To Continue" pause. In interactive mode (report_mode == 0)
# it blocks on the console; entering "q" tears down the session and aborts.
# In report mode (report_mode != 0) it is a no-op.
petoc <- function() {
  if (report_mode != 0) {
    return(invisible(NULL))
  }
  message("Press [Enter] to continue")
  answer <- readline()
  if (identical(answer, "q")) {
    terminate_session()
    stop("User asked for termination.\n")
  }
}
#' Basic tests of model functionality. Serious issues if the test does not pass.
#'
#' Four smoke tests against the simulation engine: zeroed costs must yield a
#' zero total cost, zeroed utilities zero total QALYs, unit utilities without
#' discounting exactly one QALY per person-year, and disabled mortality
#' (background, explicit, and exacerbation) zero deaths.
#' @return tests results (0 on completion)
#' @export
sanity_check <- function() {
  init_session()

  cat("test 1: zero all costs\n")
  inp <- model_input$values
  for (nm in get_list_elements(inp$cost)) {
    inp$cost[[nm]] <- 0 * inp$cost[[nm]]
  }
  ret <- run(1, input = inp)
  message(if (Cget_output()$total_cost != 0) "Test failed!" else "Test passed!")

  message("test 2: zero all utilities\n")
  inp <- model_input$values
  for (nm in get_list_elements(inp$utility)) {
    inp$utility[[nm]] <- 0 * inp$utility[[nm]]
  }
  ret <- run(input = inp)
  message(if (Cget_output()$total_qaly != 0) "Test failed!" else "Test passed!")

  message("test 3: one all utilities ad get one QALY without discount\n")
  inp <- model_input$values
  inp$global_parameters$discount_qaly <- 0
  for (nm in get_list_elements(inp$utility)) {
    inp$utility[[nm]] <- 0 * inp$utility[[nm]] + 1
  }
  inp$utility$exac_dutil <- 0 * inp$utility$exac_dutil
  ret <- run(input = inp)
  message(if (Cget_output()$total_qaly / Cget_output()$cumul_time != 1) "Test failed!" else "Test passed!")

  message("test 4: zero mortality (both bg and exac)\n")
  inp <- model_input$values
  # Push exacerbation death logits far negative and zero the explicit inputs.
  inp$exacerbation$logit_p_death_by_sex <- 0 * inp$exacerbation$logit_p_death_by_sex - 10000000
  inp$agent$p_bgd_by_sex <- 0 * inp$agent$p_bgd_by_sex
  inp$manual$explicit_mortality_by_age_sex <- 0 * inp$manual$explicit_mortality_by_age_sex
  ret <- run(input = inp)
  if (Cget_output()$n_deaths != 0) {
    message(Cget_output()$n_deaths)
    stop("Test failed!")
  } else {
    message("Test passed!")
  }

  terminate_session()
  return(0)
}
#' Returns results of validation tests for population module
#'
#' Simulates the population module and compares the simulated relative
#' population growth and age pyramids (2015, 2025, 2034) against Statistics
#' Canada projections (CanSim table 052-0005), producing base-graphics and
#' ggplot comparisons.
#' @param incidence_k a number (default=1) by which the incidence rate of population will be multiplied.
#' @param remove_COPD 0 or 1, indicating whether COPD-caused mortality should be removed
#' @param savePlots 0 or 1, exports 300 DPI population growth and pyramid plots comparing simulated vs. predicted population
#' @return validation test results
#' @export
validate_population <- function(remove_COPD = 0, incidence_k = 1, savePlots = 0) {
message("Validate_population(...) is responsible for producing output that can be used to test if the population module is properly calibrated.\n")
petoc()
# Only aggregate ctime/age outputs are used below, so no event recording.
settings <- default_settings
settings$record_mode <- record_mode["record_mode_none"]
settings$agent_stack_size <- 0
settings$n_base_agents <- 1e+06
settings$event_stack_size <- 1
init_session(settings = settings)
input <- model_input$values #We can work with local copy more conveniently and submit it to the Run function
message("\nBecause you have called me with remove_COPD=", remove_COPD, ", I am", c("NOT", "indeed")[remove_COPD + 1], "going to remove COPD-related mortality from my calculations")
petoc()
# CanSim.052.0005<-read.csv(system.file ('extdata', 'CanSim.052.0005.csv', package = 'epicR'), header = T); #package ready
# reading
# Expected relative growth: projected total population per year, normalised
# to the first (2015) value, truncated to the model's time horizon.
x <- aggregate(CanSim.052.0005[, "value"], by = list(CanSim.052.0005[, "year"]), FUN = sum)
x[, 2] <- x[, 2]/x[1, 2]
x <- x[1:input$global_parameters$time_horizon, ]
plot(x, type = "l", ylim = c(0.5, max(x[, 2] * 1.5)), xlab = "Year", ylab = "Relative population size")
title(cex.main = 0.5, "Relative populaton size")
message("The plot I just drew is the expected (well, StatCan's predictions) relative population growth from 2015\n")
petoc()
if (remove_COPD) {
# Shift exacerbation death logits down by 1000 (probability ~ 0) and zero
# the explicit mortality table.
input$exacerbation$logit_p_death_by_sex <- -1000 + input$exacerbation$logit_p_death_by_sex
input$manual$explicit_mortality_by_age_sex <- 0
}
input$agent$l_inc_betas[1] <- input$agent$l_inc_betas[1] + log(incidence_k)
message("working...\n")
res <- run(input = input)
if (res < 0) {
stop("Something went awry; bye!")
return()
}
# NOTE(review): n_y1_agents and the red "Simulated" legend entry refer to a
# simulated-growth line that is never drawn on this base plot -- the
# corresponding lines() call appears to have been lost; confirm and restore.
n_y1_agents <- sum(Cget_output_ex()$n_alive_by_ctime_sex[1, ])
legend("topright", c("Predicted", "Simulated"), lty = c(1, 1), col = c("black", "red"))
message("And the black one is the observed (simulated) growth\n")
######## pretty population growth curve
CanSim <- tibble::as_tibble(CanSim.052.0005)
CanSim <- tidyr::spread(CanSim, key = year, value = value)
CanSim <- CanSim[, 3:51]
CanSim <- colSums (CanSim)
df <- data.frame(Year = c(2015:(2015 + model_input$values$global_parameters$time_horizon-1)), Predicted = CanSim[1:model_input$values$global_parameters$time_horizon] * 1000, Simulated = rowSums(Cget_output_ex()$n_alive_by_ctime_sex)/ settings$n_base_agents * 18179400) #rescaling population. There are about 18.6 million Canadians above 40
message ("Here's simulated vs. predicted population table:")
print(df)
dfm <- reshape2::melt(df[,c('Year','Predicted','Simulated')], id.vars = 1)
plot_population_growth <- ggplot2::ggplot(dfm, aes(x = Year, y = value)) + theme_tufte(base_size=14, ticks=F) +
geom_bar(aes(fill = variable), stat = "identity", position = "dodge") +
labs(title = "Population Growth Curve") + ylab ("Population") +
labs(caption = "(based on population at age 40 and above)") +
theme(legend.title=element_blank()) +
scale_y_continuous(name="Population", labels = scales::comma)
plot (plot_population_growth)
if (savePlots) ggsave(paste0("PopulationGrowth",".tiff"), plot = last_plot(), device = "tiff", dpi = 300)
# Age distribution per calendar year: row = year since 2015, col = age 40+.
pyramid <- matrix(NA, nrow = input$global_parameters$time_horizon, ncol = length(Cget_output_ex()$n_alive_by_ctime_age[1, ]) -
input$global_parameters$age0)
# FIX: was `0:time_horizon - 1`, which by R precedence is (0:h) - 1, i.e.
# -1:(h-1); the year = -1 pass silently assigned a zero-length selection.
# Parenthesised so the loop iterates over 0:(h-1) as intended.
for (year in 0:(model_input$values$global_parameters$time_horizon - 1)) pyramid[1 + year, ] <- Cget_output_ex()$n_alive_by_ctime_age[year +1, -(1:input$global_parameters$age0)]
message("Also, the ratio of the expected to observed population in years 10 and 20 are ", sum(Cget_output_ex()$n_alive_by_ctime_sex[10,
])/x[10, 2], " and ", sum(Cget_output_ex()$n_alive_by_ctime_sex[20, ])/x[20, 2])
petoc()
message("Now evaluating the population pyramid\n")
for (year in c(2015, 2025, 2034)) {
message("The observed population pyramid in", year, "is just drawn\n")
x <- CanSim.052.0005[which(CanSim.052.0005[, "year"] == year & CanSim.052.0005[, "sex"] == "both"), "value"]
#x <- c(x, rep(0, 111 - length(x) - 40))
#barplot(x, names.arg=40:110, xlab = "Age")
#title(cex.main = 0.5, paste("Predicted Pyramid - ", year))
dfPredicted <- data.frame (population = x * 1000, age = 40:100)
# message("Predicted average age of those >40 y/o is", sum((input$global_parameters$age0:(input$global_parameters$age0 + length(x) -
# 1)) * x)/sum(x), "\n")
# petoc()
#
# message("Simulated average age of those >40 y/o is", sum((input$global_parameters$age0:(input$global_parameters$age0 + length(x) -
# 1)) * x)/sum(x), "\n")
# petoc()
# Negate the simulated counts so the two pyramids mirror each other.
dfSimulated <- data.frame (population = pyramid[year - 2015 + 1, ], age = 40:110)
dfSimulated$population <- dfSimulated$population * (-1) / settings$n_base_agents * 18179400 #rescaling population. There are 18179400 Canadians above 40
p <- ggplot (NULL, aes(x = age, y = population)) + theme_tufte(base_size=14, ticks=F) +
geom_bar (aes(fill = "Simulated"), data = dfSimulated, stat="identity", alpha = 0.5) +
geom_bar (aes(fill = "Predicted"), data = dfPredicted, stat="identity", alpha = 0.5) +
theme(axis.title=element_blank()) +
ggtitle(paste0("Simulated vs. Predicted Population Pyramid in ", year)) +
theme(legend.title=element_blank()) +
scale_y_continuous(name="Population", labels = scales::comma) +
scale_x_continuous(name="Age", labels = scales::comma)
if (savePlots) ggsave(paste0("Population ", year,".tiff"), plot = last_plot(), device = "tiff", dpi = 300)
plot(p)
}
terminate_session()
}
#' Returns results of validation tests for smoking module.
#'
#' Compares simulated smoking prevalence at baseline (by sex and age group)
#' and the projected annual decline in current smoking against external
#' targets, then plots smoking status and pack-years over calendar time and age.
#' @param intercept_k a number; if supplied, overrides the smoking model intercept multiplier
#' @param remove_COPD 0 or 1. whether to remove COPD-related mortality.
#' @return validation test results
#' @export
validate_smoking <- function(remove_COPD = 1, intercept_k = NULL) {
message("Welcome to EPIC validator! Today we will see if the model make good smoking predictions")
petoc()
settings <- default_settings
settings$record_mode <- record_mode["record_mode_event"]
settings$agent_stack_size <- 0
settings$n_base_agents <- 1e+05
settings$event_stack_size <- settings$n_base_agents * 1.7 * 30
init_session(settings = settings)
input <- model_input$values
message("\nBecause you have called me with remove_COPD=", remove_COPD, ", I am", c("NOT", "indeed")[remove_COPD + 1], "going to remove COPD-related mortality from my calculations")
if (remove_COPD) {
# FIX: was `logit_p_death_by_sex * -10000`, which flips the sign of the
# logits and, for the usual negative values, drives exacerbation death
# probability towards 1 -- the opposite of removing COPD-related mortality
# (the old "TODO why was this zero?" note flagged the same confusion).
# Zero the logits and subtract a large constant instead, matching
# sanity_check() and validate_population().
input$exacerbation$logit_p_death_by_sex <- input$exacerbation$logit_p_death_by_sex * 0 - 10000
}
if (!is.null(intercept_k))
input$manual$smoking$intercept_k <- intercept_k
petoc()
message("There are two validation targets: 1) the prevalence of current smokers (by sex) in 2015, and 2) the projected decline in smoking rate.\n")
message("Starting validation target 1: baseline prevalence of smokers.\n")
petoc()
# CanSim.105.0501<-read.csv(paste(data_path,'/CanSim.105.0501.csv',sep=''),header=T) Included in the package as internal data
# Observed smoker prevalence: rows = sex (m, f), cols = three age groups.
tab1 <- rbind(CanSim.105.0501[1:3, "value"], CanSim.105.0501[4:6, "value"])/100
message("This is the observed percentage of current smokers in 2014 (m,f)\n")
barplot(tab1, beside = T, names.arg = c("40", "52", "65+"), ylim = c(0, 0.4), xlab = "Age group", ylab = "Prevalenc of smoking",
col = c("black", "grey"))
title(cex.main = 0.5, "Prevalence of current smoker by sex and age group (observed)")
legend("topright", c("Male", "Female"), fill = c("black", "grey"))
petoc()
message("Now I will run the model using the default smoking parameters")
petoc()
message("running the model\n")
run(input = input)
dataS <- Cget_all_events_matrix()
dataS <- dataS[which(dataS[, "event"] == events["event_start"]), ]
# Simulated analogue of tab1: mean smoking_status at creation by sex x age band.
age_list <- list(a1 = c(35, 45), a2 = c(45, 65), a3 = c(65, 111))
tab2 <- tab1
for (i in 0:1) for (j in 1:length(age_list)) tab2[i + 1, j] <- mean(dataS[which(dataS[, "female"] == i & dataS[, "age_at_creation"] >
age_list[[j]][1] & dataS[, "age_at_creation"] <= age_list[[j]][2]), "smoking_status"])
message("This is the model generated bar plot")
petoc()
barplot(tab2, beside = T, names.arg = c("40", "52", "65+"), ylim = c(0, 0.4), xlab = "Age group", ylab = "Prevalence of smoking",
col = c("black", "grey"))
title(cex.main = 0.5, "Prevalence of current smoking at creation (simulated)")
legend("topright", c("Male", "Female"), fill = c("black", "grey"))
message("This step is over; press enter to continue to step 2")
petoc()
message("Now we will validate the model on smoking trends")
petoc()
message("According to Table 2.1 of this report (see the extracted data in data folder): http://www.tobaccoreport.ca/2015/TobaccoUseinCanada_2015.pdf, the prevalence of current smoker is declining by around 3.8% per year\n")
petoc()
op_ex <- Cget_output_ex()
smoker_prev <- op_ex$n_current_smoker_by_ctime_sex/op_ex$n_alive_by_ctime_sex
smoker_packyears <- op_ex$sum_pack_years_by_ctime_sex/op_ex$n_alive_by_ctime_sex
plot(2015:(2015+input$global_parameters$time_horizon-1), smoker_prev[, 1], type = "l", ylim = c(0, 0.25), col = "black", xlab = "Year", ylab = "Prevalence of current smoking")
lines(2015:(2015+input$global_parameters$time_horizon-1), smoker_prev[, 2], type = "l", col = "grey")
legend("topright", c("male", "female"), lty = c(1, 1), col = c("black", "grey"))
title(cex.main = 0.5, "Annual prevalence of currrent smoking (simulated)")
plot(2015:(2015+input$global_parameters$time_horizon-1), smoker_packyears[, 1], type = "l", ylim = c(0, 30), col = "black", xlab = "Year", ylab = "Average Pack years")
lines(2015:(2015+input$global_parameters$time_horizon-1), smoker_packyears[, 2], type = "l", col = "grey")
legend("topright", c("male", "female"), lty = c(1, 1), col = c("black", "grey"))
title(cex.main = 0.5, "Average Pack-Years Per Year for 40+ Population (simulated)")
# Average annual relative decline from the log prevalence first differences.
z <- log(rowSums(smoker_prev))
message("average decline in % of current_smoking rate is", 1 - exp(mean(c(z[-1], NaN) - z, na.rm = T)))
petoc()
#plotting overall distribution of smoking stats over time
smoking_status_ctime <- matrix (NA, nrow = input$global_parameters$time_horizon, ncol = 4)
colnames(smoking_status_ctime) <- c("Year", "Non-Smoker", "Smoker", "Former smoker")
smoking_status_ctime[1:(input$global_parameters$time_horizon), 1] <- c(2015:(2015 + input$global_parameters$time_horizon-1))
smoking_status_ctime [, 2:4] <- op_ex$n_smoking_status_by_ctime / rowSums(as.data.frame (op_ex$n_alive_by_ctime_sex)) * 100
df <- as.data.frame(smoking_status_ctime)
dfm <- reshape2::melt(df[,c("Year", "Non-Smoker", "Smoker", "Former smoker")], id.vars = 1)
plot_smoking_status_ctime <- ggplot2::ggplot(dfm, aes(x = Year, y = value, color = variable)) +
geom_point () + geom_line() + labs(title = "Smoking Status per year") + ylab ("%") +
scale_colour_manual(values = c("#66CC99", "#CC6666", "#56B4E9")) + scale_y_continuous(breaks = scales::pretty_breaks(n = 12))
plot(plot_smoking_status_ctime ) #plot needs to be showing
# Plotting pack-years over time
dataS <- as.data.frame (Cget_all_events_matrix())
dataS <- subset (dataS, (event == 0 | event == 1 ))
data_all <- dataS
dataS <- subset (dataS, pack_years != 0)
avg_pack_years_ctime <- matrix (NA, nrow = input$global_parameters$time_horizon + 1, ncol = 4)
colnames(avg_pack_years_ctime) <- c("Year", "Smokers PYs", "Former Smokers PYs", "all")
avg_pack_years_ctime[1:(input$global_parameters$time_horizon + 1), 1] <- c(2015:(2015 + input$global_parameters$time_horizon))
for (i in 0:input$global_parameters$time_horizon) {
smokers <- subset (dataS, (floor(local_time + time_at_creation) == (i)) & smoking_status != 0)
prev_smokers <- subset (dataS, (floor(local_time + time_at_creation) == (i)) & smoking_status == 0)
all <- subset (data_all, floor(local_time + time_at_creation) == i)
avg_pack_years_ctime[i+1, "Smokers PYs"] <- colSums(smokers)[["pack_years"]] / dim (smokers)[1]
avg_pack_years_ctime[i+1, "Former Smokers PYs"] <- colSums(prev_smokers)[["pack_years"]] / dim (prev_smokers) [1]
avg_pack_years_ctime[i+1, "all"] <- colSums(all)[["pack_years"]] / dim (all) [1] #includes non-smokers
}
df <- as.data.frame(avg_pack_years_ctime)
dfm <- reshape2::melt(df[,c( "Year", "Smokers PYs", "Former Smokers PYs", "all")], id.vars = 1)
plot_avg_pack_years_ctime <- ggplot2::ggplot(dfm, aes(x = Year, y = value, color = variable)) +
geom_point () + geom_line() + labs(title = "Average pack-years per year ") + ylab ("Pack-years")
plot(plot_avg_pack_years_ctime) #plot needs to be showing
# Plotting pack-years over age
avg_pack_years_age <- matrix (NA, nrow = 110 - 40 + 1, ncol = 3)
colnames(avg_pack_years_age) <- c("Age", "Smokers PYs", "Former Smokers PYs")
avg_pack_years_age[1:(110 - 40 + 1), 1] <- c(40:110)
for (i in 0:(110 - 40)) {
smokers <- subset (dataS, (floor (local_time + age_at_creation) == (i+40)) & smoking_status != 0)
prev_smokers <- subset (dataS, (floor (local_time + age_at_creation) == (i+40)) & smoking_status == 0)
avg_pack_years_age[i+1, "Smokers PYs"] <- colSums(smokers)[["pack_years"]] / dim (smokers)[1]
avg_pack_years_age[i+1, "Former Smokers PYs"] <- colSums(prev_smokers)[["pack_years"]] / dim (prev_smokers) [1]
}
df <- as.data.frame(avg_pack_years_age)
dfm <- reshape2::melt(df[,c( "Age", "Smokers PYs", "Former Smokers PYs")], id.vars = 1)
plot_avg_pack_years_age <- ggplot2::ggplot(dfm, aes(x = Age, y = value, color = variable, ymin = 40, ymax = 100)) +
geom_point () + geom_line() + labs(title = "Average pack-years per age ") + ylab ("Pack-years")
plot(plot_avg_pack_years_age) #plot needs to be showing
message("This test is over; terminating the session")
petoc()
terminate_session()
}
#' Basic COPD test.
#'
#' Interactive sanity walk-through of the COPD module: runs the model with
#' (1) both COPD prevalence and incidence disabled, (2) prevalence only
#' (logit 0, i.e. probability 0.5), and (3) incidence only, reporting the
#' resulting COPD counts after each run.
#' @return validation test results
#' @export
sanity_COPD <- function() {
  # Report COPD prevalence three ways: the engine's own counters, and the
  # proportion of records with gold > 0 in the Start and End event dumps.
  report_COPD_counts <- function() {
    message("The model is reporting it has got that many COPDs:", Cget_output()$n_COPD, " out of ", Cget_output()$n_agents, "agents.\n")
    dataS <- get_events_by_type(events["event_start"])
    message("The prevalence of COPD in Start event dump is:", mean(dataS[, "gold"] > 0), "\n")
    dataS <- get_events_by_type(events["event_end"])
    message("The prevalence of COPD in End event dump is:", mean(dataS[, "gold"] > 0), "\n")
  }

  settings <- default_settings
  settings$record_mode <- record_mode["record_mode_agent"]
  # settings$agent_stack_size<-0
  settings$n_base_agents <- 10000
  settings$event_stack_size <- settings$n_base_agents * 10
  init_session(settings = settings)
  message("Welcome! I am going to check EPIC's sanity with regard to modeling COPD\n ")
  petoc()
  # FIX: restored the missing space in the message ("prevalenceparameters").
  message("COPD incidence and prevalence parameters are as follows\n")
  message("model_input$values$COPD$logit_p_COPD_betas_by_sex:\n")
  print(model_input$values$COPD$logit_p_COPD_betas_by_sex)
  petoc()
  message("model_input$values$COPD$p_prevalent_COPD_stage:\n")
  print(model_input$values$COPD$p_prevalent_COPD_stage)
  petoc()
  message("model_input$values$COPD$ln_h_COPD_betas_by_sex:\n")
  print(model_input$values$COPD$ln_h_COPD_betas_by_sex)
  petoc()
  message("Now I am going to first turn off both prevalence and incidence parameters and run the model to see how many COPDs I get\n")
  petoc()
  # Scenario 1: prevalence (logit) and incidence (log-hazard) betas forced to
  # -100 => effectively no COPD from either source.
  input <- model_input$values
  input$COPD$logit_p_COPD_betas_by_sex <- input$COPD$logit_p_COPD_betas_by_sex * 0 - 100
  input$COPD$ln_h_COPD_betas_by_sex <- input$COPD$ln_h_COPD_betas_by_sex * 0 - 100
  run(input = input)
  report_COPD_counts()
  petoc()
  message("Now I am going to switch off incidence and create COPD patients only through prevalence (set at 0.5)")
  petoc()
  # Scenario 2: prevalence betas zeroed (logit 0 => probability 0.5);
  # incidence still disabled.
  init_input()
  input <- model_input$values
  input$COPD$logit_p_COPD_betas_by_sex <- input$COPD$logit_p_COPD_betas_by_sex * 0
  input$COPD$ln_h_COPD_betas_by_sex <- input$COPD$ln_h_COPD_betas_by_sex * 0 - 100
  run(input = input)
  report_COPD_counts()
  petoc()
  message("Now I am going to switch off prevalence and create COPD patients only through incidence\n")
  petoc()
  # Scenario 3: prevalence disabled; incidence left at its default values.
  init_input()
  input <- model_input$values
  input$COPD$logit_p_COPD_betas_by_sex <- input$COPD$logit_p_COPD_betas_by_sex * 0 - 100
  run(input = input)
  report_COPD_counts()
  petoc()
  terminate_session()
}
#' Returns results of validation tests for COPD
#'
#' Runs the model (with the COPD incidence rate multiplied by
#' \code{incident_COPD_k}; 0 disables incidence entirely) and summarises
#' COPD prevalence at agent creation (overall, by sex, age group and
#' pack-years), overall incidence, and logistic calibration regressions of
#' COPD / GOLD2+ / GOLD3+ prevalence on age, pack-years, smoking status and
#' calendar year, fitted separately for each sex.
#' @param incident_COPD_k a number (default=1) by which the incidence rate of COPD will be multiplied.
#' @param return_CI if TRUE, returns 95 percent confidence intervals for the "Year" coefficient
#' @return validation test results
#' @export
validate_COPD <- function(incident_COPD_k = 1, return_CI = FALSE) {
  out <- list()
  settings <- default_settings
  settings$record_mode <- record_mode["record_mode_event"]
  settings$agent_stack_size <- 0
  settings$n_base_agents <- 1e+05
  settings$event_stack_size <- settings$n_base_agents * 50
  init_session(settings = settings)
  input <- model_input$values
  # Scale the incidence log-hazard intercept by log(k); k == 0 disables it.
  if (incident_COPD_k == 0)
    input$COPD$ln_h_COPD_betas_by_sex <- input$COPD$ln_h_COPD_betas_by_sex * 0 - 100
  else
    input$COPD$ln_h_COPD_betas_by_sex[1, ] <- model_input$values$COPD$ln_h_COPD_betas_by_sex[1, ] + log(incident_COPD_k)
  message("working...\n")
  run(input = input)
  opx <- Cget_output_ex()
  data <- as.data.frame(Cget_all_events_matrix())
  dataS <- data[which(data[, "event"] == events["event_start"]), ]
  # (Dead code removed: `op`, `dataE` and `new_COPDs` were computed but never
  # used; `new_COPDs` also mixed row indices across dataS and dataE.)
  out$p_copd_at_creation <- mean(dataS[, "gold"] > 0)
  out$inc_copd <- sum(opx$n_inc_COPD_by_ctime_age)/opx$cumul_non_COPD_time
  # NOTE(review): identical to inc_copd -- a sex-stratified output (e.g.
  # n_inc_COPD_by_ctime_sex) was presumably intended; kept as-is so the
  # returned list shape is unchanged. Confirm against the C++ outputs.
  out$inc_copd_by_sex <- sum(opx$n_inc_COPD_by_ctime_age)/opx$cumul_non_COPD_time
  x <- sqldf::sqldf("SELECT female, SUM(gold>0) AS n_copd, COUNT(*) AS n FROM dataS GROUP BY female")
  out$p_copd_at_creation_by_sex <- x[, "n_copd"]/x[, "n"]
  # Prevalence at creation by current-age band.
  age_cats <- c(40, 50, 60, 70, 80, 111)
  dataS[, "age_cat"] <- as.numeric(cut(dataS[, "age_at_creation"] + dataS[, "local_time"], age_cats, include.lowest = TRUE))
  x <- sqldf::sqldf("SELECT age_cat, SUM(gold>0) AS n_copd, COUNT(*) AS n FROM dataS GROUP BY age_cat")
  temp <- x[, "n_copd"]/x[, "n"]
  names(temp) <- paste(age_cats[-length(age_cats)], age_cats[-1], sep = "-")
  out$p_copd_at_creation_by_age <- temp
  # Prevalence at creation by pack-years band.
  py_cats <- c(0, 15, 30, 45, Inf)
  dataS[, "py_cat"] <- as.numeric(cut(dataS[, "pack_years"], py_cats, include.lowest = TRUE))
  x <- sqldf::sqldf("SELECT py_cat, SUM(gold>0) AS n_copd, COUNT(*) AS n FROM dataS GROUP BY py_cat")
  temp <- x[, "n_copd"]/x[, "n"]
  names(temp) <- paste(py_cats[-length(py_cats)], py_cats[-1], sep = "-")
  out$p_copd_at_creation_by_pack_years <- temp
  # Cross-sectional ("fixed") records with derived regressors for calibration.
  dataF <- data[which(data[, "event"] == events["event_fixed"]), ]
  dataF[, "age"] <- dataF[, "local_time"] + dataF[, "age_at_creation"]
  dataF[, "copd"] <- (dataF[, "gold"] > 0) * 1
  dataF[, "gold2p"] <- (dataF[, "gold"] > 1) * 1
  dataF[, "gold3p"] <- (dataF[, "gold"] > 2) * 1
  dataF[, "year"] <- dataF[, "local_time"] + dataF[, "time_at_creation"]
  # Fit one calibration regression for a prevalence indicator and sex
  # (0 = male, 1 = female); returns the named entries to append to `out`:
  # the coefficient vector, plus the 95% CI of "year" when return_CI is TRUE.
  fit_prev_reg <- function(outcome, sex) {
    sex_lab <- c("male", "female")[sex + 1]
    fml <- as.formula(paste(outcome, "~ age + pack_years + smoking_status + year"))
    res <- glm(data = dataF[which(dataF[, "female"] == sex), ], formula = fml,
               family = binomial(link = logit))
    entry <- list()
    entry[[paste0("calib_prev_", outcome, "_reg_coeffs_", sex_lab)]] <- coefficients(res)
    if (return_CI) {
      entry[[paste0("conf_prev_", outcome, "_reg_coeffs_", sex_lab)]] <- stats::confint(res, "year", level = 0.95)
    }
    entry
  }
  # Same element names and order as the six hand-written stanzas it replaces:
  # outcome-major (copd, gold2p, gold3p), male before female.
  for (outcome in c("copd", "gold2p", "gold3p")) {
    for (sex in 0:1) {
      out <- c(out, fit_prev_reg(outcome, sex))
    }
  }
  terminate_session()
  return(out)
}
#' Returns results of validation tests for payoffs, costs and QALYs
#'
#' Runs the model and "back-calculates" the per-GOLD-stage background
#' utilities and costs from aggregate outputs: total QALYs/costs per stage,
#' minus the exacerbation-attributable part, divided by person-time spent in
#' that stage. Reports the percentage difference from the input targets.
#' Index range 2:5 throughout selects GOLD stages I-IV (index 1 = no COPD).
#' @param nPatient number of simulated patients. Default is 1e6.
#' @param disableDiscounting if TRUE, discounting will be disabled for cost and QALY calculations. Default: TRUE
#' @param disableExacMortality if TRUE, mortality due to exacerbations will be disabled for cost and QALY calculations. Default: TRUE
#' @return validation test results
#' @export
validate_payoffs <- function(nPatient = 1e6, disableDiscounting = TRUE, disableExacMortality = TRUE)
{
out <- list()
# Only aggregate outputs are consumed below, so no event recording.
settings <- default_settings
settings$record_mode <- record_mode["record_mode_none"]
settings$agent_stack_size <- 0
settings$n_base_agents <- nPatient
settings$event_stack_size <- 0
init_session(settings = settings)
input <- model_input$values
if (disableDiscounting) {
input$global_parameters$discount_cost <- 0
input$global_parameters$discount_qaly <- 0
}
if (disableExacMortality) {
# Shift death logits by -1000 => exacerbation death probability ~ 0.
input$exacerbation$logit_p_death_by_sex <- -1000 + 0*input$exacerbation$logit_p_death_by_sex
}
run(input = input)
op <- Cget_output()
op_ex <- Cget_output_ex()
exac_dutil<-Cget_inputs()$utility$exac_dutil
exac_dcost<-Cget_inputs()$cost$exac_dcost
# QALYs: subtract the exacerbation disutility (the elementwise product
# assumes exac_dutil has the same GOLD-by-severity shape as
# n_exac_by_gold_severity -- TODO confirm), then divide by person-time per
# stage to recover the background (per-year) utility per stage.
total_qaly<-colSums(op_ex$cumul_qaly_gold_ctime)[2:5]
qaly_loss_dueto_exac_by_gold<-rowSums(op_ex$n_exac_by_gold_severity*exac_dutil)
back_calculated_utilities<-(total_qaly-qaly_loss_dueto_exac_by_gold)/colSums(op_ex$cumul_time_by_ctime_GOLD)[2:5]
#I=0.81,II=0.72,III=0.68,IV=0.58)))
out$cumul_time_per_GOLD <- colSums(op_ex$cumul_time_by_ctime_GOLD)[2:5]
out$total_qaly <- total_qaly
out$qaly_loss_dueto_exac_by_gold <- qaly_loss_dueto_exac_by_gold
out$back_calculated_utilities <- back_calculated_utilities
out$utility_target_values <- input$utility$bg_util_by_stage
out$utility_difference_percentage <- (out$back_calculated_utilities - out$utility_target_values[2:5]) / out$utility_target_values[2:5] * 100
# Costs: same back-calculation; the t()/t() construction appears to scale
# each column of the exacerbation-count matrix by a per-severity cost
# vector exac_dcost -- verify its orientation against the C++ inputs.
total_cost<-colSums(op_ex$cumul_cost_gold_ctime)[2:5]
cost_dueto_exac_by_gold<-rowSums(t((exac_dcost)*t(op_ex$n_exac_by_gold_severity)))
back_calculated_costs<-(total_cost-cost_dueto_exac_by_gold)/colSums(op_ex$cumul_time_by_ctime_GOLD)[2:5]
#I=615, II=1831, III=2619, IV=3021
out$total_cost <- total_cost
out$cost_dueto_exac_by_gold <- cost_dueto_exac_by_gold
out$back_calculated_costs <- back_calculated_costs
out$cost_target_values <- input$cost$bg_cost_by_stage
out$cost_difference_percentage <- (out$back_calculated_costs - out$cost_target_values[2:5]) / out$cost_target_values[2:5] * 100
terminate_session()
return(out)
}
#' Returns results of validation tests for mortality rate
#'
#' Runs a one-year simulation and compares the simulated all-cause mortality
#' against the expected (life-table) background mortality, by age and sex.
#' Each mortality component can be scaled through a multiplier (or, for
#' comorbidity, switched off) to check its effect on the overall rate.
#' @param n_sim number of simulated agents
#' @param bgd multiplier applied to background mortality probabilities
#' @param bgd_h multiplier applied to background mortality log-hazard coefficients
#' @param manual multiplier applied to the explicit (manual) mortality adjustments
#' @param exacerbation multiplier applied to the exacerbation mortality logits
#' @param comorbidity set to 0 to disable comorbidity-related mortality
#' @return a list with the difference between simulated and expected mortality
#'   by age (40-90) and sex; plots ratio and difference curves as side effects
#' @export
validate_mortality <- function(n_sim = 5e+05, bgd = 1, bgd_h = 1, manual = 1, exacerbation = 1, comorbidity = 1) {
    message("Hello from EPIC! I am going to test mortality rate and how it is affected by input parameters\n")
    petoc()
    settings <- default_settings
    settings$record_mode <- record_mode["record_mode_none"]
    settings$agent_stack_size <- 0
    settings$n_base_agents <- n_sim
    settings$event_stack_size <- 0
    init_session(settings = settings)
    input <- model_input$values
    input$global_parameters$time_horizon <- 1
    # Scale each mortality component by its multiplier.
    input$agent$p_bgd_by_sex <- input$agent$p_bgd_by_sex * bgd
    input$agent$ln_h_bgd_betas <- input$agent$ln_h_bgd_betas * bgd_h
    input$manual$explicit_mortality_by_age_sex <- input$manual$explicit_mortality_by_age_sex * manual
    input$exacerbation$logit_p_death_by_sex <- input$exacerbation$logit_p_death_by_sex * exacerbation
    if (comorbidity == 0) {
        # Disable fatal MI/stroke and zero the comorbidity terms of the
        # background mortality hazard.
        input$comorbidity$p_mi_death <- 0
        input$comorbidity$p_stroke_death <- 0
        input$agent$ln_h_bgd_betas[, c("b_mi", "n_mi", "b_stroke", "n_stroke", "hf")] <- 0
    }
    message("working...\n")
    res <- run(input = input)
    # Check the run status, consistent with the other validators in this file.
    if (res < 0)
        stop("Execution stopped.\n")
    # Fetch outputs once instead of calling Cget_output() repeatedly.
    output <- Cget_output()
    output_ex <- Cget_output_ex()
    message("Mortality rate was ", output$n_death/output$cumul_time, "\n")
    if (output$n_death > 0) {
        # Ratio of observed to expected (life-table) mortality, ages 40-110
        # (rows 41:111 are ages 40-110; column 1 = male, column 2 = female).
        ratio <- (output_ex$n_death_by_age_sex[41:111, ]/output_ex$sum_time_by_age_sex[41:111, ])/model_input$values$agent$p_bgd_by_sex[41:111, ]
        plot(40:110, ratio[, 1], type = "l", col = "blue", xlab = "age", ylab = "Ratio", ylim = c(0, 4))
        legend("topright", c("male", "female"), lty = c(1, 1), col = c("blue", "red"))
        lines(40:110, ratio[, 2], type = "l", col = "red")
        title(cex.main = 0.5, "Ratio of simulated to expected (life table) mortality, by sex and age")
        # Absolute difference, restricted to ages 40-90 where counts are larger.
        difference <- (output_ex$n_death_by_age_sex[41:91, ]/output_ex$sum_time_by_age_sex[41:91, ]) - model_input$values$agent$p_bgd_by_sex[41:91, ]
        plot(40:90, difference[, 1], type = "l", col = "blue", xlab = "age", ylab = "Difference", ylim = c(-.1, .1))
        legend("topright", c("male", "female"), lty = c(1, 1), col = c("blue", "red"))
        lines(40:90, difference[, 2], type = "l", col = "red")
        title(cex.main = 0.5, "Difference between simulated and expected (life table) mortality, by sex and age")
        # Release the C session (was previously never terminated).
        terminate_session()
        return(list(difference = difference))
    } else {
        terminate_session()
        message("No death occured.\n")
    }
}
#' Returns results of validation tests for comorbidities
#'
#' Runs the model twice: first with no event recording to report baseline
#' prevalence and follow-up incidence of MI, stroke and heart failure; then
#' with comorbidity events recorded to plot incidence by age and sex.
#' @param n_sim number of agents
#' @return the matrix of MI counts over alive counts by age (40-110) and sex
#' @export
validate_comorbidity <- function(n_sim = 1e+05) {
    message("Hello from EPIC! I am going to validate comorbidities for ya\n")
    petoc()
    settings <- default_settings
    settings$record_mode <- record_mode["record_mode_none"]
    settings$agent_stack_size <- 0
    settings$n_base_agents <- n_sim
    settings$event_stack_size <- 0
    init_session(settings = settings)
    input <- model_input$values
    res <- run(input = input)
    if (res < 0)
        stop("Execution stopped.\n")
    output <- Cget_output()
    output_ex <- Cget_output_ex()
    # Baseline prevalence = (total cases minus incident cases) / agents;
    # incidence = incident cases per person-year of follow-up.
    message("The prevalence of having MI at baseline was ", (output_ex$n_mi - output_ex$n_incident_mi)/output$n_agent, "\n")
    message("The incidence of MI during follow-up was ", output_ex$n_incident_mi/output$cumul_time, "/PY\n")
    message("The prevalence of having stroke at baseline was ", (output_ex$n_stroke - output_ex$n_incident_stroke)/output$n_agent, "\n")
    message("The incidence of stroke during follow-up was ", output_ex$n_incident_stroke/output$cumul_time, "/PY\n")
    # BUG FIX: previously computed (n_stroke - n_hf); HF prevalence must follow
    # the same (total - incident) pattern as MI and stroke above.
    message("The prevalence of having hf at baseline was ", (output_ex$n_hf - output_ex$n_incident_hf)/output$n_agent, "\n")
    message("The incidence of hf during follow-up was ", output_ex$n_incident_hf/output$cumul_time, "/PY\n")
    terminate_session()
    # Second run: record comorbidity events so incidence can be plotted by age.
    settings$record_mode <- record_mode["record_mode_some_event"]
    settings$events_to_record <- events[c("event_start", "event_mi", "event_stroke", "event_hf", "event_end")]
    settings$n_base_agents <- 1e+05
    settings$event_stack_size <- settings$n_base_agents * 1.6 * 10
    init_session(settings = settings)
    input <- model_input$values
    if (run(input = input) < 0)
        stop("Execution stopped.\n")
    output <- Cget_output()
    output_ex <- Cget_output_ex()
    # mi_events<-get_events_by_type(events['event_mi']) stroke_events<-get_events_by_type(events['event_stroke'])
    # hf_events<-get_events_by_type(events['event_hf']) end_events<-get_events_by_type(events['event_end'])
    # Rows 41:100 correspond to ages 40-99; column 1 = male, column 2 = female.
    plot(output_ex$n_mi_by_age_sex[41:100, 1]/output_ex$n_alive_by_age_sex[41:100, 1], type = "l", col = "red")
    lines(output_ex$n_mi_by_age_sex[41:100, 2]/output_ex$n_alive_by_age_sex[41:100, 2], type = "l", col = "blue")
    title(cex.main = 0.5, "Incidence of MI by age and sex")
    plot(output_ex$n_stroke_by_age_sex[, 1]/output_ex$n_alive_by_age_sex[, 1], type = "l", col = "red")
    lines(output_ex$n_stroke_by_age_sex[, 2]/output_ex$n_alive_by_age_sex[, 2], type = "l", col = "blue")
    title(cex.main = 0.5, "Incidence of Stroke by age and sex")
    plot(output_ex$n_hf_by_age_sex[, 1]/output_ex$n_alive_by_age_sex[, 1], type = "l", col = "red")
    lines(output_ex$n_hf_by_age_sex[, 2]/output_ex$n_alive_by_age_sex[, 2], type = "l", col = "blue")
    title(cex.main = 0.5, "Incidence of HF by age and sex")
    mi_rate_by_age_sex <- output_ex$n_mi_by_age_sex[41:111, ]/output_ex$n_alive_by_age_sex[41:111, ]
    # Release the second C session (was previously left open).
    terminate_session()
    mi_rate_by_age_sex
}
#' Returns results of validation tests for lung function
#'
#' Runs the model recording creation, COPD-incidence and annual events, then
#' summarises FEV1 and GOLD-stage distributions among prevalent (at creation)
#' and incident COPD cases, and plots FEV1 trajectories for 100 incident cases.
#' @return a list with FEV1 mean/SD and GOLD-stage distributions for prevalent
#'   and incident COPD, both event-based and patient-based
#' @export
validate_lung_function <- function() {
    message("This function examines FEV1 values\n")
    petoc()
    settings <- default_settings
    settings$record_mode <- record_mode["record_mode_some_event"]
    settings$events_to_record <- events[c("event_start", "event_COPD", "event_fixed")]
    settings$agent_stack_size <- 0
    settings$n_base_agents <- 1e+05
    settings$event_stack_size <- settings$n_base_agents * 100
    init_session(settings = settings)
    input <- model_input$values
    input$global_parameters$discount_qaly <- 0
    run(input = input)
    all_events <- as.data.frame(Cget_all_events_matrix())
    # Release the C session as soon as the event matrix has been copied out
    # (the session was previously never terminated).
    terminate_session()
    COPD_events <- which(all_events[, "event"] == events["event_COPD"])
    # FEV1 mean/SD by GOLD stage among prevalent (at start) and incident cases.
    out_FEV1_prev <- sqldf::sqldf(paste("SELECT gold, AVG(FEV1) AS 'Mean', STDEV(FEV1) AS 'SD' FROM all_events WHERE event=", events["event_start"],
        " GROUP BY gold"))
    out_FEV1_inc <- sqldf::sqldf(paste("SELECT gold, AVG(FEV1) AS 'Mean', STDEV(FEV1) AS 'SD' FROM all_events WHERE event=", events["event_COPD"],
        " GROUP BY gold"))
    # GOLD-stage distribution among prevalent and incident cases (event-based).
    out_gold_prev <- sqldf::sqldf(paste("SELECT gold, COUNT(*) AS N FROM all_events WHERE event=", events["event_start"], " GROUP BY gold"))
    out_gold_prev[, "Percent"] <- round(out_gold_prev[, "N"]/sum(out_gold_prev[, "N"]), 3)
    out_gold_inc <- sqldf::sqldf(paste("SELECT gold, COUNT(*) AS N FROM all_events WHERE event=", events["event_COPD"], " GROUP BY gold"))
    out_gold_inc[, "Percent"] <- round(out_gold_inc[, "N"]/sum(out_gold_inc[, "N"]), 3)
    # Patient-based distributions (event codes: 4 = COPD incidence, 0 = start).
    COPD_events_patients <- subset(all_events, event == 4)
    start_events_patients <- subset(all_events, event == 0 & gold > 0)
    out_gold_inc_patients <- table(COPD_events_patients[, "gold"])/sum(table(COPD_events_patients[, "gold"]))
    out_gold_prev_patients <- table(start_events_patients[, "gold"])/sum(table(start_events_patients[, "gold"]))
    # Plot FEV1 trajectories for up to 100 incident COPD cases (previously
    # assumed at least 100 such cases and crashed on small runs).
    COPD_ids <- all_events[COPD_events, "id"]
    for (i in seq_len(min(100, length(COPD_ids)))) {
        y <- which(all_events[, "id"] == COPD_ids[i] & all_events[, "gold"] > 0)
        if (i == 1)
            plot(all_events[y, "local_time"], all_events[y, "FEV1"], type = "l", xlim = c(0, 20), ylim = c(0, 5), xlab = "local time",
                ylab = "FEV1") else lines(all_events[y, "local_time"], all_events[y, "FEV1"], type = "l")
    }
    title(cex.main = 0.5, "Trajectories of FEV1 in 100 individuals")
    return(list(FEV1_prev = out_FEV1_prev, FEV1_inc = out_FEV1_inc, gold_prev = out_gold_prev, gold_inc = out_gold_inc, gold_prev_patients = out_gold_prev_patients,
        gold_inc_patients = out_gold_inc_patients))
}
#' Returns results of validation tests for exacerbation rates
#'
#' Runs the model with full event recording and computes the annual rate of
#' exacerbations per GOLD stage, using the exact post-COPD follow-up time
#' spent in each stage as the denominator.
#' @param base_agents Number of agents in the simulation. Default is 1e4.
#' @return a list with the annual exacerbation rate for GOLD stages I-IV
#' @export
validate_exacerbation <- function(base_agents=1e4) {
    settings <- default_settings
    settings$record_mode <- record_mode["record_mode_event"]
    #settings$agent_stack_size <- 0
    settings$n_base_agents <- base_agents
    #settings$event_stack_size <- 1
    init_session(settings = settings)
    input <- model_input$values #We can work with local copy more conveniently and submit it to the Run function
    run(input = input)
    all_events <- as.data.frame(Cget_all_events_matrix())
    exac_events <- subset(all_events, event == 5)  # event code 5 = exacerbation
    # Accumulate post-COPD follow-up time spent within each GOLD stage by
    # walking the per-agent event stream and splitting time at stage changes.
    # (event code 14 = the agent's final/exit record.)
    Follow_up_Gold <- c(0, 0, 0, 0)
    last_GOLD_transition_time <- 0
    for (i in seq_len(nrow(all_events))[-1]) {  # seq_len avoids 2:1 on tiny inputs
        if (all_events[i, "id"] != all_events[i - 1, "id"])
            last_GOLD_transition_time <- 0
        if ((all_events[i, "id"] == all_events[i - 1, "id"]) & (all_events[i, "gold"] != all_events[i - 1, "gold"])) {
            Follow_up_Gold[all_events[i - 1, "gold"]] = Follow_up_Gold[all_events[i - 1, "gold"]] + all_events[i - 1, "followup_after_COPD"] -
                last_GOLD_transition_time
            last_GOLD_transition_time <- all_events[i - 1, "followup_after_COPD"]
        }
        if (all_events[i, "event"] == 14)
            Follow_up_Gold[all_events[i, "gold"]] = Follow_up_Gold[all_events[i, "gold"]] + all_events[i, "followup_after_COPD"] -
                last_GOLD_transition_time
    }
    terminate_session()
    # Count exacerbations per stage once; fixing the factor levels to 1:4
    # guarantees a zero count (rather than a misaligned table and wrong
    # positional indexing) for any stage with no observed exacerbation.
    exac_counts <- as.numeric(table(factor(exac_events[, "gold"], levels = 1:4)))
    rates <- exac_counts/Follow_up_Gold
    return(list(exacRateGOLDI = rates[1], exacRateGOLDII = rates[2], exacRateGOLDIII = rates[3], exacRateGOLDIV = rates[4]))
}
#' Returns the Kaplan Meier curve comparing COPD and non-COPD
#'
#' Runs the model with full event recording, builds a survival cohort from the
#' recorded events, plots Kaplan-Meier curves for COPD vs non-COPD subjects,
#' and fits/summarises a Cox proportional-hazards model for COPD status.
#' @param savePlots TRUE or FALSE (default), exports 300 DPI population growth and pyramid plots comparing simulated vs. predicted population
#' @param base_agents Number of agents in the simulation. Default is 1e4.
#' @return the ggsurvplot object
#' @export
validate_survival <- function(savePlots = FALSE, base_agents=1e4) {
    # These are optional dependencies: check availability but never attach
    # them. Because requireNamespace() does not attach, every use below must
    # be namespace-qualified (previously Surv/coxph/cox.zph/theme_* were not).
    if (!requireNamespace("survival", quietly = TRUE)) {
        stop("Package \"survival\" needed for this function to work. Please install it.",
             call. = FALSE)
    }
    if (!requireNamespace("survminer", quietly = TRUE)) {
        stop("Package \"survminer\" needed for this function to work. Please install it.",
             call. = FALSE)
    }
    if (!requireNamespace("ggthemes", quietly = TRUE)) {
        stop("Package \"ggthemes\" needed for this function to work. Please install it.",
             call. = FALSE)
    }
    settings <- default_settings
    settings$record_mode <- record_mode["record_mode_event"]
    #settings$agent_stack_size <- 0
    settings$n_base_agents <- base_agents
    #settings$event_stack_size <- 1
    init_session(settings = settings)
    input <- model_input$values #We can work with local copy more conveniently and submit it to the Run function
    run(input = input)
    events <- as.data.frame(Cget_all_events_matrix())
    terminate_session()
    # Keep event codes 7, 13 and 14; code 14 is the censoring (exit) record,
    # so death is defined below as "last retained record is not an exit".
    cohort <- subset(events, ((event==7) | (event==13) | (event==14)))
    # Keep each agent's final relevant record (or their exit record when it is
    # the agent's only retained row).
    cohort <- cohort %>% filter((id==lead(id) | ((event == 14) & id!=lag(id))))
    cohort$copd <- (cohort$gold>0)
    cohort$death <- (cohort$event!=14)
    cohort$age <- (cohort$age_at_creation+cohort$local_time)
    fit <- survival::survfit(survival::Surv(age, death) ~ copd, data=cohort)
    # Customized survival curves
    surv_plot <- survminer::ggsurvplot(fit, data = cohort, censor.shape="", censor.size = 1,
                                       surv.median.line = "hv", # Add medians survival
                                       # Change legends: title & labels
                                       legend.title = "Disease Status",
                                       legend.labs = c("Non-COPD", "COPD"),
                                       # Add p-value and tervals
                                       pval = TRUE,
                                       conf.int = TRUE,
                                       xlim = c(40,110), # present narrower X axis, but not affect
                                       # survival estimates.
                                       xlab = "Age", # customize X axis label.
                                       break.time.by = 20, # break X axis in time intervals by 500.
                                       # Add risk table
                                       #risk.table = TRUE,
                                       tables.height = 0.2,
                                       tables.theme = survminer::theme_cleantable(),
                                       ggtheme = ggthemes::theme_tufte() +
                                           theme(axis.line = element_line(colour = "black"),
                                                 panel.grid.major = element_blank(),
                                                 panel.grid.minor = element_blank(),
                                                 panel.border = element_blank(),
                                                 panel.background = element_blank()) # Change ggplot2 theme
    )
    plot(surv_plot)
    # ggsave() needs a ggplot; the main curve of a ggsurvplot lives in $plot.
    if (savePlots) ggsave((paste0("survival-diagnosed", ".tiff")), plot = surv_plot$plot, device = "tiff", dpi = 300)
    fitcox <- survival::coxph(survival::Surv(age, death) ~ copd, data = cohort)
    # Test the proportional-hazards assumption and report it alongside the fit
    # (previously computed but silently discarded).
    ftest <- survival::cox.zph(fitcox)
    print(summary(fitcox))
    print(ftest)
    return(surv_plot)
}
#' Returns results of validation tests for diagnosis
#'
#' Runs the model and reports, over model time, the proportion of COPD
#' patients who are diagnosed - overall and by GOLD severity - with
#' accompanying plots.
#' @param n_sim number of agents
#' @return validation test results (printed and plotted as side effects)
#' @export
validate_diagnosis <- function(n_sim = 1e+04) {
    message("Let's take a look at diagnosis\n")
    petoc()
    settings <- default_settings
    settings$record_mode <- record_mode["record_mode_none"]
    settings$agent_stack_size <- 0
    settings$n_base_agents <- n_sim
    settings$event_stack_size <- 0
    init_session(settings = settings)
    input <- model_input$values
    res <- run(input = input)
    if (res < 0)
        stop("Execution stopped.\n")
    inputs <- Cget_inputs()
    output_ex <- Cget_output_ex()
    message("Here are the proportion of COPD patients diagnosed over model time: \n")
    # Renamed from `diag` to avoid masking base::diag.
    diag_df <- data.frame(Year=1:inputs$global_parameters$time_horizon,
                          COPD=rowSums(output_ex$n_COPD_by_ctime_sex),
                          Diagnosed=rowSums(output_ex$n_Diagnosed_by_ctime_sex))
    diag_df$Proportion <- round(diag_df$Diagnosed/diag_df$COPD,2)
    print(diag_df)
    # Separator spaces added: message() concatenates its arguments without
    # padding, so the old call printed e.g. "from year10to20is0.5".
    message("The average proportion diagnosed from year ", round(length(diag_df$Proportion)/2,0), " to ", length(diag_df$Proportion), " is ",
        mean(diag_df$Proportion[(round(length(diag_df$Proportion)/2,0)):(length(diag_df$Proportion))]),"\n")
    diag.plot <- tidyr::gather(data=diag_df, key="Variable", value="Number", c(COPD,Diagnosed))
    diag.plotted <- ggplot2::ggplot(diag.plot, aes(x=Year, y=Number, col=Variable)) +
        geom_line() + geom_point() + expand_limits(y = 0) +
        theme_bw() + ylab("Number of COPD patients") + xlab("Years")
    plot(diag.plotted)
    message("\n")
    message("Now let's look at the proportion diagnosed by COPD severity.\n")
    # Columns 3:6 of the ratio matrix are GOLD stages 1-4 (drop the no-COPD column).
    prop <- data.frame(Year=1:inputs$global_parameters$time_horizon,
                       output_ex$n_Diagnosed_by_ctime_severity/output_ex$n_COPD_by_ctime_severity)[,c(1,3,4,5,6)]
    names(prop) <- c("Year","GOLD1","GOLD2","GOLD3","GOLD4")
    prop <- prop[-1,]  # drop year 1 (no meaningful follow-up yet)
    print(prop)
    message("The average proportion of GOLD 1 and 2 that are diagnosed from year ", round(nrow(prop)/2,0), " to ", max(prop$Year), " is ",
        (mean(prop$GOLD1[round((nrow(prop)/2),0):nrow(prop)]) + mean(prop$GOLD2[round((nrow(prop)/2),0):nrow(prop)]))/2,"\n")
    prop.plot <- tidyr::gather(data=prop, key="GOLD", value="Proportion", c(GOLD1:GOLD4))
    prop.plotted <- ggplot2::ggplot(prop.plot, aes(x=Year, y=Proportion, col=GOLD)) +
        geom_line() + geom_point() + expand_limits(y = 0) +
        theme_bw() + ylab("Proportion diagnosed") + xlab("Years")
    plot(prop.plotted)
    terminate_session()
}
#' Returns results of validation tests for GP visits
#'
#' Runs the model and reports the average annual number of GP visits per
#' person, broken down by sex, by COPD severity, and by diagnosis status,
#' printing each table and plotting the corresponding time series.
#' @param n_sim number of agents
#' @return validation test results (printed and plotted as side effects)
#' @export
validate_gpvisits <- function(n_sim = 1e+04) {
    message("Let's take a look at GP visits\n")
    petoc()
    # Aggregate outputs only: no event or agent recording is required.
    sim_settings <- default_settings
    sim_settings$record_mode <- record_mode["record_mode_none"]
    sim_settings$agent_stack_size <- 0
    sim_settings$n_base_agents <- n_sim
    sim_settings$event_stack_size <- 0
    init_session(settings = sim_settings)
    run_status <- run(input = model_input$values)
    if (run_status < 0)
        stop("Execution stopped.\n")
    inputs <- Cget_inputs()
    output_ex <- Cget_output_ex()
    years <- 1:inputs$global_parameters$time_horizon
    # --- GP visits per person-year, by sex ---
    message("\n")
    message("Here is the Average number of GP visits by sex:\n")
    visits_sex <- data.frame(years, output_ex$n_GPvisits_by_ctime_sex/output_ex$n_alive_by_ctime_sex)
    names(visits_sex) <- c("Year", "Male", "Female")
    print(visits_sex)
    visits_sex_long <- tidyr::gather(data = visits_sex, key = "Sex", value = "Visits", c(Male, Female))
    visits_sex_long <- subset(visits_sex_long, Year != 1)  # year 1 is a burn-in point
    sex_fig <- ggplot2::ggplot(visits_sex_long, aes(x = Year, y = Visits, col = Sex)) +
        geom_line() + geom_point() + expand_limits(y = 0) +
        theme_bw() + ylab("Average GP visits/year") + xlab("Years")
    plot(sex_fig)
    # --- GP visits per person-year, by COPD severity ---
    message("\n")
    message("Here is the Average number of GP visits by COPD severity:\n")
    visits_gold <- data.frame(years, output_ex$n_GPvisits_by_ctime_severity/output_ex$cumul_time_by_ctime_GOLD)
    names(visits_gold) <- c("Year", "NoCOPD", "GOLD1", "GOLD2", "GOLD3", "GOLD4")
    print(visits_gold[-1, ])
    visits_gold_long <- tidyr::gather(data = visits_gold, key = "COPD", value = "Visits", c(NoCOPD:GOLD4))
    visits_gold_long <- subset(visits_gold_long, Year != 1)
    gold_fig <- ggplot2::ggplot(visits_gold_long, aes(x = Year, y = Visits, col = COPD)) +
        geom_line() + geom_point() + expand_limits(y = 0) +
        theme_bw() + ylab("Average GP visits/year") + xlab("Years")
    plot(gold_fig)
    # --- GP visits per person-year, by diagnosis status ---
    message("\n")
    message("Here is the Average number of GP visits by COPD diagnosis status:\n")
    # NB: these two variable names become the denominator's column names via
    # cbind(), and the gather() below selects columns by those names.
    Diagnosed <- rowSums(output_ex$n_Diagnosed_by_ctime_sex)
    Undiagnosed <- rowSums(output_ex$cumul_time_by_ctime_GOLD[, 2:5]) - Diagnosed
    denom <- cbind(Undiagnosed, Diagnosed)
    visits_diag <- data.frame(Year = years,
                              output_ex$n_GPvisits_by_ctime_diagnosis/denom)
    print(visits_diag[-1, ])
    visits_diag_long <- tidyr::gather(data = visits_diag, key = "Diagnosis", value = "Visits", c(Undiagnosed, Diagnosed))
    visits_diag_long <- subset(visits_diag_long, Year != 1)
    diag_fig <- ggplot2::ggplot(visits_diag_long, aes(x = Year, y = Visits, col = Diagnosis)) +
        geom_line() + geom_point() + expand_limits(y = 0) +
        theme_bw() + ylab("Average GP visits/year") + xlab("Years")
    plot(diag_fig)
    message("\n")
    terminate_session()
}
#' Returns results of validation tests for Symptoms
#'
#' Runs the model and, for each of the four symptoms (cough, phlegm, wheeze,
#' dyspnea), prints the per-year prevalence by GOLD stage and plots all four
#' prevalence series together, faceted by symptom.
#' @param n_sim number of agents
#' @return validation test results (printed and plotted as side effects)
#' @export
validate_symptoms <- function(n_sim = 1e+04) {
    message("Let's take a look at symptoms\n")
    petoc()
    settings <- default_settings
    settings$record_mode <- record_mode["record_mode_none"]
    settings$agent_stack_size <- 0
    settings$n_base_agents <- n_sim
    settings$event_stack_size <- 0
    init_session(settings = settings)
    input <- model_input$values
    res <- run(input = input)
    if (res < 0)
        stop("Execution stopped.\n")
    inputs <- Cget_inputs()
    output_ex <- Cget_output_ex()
    # Helper shared by all four symptoms (this body was previously
    # copy-pasted four times): build the per-year prevalence table for one
    # symptom, print it, and return it in long format with a Symptom label
    # for the combined plot. The per-symptom ggplot objects that were built
    # but never displayed have been dropped.
    symptom_prevalence <- function(n_symptom_by_ctime_severity, label) {
        wide <- data.frame(1:inputs$global_parameters$time_horizon,
                           n_symptom_by_ctime_severity/output_ex$n_COPD_by_ctime_severity)
        names(wide) <- c("Year","NoCOPD","GOLD1","GOLD2","GOLD3","GOLD4")
        print(wide)
        long <- tidyr::gather(data=wide, key="GOLD", value="Prevalence", NoCOPD:GOLD4)
        long$Symptom <- label
        long
    }
    message("\n")
    message("I'm going to plot the prevalence of each symptom over time and by GOLD stage\n")
    message("\n")
    message("Cough:\n")
    message("\n")
    cough.plot <- symptom_prevalence(output_ex$n_cough_by_ctime_severity, "cough")
    message("\n")
    message("Phlegm:\n")
    message("\n")
    phlegm.plot <- symptom_prevalence(output_ex$n_phlegm_by_ctime_severity, "phlegm")
    message("\n")
    message("Wheeze:\n")
    message("\n")
    wheeze.plot <- symptom_prevalence(output_ex$n_wheeze_by_ctime_severity, "wheeze")
    message("\n")
    message("Dyspnea:\n")
    message("\n")
    dyspnea.plot <- symptom_prevalence(output_ex$n_dyspnea_by_ctime_severity, "dyspnea")
    message("\n")
    message("All symptoms plotted together:\n")
    all.plot <- rbind(cough.plot, phlegm.plot, wheeze.plot, dyspnea.plot)
    all.plotted <- ggplot2::ggplot(all.plot, aes(x=Year, y=Prevalence, col=GOLD)) +
        geom_smooth(method=lm, formula = y~x, level=0) + geom_point() + facet_wrap(~Symptom) +
        expand_limits(y = 0) + theme_bw() + ylab("Proportion with symptom") + xlab("Model Year")
    plot(all.plotted)
    terminate_session()
}
#' Returns results of validation tests for Treatment
#'
#' Checks that treatment (initiated at diagnosis) reduces the exacerbation
#' rate, in three stages: (1) compare exacerbation rates between diagnosed
#' and undiagnosed COPD patients in a baseline run; (2) re-run with the
#' medication hazard ratios zeroed and confirm exacerbations increase among
#' the diagnosed; (3) force everyone undiagnosed, then everyone diagnosed,
#' and compare the resulting overall exacerbation rates.
#' @param n_sim number of agents
#' @return validation test results (printed and plotted as side effects)
#' @export
validate_treatment<- function(n_sim = 1e+04) {
    message("Let's make sure that treatment (which is initiated at diagnosis) is affecting the exacerbation rate.\n")
    petoc()
    settings <- default_settings
    settings$record_mode <- record_mode["record_mode_none"]
    settings$agent_stack_size <- 0
    settings$n_base_agents <- n_sim
    settings$event_stack_size <- 0
    init_session(settings = settings)
    input <- model_input$values
    res <- run(input = input)
    if (res < 0)
        stop("Execution stopped.\n")
    inputs <- Cget_inputs()
    output_ex <- Cget_output_ex()
    # --- Stage 1: baseline exacerbation rates by diagnosis status ---
    message("\n")
    message("Exacerbation rate for undiagnosed COPD patients.\n")
    message("\n")
    # Denominator: COPD patients (all severities, dropping the no-COPD column)
    # minus the diagnosed ones.
    undiagnosed <- data.frame(cbind(1:inputs$global_parameters$time_horizon, output_ex$n_exac_by_ctime_severity_undiagnosed/
                                        (rowSums(output_ex$n_COPD_by_ctime_severity[,-1]) - rowSums(output_ex$n_Diagnosed_by_ctime_sex))))
    names(undiagnosed) <- c("Year","Mild","Moderate","Severe","VerySevere")
    print(undiagnosed)
    undiagnosed$Diagnosis <- "undiagnosed"
    message("\n")
    message("Exacerbation rate for diagnosed COPD patients.\n")
    message("\n")
    diagnosed <- data.frame(cbind(1:inputs$global_parameters$time_horizon,
                                  output_ex$n_exac_by_ctime_severity_diagnosed/rowSums(output_ex$n_Diagnosed_by_ctime_sex)))
    # Year 1 has no diagnosed patients yet (division by zero); zero it out.
    diagnosed[1,2:5] <- c(0,0,0,0)
    names(diagnosed) <- c("Year","Mild","Moderate","Severe","VerySevere")
    print(diagnosed)
    diagnosed$Diagnosis <- "diagnosed"
    # plot
    exac.plot <- tidyr::gather(data=rbind(undiagnosed, diagnosed), key="Exacerbation", value="Rate", Mild:VerySevere)
    exac.plotted <- ggplot2::ggplot(exac.plot, aes(x=Year, y=Rate, fill=Diagnosis)) +
        geom_bar(stat="identity", position="dodge") + facet_wrap(~Exacerbation, labeller=label_both) +
        scale_y_continuous(expand = c(0, 0)) +
        xlab("Model Year") + ylab("Annual rate of exacerbations") + theme_bw()
    plot(exac.plotted)
    message("\n")
    terminate_session()
    ### --- Stage 2: zero the treatment effect and recount exacerbations ---
    message("\n")
    message("Now, set the treatment effects to 0 and make sure the number of exacerbations increased among diagnosed patients.\n")
    message("\n")
    init_session(settings = settings)
    input_nt <- model_input$values
    # Log hazard ratio of 0 per medication class = no treatment effect on exacerbations.
    input_nt$medication$medication_ln_hr_exac <- rep(0, length(inputs$medication$medication_ln_hr_exac))
    res <- run(input = input_nt)
    if (res < 0)
        stop("Execution stopped.\n")
    inputs_nt <- Cget_inputs()
    output_ex_nt <- Cget_output_ex()
    # Excess exacerbations among the diagnosed when treatment has no effect.
    exac.diff <- data.frame(cbind(1:inputs_nt$global_parameters$time_horizon,
                                  output_ex_nt$n_exac_by_ctime_severity_diagnosed - output_ex$n_exac_by_ctime_severity_diagnosed))
    names(exac.diff) <- c("Year","Mild","Moderate","Severe","VerySevere")
    message("Without treatment, there was an average of:\n")
    message(mean(exac.diff$Mild),"more mild exacerbations,\n")
    message(mean(exac.diff$Moderate),"more moderate exacerbations,\n")
    message(mean(exac.diff$Severe),"more severe exacerbations, and\n")
    message(mean(exac.diff$VerySevere),"more very severe exacerbations per year.\n")
    ### --- Stage 3: force no diagnosis, then universal diagnosis ---
    # NOTE(review): the stage-2 session is not terminated before re-initializing
    # here - confirm init_session() tolerates an already-open session.
    message("\n")
    message("Now, set all COPD patients to diagnosed, then undiagnosed, and compare the exacerbation rates.\n")
    message("\n")
    init_session(settings = settings)
    input_nd <- model_input$values
    # An intercept of -100 drives the diagnosis logits to probability ~0,
    # i.e. nobody ever gets diagnosed; the other coefficients are the defaults.
    input_nd$diagnosis$logit_p_prevalent_diagnosis_by_sex <- cbind(male=c(intercept=-100, age=-0.0152, smoking=0.1068, fev1=-0.6146,
                                                                          cough=0.075, phlegm=0.283, wheeze=-0.0275, dyspnea=0.5414,
                                                                          case_detection=0),
                                                                   female=c(intercept=-100-0.1638, age=-0.0152, smoking=0.1068, fev1=-0.6146,
                                                                            cough=0.075, phlegm=0.283, wheeze=-0.0275, dyspnea=0.5414,
                                                                            case_detection=0))
    input_nd$diagnosis$p_hosp_diagnosis <- 0
    input_nd$diagnosis$logit_p_diagnosis_by_sex <- cbind(male=c(intercept=-100, age=-0.0324, smoking=0.3711, fev1=-0.8032,
                                                                gpvisits=0.0087, cough=0.208, phlegm=0.4088, wheeze=0.0321, dyspnea=0.722,
                                                                case_detection=0),
                                                         female=c(intercept=-100-0.4873, age=-0.0324, smoking=0.3711, fev1=-0.8032,
                                                                  gpvisits=0.0087, cough=0.208, phlegm=0.4088, wheeze=0.0321, dyspnea=0.722,
                                                                  case_detection=0))
    input_nd$diagnosis$logit_p_overdiagnosis_by_sex <- cbind(male=c(intercept=-100, age=0.0025, smoking=0.6911, gpvisits=0.0075,
                                                                    cough=0.7264, phlegm=0.7956, wheeze=0.66, dyspnea=0.8798,
                                                                    case_detection=0),
                                                             female=c(intercept=-100+0.2597, age=0.0025, smoking=0.6911, gpvisits=0.0075,
                                                                      cough=0.7264, phlegm=0.7956, wheeze=0.66, dyspnea=0.8798,
                                                                      case_detection=0))
    res <- run(input = input_nd)
    if (res < 0)
        stop("Execution stopped.\n")
    output_ex_nd <- Cget_output_ex()
    # Overall exacerbation rate per COPD patient when nobody is diagnosed.
    exac_rate_nodiag <- rowSums(output_ex_nd$n_exac_by_ctime_severity)/rowSums(output_ex_nd$n_COPD_by_ctime_sex)
    terminate_session()
    ### Now the opposite: intercept +100 drives diagnosis probability to ~1.
    init_session(settings = settings)
    input_d <- model_input$values
    input_d$diagnosis$logit_p_prevalent_diagnosis_by_sex <- cbind(male=c(intercept=100, age=-0.0152, smoking=0.1068, fev1=-0.6146,
                                                                         cough=0.075, phlegm=0.283, wheeze=-0.0275, dyspnea=0.5414,
                                                                         case_detection=0),
                                                                  female=c(intercept=100-0.1638, age=-0.0152, smoking=0.1068, fev1=-0.6146,
                                                                           cough=0.075, phlegm=0.283, wheeze=-0.0275, dyspnea=0.5414,
                                                                           case_detection=0))
    input_d$diagnosis$p_hosp_diagnosis <- 1
    input_d$diagnosis$logit_p_diagnosis_by_sex <- cbind(male=c(intercept=100, age=-0.0324, smoking=0.3711, fev1=-0.8032,
                                                               gpvisits=0.0087, cough=0.208, phlegm=0.4088, wheeze=0.0321, dyspnea=0.722,
                                                               case_detection=0),
                                                        female=c(intercept=100-0.4873, age=-0.0324, smoking=0.3711, fev1=-0.8032,
                                                                 gpvisits=0.0087, cough=0.208, phlegm=0.4088, wheeze=0.0321, dyspnea=0.722,
                                                                 case_detection=0))
    res <- run(input = input_d)
    if (res < 0)
        stop("Execution stopped.\n")
    inputs_d <- Cget_inputs()
    output_ex_d <- Cget_output_ex()
    exac_rate_diag <- rowSums(output_ex_d$n_exac_by_ctime_severity)/rowSums(output_ex_d$n_COPD_by_ctime_sex)
    ## Compare the two extremes: Delta is the relative reduction due to treatment.
    message("Annual exacerbation rate (this is also plotted):\n")
    message("\n")
    trt_effect<- data.frame(Year=1:inputs_d$global_parameters$time_horizon,
                            Diagnosed = exac_rate_diag,
                            Undiagnosed = exac_rate_nodiag)
    trt_effect$Delta <- (trt_effect$Undiagnosed - trt_effect$Diagnosed)/trt_effect$Undiagnosed
    print(trt_effect)
    message("\n")
    message("Treatment reduces the rate of exacerbations by a mean of:", mean(trt_effect$Delta),"\n")
    # plot
    trt.plot <- tidyr::gather(data=trt_effect, key="Diagnosis", value="Rate", Diagnosed:Undiagnosed)
    trt.plotted <- ggplot2::ggplot(trt.plot, aes(x=Year, y=Rate, col=Diagnosis)) +
        geom_line() + geom_point() + expand_limits(y = 0) +
        theme_bw() + ylab("Annual exacerbation rate") + xlab("Years")
    plot(trt.plotted)
    terminate_session()
}
#' Returns results of Case Detection strategies
#'
#' Runs the model twice - once with the requested case-detection strategy and
#' once with case detection switched off - and compares total exacerbations,
#' exacerbation rates by GOLD stage, and the GOLD-stage distribution.
#' @param n_sim number of agents
#' @param p_of_CD probability of recieving case detection given that an agent meets the selection criteria
#' @param min_age minimum age that can recieve case detection
#' @param min_pack_years minimum pack years that can recieve case detection
#' @param only_smokers set to 1 if only smokers should recieve case detection
#' @param CD_method Choose one case detection method: "CDQ195", "CDQ165", "FlowMeter", "FlowMeter_CDQ"
#' @return results of case detection strategy compared to no case detection
#'   (printed and plotted as side effects)
#' @export
test_case_detection <- function(n_sim = 1e+04, p_of_CD=0.1, min_age=40, min_pack_years=0, only_smokers=0, CD_method="CDQ195") {
    message("Comparing a case detection strategy to no case detection.\n")
    petoc()
    settings <- default_settings
    settings$record_mode <- record_mode["record_mode_none"]
    # settings$agent_stack_size <- 0
    settings$n_base_agents <- n_sim
    settings$event_stack_size <- 0
    init_session(settings = settings)
    # --- Run 1: with the case-detection strategy ---
    input <- model_input$values
    input$diagnosis$p_case_detection <- p_of_CD
    input$diagnosis$min_cd_age <- min_age
    input$diagnosis$min_cd_pack_years <- min_pack_years
    input$diagnosis$min_cd_smokers <-only_smokers
    # Default diagnosis coefficients, with the case_detection term set to the
    # effect size of the chosen method (row 1 = diagnosis, row 2 = overdiagnosis
    # of the case_detection_methods table - TODO confirm row semantics).
    input$diagnosis$logit_p_prevalent_diagnosis_by_sex <- cbind(male=c(intercept=1.0543, age=-0.0152, smoking=0.1068, fev1=-0.6146,
                                                                       cough=0.075, phlegm=0.283, wheeze=-0.0275, dyspnea=0.5414,
                                                                       case_detection=input$diagnosis$case_detection_methods[1,CD_method]),
                                                                female=c(intercept=1.0543-0.1638, age=-0.0152, smoking=0.1068, fev1=-0.6146,
                                                                         cough=0.075, phlegm=0.283, wheeze=-0.0275, dyspnea=0.5414,
                                                                         case_detection=input$diagnosis$case_detection_methods[1,CD_method]))
    input$diagnosis$logit_p_diagnosis_by_sex <- cbind(male=c(intercept=-2, age=-0.0324, smoking=0.3711, fev1=-0.8032,
                                                             gpvisits=0.0087, cough=0.208, phlegm=0.4088, wheeze=0.0321, dyspnea=0.722,
                                                             case_detection=input$diagnosis$case_detection_methods[1,CD_method]),
                                                      female=c(intercept=-2-0.4873, age=-0.0324, smoking=0.3711, fev1=-0.8032,
                                                               gpvisits=0.0087, cough=0.208, phlegm=0.4088, wheeze=0.0321, dyspnea=0.722,
                                                               case_detection=input$diagnosis$case_detection_methods[1,CD_method]))
    input$diagnosis$logit_p_overdiagnosis_by_sex <- cbind(male=c(intercept=-5.2169, age=0.0025, smoking=0.6911, gpvisits=0.0075,
                                                                 cough=0.7264, phlegm=0.7956, wheeze=0.66, dyspnea=0.8798,
                                                                 case_detection=input$diagnosis$case_detection_methods[2,CD_method]),
                                                          female=c(intercept=-5.2169+0.2597, age=0.0025, smoking=0.6911, gpvisits=0.0075,
                                                                   cough=0.7264, phlegm=0.7956, wheeze=0.66, dyspnea=0.8798,
                                                                   case_detection=input$diagnosis$case_detection_methods[2,CD_method]))
    message("\n")
    message("Here are your inputs for the case detection strategy:\n")
    message("\n")
    print(input$diagnosis)
    res <- run(input = input)
    if (res < 0)
        stop("Execution stopped.\n")
    inputs <- Cget_inputs()
    output <- Cget_output()
    output_ex <- Cget_output_ex()
    # Exacerbations: totals by severity under case detection.
    exac <- output$total_exac
    names(exac) <- c("Mild","Moderate","Severe","VerySevere")
    # rate: exacerbations per person-year spent in each GOLD stage.
    total.gold <- colSums(output_ex$n_COPD_by_ctime_severity[,2:5])
    names(total.gold) <- c("GOLD1","GOLD2","GOLD3","GOLD4")
    exac.gs <- data.frame(output_ex$n_exac_by_gold_severity)
    colnames(exac.gs) <- c("Mild","Moderate","Severe","VerySevere")
    exac_rate <- rbind(GOLD1=exac.gs[1,]/total.gold[1],
                       GOLD2=exac.gs[2,]/total.gold[2],
                       GOLD3=exac.gs[3,]/total.gold[3],
                       GOLD4=exac.gs[4,]/total.gold[4])
    exac_rate$CD <- "Case detection"
    exac_rate$GOLD <- rownames(exac_rate)
    # GOLD: average proportion of alive agents in each stage over model time.
    gold <- data.frame(CD="Case detection",
                       Proportion=colMeans(output_ex$n_COPD_by_ctime_severity/rowSums(output_ex$n_alive_by_ctime_sex)))
    gold$GOLD <- c("NoCOPD","GOLD1","GOLD2","GOLD3","GOLD4")
    terminate_session()
    ## --- Run 2: identical model with case detection disabled ---
    init_session(settings = settings)
    input_nocd <- model_input$values
    input_nocd$diagnosis$p_case_detection <- 0
    message("\n")
    message("Now setting the probability of case detection to", input_nocd$diagnosis$p_case_detection, "and re-running the model\n")
    message("\n")
    res <- run(input = input_nocd)
    if (res < 0)
        stop("Execution stopped.\n")
    inputs_nocd <- Cget_inputs()
    output_nocd <- Cget_output()
    output_ex_nocd <- Cget_output_ex()
    # Exacerbations (same summaries as run 1, no-case-detection arm).
    exac_nocd <- output_nocd$total_exac
    names(exac_nocd) <- c("Mild","Moderate","Severe","VerySevere")
    # rate
    total.gold_nocd <- colSums(output_ex_nocd$n_COPD_by_ctime_severity[,2:5])
    names(total.gold_nocd) <- c("GOLD1","GOLD2","GOLD3","GOLD4")
    exac.gs_nocd <- data.frame(output_ex_nocd$n_exac_by_gold_severity)
    colnames(exac.gs_nocd) <- c("Mild","Moderate","Severe","VerySevere")
    exac_rate_nocd <- rbind(GOLD1=exac.gs_nocd[1,]/total.gold_nocd[1],
                            GOLD2=exac.gs_nocd[2,]/total.gold_nocd[2],
                            GOLD3=exac.gs_nocd[3,]/total.gold_nocd[3],
                            GOLD4=exac.gs_nocd[4,]/total.gold_nocd[4])
    exac_rate_nocd$CD <- "No Case detection"
    exac_rate_nocd$GOLD <- rownames(exac_rate_nocd)
    # GOLD
    gold_nocd<- data.frame(CD="No case detection",
                           Proportion=colMeans(output_ex_nocd$n_COPD_by_ctime_severity/rowSums(output_ex_nocd$n_alive_by_ctime_sex)))
    gold_nocd$GOLD <- c("NoCOPD","GOLD1","GOLD2","GOLD3","GOLD4")
    ## --- Difference between CD and No CD ---
    # Exacerbations
    exac.diff <- data.frame(cbind(CD=exac, NOCD=exac_nocd))
    exac.diff$Delta <- exac.diff$CD - exac.diff$NOCD
    message("Here are total number of exacerbations by severity:\n")
    message("\n")
    print(exac.diff)
    message("\n")
    message("The annual rate of exacerbations with case detection is:\n")
    print(exac_rate[,1:4])
    message("\n")
    message("The annual rate of exacerbations without case detection is:\n")
    print(exac_rate_nocd[,1:4])
    message("\n")
    message("This data is also plotted.\n")
    #plot
    exac.plot <- tidyr::gather(rbind(exac_rate, exac_rate_nocd), key="Exacerbation", value="Rate", Mild:VerySevere)
    exac.plotted <-ggplot2::ggplot(exac.plot, aes(x=Exacerbation, y=Rate, fill=CD)) +
        geom_bar(stat="identity", position="dodge") + facet_wrap(~GOLD, scales="free_y") +
        # NOTE(review): expand_scale() is deprecated in ggplot2 >= 3.3 in
        # favour of expansion() - consider updating when bumping ggplot2.
        scale_y_continuous(expand = expand_scale(mult=c(0, 0.1))) +
        xlab("Exacerbation") + ylab("Annual rate of exacerbations") + theme_bw()
    exac.plotted <- exac.plotted + theme(axis.text.x=element_text(angle=45, hjust=1)) +
        theme(legend.title = element_blank())
    plot(exac.plotted)
    # GOLD
    # plot
    message("\n")
    message("The average proportion of agents in each gold stage is also plotted.\n")
    gold.plot <- rbind(gold, gold_nocd)
    gold.plot$GOLD <- factor(gold.plot$GOLD, levels=c("NoCOPD","GOLD1","GOLD2","GOLD3","GOLD4"))
    gold.plotted <- ggplot2::ggplot(gold.plot, aes(x=GOLD, y=Proportion, fill=CD)) +
        geom_bar(stat="identity", position="dodge") +
        scale_y_continuous(expand = c(0,0), limits=c(0,1)) +
        xlab("GOLD stage") + ylab("Average proportion") + theme_bw()
    gold.plotted <- gold.plotted + theme(legend.title = element_blank())
    plot(gold.plotted)
    message("\n")
    terminate_session()
}
#' Returns results of validation tests for overdiagnosis
#' @param n_sim number of agents
#' @return validation test results
#' @export
validate_overdiagnosis <- function(n_sim = 1e+04) {
  message("Let's take a look at overdiagnosis\n")
  petoc()
  # Aggregate outputs are enough here, so disable event/agent recording.
  settings <- default_settings
  settings$record_mode <- record_mode["record_mode_none"]
  settings$agent_stack_size <- 0
  settings$n_base_agents <- n_sim
  settings$event_stack_size <- 0
  init_session(settings = settings)
  input <- model_input$values
  res <- run(input = input)
  if (res < 0)
    stop("Execution stopped.\n")
  inputs <- Cget_inputs()
  output_ex <- Cget_output_ex()
  message("Here are the proportion of non-COPD subjects overdiagnosed over model time: \n")
  # Column 1 of n_COPD_by_ctime_severity holds the non-COPD (GOLD 0) counts
  # per calendar year.
  overdiag <- data.frame(Year = 1:inputs$global_parameters$time_horizon,
                         NonCOPD = output_ex$n_COPD_by_ctime_severity[, 1],
                         Overdiagnosed = rowSums(output_ex$n_Overdiagnosed_by_ctime_sex))
  overdiag$Proportion <- overdiag$Overdiagnosed/overdiag$NonCOPD
  print(overdiag)
  # Average over the second half of the time horizon.
  # Bug fix: message() concatenates its arguments WITHOUT separators (unlike
  # cat()), so explicit spaces are required around the interpolated values;
  # the old call printed e.g. "from year15to30is0.012".
  half_start <- round(length(overdiag$Proportion)/2, 0)
  message("The average proportion overdiagnosed from year ", half_start, " to ", length(overdiag$Proportion), " is ",
          mean(overdiag$Proportion[half_start:(length(overdiag$Proportion))]), "\n")
  overdiag.plot <- tidyr::gather(data = overdiag, key = "Variable", value = "Number", c(NonCOPD, Overdiagnosed))
  overdiag.plotted <- ggplot2::ggplot(overdiag.plot, aes(x = Year, y = Number, col = Variable)) +
    geom_line() + geom_point() + expand_limits(y = 0) +
    theme_bw() + ylab("Number of non-COPD subjects") + xlab("Years")
  plot(overdiag.plotted)
  message("\n")
  terminate_session()
}
#' Returns results of validation tests for medication module.
#' @param n_sim number of agents
#' @return validation test results for medication
#' @export
validate_medication <- function(n_sim = 5e+04) {
  message("\n")
  # Bug fix: a global cat()->message() rename had corrupted the word
  # "medication" into "medimessageion" inside this user-facing string.
  message("Plotting medication usage over time:")
  message("\n")
  petoc()
  # Event-level recording is required to reconstruct annual medication status.
  settings <- default_settings
  settings$record_mode <- record_mode["record_mode_event"]
  settings$agent_stack_size <- 0
  settings$n_base_agents <- n_sim
  settings$event_stack_size <- settings$n_base_agents * 1.7 * 30
  init_session(settings = settings)
  input <- model_input$values
  res <- run(input = input)
  if (res < 0)
    stop("Execution stopped.\n")
  all_events <- as.data.frame(Cget_all_events_matrix())
  all_annual_events <- all_events[all_events$event == 1, ] # only annual event
  # Proportion of agents on each medication class, per calendar year and GOLD stage.
  all_annual_events$time <- floor(all_annual_events$local_time + all_annual_events$time_at_creation)
  med.plot <- all_annual_events %>%
    group_by(time, gold) %>%
    count(medication_status) %>%
    mutate(prop = n/sum(n))
  med.plot$gold <- as.character(med.plot$gold)
  # Overall proportions among COPD patients (any GOLD stage > 0).
  copd <- med.plot %>%
    filter(gold > 0) %>%
    group_by(time, medication_status) %>%
    summarise(n = sum(n)) %>%
    mutate(prop = n/sum(n), gold = "all copd") %>%
    select(time, gold, everything())
  med.plot <- rbind(med.plot, copd)
  # Decode the medication_status code into readable labels; any value not
  # listed below is lumped into "9".
  # NOTE(review): confirm these codes against the medication module's definitions.
  med.plot$medication_status <- ifelse(med.plot$medication_status == 0, "none",
                                ifelse(med.plot$medication_status == 1, "SABA",
                                ifelse(med.plot$medication_status == 4, "LAMA",
                                ifelse(med.plot$medication_status == 6, "LAMA/LABA",
                                ifelse(med.plot$medication_status == 14, "ICS/LAMA/LABA", 9)))))
  med.plotted <- ggplot2::ggplot(data = med.plot, aes(x = time, y = prop, col = medication_status)) +
    geom_line() + facet_wrap(~gold, labeller = label_both) +
    expand_limits(y = 0) + theme_bw() + ylab("Proportion per medication class") + xlab("Years") +
    theme(legend.title = element_blank())
  plot(med.plotted)
  terminate_session()
}
|
/R/validation.R
|
no_license
|
tyhlee/epicR
|
R
| false
| false
| 72,108
|
r
|
# Global flag read by petoc(): 0 = interactive run (pause and wait for [Enter]
# between steps), non-zero = report generation (no pauses, readline() skipped).
report_mode <- 1
# If 1, we are generating a report!
# Pause-to-continue helper ("Press Enter TO Continue"). In interactive mode
# (global report_mode == 0) it waits for the user to press [Enter]; entering
# "q" terminates the simulation session and aborts with an error. In report
# mode (report_mode != 0) it is a no-op.
petoc <- function() {
  if (report_mode != 0) {
    return(invisible(NULL))
  }
  message("Press [Enter] to continue")
  answer <- readline()
  if (answer == "q") {
    terminate_session()
    stop("User asked for termination.\n")
  }
}
#' Basic tests of model functionality. Serious issues if the test does not pass.
#' @return 0 on completion; individual test results are reported via message()
#' @export
sanity_check <- function() {
  init_session()
  # Test 1: with all cost inputs zeroed, total cost must be exactly zero.
  # (Was cat(); changed to message() for consistency with the other tests.)
  message("test 1: zero all costs\n")
  input <- model_input$values
  for (el in get_list_elements(input$cost)) input$cost[[el]] <- input$cost[[el]] * 0
  res <- run(1, input = input)
  if (Cget_output()$total_cost != 0)
    message("Test failed!") else message("Test passed!")
  # Test 2: with all utility inputs zeroed, total QALYs must be exactly zero.
  message("test 2: zero all utilities\n")
  input <- model_input$values
  for (el in get_list_elements(input$utility)) input$utility[[el]] <- input$utility[[el]] * 0
  res <- run(input = input)
  if (Cget_output()$total_qaly != 0)
    message("Test failed!") else message("Test passed!")
  # Test 3: with every utility set to 1 and no exacerbation disutility or
  # discounting, each patient-year must yield exactly one QALY.
  # (Typo fix in the message: "ad get" -> "and get".)
  message("test 3: one all utilities and get one QALY without discount\n")
  input <- model_input$values
  input$global_parameters$discount_qaly <- 0
  for (el in get_list_elements(input$utility)) input$utility[[el]] <- input$utility[[el]] * 0 + 1
  input$utility$exac_dutil <- input$utility$exac_dutil * 0
  res <- run(input = input)
  if (Cget_output()$total_qaly/Cget_output()$cumul_time != 1)
    message("Test failed!") else message("Test passed!")
  # Test 4: with background, explicit, and exacerbation mortality all disabled,
  # no deaths should occur. Unlike tests 1-3, this one stops on failure.
  message("test 4: zero mortality (both bg and exac)\n")
  input <- model_input$values
  input$exacerbation$logit_p_death_by_sex <- input$exacerbation$logit_p_death_by_sex * 0 - 10000000 # log scale'
  input$agent$p_bgd_by_sex <- input$agent$p_bgd_by_sex * 0
  input$manual$explicit_mortality_by_age_sex <- input$manual$explicit_mortality_by_age_sex * 0
  res <- run(input = input)
  if (Cget_output()$n_deaths != 0) {
    message(Cget_output()$n_deaths)
    stop("Test failed!")
  } else message("Test passed!")
  terminate_session()
  return(0)
}
#' Returns results of validation tests for population module
#' @param incidence_k a number (default=1) by which the incidence rate of population will be multiplied.
#' @param remove_COPD 0 or 1, indicating whether COPD-caused mortality should be removed
#' @param savePlots 0 or 1, exports 300 DPI population growth and pyramid plots comparing simulated vs. predicted population
#' @return validation test results
#' @export
validate_population <- function(remove_COPD = 0, incidence_k = 1, savePlots = 0) {
  message("Validate_population(...) is responsible for producing output that can be used to test if the population module is properly calibrated.\n")
  petoc()
  settings <- default_settings
  settings$record_mode <- record_mode["record_mode_none"]
  settings$agent_stack_size <- 0
  settings$n_base_agents <- 1e+06
  settings$event_stack_size <- 1
  init_session(settings = settings)
  input <- model_input$values #We can work with local copy more conveniently and submit it to the Run function
  # Bug fix: message() concatenates its arguments without separators (unlike
  # cat()), so explicit spaces are needed around the interpolated words.
  message("\nBecause you have called me with remove_COPD=", remove_COPD, ", I am ", c("NOT", "indeed")[remove_COPD + 1], " going to remove COPD-related mortality from my calculations")
  petoc()
  # CanSim.052.0005<-read.csv(system.file ('extdata', 'CanSim.052.0005.csv', package = 'epicR'), header = T); #package ready
  # reading
  # StatCan projection: total population per year, normalized to the first year.
  x <- aggregate(CanSim.052.0005[, "value"], by = list(CanSim.052.0005[, "year"]), FUN = sum)
  x[, 2] <- x[, 2]/x[1, 2]
  x <- x[1:input$global_parameters$time_horizon, ]
  plot(x, type = "l", ylim = c(0.5, max(x[, 2] * 1.5)), xlab = "Year", ylab = "Relative population size")
  # Typo fix: "populaton" -> "population".
  title(cex.main = 0.5, "Relative population size")
  message("The plot I just drew is the expected (well, StatCan's predictions) relative population growth from 2015\n")
  petoc()
  if (remove_COPD) {
    input$exacerbation$logit_p_death_by_sex <- -1000 + input$exacerbation$logit_p_death_by_sex
    input$manual$explicit_mortality_by_age_sex <- 0
  }
  # Multiply the population incidence rate by incidence_k (on the log scale).
  input$agent$l_inc_betas[1] <- input$agent$l_inc_betas[1] + log(incidence_k)
  message("working...\n")
  res <- run(input = input)
  if (res < 0) {
    stop("Something went awry; bye!")
    return()
  }
  legend("topright", c("Predicted", "Simulated"), lty = c(1, 1), col = c("black", "red"))
  # NOTE(review): the legend advertises a red "Simulated" line, but no red line
  # is drawn on this base-graphics plot in the visible code -- confirm whether
  # a lines() call was lost; the ggplot below does show both series. (An unused
  # "n_y1_agents" intermediate was removed here.)
  message("And the black one is the observed (simulated) growth\n")
  ######## pretty population growth curve
  CanSim <- tibble::as_tibble(CanSim.052.0005)
  CanSim <- tidyr::spread(CanSim, key = year, value = value)
  CanSim <- CanSim[, 3:51]
  CanSim <- colSums (CanSim)
  df <- data.frame(Year = c(2015:(2015 + model_input$values$global_parameters$time_horizon-1)), Predicted = CanSim[1:model_input$values$global_parameters$time_horizon] * 1000, Simulated = rowSums(Cget_output_ex()$n_alive_by_ctime_sex)/ settings$n_base_agents * 18179400) #rescaling population. There are about 18.6 million Canadians above 40
  message ("Here's simulated vs. predicted population table:")
  print(df)
  dfm <- reshape2::melt(df[,c('Year','Predicted','Simulated')], id.vars = 1)
  plot_population_growth <- ggplot2::ggplot(dfm, aes(x = Year, y = value)) + theme_tufte(base_size=14, ticks=F) +
    geom_bar(aes(fill = variable), stat = "identity", position = "dodge") +
    labs(title = "Population Growth Curve") + ylab ("Population") +
    labs(caption = "(based on population at age 40 and above)") +
    theme(legend.title=element_blank()) +
    scale_y_continuous(name="Population", labels = scales::comma)
  plot (plot_population_growth)
  if (savePlots) ggsave(paste0("PopulationGrowth",".tiff"), plot = last_plot(), device = "tiff", dpi = 300)
  # Simulated population pyramid: one row per calendar year; columns are ages
  # above age0.
  pyramid <- matrix(NA, nrow = input$global_parameters$time_horizon, ncol = length(Cget_output_ex()$n_alive_by_ctime_age[1, ]) -
    input$global_parameters$age0)
  # Bug fix: "0:n - 1" parses as "(0:n) - 1" = -1:(n-1), so the year = -1 pass
  # was a silent zero-index no-op. Parenthesized to the intended 0:(n-1).
  for (year in 0:(model_input$values$global_parameters$time_horizon - 1)) pyramid[1 + year, ] <- Cget_output_ex()$n_alive_by_ctime_age[year +1, -(1:input$global_parameters$age0)]
  message("Also, the ratio of the expected to observed population in years 10 and 20 are ", sum(Cget_output_ex()$n_alive_by_ctime_sex[10,
    ])/x[10, 2], " and ", sum(Cget_output_ex()$n_alive_by_ctime_sex[20, ])/x[20, 2])
  petoc()
  message("Now evaluating the population pyramid\n")
  for (year in c(2015, 2025, 2034)) {
    # Bug fix: added spaces around the interpolated year (message() adds none).
    message("The observed population pyramid in ", year, " is just drawn\n")
    x <- CanSim.052.0005[which(CanSim.052.0005[, "year"] == year & CanSim.052.0005[, "sex"] == "both"), "value"]
    #x <- c(x, rep(0, 111 - length(x) - 40))
    #barplot(x, names.arg=40:110, xlab = "Age")
    #title(cex.main = 0.5, paste("Predicted Pyramid - ", year))
    dfPredicted <- data.frame (population = x * 1000, age = 40:100)
    # message("Predicted average age of those >40 y/o is", sum((input$global_parameters$age0:(input$global_parameters$age0 + length(x) -
    # 1)) * x)/sum(x), "\n")
    # petoc()
    #
    # message("Simulated average age of those >40 y/o is", sum((input$global_parameters$age0:(input$global_parameters$age0 + length(x) -
    # 1)) * x)/sum(x), "\n")
    # petoc()
    dfSimulated <- data.frame (population = pyramid[year - 2015 + 1, ], age = 40:110)
    # Negate the simulated counts so the two series mirror each other on the plot.
    dfSimulated$population <- dfSimulated$population * (-1) / settings$n_base_agents * 18179400 #rescaling population. There are 18179400 Canadians above 40
    p <- ggplot (NULL, aes(x = age, y = population)) + theme_tufte(base_size=14, ticks=F) +
      geom_bar (aes(fill = "Simulated"), data = dfSimulated, stat="identity", alpha = 0.5) +
      geom_bar (aes(fill = "Predicted"), data = dfPredicted, stat="identity", alpha = 0.5) +
      theme(axis.title=element_blank()) +
      ggtitle(paste0("Simulated vs. Predicted Population Pyramid in ", year)) +
      theme(legend.title=element_blank()) +
      scale_y_continuous(name="Population", labels = scales::comma) +
      scale_x_continuous(name="Age", labels = scales::comma)
    if (savePlots) ggsave(paste0("Population ", year,".tiff"), plot = last_plot(), device = "tiff", dpi = 300)
    plot(p)
  }
  terminate_session()
}
#' Returns results of validation tests for smoking module.
#' @param intercept_k a number
#' @param remove_COPD 0 or 1. whether to remove COPD-related mortality.
#' @return validation test results
#' @export
validate_smoking <- function(remove_COPD = 1, intercept_k = NULL) {
  message("Welcome to EPIC validator! Today we will see if the model make good smoking predictions")
  petoc()
  # Event-level recording is needed to inspect smoking status at agent creation.
  settings <- default_settings
  settings$record_mode <- record_mode["record_mode_event"]
  settings$agent_stack_size <- 0
  settings$n_base_agents <- 1e+05
  settings$event_stack_size <- settings$n_base_agents * 1.7 * 30
  init_session(settings = settings)
  input <- model_input$values
  # Bug fix: message() concatenates arguments without separators (unlike
  # cat()), so explicit spaces are required around the interpolated words.
  message("\nBecause you have called me with remove_COPD=", remove_COPD, ", I am ", c("NOT", "indeed")[remove_COPD + 1], " going to remove COPD-related mortality from my calculations")
  if (remove_COPD) {
    # Bug fix: the previous "* -10000" flips the sign of the (negative) logits,
    # driving exacerbation death probability towards 1 -- the opposite of
    # removing COPD-related mortality. Use the "* 0 - large" idiom employed
    # elsewhere in this file (see sanity_check) to push the logit to -Inf.
    input$exacerbation$logit_p_death_by_sex <- input$exacerbation$logit_p_death_by_sex * 0 - 10000 # TODO why was this zero? Amin
  }
  if (!is.null(intercept_k))
    input$manual$smoking$intercept_k <- intercept_k
  petoc()
  message("There are two validation targets: 1) the prevalence of current smokers (by sex) in 2015, and 2) the projected decline in smoking rate.\n")
  message("Starting validation target 1: baseline prevalence of smokers.\n")
  petoc()
  # CanSim.105.0501<-read.csv(paste(data_path,'/CanSim.105.0501.csv',sep=''),header=T) Included in the package as internal data
  # Observed prevalence of current smokers (row 1: male, row 2: female; one
  # column per age group).
  tab1 <- rbind(CanSim.105.0501[1:3, "value"], CanSim.105.0501[4:6, "value"])/100
  message("This is the observed percentage of current smokers in 2014 (m,f)\n")
  # Typo fix in axis label: "Prevalenc" -> "Prevalence".
  barplot(tab1, beside = T, names.arg = c("40", "52", "65+"), ylim = c(0, 0.4), xlab = "Age group", ylab = "Prevalence of smoking",
          col = c("black", "grey"))
  title(cex.main = 0.5, "Prevalence of current smoker by sex and age group (observed)")
  legend("topright", c("Male", "Female"), fill = c("black", "grey"))
  petoc()
  message("Now I will run the model using the default smoking parameters")
  petoc()
  message("running the model\n")
  run(input = input)
  dataS <- Cget_all_events_matrix()
  dataS <- dataS[which(dataS[, "event"] == events["event_start"]), ]
  # Age bands matching the observed table; NOTE(review): the bar labels
  # c("40", "52", "65+") look like band midpoints -- confirm.
  age_list <- list(a1 = c(35, 45), a2 = c(45, 65), a3 = c(65, 111))
  tab2 <- tab1
  for (i in 0:1) for (j in 1:length(age_list)) tab2[i + 1, j] <- mean(dataS[which(dataS[, "female"] == i & dataS[, "age_at_creation"] >
    age_list[[j]][1] & dataS[, "age_at_creation"] <= age_list[[j]][2]), "smoking_status"])
  message("This is the model generated bar plot")
  petoc()
  barplot(tab2, beside = T, names.arg = c("40", "52", "65+"), ylim = c(0, 0.4), xlab = "Age group", ylab = "Prevalence of smoking",
          col = c("black", "grey"))
  title(cex.main = 0.5, "Prevalence of current smoking at creation (simulated)")
  legend("topright", c("Male", "Female"), fill = c("black", "grey"))
  message("This step is over; press enter to continue to step 2")
  petoc()
  message("Now we will validate the model on smoking trends")
  petoc()
  message("According to Table 2.1 of this report (see the extracted data in data folder): http://www.tobaccoreport.ca/2015/TobaccoUseinCanada_2015.pdf, the prevalence of current smoker is declining by around 3.8% per year\n")
  petoc()
  op_ex <- Cget_output_ex()
  smoker_prev <- op_ex$n_current_smoker_by_ctime_sex/op_ex$n_alive_by_ctime_sex
  smoker_packyears <- op_ex$sum_pack_years_by_ctime_sex/op_ex$n_alive_by_ctime_sex
  plot(2015:(2015+input$global_parameters$time_horizon-1), smoker_prev[, 1], type = "l", ylim = c(0, 0.25), col = "black", xlab = "Year", ylab = "Prevalence of current smoking")
  lines(2015:(2015+input$global_parameters$time_horizon-1), smoker_prev[, 2], type = "l", col = "grey")
  legend("topright", c("male", "female"), lty = c(1, 1), col = c("black", "grey"))
  # Typo fix: "currrent" -> "current".
  title(cex.main = 0.5, "Annual prevalence of current smoking (simulated)")
  plot(2015:(2015+input$global_parameters$time_horizon-1), smoker_packyears[, 1], type = "l", ylim = c(0, 30), col = "black", xlab = "Year", ylab = "Average Pack years")
  lines(2015:(2015+input$global_parameters$time_horizon-1), smoker_packyears[, 2], type = "l", col = "grey")
  legend("topright", c("male", "female"), lty = c(1, 1), col = c("black", "grey"))
  title(cex.main = 0.5, "Average Pack-Years Per Year for 40+ Population (simulated)")
  # Mean year-over-year decline of summed smoking prevalence, on the log scale.
  z <- log(rowSums(smoker_prev))
  # Bug fix: added the space before the interpolated number (message() adds none).
  message("average decline in % of current_smoking rate is ", 1 - exp(mean(c(z[-1], NaN) - z, na.rm = T)))
  petoc()
  # Plotting overall distribution of smoking status over time.
  smoking_status_ctime <- matrix (NA, nrow = input$global_parameters$time_horizon, ncol = 4)
  colnames(smoking_status_ctime) <- c("Year", "Non-Smoker", "Smoker", "Former smoker")
  smoking_status_ctime[1:(input$global_parameters$time_horizon), 1] <- c(2015:(2015 + input$global_parameters$time_horizon-1))
  smoking_status_ctime [, 2:4] <- op_ex$n_smoking_status_by_ctime / rowSums(as.data.frame (op_ex$n_alive_by_ctime_sex)) * 100
  df <- as.data.frame(smoking_status_ctime)
  dfm <- reshape2::melt(df[,c("Year", "Non-Smoker", "Smoker", "Former smoker")], id.vars = 1)
  plot_smoking_status_ctime <- ggplot2::ggplot(dfm, aes(x = Year, y = value, color = variable)) +
    geom_point () + geom_line() + labs(title = "Smoking Status per year") + ylab ("%") +
    scale_colour_manual(values = c("#66CC99", "#CC6666", "#56B4E9")) + scale_y_continuous(breaks = scales::pretty_breaks(n = 12))
  plot(plot_smoking_status_ctime ) #plot needs to be showing
  # Plotting pack-years over time.
  dataS <- as.data.frame (Cget_all_events_matrix())
  dataS <- subset (dataS, (event == 0 | event == 1 ))
  data_all <- dataS
  dataS <- subset (dataS, pack_years != 0)
  avg_pack_years_ctime <- matrix (NA, nrow = input$global_parameters$time_horizon + 1, ncol = 4)
  colnames(avg_pack_years_ctime) <- c("Year", "Smokers PYs", "Former Smokers PYs", "all")
  avg_pack_years_ctime[1:(input$global_parameters$time_horizon + 1), 1] <- c(2015:(2015 + input$global_parameters$time_horizon))
  for (i in 0:input$global_parameters$time_horizon) {
    smokers <- subset (dataS, (floor(local_time + time_at_creation) == (i)) & smoking_status != 0)
    prev_smokers <- subset (dataS, (floor(local_time + time_at_creation) == (i)) & smoking_status == 0)
    all <- subset (data_all, floor(local_time + time_at_creation) == i)
    avg_pack_years_ctime[i+1, "Smokers PYs"] <- colSums(smokers)[["pack_years"]] / dim (smokers)[1]
    avg_pack_years_ctime[i+1, "Former Smokers PYs"] <- colSums(prev_smokers)[["pack_years"]] / dim (prev_smokers) [1]
    avg_pack_years_ctime[i+1, "all"] <- colSums(all)[["pack_years"]] / dim (all) [1] #includes non-smokers
  }
  df <- as.data.frame(avg_pack_years_ctime)
  dfm <- reshape2::melt(df[,c( "Year", "Smokers PYs", "Former Smokers PYs", "all")], id.vars = 1)
  plot_avg_pack_years_ctime <- ggplot2::ggplot(dfm, aes(x = Year, y = value, color = variable)) +
    geom_point () + geom_line() + labs(title = "Average pack-years per year ") + ylab ("Pack-years")
  plot(plot_avg_pack_years_ctime) #plot needs to be showing
  # Plotting pack-years over age.
  avg_pack_years_age <- matrix (NA, nrow = 110 - 40 + 1, ncol = 3)
  colnames(avg_pack_years_age) <- c("Age", "Smokers PYs", "Former Smokers PYs")
  avg_pack_years_age[1:(110 - 40 + 1), 1] <- c(40:110)
  for (i in 0:(110 - 40)) {
    smokers <- subset (dataS, (floor (local_time + age_at_creation) == (i+40)) & smoking_status != 0)
    prev_smokers <- subset (dataS, (floor (local_time + age_at_creation) == (i+40)) & smoking_status == 0)
    avg_pack_years_age[i+1, "Smokers PYs"] <- colSums(smokers)[["pack_years"]] / dim (smokers)[1]
    avg_pack_years_age[i+1, "Former Smokers PYs"] <- colSums(prev_smokers)[["pack_years"]] / dim (prev_smokers) [1]
  }
  df <- as.data.frame(avg_pack_years_age)
  dfm <- reshape2::melt(df[,c( "Age", "Smokers PYs", "Former Smokers PYs")], id.vars = 1)
  plot_avg_pack_years_age <- ggplot2::ggplot(dfm, aes(x = Age, y = value, color = variable, ymin = 40, ymax = 100)) +
    geom_point () + geom_line() + labs(title = "Average pack-years per age ") + ylab ("Pack-years")
  plot(plot_avg_pack_years_age) #plot needs to be showing
  message("This test is over; terminating the session")
  petoc()
  terminate_session()
}
#' Basic COPD test.
#' @return validation test results
#' @export
sanity_COPD <- function() {
  settings <- default_settings
  settings$record_mode <- record_mode["record_mode_agent"]
  # settings$agent_stack_size<-0
  settings$n_base_agents <- 10000
  settings$event_stack_size <- settings$n_base_agents * 10
  init_session(settings = settings)
  # Local helper: report how many COPD cases the model produced plus the COPD
  # prevalence observed in the Start and End event dumps. The explicit spaces
  # are needed because message() concatenates arguments without separators.
  report_COPD_prevalence <- function() {
    message("The model is reporting it has got that many COPDs: ", Cget_output()$n_COPD, " out of ", Cget_output()$n_agents, " agents.\n")
    dataS <- get_events_by_type(events["event_start"])
    message("The prevalence of COPD in Start event dump is: ", mean(dataS[, "gold"] > 0), "\n")
    dataS <- get_events_by_type(events["event_end"])
    message("The prevalence of COPD in End event dump is: ", mean(dataS[, "gold"] > 0), "\n")
  }
  message("Welcome! I am going to check EPIC's sanity with regard to modeling COPD\n ")
  petoc()
  # Typo fix: "prevalenceparameters" -> "prevalence parameters".
  message("COPD incidence and prevalence parameters are as follows\n")
  message("model_input$values$COPD$logit_p_COPD_betas_by_sex:\n")
  print(model_input$values$COPD$logit_p_COPD_betas_by_sex)
  petoc()
  message("model_input$values$COPD$p_prevalent_COPD_stage:\n")
  print(model_input$values$COPD$p_prevalent_COPD_stage)
  petoc()
  message("model_input$values$COPD$ln_h_COPD_betas_by_sex:\n")
  print(model_input$values$COPD$ln_h_COPD_betas_by_sex)
  petoc()
  # Scenario 1: both prevalence and incidence effectively disabled (logit and
  # log-hazard pushed to -100); expect (essentially) zero COPD.
  message("Now I am going to first turn off both prevalence and incidence parameters and run the model to see how many COPDs I get\n")
  petoc()
  input <- model_input$values
  input$COPD$logit_p_COPD_betas_by_sex <- input$COPD$logit_p_COPD_betas_by_sex * 0 - 100
  input$COPD$ln_h_COPD_betas_by_sex <- input$COPD$ln_h_COPD_betas_by_sex * 0 - 100
  run(input = input)
  report_COPD_prevalence()
  petoc()
  # Scenario 2: prevalence only (logit = 0 -> probability 0.5), incidence disabled.
  message("Now I am going to switch off incidence and create COPD patients only through prevalence (set at 0.5)")
  petoc()
  init_input()
  input <- model_input$values
  input$COPD$logit_p_COPD_betas_by_sex <- input$COPD$logit_p_COPD_betas_by_sex * 0
  input$COPD$ln_h_COPD_betas_by_sex <- input$COPD$ln_h_COPD_betas_by_sex * 0 - 100
  run(input = input)
  report_COPD_prevalence()
  petoc()
  # Scenario 3: incidence only (default hazard), prevalence disabled.
  message("Now I am going to switch off prevalence and create COPD patients only through incidence\n")
  petoc()
  init_input()
  input <- model_input$values
  input$COPD$logit_p_COPD_betas_by_sex <- input$COPD$logit_p_COPD_betas_by_sex * 0 - 100
  run(input = input)
  report_COPD_prevalence()
  petoc()
  terminate_session()
}
#' Returns results of validation tests for COPD
#' @param incident_COPD_k a number (default=1) by which the incidence rate of COPD will be multiplied.
#' @param return_CI if TRUE, returns 95 percent confidence intervals for the "Year" coefficient
#' @return validation test results
#' @export
validate_COPD <- function(incident_COPD_k = 1, return_CI = FALSE) # The incidence rate is multiplied by K
{
  out <- list()
  settings <- default_settings
  settings$record_mode <- record_mode["record_mode_event"]
  settings$agent_stack_size <- 0
  settings$n_base_agents <- 1e+05
  settings$event_stack_size <- settings$n_base_agents * 50
  init_session(settings = settings)
  input <- model_input$values
  # k == 0 disables COPD incidence entirely; otherwise shift the log-hazard
  # intercept by log(k) so the incidence rate is multiplied by k.
  if (incident_COPD_k == 0)
    input$COPD$ln_h_COPD_betas_by_sex <- input$COPD$ln_h_COPD_betas_by_sex * 0 - 100 else input$COPD$ln_h_COPD_betas_by_sex[1, ] <- model_input$values$COPD$ln_h_COPD_betas_by_sex[1, ] + log(incident_COPD_k)
  message("working...\n")
  run(input = input)
  op <- Cget_output()
  opx <- Cget_output_ex()
  data <- as.data.frame(Cget_all_events_matrix())
  dataS <- data[which(data[, "event"] == events["event_start"]), ]
  # COPD prevalence among agents at creation.
  # (An unused "new_COPDs" intermediate -- which also mixed dataS/dataE row
  # sets -- and the then-unused dataE were removed here.)
  out$p_copd_at_creation <- mean(dataS[, "gold"] > 0)
  out$inc_copd <- sum(opx$n_inc_COPD_by_ctime_age)/opx$cumul_non_COPD_time
  # NOTE(review): this is a verbatim copy of inc_copd, NOT stratified by sex
  # (n_inc_COPD_by_ctime_age carries no sex dimension here). Kept so the
  # returned list stays backward compatible -- confirm intended calculation.
  out$inc_copd_by_sex <- sum(opx$n_inc_COPD_by_ctime_age)/opx$cumul_non_COPD_time
  x <- sqldf::sqldf("SELECT female, SUM(gold>0) AS n_copd, COUNT(*) AS n FROM dataS GROUP BY female")
  out$p_copd_at_creation_by_sex <- x[, "n_copd"]/x[, "n"]
  # Prevalence at creation by age category.
  age_cats <- c(40, 50, 60, 70, 80, 111)
  dataS[, "age_cat"] <- as.numeric(cut(dataS[, "age_at_creation"] + dataS[, "local_time"], age_cats, include.lowest = TRUE))
  x <- sqldf::sqldf("SELECT age_cat, SUM(gold>0) AS n_copd, COUNT(*) AS n FROM dataS GROUP BY age_cat")
  temp <- x[, "n_copd"]/x[, "n"]
  names(temp) <- paste(age_cats[-length(age_cats)], age_cats[-1], sep = "-")
  out$p_copd_at_creation_by_age <- temp
  # Prevalence at creation by pack-years category.
  py_cats <- c(0, 15, 30, 45, Inf)
  dataS[, "py_cat"] <- as.numeric(cut(dataS[, "pack_years"], py_cats, include.lowest = TRUE))
  x <- sqldf::sqldf("SELECT py_cat, SUM(gold>0) AS n_copd, COUNT(*) AS n FROM dataS GROUP BY py_cat")
  temp <- x[, "n_copd"]/x[, "n"]
  names(temp) <- paste(py_cats[-length(py_cats)], py_cats[-1], sep = "-")
  out$p_copd_at_creation_by_pack_years <- temp
  # Logistic regressions of COPD / GOLD2+ / GOLD3+ prevalence on age,
  # pack-years, smoking status and calendar year, fitted separately per sex.
  # The loop replaces six near-identical glm blocks and produces the same list
  # elements (calib_prev_*_reg_coeffs_* / conf_prev_*_reg_coeffs_*) in the
  # same order as before.
  dataF <- data[which(data[, "event"] == events["event_fixed"]), ]
  dataF[, "age"] <- dataF[, "local_time"] + dataF[, "age_at_creation"]
  dataF[, "copd"] <- (dataF[, "gold"] > 0) * 1
  dataF[, "gold2p"] <- (dataF[, "gold"] > 1) * 1
  dataF[, "gold3p"] <- (dataF[, "gold"] > 2) * 1
  dataF[, "year"] <- dataF[, "local_time"] + dataF[, "time_at_creation"]
  for (outcome in c("copd", "gold2p", "gold3p")) {
    for (sex_code in 0:1) {
      sex_label <- c("male", "female")[sex_code + 1]
      fml <- as.formula(paste(outcome, "~ age + pack_years + smoking_status + year"))
      res <- glm(data = dataF[which(dataF[, "female"] == sex_code), ], formula = fml, family = binomial(link = logit))
      out[[paste0("calib_prev_", outcome, "_reg_coeffs_", sex_label)]] <- coefficients(res)
      if (return_CI) {
        out[[paste0("conf_prev_", outcome, "_reg_coeffs_", sex_label)]] <- stats::confint(res, "year", level = 0.95)
      }
    }
  }
  terminate_session()
  return(out)
}
#' Returns results of validation tests for payoffs, costs and QALYs
#' @param nPatient number of simulated patients. Default is 1e6.
#' @param disableDiscounting if TRUE, discounting will be disabled for cost and QALY calculations. Default: TRUE
#' @param disableExacMortality if TRUE, mortality due to exacerbations will be disabled for cost and QALY calculations. Default: TRUE
#' @return validation test results: a list with cumulative time, QALYs and
#'   costs per GOLD stage, background utilities/costs back-calculated from
#'   model output, their target input values, and percentage differences.
#' @export
validate_payoffs <- function(nPatient = 1e6, disableDiscounting = TRUE, disableExacMortality = TRUE)
{
# Only aggregate outputs are used below, so no per-event recording is needed.
out <- list()
settings <- default_settings
settings$record_mode <- record_mode["record_mode_none"]
settings$agent_stack_size <- 0
settings$n_base_agents <- nPatient
settings$event_stack_size <- 0
init_session(settings = settings)
input <- model_input$values
if (disableDiscounting) {
input$global_parameters$discount_cost <- 0
input$global_parameters$discount_qaly <- 0
}
if (disableExacMortality) {
# Push the logit of exacerbation death probability to a large negative value
# (probability ~ 0) while preserving the object's dimensions.
input$exacerbation$logit_p_death_by_sex <- -1000 + 0*input$exacerbation$logit_p_death_by_sex
}
run(input = input)
op <- Cget_output()
op_ex <- Cget_output_ex()
# Per-exacerbation disutility and cost inputs as actually used by the run.
exac_dutil<-Cget_inputs()$utility$exac_dutil
exac_dcost<-Cget_inputs()$cost$exac_dcost
# Indices 2:5 select GOLD stages I-IV (index 1 is the non-COPD column).
total_qaly<-colSums(op_ex$cumul_qaly_gold_ctime)[2:5]
qaly_loss_dueto_exac_by_gold<-rowSums(op_ex$n_exac_by_gold_severity*exac_dutil)
# Back-calculate the background utility per GOLD stage: (total QALYs minus
# exacerbation-attributable loss) per patient-year spent in that stage.
back_calculated_utilities<-(total_qaly-qaly_loss_dueto_exac_by_gold)/colSums(op_ex$cumul_time_by_ctime_GOLD)[2:5]
#I=0.81,II=0.72,III=0.68,IV=0.58)))
out$cumul_time_per_GOLD <- colSums(op_ex$cumul_time_by_ctime_GOLD)[2:5]
out$total_qaly <- total_qaly
out$qaly_loss_dueto_exac_by_gold <- qaly_loss_dueto_exac_by_gold
out$back_calculated_utilities <- back_calculated_utilities
out$utility_target_values <- input$utility$bg_util_by_stage
# Relative deviation (%) of back-calculated utilities from the input targets.
out$utility_difference_percentage <- (out$back_calculated_utilities - out$utility_target_values[2:5]) / out$utility_target_values[2:5] * 100
# Same back-calculation for background costs; the t(...) double transpose
# applies the per-severity exac_dcost vector across the GOLD x severity
# exacerbation count matrix before summing per GOLD stage.
total_cost<-colSums(op_ex$cumul_cost_gold_ctime)[2:5]
cost_dueto_exac_by_gold<-rowSums(t((exac_dcost)*t(op_ex$n_exac_by_gold_severity)))
back_calculated_costs<-(total_cost-cost_dueto_exac_by_gold)/colSums(op_ex$cumul_time_by_ctime_GOLD)[2:5]
#I=615, II=1831, III=2619, IV=3021
out$total_cost <- total_cost
out$cost_dueto_exac_by_gold <- cost_dueto_exac_by_gold
out$back_calculated_costs <- back_calculated_costs
out$cost_target_values <- input$cost$bg_cost_by_stage
out$cost_difference_percentage <- (out$back_calculated_costs - out$cost_target_values[2:5]) / out$cost_target_values[2:5] * 100
terminate_session()
return(out)
}
#' Returns results of validation tests for mortality rate
#'
#' Runs a one-year simulation and compares simulated mortality against the
#' expected (life table) background mortality \code{p_bgd_by_sex}, plotting the
#' ratio and the difference by age and sex. Each multiplier argument scales the
#' corresponding mortality input before the run (1 = unchanged, 0 = disabled).
#' @param n_sim number of simulated agents
#' @param bgd multiplier applied to background mortality probabilities (\code{p_bgd_by_sex})
#' @param bgd_h multiplier applied to the log-hazard regression coefficients of background mortality (\code{ln_h_bgd_betas})
#' @param manual multiplier applied to explicit (manual) mortality by age and sex
#' @param exacerbation multiplier applied to the logit probability of death due to exacerbation
#' @param comorbidity if 0, mortality due to MI, stroke and heart failure is disabled
#' @return a list with the difference between simulated and expected mortality
#'   by age (40-90) and sex; nothing is returned if no death occurred
#' @export
validate_mortality <- function(n_sim = 5e+05, bgd = 1, bgd_h = 1, manual = 1, exacerbation = 1, comorbidity = 1) {
    message("Hello from EPIC! I am going to test mortality rate and how it is affected by input parameters\n")
    petoc()
    # No event recording needed; only aggregate death/person-time outputs are used.
    settings <- default_settings
    settings$record_mode <- record_mode["record_mode_none"]
    settings$agent_stack_size <- 0
    settings$n_base_agents <- n_sim
    settings$event_stack_size <- 0
    init_session(settings = settings)
    input <- model_input$values
    input$global_parameters$time_horizon <- 1
    # Scale each mortality component by its multiplier.
    input$agent$p_bgd_by_sex <- input$agent$p_bgd_by_sex * bgd
    input$agent$ln_h_bgd_betas <- input$agent$ln_h_bgd_betas * bgd_h
    input$manual$explicit_mortality_by_age_sex <- input$manual$explicit_mortality_by_age_sex * manual
    input$exacerbation$logit_p_death_by_sex <- input$exacerbation$logit_p_death_by_sex * exacerbation
    if (comorbidity == 0) {
        # Disable all comorbidity-related mortality pathways.
        input$comorbidity$p_mi_death <- 0
        input$comorbidity$p_stroke_death <- 0
        input$agent$ln_h_bgd_betas[, c("b_mi", "n_mi", "b_stroke", "n_stroke", "hf")] <- 0
    }
    message("working...\n")
    res <- run(input = input)
    message("Mortality rate was", Cget_output()$n_death/Cget_output()$cumul_time, "\n")
    if (Cget_output()$n_death > 0) {
        # Ratio of simulated mortality rate to the life-table probability, ages 40-110
        # (rows 41:111 correspond to ages 40:110).
        ratio<-(Cget_output_ex()$n_death_by_age_sex[41:111,]/Cget_output_ex()$sum_time_by_age_sex[41:111,])/model_input$values$agent$p_bgd_by_sex[41:111,]
        plot(40:110,ratio[,1],type='l',col='blue',xlab="age",ylab="Ratio", ylim = c(0, 4))
        legend("topright",c("male","female"),lty=c(1,1),col=c("blue","red"))
        lines(40:110,ratio[,2],type='l',col='red')
        title(cex.main=0.5,"Ratio of simulated to expected (life table) mortality, by sex and age")
        # Absolute difference by age (40-90) and sex.
        difference <- (Cget_output_ex()$n_death_by_age_sex[41:91, ]/Cget_output_ex()$sum_time_by_age_sex[41:91, ]) - model_input$values$agent$p_bgd_by_sex[41:91,
            ]
        plot(40:90, difference[, 1], type = "l", col = "blue", xlab = "age", ylab = "Difference", ylim = c(-.1, .1))
        legend("topright", c("male", "female"), lty = c(1, 1), col = c("blue", "red"))
        lines(40:90, difference[, 2], type = "l", col = "red")
        title(cex.main = 0.5, "Difference between simulated and expected (life table) mortality, by sex and age")
        return(list(difference = difference))
    } else message("No death occured.\n")
    # NOTE(review): the session is not terminated here, unlike sibling validators —
    # confirm whether terminate_session() should be called before returning.
}
#' Returns results of validation tests for comorbidities
#'
#' Runs two simulations: a first one without event recording to report the
#' baseline prevalence and follow-up incidence of MI, stroke and heart failure,
#' and a second one recording comorbidity events to plot incidence by age and
#' sex.
#' @param n_sim number of agents
#' @return matrix of MI incidence by age (rows for ages 40-110) and sex
#' @export
validate_comorbidity <- function(n_sim = 1e+05) {
    message("Hello from EPIC! I am going to validate comorbidities for ya\n")
    petoc()
    settings <- default_settings
    settings$record_mode <- record_mode["record_mode_none"]
    settings$agent_stack_size <- 0
    settings$n_base_agents <- n_sim
    settings$event_stack_size <- 0
    init_session(settings = settings)
    input <- model_input$values
    res <- run(input = input)
    if (res < 0)
        stop("Execution stopped.\n")
    output <- Cget_output()
    output_ex <- Cget_output_ex()
    # Baseline prevalence = (all events ever) - (incident events during follow-up).
    message("The prevalence of having MI at baseline was ", (output_ex$n_mi - output_ex$n_incident_mi)/output$n_agent, "\n")
    message("The incidence of MI during follow-up was ", output_ex$n_incident_mi/output$cumul_time, "/PY\n")
    message("The prevalence of having stroke at baseline was ", (output_ex$n_stroke - output_ex$n_incident_stroke)/output$n_agent, "\n")
    message("The incidence of stroke during follow-up was ", output_ex$n_incident_stroke/output$cumul_time, "/PY\n")
    # BUG FIX: previously computed as (n_stroke - n_hf), which mixed the stroke
    # count into the heart-failure prevalence. Use the HF counts, mirroring the
    # MI and stroke calculations above.
    message("The prevalence of having hf at baseline was ", (output_ex$n_hf - output_ex$n_incident_hf)/output$n_agent, "\n")
    message("The incidence of hf during follow-up was ", output_ex$n_incident_hf/output$cumul_time, "/PY\n")
    terminate_session()
    # Second run: record comorbidity events so incidence can be plotted by age/sex.
    settings$record_mode <- record_mode["record_mode_some_event"]
    settings$events_to_record <- events[c("event_start", "event_mi", "event_stroke", "event_hf", "event_end")]
    settings$n_base_agents <- 1e+05
    settings$event_stack_size <- settings$n_base_agents * 1.6 * 10
    init_session(settings = settings)
    input <- model_input$values
    if (run(input = input) < 0)
        stop("Execution stopped.\n")
    output <- Cget_output()
    output_ex <- Cget_output_ex()
    # Incidence plots: red = first column, blue = second column of the by-sex matrices.
    plot(output_ex$n_mi_by_age_sex[41:100, 1]/output_ex$n_alive_by_age_sex[41:100, 1], type = "l", col = "red")
    lines(output_ex$n_mi_by_age_sex[41:100, 2]/output_ex$n_alive_by_age_sex[41:100, 2], type = "l", col = "blue")
    title(cex.main = 0.5, "Incidence of MI by age and sex")
    plot(output_ex$n_stroke_by_age_sex[, 1]/output_ex$n_alive_by_age_sex[, 1], type = "l", col = "red")
    lines(output_ex$n_stroke_by_age_sex[, 2]/output_ex$n_alive_by_age_sex[, 2], type = "l", col = "blue")
    title(cex.main = 0.5, "Incidence of Stroke by age and sex")
    plot(output_ex$n_hf_by_age_sex[, 1]/output_ex$n_alive_by_age_sex[, 1], type = "l", col = "red")
    lines(output_ex$n_hf_by_age_sex[, 2]/output_ex$n_alive_by_age_sex[, 2], type = "l", col = "blue")
    title(cex.main = 0.5, "Incidence of HF by age and sex")
    # NOTE(review): the second session is never terminated — confirm whether
    # terminate_session() should be called before returning.
    # Implicit return: MI incidence by age and sex.
    output_ex$n_mi_by_age_sex[41:111, ]/output_ex$n_alive_by_age_sex[41:111, ]
}
#' Returns results of validation tests for lung function
#'
#' Records COPD and annual ('fixed') events, then summarises FEV1 values and
#' GOLD-stage distributions among prevalent (at model start) and incident COPD
#' cases, and plots FEV1 trajectories for 100 individuals.
#' @return a list with FEV1 mean/SD and GOLD-stage distributions for prevalent
#'   and incident COPD, computed via sqldf and (for the *_patients elements)
#'   via base R tables
#' @export
validate_lung_function <- function() {
    message("This function examines FEV1 values\n")
    petoc()
    settings <- default_settings
    settings$record_mode <- record_mode["record_mode_some_event"]
    settings$events_to_record <- events[c("event_start", "event_COPD", "event_fixed")]
    settings$agent_stack_size <- 0
    settings$n_base_agents <- 1e+05
    settings$event_stack_size <- settings$n_base_agents * 100
    init_session(settings = settings)
    input <- model_input$values
    input$global_parameters$discount_qaly <- 0
    run(input = input)
    all_events <- as.data.frame(Cget_all_events_matrix())
    COPD_events <- which(all_events[, "event"] == events["event_COPD"])
    start_events <- which(all_events[, "event"] == events["event_start"])
    # FEV1 mean/SD by GOLD stage among prevalent (event_start) and incident
    # (event_COPD) cases.
    out_FEV1_prev <- sqldf::sqldf(paste("SELECT gold, AVG(FEV1) AS 'Mean', STDEV(FEV1) AS 'SD' FROM all_events WHERE event=", events["event_start"],
        " GROUP BY gold"))
    out_FEV1_inc <- sqldf::sqldf(paste("SELECT gold, AVG(FEV1) AS 'Mean', STDEV(FEV1) AS 'SD' FROM all_events WHERE event=", events["event_COPD"],
        " GROUP BY gold"))
    # GOLD-stage counts and proportions among prevalent and incident cases.
    out_gold_prev <- sqldf::sqldf(paste("SELECT gold, COUNT(*) AS N FROM all_events WHERE event=", events["event_start"], " GROUP BY gold"))
    out_gold_prev[, "Percent"] <- round(out_gold_prev[, "N"]/sum(out_gold_prev[, "N"]), 3)
    out_gold_inc <- sqldf::sqldf(paste("SELECT gold, COUNT(*) AS N FROM all_events WHERE event=", events["event_COPD"], " GROUP BY gold"))
    out_gold_inc[, "Percent"] <- round(out_gold_inc[, "N"]/sum(out_gold_inc[, "N"]), 3)
    # Same distributions computed in base R; the numeric codes 4 and 0 are
    # presumably events["event_COPD"] and events["event_start"] — confirm.
    COPD_events_patients <- subset(all_events, event == 4)
    start_events_patients <- subset(all_events, event == 0 & gold > 0)
    # These two expressions are evaluated but their results are discarded.
    table(COPD_events_patients[, "gold"])/sum(table(COPD_events_patients[, "gold"]))
    table(start_events_patients[, "gold"])/sum(table(start_events_patients[, "gold"]))
    out_gold_inc_patients <- table(COPD_events_patients[, "gold"])/sum(table(COPD_events_patients[, "gold"]))
    out_gold_prev_patients <- table(start_events_patients[, "gold"])/sum(table(start_events_patients[, "gold"]))
    # Plot FEV1-over-time trajectories for the first 100 incident COPD cases.
    COPD_ids <- all_events[COPD_events, "id"]
    for (i in 1:100) {
        y <- which(all_events[, "id"] == COPD_ids[i] & all_events[, "gold"] > 0)
        if (i == 1)
            plot(all_events[y, "local_time"], all_events[y, "FEV1"], type = "l", xlim = c(0, 20), ylim = c(0, 5), xlab = "local time",
                ylab = "FEV1") else lines(all_events[y, "local_time"], all_events[y, "FEV1"], type = "l")
    }
    title(cex.main = 0.5, "Trajectories of FEV1 in 100 individuals")
    # NOTE(review): the session is not terminated here — confirm whether
    # terminate_session() should be called before returning.
    return(list(FEV1_prev = out_FEV1_prev, FEV1_inc = out_FEV1_inc, gold_prev = out_gold_prev, gold_inc = out_gold_inc, gold_prev_patients = out_gold_prev_patients,
        gold_inc_patients = out_gold_inc_patients))
}
#' Returns results of validation tests for exacerbation rates
#'
#' Records all events, accumulates COPD follow-up time per GOLD stage, and
#' returns the annual exacerbation rate (events per person-year of follow-up
#' after COPD onset) for each GOLD stage.
#' @param base_agents Number of agents in the simulation. Default is 1e4.
#' @return a list with the exacerbation rate for GOLD stages I-IV
#' @export
validate_exacerbation <- function(base_agents=1e4) {
    settings <- default_settings
    settings$record_mode <- record_mode["record_mode_event"]
    #settings$agent_stack_size <- 0
    settings$n_base_agents <- base_agents
    #settings$event_stack_size <- 1
    init_session(settings = settings)
    input <- model_input$values #We can work with local copy more conveniently and submit it to the Run function
    run(input = input)
    op <- Cget_output()
    all_events <- as.data.frame(Cget_all_events_matrix())
    exac_events <- subset(all_events, event == 5)   # event code 5: exacerbation (see sibling validators)
    exit_events <- subset(all_events, event == 14)  # event code 14: end of simulation
    # Accumulate follow-up time (years after COPD onset) spent in each GOLD
    # stage by walking event rows per agent and closing out a stage either at a
    # GOLD transition or at the agent's end-of-simulation record.
    Follow_up_Gold <- c(0, 0, 0, 0)
    last_GOLD_transition_time <- 0
    for (i in 2:dim(all_events)[1]) {
        if (all_events[i, "id"] != all_events[i - 1, "id"])
            last_GOLD_transition_time <- 0
        if ((all_events[i, "id"] == all_events[i - 1, "id"]) & (all_events[i, "gold"] != all_events[i - 1, "gold"])) {
            Follow_up_Gold[all_events[i - 1, "gold"]] = Follow_up_Gold[all_events[i - 1, "gold"]] + all_events[i - 1, "followup_after_COPD"] -
                last_GOLD_transition_time
            last_GOLD_transition_time <- all_events[i - 1, "followup_after_COPD"]
        }
        if (all_events[i, "event"] == 14)
            Follow_up_Gold[all_events[i, "gold"]] = Follow_up_Gold[all_events[i, "gold"]] + all_events[i, "followup_after_COPD"] -
                last_GOLD_transition_time
    }
    terminate_session()
    # Count exacerbations per GOLD stage once, with explicit factor levels so
    # the four counts stay aligned with stages I-IV even when a stage has no
    # exacerbations (the previous positional indexing of table() rows would
    # misalign in that case), and so the table is not recomputed four times.
    exac_counts <- table(factor(exac_events[, "gold"], levels = 1:4))
    GOLD_I <- exac_counts[[1]]/Follow_up_Gold[1]
    GOLD_II <- exac_counts[[2]]/Follow_up_Gold[2]
    GOLD_III <- exac_counts[[3]]/Follow_up_Gold[3]
    GOLD_IV <- exac_counts[[4]]/Follow_up_Gold[4]
    return(list(exacRateGOLDI = GOLD_I, exacRateGOLDII = GOLD_II, exacRateGOLDIII = GOLD_III, exacRateGOLDIV = GOLD_IV))
}
#' Returns the Kaplan Meier curve comparing COPD and non-COPD
#'
#' Runs the model with full event recording, builds a survival cohort from
#' events 7, 13 and 14 (14 is treated as censoring at end of simulation; any
#' other kept event counts as death), and plots Kaplan-Meier curves of age by
#' COPD status. Also fits and prints a Cox model of age at death on COPD status.
#' @param savePlots TRUE or FALSE (default), exports a 300 DPI TIFF of the survival plot
#' @param base_agents Number of agents in the simulation. Default is 1e4.
#' @return the ggsurvplot object
#' @export
validate_survival <- function(savePlots = FALSE, base_agents=1e4) {
  # These packages are only suggested (checked via requireNamespace, never
  # attached), so every call into them below must be fully qualified.
  if (!requireNamespace("survival", quietly = TRUE)) {
    stop("Package \"survival\" needed for this function to work. Please install it.",
         call. = FALSE)
  }
  if (!requireNamespace("survminer", quietly = TRUE)) {
    stop("Package \"survminer\" needed for this function to work. Please install it.",
         call. = FALSE)
  }
  # BUG FIX: theme_tufte() comes from ggthemes, which was used but never checked.
  if (!requireNamespace("ggthemes", quietly = TRUE)) {
    stop("Package \"ggthemes\" needed for this function to work. Please install it.",
         call. = FALSE)
  }
  settings <- default_settings
  settings$record_mode <- record_mode["record_mode_event"]
  #settings$agent_stack_size <- 0
  settings$n_base_agents <- base_agents
  #settings$event_stack_size <- 1
  init_session(settings = settings)
  input <- model_input$values #We can work with local copy more conveniently and submit it to the Run function
  run(input = input)
  events <- as.data.frame(Cget_all_events_matrix())
  terminate_session()
  # Keep events 7, 13 and 14, then keep one terminal record per agent.
  # NOTE(review): codes 7/13 appear to be death-type events and 14 the
  # end-of-simulation record — confirm against the event enumeration.
  cohort <- subset(events, ((event==7) | (event==13) | (event==14)))
  cohort <- cohort %>% filter((id==lead(id) | ((event == 14) & id!=lag(id))))
  cohort$copd <- (cohort$gold>0)
  cohort$death <- (cohort$event!=14)  # event 14 = censored, everything else = death
  cohort$age <- (cohort$age_at_creation+cohort$local_time)
  # BUG FIX: Surv(), coxph() and cox.zph() were called unqualified although
  # survival is never attached; qualify them so the function works standalone.
  fit <- survival::survfit(survival::Surv(age, death) ~ copd, data=cohort)
  # Customized survival curves
  surv_plot <- survminer::ggsurvplot(fit, data = cohort, censor.shape="", censor.size = 1,
                                     surv.median.line = "hv", # Add median survival
                                     # Change legends: title & labels
                                     legend.title = "Disease Status",
                                     legend.labs = c("Non-COPD", "COPD"),
                                     # Add p-value and confidence intervals
                                     pval = TRUE,
                                     conf.int = TRUE,
                                     xlim = c(40,110), # present narrower X axis, but not affect
                                     # survival estimates.
                                     xlab = "Age", # customize X axis label.
                                     break.time.by = 20, # break X axis in time intervals of 20.
                                     tables.height = 0.2,
                                     tables.theme = survminer::theme_cleantable(),
                                     ggtheme = ggthemes::theme_tufte() +
                                       theme(axis.line = element_line(colour = "black"),
                                             panel.grid.major = element_blank(),
                                             panel.grid.minor = element_blank(),
                                             panel.border = element_blank(),
                                             panel.background = element_blank()) # Change ggplot2 theme
  )
  plot(surv_plot)
  if (savePlots) ggplot2::ggsave((paste0("survival-diagnosed", ".tiff")), plot = plot(surv_plot), device = "tiff", dpi = 300)
  fitcox <- survival::coxph(survival::Surv(age, death) ~ copd, data = cohort)
  ftest <- survival::cox.zph(fitcox)  # proportional-hazards check; computed but not returned
  print(summary(fitcox))
  return(surv_plot)
}
#' Returns results of validation tests for diagnosis
#'
#' Runs the model and reports the proportion of COPD patients who are
#' diagnosed, over model time and by GOLD severity, printing the tables and
#' plotting both series.
#' @param n_sim number of agents
#' @return validation test results (printed and plotted; no value returned)
#' @export
validate_diagnosis <- function(n_sim = 1e+04) {
    message("Let's take a look at diagnosis\n")
    petoc()
    settings <- default_settings
    settings$record_mode <- record_mode["record_mode_none"]
    settings$agent_stack_size <- 0
    settings$n_base_agents <- n_sim
    settings$event_stack_size <- 0
    init_session(settings = settings)
    input <- model_input$values
    res <- run(input = input)
    if (res < 0)
        stop("Execution stopped.\n")
    inputs <- Cget_inputs()
    output_ex <- Cget_output_ex()
    message("Here are the proportion of COPD patients diagnosed over model time: \n")
    # Yearly counts of COPD patients and of diagnosed patients (summed over sex).
    diag <- data.frame(Year=1:inputs$global_parameters$time_horizon,
                     COPD=rowSums(output_ex$n_COPD_by_ctime_sex),
                     Diagnosed=rowSums(output_ex$n_Diagnosed_by_ctime_sex))
    diag$Proportion <- round(diag$Diagnosed/diag$COPD,2)
    print(diag)
    # Average proportion diagnosed over the second half of the time horizon.
    message("The average proportion diagnosed from year", round(length(diag$Proportion)/2,0), "to", length(diag$Proportion), "is",
          mean(diag$Proportion[(round(length(diag$Proportion)/2,0)):(length(diag$Proportion))]),"\n")
    diag.plot <- tidyr::gather(data=diag, key="Variable", value="Number", c(COPD,Diagnosed))
    diag.plotted <- ggplot2::ggplot(diag.plot, aes(x=Year, y=Number, col=Variable)) +
    geom_line() + geom_point() + expand_limits(y = 0) +
    theme_bw() + ylab("Number of COPD patients") + xlab("Years")
    plot(diag.plotted)
    message("\n")
    message("Now let's look at the proportion diagnosed by COPD severity.\n")
    # Proportion diagnosed by GOLD stage; columns selected (1,3:6) drop the
    # no-COPD severity column, and the first (warm-up) year is removed.
    prop <- data.frame(Year=1:inputs$global_parameters$time_horizon,
                     output_ex$n_Diagnosed_by_ctime_severity/output_ex$n_COPD_by_ctime_severity)[,c(1,3,4,5,6)]
    names(prop) <- c("Year","GOLD1","GOLD2","GOLD3","GOLD4")
    prop <- prop[-1,]
    print(prop)
    message("The average proportion of GOLD 1 and 2 that are diagnosed from year", round(nrow(prop)/2,0), "to", max(prop$Year), "is",
          (mean(prop$GOLD1[round((nrow(prop)/2),0):nrow(prop)]) + mean(prop$GOLD2[round((nrow(prop)/2),0):nrow(prop)]))/2,"\n")
    prop.plot <- tidyr::gather(data=prop, key="GOLD", value="Proportion", c(GOLD1:GOLD4))
    prop.plotted <- ggplot2::ggplot(prop.plot, aes(x=Year, y=Proportion, col=GOLD)) +
    geom_line() + geom_point() + expand_limits(y = 0) +
    theme_bw() + ylab("Proportion diagnosed") + xlab("Years")
    plot(prop.plotted)
    terminate_session()
}
#' Returns results of validation tests for GP visits
#'
#' Runs the model and reports the average annual number of GP visits by sex,
#' by COPD severity (GOLD stage) and by diagnosis status, printing the tables
#' and plotting each series (year 1 is excluded from the plots).
#' @param n_sim number of agents
#' @return validation test results (printed and plotted; no value returned)
#' @export
validate_gpvisits <- function(n_sim = 1e+04) {
    message("Let's take a look at GP visits\n")
    petoc()
    settings <- default_settings
    settings$record_mode <- record_mode["record_mode_none"]
    settings$agent_stack_size <- 0
    settings$n_base_agents <- n_sim
    settings$event_stack_size <- 0
    init_session(settings = settings)
    input <- model_input$values
    res <- run(input = input)
    if (res < 0)
        stop("Execution stopped.\n")
    inputs <- Cget_inputs()
    output_ex <- Cget_output_ex()
    message("\n")
    message("Here is the Average number of GP visits by sex:\n")
    # GP visits per alive person-year, by sex.
    GPSex <- data.frame(1:inputs$global_parameters$time_horizon,
                     output_ex$n_GPvisits_by_ctime_sex/output_ex$n_alive_by_ctime_sex)
    names(GPSex) <- c("Year","Male","Female")
    print(GPSex)
    GPSex.plot <- tidyr::gather(data=GPSex, key="Sex", value="Visits", c(Male,Female))
    GPSex.plot <- subset(GPSex.plot, Year!=1)
    GPSex.plotted <- ggplot2::ggplot(GPSex.plot, aes(x=Year, y=Visits, col=Sex)) +
    geom_line() + geom_point() + expand_limits(y = 0) +
    theme_bw() + ylab("Average GP visits/year") + xlab("Years")
    plot(GPSex.plotted)
    message("\n")
    message("Here is the Average number of GP visits by COPD severity:\n")
    # GP visits per unit of cumulative person-time in each GOLD stage
    # (including the no-COPD column).
    GPCOPD <- data.frame(1:inputs$global_parameters$time_horizon,
                      output_ex$n_GPvisits_by_ctime_severity/output_ex$cumul_time_by_ctime_GOLD)
    names(GPCOPD) <- c("Year","NoCOPD","GOLD1","GOLD2","GOLD3","GOLD4")
    print(GPCOPD[-1,])
    GPCOPD.plot <- tidyr::gather(data=GPCOPD, key="COPD", value="Visits", c(NoCOPD:GOLD4))
    GPCOPD.plot <- subset(GPCOPD.plot, Year!=1)
    GPCOPD.plotted <- ggplot2::ggplot(GPCOPD.plot, aes(x=Year, y=Visits, col=COPD)) +
    geom_line() + geom_point() + expand_limits(y = 0) +
    theme_bw() + ylab("Average GP visits/year") + xlab("Years")
    plot(GPCOPD.plotted)
    message("\n")
    message("Here is the Average number of GP visits by COPD diagnosis status:\n")
    # Denominators: diagnosed person-time, and COPD person-time (GOLD 1-4)
    # minus the diagnosed part for the undiagnosed group.
    Diagnosed <- rowSums(output_ex$n_Diagnosed_by_ctime_sex)
    Undiagnosed <- rowSums(output_ex$cumul_time_by_ctime_GOLD[,2:5]) - Diagnosed
    data <- cbind(Undiagnosed, Diagnosed)
    GPDiag<- data.frame(Year=1:inputs$global_parameters$time_horizon,
                      output_ex$n_GPvisits_by_ctime_diagnosis/data)
    print(GPDiag[-1,])
    GPDiag.plot <- tidyr::gather(data=GPDiag, key="Diagnosis", value="Visits", c(Undiagnosed,Diagnosed))
    GPDiag.plot <- subset(GPDiag.plot, Year!=1)
    GPDiag.plotted <- ggplot2::ggplot(GPDiag.plot, aes(x=Year, y=Visits, col=Diagnosis)) +
    geom_line() + geom_point() + expand_limits(y = 0) +
    theme_bw() + ylab("Average GP visits/year") + xlab("Years")
    plot(GPDiag.plotted)
    message("\n")
    terminate_session()
}
#' Returns results of validation tests for Symptoms
#'
#' Runs the model and, for each of the four symptoms (cough, phlegm, wheeze,
#' dyspnea), prints and plots the proportion of patients with the symptom over
#' model time, stratified by GOLD stage. The same prepare/print/plot pattern
#' is repeated per symptom, and a combined faceted plot is drawn at the end.
#' @param n_sim number of agents
#' @return validation test results (printed and plotted; no value returned)
#' @export
validate_symptoms <- function(n_sim = 1e+04) {
    message("Let's take a look at symptoms\n")
    petoc()
    settings <- default_settings
    settings$record_mode <- record_mode["record_mode_none"]
    settings$agent_stack_size <- 0
    settings$n_base_agents <- n_sim
    settings$event_stack_size <- 0
    init_session(settings = settings)
    input <- model_input$values
    res <- run(input = input)
    if (res < 0)
        stop("Execution stopped.\n")
    inputs <- Cget_inputs()
    output_ex <- Cget_output_ex()
    # COUGH
    message("\n")
    message("I'm going to plot the prevalence of each symptom over time and by GOLD stage\n")
    message("\n")
    message("Cough:\n")
    message("\n")
    # Symptom prevalence = symptomatic count / COPD count, by year and severity.
    cough <- data.frame(1:inputs$global_parameters$time_horizon,
                      output_ex$n_cough_by_ctime_severity/output_ex$n_COPD_by_ctime_severity)
    names(cough) <- c("Year","NoCOPD","GOLD1","GOLD2","GOLD3","GOLD4")
    print(cough)
    # plot (only rendered in the combined figure below; see all.plotted)
    cough.plot <- tidyr::gather(data=cough, key="GOLD", value="Prevalence", NoCOPD:GOLD4)
    cough.plot$Symptom <- "cough"
    cough.plotted <- ggplot2::ggplot(cough.plot, aes(x=Year, y=Prevalence, col=GOLD)) +
    geom_smooth(method=lm, formula = y~x, level=0) + geom_point() + expand_limits(y = 0) +
    theme_bw() + ylab("Proportion with cough") + xlab("Model Year")
    #plot(cough.plotted)
    message("\n")
    # PHLEGM
    message("Phlegm:\n")
    message("\n")
    phlegm <- data.frame(1:inputs$global_parameters$time_horizon,
                       output_ex$n_phlegm_by_ctime_severity/output_ex$n_COPD_by_ctime_severity)
    names(phlegm) <- c("Year","NoCOPD","GOLD1","GOLD2","GOLD3","GOLD4")
    print(phlegm)
    # plot
    phlegm.plot <- tidyr::gather(data=phlegm, key="GOLD", value="Prevalence", NoCOPD:GOLD4)
    phlegm.plot$Symptom <- "phlegm"
    phlegm.plotted <- ggplot2::ggplot(phlegm.plot, aes(x=Year, y=Prevalence, col=GOLD)) +
    geom_smooth(method=lm, formula = y~x, level=0) + geom_point() + expand_limits(y = 0) +
    theme_bw() + ylab("Proportion with phlegm") + xlab("Model Year")
    #plot(phlegm.plotted)
    message("\n")
    # WHEEZE
    message("Wheeze:\n")
    message("\n")
    wheeze <- data.frame(1:inputs$global_parameters$time_horizon,
                       output_ex$n_wheeze_by_ctime_severity/output_ex$n_COPD_by_ctime_severity)
    names(wheeze) <- c("Year","NoCOPD","GOLD1","GOLD2","GOLD3","GOLD4")
    print(wheeze)
    # plot
    wheeze.plot <- tidyr::gather(data=wheeze, key="GOLD", value="Prevalence", NoCOPD:GOLD4)
    wheeze.plot$Symptom <- "wheeze"
    wheeze.plotted <- ggplot2::ggplot(wheeze.plot, aes(x=Year, y=Prevalence, col=GOLD)) +
    geom_smooth(method=lm, formula = y~x, level=0) + geom_point() + expand_limits(y = 0) +
    theme_bw() + ylab("Proportion with wheeze") + xlab("Model Year")
    #plot(wheeze.plotted)
    message("\n")
    # DYSPNEA
    message("Dyspnea:\n")
    message("\n")
    dyspnea <- data.frame(1:inputs$global_parameters$time_horizon,
                        output_ex$n_dyspnea_by_ctime_severity/output_ex$n_COPD_by_ctime_severity)
    names(dyspnea) <- c("Year","NoCOPD","GOLD1","GOLD2","GOLD3","GOLD4")
    print(dyspnea)
    # plot
    dyspnea.plot <- tidyr::gather(data=dyspnea, key="GOLD", value="Prevalence", NoCOPD:GOLD4)
    dyspnea.plot$Symptom <- "dyspnea"
    dyspnea.plotted <- ggplot2::ggplot(dyspnea.plot, aes(x=Year, y=Prevalence, col=GOLD)) +
    geom_smooth(method=lm, formula = y~x, level=0) + geom_point() + expand_limits(y = 0) +
    theme_bw() + ylab("Proportion with dyspnea") + xlab("Model Year")
    #plot(dyspnea.plotted)
    message("\n")
    message("All symptoms plotted together:\n")
    # Combined faceted plot of all four symptoms.
    all.plot <- rbind(cough.plot, phlegm.plot, wheeze.plot, dyspnea.plot)
    all.plotted <- ggplot2::ggplot(all.plot, aes(x=Year, y=Prevalence, col=GOLD)) +
    geom_smooth(method=lm, formula = y~x, level=0) + geom_point() + facet_wrap(~Symptom) +
    expand_limits(y = 0) + theme_bw() + ylab("Proportion with symptom") + xlab("Model Year")
    plot(all.plotted)
    terminate_session()
}
#' Returns results of validation tests for Treatment
#'
#' Runs three comparisons to check that treatment (initiated at diagnosis)
#' reduces exacerbations: (1) default run comparing exacerbation rates between
#' diagnosed and undiagnosed COPD patients; (2) a run with treatment effects
#' set to 0, reporting the increase in exacerbations among diagnosed patients;
#' (3) runs forcing everyone to be diagnosed vs. nobody diagnosed (via extreme
#' intercepts of +/-100 on the diagnosis logit models), comparing overall
#' exacerbation rates.
#' @param n_sim number of agents
#' @return validation test results (printed and plotted; no value returned)
#' @export
validate_treatment<- function(n_sim = 1e+04) {
    message("Let's make sure that treatment (which is initiated at diagnosis) is affecting the exacerbation rate.\n")
    petoc()
    settings <- default_settings
    settings$record_mode <- record_mode["record_mode_none"]
    settings$agent_stack_size <- 0
    settings$n_base_agents <- n_sim
    settings$event_stack_size <- 0
    init_session(settings = settings)
    input <- model_input$values
    res <- run(input = input)
    if (res < 0)
        stop("Execution stopped.\n")
    inputs <- Cget_inputs()
    output_ex <- Cget_output_ex()
    message("\n")
    message("Exacerbation rate for undiagnosed COPD patients.\n")
    message("\n")
    # Undiagnosed denominator: COPD count (GOLD > 0) minus diagnosed count.
    undiagnosed <- data.frame(cbind(1:inputs$global_parameters$time_horizon, output_ex$n_exac_by_ctime_severity_undiagnosed/
                                    (rowSums(output_ex$n_COPD_by_ctime_severity[,-1]) - rowSums(output_ex$n_Diagnosed_by_ctime_sex))))
    names(undiagnosed) <- c("Year","Mild","Moderate","Severe","VerySevere")
    print(undiagnosed)
    undiagnosed$Diagnosis <- "undiagnosed"
    message("\n")
    message("Exacerbation rate for diagnosed COPD patients.\n")
    message("\n")
    diagnosed <- data.frame(cbind(1:inputs$global_parameters$time_horizon,
                                  output_ex$n_exac_by_ctime_severity_diagnosed/rowSums(output_ex$n_Diagnosed_by_ctime_sex)))
    # Year 1 has no diagnosed person-time; zero the rates to avoid NaN/Inf.
    diagnosed[1,2:5] <- c(0,0,0,0)
    names(diagnosed) <- c("Year","Mild","Moderate","Severe","VerySevere")
    print(diagnosed)
    diagnosed$Diagnosis <- "diagnosed"
    # plot
    exac.plot <- tidyr::gather(data=rbind(undiagnosed, diagnosed), key="Exacerbation", value="Rate", Mild:VerySevere)
    exac.plotted <- ggplot2::ggplot(exac.plot, aes(x=Year, y=Rate, fill=Diagnosis)) +
    geom_bar(stat="identity", position="dodge") + facet_wrap(~Exacerbation, labeller=label_both) +
    scale_y_continuous(expand = c(0, 0)) +
    xlab("Model Year") + ylab("Annual rate of exacerbations") + theme_bw()
    plot(exac.plotted)
    message("\n")
    terminate_session()
    ### Comparison 2: same model but with treatment effects zeroed out.
    message("\n")
    message("Now, set the treatment effects to 0 and make sure the number of exacerbations increased among diagnosed patients.\n")
    message("\n")
    init_session(settings = settings)
    input_nt <- model_input$values
    # Zero the log hazard ratios of medication on exacerbation (no treatment effect).
    input_nt$medication$medication_ln_hr_exac <- rep(0, length(inputs$medication$medication_ln_hr_exac))
    res <- run(input = input_nt)
    if (res < 0)
        stop("Execution stopped.\n")
    inputs_nt <- Cget_inputs()
    output_ex_nt <- Cget_output_ex()
    # Difference in exacerbation counts among diagnosed patients: no-treatment minus default.
    exac.diff <- data.frame(cbind(1:inputs_nt$global_parameters$time_horizon,
                                  output_ex_nt$n_exac_by_ctime_severity_diagnosed - output_ex$n_exac_by_ctime_severity_diagnosed))
    names(exac.diff) <- c("Year","Mild","Moderate","Severe","VerySevere")
    message("Without treatment, there was an average of:\n")
    message(mean(exac.diff$Mild),"more mild exacerbations,\n")
    message(mean(exac.diff$Moderate),"more moderate exacerbations,\n")
    message(mean(exac.diff$Severe),"more severe exacerbations, and\n")
    message(mean(exac.diff$VerySevere),"more very severe exacerbations per year.\n")
    ### Comparison 3a: force nobody to be diagnosed (intercepts of -100 push the
    ### diagnosis logits to ~0 probability; other coefficients are kept as-is).
    message("\n")
    message("Now, set all COPD patients to diagnosed, then undiagnosed, and compare the exacerbation rates.\n")
    message("\n")
    init_session(settings = settings)
    input_nd <- model_input$values
    input_nd$diagnosis$logit_p_prevalent_diagnosis_by_sex <- cbind(male=c(intercept=-100, age=-0.0152, smoking=0.1068, fev1=-0.6146,
                                                                          cough=0.075, phlegm=0.283, wheeze=-0.0275, dyspnea=0.5414,
                                                                          case_detection=0),
                                                                   female=c(intercept=-100-0.1638, age=-0.0152, smoking=0.1068, fev1=-0.6146,
                                                                            cough=0.075, phlegm=0.283, wheeze=-0.0275, dyspnea=0.5414,
                                                                            case_detection=0))
    input_nd$diagnosis$p_hosp_diagnosis <- 0
    input_nd$diagnosis$logit_p_diagnosis_by_sex <- cbind(male=c(intercept=-100, age=-0.0324, smoking=0.3711, fev1=-0.8032,
                                                                gpvisits=0.0087, cough=0.208, phlegm=0.4088, wheeze=0.0321, dyspnea=0.722,
                                                                case_detection=0),
                                                         female=c(intercept=-100-0.4873, age=-0.0324, smoking=0.3711, fev1=-0.8032,
                                                                  gpvisits=0.0087, cough=0.208, phlegm=0.4088, wheeze=0.0321, dyspnea=0.722,
                                                                  case_detection=0))
    input_nd$diagnosis$logit_p_overdiagnosis_by_sex <- cbind(male=c(intercept=-100, age=0.0025, smoking=0.6911, gpvisits=0.0075,
                                                                    cough=0.7264, phlegm=0.7956, wheeze=0.66, dyspnea=0.8798,
                                                                    case_detection=0),
                                                             female=c(intercept=-100+0.2597, age=0.0025, smoking=0.6911, gpvisits=0.0075,
                                                                      cough=0.7264, phlegm=0.7956, wheeze=0.66, dyspnea=0.8798,
                                                                      case_detection=0))
    res <- run(input = input_nd)
    if (res < 0)
        stop("Execution stopped.\n")
    output_ex_nd <- Cget_output_ex()
    exac_rate_nodiag <- rowSums(output_ex_nd$n_exac_by_ctime_severity)/rowSums(output_ex_nd$n_COPD_by_ctime_sex)
    terminate_session()
    ### Comparison 3b: force everyone to be diagnosed (intercepts of +100).
    init_session(settings = settings)
    input_d <- model_input$values
    input_d$diagnosis$logit_p_prevalent_diagnosis_by_sex <- cbind(male=c(intercept=100, age=-0.0152, smoking=0.1068, fev1=-0.6146,
                                                                         cough=0.075, phlegm=0.283, wheeze=-0.0275, dyspnea=0.5414,
                                                                         case_detection=0),
                                                                  female=c(intercept=100-0.1638, age=-0.0152, smoking=0.1068, fev1=-0.6146,
                                                                           cough=0.075, phlegm=0.283, wheeze=-0.0275, dyspnea=0.5414,
                                                                           case_detection=0))
    input_d$diagnosis$p_hosp_diagnosis <- 1
    input_d$diagnosis$logit_p_diagnosis_by_sex <- cbind(male=c(intercept=100, age=-0.0324, smoking=0.3711, fev1=-0.8032,
                                                               gpvisits=0.0087, cough=0.208, phlegm=0.4088, wheeze=0.0321, dyspnea=0.722,
                                                               case_detection=0),
                                                        female=c(intercept=100-0.4873, age=-0.0324, smoking=0.3711, fev1=-0.8032,
                                                                 gpvisits=0.0087, cough=0.208, phlegm=0.4088, wheeze=0.0321, dyspnea=0.722,
                                                                 case_detection=0))
    res <- run(input = input_d)
    if (res < 0)
        stop("Execution stopped.\n")
    inputs_d <- Cget_inputs()
    output_ex_d <- Cget_output_ex()
    exac_rate_diag <- rowSums(output_ex_d$n_exac_by_ctime_severity)/rowSums(output_ex_d$n_COPD_by_ctime_sex)
    ## Relative reduction in exacerbation rate attributable to diagnosis/treatment.
    message("Annual exacerbation rate (this is also plotted):\n")
    message("\n")
    trt_effect<- data.frame(Year=1:inputs_d$global_parameters$time_horizon,
                            Diagnosed = exac_rate_diag,
                            Undiagnosed = exac_rate_nodiag)
    trt_effect$Delta <- (trt_effect$Undiagnosed - trt_effect$Diagnosed)/trt_effect$Undiagnosed
    print(trt_effect)
    message("\n")
    message("Treatment reduces the rate of exacerbations by a mean of:", mean(trt_effect$Delta),"\n")
    # plot
    trt.plot <- tidyr::gather(data=trt_effect, key="Diagnosis", value="Rate", Diagnosed:Undiagnosed)
    trt.plotted <- ggplot2::ggplot(trt.plot, aes(x=Year, y=Rate, col=Diagnosis)) +
    geom_line() + geom_point() + expand_limits(y = 0) +
    theme_bw() + ylab("Annual exacerbation rate") + xlab("Years")
    plot(trt.plotted)
    terminate_session()
}
#' Returns results of Case Detection strategies
#'
#' Runs the model twice — once with the requested case detection strategy and
#' once with case detection disabled — and compares total exacerbations,
#' annual exacerbation rates by GOLD stage, and the distribution of agents
#' across GOLD stages.
#' @param n_sim number of agents
#' @param p_of_CD probability of receiving case detection given that an agent meets the selection criteria
#' @param min_age minimum age that can receive case detection
#' @param min_pack_years minimum pack-years that can receive case detection
#' @param only_smokers set to 1 if only smokers should receive case detection
#' @param CD_method one of the case detection methods: "CDQ195", "CDQ165", "FlowMeter", "FlowMeter_CDQ"
#' @return results of case detection strategy compared to no case detection (printed and plotted)
#' @export
test_case_detection <- function(n_sim = 1e+04, p_of_CD=0.1, min_age=40, min_pack_years=0, only_smokers=0, CD_method="CDQ195") {
    message("Comparing a case detection strategy to no case detection.\n")
    petoc()
    settings <- default_settings
    settings$record_mode <- record_mode["record_mode_none"]
    # settings$agent_stack_size <- 0
    settings$n_base_agents <- n_sim
    settings$event_stack_size <- 0
    init_session(settings = settings)
    input <- model_input$values
    # Eligibility criteria and uptake probability for case detection.
    input$diagnosis$p_case_detection <- p_of_CD
    input$diagnosis$min_cd_age <- min_age
    input$diagnosis$min_cd_pack_years <- min_pack_years
    input$diagnosis$min_cd_smokers <-only_smokers
    # Plug the chosen method's coefficients into the case_detection term of the
    # diagnosis models (row 1 of case_detection_methods for diagnosis, row 2 for
    # overdiagnosis); remaining coefficients are the model defaults.
    input$diagnosis$logit_p_prevalent_diagnosis_by_sex <- cbind(male=c(intercept=1.0543, age=-0.0152, smoking=0.1068, fev1=-0.6146,
                                                                       cough=0.075, phlegm=0.283, wheeze=-0.0275, dyspnea=0.5414,
                                                                       case_detection=input$diagnosis$case_detection_methods[1,CD_method]),
                                                                female=c(intercept=1.0543-0.1638, age=-0.0152, smoking=0.1068, fev1=-0.6146,
                                                                         cough=0.075, phlegm=0.283, wheeze=-0.0275, dyspnea=0.5414,
                                                                         case_detection=input$diagnosis$case_detection_methods[1,CD_method]))
    input$diagnosis$logit_p_diagnosis_by_sex <- cbind(male=c(intercept=-2, age=-0.0324, smoking=0.3711, fev1=-0.8032,
                                                             gpvisits=0.0087, cough=0.208, phlegm=0.4088, wheeze=0.0321, dyspnea=0.722,
                                                             case_detection=input$diagnosis$case_detection_methods[1,CD_method]),
                                                      female=c(intercept=-2-0.4873, age=-0.0324, smoking=0.3711, fev1=-0.8032,
                                                               gpvisits=0.0087, cough=0.208, phlegm=0.4088, wheeze=0.0321, dyspnea=0.722,
                                                               case_detection=input$diagnosis$case_detection_methods[1,CD_method]))
    input$diagnosis$logit_p_overdiagnosis_by_sex <- cbind(male=c(intercept=-5.2169, age=0.0025, smoking=0.6911, gpvisits=0.0075,
                                                                 cough=0.7264, phlegm=0.7956, wheeze=0.66, dyspnea=0.8798,
                                                                 case_detection=input$diagnosis$case_detection_methods[2,CD_method]),
                                                          female=c(intercept=-5.2169+0.2597, age=0.0025, smoking=0.6911, gpvisits=0.0075,
                                                                   cough=0.7264, phlegm=0.7956, wheeze=0.66, dyspnea=0.8798,
                                                                   case_detection=input$diagnosis$case_detection_methods[2,CD_method]))
    message("\n")
    message("Here are your inputs for the case detection strategy:\n")
    message("\n")
    print(input$diagnosis)
    res <- run(input = input)
    if (res < 0)
        stop("Execution stopped.\n")
    inputs <- Cget_inputs()
    output <- Cget_output()
    output_ex <- Cget_output_ex()
    # Exacerbations (total counts by severity) under case detection.
    exac <- output$total_exac
    names(exac) <- c("Mild","Moderate","Severe","VerySevere")
    # rate: exacerbations per person-year within each GOLD stage.
    total.gold <- colSums(output_ex$n_COPD_by_ctime_severity[,2:5])
    names(total.gold) <- c("GOLD1","GOLD2","GOLD3","GOLD4")
    exac.gs <- data.frame(output_ex$n_exac_by_gold_severity)
    colnames(exac.gs) <- c("Mild","Moderate","Severe","VerySevere")
    exac_rate <- rbind(GOLD1=exac.gs[1,]/total.gold[1],
                       GOLD2=exac.gs[2,]/total.gold[2],
                       GOLD3=exac.gs[3,]/total.gold[3],
                       GOLD4=exac.gs[4,]/total.gold[4])
    exac_rate$CD <- "Case detection"
    exac_rate$GOLD <- rownames(exac_rate)
    # GOLD: average proportion of alive agents in each severity stage.
    gold <- data.frame(CD="Case detection",
                       Proportion=colMeans(output_ex$n_COPD_by_ctime_severity/rowSums(output_ex$n_alive_by_ctime_sex)))
    gold$GOLD <- c("NoCOPD","GOLD1","GOLD2","GOLD3","GOLD4")
    terminate_session()
    ## Rerunning with no case detection (all other inputs at defaults).
    init_session(settings = settings)
    input_nocd <- model_input$values
    input_nocd$diagnosis$p_case_detection <- 0
    message("\n")
    message("Now setting the probability of case detection to", input_nocd$diagnosis$p_case_detection, "and re-running the model\n")
    message("\n")
    res <- run(input = input_nocd)
    if (res < 0)
        stop("Execution stopped.\n")
    inputs_nocd <- Cget_inputs()
    output_nocd <- Cget_output()
    output_ex_nocd <- Cget_output_ex()
    # Exacerbations without case detection (same derivations as above).
    exac_nocd <- output_nocd$total_exac
    names(exac_nocd) <- c("Mild","Moderate","Severe","VerySevere")
    # rate
    total.gold_nocd <- colSums(output_ex_nocd$n_COPD_by_ctime_severity[,2:5])
    names(total.gold_nocd) <- c("GOLD1","GOLD2","GOLD3","GOLD4")
    exac.gs_nocd <- data.frame(output_ex_nocd$n_exac_by_gold_severity)
    colnames(exac.gs_nocd) <- c("Mild","Moderate","Severe","VerySevere")
    exac_rate_nocd <- rbind(GOLD1=exac.gs_nocd[1,]/total.gold_nocd[1],
                            GOLD2=exac.gs_nocd[2,]/total.gold_nocd[2],
                            GOLD3=exac.gs_nocd[3,]/total.gold_nocd[3],
                            GOLD4=exac.gs_nocd[4,]/total.gold_nocd[4])
    exac_rate_nocd$CD <- "No Case detection"
    exac_rate_nocd$GOLD <- rownames(exac_rate_nocd)
    # GOLD
    gold_nocd<- data.frame(CD="No case detection",
                           Proportion=colMeans(output_ex_nocd$n_COPD_by_ctime_severity/rowSums(output_ex_nocd$n_alive_by_ctime_sex)))
    gold_nocd$GOLD <- c("NoCOPD","GOLD1","GOLD2","GOLD3","GOLD4")
    ## Difference between CD and No CD
    # Exacerbations
    exac.diff <- data.frame(cbind(CD=exac, NOCD=exac_nocd))
    exac.diff$Delta <- exac.diff$CD - exac.diff$NOCD
    message("Here are total number of exacerbations by severity:\n")
    message("\n")
    print(exac.diff)
    message("\n")
    message("The annual rate of exacerbations with case detection is:\n")
    print(exac_rate[,1:4])
    message("\n")
    message("The annual rate of exacerbations without case detection is:\n")
    print(exac_rate_nocd[,1:4])
    message("\n")
    message("This data is also plotted.\n")
    #plot
    # NOTE(review): expand_scale() is deprecated in newer ggplot2 in favour of
    # expansion() — consider updating when the minimum ggplot2 version allows.
    exac.plot <- tidyr::gather(rbind(exac_rate, exac_rate_nocd), key="Exacerbation", value="Rate", Mild:VerySevere)
    exac.plotted <-ggplot2::ggplot(exac.plot, aes(x=Exacerbation, y=Rate, fill=CD)) +
    geom_bar(stat="identity", position="dodge") + facet_wrap(~GOLD, scales="free_y") +
    scale_y_continuous(expand = expand_scale(mult=c(0, 0.1))) +
    xlab("Exacerbation") + ylab("Annual rate of exacerbations") + theme_bw()
    exac.plotted <- exac.plotted + theme(axis.text.x=element_text(angle=45, hjust=1)) +
    theme(legend.title = element_blank())
    plot(exac.plotted)
    # GOLD
    # plot
    message("\n")
    message("The average proportion of agents in each gold stage is also plotted.\n")
    gold.plot <- rbind(gold, gold_nocd)
    gold.plot$GOLD <- factor(gold.plot$GOLD, levels=c("NoCOPD","GOLD1","GOLD2","GOLD3","GOLD4"))
    gold.plotted <- ggplot2::ggplot(gold.plot, aes(x=GOLD, y=Proportion, fill=CD)) +
    geom_bar(stat="identity", position="dodge") +
    scale_y_continuous(expand = c(0,0), limits=c(0,1)) +
    xlab("GOLD stage") + ylab("Average proportion") + theme_bw()
    gold.plotted <- gold.plotted + theme(legend.title = element_blank())
    plot(gold.plotted)
    message("\n")
    terminate_session()
}
#' Returns results of validation tests for overdiagnosis
#'
#' Runs the model once with default inputs and reports, for every model year,
#' the number of non-COPD subjects and how many of them are overdiagnosed,
#' both as a printed table and as a line plot.
#'
#' @param n_sim number of agents
#' @return validation test results
#' @export
validate_overdiagnosis <- function(n_sim = 1e+04) {
  message("Let's take a look at overdiagnosis\n")
  petoc()
  # Only aggregate output is needed, so event/agent recording is disabled.
  settings <- default_settings
  settings$record_mode <- record_mode["record_mode_none"]
  settings$agent_stack_size <- 0
  settings$n_base_agents <- n_sim
  settings$event_stack_size <- 0
  init_session(settings = settings)
  input <- model_input$values
  res <- run(input = input)
  if (res < 0)
    stop("Execution stopped.\n")
  inputs <- Cget_inputs()
  output_ex <- Cget_output_ex()
  message("Here are the proportion of non-COPD subjects overdiagnosed over model time: \n")
  # Column 1 of n_COPD_by_ctime_severity is assumed to be the non-COPD
  # count (severity stage 0) -- TODO confirm against the output layout.
  overdiag <- data.frame(Year = 1:inputs$global_parameters$time_horizon,
                         NonCOPD = output_ex$n_COPD_by_ctime_severity[, 1],
                         Overdiagnosed = rowSums(output_ex$n_Overdiagnosed_by_ctime_sex))
  overdiag$Proportion <- overdiag$Overdiagnosed / overdiag$NonCOPD
  print(overdiag)
  # FIX: message() concatenates its arguments without separators, so the
  # literals need explicit spaces to avoid output like "from year15to30is0.1".
  message("The average proportion overdiagnosed from year ",
          round(length(overdiag$Proportion) / 2, 0), " to ",
          length(overdiag$Proportion), " is ",
          mean(overdiag$Proportion[(round(length(overdiag$Proportion) / 2, 0)):(length(overdiag$Proportion))]),
          "\n")
  overdiag.plot <- tidyr::gather(data = overdiag, key = "Variable", value = "Number",
                                 c(NonCOPD, Overdiagnosed))
  overdiag.plotted <- ggplot2::ggplot(overdiag.plot, aes(x = Year, y = Number, col = Variable)) +
    geom_line() + geom_point() + expand_limits(y = 0) +
    theme_bw() + ylab("Number of non-COPD subjects") + xlab("Years")
  plot(overdiag.plotted)
  message("\n")
  terminate_session()
}
#' Returns results of validation tests for medication module.
#'
#' Runs the model with event recording enabled and plots, over model time,
#' the proportion of agents on each medication class, faceted by GOLD stage
#' plus an "all copd" facet aggregating stages > 0.
#'
#' @param n_sim number of agents
#' @return validation test results for medication
#' @export
validate_medication <- function(n_sim = 5e+04) {
  message("\n")
  # FIX: status message read "medimessageion usage" -- an artifact of a
  # mechanical cat->message text replacement inside the word "medication".
  message("Plotting medication usage over time:")
  message("\n")
  petoc()
  settings <- default_settings
  settings$record_mode <- record_mode["record_mode_event"]
  settings$agent_stack_size <- 0
  settings$n_base_agents <- n_sim
  # Rough upper bound on the number of recorded events per agent.
  settings$event_stack_size <- settings$n_base_agents * 1.7 * 30
  init_session(settings = settings)
  input <- model_input$values
  res <- run(input = input)
  if (res < 0)
    stop("Execution stopped.\n")
  all_events <- as.data.frame(Cget_all_events_matrix())
  all_annual_events <- all_events[all_events$event == 1, ]  # keep annual events only
  # Proportion on each medication class over time and by GOLD stage.
  all_annual_events$time <- floor(all_annual_events$local_time + all_annual_events$time_at_creation)
  med.plot <- all_annual_events %>%
    group_by(time, gold) %>%
    count(medication_status) %>%
    mutate(prop = n / sum(n))
  med.plot$gold <- as.character(med.plot$gold)
  # Overall proportions among COPD patients (gold > 0), appended as an extra
  # facet labelled "all copd". Note gold is already character here; the
  # comparison works because stages are single digits.
  copd <- med.plot %>%
    filter(gold > 0) %>%
    group_by(time, medication_status) %>%
    summarise(n = sum(n)) %>%
    mutate(prop = n / sum(n), gold = "all copd") %>%
    select(time, gold, everything())
  med.plot <- rbind(med.plot, copd)
  # medication_status appears to be a bit mask; only the combinations below
  # are labelled, anything else falls through to "9" -- TODO confirm encoding.
  med.plot$medication_status <- ifelse(med.plot$medication_status == 0, "none",
                                ifelse(med.plot$medication_status == 1, "SABA",
                                ifelse(med.plot$medication_status == 4, "LAMA",
                                ifelse(med.plot$medication_status == 6, "LAMA/LABA",
                                ifelse(med.plot$medication_status == 14, "ICS/LAMA/LABA", 9)))))
  med.plotted <- ggplot2::ggplot(data = med.plot, aes(x = time, y = prop, col = medication_status)) +
    geom_line() + facet_wrap(~gold, labeller = label_both) +
    expand_limits(y = 0) + theme_bw() + ylab("Proportion per medication class") + xlab("Years") +
    theme(legend.title = element_blank())
  plot(med.plotted)
  terminate_session()
}
|
#' Check result of exercise code
#'
#' \code{check_result()} compares the final result of the student code to known
#' \code{\link{pass_if}} and \code{\link{fail_if}} \code{\link{condition}}s.
#' If the student result exactly matches a known case, \code{check_result}
#' returns the matching message value.
#'
#' @param ... \code{\link{pass_if}} or \code{\link{fail_if}} \code{\link{condition}}s to check
#' @template correct
#' @template incorrect
#' @template grader_args
#' @template learnr_args
#' @template glue_correct
#' @template glue_incorrect
#'
#' @return a \code{\link{graded}} object from either
#'   \code{\link{pass_if}} or \code{\link{fail_if}} containing a formatted
#'   \code{correct} or \code{incorrect} message and whether or not a match was found.
#'
#' @seealso \code{\link{check_code}}, \code{\link{check_result}}, and \code{\link{test_result}}
#' @export
#' @examples
#' \dontrun{grading_demo()}
#'
#' @template check_result_examples
check_result <- function(...,
                         correct = NULL,
                         incorrect = NULL,
                         grader_args = list(),
                         learnr_args = list(),
                         glue_correct = getOption("gradethis_glue_correct"),
                         glue_incorrect = getOption("gradethis_glue_incorrect")) {
  conditions <- list(...)
  chkm8_item_class(conditions, "grader_condition")

  # At least one pass_if() condition must be supplied, otherwise the
  # exercise could never be graded as correct.
  has_correct <- vapply(conditions, `[[`, logical(1), "correct")
  if (!any(has_correct)) {
    stop("At least one correct result must be provided")
  }

  # Default outcome when no condition matches the student result.
  final_result <- graded(correct = FALSE, message = NULL)
  found_match <- FALSE

  # Evaluate conditions in order; the first one that matches wins.
  for (condition in conditions) {
    outcome <- evaluate_condition(condition, grader_args, learnr_args)
    if (!is.null(outcome)) {
      final_result <- outcome
      found_match <- TRUE
      break
    }
  }

  # Format the feedback with the glue template that matches the outcome.
  msg <- glue_message(
    if (final_result$correct) glue_correct else glue_incorrect,
    .is_match = found_match,
    .is_correct = final_result$correct,
    .message = final_result$message,
    .correct = correct,
    .incorrect = incorrect
  )

  graded(correct = final_result$correct, message = msg)
}
|
/R/check_result.R
|
no_license
|
garrettgman/gradethis
|
R
| false
| false
| 2,077
|
r
|
#' Check result of exercise code
#'
#' \code{check_result()} compares the final result of the student code to known
#' \code{\link{pass_if}} and \code{\link{fail_if}} \code{\link{condition}}s.
#' If the student result exactly matches a known case, \code{check_result}
#' returns the matching message value.
#'
#' @param ... \code{\link{pass_if}} or \code{\link{fail_if}} \code{\link{condition}}s to check
#' @template correct
#' @template incorrect
#' @template grader_args
#' @template learnr_args
#' @template glue_correct
#' @template glue_incorrect
#'
#' @return a \code{\link{graded}} object from either
#'   \code{\link{pass_if}} or \code{\link{fail_if}} containing a formatted
#'   \code{correct} or \code{incorrect} message and whether or not a match was found.
#'
#' @seealso \code{\link{check_code}}, \code{\link{check_result}}, and \code{\link{test_result}}
#' @export
#' @examples
#' \dontrun{grading_demo()}
#'
#' @template check_result_examples
check_result <- function(...,
                         correct = NULL,
                         incorrect = NULL,
                         grader_args = list(),
                         learnr_args = list(),
                         glue_correct = getOption("gradethis_glue_correct"),
                         glue_incorrect = getOption("gradethis_glue_incorrect")) {
  conditions <- list(...)
  chkm8_item_class(conditions, "grader_condition")

  # At least one pass_if() condition must be supplied, otherwise the
  # exercise could never be graded as correct.
  has_correct <- vapply(conditions, `[[`, logical(1), "correct")
  if (!any(has_correct)) {
    stop("At least one correct result must be provided")
  }

  # Default outcome when no condition matches the student result.
  final_result <- graded(correct = FALSE, message = NULL)
  found_match <- FALSE

  # Evaluate conditions in order; the first one that matches wins.
  for (condition in conditions) {
    outcome <- evaluate_condition(condition, grader_args, learnr_args)
    if (!is.null(outcome)) {
      final_result <- outcome
      found_match <- TRUE
      break
    }
  }

  # Format the feedback with the glue template that matches the outcome.
  msg <- glue_message(
    if (final_result$correct) glue_correct else glue_incorrect,
    .is_match = found_match,
    .is_correct = final_result$correct,
    .message = final_result$message,
    .correct = correct,
    .incorrect = incorrect
  )

  graded(correct = final_result$correct, message = msg)
}
|
# K-Means Clustering

# Importing the mall dataset
dataset <- read.csv('Mall_Customers.csv')
X <- dataset[4:5]  # columns 4:5 of Mall_Customers.csv -- presumably income and spending score

# Using the elbow method to find the optimal number of clusters
set.seed(6)
wcss <- vector()
for (i in 1:10) wcss[i] <- sum(kmeans(X, i)$withinss)
plot(1:10, wcss, type = 'b', main = paste('Clusters of clients'),
     xlab = 'Number of clusters', ylab = 'WCSS')

# Applying k-means to the mall dataset
set.seed(29)
# FIX: store the fit under its own name instead of `kmeans`, which shadowed
# stats::kmeans() for the rest of the session.
kmeans_fit <- kmeans(X, 5, iter.max = 300, nstart = 10)

# Visualizing the clusters
# install.packages('cluster')
library(cluster)
clusplot(X,
         kmeans_fit$cluster,
         lines = 0,
         shade = TRUE,
         color = TRUE,
         labels = 2,
         plotchar = FALSE,
         span = TRUE,
         main = paste('Cluster of clients'),
         xlab = "Annual Income",
         ylab = "Spending Score")
|
/Working Data K-Means.R
|
no_license
|
taksug229/R-K-means-clustering
|
R
| false
| false
| 862
|
r
|
# K-Means Clustering

# Importing the mall dataset
dataset <- read.csv('Mall_Customers.csv')
X <- dataset[4:5]  # columns 4:5 of Mall_Customers.csv -- presumably income and spending score

# Using the elbow method to find the optimal number of clusters
set.seed(6)
wcss <- vector()
for (i in 1:10) wcss[i] <- sum(kmeans(X, i)$withinss)
plot(1:10, wcss, type = 'b', main = paste('Clusters of clients'),
     xlab = 'Number of clusters', ylab = 'WCSS')

# Applying k-means to the mall dataset
set.seed(29)
# FIX: store the fit under its own name instead of `kmeans`, which shadowed
# stats::kmeans() for the rest of the session.
kmeans_fit <- kmeans(X, 5, iter.max = 300, nstart = 10)

# Visualizing the clusters
# install.packages('cluster')
library(cluster)
clusplot(X,
         kmeans_fit$cluster,
         lines = 0,
         shade = TRUE,
         color = TRUE,
         labels = 2,
         plotchar = FALSE,
         span = TRUE,
         main = paste('Cluster of clients'),
         xlab = "Annual Income",
         ylab = "Spending Score")
|
# Description ---------------
# Reference worth checking:
# https://www.datascience.com/blog/introduction-to-forecasting-with-arima-in-r-learn-data-science-tutorials
# Todo: the TBATS training data can be 3 weeks long in both runs (same model);
# the RF training set then needs its last week cut off (so the merge works,
# since the regressors only cover 3 months).
# Currently: first takes 2 weeks and predicts week 3, then 3 weeks and
# predicts week 4 (unnecessary, since the first two weeks are already biased).
# In this script
# - we will forecast the parking density
# Setup ----------------------------------------------
# Load required packages
library(data.table)
library(tidyverse)
library(ggplot2)
library(forecast)
library(tseries)
library(lubridate)
library(caret)
library(car)
# Clear workspace
rm(list=ls())
graphics.off()
# Use a minimal ggplot theme for every plot below.
theme_set(theme_minimal())
### REAL ### ----------------------------------
load("../Schramm, Cornelius - 02_Business_Analytics_Data/FinalDFKmean.RData")
# Merge with clusters (second path is an alternative location of the same file)
load("../02_Business_Analytics_Data/FinalDFKmean.RData")
# Choose cluster
choice = 17
# Filter one parking meter
parking_filtered = FinalDFKmean %>%
filter(cluster == choice)
# Plot
# ggplot(parking_filtered, aes(x=datetime, y=freeParkingSpaces)) +
# geom_line() +
# geom_hline(yintercept=parking_filtered[1,4])
# Training ----------
# msts (2 seasonalities): 10 observations per day and 10*6 per (6-day) week.
ts_kmc_2 = msts(parking_filtered$freeParkingSpaces, seasonal.periods = c(10,10*6),
start = decimal_date(as.POSIXct("2019-03-25 08:00:00")),
ts.frequency = 10*6*52)
# tbats model smoothing.
# NOTE(review): the variable `tbats` shadows forecast::tbats(); later calls
# still resolve to the function because R looks up functions separately,
# but a distinct name would be clearer.
tbats = tbats(ts_kmc_2)
#plot(tbats, main="Multiple Season Decomposition")
tbats.components(tbats)
# Predictions from the TBATS fit, one week ahead (h = 10 obs/day * 6 days).
sp = predict(tbats,h=10*6)
plot(sp, main = "TBATS Forecast")
# Testing tbat model on real data ------------------------
# Splitting and creating msts train and test
parking_filtered_train = parking_filtered[parking_filtered$datetime <= "2019-04-09",]
parking_filtered_test = parking_filtered[parking_filtered$datetime > "2019-04-09" & parking_filtered$datetime <= "2019-04-16",]
ts_kmc_train = msts(parking_filtered_train$freeParkingSpaces, seasonal.periods = c(10,10*6),
start = decimal_date(as.POSIXct("2019-03-25 08:00:00")),
ts.frequency = 10*6*52)
ts_kmc_test = msts(parking_filtered_test$freeParkingSpaces, seasonal.periods = c(10,10*6),
start = decimal_date(as.POSIXct("2019-04-09 08:00:00")),
ts.frequency = 10*6*52)
# Predictions
tbats_2 = tbats(ts_kmc_train)
preds = predict(tbats_2, h=10*6)
plot(preds, main = "TBATS Forecast")
lines(ts_kmc_test)
# preds$mean holds the point forecasts of the forecast object.
shinyPredsDF = as.data.frame(preds$mean)
# Create empty time Series to merge to shinyPredsDF
datetime = FinalDFKmean[FinalDFKmean$datetime >= "2019-04-09 08:00:00",]
datetime = datetime[datetime$cluster == 17,1]
datetime = datetime[c(1:60)]
# Cbind
shinyPredsDF = cbind(shinyPredsDF,datetime)
# Saving Predictions
#save.image(file = "../02_Business_Analytics_Data/shinyPredsDF.RData")
# NOTE(review): `tree_predict` and `ts_kmc` are NOT defined at this point when
# the script is run top to bottom -- the lines below only work after the loop
# further down has been executed at least once.
pred_stacked_rf = ts(tree_predict, start = decimal_date(as.POSIXct("2019-04-16 08:00:00")), frequency = 10*6*52)
preds_test_ts = ts(preds$mean, start = decimal_date(as.POSIXct("2019-04-16 08:00:00")), frequency = 10*6*52 )
plot(ts_kmc)
lines(preds_test_ts, col = "blue")
lines(pred_stacked_rf, col = "green")
# Model building GLM
model_glm = train(freeParkingSpaces ~ ., data=parking_filtered_train, method="glm")
glm_predict = predict(model_glm, newdata = parking_filtered_test)
plot(tree_predict ~ parking_filtered_test$freeParkingSpaces)
pred_stacked_glm = ts(glm_predict, start = decimal_date(as.POSIXct("2019-04-16 08:00:00")), frequency = 10*6*52)
plot(ts_kmc)
lines(preds_test_ts, col = "blue")
lines(pred_stacked_glm, col = "red")
lines(pred_stacked_rf, col = "green")
## Compare RMSE of the three approaches on the held-out week.
RMSE_rf = RMSE(tree_predict, parking_filtered_test$freeParkingSpaces)
RMSE_glm = RMSE(glm_predict, parking_filtered_test$freeParkingSpaces)
RMSE_ts = RMSE(preds$mean, parking_filtered_test$freeParkingSpaces)
#------- LOOP -------
# NOTE(review): `stop` shadows base::stop() for the rest of the session;
# a name like n_clusters would be safer.
stop = max(as.numeric(FinalDFKmean$cluster))
Result_TS_RSME = vector("numeric", stop)
Result_GLM_RSME = vector("numeric", stop)
Result_RF_RSME = vector("numeric", stop)
Result_AA_RSME = vector("numeric", stop)
# Cluster 1 is used once here only to shape the result containers and the
# Fourier regressors shared by every iteration of the loop below.
parking_filtered = FinalDFKmean %>%
filter(cluster == 1)
parking_filtered_train = parking_filtered[parking_filtered$datetime <= "2019-04-16",]
parking_filtered_test = parking_filtered[parking_filtered$datetime > "2019-04-16",]
ts_kmc_train = msts(parking_filtered_train$freeParkingSpaces, seasonal.periods = c(10,10*6),
start = decimal_date(as.POSIXct("2019-03-25 08:00:00")),
ts.frequency = 10*6*52)
ts_kmc_test = msts(parking_filtered_test$freeParkingSpaces, seasonal.periods = c(10,10*6),
start = decimal_date(as.POSIXct("2019-04-16 08:00:00")),
ts.frequency = 10*6*52)
# One result data frame per model family; first column is the timestamp.
DF_GLM = as.data.frame(parking_filtered_test$datetime)
DF_RF = as.data.frame(parking_filtered_test$datetime)
DF_TS = as.data.frame(parking_filtered_test$datetime)
DF_AA = as.data.frame(parking_filtered_test$datetime)
colnames(DF_GLM)[1] = "datetime"
colnames(DF_RF)[1] = "datetime"
colnames(DF_TS)[1] = "datetime"
colnames(DF_AA)[1] = "datetime"
# Harmonic (Fourier) regressors for the ARIMA model, K terms per season.
fourier_train = fourier(ts_kmc_train, K = c(2,4))
fourier_test = fourier(ts_kmc_test, K = c(2,4))
i = 1
# For each cluster: fit TBATS, stacked RF, stacked GLM and ARIMA+Fourier,
# collect one-week-ahead predictions and per-model RMSEs.
for (i in 1:stop) {
choice = i
parking_filtered = FinalDFKmean %>%
filter(cluster == choice)
# Drop column 17 -- presumably the cluster id; confirm against FinalDFKmean.
parking_filtered = parking_filtered[,-17]
ts_kmc = msts(parking_filtered$freeParkingSpaces, seasonal.periods = c(10,10*6),
start = decimal_date(as.POSIXct("2019-03-25 08:00:00")),
ts.frequency = 10*6*52)
# Predictions
tbats_2 = tbats(ts_kmc)
preds = predict(tbats_2, h=10*6)
# Stacking ----
# TBATS fitted values become an extra regressor for the RF/GLM models.
parking_filtered$preds = tbats_2$fitted.values
parking_filtered_train = parking_filtered[parking_filtered$datetime <= "2019-04-16",]
parking_filtered_test = parking_filtered[parking_filtered$datetime > "2019-04-16",]
# Model building RF
model_rf = train(freeParkingSpaces ~ ., data=parking_filtered_train, method="rf", ntree = 100) # ntree = 100 for performance reasons
tree_predict = predict(model_rf, newdata = parking_filtered_test)
pred_stacked_rf = ts(tree_predict, start = decimal_date(as.POSIXct("2019-04-16 08:00:00")), frequency = 10*6*52)
preds_test_ts = ts(preds$mean, start = decimal_date(as.POSIXct("2019-04-16 08:00:00")), frequency = 10*6*52 )
# Model building GLM
model_glm = train(freeParkingSpaces ~ ., data=parking_filtered_train, method="glm")
glm_predict = predict(model_glm, newdata = parking_filtered_test)
pred_stacked_glm = ts(glm_predict, start = decimal_date(as.POSIXct("2019-04-16 08:00:00")), frequency = 10*6*52)
# ARIMA Model Build (columns 5,7,9,12:15 are used as external regressors)
data_train = as.matrix(parking_filtered_train[c(5,7,9,12,13,14,15)])
data_test = as.matrix(parking_filtered_test[c(5,7,9,12,13,14,15)])
model_arima = auto.arima(ts_kmc_train, xreg = cbind(fourier_train,data_train), seasonal = T)
arima_predict = forecast(model_arima, xreg=cbind(fourier_test,data_test))
pred_arima_ts = ts(arima_predict$mean, start = decimal_date(as.POSIXct("2019-04-16 08:00:00")), frequency = 10*6*52 )
##Plotting: actuals plus all four prediction series
plot(ts_kmc, main = paste("Predicitions for Cluster ", i), xlab = "Date/Time in decimal", ylab = "Free parking spots")
lines(preds_test_ts, col = "blue")
lines(pred_stacked_glm, col = "red")
lines(pred_stacked_rf, col = "green4")
lines(pred_arima_ts, col = "orange")
legend("bottomleft", legend = c("TBATS Predicitions", "GLM Stacked Predicitions", "RF Stacked Predictions", "Auto Arima + Fourier Predictions"), col = c("blue", "red", "green4", "orange"),text.col = c("blue", "red", "green4", "orange"), bty = "n", cex = 0.8)
#Build DF with results: one column per cluster, named after the cluster id
DF_TS = cbind(DF_TS, as.vector(preds$mean))
colnames(DF_TS)[i+1] = i
DF_GLM = cbind(DF_GLM, glm_predict)
colnames(DF_GLM)[i+1] = i
DF_RF = cbind(DF_RF, tree_predict)
colnames(DF_RF)[i+1] = i
DF_AA = cbind(DF_AA, as.vector(arima_predict$mean))
colnames(DF_AA)[i+1] = i
## RMSE per model on the held-out week
Result_TS_RSME[i] = RMSE(preds$mean, parking_filtered_test$freeParkingSpaces)
Result_GLM_RSME[i] = RMSE(glm_predict, parking_filtered_test$freeParkingSpaces)
Result_RF_RSME[i] = RMSE(tree_predict, parking_filtered_test$freeParkingSpaces)
Result_AA_RSME[i] = RMSE(arima_predict$mean, parking_filtered_test$freeParkingSpaces)
# Redundant: the for loop manages `i` itself.
i = i+1
}
#----- End LOOP -----
# Total RMSE across all clusters
sum(Result_TS_RSME)
sum(Result_GLM_RSME)
sum(Result_RF_RSME)
sum(Result_AA_RSME)
# select Model for each cluster (row-wise minimum RMSE)
results = as.data.frame(cbind(Result_TS_RSME, Result_GLM_RSME, Result_RF_RSME, Result_AA_RSME))
colnames(results)[1:4] = c("TS", "GLM", "RF", "ARIMA")
best_model = vector("character", stop)
best_model = colnames(results)[apply(results,1,which.min)]
view(best_model)
# Save Plots (be carefull, all plots in session!)
plots.dir.path = list.files(tempdir(), pattern="rs-graphics", full.names = TRUE);
plots.png.paths = list.files(plots.dir.path, pattern=".png", full.names = TRUE)
file.copy(from=plots.png.paths, to="../Schramm, Cornelius - 02_Business_Analytics_Data/Graphs")
## Save Data Frames
save(DF_GLM,file = "../Schramm, Cornelius - 02_Business_Analytics_Data/results_glm.RData")
save(DF_RF,file = "../Schramm, Cornelius - 02_Business_Analytics_Data/results_rf.RData")
save(DF_TS,file = "../Schramm, Cornelius - 02_Business_Analytics_Data/results_ts.RData")
save(DF_AA,file = "../Schramm, Cornelius - 02_Business_Analytics_Data/results_aa.RData")
save(best_model, file = "../Schramm, Cornelius - 02_Business_Analytics_Data/best_model.RData")
# auto.arima TEST ---------------
# Set up harmonic regressors
fourier_train = fourier(ts_kmc_train, K = c(2,4))
fourier_test = fourier(ts_kmc_test, K = c(2,4))
data_train = as.matrix(parking_filtered_train[c(5,7,9,12,13,14,15)])
data_test = as.matrix(parking_filtered_test[c(5,7,9,12,13,14,15)])
# Fit regression model with ARIMA errors
model_arima = auto.arima(ts_kmc_train, xreg = cbind(fourier_train,data_train), seasonal = T)
pred_arima = forecast(model_arima, xreg=cbind(fourier_test,data_test))
# Plotting ------
plot(ts_kmc, main = paste("Predicitions for Cluster ", i), xlab = "Date/Time in decimal", ylab = "Free parking spots")
lines(preds_test_ts, col = "blue")
lines(pred_stacked_glm, col = "red")
lines(pred_stacked_rf, col = "green4")
lines(pred_arima$mean, col = "orange")
legend("bottomleft", legend = c("TBATS Predicitions", "GLM Stacked Predicitions", "RF Stacked Predictions", "Auto Arima + Fourier Predictions"), col = c("blue", "red", "green4", "orange"),text.col = c("blue", "red", "green4", "orange"), bty = "n", cex = 0.8)
|
/03_DataAnalysis_05.R
|
no_license
|
CorneliusSchramm/01_Scripts_BusinessAnalytics_ParkMe
|
R
| false
| false
| 10,931
|
r
|
# Description ---------------
# Reference worth checking:
# https://www.datascience.com/blog/introduction-to-forecasting-with-arima-in-r-learn-data-science-tutorials
# Todo: the TBATS training data can be 3 weeks long in both runs (same model);
# the RF training set then needs its last week cut off (so the merge works,
# since the regressors only cover 3 months).
# Currently: first takes 2 weeks and predicts week 3, then 3 weeks and
# predicts week 4 (unnecessary, since the first two weeks are already biased).
# In this script
# - we will forecast the parking density
# Setup ----------------------------------------------
# Load required packages
library(data.table)
library(tidyverse)
library(ggplot2)
library(forecast)
library(tseries)
library(lubridate)
library(caret)
library(car)
# Clear workspace
rm(list=ls())
graphics.off()
# Use a minimal ggplot theme for every plot below.
theme_set(theme_minimal())
### REAL ### ----------------------------------
load("../Schramm, Cornelius - 02_Business_Analytics_Data/FinalDFKmean.RData")
# Merge with clusters (second path is an alternative location of the same file)
load("../02_Business_Analytics_Data/FinalDFKmean.RData")
# Choose cluster
choice = 17
# Filter one parking meter
parking_filtered = FinalDFKmean %>%
filter(cluster == choice)
# Plot
# ggplot(parking_filtered, aes(x=datetime, y=freeParkingSpaces)) +
# geom_line() +
# geom_hline(yintercept=parking_filtered[1,4])
# Training ----------
# msts (2 seasonalities): 10 observations per day and 10*6 per (6-day) week.
ts_kmc_2 = msts(parking_filtered$freeParkingSpaces, seasonal.periods = c(10,10*6),
start = decimal_date(as.POSIXct("2019-03-25 08:00:00")),
ts.frequency = 10*6*52)
# tbats model smoothing.
# NOTE(review): the variable `tbats` shadows forecast::tbats(); later calls
# still resolve to the function because R looks up functions separately,
# but a distinct name would be clearer.
tbats = tbats(ts_kmc_2)
#plot(tbats, main="Multiple Season Decomposition")
tbats.components(tbats)
# Predictions from the TBATS fit, one week ahead (h = 10 obs/day * 6 days).
sp = predict(tbats,h=10*6)
plot(sp, main = "TBATS Forecast")
# Testing tbat model on real data ------------------------
# Splitting and creating msts train and test
parking_filtered_train = parking_filtered[parking_filtered$datetime <= "2019-04-09",]
parking_filtered_test = parking_filtered[parking_filtered$datetime > "2019-04-09" & parking_filtered$datetime <= "2019-04-16",]
ts_kmc_train = msts(parking_filtered_train$freeParkingSpaces, seasonal.periods = c(10,10*6),
start = decimal_date(as.POSIXct("2019-03-25 08:00:00")),
ts.frequency = 10*6*52)
ts_kmc_test = msts(parking_filtered_test$freeParkingSpaces, seasonal.periods = c(10,10*6),
start = decimal_date(as.POSIXct("2019-04-09 08:00:00")),
ts.frequency = 10*6*52)
# Predictions
tbats_2 = tbats(ts_kmc_train)
preds = predict(tbats_2, h=10*6)
plot(preds, main = "TBATS Forecast")
lines(ts_kmc_test)
# preds$mean holds the point forecasts of the forecast object.
shinyPredsDF = as.data.frame(preds$mean)
# Create empty time Series to merge to shinyPredsDF
datetime = FinalDFKmean[FinalDFKmean$datetime >= "2019-04-09 08:00:00",]
datetime = datetime[datetime$cluster == 17,1]
datetime = datetime[c(1:60)]
# Cbind
shinyPredsDF = cbind(shinyPredsDF,datetime)
# Saving Predictions
#save.image(file = "../02_Business_Analytics_Data/shinyPredsDF.RData")
# NOTE(review): `tree_predict` and `ts_kmc` are NOT defined at this point when
# the script is run top to bottom -- the lines below only work after the loop
# further down has been executed at least once.
pred_stacked_rf = ts(tree_predict, start = decimal_date(as.POSIXct("2019-04-16 08:00:00")), frequency = 10*6*52)
preds_test_ts = ts(preds$mean, start = decimal_date(as.POSIXct("2019-04-16 08:00:00")), frequency = 10*6*52 )
plot(ts_kmc)
lines(preds_test_ts, col = "blue")
lines(pred_stacked_rf, col = "green")
# Model building GLM
model_glm = train(freeParkingSpaces ~ ., data=parking_filtered_train, method="glm")
glm_predict = predict(model_glm, newdata = parking_filtered_test)
plot(tree_predict ~ parking_filtered_test$freeParkingSpaces)
pred_stacked_glm = ts(glm_predict, start = decimal_date(as.POSIXct("2019-04-16 08:00:00")), frequency = 10*6*52)
plot(ts_kmc)
lines(preds_test_ts, col = "blue")
lines(pred_stacked_glm, col = "red")
lines(pred_stacked_rf, col = "green")
## Compare RMSE of the three approaches on the held-out week.
RMSE_rf = RMSE(tree_predict, parking_filtered_test$freeParkingSpaces)
RMSE_glm = RMSE(glm_predict, parking_filtered_test$freeParkingSpaces)
RMSE_ts = RMSE(preds$mean, parking_filtered_test$freeParkingSpaces)
#------- LOOP -------
# NOTE(review): `stop` shadows base::stop() for the rest of the session;
# a name like n_clusters would be safer.
stop = max(as.numeric(FinalDFKmean$cluster))
Result_TS_RSME = vector("numeric", stop)
Result_GLM_RSME = vector("numeric", stop)
Result_RF_RSME = vector("numeric", stop)
Result_AA_RSME = vector("numeric", stop)
# Cluster 1 is used once here only to shape the result containers and the
# Fourier regressors shared by every iteration of the loop below.
parking_filtered = FinalDFKmean %>%
filter(cluster == 1)
parking_filtered_train = parking_filtered[parking_filtered$datetime <= "2019-04-16",]
parking_filtered_test = parking_filtered[parking_filtered$datetime > "2019-04-16",]
ts_kmc_train = msts(parking_filtered_train$freeParkingSpaces, seasonal.periods = c(10,10*6),
start = decimal_date(as.POSIXct("2019-03-25 08:00:00")),
ts.frequency = 10*6*52)
ts_kmc_test = msts(parking_filtered_test$freeParkingSpaces, seasonal.periods = c(10,10*6),
start = decimal_date(as.POSIXct("2019-04-16 08:00:00")),
ts.frequency = 10*6*52)
# One result data frame per model family; first column is the timestamp.
DF_GLM = as.data.frame(parking_filtered_test$datetime)
DF_RF = as.data.frame(parking_filtered_test$datetime)
DF_TS = as.data.frame(parking_filtered_test$datetime)
DF_AA = as.data.frame(parking_filtered_test$datetime)
colnames(DF_GLM)[1] = "datetime"
colnames(DF_RF)[1] = "datetime"
colnames(DF_TS)[1] = "datetime"
colnames(DF_AA)[1] = "datetime"
# Harmonic (Fourier) regressors for the ARIMA model, K terms per season.
fourier_train = fourier(ts_kmc_train, K = c(2,4))
fourier_test = fourier(ts_kmc_test, K = c(2,4))
i = 1
# For each cluster: fit TBATS, stacked RF, stacked GLM and ARIMA+Fourier,
# collect one-week-ahead predictions and per-model RMSEs.
for (i in 1:stop) {
choice = i
parking_filtered = FinalDFKmean %>%
filter(cluster == choice)
# Drop column 17 -- presumably the cluster id; confirm against FinalDFKmean.
parking_filtered = parking_filtered[,-17]
ts_kmc = msts(parking_filtered$freeParkingSpaces, seasonal.periods = c(10,10*6),
start = decimal_date(as.POSIXct("2019-03-25 08:00:00")),
ts.frequency = 10*6*52)
# Predictions
tbats_2 = tbats(ts_kmc)
preds = predict(tbats_2, h=10*6)
# Stacking ----
# TBATS fitted values become an extra regressor for the RF/GLM models.
parking_filtered$preds = tbats_2$fitted.values
parking_filtered_train = parking_filtered[parking_filtered$datetime <= "2019-04-16",]
parking_filtered_test = parking_filtered[parking_filtered$datetime > "2019-04-16",]
# Model building RF
model_rf = train(freeParkingSpaces ~ ., data=parking_filtered_train, method="rf", ntree = 100) # ntree = 100 for performance reasons
tree_predict = predict(model_rf, newdata = parking_filtered_test)
pred_stacked_rf = ts(tree_predict, start = decimal_date(as.POSIXct("2019-04-16 08:00:00")), frequency = 10*6*52)
preds_test_ts = ts(preds$mean, start = decimal_date(as.POSIXct("2019-04-16 08:00:00")), frequency = 10*6*52 )
# Model building GLM
model_glm = train(freeParkingSpaces ~ ., data=parking_filtered_train, method="glm")
glm_predict = predict(model_glm, newdata = parking_filtered_test)
pred_stacked_glm = ts(glm_predict, start = decimal_date(as.POSIXct("2019-04-16 08:00:00")), frequency = 10*6*52)
# ARIMA Model Build (columns 5,7,9,12:15 are used as external regressors)
data_train = as.matrix(parking_filtered_train[c(5,7,9,12,13,14,15)])
data_test = as.matrix(parking_filtered_test[c(5,7,9,12,13,14,15)])
model_arima = auto.arima(ts_kmc_train, xreg = cbind(fourier_train,data_train), seasonal = T)
arima_predict = forecast(model_arima, xreg=cbind(fourier_test,data_test))
pred_arima_ts = ts(arima_predict$mean, start = decimal_date(as.POSIXct("2019-04-16 08:00:00")), frequency = 10*6*52 )
##Plotting: actuals plus all four prediction series
plot(ts_kmc, main = paste("Predicitions for Cluster ", i), xlab = "Date/Time in decimal", ylab = "Free parking spots")
lines(preds_test_ts, col = "blue")
lines(pred_stacked_glm, col = "red")
lines(pred_stacked_rf, col = "green4")
lines(pred_arima_ts, col = "orange")
legend("bottomleft", legend = c("TBATS Predicitions", "GLM Stacked Predicitions", "RF Stacked Predictions", "Auto Arima + Fourier Predictions"), col = c("blue", "red", "green4", "orange"),text.col = c("blue", "red", "green4", "orange"), bty = "n", cex = 0.8)
#Build DF with results: one column per cluster, named after the cluster id
DF_TS = cbind(DF_TS, as.vector(preds$mean))
colnames(DF_TS)[i+1] = i
DF_GLM = cbind(DF_GLM, glm_predict)
colnames(DF_GLM)[i+1] = i
DF_RF = cbind(DF_RF, tree_predict)
colnames(DF_RF)[i+1] = i
DF_AA = cbind(DF_AA, as.vector(arima_predict$mean))
colnames(DF_AA)[i+1] = i
## RMSE per model on the held-out week
Result_TS_RSME[i] = RMSE(preds$mean, parking_filtered_test$freeParkingSpaces)
Result_GLM_RSME[i] = RMSE(glm_predict, parking_filtered_test$freeParkingSpaces)
Result_RF_RSME[i] = RMSE(tree_predict, parking_filtered_test$freeParkingSpaces)
Result_AA_RSME[i] = RMSE(arima_predict$mean, parking_filtered_test$freeParkingSpaces)
# Redundant: the for loop manages `i` itself.
i = i+1
}
#----- End LOOP -----
# Total RMSE across all clusters
sum(Result_TS_RSME)
sum(Result_GLM_RSME)
sum(Result_RF_RSME)
sum(Result_AA_RSME)
# select Model for each cluster (row-wise minimum RMSE)
results = as.data.frame(cbind(Result_TS_RSME, Result_GLM_RSME, Result_RF_RSME, Result_AA_RSME))
colnames(results)[1:4] = c("TS", "GLM", "RF", "ARIMA")
best_model = vector("character", stop)
best_model = colnames(results)[apply(results,1,which.min)]
view(best_model)
# Save Plots (be carefull, all plots in session!)
plots.dir.path = list.files(tempdir(), pattern="rs-graphics", full.names = TRUE);
plots.png.paths = list.files(plots.dir.path, pattern=".png", full.names = TRUE)
file.copy(from=plots.png.paths, to="../Schramm, Cornelius - 02_Business_Analytics_Data/Graphs")
## Save Data Frames
save(DF_GLM,file = "../Schramm, Cornelius - 02_Business_Analytics_Data/results_glm.RData")
save(DF_RF,file = "../Schramm, Cornelius - 02_Business_Analytics_Data/results_rf.RData")
save(DF_TS,file = "../Schramm, Cornelius - 02_Business_Analytics_Data/results_ts.RData")
save(DF_AA,file = "../Schramm, Cornelius - 02_Business_Analytics_Data/results_aa.RData")
save(best_model, file = "../Schramm, Cornelius - 02_Business_Analytics_Data/best_model.RData")
# auto.arima TEST ---------------
# Set up harmonic regressors
fourier_train = fourier(ts_kmc_train, K = c(2,4))
fourier_test = fourier(ts_kmc_test, K = c(2,4))
data_train = as.matrix(parking_filtered_train[c(5,7,9,12,13,14,15)])
data_test = as.matrix(parking_filtered_test[c(5,7,9,12,13,14,15)])
# Fit regression model with ARIMA errors
model_arima = auto.arima(ts_kmc_train, xreg = cbind(fourier_train,data_train), seasonal = T)
pred_arima = forecast(model_arima, xreg=cbind(fourier_test,data_test))
# Plotting ------
plot(ts_kmc, main = paste("Predicitions for Cluster ", i), xlab = "Date/Time in decimal", ylab = "Free parking spots")
lines(preds_test_ts, col = "blue")
lines(pred_stacked_glm, col = "red")
lines(pred_stacked_rf, col = "green4")
lines(pred_arima$mean, col = "orange")
legend("bottomleft", legend = c("TBATS Predicitions", "GLM Stacked Predicitions", "RF Stacked Predictions", "Auto Arima + Fourier Predictions"), col = c("blue", "red", "green4", "orange"),text.col = c("blue", "red", "green4", "orange"), bty = "n", cex = 0.8)
|
# Load the full deck of cards from CSV (expects deck.csv in the working directory).
deck <- read.csv('deck.csv')
# Deal the top card: returns the first row of the deck as a one-row data frame.
deal <- function(cards) {
  head(cards, n = 1)
}
|
/playing_cards.R
|
no_license
|
umairrafique85/Playing_cards
|
R
| false
| false
| 69
|
r
|
# Load the full deck of cards from CSV (expects deck.csv in the working directory).
deck <- read.csv('deck.csv')
# Deal the top card: returns the first row of the deck as a one-row data frame.
deal <- function(cards) {
  head(cards, n = 1)
}
|
# Auto-generated fuzzer regression case: a 9x6 double matrix that is all zeros
# except one very small value at position [1, 1].
testlist <- list(x = structure(c(2.2202775176633e-271, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(9L, 6L)))
# Exercise the internal (unexported) column-sum-of-squares kernel with the fuzzed input.
result <- do.call(bravo:::colSumSq_matrix,testlist)
# Print the result structure for the valgrind/libFuzzer log.
str(result)
|
/bravo/inst/testfiles/colSumSq_matrix/libFuzzer_colSumSq_matrix/colSumSq_matrix_valgrind_files/1609958974-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 300
|
r
|
# Auto-generated fuzzer regression case: a 9x6 double matrix that is all zeros
# except one very small value at position [1, 1].
testlist <- list(x = structure(c(2.2202775176633e-271, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(9L, 6L)))
# Exercise the internal (unexported) column-sum-of-squares kernel with the fuzzed input.
result <- do.call(bravo:::colSumSq_matrix,testlist)
# Print the result structure for the valgrind/libFuzzer log.
str(result)
|
# Working directory is machine-specific; consider an RStudio project or
# here::here() instead of a hard-coded absolute path.
setwd("E:\\Harddrive\\OneDrive - Lund University\\Mastern\\Spring 2020\\NEKN34 Time Series Analysis\\Assignments\\Ass 3\\Assignment-3-TS")
getwd()
# FIX: removed stray install.packages("") -- an empty package name is invalid
# and would abort the script on a fresh session:
# install.packages("")
library(vars)
library(urca)
library(tseries)
library(tsDyn)
library(lmtest)
library(car)
library(data.table) # used for shifting, aka lagging
library(dynlm)
library(readxl)
library(ggplot2)
# Monthly industrial production (ip) and money supply (m1) series.
data = read_excel("MoneyIncome.xlsx")
#tests for stationarity----
# Visual inspection of the raw series, then ADF unit-root tests on the levels
# and on the first differences of ip and m1.
plot(data$t, data$ip)
plot(data$t, data$m1)
adf.test(data$ip)
adf.test(data$m1)
summary(data)
plot(diff(data$ip))
plot(diff(data$m1))
adf.test(diff(data$ip))
adf.test(diff(data$m1))
# Scale factor so m1 and ip can share one plot via a secondary axis.
coeff <- 10
ggplot(data, aes(x =t))+
geom_line(aes(y = ip))
#make them identifiable
ggplot(data, aes(x=t)) +
geom_line( aes(y=ip, colour = "Industrial Production")) +
geom_line( aes(y=m1 / coeff, colour = "Money/10")) +
scale_y_continuous(
name = "Money/10",
sec.axis = sec_axis(~.*coeff, name="Industrial Production")) +
theme(legend.position= c(0.5, 0.9)) +
xlab("Date")
#testing for cointegration----
# Lag selection, Johansen trace test, then a restricted VECM (rank 1).
VARselect(data[,2:3], lag.max = 10, type = "const", season = 12)$select #displays a bunch of information criterion values, 10 seems to be a good number of lags
cointest = ca.jo(data[,2:3], type = "trace", ecdet = "const", season = 12)
summary(cointest) #from this we can see that the test-statistic is much greater than the critical values, at the 5% level, we have two cointegrating variables
vecm = cajorls(cointest, r = 1)
vecm$rlm #really strange because it shows that the "error correction term" are almost zero. ("error correction term" = "speed of adjustment" in our notes)
vecm$beta
#split the data into pre, post and during the 80s as per the literature
# BUG FIX: length(data) on a data frame returns the number of COLUMNS (3
# here), so 373:length(data) produced the *descending* index 373:3 and
# silently reversed/duplicated rows. nrow(data) is the intended row count.
datapre80 = data[1:252,]
datapost80 = data[373:nrow(data),]
# NOTE(review): "during the 80s" arguably should be rows 253:372 (between the
# pre- and post-1980 splits); kept as 253:end pending confirmation.
data80 = data[253:nrow(data),]
#full dataset
# VECM (rank 1, 10 lags, two-step OLS) on the full sample and the subsamples.
vecm_full = VECM(data[,2:3], lag = 10, r = 1, estim = "2OLS")
summary(vecm_full) # the interpretation of the first value of "m1 -3" is that m1 from 3 periods ago affects current ip by -0.0043
#pre 80s dataset
vecm_pre = VECM(datapre80[,2:3], lag = 10, r = 1, estim = "2OLS")
summary(vecm_pre)
#post 80s dataset
vecm_post = VECM(datapost80[,2:3], lag = 10, r = 1, estim = "2OLS")
summary(vecm_post)
#during 80s dataset
vecm_80 = VECM(data80[,2:3], lag = 10, r = 1, estim = "2OLS")
summary(vecm_80)
# how VECM interprets significance codes: ** = 0.001 * = 0.01 . = 0.05
#VAR-----
# VARs on first differences plus Granger-causality tests in both directions,
# for the full sample and each subsample.
Var_full = VAR(diff(ts(data[,2:3])), p =10 , type = "const", season = 12)
summary(Var_full)
causality(Var_full, cause = "m1") #the one of interest, m1 DOES cause ip
causality(Var_full, cause = "ip") #aux causality, ip causes m1
Var_pre = VAR(diff(ts(datapre80[,2:3])), p =10 , type = "const", season = 12)
summary(Var_pre)
causality(Var_pre, cause = "m1") # m1 does not cause ip
causality(Var_pre, cause = "ip")
Var_post = VAR(diff(ts(datapost80[,2:3])), p =10 , type = "const", season = 12)
summary(Var_post)
causality(Var_post, cause = "m1") # m1 does not cause ip
causality(Var_post, cause = "ip")
Var_80 = VAR(diff(ts(data80[,2:3])), p =10 , type = "const", season = 12)
summary(Var_80)
causality(Var_80, cause = "m1") # m1 DOES cause ip
causality(Var_80, cause = "ip")
#previous stuff & possible trash-----
# Exploratory leftovers kept by the author; not part of the main analysis.
data[,2:3]
po.test(data[,2:3])
tsdata = ts(data)
VectorError = VECM(tsdata[,2:3], lag = 1, r = 1, include = "const") #VECM MUST have time series
summary(VectorError)
dl = dynlm(ip ~ m1 + m1lag1, data = tsdata)
plot(dl$residuals)
###-----
#testing if VAR might be any good, forgot to take first difference on the linear regression parts :(
ddata = data.frame(diff(data$ip), diff(data$m1), data$t[1:731]) #creates differenced data and adds the time
plot(ddata$diff.data.ip.)
plot(ddata$diff.data.m1.)
test = data
test$m1lag1 = shift(test$m1)
test$m1lag2 = shift(test$m1lag1)
test$iplag1 = shift(test$ip)
test$iplag2 = shift(test$iplag1)
dl = dynlm(ip ~ m1 + m1lag1 + m1lag2 + iplag1 + iplag2, data = test)
summary(dl)
grangertest(data$ip, data$m1, 2)
linearHypothesis(dl,"m1lag1 - m1lag2" )
naive = lm(ip ~ m1, data = data)
plot(naive$residuals)
print(data[,2:3])
var1 = VAR(data[,2:3],p = 2, type = "const")
print(var1)
summary(var1)
#TRASH TRASH TRASH TRASH--------
datapre80 = data[1:252,]
VARselect(datapre80[,2:3], lag.max = 12, type = "const", season = 12)$select #displays a bunch of information criterion values, 10 seems to be a good number
cointestpre80 = ca.jo(datapre80[,2:3], type = "trace", ecdet = "const", season = 12)
cointestpre80@cval
cointestpre80@teststat[1] #H0: r = 0 is not rejected at % level, critical value is 19.96
#post 1980s
# NOTE(review): length(data) on a data frame is the number of columns (3), so
# 373:length(data) counts DOWN from 373 to 3; should be nrow(data).
datapost80 = data[373:length(data),]
VARselect(datapost80[,2:3], lag.max = 12, type = "const", season = 12)$select #displays a bunch of information criterion values, 10 seems to be a good number
cointestpost80 = ca.jo(datapost80[,2:3], type = "trace", ecdet = "const", season = 12)
cointestpost80@cval
cointestpost80@teststat[1] #H0: r = 0 is not rejected at 5% level, critical value is 19.96
|
/MainCodingFile.R
|
no_license
|
Supersoppan/Assignment-3-TS
|
R
| false
| false
| 4,993
|
r
|
# --- Setup: working directory, packages, data --------------------------------
# NOTE(review): the hard-coded absolute setwd() makes the script
# machine-specific; an RStudio project / relative paths would be portable.
setwd("E:\\Harddrive\\OneDrive - Lund University\\Mastern\\Spring 2020\\NEKN34 Time Series Analysis\\Assignments\\Ass 3\\Assignment-3-TS")
getwd()
# BUG FIX: removed leftover install.packages("") — an empty package name
# errors and aborts the script when sourced. Install interactively instead,
# e.g. install.packages(c("vars", "urca", "tseries", "tsDyn")).
library(vars)       # VAR modelling, VARselect, causality
library(urca)       # ca.jo Johansen cointegration test
library(tseries)    # adf.test, po.test
library(tsDyn)      # VECM estimation
library(lmtest)     # grangertest
library(car)        # linearHypothesis
library(data.table) #used for shifting, aka lagging
library(dynlm)      # dynamic linear models
library(readxl)     # read_excel
library(ggplot2)    # plotting
data = read_excel("MoneyIncome.xlsx")
#tests for stationarity----
# Visual inspection of the raw series, then ADF unit-root tests on the levels
# and on the first differences of ip and m1.
plot(data$t, data$ip)
plot(data$t, data$m1)
adf.test(data$ip)
adf.test(data$m1)
summary(data)
plot(diff(data$ip))
plot(diff(data$m1))
adf.test(diff(data$ip))
adf.test(diff(data$m1))
# Scale factor so m1 and ip can share one plot via a secondary axis.
coeff <- 10
ggplot(data, aes(x =t))+
geom_line(aes(y = ip))
#make them identifiable
ggplot(data, aes(x=t)) +
geom_line( aes(y=ip, colour = "Industrial Production")) +
geom_line( aes(y=m1 / coeff, colour = "Money/10")) +
scale_y_continuous(
name = "Money/10",
sec.axis = sec_axis(~.*coeff, name="Industrial Production")) +
theme(legend.position= c(0.5, 0.9)) +
xlab("Date")
#testing for cointegration----
# Lag selection, Johansen trace test, then a restricted VECM (rank 1).
VARselect(data[,2:3], lag.max = 10, type = "const", season = 12)$select #displays a bunch of information criterion values, 10 seems to be a good number of lags
cointest = ca.jo(data[,2:3], type = "trace", ecdet = "const", season = 12)
summary(cointest) #from this we can see that the test-statistic is much greater than the critical values, at the 5% level, we have two cointegrating variables
vecm = cajorls(cointest, r = 1)
vecm$rlm #really strange because it shows that the "error correction term" are almost zero. ("error correction term" = "speed of adjustment" in our notes)
vecm$beta
#split the data into pre, post and during the 80s as per the literature
# BUG FIX: length(data) on a data frame returns the number of COLUMNS (3
# here), so 373:length(data) produced the *descending* index 373:3 and
# silently reversed/duplicated rows. nrow(data) is the intended row count.
datapre80 = data[1:252,]
datapost80 = data[373:nrow(data),]
# NOTE(review): "during the 80s" arguably should be rows 253:372 (between the
# pre- and post-1980 splits); kept as 253:end pending confirmation.
data80 = data[253:nrow(data),]
#full dataset
# VECM (rank 1, 10 lags, two-step OLS) on the full sample and the subsamples.
vecm_full = VECM(data[,2:3], lag = 10, r = 1, estim = "2OLS")
summary(vecm_full) # the interpretation of the first value of "m1 -3" is that m1 from 3 periods ago affects current ip by -0.0043
#pre 80s dataset
vecm_pre = VECM(datapre80[,2:3], lag = 10, r = 1, estim = "2OLS")
summary(vecm_pre)
#post 80s dataset
vecm_post = VECM(datapost80[,2:3], lag = 10, r = 1, estim = "2OLS")
summary(vecm_post)
#during 80s dataset
vecm_80 = VECM(data80[,2:3], lag = 10, r = 1, estim = "2OLS")
summary(vecm_80)
# how VECM interprets significance codes: ** = 0.001 * = 0.01 . = 0.05
#VAR-----
# VARs on first differences plus Granger-causality tests in both directions,
# for the full sample and each subsample.
Var_full = VAR(diff(ts(data[,2:3])), p =10 , type = "const", season = 12)
summary(Var_full)
causality(Var_full, cause = "m1") #the one of interest, m1 DOES cause ip
causality(Var_full, cause = "ip") #aux causality, ip causes m1
Var_pre = VAR(diff(ts(datapre80[,2:3])), p =10 , type = "const", season = 12)
summary(Var_pre)
causality(Var_pre, cause = "m1") # m1 does not cause ip
causality(Var_pre, cause = "ip")
Var_post = VAR(diff(ts(datapost80[,2:3])), p =10 , type = "const", season = 12)
summary(Var_post)
causality(Var_post, cause = "m1") # m1 does not cause ip
causality(Var_post, cause = "ip")
Var_80 = VAR(diff(ts(data80[,2:3])), p =10 , type = "const", season = 12)
summary(Var_80)
causality(Var_80, cause = "m1") # m1 DOES cause ip
causality(Var_80, cause = "ip")
#previous stuff & possible trash-----
# Exploratory leftovers kept by the author; not part of the main analysis.
data[,2:3]
po.test(data[,2:3])
tsdata = ts(data)
VectorError = VECM(tsdata[,2:3], lag = 1, r = 1, include = "const") #VECM MUST have time series
summary(VectorError)
dl = dynlm(ip ~ m1 + m1lag1, data = tsdata)
plot(dl$residuals)
###-----
#testing if VAR might be any good, forgot to take first difference on the linear regression parts :(
ddata = data.frame(diff(data$ip), diff(data$m1), data$t[1:731]) #creates differenced data and adds the time
plot(ddata$diff.data.ip.)
plot(ddata$diff.data.m1.)
test = data
test$m1lag1 = shift(test$m1)
test$m1lag2 = shift(test$m1lag1)
test$iplag1 = shift(test$ip)
test$iplag2 = shift(test$iplag1)
dl = dynlm(ip ~ m1 + m1lag1 + m1lag2 + iplag1 + iplag2, data = test)
summary(dl)
grangertest(data$ip, data$m1, 2)
linearHypothesis(dl,"m1lag1 - m1lag2" )
naive = lm(ip ~ m1, data = data)
plot(naive$residuals)
print(data[,2:3])
var1 = VAR(data[,2:3],p = 2, type = "const")
print(var1)
summary(var1)
#TRASH TRASH TRASH TRASH--------
datapre80 = data[1:252,]
VARselect(datapre80[,2:3], lag.max = 12, type = "const", season = 12)$select #displays a bunch of information criterion values, 10 seems to be a good number
cointestpre80 = ca.jo(datapre80[,2:3], type = "trace", ecdet = "const", season = 12)
cointestpre80@cval
cointestpre80@teststat[1] #H0: r = 0 is not rejected at % level, critical value is 19.96
#post 1980s
# NOTE(review): length(data) on a data frame is the number of columns (3), so
# 373:length(data) counts DOWN from 373 to 3; should be nrow(data).
datapost80 = data[373:length(data),]
VARselect(datapost80[,2:3], lag.max = 12, type = "const", season = 12)$select #displays a bunch of information criterion values, 10 seems to be a good number
cointestpost80 = ca.jo(datapost80[,2:3], type = "trace", ecdet = "const", season = 12)
cointestpost80@cval
cointestpost80@teststat[1] #H0: r = 0 is not rejected at 5% level, critical value is 19.96
|
# Define server logic required to draw a histogram
# (In fact the server renders several dashboard outputs — a statistics table,
# time-series line plots, two-state comparison plots, a correlation table, a
# histogram and a boxplot — all reading the globally defined `master_df`.)
server <- function(input, output) {
################### INPUT ####################
# Rows of master_df for the chosen state inside the chosen date window;
# recomputed only when the "go" button fires.
select_state <- eventReactive(input$go, {
state_name <- input$state
twin <- input$true_date
# NOTE(review): `state_name == state` works via dplyr data masking
# (state_name is a local variable, state is a column), but reads oddly.
df_state <- master_df %>% filter(state_name == state)
df_state_date <- df_state %>% filter(Date >= twin[1] & Date <= twin[2])
return(df_state_date)
})
# Date-range picker bounded by the selected state's observed dates.
output$timedate <- renderUI({
state_name <- input$state
df <- master_df %>%
filter(state == state_name)
min_time <- min(df$Date)
max_time <- max(df$Date)
dateRangeInput("true_date", "Período de análise",
end = max_time,
start = min_time,
min = min_time,
max = max_time,
format = "dd/mm/yy",
separator = " - ",
language='pt-BR')
})
# Comparison-tab picker: bounds are the intersection of the selected states'
# date ranges (latest start across states, earliest end across states).
output$timedate_comp <- renderUI({
state_name <- input$state
df <- master_df %>%
filter(state %in% state_name)
maxmin_time <- df %>%
group_by(state) %>%
summarise(MD = min(Date)) %>%
.$MD %>%
max()
minmax_time <- df %>%
group_by(state) %>%
summarise(MD = max(Date)) %>%
.$MD %>%
min()
min_time <- maxmin_time
max_time <- minmax_time
dateRangeInput("true_date_comp", "Período de análise",
end = max_time,
start = min_time,
min = min_time,
max = max_time,
format = "dd/mm/yy",
separator = " - ",
language='pt-BR')
})
################ OUTPUT #####################
# Summary statistics (mean, median, mode, sd, max, min) for the selection.
# NOTE(review): locals `mean` and `median` shadow the base functions; the
# call median(...) still resolves to the function, but renaming would be
# clearer.
Info_DataTable <- eventReactive(input$go,{
df <- select_state()
numbers <- df %>% select(number)
mean <- numbers %>% colMeans()
Média <- mean[[1]]
median <- numbers
Mediana <- median(median[[1]])
moda<-function(x){which.max(tabulate(x))}
Moda <- moda((numbers)[[1]])
standDeviation <- numbers
DesvioPadrão <- sd(standDeviation[[1]])
ValorMáximo<- max(numbers[[1]])
ValorMínimo<- min(numbers[[1]])
Estado <- input$state
df_tb <- data.frame(Estado, Média, Mediana, Moda, DesvioPadrão, ValorMáximo, ValorMínimo)
df_tb <- as.data.frame(t(df_tb))
# tb <- as_tibble(cbind(nms = names(df_tb), t(df_tb)))
# tb <- tb %>%
# rename('Informações' = nms,
# 'Valores' = V2)
#
return(df_tb)
})
# Render the statistics table with pt-BR DataTables localisation.
output$info <- renderDT({
Info_DataTable() %>%
as.data.frame() %>%
DT::datatable(options=list(
language=list(
url = '//cdn.datatables.net/plug-ins/1.10.11/i18n/Portuguese-Brasil.json'
)
))
})
# Single-state time-series line plot of fire counts.
output$sh <- renderPlot({
# All the inputs
df <- select_state()
aux <- df$number %>% na.omit() %>% as.numeric()
aux1 <- min(aux)
aux2 <- max(aux)
df$Date <- ymd(df$Date)
a <- df %>%
ggplot(aes(Date, number, group=1)) +
geom_path() +
ylab('Número de ocorrências de incêndios no estado') +
coord_cartesian(ylim = c(aux1, aux2)) +
theme_bw() +
scale_x_date(date_labels = "%Y-%m-%d")
a
})
# Two-state comparison line plot; returns a message unless exactly two
# states are selected.
comp_line <- eventReactive(input$go_comp, {
if (length(input$state_comp) != 2){
return('Selecione dois estados')
}
state_1 <- input$state_comp[1]
state_2 <- input$state_comp[2]
twin <- input$true_date_comp
df <- master_df[master_df$state == state_1 | master_df$state == state_2,] %>%
filter(Date >= twin[1] & Date <= twin[2])
aux <- df$number %>% na.omit() %>% as.numeric()
aux1 <- min(aux)
aux2 <- max(aux)
df$Date <- ymd(df$Date)
a <- df %>%
ggplot(aes(Date, number, group=1,colour=state)) +
geom_path() +
ylab('Número de ocorrências de incêndios nos estados') +
coord_cartesian(ylim = c(aux1, aux2)) +
theme_bw() +
scale_x_date(date_labels = "%Y-%m-%d")
a
})
output$line_graph_comp <- renderPlot(comp_line())
# Bar plot of the two states' mean fire counts.
# NOTE(review): Média_1/Média_2 are computed but unused; the data frame is
# built from mean_1/mean_2 directly.
comp_bar <- eventReactive(input$go_comp,{
if (length(input$state_comp) != 2){
return('Selecione dois estados')
}
state_1 <- input$state_comp[1]
state_2 <- input$state_comp[2]
twin <- input$true_date_comp
df_1 <- master_df %>% filter(master_df$state == state_1 & Date >= twin[1] & Date <= twin[2])
df_2 <- master_df %>% filter(master_df$state == state_2 & Date >= twin[1] & Date <= twin[2])
mean_1 <- df_1 %>% select(number) %>% colMeans()
Média_1 <- mean_1[[1]]
mean_2 <- df_2 %>% select(number) %>% colMeans()
Média_2 <- mean_2[[1]]
data <- data.frame(
Estado=c(state_1, state_2) ,
Media=c(mean_1, mean_2)
)
ggplot(data, aes(x=Estado, y=Media)) +
geom_bar(stat = "identity")
})
output$bar_graph_comp <- renderPlot(comp_bar())
# Scatterplot of yearly fire counts for the two selected states.
draw_scatterplot <- eventReactive(input$go_comp,{
if (length(input$state_comp) != 2){
return('Selecione dois estados')
}
state_1 <- input$state_comp[1]
state_2 <- input$state_comp[2]
twin <- input$true_date_comp
df <- master_df[master_df$state == state_1 | master_df$state == state_2,] %>%
filter(Date >= twin[1] & Date <= twin[2])
a <- ggplot(data=df, aes(x=year, y=number, size=2)) +
geom_point(aes(colour=state))+
xlab("Ano") +
ylab("Número de incêndios")
a
})
output$scatterplot <- renderPlot(draw_scatterplot())
# Pearson correlation between the two states' fire-count series.
# NOTE(review): cor() errors if the two windows yield different numbers of
# observations — TODO confirm upstream guarantees equal lengths.
correlation_value <- eventReactive(input$go_comp,{
if (length(input$state_comp) != 2){
return('Selecione dois estados')
}
state_1 <- input$state_comp[1]
state_2 <- input$state_comp[2]
twin <- input$true_date_comp
df_1 <- master_df %>% filter(master_df$state == state_1 & Date >= twin[1] & Date <= twin[2])
df_2 <- master_df %>% filter(master_df$state == state_2 & Date >= twin[1] & Date <= twin[2])
Correlacao <- round(cor(df_1$number, df_2$number), digits=4)
df_tb <- data.frame(Correlacao)
df_tb <- as.data.frame(t(df_tb))
return(df_tb)
})
output$correlation <- renderDT({
correlation_value() %>%
as.data.frame() %>%
DT::datatable(options=list(
language=list(
url = '//cdn.datatables.net/plug-ins/1.10.11/i18n/Portuguese-Brasil.json'
)
))
})
# Histogram of fire counts for the selected state.
draw_histograma <- eventReactive(input$go,{
df <- select_state()
a <- ggplot(df, aes(x=number)) +
geom_histogram(binwidth=6)+
xlab("Número de incêndios") +
ylab("Frequência")
a
})
output$histograma <- renderPlot(draw_histograma())
# Boxplot of fire counts for the selected state.
draw_boxplot <- eventReactive(input$go,{
df <- select_state()
a <- ggplot(df, aes(x=state, y=number))+
geom_boxplot()+
ylab("Número de incêndios")
a
})
output$boxplot <- renderPlot(draw_boxplot())
}
|
/server.R
|
no_license
|
pccql/shiny-project
|
R
| false
| false
| 8,038
|
r
|
# Define server logic required to draw a histogram
# (In fact the server renders several dashboard outputs — a statistics table,
# time-series line plots, two-state comparison plots, a correlation table, a
# histogram and a boxplot — all reading the globally defined `master_df`.)
server <- function(input, output) {
################### INPUT ####################
# Rows of master_df for the chosen state inside the chosen date window;
# recomputed only when the "go" button fires.
select_state <- eventReactive(input$go, {
state_name <- input$state
twin <- input$true_date
# NOTE(review): `state_name == state` works via dplyr data masking
# (state_name is a local variable, state is a column), but reads oddly.
df_state <- master_df %>% filter(state_name == state)
df_state_date <- df_state %>% filter(Date >= twin[1] & Date <= twin[2])
return(df_state_date)
})
# Date-range picker bounded by the selected state's observed dates.
output$timedate <- renderUI({
state_name <- input$state
df <- master_df %>%
filter(state == state_name)
min_time <- min(df$Date)
max_time <- max(df$Date)
dateRangeInput("true_date", "Período de análise",
end = max_time,
start = min_time,
min = min_time,
max = max_time,
format = "dd/mm/yy",
separator = " - ",
language='pt-BR')
})
# Comparison-tab picker: bounds are the intersection of the selected states'
# date ranges (latest start across states, earliest end across states).
output$timedate_comp <- renderUI({
state_name <- input$state
df <- master_df %>%
filter(state %in% state_name)
maxmin_time <- df %>%
group_by(state) %>%
summarise(MD = min(Date)) %>%
.$MD %>%
max()
minmax_time <- df %>%
group_by(state) %>%
summarise(MD = max(Date)) %>%
.$MD %>%
min()
min_time <- maxmin_time
max_time <- minmax_time
dateRangeInput("true_date_comp", "Período de análise",
end = max_time,
start = min_time,
min = min_time,
max = max_time,
format = "dd/mm/yy",
separator = " - ",
language='pt-BR')
})
################ OUTPUT #####################
# Summary statistics (mean, median, mode, sd, max, min) for the selection.
# NOTE(review): locals `mean` and `median` shadow the base functions; the
# call median(...) still resolves to the function, but renaming would be
# clearer.
Info_DataTable <- eventReactive(input$go,{
df <- select_state()
numbers <- df %>% select(number)
mean <- numbers %>% colMeans()
Média <- mean[[1]]
median <- numbers
Mediana <- median(median[[1]])
moda<-function(x){which.max(tabulate(x))}
Moda <- moda((numbers)[[1]])
standDeviation <- numbers
DesvioPadrão <- sd(standDeviation[[1]])
ValorMáximo<- max(numbers[[1]])
ValorMínimo<- min(numbers[[1]])
Estado <- input$state
df_tb <- data.frame(Estado, Média, Mediana, Moda, DesvioPadrão, ValorMáximo, ValorMínimo)
df_tb <- as.data.frame(t(df_tb))
# tb <- as_tibble(cbind(nms = names(df_tb), t(df_tb)))
# tb <- tb %>%
# rename('Informações' = nms,
# 'Valores' = V2)
#
return(df_tb)
})
# Render the statistics table with pt-BR DataTables localisation.
output$info <- renderDT({
Info_DataTable() %>%
as.data.frame() %>%
DT::datatable(options=list(
language=list(
url = '//cdn.datatables.net/plug-ins/1.10.11/i18n/Portuguese-Brasil.json'
)
))
})
# Single-state time-series line plot of fire counts.
output$sh <- renderPlot({
# All the inputs
df <- select_state()
aux <- df$number %>% na.omit() %>% as.numeric()
aux1 <- min(aux)
aux2 <- max(aux)
df$Date <- ymd(df$Date)
a <- df %>%
ggplot(aes(Date, number, group=1)) +
geom_path() +
ylab('Número de ocorrências de incêndios no estado') +
coord_cartesian(ylim = c(aux1, aux2)) +
theme_bw() +
scale_x_date(date_labels = "%Y-%m-%d")
a
})
# Two-state comparison line plot; returns a message unless exactly two
# states are selected.
comp_line <- eventReactive(input$go_comp, {
if (length(input$state_comp) != 2){
return('Selecione dois estados')
}
state_1 <- input$state_comp[1]
state_2 <- input$state_comp[2]
twin <- input$true_date_comp
df <- master_df[master_df$state == state_1 | master_df$state == state_2,] %>%
filter(Date >= twin[1] & Date <= twin[2])
aux <- df$number %>% na.omit() %>% as.numeric()
aux1 <- min(aux)
aux2 <- max(aux)
df$Date <- ymd(df$Date)
a <- df %>%
ggplot(aes(Date, number, group=1,colour=state)) +
geom_path() +
ylab('Número de ocorrências de incêndios nos estados') +
coord_cartesian(ylim = c(aux1, aux2)) +
theme_bw() +
scale_x_date(date_labels = "%Y-%m-%d")
a
})
output$line_graph_comp <- renderPlot(comp_line())
# Bar plot of the two states' mean fire counts.
# NOTE(review): Média_1/Média_2 are computed but unused; the data frame is
# built from mean_1/mean_2 directly.
comp_bar <- eventReactive(input$go_comp,{
if (length(input$state_comp) != 2){
return('Selecione dois estados')
}
state_1 <- input$state_comp[1]
state_2 <- input$state_comp[2]
twin <- input$true_date_comp
df_1 <- master_df %>% filter(master_df$state == state_1 & Date >= twin[1] & Date <= twin[2])
df_2 <- master_df %>% filter(master_df$state == state_2 & Date >= twin[1] & Date <= twin[2])
mean_1 <- df_1 %>% select(number) %>% colMeans()
Média_1 <- mean_1[[1]]
mean_2 <- df_2 %>% select(number) %>% colMeans()
Média_2 <- mean_2[[1]]
data <- data.frame(
Estado=c(state_1, state_2) ,
Media=c(mean_1, mean_2)
)
ggplot(data, aes(x=Estado, y=Media)) +
geom_bar(stat = "identity")
})
output$bar_graph_comp <- renderPlot(comp_bar())
# Scatterplot of yearly fire counts for the two selected states.
draw_scatterplot <- eventReactive(input$go_comp,{
if (length(input$state_comp) != 2){
return('Selecione dois estados')
}
state_1 <- input$state_comp[1]
state_2 <- input$state_comp[2]
twin <- input$true_date_comp
df <- master_df[master_df$state == state_1 | master_df$state == state_2,] %>%
filter(Date >= twin[1] & Date <= twin[2])
a <- ggplot(data=df, aes(x=year, y=number, size=2)) +
geom_point(aes(colour=state))+
xlab("Ano") +
ylab("Número de incêndios")
a
})
output$scatterplot <- renderPlot(draw_scatterplot())
# Pearson correlation between the two states' fire-count series.
# NOTE(review): cor() errors if the two windows yield different numbers of
# observations — TODO confirm upstream guarantees equal lengths.
correlation_value <- eventReactive(input$go_comp,{
if (length(input$state_comp) != 2){
return('Selecione dois estados')
}
state_1 <- input$state_comp[1]
state_2 <- input$state_comp[2]
twin <- input$true_date_comp
df_1 <- master_df %>% filter(master_df$state == state_1 & Date >= twin[1] & Date <= twin[2])
df_2 <- master_df %>% filter(master_df$state == state_2 & Date >= twin[1] & Date <= twin[2])
Correlacao <- round(cor(df_1$number, df_2$number), digits=4)
df_tb <- data.frame(Correlacao)
df_tb <- as.data.frame(t(df_tb))
return(df_tb)
})
output$correlation <- renderDT({
correlation_value() %>%
as.data.frame() %>%
DT::datatable(options=list(
language=list(
url = '//cdn.datatables.net/plug-ins/1.10.11/i18n/Portuguese-Brasil.json'
)
))
})
# Histogram of fire counts for the selected state.
draw_histograma <- eventReactive(input$go,{
df <- select_state()
a <- ggplot(df, aes(x=number)) +
geom_histogram(binwidth=6)+
xlab("Número de incêndios") +
ylab("Frequência")
a
})
output$histograma <- renderPlot(draw_histograma())
# Boxplot of fire counts for the selected state.
draw_boxplot <- eventReactive(input$go,{
df <- select_state()
a <- ggplot(df, aes(x=state, y=number))+
geom_boxplot()+
ylab("Número de incêndios")
a
})
output$boxplot <- renderPlot(draw_boxplot())
}
|
## Auto-generated R code extracted (knitr::purl) from the "getting-started"
## vignette of modeltime.gluonts; edit the .Rmd source, not this file.
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
warning = FALSE,
message = FALSE,
fig.align = "center",
out.width = "90%",
fig.width = 7,
fig.height = 5
)
## ---- echo=F------------------------------------------------------------------
library(tidyverse)
library(modeltime)
modeltime_forecast_tbl <- read_rds("modeltime_forecast_tbl.rds")
modeltime_forecast_tbl %>%
plot_modeltime_forecast(
.facet_ncol = 2,
.facet_scales = "free",
.interactive = FALSE
)
## ----setup--------------------------------------------------------------------
library(modeltime.gluonts)
library(tidymodels)
library(tidyverse)
library(timetk)
## ---- eval=F------------------------------------------------------------------
# install_gluonts()
## -----------------------------------------------------------------------------
data <- m4_hourly %>%
select(id, date, value) %>%
group_by(id) %>%
mutate(value = standardize_vec(value)) %>%
ungroup()
data
## -----------------------------------------------------------------------------
HORIZON <- 24*7
new_data <- data %>%
group_by(id) %>%
future_frame(.length_out = HORIZON) %>%
ungroup()
new_data
## ---- eval = FALSE------------------------------------------------------------
# model_fit_nbeats_ensemble <- nbeats(
# id = "id",
# freq = "H",
# prediction_length = HORIZON,
# lookback_length = c(HORIZON, 4*HORIZON),
# epochs = 5,
# num_batches_per_epoch = 15,
# batch_size = 1
# ) %>%
# set_engine("gluonts_nbeats_ensemble") %>%
# fit(value ~ date + id, data)
## ---- eval=F------------------------------------------------------------------
# model_fit_nbeats_ensemble
## ---- echo=F------------------------------------------------------------------
knitr::include_graphics("nbeats_model.jpg")
## ---- eval=F------------------------------------------------------------------
# modeltime_forecast_tbl <- modeltime_table(
# model_fit_nbeats_ensemble
# ) %>%
# modeltime_forecast(
# new_data = new_data,
# actual_data = data,
# keep_data = TRUE
# ) %>%
# group_by(id)
## -----------------------------------------------------------------------------
modeltime_forecast_tbl %>%
plot_modeltime_forecast(
.conf_interval_show = FALSE,
.facet_ncol = 2,
.facet_scales = "free",
.interactive = FALSE
)
## ---- eval = FALSE------------------------------------------------------------
# model_fit_nbeats_ensemble %>%
# save_gluonts_model(path = "nbeats_ensemble_model", overwrite = TRUE)
## ---- eval=FALSE--------------------------------------------------------------
# model_fit_nbeats_ensemble <- load_gluonts_model("nbeats_ensemble_model")
|
/inst/doc/getting-started.R
|
no_license
|
cran/modeltime.gluonts
|
R
| false
| false
| 2,897
|
r
|
## Auto-generated R code extracted (knitr::purl) from the "getting-started"
## vignette of modeltime.gluonts; edit the .Rmd source, not this file.
## ---- include = FALSE---------------------------------------------------------
knitr::opts_chunk$set(
collapse = TRUE,
comment = "#>",
warning = FALSE,
message = FALSE,
fig.align = "center",
out.width = "90%",
fig.width = 7,
fig.height = 5
)
## ---- echo=F------------------------------------------------------------------
library(tidyverse)
library(modeltime)
modeltime_forecast_tbl <- read_rds("modeltime_forecast_tbl.rds")
modeltime_forecast_tbl %>%
plot_modeltime_forecast(
.facet_ncol = 2,
.facet_scales = "free",
.interactive = FALSE
)
## ----setup--------------------------------------------------------------------
library(modeltime.gluonts)
library(tidymodels)
library(tidyverse)
library(timetk)
## ---- eval=F------------------------------------------------------------------
# install_gluonts()
## -----------------------------------------------------------------------------
data <- m4_hourly %>%
select(id, date, value) %>%
group_by(id) %>%
mutate(value = standardize_vec(value)) %>%
ungroup()
data
## -----------------------------------------------------------------------------
HORIZON <- 24*7
new_data <- data %>%
group_by(id) %>%
future_frame(.length_out = HORIZON) %>%
ungroup()
new_data
## ---- eval = FALSE------------------------------------------------------------
# model_fit_nbeats_ensemble <- nbeats(
# id = "id",
# freq = "H",
# prediction_length = HORIZON,
# lookback_length = c(HORIZON, 4*HORIZON),
# epochs = 5,
# num_batches_per_epoch = 15,
# batch_size = 1
# ) %>%
# set_engine("gluonts_nbeats_ensemble") %>%
# fit(value ~ date + id, data)
## ---- eval=F------------------------------------------------------------------
# model_fit_nbeats_ensemble
## ---- echo=F------------------------------------------------------------------
knitr::include_graphics("nbeats_model.jpg")
## ---- eval=F------------------------------------------------------------------
# modeltime_forecast_tbl <- modeltime_table(
# model_fit_nbeats_ensemble
# ) %>%
# modeltime_forecast(
# new_data = new_data,
# actual_data = data,
# keep_data = TRUE
# ) %>%
# group_by(id)
## -----------------------------------------------------------------------------
modeltime_forecast_tbl %>%
plot_modeltime_forecast(
.conf_interval_show = FALSE,
.facet_ncol = 2,
.facet_scales = "free",
.interactive = FALSE
)
## ---- eval = FALSE------------------------------------------------------------
# model_fit_nbeats_ensemble %>%
# save_gluonts_model(path = "nbeats_ensemble_model", overwrite = TRUE)
## ---- eval=FALSE--------------------------------------------------------------
# model_fit_nbeats_ensemble <- load_gluonts_model("nbeats_ensemble_model")
|
# some basic useful functions
# function: is not in — negation of %in%; TRUE where an element of x is
# absent from y.
'%!in%' <- function(x, y) !(x %in% y)
# function: remove non-breaking-space (0xA0) characters from each string
phrase_clean <- function(x) {
  gsub("[\xA0]", "", x)
}
# function: collapse runs of multiple spaces into a single space
# BUG FIX: the original pattern only handled a fixed two-space sequence, so
# longer/odd runs (e.g. three spaces) still left double spaces behind;
# " {2,}" collapses any run of 2+ spaces in one pass.
space_clean <- function(x) gsub(" {2,}", " ", x)
# function: apply a function to ALL character columns
# Converts x to a data.table BY REFERENCE (setDT) and overwrites every
# character column with y(column), also by reference — the caller's object
# is mutated in place.
char_fun <- function(x,y){ # x = dataframe, y = function to apply
setDT(x)
cols_to_be_rectified <- names(x)[vapply(x, is.character, logical(1))]
x[,c(cols_to_be_rectified) := lapply(.SD, y), .SDcols = cols_to_be_rectified]
}
# function: keep only the text INSIDE parentheses (the parens themselves and
# everything outside them are deleted). PCRE (*SKIP)(*F) protects the inner
# text while the "." alternative deletes every other character.
inparens <- function(x) {
  keep_inner <- "(?<=\\()[^()]*(?=\\))(*SKIP)(*F)|."
  gsub(keep_inner, "", x, perl = TRUE)
}
# function: drop any parenthesised text (including the parens), then trim
# leading/trailing whitespace from the remainder.
outparens <- function(x) {
  stripped <- gsub("\\([^()]*\\)", "", x)
  trimws(stripped)
}
# combine data frames that do not have the same column headers and keep all columns
# (data.table::rbindlist with fill = TRUE pads columns missing from either
# input with NA; returns a data.table)
combine <- function(x,y) # x and y are the dataframes to be combined
rbindlist(list(x, y), fill = TRUE)
# get a dataframe of duplicates in a single column
# BUG FIX: the original subset x[,c('y')] looked for a column literally named
# "y" instead of using the y argument, and the unqualified duplicated() calls
# recursed into this very function (which shadows base::duplicated).
duplicated <- function(x,y){ # x is the dataframe to look for duplicates, y is the column name (string) to check
dupe <- x[[y]] # values of the column to check duplicates in
x[base::duplicated(dupe) | base::duplicated(dupe, fromLast=TRUE),] # rows whose value occurs more than once
}
|
/useful_basic.r
|
permissive
|
Jegelewicz/r-codesnippets
|
R
| false
| false
| 1,336
|
r
|
# some basic useful functions
# function: is not in — negation of %in%; TRUE where an element of x is
# absent from y.
'%!in%' <- function(x, y) !(x %in% y)
# function: remove non-breaking-space (0xA0) characters from each string
phrase_clean <- function(x) {
  gsub("[\xA0]", "", x)
}
# function: collapse runs of multiple spaces into a single space
# BUG FIX: the original pattern only handled a fixed two-space sequence, so
# longer/odd runs (e.g. three spaces) still left double spaces behind;
# " {2,}" collapses any run of 2+ spaces in one pass.
space_clean <- function(x) gsub(" {2,}", " ", x)
# function: apply a function to ALL character columns
# Converts x to a data.table BY REFERENCE (setDT) and overwrites every
# character column with y(column), also by reference — the caller's object
# is mutated in place.
char_fun <- function(x,y){ # x = dataframe, y = function to apply
setDT(x)
cols_to_be_rectified <- names(x)[vapply(x, is.character, logical(1))]
x[,c(cols_to_be_rectified) := lapply(.SD, y), .SDcols = cols_to_be_rectified]
}
# function: keep only the text INSIDE parentheses (the parens themselves and
# everything outside them are deleted). PCRE (*SKIP)(*F) protects the inner
# text while the "." alternative deletes every other character.
inparens <- function(x) {
  keep_inner <- "(?<=\\()[^()]*(?=\\))(*SKIP)(*F)|."
  gsub(keep_inner, "", x, perl = TRUE)
}
# function: drop any parenthesised text (including the parens), then trim
# leading/trailing whitespace from the remainder.
outparens <- function(x) {
  stripped <- gsub("\\([^()]*\\)", "", x)
  trimws(stripped)
}
# combine data frames that do not have the same column headers and keep all columns
# (data.table::rbindlist with fill = TRUE pads columns missing from either
# input with NA; returns a data.table)
combine <- function(x,y) # x and y are the dataframes to be combined
rbindlist(list(x, y), fill = TRUE)
# get a dataframe of duplicates in a single column
# BUG FIX: the original subset x[,c('y')] looked for a column literally named
# "y" instead of using the y argument, and the unqualified duplicated() calls
# recursed into this very function (which shadows base::duplicated).
duplicated <- function(x,y){ # x is the dataframe to look for duplicates, y is the column name (string) to check
dupe <- x[[y]] # values of the column to check duplicates in
x[base::duplicated(dupe) | base::duplicated(dupe, fromLast=TRUE),] # rows whose value occurs more than once
}
|
# Teaching exercise script (comments translated from Korean).
install.packages("reshape2")
install.packages("dplyr")
install.packages("ggplot2")
library(reshape2)
library(dplyr)
library(ggplot2)
# Load "traffic accidents by weekday and hour" (Korean filename kept as-is —
# it is the actual data-file name on disk).
acc <- read.csv("요일별_시간대별_교통사고.csv", header=T)
acc
# Goal: examine the hourly distribution of traffic-accident deaths by weekday!
### step 1. Drop unneeded rows and keep only the rows we need.
# tip1) filter(data, row_condition, ...)
# tip2) data %>% filter(row_condition, ...)
### step 2. Drop the unneeded columns.
# tip1) select(data, column, -column, ...)
# tip2) data %>% select(column, -column, ...)
### step 3. Reshape the data to long format to match the goal.
# tip) melt(data, id= ~~, measured= ~~ )
### step 4. Visualise the data.
# tip) ggplot() + geom_xx()
|
/part3/B_Network/dplyr보강/data_handling_practice.R
|
no_license
|
anhnguyendepocen/visual
|
R
| false
| false
| 846
|
r
|
# Teaching exercise script (comments translated from Korean).
install.packages("reshape2")
install.packages("dplyr")
install.packages("ggplot2")
library(reshape2)
library(dplyr)
library(ggplot2)
# Load "traffic accidents by weekday and hour" (Korean filename kept as-is —
# it is the actual data-file name on disk).
acc <- read.csv("요일별_시간대별_교통사고.csv", header=T)
acc
# Goal: examine the hourly distribution of traffic-accident deaths by weekday!
### step 1. Drop unneeded rows and keep only the rows we need.
# tip1) filter(data, row_condition, ...)
# tip2) data %>% filter(row_condition, ...)
### step 2. Drop the unneeded columns.
# tip1) select(data, column, -column, ...)
# tip2) data %>% select(column, -column, ...)
### step 3. Reshape the data to long format to match the goal.
# tip) melt(data, id= ~~, measured= ~~ )
### step 4. Visualise the data.
# tip) ggplot() + geom_xx()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{ggplot}
\alias{ggplot}
\title{Create a new ggplot}
\usage{
ggplot(data = NULL, mapping = aes(), ..., environment = parent.frame())
}
\arguments{
\item{data}{Default dataset to use for plot. If not already a data.frame,
will be converted to one by \code{\link[=fortify]{fortify()}}. If not specified,
must be supplied in each layer added to the plot.}
\item{mapping}{Default list of aesthetic mappings to use for plot.
If not specified, must be supplied in each layer added to the plot.}
\item{...}{Other arguments passed on to methods. Not currently used.}
\item{environment}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} Used prior to tidy
evaluation.}
}
\description{
\code{ggplot()} initializes a ggplot object. It can be used to
declare the input data frame for a graphic and to specify the
set of plot aesthetics intended to be common throughout all
subsequent layers unless specifically overridden.
}
\details{
\code{ggplot()} is used to construct the initial plot object,
and is almost always followed by a plus sign (\code{+}) to add
components to the plot.
There are three common patterns used to invoke \code{ggplot()}:
\itemize{
\item \verb{ggplot(data = df, mapping = aes(x, y, other aesthetics))}
\item \code{ggplot(data = df)}
\item \code{ggplot()}
}
The first pattern is recommended if all layers use the same
data and the same set of aesthetics, although this method
can also be used when adding a layer using data from another
data frame.
The second pattern specifies the default data frame to use
for the plot, but no aesthetics are defined up front. This
is useful when one data frame is used predominantly for the
plot, but the aesthetics vary from one layer to another.
The third pattern initializes a skeleton \code{ggplot} object, which
is fleshed out as layers are added. This is useful when
multiple data frames are used to produce different layers, as
is often the case in complex graphics.
The \verb{data =} and \verb{mapping =} specifications in the arguments are optional
(and are often omitted in practice), so long as the data and the mapping
values are passed into the function in the right order. In the examples
below, however, they are left in place for clarity.
}
\examples{
# Create a data frame with some sample data, then create a data frame
# containing the mean value for each group in the sample data.
set.seed(1)
sample_df <- data.frame(
group = factor(rep(letters[1:3], each = 10)),
value = rnorm(30)
)
group_means_df <- setNames(
aggregate(value ~ group, sample_df, mean),
c("group", "group_mean")
)
# The following three code blocks create the same graphic, each using one
# of the three patterns specified above. In each graphic, the sample data
# are plotted in the first layer and the group means data frame is used to
# plot larger red points on top of the sample data in the second layer.
# Pattern 1
# Both the `data` and `mapping` arguments are passed into the `ggplot()`
# call. Those arguments are omitted in the first `geom_point()` layer
# because they get passed along from the `ggplot()` call. Note that the
# second `geom_point()` layer re-uses the `x = group` aesthetic through
# that mechanism but overrides the y-position aesthetic.
ggplot(data = sample_df, mapping = aes(x = group, y = value)) +
geom_point() +
geom_point(
mapping = aes(y = group_mean), data = group_means_df,
colour = 'red', size = 3
)
# Pattern 2
# Same plot as above, passing only the `data` argument into the `ggplot()`
# call. The `mapping` arguments are now required in each `geom_point()`
# layer because there is no `mapping` argument passed along from the
# `ggplot()` call.
ggplot(data = sample_df) +
geom_point(mapping = aes(x = group, y = value)) +
geom_point(
mapping = aes(x = group, y = group_mean), data = group_means_df,
colour = 'red', size = 3
)
# Pattern 3
# Same plot as above, passing neither the `data` or `mapping` arguments
# into the `ggplot()` call. Both those arguments are now required in
# each `geom_point()` layer. This pattern can be particularly useful when
# creating more complex graphics with many layers using data from multiple
# data frames.
ggplot() +
geom_point(mapping = aes(x = group, y = value), data = sample_df) +
geom_point(
mapping = aes(x = group, y = group_mean), data = group_means_df,
colour = 'red', size = 3
)
}
|
/man/ggplot.Rd
|
permissive
|
tidyverse/ggplot2
|
R
| false
| true
| 4,576
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{ggplot}
\alias{ggplot}
\title{Create a new ggplot}
\usage{
ggplot(data = NULL, mapping = aes(), ..., environment = parent.frame())
}
\arguments{
\item{data}{Default dataset to use for plot. If not already a data.frame,
will be converted to one by \code{\link[=fortify]{fortify()}}. If not specified,
must be supplied in each layer added to the plot.}
\item{mapping}{Default list of aesthetic mappings to use for plot.
If not specified, must be supplied in each layer added to the plot.}
\item{...}{Other arguments passed on to methods. Not currently used.}
\item{environment}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}} Used prior to tidy
evaluation.}
}
\description{
\code{ggplot()} initializes a ggplot object. It can be used to
declare the input data frame for a graphic and to specify the
set of plot aesthetics intended to be common throughout all
subsequent layers unless specifically overridden.
}
\details{
\code{ggplot()} is used to construct the initial plot object,
and is almost always followed by a plus sign (\code{+}) to add
components to the plot.
There are three common patterns used to invoke \code{ggplot()}:
\itemize{
\item \verb{ggplot(data = df, mapping = aes(x, y, other aesthetics))}
\item \code{ggplot(data = df)}
\item \code{ggplot()}
}
The first pattern is recommended if all layers use the same
data and the same set of aesthetics, although this method
can also be used when adding a layer using data from another
data frame.
The second pattern specifies the default data frame to use
for the plot, but no aesthetics are defined up front. This
is useful when one data frame is used predominantly for the
plot, but the aesthetics vary from one layer to another.
The third pattern initializes a skeleton \code{ggplot} object, which
is fleshed out as layers are added. This is useful when
multiple data frames are used to produce different layers, as
is often the case in complex graphics.
The \verb{data =} and \verb{mapping =} specifications in the arguments are optional
(and are often omitted in practice), so long as the data and the mapping
values are passed into the function in the right order. In the examples
below, however, they are left in place for clarity.
}
\examples{
# Create a data frame with some sample data, then create a data frame
# containing the mean value for each group in the sample data.
set.seed(1)
sample_df <- data.frame(
group = factor(rep(letters[1:3], each = 10)),
value = rnorm(30)
)
group_means_df <- setNames(
aggregate(value ~ group, sample_df, mean),
c("group", "group_mean")
)
# The following three code blocks create the same graphic, each using one
# of the three patterns specified above. In each graphic, the sample data
# are plotted in the first layer and the group means data frame is used to
# plot larger red points on top of the sample data in the second layer.
# Pattern 1
# Both the `data` and `mapping` arguments are passed into the `ggplot()`
# call. Those arguments are omitted in the first `geom_point()` layer
# because they get passed along from the `ggplot()` call. Note that the
# second `geom_point()` layer re-uses the `x = group` aesthetic through
# that mechanism but overrides the y-position aesthetic.
ggplot(data = sample_df, mapping = aes(x = group, y = value)) +
geom_point() +
geom_point(
mapping = aes(y = group_mean), data = group_means_df,
colour = 'red', size = 3
)
# Pattern 2
# Same plot as above, passing only the `data` argument into the `ggplot()`
# call. The `mapping` arguments are now required in each `geom_point()`
# layer because there is no `mapping` argument passed along from the
# `ggplot()` call.
ggplot(data = sample_df) +
geom_point(mapping = aes(x = group, y = value)) +
geom_point(
mapping = aes(x = group, y = group_mean), data = group_means_df,
colour = 'red', size = 3
)
# Pattern 3
# Same plot as above, passing neither the `data` or `mapping` arguments
# into the `ggplot()` call. Both those arguments are now required in
# each `geom_point()` layer. This pattern can be particularly useful when
# creating more complex graphics with many layers using data from multiple
# data frames.
ggplot() +
geom_point(mapping = aes(x = group, y = value), data = sample_df) +
geom_point(
mapping = aes(x = group, y = group_mean), data = group_means_df,
colour = 'red', size = 3
)
}
|
expit <- function(x) {
  # Inverse-logit (logistic) transform mapping real values to (0, 1).
  # Written as 1 / (1 + exp(-x)) rather than the original
  # exp(x) / (1 + exp(x)) so large positive x returns 1 instead of
  # NaN (Inf / Inf overflow). Equivalent to stats::plogis(x).
  1 / (1 + exp(-x))
}
|
/R/expit.R
|
no_license
|
miemiemiem/Aclust
|
R
| false
| false
| 51
|
r
|
expit <- function(x) {
  # Inverse-logit (logistic) transform mapping real values to (0, 1).
  # Written as 1 / (1 + exp(-x)) rather than the original
  # exp(x) / (1 + exp(x)) so large positive x returns 1 instead of
  # NaN (Inf / Inf overflow). Equivalent to stats::plogis(x).
  1 / (1 + exp(-x))
}
|
# Length of the hypotenuse of a right triangle with leg lengths x and y
# (Pythagorean theorem; vectorized over both arguments).
hypotenuse <- function(x, y) {
  sqrt(x * x + y * y)
}
# Table of Pythagorean triples: each row satisfies x^2 + y^2 == z^2.
# NOTE(review): the name looks like a truncated "pythagorean_triples" --
# confirm before renaming, since other code may reference this identifier.
ythagorean_triples <- data.frame(
  x = c(3, 5, 8, 7, 9, 11, 12, 13, 15, 16, 17, 19),
  y = c(4, 12, 15, 24, 40, 60, 35, 84, 112, 63, 144, 180),
  z = c(5, 13, 17, 25, 41, 61, 37, 85, 113, 65, 145, 181)
)
|
/pkg/hypotenuse.R
|
no_license
|
RinLinux/RNotes
|
R
| false
| false
| 262
|
r
|
# Length of the hypotenuse of a right triangle with leg lengths x and y
# (Pythagorean theorem; vectorized over both arguments).
hypotenuse <- function(x, y) {
  sqrt(x * x + y * y)
}
# Table of Pythagorean triples: each row satisfies x^2 + y^2 == z^2.
# NOTE(review): the name looks like a truncated "pythagorean_triples" --
# confirm before renaming, since other code may reference this identifier.
ythagorean_triples <- data.frame(
  x = c(3, 5, 8, 7, 9, 11, 12, 13, 15, 16, 17, 19),
  y = c(4, 12, 15, 24, 40, 60, 35, 84, 112, 63, 144, 180),
  z = c(5, 13, 17, 25, 41, 61, 37, 85, 113, 65, 145, 181)
)
|
## The goal of my functions is to compute the inverse of a matrix and cache its inverse
## to avoid repeated computation and decrease the computation time. One function creates
## a special matrix that cache its inverse while the other function computes the inverse
## of the special matrix returned by the first fucntion or by retriving the inverse if
## it already exists
## My first function "makeCacheMatrix" takes matrix 'x' as input and creates a special
## matrix that can cache its inverse using lexical scoping in R
## Wrap a matrix in a closure-based cache object.
## Returns a list of four accessor functions (set/get/setinv/getinv) that
## share the matrix `x` and its cached inverse via lexical scoping; setting
## a new matrix invalidates any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  # Replace the stored matrix and invalidate the stale inverse
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL
  }
  # Return the stored matrix
  get <- function() x
  # Store a computed inverse in the cache
  setinv <- function(inverse) cached_inverse <<- inverse
  # Return the cached inverse (NULL if not yet computed)
  getinv <- function() cached_inverse
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## My second function "cacheSolve" computes the inverse of x,a special matrix returned
## with the first function. If the inverse already exists,function retrieves it.
## Return the inverse of the special matrix object `x` created by
## makeCacheMatrix(). If the inverse was already computed, the cached value
## is returned (with a message); otherwise it is computed with solve(),
## stored in the cache, and returned. Extra arguments are passed to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  # Cached path: a non-NULL value means the inverse was already computed
  if (!is.null(cached)) {
    message("Getting cached matrix")
    return(cached)
  }
  # Slow path: fetch the matrix, invert it, and populate the cache
  mat <- x$get()
  inverse <- solve(mat, ...)
  x$setinv(inverse)
  inverse
}
|
/cachematrix.R
|
no_license
|
shabnamh/ProgrammingAssignment2
|
R
| false
| false
| 1,793
|
r
|
## The goal of my functions is to compute the inverse of a matrix and cache its inverse
## to avoid repeated computation and decrease the computation time. One function creates
## a special matrix that cache its inverse while the other function computes the inverse
## of the special matrix returned by the first fucntion or by retriving the inverse if
## it already exists
## My first function "makeCacheMatrix" takes matrix 'x' as input and creates a special
## matrix that can cache its inverse using lexical scoping in R
## Wrap a matrix in a closure-based cache object.
## Returns a list of four accessor functions (set/get/setinv/getinv) that
## share the matrix `x` and its cached inverse via lexical scoping; setting
## a new matrix invalidates any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  # Replace the stored matrix and invalidate the stale inverse
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL
  }
  # Return the stored matrix
  get <- function() x
  # Store a computed inverse in the cache
  setinv <- function(inverse) cached_inverse <<- inverse
  # Return the cached inverse (NULL if not yet computed)
  getinv <- function() cached_inverse
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## My second function "cacheSolve" computes the inverse of x,a special matrix returned
## with the first function. If the inverse already exists,function retrieves it.
## Return the inverse of the special matrix object `x` created by
## makeCacheMatrix(). If the inverse was already computed, the cached value
## is returned (with a message); otherwise it is computed with solve(),
## stored in the cache, and returned. Extra arguments are passed to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  # Cached path: a non-NULL value means the inverse was already computed
  if (!is.null(cached)) {
    message("Getting cached matrix")
    return(cached)
  }
  # Slow path: fetch the matrix, invert it, and populate the cache
  mat <- x$get()
  inverse <- solve(mat, ...)
  x$setinv(inverse)
  inverse
}
|
# Plot 2: Global Active Power over time (2007-02-01 and 2007-02-02) from the
# UCI "Individual household electric power consumption" data set.
# "?" marks missing values in the raw file, so map it to NA on read.
dataset <- read.table("household_power_consumption.txt",
                      header = TRUE, sep = ";", na.strings = "?")
# Keep only the two target dates (Date column is d/m/Y text at this point)
subdata <- dataset[dataset$Date %in% c("1/2/2007", "2/2/2007"),]
# Combine the Date and Time columns into a single POSIXlt date-time variable
datetime <- strptime(paste(subdata$Date, subdata$Time), "%d/%m/%Y %H:%M:%S")
# Draw an empty frame first (type = "n"), then add the data as a line
plot(datetime, subdata$Global_active_power,
     xlab = "",
     ylab = "Global Active Power (kilowatts)",
     type = "n" )
lines(datetime, subdata$Global_active_power)
dev.copy(png, file = "plot2.png") # copy the on-screen plot to a PNG file
dev.off() # close the png device
# Note: the weekday labels on the x axis follow the system locale/language
|
/plot2.R
|
no_license
|
Kinundu/ExData_Plotting1
|
R
| false
| false
| 814
|
r
|
# Plot 2: Global Active Power over time (2007-02-01 and 2007-02-02) from the
# UCI "Individual household electric power consumption" data set.
# "?" marks missing values in the raw file, so map it to NA on read.
dataset <- read.table("household_power_consumption.txt",
                      header = TRUE, sep = ";", na.strings = "?")
# Keep only the two target dates (Date column is d/m/Y text at this point)
subdata <- dataset[dataset$Date %in% c("1/2/2007", "2/2/2007"),]
# Combine the Date and Time columns into a single POSIXlt date-time variable
datetime <- strptime(paste(subdata$Date, subdata$Time), "%d/%m/%Y %H:%M:%S")
# Draw an empty frame first (type = "n"), then add the data as a line
plot(datetime, subdata$Global_active_power,
     xlab = "",
     ylab = "Global Active Power (kilowatts)",
     type = "n" )
lines(datetime, subdata$Global_active_power)
dev.copy(png, file = "plot2.png") # copy the on-screen plot to a PNG file
dev.off() # close the png device
# Note: the weekday labels on the x axis follow the system locale/language
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/funding_metadata.R
\name{.replace_other_attribute_fundagency}
\alias{.replace_other_attribute_fundagency}
\title{Replace attributes that have other values in funding agency}
\usage{
.replace_other_attribute_fundagency(.data, attribute, other_attribute)
}
\arguments{
\item{.data}{data to clean up \code{Other} values}
\item{attribute}{attribute of the database. Attribute name from AgroFIMS database where a user input is stored}
\item{other_attribute}{Other attribute name related to \code{attribute} parameter used to store \code{Other} values or non-standardized inputs.}
}
\description{
Typically, users type values that are not mapped in the agronomy ontology. For this reason, the API response retrieves additional information
that should be assembled into a data structure.
}
\examples{
\dontrun{
.data <- ag_get_fundagency_studyId(studyDbId = 28,format = "data.frame",
serverURL = "https://research.cip.cgiar.org/agrofims/api/dev",
version ="/0212/r")
.data <- .replace_other_attribute_funding(.data, "fundagencytypeId", "fundagencytypeother")
}
}
\author{
Omar Benites
}
|
/man/dot-replace_other_attribute_fundagency.Rd
|
permissive
|
AGROFIMS/ragrofims
|
R
| false
| true
| 1,223
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/funding_metadata.R
\name{.replace_other_attribute_fundagency}
\alias{.replace_other_attribute_fundagency}
\title{Replace attributes that have other values in funding agency}
\usage{
.replace_other_attribute_fundagency(.data, attribute, other_attribute)
}
\arguments{
\item{.data}{data to clean up \code{Other} values}
\item{attribute}{attribute of the database. Attribute name from AgroFIMS database where a user input is stored}
\item{other_attribute}{Other attribute name related to \code{attribute} parameter used to store \code{Other} values or non-standardized inputs.}
}
\description{
Typically, users type values that are not mapped in the agronomy ontology. For this reason, the API response retrieves additional information
that should be assembled into a data structure.
}
\examples{
\dontrun{
.data <- ag_get_fundagency_studyId(studyDbId = 28,format = "data.frame",
serverURL = "https://research.cip.cgiar.org/agrofims/api/dev",
version ="/0212/r")
.data <- .replace_other_attribute_funding(.data, "fundagencytypeId", "fundagencytypeother")
}
}
\author{
Omar Benites
}
|
\name{accessD.wp}
\alias{accessD.wp}
\title{Obtain whole resolution level of wavelet packet coefficients from a wavelet packet object (wp).}
\description{
Get a whole resolution level's worth of coefficients from a \code{\link{wp}} wavelet packet object. To obtain packets of coefficients from a wavelet packet object you should use the \code{\link{getpacket}} collection of functions.
}
\usage{
\method{accessD}{wp}(wp, level, \dots)
}
\arguments{
\item{wp}{Wavelet packet object.}
\item{level}{the resolution level that you wish to extract.}
\item{\dots}{any other arguments}
}
\details{
The wavelet packet coefficients are actually stored in a straightforward manner in a matrix component of a \code{\link{wp}} object so it would not be too difficult to extract whole resolution levels yourself. However, this routine makes it easier to do.
}
\value{
A vector containing the coefficients that you wanted to extract.
}
\section{RELEASE}{
Version 3.5.3 Copyright Guy Nason 1994
}
\seealso{
\code{\link{accessD}}, \code{\link{getpacket}}
}
\keyword{manip}
\author{G P Nason}
|
/man/accessD.wp.rd
|
no_license
|
cran/wavethresh
|
R
| false
| false
| 1,077
|
rd
|
\name{accessD.wp}
\alias{accessD.wp}
\title{Obtain whole resolution level of wavelet packet coefficients from a wavelet packet object (wp).}
\description{
Get a whole resolution level's worth of coefficients from a \code{\link{wp}} wavelet packet object. To obtain packets of coefficients from a wavelet packet object you should use the \code{\link{getpacket}} collection of functions.
}
\usage{
\method{accessD}{wp}(wp, level, \dots)
}
\arguments{
\item{wp}{Wavelet packet object.}
\item{level}{the resolution level that you wish to extract.}
\item{\dots}{any other arguments}
}
\details{
The wavelet packet coefficients are actually stored in a straightforward manner in a matrix component of a \code{\link{wp}} object so it would not be too difficult to extract whole resolution levels yourself. However, this routine makes it easier to do.
}
\value{
A vector containing the coefficients that you wanted to extract.
}
\section{RELEASE}{
Version 3.5.3 Copyright Guy Nason 1994
}
\seealso{
\code{\link{accessD}}, \code{\link{getpacket}}
}
\keyword{manip}
\author{G P Nason}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elementTemplateApi.r
\name{elementTemplate$get}
\alias{elementTemplate$get}
\title{Retrieve an element template.}
\arguments{
\item{webId}{The ID of the element template.}
\item{selectedFields}{List of fields to be returned in the response, separated by semicolons (;). If this parameter is not specified, all available fields will be returned.}
\item{webIdType}{Optional parameter. Used to specify the type of WebID. Useful for URL brevity and other special cases. Default is the value of the configuration item "WebIDType".}
}
\value{
The specified element template.
}
\description{
Retrieve an element template.
}
|
/man/elementTemplate-cash-get.Rd
|
permissive
|
frbl/PI-Web-API-Client-R
|
R
| false
| true
| 697
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elementTemplateApi.r
\name{elementTemplate$get}
\alias{elementTemplate$get}
\title{Retrieve an element template.}
\arguments{
\item{webId}{The ID of the element template.}
\item{selectedFields}{List of fields to be returned in the response, separated by semicolons (;). If this parameter is not specified, all available fields will be returned.}
\item{webIdType}{Optional parameter. Used to specify the type of WebID. Useful for URL brevity and other special cases. Default is the value of the configuration item "WebIDType".}
}
\value{
The specified element template.
}
\description{
Retrieve an element template.
}
|
# Line, area, and ribbon chart recipes (ggplot2).
# NOTE(review): BOD and ToothGrowth ship with base R; worldpop, uspopage,
# and climate are presumably from the gcookbook package -- confirm it is
# loaded before running this script.
ggplot(BOD, aes(x=Time, y=demand))+geom_line()
ggplot(BOD, aes(x=factor(Time), y=demand, group=1))+geom_line()
ggplot(BOD, aes(x=Time, y=demand))+geom_line()+ylim(0, max(BOD$demand))
ggplot(BOD, aes(x=Time, y=demand))+geom_line()+expand_limits(y=0)
ggplot(BOD, aes(x=Time, y=demand))+geom_line()+geom_point()
head(worldpop,10)
ggplot(worldpop, aes(x=Year, y=Population))+geom_line()+geom_point()
ggplot(worldpop, aes(x=Year, y=Population))+geom_line()+geom_point()+scale_y_log10()
# multiple lines, one per supplement group
library(plyr)
tg<-ddply(ToothGrowth, c("supp","dose"), summarise, length=mean(len))
#aggregate(len~supp*dose, data=ToothGrowth, mean)
ggplot(tg, aes(x=dose, y=length, colour=supp))+geom_line()
ggplot(tg, aes(x=dose, y=length, linetype=supp))+geom_line()
ggplot(tg, aes(x=dose, y=length, colour=supp, linetype=supp))+geom_line()
ggplot(tg, aes(x=factor(dose), y=length, colour=supp, group=supp))+geom_line()
ggplot(tg, aes(x=dose, y=length, shape=supp))+geom_line()+geom_point(size=4)
ggplot(tg, aes(x=dose, y=length, linetype=supp, colour=supp, fill=supp))+geom_line()+geom_point(size=4, shape=21)
ggplot(tg, aes(x=dose, y=length, shape=supp)) +geom_line(position=position_dodge(0.2))+geom_point(position=position_dodge(0.2), size=4)
ggplot(BOD, aes(x=Time, y=demand))+geom_line(linetype="dashed", size=1, colour="blue")
ggplot(tg, aes(x=dose, y=length, colour=supp))+geom_line()+scale_colour_brewer(palette="Set1")
ggplot(tg, aes(x=dose, y=length, group=supp))+geom_line(colour="darkgreen", size=1.5)
# FIX: was `fil="white"` -- an unknown parameter that geom_point() ignores
# with a warning; `fill` is what colors hollow shapes (21-25).
ggplot(tg, aes(x=dose, y=length, colour=supp))+geom_line(linetype="dashed")+geom_point(shape=22, size=3, fill="white")
#change points appearance
ggplot(BOD, aes(x=Time, y=demand))+geom_line()+geom_point(size=4, shape=22, colour="darkred", fill="pink")
#fill color is relevant for point shapes 21~25
pd<-position_dodge(0.2)
ggplot(tg, aes(x=dose, y=length, fill=supp))+geom_line(position=pd)+geom_point(position=pd, shape=21, size=3)+scale_fill_manual(values=c('black','white'))
#area chart
sunspotyear<-data.frame(
  Year=as.numeric(time(sunspot.year)),
  Sunspots=as.numeric(sunspot.year)
)
ggplot(sunspotyear, aes(x=Year, y=Sunspots))+geom_area()
ggplot(sunspotyear,aes(x=Year, y=Sunspots))+geom_area(colour="black", fill="blue", alpha=.2)
ggplot(sunspotyear, aes(x=Year, y=Sunspots))+geom_area(fill='blue', alpha=.2)+geom_line()
#stacked area chart (TRUE spelled out instead of the reassignable T)
ggplot(uspopage, aes(x=Year, y=Thousands, fill=AgeGroup))+geom_area()+guides(fill=guide_legend(reverse=TRUE))
ggplot(uspopage, aes(x=Year, y=Thousands, fill=AgeGroup))+geom_area(colour="black", size=.2, alpha=.4)+scale_fill_brewer(palette="Blues", breaks=rev(levels(uspopage$AgeGroup)))
ggplot(uspopage, aes(x=Year, y=Thousands, fill=AgeGroup))+geom_area(colour="black", size=.2, alpha=.4)+scale_fill_brewer(palette="Blues")+guides(fill=guide_legend(reverse=TRUE))
ggplot(uspopage, aes(x=Year, y=Thousands, fill=AgeGroup, order=desc(AgeGroup)))+geom_area(colour='black', size=.2, alpha=.4)+scale_fill_brewer(palette='Blues')
# FIX: removed the stray double comma after `fill=AgeGroup`, which made the
# aes() call fail with a missing-argument error.
ggplot(uspopage, aes(x=Year, y=Thousands, fill=AgeGroup, order=desc(AgeGroup))) +geom_area(alpha=.4)+scale_fill_brewer(palette='Blues')+geom_line(position='stack', size=.2)
#percentage (100% stacked) area
uspopage_prop<-ddply(uspopage, "Year", transform, Percent=Thousands/sum(Thousands)*100)
ggplot(uspopage_prop, aes(x=Year, y=Percent, fill=AgeGroup))+geom_area(colour='black', size=.2, alpha=.4)+scale_fill_brewer(palette='Blues', breaks=rev(levels(uspopage_prop$AgeGroup)))
#confidence region
clim<-subset(climate, Source=="Berkeley", select=c("Year","Anomaly10y", "Unc10y"))
ggplot(clim, aes(x=Year, y=Anomaly10y))+
  geom_ribbon(aes(ymin=Anomaly10y-Unc10y, ymax=Anomaly10y+Unc10y), alpha=0.2)+geom_line()
ggplot(clim, aes(x=Year, y=Anomaly10y))+
  geom_line(aes(y=Anomaly10y-Unc10y), colour='grey50', linetype='dotted')+
  geom_line(aes(y=Anomaly10y+Unc10y), colour='grey50', linetype='dotted')+
  geom_line()
|
/linegraph.R
|
no_license
|
yonghuat/rgraphics
|
R
| false
| false
| 3,912
|
r
|
# Line, area, and ribbon chart recipes (ggplot2).
# NOTE(review): BOD and ToothGrowth ship with base R; worldpop, uspopage,
# and climate are presumably from the gcookbook package -- confirm it is
# loaded before running this script.
ggplot(BOD, aes(x=Time, y=demand))+geom_line()
ggplot(BOD, aes(x=factor(Time), y=demand, group=1))+geom_line()
ggplot(BOD, aes(x=Time, y=demand))+geom_line()+ylim(0, max(BOD$demand))
ggplot(BOD, aes(x=Time, y=demand))+geom_line()+expand_limits(y=0)
ggplot(BOD, aes(x=Time, y=demand))+geom_line()+geom_point()
head(worldpop,10)
ggplot(worldpop, aes(x=Year, y=Population))+geom_line()+geom_point()
ggplot(worldpop, aes(x=Year, y=Population))+geom_line()+geom_point()+scale_y_log10()
# multiple lines, one per supplement group
library(plyr)
tg<-ddply(ToothGrowth, c("supp","dose"), summarise, length=mean(len))
#aggregate(len~supp*dose, data=ToothGrowth, mean)
ggplot(tg, aes(x=dose, y=length, colour=supp))+geom_line()
ggplot(tg, aes(x=dose, y=length, linetype=supp))+geom_line()
ggplot(tg, aes(x=dose, y=length, colour=supp, linetype=supp))+geom_line()
ggplot(tg, aes(x=factor(dose), y=length, colour=supp, group=supp))+geom_line()
ggplot(tg, aes(x=dose, y=length, shape=supp))+geom_line()+geom_point(size=4)
ggplot(tg, aes(x=dose, y=length, linetype=supp, colour=supp, fill=supp))+geom_line()+geom_point(size=4, shape=21)
ggplot(tg, aes(x=dose, y=length, shape=supp)) +geom_line(position=position_dodge(0.2))+geom_point(position=position_dodge(0.2), size=4)
ggplot(BOD, aes(x=Time, y=demand))+geom_line(linetype="dashed", size=1, colour="blue")
ggplot(tg, aes(x=dose, y=length, colour=supp))+geom_line()+scale_colour_brewer(palette="Set1")
ggplot(tg, aes(x=dose, y=length, group=supp))+geom_line(colour="darkgreen", size=1.5)
# FIX: was `fil="white"` -- an unknown parameter that geom_point() ignores
# with a warning; `fill` is what colors hollow shapes (21-25).
ggplot(tg, aes(x=dose, y=length, colour=supp))+geom_line(linetype="dashed")+geom_point(shape=22, size=3, fill="white")
#change points appearance
ggplot(BOD, aes(x=Time, y=demand))+geom_line()+geom_point(size=4, shape=22, colour="darkred", fill="pink")
#fill color is relevant for point shapes 21~25
pd<-position_dodge(0.2)
ggplot(tg, aes(x=dose, y=length, fill=supp))+geom_line(position=pd)+geom_point(position=pd, shape=21, size=3)+scale_fill_manual(values=c('black','white'))
#area chart
sunspotyear<-data.frame(
  Year=as.numeric(time(sunspot.year)),
  Sunspots=as.numeric(sunspot.year)
)
ggplot(sunspotyear, aes(x=Year, y=Sunspots))+geom_area()
ggplot(sunspotyear,aes(x=Year, y=Sunspots))+geom_area(colour="black", fill="blue", alpha=.2)
ggplot(sunspotyear, aes(x=Year, y=Sunspots))+geom_area(fill='blue', alpha=.2)+geom_line()
#stacked area chart (TRUE spelled out instead of the reassignable T)
ggplot(uspopage, aes(x=Year, y=Thousands, fill=AgeGroup))+geom_area()+guides(fill=guide_legend(reverse=TRUE))
ggplot(uspopage, aes(x=Year, y=Thousands, fill=AgeGroup))+geom_area(colour="black", size=.2, alpha=.4)+scale_fill_brewer(palette="Blues", breaks=rev(levels(uspopage$AgeGroup)))
ggplot(uspopage, aes(x=Year, y=Thousands, fill=AgeGroup))+geom_area(colour="black", size=.2, alpha=.4)+scale_fill_brewer(palette="Blues")+guides(fill=guide_legend(reverse=TRUE))
ggplot(uspopage, aes(x=Year, y=Thousands, fill=AgeGroup, order=desc(AgeGroup)))+geom_area(colour='black', size=.2, alpha=.4)+scale_fill_brewer(palette='Blues')
# FIX: removed the stray double comma after `fill=AgeGroup`, which made the
# aes() call fail with a missing-argument error.
ggplot(uspopage, aes(x=Year, y=Thousands, fill=AgeGroup, order=desc(AgeGroup))) +geom_area(alpha=.4)+scale_fill_brewer(palette='Blues')+geom_line(position='stack', size=.2)
#percentage (100% stacked) area
uspopage_prop<-ddply(uspopage, "Year", transform, Percent=Thousands/sum(Thousands)*100)
ggplot(uspopage_prop, aes(x=Year, y=Percent, fill=AgeGroup))+geom_area(colour='black', size=.2, alpha=.4)+scale_fill_brewer(palette='Blues', breaks=rev(levels(uspopage_prop$AgeGroup)))
#confidence region
clim<-subset(climate, Source=="Berkeley", select=c("Year","Anomaly10y", "Unc10y"))
ggplot(clim, aes(x=Year, y=Anomaly10y))+
  geom_ribbon(aes(ymin=Anomaly10y-Unc10y, ymax=Anomaly10y+Unc10y), alpha=0.2)+geom_line()
ggplot(clim, aes(x=Year, y=Anomaly10y))+
  geom_line(aes(y=Anomaly10y-Unc10y), colour='grey50', linetype='dotted')+
  geom_line(aes(y=Anomaly10y+Unc10y), colour='grey50', linetype='dotted')+
  geom_line()
|
\name{fortify.mg_ensemble}
\alias{fortify.mg_ensemble}
\title{S3method fortify mg_ensemble}
\usage{
fortify.mg_ensemble(model, data = NULL, ...)
}
\description{
S3method fortify mg_ensemble
}
|
/man/fortify.mg_ensemble.Rd
|
no_license
|
garrettgman/modelglyphs
|
R
| false
| false
| 197
|
rd
|
\name{fortify.mg_ensemble}
\alias{fortify.mg_ensemble}
\title{S3method fortify mg_ensemble}
\usage{
fortify.mg_ensemble(model, data = NULL, ...)
}
\description{
S3method fortify mg_ensemble
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/battingStats.R
\name{HRpct}
\alias{HRpct}
\title{Batting: Calculate home run percentage}
\usage{
HRpct(dat = NULL)
}
\arguments{
\item{dat}{A data frame you would wish to calculate. The data frame must have the same column names found in
The \code{Lahman} package or the Chadwick Bureau GitHub repository.}
}
\description{
Find home run percentage for batters with more than zero at bats.
Required fields from the Batting table are "AB" and "HR."
}
\examples{
data("Batting2016")
head(Batting2016)
Batting2016$HRpct <- HRpct(Batting2016)
}
\seealso{
Other Batting functions: \code{\link{BABIP}},
\code{\link{BA}}, \code{\link{BBpct}},
\code{\link{CTpct}}, \code{\link{ISO}},
\code{\link{Kpct}}, \code{\link{OBP}}, \code{\link{OPS}},
\code{\link{PA}}, \code{\link{RC2002}},
\code{\link{RCbasic}}, \code{\link{RCtech}},
\code{\link{SLG}}, \code{\link{TBs}},
\code{\link{XBHpct}}, \code{\link{XBperH}},
\code{\link{wOBA}}, \code{\link{wRAA}}, \code{\link{wRC}}
}
\keyword{HRpct}
\keyword{home}
\keyword{percentage}
\keyword{run}
|
/man/HRpct.Rd
|
no_license
|
cran/baseballDBR
|
R
| false
| true
| 1,123
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/battingStats.R
\name{HRpct}
\alias{HRpct}
\title{Batting: Calculate home run percentage}
\usage{
HRpct(dat = NULL)
}
\arguments{
\item{dat}{A data frame you would wish to calculate. The data frame must have the same column names found in
The \code{Lahman} package or the Chadwick Bureau GitHub repository.}
}
\description{
Find home run percentage for batters with more than zero at bats.
Required fields from the Batting table are "AB" and "HR."
}
\examples{
data("Batting2016")
head(Batting2016)
Batting2016$HRpct <- HRpct(Batting2016)
}
\seealso{
Other Batting functions: \code{\link{BABIP}},
\code{\link{BA}}, \code{\link{BBpct}},
\code{\link{CTpct}}, \code{\link{ISO}},
\code{\link{Kpct}}, \code{\link{OBP}}, \code{\link{OPS}},
\code{\link{PA}}, \code{\link{RC2002}},
\code{\link{RCbasic}}, \code{\link{RCtech}},
\code{\link{SLG}}, \code{\link{TBs}},
\code{\link{XBHpct}}, \code{\link{XBperH}},
\code{\link{wOBA}}, \code{\link{wRAA}}, \code{\link{wRC}}
}
\keyword{HRpct}
\keyword{home}
\keyword{percentage}
\keyword{run}
|
# Automate message creation for Carpentries teaching demos
# Jeff Oliver
# jcoliver@arizona.edu
# 2021-08-09
library(rmarkdown)
library(lubridate)
# The only two lines you will likely need to change are these two:
# + trainees: update with the location of your trainees file (example format
#   is available at data/trainees.csv)
# + demo_date: update the date and time of the demo
trainees <- read.csv(file = "data/trainees.csv")
demo_date <- as.POSIXct(x = "2021-08-12 20:00:00", tz = "GMT")
# Shouldn't need to change anything below here
lesson_snippets <- read.csv(file = "data/lesson-snippets.csv")
# Let's be pedantic and ensure that https is always used over http
lesson_snippets$url <- gsub(pattern = "http://",
                            replacement = "https://",
                            x = lesson_snippets$url)
# Lots of effort here to get a nicely formatted description of the day and
# time of the teaching demo
# First, get the name of the day the demo occurs (e.g. "Thursday")
day_name <- lubridate::wday(x = demo_date, label = TRUE, abbr = FALSE)
# Going to print out the date of the demo for the time zone, here we want the
# output to be like Thursday 12 August 2021 20:00 GMT
date_string <- paste(day_name,
                     format(x = demo_date, "%d %B %Y %H:%M"),
                     lubridate::tz(demo_date)) # tz extracts the time zone info
# We also want to make sure to provide a link to a time zone converter, easiest
# if we use GMT/UTC time, so converting to that time zone first
gmt_date <- lubridate::with_tz(demo_date, tzone = "GMT")
# Compact yyyymmddTHHMM stamp in the format timeanddate.com expects
url_date <- paste0(format(x = gmt_date, "%Y%m%d"),
                   "T",
                   format(x = gmt_date, "%H%M"))
# Putting everything together for the url that will show up in the message
tzconvert_url <- paste0("https://www.timeanddate.com/worldclock/fixedtime.html?",
                        "msg=Carpentries+Teaching+Demo&iso=",
                        url_date)
# Iterate over each row in trainees data frame and create one html file per
# trainee. FIX: seq_len() instead of 1:nrow() so an empty trainees file
# skips the loop rather than iterating over c(1, 0).
for (i in seq_len(nrow(trainees))) {
  first <- trainees$first[i]
  last <- trainees$last[i]
  email <- trainees$email[i]
  # Want to get the lesson-specific snippet based on the URL the trainee
  # provided
  # pull out the URL the trainee provided
  lesson_url <- trainees$lesson_url[i]
  # Trim off trailing slash (if it is there)
  if (substr(x = lesson_url, start = nchar(lesson_url), stop = nchar(lesson_url)) == "/") {
    lesson_url <- substr(x = lesson_url, start = 1, stop = (nchar(lesson_url) - 1))
  }
  # Pedantic https!
  lesson_url <- gsub(pattern = "http://",
                     replacement = "https://",
                     x = lesson_url)
  # See if we can get the snippet text based on URL; anything other than
  # exactly one match means the lookup failed
  snippet <- lesson_snippets$snippet[lesson_snippets$url == lesson_url]
  if (length(snippet) != 1) {
    warning(paste0("There was a problem identifying the corresponding snippet for ",
                   first, " ", last,
                   ". Either an entry does not exist in data/lesson-snippets.csv ",
                   "or the provided lesson URL is incorrect. Please add lesson ",
                   "specific snippet manually to e-mail message"))
    snippet <- "\n**insert lesson-specific snippet here**\n"
  }
  # Use the RMarkdown template to build message, passing information through
  # the params list
  rmarkdown::render(input = "templates/e-mail-template.Rmd",
                    output_dir = "output",
                    output_file = paste0(last, "-", first, "-email.html"),
                    params = list(first = first,
                                  email = email,
                                  date_string = date_string,
                                  tzconvert_url = tzconvert_url,
                                  snippet = snippet))
}
|
/auto-messages.R
|
permissive
|
klbarnes20/auto-demo-email
|
R
| false
| false
| 3,853
|
r
|
# Automate message creation for Carpentries teaching demos
# Jeff Oliver
# jcoliver@arizona.edu
# 2021-08-09
library(rmarkdown)
library(lubridate)
# The only two lines you will likely need to change are these two:
# + trainees: update with the location of your trainees file (example format
# is available at data/trainees.csv)
# + demo_date: update the date and time of the demo
trainees <- read.csv(file = "data/trainees.csv")
demo_date <- as.POSIXct(x = "2021-08-12 20:00:00", tz = "GMT")
# Shouldn't need to change anything below here
lesson_snippets <- read.csv(file = "data/lesson-snippets.csv")
# Let's be pedantic and ensure that https is always used over http
lesson_snippets$url <- gsub(pattern = "http://",
                            replacement = "https://",
                            x = lesson_snippets$url)
# Lots of effort here to get a nicely formatted description of the day and time
# of the teaching demo
# First, get the name of the day the demo occurs (e.g. "Thursday")
day_name <- lubridate::wday(x = demo_date, label = TRUE, abbr = FALSE)
# Going to print out the date of the demo for the time zone, here we want the
# output to be like Thursday 12 August 2021 20:00 GMT
date_string <- paste(day_name,
                     format(x = demo_date, "%d %B %Y %H:%M"),
                     lubridate::tz(demo_date)) # tz extracts the time zone info
# We also want to make sure to provide a link to a time zone converter, easiest
# if we use GMT/UTC time, so converting to that time zone first
gmt_date <- lubridate::with_tz(demo_date, tzone = "GMT")
url_date <- paste0(format(x = gmt_date, "%Y%m%d"),
                   "T",
                   format(x = gmt_date, "%H%M"))
# Putting everything together for that url that will show up in message
tzconvert_url <- paste0("https://www.timeanddate.com/worldclock/fixedtime.html?",
                        "msg=Carpentries+Teaching+Demo&iso=",
                        url_date)
# Iterate over each row in the trainees data frame and render one html
# message file per trainee.
# seq_len() is used instead of 1:nrow() so that the loop body is skipped
# entirely (rather than run with i = 1 and i = 0) if the trainees table
# happens to contain zero rows.
for (i in seq_len(nrow(trainees))) {
  first <- trainees$first[i]
  last <- trainees$last[i]
  email <- trainees$email[i]
  # Want to get the lesson-specific snippet based on the URL the trainee
  # provided
  # pull out the URL the trainee provided
  lesson_url <- trainees$lesson_url[i]
  # Trim off trailing slash (if it is there) so the URL matches the
  # lesson-snippets lookup table
  if (substr(x = lesson_url, start = nchar(lesson_url), stop = nchar(lesson_url)) == "/") {
    lesson_url <- substr(x = lesson_url, start = 1, stop = (nchar(lesson_url) - 1))
  }
  # Pedantic https!
  lesson_url <- gsub(pattern = "http://",
                     replacement = "https://",
                     x = lesson_url)
  # See if we can get the snippet text based on URL; the length check below
  # catches both "no match" and "multiple matches"
  snippet <- lesson_snippets$snippet[lesson_snippets$url == lesson_url]
  if (length(snippet) != 1) {
    warning(paste0("There was a problem identifying the corresponding snippet for ",
                   first, " ", last,
                   ". Either an entry does not exist in data/lesson-snippets.csv ",
                   "or the provided lesson URL is incorrect. Please add lesson ",
                   "specific snippet manually to e-mail message"))
    snippet <- "\n**insert lesson-specific snippet here**\n"
  }
  # Use the RMarkdown template to build message, passing information through
  # the params list
  rmarkdown::render(input = "templates/e-mail-template.Rmd",
                    output_dir = "output",
                    output_file = paste0(last, "-", first, "-email.html"),
                    params = list(first = first,
                                  email = email,
                                  date_string = date_string,
                                  tzconvert_url = tzconvert_url,
                                  snippet = snippet))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/odbc.R
\name{odbc_ini}
\alias{odbc_ini}
\title{Manage Database Connection}
\usage{
odbc_ini()
}
\description{
This RStudio Addin opens the .odbc.ini file to manage local
SQL server login credentials.
}
|
/man/odbc_ini.Rd
|
no_license
|
shafayetShafee/addin_demo
|
R
| false
| true
| 281
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/odbc.R
\name{odbc_ini}
\alias{odbc_ini}
\title{Manage Database Connection}
\usage{
odbc_ini()
}
\description{
This RStudio Addin opens the .odbc.ini file to manage local
SQL server login credentials.
}
|
\name{locq.growth}
\alias{locq.growth}
\title{
Portfolio matrix for specialization and growth
}
\description{
Portfolio matrix plot comparing two numeric vectors (here: specialization and growth)
}
\usage{
locq.growth(e_ij1, e_ij2, e_i1, e_i2, industry.names = NULL,
y.axis = "r",
psize, psize.factor = 10, time.periods = NULL,
pmx = "Regional specialization", pmy = "Regional growth",
pmtitle = "Portfolio matrix", pcol = NULL, pcol.border = NULL,
leg = FALSE, leg.fsize = 1, leg.col = NULL,
leg.x = 0, leg.y = y_min*1.5,
bg.col = "gray95", bgrid = TRUE, bgrid.col = "white",
bgrid.size = 2, bgrid.type = "solid",
seg.x = 1, seg.y = 0)
}
\arguments{
\item{e_ij1}{
a numeric vector with \eqn{i} values containing the employment in \eqn{i} industries in region \eqn{j} at time 1
}
\item{e_ij2}{
a numeric vector with \eqn{i} values containing the employment in \eqn{i} industries in region \eqn{j} at time 2
}
\item{e_i1}{
a numeric vector with \eqn{i} values containing the total employment in \eqn{i} industries at time 1
}
\item{e_i2}{
a numeric vector with \eqn{i} values containing the total employment in \eqn{i} industries at time 2
}
\item{industry.names}{
Industry names (e.g. from the relevant statistical classification of economic activities)
}
\item{y.axis}{
Declares which values shall be plotted on the Y axis: If \code{y.axis = "r"}, the Y axis shows the \emph{regional} growth. If \code{y.axis = "n"}, the Y axis shows the \emph{national} growth. To set both growths in ratio, choose \code{y.axis = "rn"} (regional vs. national growth)
}
\item{psize}{
Point size in the portfolio matrix plot (mostly the absolute values of employment in \eqn{i} industries in region \eqn{j} at time 2)
}
\item{psize.factor}{
Enlargement factor for the points in the plot
}
\item{time.periods}{
No. of regarded time periods (for average growth rates)
}
\item{pmx}{
Name of the X axis in the plot
}
\item{pmy}{
Name of the Y axis in the plot
}
\item{pmtitle}{
Plot title
}
\item{pcol}{
Industry-specific point colors
}
\item{pcol.border}{
Color of point border
}
\item{leg}{
Logical argument that indicates if a legend has to be added to the plot
}
\item{leg.fsize}{
If \code{leg = TRUE}: Font size in the plot legend
}
\item{leg.col}{
No. of columns in the plot legend
}
\item{leg.x}{
If \code{leg = TRUE}: X coordinate of the legend
}
\item{leg.y}{
If \code{leg = TRUE}: Y coordinate of the legend
}
\item{bg.col}{
Background color
}
\item{bgrid}{
Logical argument that indicates if a grid has to be added to the plot
}
\item{bgrid.col}{
If \code{bgrid = TRUE}: Color of the grid
}
\item{bgrid.size}{
If \code{bgrid = TRUE}: Size of the grid
}
\item{bgrid.type}{
If \code{bgrid = TRUE}: Type of the grid
}
\item{seg.x}{
X coordinate of segmentation of the plot
}
\item{seg.y}{
Y coordinate of segmentation of the plot
}
}
\details{
The \emph{portfolio matrix} is a graphic tool displaying the development of one variable compared to another variable. The plot shows the regarded variable on the \eqn{x} axis and a variable with which it is confronted on the \eqn{y} axis while the graph is divided into four quadrants. Originally, the \emph{portfolio matrix} was developed by the \emph{Boston Consulting Group} to analyze the performance of product lines in marketing, also known as the \emph{growth-share matrix}. The quadrants show the performance of the regarded objects (stars, cash cows, question marks, dogs) (Henderson 1973). But the \emph{portfolio matrix} can also be used to analyze/illustrate the world market integration of a region or a national economy by confronting e.g. the increase in world market share (\eqn{x} axis) and the world trade growth (\eqn{y} axis) (Baker et al. 2002). Another option is to analyze/illustrate the economic performance of a region (Howard 2007). E.g. it is possible to confront the growth of industries in a region with the overall growth of these industries in the national economy.
This function is a special case of portfolio matrix, showing the regional specialization on the X axis instead of the regional growth (which can be plotted on the Y axis).
}
\value{
A portfolio matrix plot.
Invisible: a \code{list} containing the following items:
\item{portfolio.data }{The data related to the plot}
\item{locq }{The localization quotients for each year}
\item{growth }{The growth values for each industry}
}
\references{
Baker, P./von Kirchbach, F./Mimouni, M./Pasteels, J.-M. (2002): \dQuote{Analytical tools for enhancing the participation of developing countries in the Multilateral Trading System in the context of the Doha Development Agenda}. In: \emph{Aussenwirtschaft}, \bold{57}, 3, p. 343-372.
Howard, D. (2007): \dQuote{A regional economic performance matrix - an aid to regional economic policy development}. In: \emph{Journal of Economic and Social Policy}, \bold{11}, 2, Art. 4.
Henderson, B. D. (1973): \dQuote{The Experience Curve - Reviewed, IV. The Growth Share Matrix or The Product Portfolio}. The Boston Consulting Group (BCG).
}
\author{
Thomas Wieland
}
\seealso{
\code{\link{locq}}, \code{\link{portfolio}}, \code{\link{shift}}, \code{\link{shiftd}}, \code{\link{shifti}}
}
\examples{
data(Goettingen)
# Loads employment data for Goettingen and Germany (2008-2017)
locq.growth(Goettingen$Goettingen2008[2:16], Goettingen$Goettingen2017[2:16],
Goettingen$BRD2008[2:16], Goettingen$BRD2017[2:16],
psize = Goettingen$Goettingen2017[2:16],
industry.names = Goettingen$WA_WZ2008[2:16], pcol.border = "grey",
leg = TRUE, leg.fsize = 0.4, leg.x = -0.2)
}
|
/man/locq.growth.Rd
|
no_license
|
cran/REAT
|
R
| false
| false
| 5,736
|
rd
|
\name{locq.growth}
\alias{locq.growth}
\title{
Portfolio matrix for specialization and growth
}
\description{
Portfolio matrix plot comparing two numeric vectors (here: specialization and growth)
}
\usage{
locq.growth(e_ij1, e_ij2, e_i1, e_i2, industry.names = NULL,
y.axis = "r",
psize, psize.factor = 10, time.periods = NULL,
pmx = "Regional specialization", pmy = "Regional growth",
pmtitle = "Portfolio matrix", pcol = NULL, pcol.border = NULL,
leg = FALSE, leg.fsize = 1, leg.col = NULL,
leg.x = 0, leg.y = y_min*1.5,
bg.col = "gray95", bgrid = TRUE, bgrid.col = "white",
bgrid.size = 2, bgrid.type = "solid",
seg.x = 1, seg.y = 0)
}
\arguments{
\item{e_ij1}{
a numeric vector with \eqn{i} values containing the employment in \eqn{i} industries in region \eqn{j} at time 1
}
\item{e_ij2}{
a numeric vector with \eqn{i} values containing the employment in \eqn{i} industries in region \eqn{j} at time 2
}
\item{e_i1}{
a numeric vector with \eqn{i} values containing the total employment in \eqn{i} industries at time 1
}
\item{e_i2}{
a numeric vector with \eqn{i} values containing the total employment in \eqn{i} industries at time 2
}
\item{industry.names}{
Industry names (e.g. from the relevant statistical classification of economic activities)
}
\item{y.axis}{
Declares which values shall be plotted on the Y axis: If \code{y.axis = "r"}, the Y axis shows the \emph{regional} growth. If \code{y.axis = "n"}, the Y axis shows the \emph{national} growth. To set both growths in ratio, choose \code{y.axis = "rn"} (regional vs. national growth)
}
\item{psize}{
Point size in the portfolio matrix plot (mostly the absolute values of employment in \eqn{i} industries in region \eqn{j} at time 2)
}
\item{psize.factor}{
Enlargement factor for the points in the plot
}
\item{time.periods}{
No. of regarded time periods (for average growth rates)
}
\item{pmx}{
Name of the X axis in the plot
}
\item{pmy}{
Name of the Y axis in the plot
}
\item{pmtitle}{
Plot title
}
\item{pcol}{
Industry-specific point colors
}
\item{pcol.border}{
Color of point border
}
\item{leg}{
Logical argument that indicates if a legend has to be added to the plot
}
\item{leg.fsize}{
If \code{leg = TRUE}: Font size in the plot legend
}
\item{leg.col}{
No. of columns in the plot legend
}
\item{leg.x}{
If \code{leg = TRUE}: X coordinate of the legend
}
\item{leg.y}{
If \code{leg = TRUE}: Y coordinate of the legend
}
\item{bg.col}{
Background color
}
\item{bgrid}{
Logical argument that indicates if a grid has to be added to the plot
}
\item{bgrid.col}{
If \code{bgrid = TRUE}: Color of the grid
}
\item{bgrid.size}{
If \code{bgrid = TRUE}: Size of the grid
}
\item{bgrid.type}{
If \code{bgrid = TRUE}: Type of the grid
}
\item{seg.x}{
X coordinate of segmentation of the plot
}
\item{seg.y}{
Y coordinate of segmentation of the plot
}
}
\details{
The \emph{portfolio matrix} is a graphic tool displaying the development of one variable compared to another variable. The plot shows the regarded variable on the \eqn{x} axis and a variable with which it is confronted on the \eqn{y} axis while the graph is divided into four quadrants. Originally, the \emph{portfolio matrix} was developed by the \emph{Boston Consulting Group} to analyze the performance of product lines in marketing, also known as the \emph{growth-share matrix}. The quadrants show the performance of the regarded objects (stars, cash cows, question marks, dogs) (Henderson 1973). But the \emph{portfolio matrix} can also be used to analyze/illustrate the world market integration of a region or a national economy by confronting e.g. the increase in world market share (\eqn{x} axis) and the world trade growth (\eqn{y} axis) (Baker et al. 2002). Another option is to analyze/illustrate the economic performance of a region (Howard 2007). E.g. it is possible to confront the growth of industries in a region with the overall growth of these industries in the national economy.
This function is a special case of portfolio matrix, showing the regional specialization on the X axis instead of the regional growth (which can be plotted on the Y axis).
}
\value{
A portfolio matrix plot.
Invisible: a \code{list} containing the following items:
\item{portfolio.data }{The data related to the plot}
\item{locq }{The localization quotients for each year}
\item{growth }{The growth values for each industry}
}
\references{
Baker, P./von Kirchbach, F./Mimouni, M./Pasteels, J.-M. (2002): \dQuote{Analytical tools for enhancing the participation of developing countries in the Multilateral Trading System in the context of the Doha Development Agenda}. In: \emph{Aussenwirtschaft}, \bold{57}, 3, p. 343-372.
Howard, D. (2007): \dQuote{A regional economic performance matrix - an aid to regional economic policy development}. In: \emph{Journal of Economic and Social Policy}, \bold{11}, 2, Art. 4.
Henderson, B. D. (1973): \dQuote{The Experience Curve - Reviewed, IV. The Growth Share Matrix or The Product Portfolio}. The Boston Consulting Group (BCG).
}
\author{
Thomas Wieland
}
\seealso{
\code{\link{locq}}, \code{\link{portfolio}}, \code{\link{shift}}, \code{\link{shiftd}}, \code{\link{shifti}}
}
\examples{
data(Goettingen)
# Loads employment data for Goettingen and Germany (2008-2017)
locq.growth(Goettingen$Goettingen2008[2:16], Goettingen$Goettingen2017[2:16],
Goettingen$BRD2008[2:16], Goettingen$BRD2017[2:16],
psize = Goettingen$Goettingen2017[2:16],
industry.names = Goettingen$WA_WZ2008[2:16], pcol.border = "grey",
leg = TRUE, leg.fsize = 0.4, leg.x = -0.2)
}
|
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02
power <- read.table(file = "~/Downloads/household_power_consumption.txt",
                    header = TRUE, sep = ";", na.strings = "?")
# Keep only the two February days of interest, then parse the Date column
feb_power <- subset(power, Date %in% c("1/2/2007", "2/2/2007"))
feb_power$Date <- as.Date(feb_power$Date, format = "%d/%m/%Y")
# Render the histogram straight to a 480x480 px PNG device
png(file = "~/Desktop/Coursera/ExData_Plotting1/plot1.png", width = 480, height = 480, units = "px")
hist(feb_power$Global_active_power,
     col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)",
     ylab = "Frequency")
dev.off()
|
/plot1.R
|
no_license
|
shannonbrady/ExData_Plotting1
|
R
| false
| false
| 475
|
r
|
# Plot 1: histogram of Global Active Power for 2007-02-01 and 2007-02-02
power <- read.table(file = "~/Downloads/household_power_consumption.txt",
                    header = TRUE, sep = ";", na.strings = "?")
# Keep only the two February days of interest, then parse the Date column
feb_power <- subset(power, Date %in% c("1/2/2007", "2/2/2007"))
feb_power$Date <- as.Date(feb_power$Date, format = "%d/%m/%Y")
# Render the histogram straight to a 480x480 px PNG device
png(file = "~/Desktop/Coursera/ExData_Plotting1/plot1.png", width = 480, height = 480, units = "px")
hist(feb_power$Global_active_power,
     col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)",
     ylab = "Frequency")
dev.off()
|
# Load the raw thesis dataset from Excel and open it in the data viewer
library(readxl)
Dataset_Thesis_Raw <- read_excel("C:/Users/Utente/Downloads/Dataset Thesis Raw.xlsx")
View(Dataset_Thesis_Raw)
library(ggplot2)
# Boxplot of investor age at the beginning of the investment.
# The original title said "Histogram", but the geom is a boxplot, so the
# title has been corrected to match the chart type.
ggplot(Dataset_Thesis_Raw, aes(y = age_beginning)) +
  geom_boxplot() +
  labs(title = "Boxplot for Age at the beginning of the investment", y = "Age of the investors")
# Counting the people with respect to their field of study
sum(Dataset_Thesis_Raw$field_of_study == "ECONOMICS AND FINANCE")
sum(Dataset_Thesis_Raw$field_of_study == "ENGINEERING")
sum(Dataset_Thesis_Raw$field_of_study == "LANGUAGES AND TOURISM")
sum(Dataset_Thesis_Raw$field_of_study == "MEDICAL SCIENCES")
sum(Dataset_Thesis_Raw$field_of_study == "MODERN LITERATURE")
sum(Dataset_Thesis_Raw$field_of_study == "UNKNOWN")
sum(Dataset_Thesis_Raw$field_of_study == "SCIENTIFIC HIGH SCHOOL")
# Statistics of the experience estimators
max(Dataset_Thesis_Raw$n_roles_beginning)
mean(Dataset_Thesis_Raw$n_failed_inv)
median(Dataset_Thesis_Raw$n_failed_inv)
max(Dataset_Thesis_Raw$n_failed_inv)
# Number of investors with no failed investments at all
sum(Dataset_Thesis_Raw$n_failed_inv==0)
sd(Dataset_Thesis_Raw$value_variation)
max(Dataset_Thesis_Raw$value_variation)
# Values in percentage (value_variation is presumably a proportion -- TODO confirm)
variation<-Dataset_Thesis_Raw$value_variation*100
sd(variation)
max(Dataset_Thesis_Raw$value_variation)*100
mean(Dataset_Thesis_Raw$value_variation)*100
median(Dataset_Thesis_Raw$value_variation)*100
# Summary of all variables of the dataset
summary(Dataset_Thesis_Raw)
# Standard deviations for the table 1
# (each column is pulled into its own vector before calling sd())
VariationShare<-Dataset_Thesis_Raw$difference_share
sd(VariationShare)
logvar<-Dataset_Thesis_Raw$log_variation
sd(logvar)
rolesbeg<-Dataset_Thesis_Raw$n_roles_beginning
sd(rolesbeg)
failed<-Dataset_Thesis_Raw$n_failed_inv
sd(failed)
agetoday<-Dataset_Thesis_Raw$age_today
sd(agetoday)
agebeg<-Dataset_Thesis_Raw$age_beginning
sd(agebeg)
durat<-Dataset_Thesis_Raw$duration
sd(durat)
# (ggplot2 is already attached at the top of the script; the duplicate
# library(ggplot2) call that used to be here was removed.)
# Histogram for Years of beginning of investment.
# The column is referenced by bare name inside aes() rather than via
# Dataset_Thesis_Raw$foundation_year, so the mapping stays tied to the
# `data` argument of ggplot() (the $-form breaks faceting/grouping).
ggplot(data = Dataset_Thesis_Raw, aes(x = foundation_year)) +
  geom_histogram(breaks=seq(2000, 2019, by=1),
                 col="red",
                 fill="green",
                 alpha = .2) +
  labs(x="Years of constitution", y="Count") +
  ylim(c(0,13))
sum(Dataset_Thesis_Raw$foundation_year==2017)
sum(Dataset_Thesis_Raw$foundation_year==2018)
sum(Dataset_Thesis_Raw$foundation_year==2016)
# Share of investments that begun in 2011 or later. nrow() replaces the
# hard-coded sample size (91) so this stays correct if the data change.
sum(Dataset_Thesis_Raw$foundation_year>=2011)/nrow(Dataset_Thesis_Raw)
# Distribution of the log growth rates (density-scaled histogram + kernel density)
ggplot(data=Dataset_Thesis_Raw, aes(log_variation)) +
  geom_histogram(aes(y =..density..),
                 col="red",
                 fill="green",
                 alpha=.2) +
  geom_density(col=2) +
  labs(x="log-variation")
mean(Dataset_Thesis_Raw$log_variation)
sd(Dataset_Thesis_Raw$log_variation)
median(Dataset_Thesis_Raw$log_variation)
# Scatterplot age investors vs growth rate, colored by gender, with a
# fitted linear trend and its confidence band.
# ("investement" in the x-axis label was corrected to "investment".)
ggplot(Dataset_Thesis_Raw, aes(x=age_beginning, y=log_variation)) +
  geom_point(aes(col=gender))+
  geom_smooth(method = "lm", se = TRUE)+
  labs(x="Age at the beginning of the investment", y="log-variation")
# Linear regression of the log growth rate on investor age
# (reg1 is reused later by the stargazer table)
Age<-as.matrix(Dataset_Thesis_Raw$age_beginning)
logvariation <- as.matrix(Dataset_Thesis_Raw$log_variation)
reg1 <-lm(logvariation ~ Age)
summary(reg1)
# Other types of scatterplots:
# Scatterplot experience vs growth rate
ggplot(Dataset_Thesis_Raw, aes(x=n_roles_beginning, y=log_variation)) +
  geom_point(aes(col=gender))+
  geom_smooth(method = "lm", se = TRUE)
#Scatterplot (at least one failure) vs growth rate, on the subset of
#investors with one or more failed investments
ggplot(Dataset_Thesis_Raw[Dataset_Thesis_Raw$n_failed_inv>=1,], aes(x=n_failed_inv, y=log_variation)) +
  geom_point(aes(col=gender))+
  geom_smooth(method = "lm", se = TRUE) +
  labs(x="Number of failed investments", y="log-variation")
#Boxplot for the log-variation
ggplot(data=Dataset_Thesis_Raw, aes(y=log_variation)) +
  geom_boxplot() +
  stat_boxplot(geom ='errorbar') +
  labs(y="Log - variation")+
  xlim(c(-0.5,0.5))
# Men vs Women: boxplots by gender plus the underlying summary statistics
ggplot(data=Dataset_Thesis_Raw, aes(x=gender, y=log_variation)) +
  geom_boxplot() +
  stat_boxplot(geom ='errorbar') +
  labs(x="Gender" ,y="Log - variation")
mean(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$gender=="M"])
mean(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$gender=="F"])
sd(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$gender=="M"])
sd(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$gender=="F"])
max(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$gender=="M"])
min(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$gender=="M"])
# ECONOMICS vs others: mean log growth for Economics & Finance graduates
# compared against everyone else
mean(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$field_of_study=="ECONOMICS AND FINANCE"])
mean(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$field_of_study!="ECONOMICS AND FINANCE"])
ggplot(data=Dataset_Thesis_Raw, aes(field_of_study=="ECONOMICS AND FINANCE", y=log_variation)) +
  geom_boxplot() +
  stat_boxplot(geom ='errorbar') +
  labs(x="Economics and Finance" ,y="Log - variation")
# ECONOMICS vs ENGINEERING
# Vectors (logical row masks over the full dataset)
Eco<-Dataset_Thesis_Raw$field_of_study=="ECONOMICS AND FINANCE"
Eng<-Dataset_Thesis_Raw$field_of_study=="ENGINEERING"
# Dataset containing only people with Eco & Fin or Engineering study background
EcovsEng<-Dataset_Thesis_Raw[Eco | Eng,]
ggplot(data=EcovsEng, aes(x=field_of_study=="ECONOMICS AND FINANCE", y=log_variation)) +
  geom_boxplot() +
  stat_boxplot(geom ='errorbar') +
  labs(x="Economics and Finance", y="Log - variation")
mean(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$field_of_study=="ECONOMICS AND FINANCE"])
# NOTE(review): the comparison below is "not ENGINEERING" on the FULL
# dataset, not the Engineering rows of EcovsEng -- confirm this is intended
mean(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$field_of_study!="ENGINEERING"])
min(EcovsEng$log_variation[EcovsEng$field_of_study=="ENGINEERING"])
max(EcovsEng$log_variation[EcovsEng$field_of_study=="ENGINEERING"])
sd(EcovsEng$log_variation[EcovsEng$field_of_study=="ENGINEERING"])
# Who experienced the best variation in the involvement of shareholding of an enterprise?
# NOTE(review): the x aesthetic encodes age_beginning >= 43 (TRUE = 43 or
# older), but the axis label says "less than 43" -- confirm intended wording
ggplot(data=Dataset_Thesis_Raw, aes(x=age_beginning>=43, y=log_variation)) +
  geom_boxplot() +
  stat_boxplot(geom ='errorbar') +
  labs(x="People less than 43 years old",y="Log - variation")
# Investors whose share in the company increased
diffshare_pos<-Dataset_Thesis_Raw[Dataset_Thesis_Raw$difference_share>0,]
ggplot(data=diffshare_pos, aes(x=age_beginning>43, y=difference_share)) +
  geom_boxplot() +
  stat_boxplot(geom ='errorbar') +
  labs(x="People more than 43 years old",y="Share variation in percentage")
mean(diffshare_pos$difference_share[diffshare_pos$age_beginning>43])
mean(diffshare_pos$difference_share[diffshare_pos$age_beginning<=43])
# Investors whose share in the company decreased
diffshare_neg<-Dataset_Thesis_Raw[Dataset_Thesis_Raw$difference_share<0,]
ggplot(data=diffshare_neg, aes(x=age_beginning>43, y=difference_share)) +
  geom_boxplot() +
  stat_boxplot(geom ='errorbar') +
  labs(x="People more than 43 years old",y="Share variation in percentage")
mean(diffshare_neg$difference_share[diffshare_neg$age_beginning>43])
mean(diffshare_neg$difference_share[diffshare_neg$age_beginning<=43])
# Do people who have experienced more than 8 (mean) investments have a higher return than those with less experience?
ggplot(data=Dataset_Thesis_Raw, aes(x=n_roles_beginning>=8, y=log_variation)) +
  geom_boxplot() +
  stat_boxplot(geom ='errorbar') +
  labs(x="People with more than 8 roles", y="Log - variation")
mean(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_roles_beginning>=8])
min(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_roles_beginning>=8])
max(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_roles_beginning>=8])
sd(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_roles_beginning>=8])
mean(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_roles_beginning<8])
min(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_roles_beginning<8])
max(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_roles_beginning<8])
sd(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_roles_beginning<8])
# Print the individual log-variations for the experienced group
Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_roles_beginning>=8]
# Do people who have at least one failure investments have a higher return than those with less experience?
ggplot(data=Dataset_Thesis_Raw, aes(x=n_failed_inv>=1, y=log_variation)) +
  geom_boxplot() +
  stat_boxplot(geom ='errorbar') +
  labs(x="People with at least one failed investment",y="Log - variation")
mean(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_failed_inv>=1])
min(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_failed_inv>=1])
max(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_failed_inv>=1])
sd(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_failed_inv>=1])
mean(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_failed_inv<1])
min(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_failed_inv<1])
max(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_failed_inv<1])
sd(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_failed_inv<1])
# Did people with multiple investments improved the quality of investments (growth of the company) over the time?
# NOTE(review): rows of repeat investors are selected by hard-coded indices,
# which will silently break if the spreadsheet rows change -- confirm
Multiple_investors<-Dataset_Thesis_Raw[c(4:24,26:50,53:64,66:69,76:82,87,88,90,91),]
ggplot(Multiple_investors, aes(x=year_beg, y=log_variation, color=gender)) +
  geom_point() +
  geom_smooth(method=lm, se=FALSE, fullrange=TRUE)+
  labs(x="Initial year",y="Log - variation")
# Is there a correlation between the rate of involvement and the performance of the enterprise?
# (8.36 is presumably the mean/median of share_beginning -- TODO confirm)
share5<-Dataset_Thesis_Raw[Dataset_Thesis_Raw$share_beginning>8.36,]
ggplot(share5, aes(x=share_beginning, y=log_variation)) +
  geom_point() +
  geom_smooth(method=lm, se=FALSE, fullrange=TRUE)+
  labs(x="Share owned at the beginning",y="Log - variation")
# Linear regression of log growth on initial share (on the share5 subset);
# note this reassigns the script-level `logvariation` variable
share<-as.matrix(share5$share_beginning)
logvariation <- as.matrix(share5$log_variation)
reg2 <-lm(logvariation ~ share)
summary(reg2)
# Did the companies founded before 2010 grew more than the others
median(Dataset_Thesis_Raw$foundation_year)
ggplot(data=Dataset_Thesis_Raw, aes(x=foundation_year<=2010, y=log_variation)) +
  geom_boxplot() +
  stat_boxplot(geom ='errorbar') +
  labs(x="Companies founded in 2010 or before",y="Log - variation")
mean(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$foundation_year<=2010])
mean(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$foundation_year>2010])
min(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$foundation_year<=2010])
max(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$foundation_year<=2010])
min(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$foundation_year>2010])
max(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$foundation_year>2010])
# Generate a LaTeX table for the regression results with stargazer.
# Only install the package when it is missing: unconditionally calling
# install.packages() on every run is slow and requires a CRAN mirror.
if (!requireNamespace("stargazer", quietly = TRUE)) {
  install.packages("stargazer")
}
library(stargazer)
stargazer(reg1,title="Regression Results")
|
/THESIS CODE.R
|
no_license
|
girolamovurro/Thesis
|
R
| false
| false
| 11,147
|
r
|
# Load the raw thesis dataset from Excel and open it in the data viewer
library(readxl)
Dataset_Thesis_Raw <- read_excel("C:/Users/Utente/Downloads/Dataset Thesis Raw.xlsx")
View(Dataset_Thesis_Raw)
library(ggplot2)
# Boxplot of investor age at the beginning of the investment.
# The original title said "Histogram", but the geom is a boxplot, so the
# title has been corrected to match the chart type.
ggplot(Dataset_Thesis_Raw, aes(y = age_beginning)) +
  geom_boxplot() +
  labs(title = "Boxplot for Age at the beginning of the investment", y = "Age of the investors")
# Counting the people with respect to their field of study
sum(Dataset_Thesis_Raw$field_of_study == "ECONOMICS AND FINANCE")
sum(Dataset_Thesis_Raw$field_of_study == "ENGINEERING")
sum(Dataset_Thesis_Raw$field_of_study == "LANGUAGES AND TOURISM")
sum(Dataset_Thesis_Raw$field_of_study == "MEDICAL SCIENCES")
sum(Dataset_Thesis_Raw$field_of_study == "MODERN LITERATURE")
sum(Dataset_Thesis_Raw$field_of_study == "UNKNOWN")
sum(Dataset_Thesis_Raw$field_of_study == "SCIENTIFIC HIGH SCHOOL")
# Statistics of the experience estimators
max(Dataset_Thesis_Raw$n_roles_beginning)
mean(Dataset_Thesis_Raw$n_failed_inv)
median(Dataset_Thesis_Raw$n_failed_inv)
max(Dataset_Thesis_Raw$n_failed_inv)
# Number of investors with no failed investments at all
sum(Dataset_Thesis_Raw$n_failed_inv==0)
sd(Dataset_Thesis_Raw$value_variation)
max(Dataset_Thesis_Raw$value_variation)
# Values in percentage (value_variation is presumably a proportion -- TODO confirm)
variation<-Dataset_Thesis_Raw$value_variation*100
sd(variation)
max(Dataset_Thesis_Raw$value_variation)*100
mean(Dataset_Thesis_Raw$value_variation)*100
median(Dataset_Thesis_Raw$value_variation)*100
# Summary of all variables of the dataset
summary(Dataset_Thesis_Raw)
# Standard deviations for the table 1
# (each column is pulled into its own vector before calling sd())
VariationShare<-Dataset_Thesis_Raw$difference_share
sd(VariationShare)
logvar<-Dataset_Thesis_Raw$log_variation
sd(logvar)
rolesbeg<-Dataset_Thesis_Raw$n_roles_beginning
sd(rolesbeg)
failed<-Dataset_Thesis_Raw$n_failed_inv
sd(failed)
agetoday<-Dataset_Thesis_Raw$age_today
sd(agetoday)
agebeg<-Dataset_Thesis_Raw$age_beginning
sd(agebeg)
durat<-Dataset_Thesis_Raw$duration
sd(durat)
# (ggplot2 is already attached at the top of the script; the duplicate
# library(ggplot2) call that used to be here was removed.)
# Histogram for Years of beginning of investment.
# The column is referenced by bare name inside aes() rather than via
# Dataset_Thesis_Raw$foundation_year, so the mapping stays tied to the
# `data` argument of ggplot() (the $-form breaks faceting/grouping).
ggplot(data = Dataset_Thesis_Raw, aes(x = foundation_year)) +
  geom_histogram(breaks=seq(2000, 2019, by=1),
                 col="red",
                 fill="green",
                 alpha = .2) +
  labs(x="Years of constitution", y="Count") +
  ylim(c(0,13))
sum(Dataset_Thesis_Raw$foundation_year==2017)
sum(Dataset_Thesis_Raw$foundation_year==2018)
sum(Dataset_Thesis_Raw$foundation_year==2016)
# Share of investments that begun in 2011 or later. nrow() replaces the
# hard-coded sample size (91) so this stays correct if the data change.
sum(Dataset_Thesis_Raw$foundation_year>=2011)/nrow(Dataset_Thesis_Raw)
# Distribution of the log growth rates (density-scaled histogram + kernel density)
ggplot(data=Dataset_Thesis_Raw, aes(log_variation)) +
  geom_histogram(aes(y =..density..),
                 col="red",
                 fill="green",
                 alpha=.2) +
  geom_density(col=2) +
  labs(x="log-variation")
mean(Dataset_Thesis_Raw$log_variation)
sd(Dataset_Thesis_Raw$log_variation)
median(Dataset_Thesis_Raw$log_variation)
# Scatterplot age investors vs growth rate, colored by gender, with a
# fitted linear trend and its confidence band.
# ("investement" in the x-axis label was corrected to "investment".)
ggplot(Dataset_Thesis_Raw, aes(x=age_beginning, y=log_variation)) +
  geom_point(aes(col=gender))+
  geom_smooth(method = "lm", se = TRUE)+
  labs(x="Age at the beginning of the investment", y="log-variation")
# Linear regression of the log growth rate on investor age
# (reg1 is reused later by the stargazer table)
Age<-as.matrix(Dataset_Thesis_Raw$age_beginning)
logvariation <- as.matrix(Dataset_Thesis_Raw$log_variation)
reg1 <-lm(logvariation ~ Age)
summary(reg1)
# Other types of scatterplots:
# Scatterplot experience vs growth rate
ggplot(Dataset_Thesis_Raw, aes(x=n_roles_beginning, y=log_variation)) +
  geom_point(aes(col=gender))+
  geom_smooth(method = "lm", se = TRUE)
#Scatterplot (at least one failure) vs growth rate, on the subset of
#investors with one or more failed investments
ggplot(Dataset_Thesis_Raw[Dataset_Thesis_Raw$n_failed_inv>=1,], aes(x=n_failed_inv, y=log_variation)) +
  geom_point(aes(col=gender))+
  geom_smooth(method = "lm", se = TRUE) +
  labs(x="Number of failed investments", y="log-variation")
#Boxplot for the log-variation
ggplot(data=Dataset_Thesis_Raw, aes(y=log_variation)) +
  geom_boxplot() +
  stat_boxplot(geom ='errorbar') +
  labs(y="Log - variation")+
  xlim(c(-0.5,0.5))
# Men vs Women: boxplots by gender plus the underlying summary statistics
ggplot(data=Dataset_Thesis_Raw, aes(x=gender, y=log_variation)) +
  geom_boxplot() +
  stat_boxplot(geom ='errorbar') +
  labs(x="Gender" ,y="Log - variation")
mean(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$gender=="M"])
mean(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$gender=="F"])
sd(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$gender=="M"])
sd(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$gender=="F"])
max(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$gender=="M"])
min(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$gender=="M"])
# ECONOMICS vs others: mean log growth for Economics & Finance graduates
# compared against everyone else
mean(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$field_of_study=="ECONOMICS AND FINANCE"])
mean(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$field_of_study!="ECONOMICS AND FINANCE"])
ggplot(data=Dataset_Thesis_Raw, aes(field_of_study=="ECONOMICS AND FINANCE", y=log_variation)) +
  geom_boxplot() +
  stat_boxplot(geom ='errorbar') +
  labs(x="Economics and Finance" ,y="Log - variation")
# ECONOMICS vs ENGINEERING
# Vectors (logical row masks over the full dataset)
Eco<-Dataset_Thesis_Raw$field_of_study=="ECONOMICS AND FINANCE"
Eng<-Dataset_Thesis_Raw$field_of_study=="ENGINEERING"
# Dataset containing only people with Eco & Fin or Engineering study background
EcovsEng<-Dataset_Thesis_Raw[Eco | Eng,]
ggplot(data=EcovsEng, aes(x=field_of_study=="ECONOMICS AND FINANCE", y=log_variation)) +
  geom_boxplot() +
  stat_boxplot(geom ='errorbar') +
  labs(x="Economics and Finance", y="Log - variation")
mean(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$field_of_study=="ECONOMICS AND FINANCE"])
# NOTE(review): the comparison below is "not ENGINEERING" on the FULL
# dataset, not the Engineering rows of EcovsEng -- confirm this is intended
mean(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$field_of_study!="ENGINEERING"])
min(EcovsEng$log_variation[EcovsEng$field_of_study=="ENGINEERING"])
max(EcovsEng$log_variation[EcovsEng$field_of_study=="ENGINEERING"])
sd(EcovsEng$log_variation[EcovsEng$field_of_study=="ENGINEERING"])
# Who experienced the best variation in the involvement of shareholding of an enterprise?
ggplot(data=Dataset_Thesis_Raw, aes(x=age_beginning>=43, y=log_variation)) +
geom_boxplot() +
stat_boxplot(geom ='errorbar') +
labs(x="People less than 43 years old",y="Log - variation")
diffshare_pos<-Dataset_Thesis_Raw[Dataset_Thesis_Raw$difference_share>0,]
ggplot(data=diffshare_pos, aes(x=age_beginning>43, y=difference_share)) +
geom_boxplot() +
stat_boxplot(geom ='errorbar') +
labs(x="People more than 43 years old",y="Share variation in percentage")
mean(diffshare_pos$difference_share[diffshare_pos$age_beginning>43])
mean(diffshare_pos$difference_share[diffshare_pos$age_beginning<=43])
diffshare_neg<-Dataset_Thesis_Raw[Dataset_Thesis_Raw$difference_share<0,]
ggplot(data=diffshare_neg, aes(x=age_beginning>43, y=difference_share)) +
geom_boxplot() +
stat_boxplot(geom ='errorbar') +
labs(x="People more than 43 years old",y="Share variation in percentage")
mean(diffshare_neg$difference_share[diffshare_neg$age_beginning>43])
mean(diffshare_neg$difference_share[diffshare_neg$age_beginning<=43])
# Do people who have experienced more than 8 (mean) investments have a higher return than those with less experience?
ggplot(data=Dataset_Thesis_Raw, aes(x=n_roles_beginning>=8, y=log_variation)) +
geom_boxplot() +
stat_boxplot(geom ='errorbar') +
labs(x="People with more than 8 roles", y="Log - variation")
mean(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_roles_beginning>=8])
min(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_roles_beginning>=8])
max(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_roles_beginning>=8])
sd(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_roles_beginning>=8])
mean(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_roles_beginning<8])
min(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_roles_beginning<8])
max(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_roles_beginning<8])
sd(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_roles_beginning<8])
Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_roles_beginning>=8]
# Do people who have at least one failure investments have a higher return than those with less experience?
ggplot(data=Dataset_Thesis_Raw, aes(x=n_failed_inv>=1, y=log_variation)) +
geom_boxplot() +
stat_boxplot(geom ='errorbar') +
labs(x="People with at least one failed investment",y="Log - variation")
mean(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_failed_inv>=1])
min(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_failed_inv>=1])
max(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_failed_inv>=1])
sd(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_failed_inv>=1])
mean(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_failed_inv<1])
min(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_failed_inv<1])
max(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_failed_inv<1])
sd(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$n_failed_inv<1])
# Did people with multiple investments improved the quality of investments (growth of the company) over the time?
Multiple_investors<-Dataset_Thesis_Raw[c(4:24,26:50,53:64,66:69,76:82,87,88,90,91),]
ggplot(Multiple_investors, aes(x=year_beg, y=log_variation, color=gender)) +
geom_point() +
geom_smooth(method=lm, se=FALSE, fullrange=TRUE)+
labs(x="Initial year",y="Log - variation")
# Is there a correlation between the rate of involvement and the performance of the enterprise?
share5<-Dataset_Thesis_Raw[Dataset_Thesis_Raw$share_beginning>8.36,]
ggplot(share5, aes(x=share_beginning, y=log_variation)) +
geom_point() +
geom_smooth(method=lm, se=FALSE, fullrange=TRUE)+
labs(x="Share owned at the beginning",y="Log - variation")
share<-as.matrix(share5$share_beginning)
logvariation <- as.matrix(share5$log_variation)
reg2 <-lm(logvariation ~ share)
summary(reg2)
# Did the companies founded before 2010 grew more than the others
median(Dataset_Thesis_Raw$foundation_year)
ggplot(data=Dataset_Thesis_Raw, aes(x=foundation_year<=2010, y=log_variation)) +
geom_boxplot() +
stat_boxplot(geom ='errorbar') +
labs(x="Companies founded in 2010 or before",y="Log - variation")
mean(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$foundation_year<=2010])
mean(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$foundation_year>2010])
min(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$foundation_year<=2010])
max(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$foundation_year<=2010])
min(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$foundation_year>2010])
max(Dataset_Thesis_Raw$log_variation[Dataset_Thesis_Raw$foundation_year>2010])
# generate a table (for the regression) in latex programming language
install.packages("stargazer")
library(stargazer)
stargazer(reg1,title="Regression Results")
|
<html>
<head>
<meta name="TextLength" content="SENT_NUM:5, WORD_NUM:108">
</head>
<body bgcolor="white">
<a href="#0" id="0">It is the valley's heavily Moslem population that tilts the sectarian scale in Jammu-Kashmir state.</a>
<a href="#1" id="1">Exotic Kashmir, a tourist paradise of houseboat hotels and Mogul gardens from whose name the English made ``cashmere,'' has become a war zone of separatism and religious enmity.</a>
<a href="#2" id="2">Let new hospitals come up.''</a>
<a href="#3" id="3">``The residents are constantly bombarded with Islamic sermons and are reminded that fighting a `jihad' (Islamic holy war) is the most sacred duty of every Moslem,'' the federal official said, on condition of anonymity.</a>
<a href="#4" id="4">``We are fighting a handful of terrorists who are determined to create a law and order problem,'' said Jagmohan, a Hindu with a reputation for toughness.</a>
</body>
</html>
|
/DUC-Dataset/Summary_p100_R/D114.AP900130-0010.html.R
|
no_license
|
Angela7126/SLNSumEval
|
R
| false
| false
| 920
|
r
|
<html>
<head>
<meta name="TextLength" content="SENT_NUM:5, WORD_NUM:108">
</head>
<body bgcolor="white">
<a href="#0" id="0">It is the valley's heavily Moslem population that tilts the sectarian scale in Jammu-Kashmir state.</a>
<a href="#1" id="1">Exotic Kashmir, a tourist paradise of houseboat hotels and Mogul gardens from whose name the English made ``cashmere,'' has become a war zone of separatism and religious enmity.</a>
<a href="#2" id="2">Let new hospitals come up.''</a>
<a href="#3" id="3">``The residents are constantly bombarded with Islamic sermons and are reminded that fighting a `jihad' (Islamic holy war) is the most sacred duty of every Moslem,'' the federal official said, on condition of anonymity.</a>
<a href="#4" id="4">``We are fighting a handful of terrorists who are determined to create a law and order problem,'' said Jagmohan, a Hindu with a reputation for toughness.</a>
</body>
</html>
|
orth_Gram_Schmidt_metrique_diag <- function(M, Y) {
  # Classical Gram-Schmidt orthogonalisation under a diagonal metric.
  #
  # The inner product used is <u, v> = sum(u * v * M), where `M` holds the
  # diagonal weights of the metric matrix.
  #
  # Args:
  #   M: numeric vector of diagonal metric weights.
  #   Y: list of numeric vectors, each the same length as `M`.
  # Returns:
  #   A list of pairwise M-orthogonal vectors spanning the same space as `Y`.
  n_vec <- length(Y)
  if (n_vec == 1) {
    return(Y)
  }
  ortho <- vector("list", n_vec)
  sq_norm <- numeric(n_vec)
  ortho[[1]] <- Y[[1]]
  sq_norm[1] <- sum(ortho[[1]]^2 * M)
  for (i in 2:n_vec) {
    cur <- Y[[i]]
    # Subtract the M-projection of the ORIGINAL Y[[i]] onto each earlier
    # orthogonal vector (classical, not modified, Gram-Schmidt).
    for (j in seq_len(i - 1)) {
      coef <- sum(Y[[i]] * ortho[[j]] * M) / sq_norm[j]
      cur <- cur - coef * ortho[[j]]
    }
    ortho[[i]] <- cur
    sq_norm[i] <- sum(cur^2 * M)
  }
  return(ortho)
}
|
/R/orth_gram_schmidt_metrique_diag.R
|
no_license
|
cran/factas
|
R
| false
| false
| 518
|
r
|
# Classical Gram-Schmidt orthogonalisation under a diagonal metric.
# `M` holds the diagonal weights of the metric; the inner product used is
# <u, v> = sum(u * v * M).  `Y` is a list of numeric vectors; the result
# is a list of pairwise M-orthogonal vectors spanning the same space.
orth_Gram_Schmidt_metrique_diag <- function (M,Y)
{ nb_fact <- length(Y)
X <- vector("list",nb_fact)
# A single input vector is trivially "orthogonal": return it unchanged.
if (nb_fact==1) { X<-Y} else{
# normX[i] caches the squared M-norm of the i-th orthogonalised vector.
normX <- vector(length=nb_fact)
X[[1]] <- Y[[1]]
normX[1] <- sum ( X[[1]]^2 * M )
for (i in 2:nb_fact)
{ X[[i]] <- Y[[i]]
# Subtract the M-projection of the ORIGINAL Y[[i]] onto every earlier
# orthogonal vector (classical, not modified, Gram-Schmidt).
for (j in 1:(i-1))
{ X[[i]] <- X[[i]]-(sum(Y[[i]] * X[[j]]* M)/normX[j]) * X[[j]]
}
normX[i] <- sum (X[[i]]^2 * M)
}
}
return(X)
}
|
#' db as a list of data.frame-s
#'
#' Reads every user table of an SQLite database and returns them as a named
#' list of data.frames (one element per table, named after the table).
#'
#' @param con sqlite connection.
#' @return A named list of data.frames, one per table.
#' @export
db2list <- function(con) {
  tnams <- dbGetQuery(con, "SELECT name FROM sqlite_master WHERE type='table'")
  # lapply + setNames (instead of sapply) guarantees a list is returned:
  # sapply would silently simplify to a matrix when all tables happen to
  # have identical dimensions, contradicting the documented return type.
  setNames(
    lapply(tnams$name, function(x) dbGetQuery(con, paste("SELECT * FROM", x))),
    tnams$name
  )
}
#' Show db status
#' Returns a data.frame containing the status of the current CZ project.
#' @return A data.frame with one row per (image id, mark): the number of
#'   replicates, a processed flag (1 if at least one ROI exists for the
#'   file, 0 otherwise) and the bare image file name.
#' @export
#' @examples
#' CZopen_example()
#' CZshowStatus()
CZshowStatus <- function() {
stopifnot( colorZapper_file_active())
# One row per (file id, mark): a files-to-ROI LEFT JOIN marks each file as
# processed when a matching ROI row exists, classifies the stored WKT as
# points/polygons, then aggregates to count replicates.
d = dbGetQuery(getOption('cz.con'), "
SELECT count(id) replicates, id, processed, mark, fileName FROM
(select f.id, CASE WHEN r.id is null then 0 else 1 END as processed, mark,
CASE WHEN instr(wkt, 'MULTIPOINT') THEN 'points'
WHEN instr(wkt, 'MULTIPOLYGON') THEN 'polygons' END as selected,
path fileName
from files f left join ROI r on f.id = r.id )
GROUP BY id, mark, fileName
")
# Strip the directory and any image-file extension so only the bare
# file name remains in the report.
d$fileName = gsub("((\\.(?i)(jpg|jpeg|png|gif|bmp|tif|tiff))$)", "", basename(d$fileName),
ignore.case = TRUE )
d
}
#' colorZapper data
#' Fetch colorZapper RGB data
#' @param what 'ROI' (gets the data of ROI-s defined interactively)
#' or 'ALL' (extracts the color of from all images.)
#' @return A data.table of per-pixel R, G, B values together with the file
#'   id, the file path and (for 'ROI') the ROI mark.
#' @export
#' @examples
#' require(doParallel)
#' registerDoParallel(1)
#' CZopen_example()
#' CZextractROI()
#' CZextractALL()
#' stopImplicitCluster()
#' d = CZdata(what= 'ROI')
#' d = CZdata(what= 'ALL')
#'
CZdata <- function(what) {
stopifnot( colorZapper_file_active())
# Validate up front: an unknown `what` previously fell through both if()
# branches and died later with the obscure "object 'sql' not found".
what <- match.arg(what, c("ROI", "ALL"))
if (what == 'ROI') {
sql <- "SELECT R, G, B, f.id, w.mark, f.path FROM ROI_RGB c
JOIN ROI w ON c.roi_PK = w.pk
JOIN files f ON f.id = w.id"
} else {
sql <- "SELECT R, G, B, f.id, f.path FROM ALL_RGB a
JOIN files f ON f.id = a.all_pk"
}
dbGetQuery(getOption('cz.con'), sql) %>% data.table
}
|
/R/5_get_data.R
|
no_license
|
mpio-be/colorZapper
|
R
| false
| false
| 1,933
|
r
|
#' db as a list of data.frame-s
#' @param con sqlite connection.
#' @return The contents of every table in the database, one element per
#'   table, named after the table.
#' @export
db2list <- function(con) {
# Table names are read from sqlite's own catalogue, sqlite_master.
tnams = dbGetQuery(con, "SELECT name FROM sqlite_master WHERE type='table'")
# NOTE(review): sapply may simplify the result (e.g. to a matrix) when all
# tables have identical dimensions; lapply would guarantee a list as the
# roxygen title promises -- confirm and consider switching.
sapply(tnams$name, function(x) dbGetQuery(con, paste("SELECT * FROM", x) ) )
}
#' Show db status
#' Returns a data.frame containing the status of the current CZ project.
#' @export
#' @examples
#' CZopen_example()
#' CZshowStatus()
CZshowStatus <- function() {
stopifnot( colorZapper_file_active())
d = dbGetQuery(getOption('cz.con'), "
SELECT count(id) replicates, id, processed, mark, fileName FROM
(select f.id, CASE WHEN r.id is null then 0 else 1 END as processed, mark,
CASE WHEN instr(wkt, 'MULTIPOINT') THEN 'points'
WHEN instr(wkt, 'MULTIPOLYGON') THEN 'polygons' END as selected,
path fileName
from files f left join ROI r on f.id = r.id )
GROUP BY id, mark, fileName
")
d$fileName = gsub("((\\.(?i)(jpg|jpeg|png|gif|bmp|tif|tiff))$)", "", basename(d$fileName),
ignore.case = TRUE )
d
}
#' colorZapper data
#' Fetch colorZapper RGB data
#' @param what 'ROI' (gets the data of ROI-s defined interactively)
#' or 'ALL' (extracts the color of from all images.)
#' @export
#' @examples
#' require(doParallel)
#' registerDoParallel(1)
#' CZopen_example()
#' CZextractROI()
#' CZextractALL()
#' stopImplicitCluster()
#' d = CZdata(what= 'ROI')
#' d = CZdata(what= 'ALL')
#'
CZdata <- function(what) {
stopifnot( colorZapper_file_active())
if(what == 'ROI')
sql = "SELECT R, G, B, f.id, w.mark, f.path FROM ROI_RGB c
JOIN ROI w ON c.roi_PK = w.pk
JOIN files f ON f.id = w.id"
if(what == 'ALL')
sql = "SELECT R, G, B, f.id, f.path FROM ALL_RGB a
JOIN files f ON f.id = a.all_pk"
dbGetQuery(getOption('cz.con'), sql) %>% data.table
}
|
# Count, across both mailboxes, how often each address appears as a
# correspondent (sender in the inbox, recipient in the sent folder) and
# print the 20 most frequent ones.
inbox_data <- read.table("inbox_data_enron.csv", header=TRUE, sep=",", quote='')
sent_data <- read.table("sent_data_enron.csv", header=TRUE, sep=",", quote='')
# Normalise both address columns to a common name so they can be stacked.
from <- inbox_data['from']
colnames(from)[1] <- 'mail'
to <- sent_data['to']
colnames(to)[1] <- 'mail'
# `mails` instead of `all`: the original name shadowed base::all().
mails <- rbind(from, to)
counted <- data.frame(table(mails))
# order() needs an atomic vector; counted['Freq'] is a one-column
# data.frame, which order() rejects on modern R -- use counted$Freq.
sorted <- counted[order(counted$Freq, decreasing=TRUE),]
# head() instead of sorted[0:20,], which only worked because R silently
# drops a 0 index.
print(head(sorted, 20))
|
/R_rb/Chapter6/mails_interact.r
|
no_license
|
takagotch/R
|
R
| false
| false
| 400
|
r
|
inbox_data <- read.table("inbox_data_enron.csv", header=TRUE, sep=",", quote='')
sent_data <- read.table("sent_data_enron.csv", header=TRUE, sep=",", quote='')
from <- inbox_data['from']
colnames(from)[1] <- 'mail'
to <- sent_data['to']
colnames(to)[1] <- 'mail'
all <- rbind(from,to)
counted <- data.frame(table(all))
sorted <- counted[order(counted['Freq'],decreasing=TRUE),]
print(sorted[0:20,])
|
# Demonstration of a simple file-based map-reduce: melt a sparse matrix to
# long (row, col, value) format, shuffle it, write it to CSV, then rebuild
# it chunk by chunk further below.
library(mefa4)
set.seed(1234)
# 20 x 10 sparse Matrix of Poisson counts (the 50 draws are recycled to
# fill the 200 cells).
y <- Matrix(rpois(50, 0.5), 20, 10)
dimnames(y) <- list(letters[1:20], LETTERS[1:10])
# Long representation, shuffled and given an id column, saved to disk.
x <- Melt(y)
x <- x[sample.int(nrow(x)),]
x <- data.frame(id=1:nrow(x), x)
file <- "trydata.csv"
write.csv(x, file, row.names=FALSE)
# Identity map and rbind reduce: the pipeline should reproduce the input.
FUN <- function(x) return(x)
REDUCE <- rbind
nrows <- 20
#' Number of lines in a text file.
#'
#' Portable replacement for shelling out to `wc -l`: counts newline bytes
#' by scanning the file in fixed-size binary chunks, so it needs neither
#' Rtools (the original hard-coded "/RTools/bin/wc" on Windows) nor an
#' external `wc` binary, and memory use is bounded by `chunk_size` even
#' for very large files.
#'
#' @param file path to the file.
#' @param chunk_size number of bytes read per iteration.
#' @return Integer count of newline ('\n') bytes, i.e. the same value
#'   `wc -l` reports.
nlines <- function(file, chunk_size = 1048576L) {
    con <- file(file, open = "rb")
    on.exit(close(con), add = TRUE)
    nr <- 0L
    nl <- as.raw(10L)  # '\n'
    repeat {
        chunk <- readBin(con, what = "raw", n = chunk_size)
        if (length(chunk) == 0L) break
        nr <- nr + sum(chunk == nl)
    }
    nr
}
#' Apply `FUN` to a CSV file in chunks of `nrows` rows and combine.
#'
#' The file is never loaded whole: the header is read once, then the body
#' is read `nrows` rows at a time; `FUN` maps over each chunk and `REDUCE`
#' (e.g. `rbind`) combines the per-chunk results.
#'
#' @param file path to a CSV file with a header line.
#' @param nrows number of data rows per chunk.
#' @param FUN function applied to each chunk (a data.frame).
#' @param REDUCE function combining the chunk results, called via do.call.
#' @param ... further arguments passed on to read.csv.
#' @return The value of REDUCE applied to the list of mapped chunks.
MapReduce_function <- function(file, nrows, FUN, REDUCE, ...) {
    nr <- nlines(file)
    ## Number of chunks: data rows (nr - 1, header excluded) divided by the
    ## chunk size, rounded up (replaces the floor + remainder dance).
    m <- ceiling((nr - 1) / nrows)
    ## Read the header once; each chunk is read header-less and relabelled.
    tmp0 <- read.csv(file, nrows = 2, skip = 0, header = TRUE, ...)
    cn <- colnames(tmp0)
    ## Map: seq_len() handles m == 0 safely (1:m would iterate c(1, 0)),
    ## and the result list is preallocated instead of grown.
    res <- vector("list", m)
    for (i in seq_len(m)) {
        tmp <- read.csv(file, nrows = nrows, skip = (i - 1) * nrows + 1,
            header = FALSE, ...)
        colnames(tmp) <- cn
        res[[i]] <- FUN(tmp)
    }
    ## Reduce: combine the chunks and return the result visibly (the
    ## original ended on an assignment, which returns invisibly).
    do.call(REDUCE, res)
}
# Run the pipeline and cross-tabulate the reassembled long data back into
# a matrix; reindexing by the result's row names lets the round trip be
# compared against the original `y`.
out <- MapReduce_function(file, nrows, FUN, REDUCE)
fff <- Xtab(value ~ rows + cols, out)
fff
y[rownames(fff),]
|
/R/mapreduce.R
|
no_license
|
psolymos/bamanalytics
|
R
| false
| false
| 1,392
|
r
|
library(mefa4)
set.seed(1234)
y <- Matrix(rpois(50, 0.5), 20, 10)
dimnames(y) <- list(letters[1:20], LETTERS[1:10])
x <- Melt(y)
x <- x[sample.int(nrow(x)),]
x <- data.frame(id=1:nrow(x), x)
file <- "trydata.csv"
write.csv(x, file, row.names=FALSE)
FUN <- function(x) return(x)
REDUCE <- rbind
nrows <- 20
nlines <- function(file) {
## http://r.789695.n4.nabble.com/Fast-way-to-determine-number-of-lines-in-a-file-td1472962.html
## needs Rtools on Windows
if (.Platform$OS.type == "windows") {
nr <- as.integer(strsplit(system(paste("/RTools/bin/wc -l",
file), intern=TRUE), " ")[[1]][1])
} else {
nr <- as.integer(strsplit(system(paste("wc -l",
file), intern=TRUE), " ")[[1]][1])
}
nr
}
MapReduce_function <- function(file, nrows, FUN, REDUCE, ...) {
## Map
nr <- nlines(file)
m <- floor((nr-1) / nrows)
mm <- (nr-1) %% nrows
if (mm > 0)
m <- m+1
## Reduce
tmp0 <- read.csv(file, nrows=2, skip=0, header=TRUE, ...)
cn <- colnames(tmp0)
res <- list()
for (i in 1:m) {
tmp <- read.csv(file, nrows=nrows, skip=(i-1)*nrows+1,
header=FALSE, ...)
colnames(tmp) <- cn
res[[i]] <- FUN(tmp)
}
out <- do.call(REDUCE, res)
}
out <- MapReduce_function(file, nrows, FUN, REDUCE)
fff <- Xtab(value ~ rows + cols, out)
fff
y[rownames(fff),]
|
#GO Enrichment script
#By Cassie Ettinger
library(tidyverse)
library(ggplot2)
library(vroom)
library(AnnotationDbi)
library(GSEABase)
library(GOstats)
## Bash commands:
# grep "gene" CoelomomycesMeiospore_Genes.gff3 | cut -f9 | sort | uniq | sed 's/ID=//' | sed 's/[;].*//' > all_genes.txt
# sort all_genes.txt | uniq > universal_genes.txt
# rm all_genes.txt
## note could have just gone to genome for this - but oh my poor brain didn't think of that until later, oh well
#
# grep "GO" CoelomomycesMeiospore_Genes.gff3 | cut -f9 | sort | uniq | sed 's/ID=//' | sed 's/[-T].*//' > CM_genes_uniq.txt
# grep "GO" CoelomomycesMeiospore_Genes.gff3 | cut -f9 | sort | uniq | sed 's/^.*.Ontology_term=//' | sed 's/[;].*//' | sed 's/,/|/g' > CM_GO.txt
# paste -d'\t' CM_genes_uniq.txt CM_GO.txt > CM_gogenes.txt
#
# rm CM_genes_uniq.txt CM_GO.txt
#
## Remove the -T1 from gene names
# sort results/deseq_kallisto/Result_Up.tsv | uniq | sed 's/[-T].*//' > results/deseq_kallisto/Results_Up.tsv
# sort results/deseq_kallisto/Result_Down.tsv | uniq | sed 's/[-T].*//' > results/deseq_kallisto/Results_Down.tsv
# sort results/deseq_kallisto/allDEGs.tsv | uniq | sed 's/[-T].*//' > results/deseq_kallisto/all_DEG.tsv
# rm Result_Up.tsv Result_Down.tsv allDEGs.tsv
#load datasets
#load in genes and respective GO IDS
meiospore <- vroom('db/CM_gogenes.txt', col_names = c("GeneID", "GO"))
#list of all genes in genome
all_genes <- read.delim('db/universal_genes.txt',header = FALSE)
#respective outputs of kalisto_DESeq_more.R
enrich.spo <- read.delim('results/deseq_kallisto/Results_Up.tsv', header =FALSE)
enrich.inf <- read.delim('results/deseq_kallisto/Results_Down.tsv', header =FALSE)
deg <- read.delim('results/deseq_kallisto/all_DEG.tsv', header =FALSE)
#split GO IDs into individual rows
meiospore.go <- meiospore %>%
separate_rows(GO, sep="\\|")
# GO Analyses
# following https://github.com/stajichlab/Bd_Zoo-Spor_Analysis2015
# as input genes of interest here I am using genes
# that are expressed more in one group (e.g. Spor / Inf) over the other
# not sure that is right approach - esp when asking about GO 'underenrichment'
#make GO dataframe: #GO ID, #IEA, #GeneID
meiospore.goframeData <- data.frame(meiospore.go$GO, "IEA", meiospore.go$GeneID)
meiospore.goFrame <- GOFrame(meiospore.goframeData,organism="Coelomomyces lativittatus")
meiospore.goAllFrame=GOAllFrame(meiospore.goFrame)
meiospore.gsc <- GeneSetCollection(meiospore.goAllFrame, setType = GOCollection())
# Sporangia
# GO molecular-function over-representation test for genes up-regulated in
# sporangia, against the whole-genome gene universe.
params <- GSEAGOHyperGParams(name="My Custom GSEA based annot Params",
                             geneSetCollection=meiospore.gsc,
                             geneIds = enrich.spo$V1,
                             universeGeneIds = all_genes$V1,
                             ontology = "MF",
                             pvalueCutoff = 0.05,
                             conditional = FALSE,
                             testDirection = "over")
Over <- hyperGTest(params)
summary(Over)
Over
# Write into results/GO_enrich_kallisto/ like every other enrichment table
# in this script; the original bare "results/" path was inconsistent and
# scattered this one output file elsewhere.
write.csv(summary(Over),"results/GO_enrich_kallisto/Spo_OverMF_enrich.csv")
paramsCC <- GSEAGOHyperGParams(name="My Custom GSEA based annot Params",
geneSetCollection=meiospore.gsc,
geneIds = enrich.spo$V1,
universeGeneIds = all_genes$V1,
ontology = "CC",
pvalueCutoff = 0.05,
conditional = FALSE,
testDirection = "over")
OverCC <- hyperGTest(paramsCC)
summary(OverCC)
OverCC
write.csv(summary(OverCC),"results/GO_enrich_kallisto/Spo_OverCC_enrich.csv");
paramsBP <- GSEAGOHyperGParams(name="My Custom GSEA based annot Params",
geneSetCollection=meiospore.gsc,
geneIds = enrich.spo$V1,
universeGeneIds = all_genes$V1,
ontology = "BP",
pvalueCutoff = 0.05,
conditional = FALSE,
testDirection = "over")
OverBP <- hyperGTest(paramsBP)
summary(OverBP)
OverBP
write.csv(summary(OverBP),"results/GO_enrich_kallisto/Spo_OverBP_enrich.csv");
params <- GSEAGOHyperGParams(name="My Custom GSEA based annot Params",
geneSetCollection=meiospore.gsc,
geneIds = enrich.spo$V1,
universeGeneIds = all_genes$V1,
ontology = "MF",
pvalueCutoff = 0.05,
conditional = FALSE,
testDirection = "under")
Under <- hyperGTest(params)
summary(Under)
Under
write.csv(summary(Under),"results/GO_enrich_kallisto/Spo_UnderMF_enrich.csv");
paramsCC <- GSEAGOHyperGParams(name="My Custom GSEA based annot Params",
geneSetCollection=meiospore.gsc,
geneIds = enrich.spo$V1,
universeGeneIds = all_genes$V1,
ontology = "CC",
pvalueCutoff = 0.05,
conditional = FALSE,
testDirection = "under")
UnderCC <- hyperGTest(paramsCC)
summary(UnderCC)
UnderCC
write.csv(summary(UnderCC),"results/GO_enrich_kallisto/Spo_UnderCC_enrich.csv");
paramsBP <- GSEAGOHyperGParams(name="My Custom GSEA based annot Params",
geneSetCollection=meiospore.gsc,
geneIds = enrich.spo$V1,
universeGeneIds = all_genes$V1,
ontology = "BP",
pvalueCutoff = 0.05,
conditional = FALSE,
testDirection = "under")
UnderBP <- hyperGTest(paramsBP)
summary(UnderBP)
UnderBP
write.csv(summary(UnderBP),"results/GO_enrich_kallisto/Spo_UnderBP_enrich.csv");
## Inf
params <- GSEAGOHyperGParams(name="My Custom GSEA based annot Params",
geneSetCollection=meiospore.gsc,
geneIds = enrich.inf$V1,
universeGeneIds = all_genes$V1,
ontology = "MF",
pvalueCutoff = 0.05,
conditional = FALSE,
testDirection = "over")
Over <- hyperGTest(params)
summary(Over)
Over
write.csv(summary(Over),"results/GO_enrich_kallisto/Inf_OverMF_enrich.csv")
paramsCC <- GSEAGOHyperGParams(name="My Custom GSEA based annot Params",
geneSetCollection=meiospore.gsc,
geneIds = enrich.inf$V1,
universeGeneIds = all_genes$V1,
ontology = "CC",
pvalueCutoff = 0.05,
conditional = FALSE,
testDirection = "over")
OverCC <- hyperGTest(paramsCC)
summary(OverCC)
OverCC
write.csv(summary(OverCC),"results/GO_enrich_kallisto/Inf_OverCC_enrich.csv")
paramsBP <- GSEAGOHyperGParams(name="My Custom GSEA based annot Params",
geneSetCollection=meiospore.gsc,
geneIds = enrich.inf$V1,
universeGeneIds = all_genes$V1,
ontology = "BP",
pvalueCutoff = 0.05,
conditional = FALSE,
testDirection = "over")
OverBP <- hyperGTest(paramsBP)
summary(OverBP)
OverBP
write.csv(summary(OverBP),"results/GO_enrich_kallisto/Inf_OverBP_enrich.csv")
# NOTE(review): this ggplot block references objects that are defined
# nowhere in this script (overBP_inf, colorHex, LegendLimit, Title,
# GoGraph) -- it appears to be pasted from another analysis and will fail
# with "object not found" as written. Confirm where these are meant to
# come from (possibly scripts/GOfunctions.R, which is sourced further
# below) or remove the block.
ggplot(overBP_inf, aes(x=ExtraTerm, y=-log10(Fisher), fill=Significant)) +
stat_summary(geom = "bar", fun = mean, position = "dodge") +
xlab(element_blank()) +
ylab("Log Fold Enrichment") +
scale_fill_gradientn(colours = c("#87868140", colorHex), #0000ff40
limits=c(1,LegendLimit),
breaks=c(1,LegendLimit)) +
ggtitle(Title) +
scale_y_continuous(breaks=round(seq(0, max(-log10(GoGraph$Fisher),3)), 1)) +
#theme_bw(base_size=12) +
theme(
panel.grid = element_blank(),
legend.position=c(0.8,.3),
legend.background=element_blank(),
legend.key=element_blank(), #removes the border
legend.key.size=unit(0.5, "cm"), #Sets overall area/size of the legend
#legend.text=element_text(size=18), #Text size
legend.title=element_blank(),
plot.title=element_text(angle=0, face="bold", vjust=1, size=25),
axis.text.x=element_text(angle=0, hjust=0.5),
axis.text.y=element_text(angle=0, vjust=0.5),
axis.title=element_text(hjust=0.5),
#title=element_text(size=18)
) +
guides(fill=guide_colorbar(ticks=FALSE, label.position = 'left')) +
coord_flip()
params <- GSEAGOHyperGParams(name="My Custom GSEA based annot Params",
geneSetCollection=meiospore.gsc,
geneIds = enrich.inf$V1,
universeGeneIds = all_genes$V1,
ontology = "MF",
pvalueCutoff = 0.05,
conditional = FALSE,
testDirection = "under")
Under <- hyperGTest(params)
summary(Under)
Under
write.csv(summary(Under),"results/GO_enrich_kallisto/Inf_UnderMF_enrich.csv");
paramsCC <- GSEAGOHyperGParams(name="My Custom GSEA based annot Params",
geneSetCollection=meiospore.gsc,
geneIds = enrich.inf$V1,
universeGeneIds = all_genes$V1,
ontology = "CC",
pvalueCutoff = 0.05,
conditional = FALSE,
testDirection = "under")
UnderCC <- hyperGTest(paramsCC)
summary(UnderCC)
UnderCC
write.csv(summary(UnderCC),"results/GO_enrich_kallisto/Inf_UnderCC_enrich.csv");
paramsBP <- GSEAGOHyperGParams(name="My Custom GSEA based annot Params",
geneSetCollection=meiospore.gsc,
geneIds = enrich.inf$V1,
universeGeneIds = all_genes$V1,
ontology = "BP",
pvalueCutoff = 0.05,
conditional = FALSE,
testDirection = "under")
UnderBP <- hyperGTest(paramsBP)
summary(UnderBP)
UnderBP
write.csv(summary(UnderBP),"results/GO_enrich_kallisto/Inf_UnderBP_enrich.csv")
#hmm is there a good way to plot these first results?
#we could sum by 'term' and plot
#need to think on this
##### alternative way ####
source("scripts/GOfunctions.R")
library("ggplot2")
library("cowplot")
theme_set(theme_cowplot())
library("patchwork")
#Now using scripts from https://github.com/rajewski/Datura-Genome/
#doesn't have an option of over vs. under enriched?
# Make a GO Enrichment of the Up and Down Regulated Genes
GO_Up_Inf <- GOEnrich(gene2go = "db/CM_gogenes.txt",
GOIs="results/deseq_kallisto/Results_Down.tsv")
inf <- GOPlot(GO_Up_Inf)
inf
GO_Up_Spo <- GOEnrich(gene2go = "db/CM_gogenes.txt",
GOIs="results/deseq_kallisto/Results_Up.tsv")
spo <- GOPlot(GO_Up_Spo)
spo
GO_All <- GOEnrich(gene2go = "db/CM_gogenes.txt",
GOIs="results/deseq_kallisto/all_DEG.tsv")
GOPlot(GO_All)
#plot results
inf + spo + plot_annotation(tag_levels = 'A')
ggsave(filename = 'plots/GO_enrich_infA_spoB_method2.pdf', plot = last_plot(), device = 'pdf', width = 16, height = 8, dpi = 300)
|
/scripts/GOEnrichment.R
|
permissive
|
90talieh/Chytrid_Coelomomyces_RNASeq
|
R
| false
| false
| 11,845
|
r
|
#GO Enrichment script
#By Cassie Ettinger
library(tidyverse)
library(ggplot2)
library(vroom)
library(AnnotationDbi)
library(GSEABase)
library(GOstats)
## Bash commands:
# grep "gene" CoelomomycesMeiospore_Genes.gff3 | cut -f9 | sort | uniq | sed 's/ID=//' | sed 's/[;].*//' > all_genes.txt
# sort all_genes.txt | uniq > universal_genes.txt
# rm all_genes.txt
## note could have just gone to genome for this - but oh my poor brain didn't think of that until later, oh well
#
# grep "GO" CoelomomycesMeiospore_Genes.gff3 | cut -f9 | sort | uniq | sed 's/ID=//' | sed 's/[-T].*//' > CM_genes_uniq.txt
# grep "GO" CoelomomycesMeiospore_Genes.gff3 | cut -f9 | sort | uniq | sed 's/^.*.Ontology_term=//' | sed 's/[;].*//' | sed 's/,/|/g' > CM_GO.txt
# paste -d'\t' CM_genes_uniq.txt CM_GO.txt > CM_gogenes.txt
#
# rm CM_genes_uniq.txt CM_GO.txt
#
## Remove the -T1 from gene names
# sort results/deseq_kallisto/Result_Up.tsv | uniq | sed 's/[-T].*//' > results/deseq_kallisto/Results_Up.tsv
# sort results/deseq_kallisto/Result_Down.tsv | uniq | sed 's/[-T].*//' > results/deseq_kallisto/Results_Down.tsv
# sort results/deseq_kallisto/allDEGs.tsv | uniq | sed 's/[-T].*//' > results/deseq_kallisto/all_DEG.tsv
# rm Result_Up.tsv Result_Down.tsv allDEGs.tsv
#load datasets
#load in genes and respective GO IDS
meiospore <- vroom('db/CM_gogenes.txt', col_names = c("GeneID", "GO"))
#list of all genes in genome
all_genes <- read.delim('db/universal_genes.txt',header = FALSE)
#respective outputs of kalisto_DESeq_more.R
enrich.spo <- read.delim('results/deseq_kallisto/Results_Up.tsv', header =FALSE)
enrich.inf <- read.delim('results/deseq_kallisto/Results_Down.tsv', header =FALSE)
deg <- read.delim('results/deseq_kallisto/all_DEG.tsv', header =FALSE)
#split GO IDs into individual rows
meiospore.go <- meiospore %>%
separate_rows(GO, sep="\\|")
# GO Analyses
# following https://github.com/stajichlab/Bd_Zoo-Spor_Analysis2015
# as input genes of interest here I am using genes
# that are expressed more in one group (e.g. Spor / Inf) over the other
# not sure that is right approach - esp when asking about GO 'underenrichment'
#make GO dataframe: #GO ID, #IEA, #GeneID
meiospore.goframeData <- data.frame(meiospore.go$GO, "IEA", meiospore.go$GeneID)
meiospore.goFrame <- GOFrame(meiospore.goframeData,organism="Coelomomyces lativittatus")
meiospore.goAllFrame=GOAllFrame(meiospore.goFrame)
meiospore.gsc <- GeneSetCollection(meiospore.goAllFrame, setType = GOCollection())
# Sporangia
params <- GSEAGOHyperGParams(name="My Custom GSEA based annot Params",
geneSetCollection=meiospore.gsc,
geneIds = enrich.spo$V1,
universeGeneIds = all_genes$V1,
ontology = "MF",
pvalueCutoff = 0.05,
conditional = FALSE,
testDirection = "over")
Over <- hyperGTest(params)
summary(Over)
Over
write.csv(summary(Over),"results/Spo_OverMF_enrich.csv");
paramsCC <- GSEAGOHyperGParams(name="My Custom GSEA based annot Params",
geneSetCollection=meiospore.gsc,
geneIds = enrich.spo$V1,
universeGeneIds = all_genes$V1,
ontology = "CC",
pvalueCutoff = 0.05,
conditional = FALSE,
testDirection = "over")
OverCC <- hyperGTest(paramsCC)
summary(OverCC)
OverCC
write.csv(summary(OverCC),"results/GO_enrich_kallisto/Spo_OverCC_enrich.csv");
paramsBP <- GSEAGOHyperGParams(name="My Custom GSEA based annot Params",
geneSetCollection=meiospore.gsc,
geneIds = enrich.spo$V1,
universeGeneIds = all_genes$V1,
ontology = "BP",
pvalueCutoff = 0.05,
conditional = FALSE,
testDirection = "over")
OverBP <- hyperGTest(paramsBP)
summary(OverBP)
OverBP
write.csv(summary(OverBP),"results/GO_enrich_kallisto/Spo_OverBP_enrich.csv");
params <- GSEAGOHyperGParams(name="My Custom GSEA based annot Params",
geneSetCollection=meiospore.gsc,
geneIds = enrich.spo$V1,
universeGeneIds = all_genes$V1,
ontology = "MF",
pvalueCutoff = 0.05,
conditional = FALSE,
testDirection = "under")
Under <- hyperGTest(params)
summary(Under)
Under
write.csv(summary(Under),"results/GO_enrich_kallisto/Spo_UnderMF_enrich.csv");
paramsCC <- GSEAGOHyperGParams(name="My Custom GSEA based annot Params",
geneSetCollection=meiospore.gsc,
geneIds = enrich.spo$V1,
universeGeneIds = all_genes$V1,
ontology = "CC",
pvalueCutoff = 0.05,
conditional = FALSE,
testDirection = "under")
UnderCC <- hyperGTest(paramsCC)
summary(UnderCC)
UnderCC
write.csv(summary(UnderCC),"results/GO_enrich_kallisto/Spo_UnderCC_enrich.csv");
paramsBP <- GSEAGOHyperGParams(name="My Custom GSEA based annot Params",
geneSetCollection=meiospore.gsc,
geneIds = enrich.spo$V1,
universeGeneIds = all_genes$V1,
ontology = "BP",
pvalueCutoff = 0.05,
conditional = FALSE,
testDirection = "under")
UnderBP <- hyperGTest(paramsBP)
summary(UnderBP)
UnderBP
write.csv(summary(UnderBP),"results/GO_enrich_kallisto/Spo_UnderBP_enrich.csv");
## Inf
params <- GSEAGOHyperGParams(name="My Custom GSEA based annot Params",
geneSetCollection=meiospore.gsc,
geneIds = enrich.inf$V1,
universeGeneIds = all_genes$V1,
ontology = "MF",
pvalueCutoff = 0.05,
conditional = FALSE,
testDirection = "over")
Over <- hyperGTest(params)
summary(Over)
Over
write.csv(summary(Over),"results/GO_enrich_kallisto/Inf_OverMF_enrich.csv")
paramsCC <- GSEAGOHyperGParams(name="My Custom GSEA based annot Params",
geneSetCollection=meiospore.gsc,
geneIds = enrich.inf$V1,
universeGeneIds = all_genes$V1,
ontology = "CC",
pvalueCutoff = 0.05,
conditional = FALSE,
testDirection = "over")
OverCC <- hyperGTest(paramsCC)
summary(OverCC)
OverCC
write.csv(summary(OverCC),"results/GO_enrich_kallisto/Inf_OverCC_enrich.csv")
paramsBP <- GSEAGOHyperGParams(name="My Custom GSEA based annot Params",
geneSetCollection=meiospore.gsc,
geneIds = enrich.inf$V1,
universeGeneIds = all_genes$V1,
ontology = "BP",
pvalueCutoff = 0.05,
conditional = FALSE,
testDirection = "over")
OverBP <- hyperGTest(paramsBP)
summary(OverBP)
OverBP
write.csv(summary(OverBP),"results/GO_enrich_kallisto/Inf_OverBP_enrich.csv")
## Horizontal bar chart of -log10 Fisher p-values for over-represented BP terms.
## NOTE(review): overBP_inf, colorHex, LegendLimit, Title and GoGraph are not
## defined in this chunk -- presumably created earlier/elsewhere in the script;
## confirm they exist before running this section standalone.
ggplot(overBP_inf, aes(x=ExtraTerm, y=-log10(Fisher), fill=Significant)) +
  stat_summary(geom = "bar", fun = mean, position = "dodge") +
  xlab(element_blank()) +
  ylab("Log Fold Enrichment") +
  # gradient from semi-transparent grey to colorHex (trailing 40 = hex alpha)
  scale_fill_gradientn(colours = c("#87868140", colorHex), #0000ff40
                       limits=c(1,LegendLimit),
                       breaks=c(1,LegendLimit)) +
  ggtitle(Title) +
  scale_y_continuous(breaks=round(seq(0, max(-log10(GoGraph$Fisher),3)), 1)) +
  #theme_bw(base_size=12) +
  theme(
    panel.grid = element_blank(),
    legend.position=c(0.8,.3),
    legend.background=element_blank(),
    legend.key=element_blank(), #removes the border
    legend.key.size=unit(0.5, "cm"), #Sets overall area/size of the legend
    #legend.text=element_text(size=18), #Text size
    legend.title=element_blank(),
    plot.title=element_text(angle=0, face="bold", vjust=1, size=25),
    axis.text.x=element_text(angle=0, hjust=0.5),
    axis.text.y=element_text(angle=0, vjust=0.5),
    axis.title=element_text(hjust=0.5),
    #title=element_text(size=18)
  ) +
  guides(fill=guide_colorbar(ticks=FALSE, label.position = 'left')) +
  # flip so GO terms read along the y axis
  coord_flip()
## Inf contrast: GO terms UNDER-represented among infected-tissue genes,
## again one hyperGTest per ontology (MF, CC, BP).
## Molecular Function (under-represented)
params <- GSEAGOHyperGParams(name="My Custom GSEA based annot Params",
                             geneSetCollection=meiospore.gsc,
                             geneIds = enrich.inf$V1,
                             universeGeneIds = all_genes$V1,
                             ontology = "MF",
                             pvalueCutoff = 0.05,
                             conditional = FALSE,
                             testDirection = "under")
Under <- hyperGTest(params)
summary(Under)
Under
write.csv(summary(Under),"results/GO_enrich_kallisto/Inf_UnderMF_enrich.csv");
## Cellular Component (under-represented)
paramsCC <- GSEAGOHyperGParams(name="My Custom GSEA based annot Params",
                               geneSetCollection=meiospore.gsc,
                               geneIds = enrich.inf$V1,
                               universeGeneIds = all_genes$V1,
                               ontology = "CC",
                               pvalueCutoff = 0.05,
                               conditional = FALSE,
                               testDirection = "under")
UnderCC <- hyperGTest(paramsCC)
summary(UnderCC)
UnderCC
write.csv(summary(UnderCC),"results/GO_enrich_kallisto/Inf_UnderCC_enrich.csv");
## Biological Process (under-represented)
paramsBP <- GSEAGOHyperGParams(name="My Custom GSEA based annot Params",
                               geneSetCollection=meiospore.gsc,
                               geneIds = enrich.inf$V1,
                               universeGeneIds = all_genes$V1,
                               ontology = "BP",
                               pvalueCutoff = 0.05,
                               conditional = FALSE,
                               testDirection = "under")
UnderBP <- hyperGTest(paramsBP)
summary(UnderBP)
UnderBP
write.csv(summary(UnderBP),"results/GO_enrich_kallisto/Inf_UnderBP_enrich.csv")
#hmm is there a good way to plot these first results?
#we could sum by 'term' and plot
#need to think on this
##### alternative way ####
## Second approach: enrichment via the local helper functions GOEnrich()/GOPlot()
## sourced from scripts/GOfunctions.R, plotted side by side with patchwork.
source("scripts/GOfunctions.R")
library("ggplot2")
library("cowplot")
theme_set(theme_cowplot())
library("patchwork")
#Now using scripts from https://github.com/rajewski/Datura-Genome/
#doesn't have an option of over vs. under enriched?
# Make a GO Enrichment of the Up and Down Regulated Genes
# NOTE(review): GO_Up_Inf is built from Results_Down.tsv and GO_Up_Spo from
# Results_Up.tsv -- confirm the up/down naming matches the contrast direction.
GO_Up_Inf <- GOEnrich(gene2go = "db/CM_gogenes.txt",
                      GOIs="results/deseq_kallisto/Results_Down.tsv")
inf <- GOPlot(GO_Up_Inf)
inf
GO_Up_Spo <- GOEnrich(gene2go = "db/CM_gogenes.txt",
                      GOIs="results/deseq_kallisto/Results_Up.tsv")
spo <- GOPlot(GO_Up_Spo)
spo
# enrichment over all differentially expressed genes combined
GO_All <- GOEnrich(gene2go = "db/CM_gogenes.txt",
                   GOIs="results/deseq_kallisto/all_DEG.tsv")
GOPlot(GO_All)
#plot results
## combine infected (A) and sporophyte (B) panels and save to PDF
inf + spo + plot_annotation(tag_levels = 'A')
ggsave(filename = 'plots/GO_enrich_infA_spoB_method2.pdf', plot = last_plot(), device = 'pdf', width = 16, height = 8, dpi = 300)
|
# Dumbbell/arrow chart of the change in mean national exam (UN) scores per
# kecamatan (sub-district) between two years, for one SMP-level subject.
#
# .data       long-format data with columns nama_kecamatan, tahun and one
#             nilai_rerata_<matpel> column per subject
# matpel      subject name; punctuation/whitespace are normalised to "_" and
#             lower-cased so it matches the column-name suffix
# tahun_awal  baseline year; tahun_akhir: comparison year
# judul, subjudul  plot title and subtitle
# Returns a ggplot object.
grafik_un_smp <- function(.data, matpel, tahun_awal, tahun_akhir, judul = "Perubahan Rerata Nilai Ujian Nasional", subjudul = "Nilai Ujian Nasional Tingkat SMP Kota Bandung") {
  # normalise the subject name to the nilai_rerata_* column suffix
  matpel <-
    matpel %>%
    str_replace_all(pattern = "[:punct:]|[:space:]", replacement = "_") %>%
    str_to_lower()
  .data %>%
    select(nama_kecamatan, tahun, contains(matpel)) %>%
    filter(tahun %in% c(tahun_awal, tahun_akhir)) %>%
    # one row per kecamatan; the two years become columns 2 and 3
    spread(key = "tahun", value = str_c("nilai_rerata_", matpel)) %>%
    rename("awal" = 2, "akhir" = 3) %>%
    mutate(
      # midpoint of the two scores; also used to order the y axis
      rerata = (awal + akhir) / 2,
      # direction of change: "Meningkat" = improved, "Menurun" = declined
      status = if_else(akhir - awal > 0, "Meningkat", "Menurun"),
      status = factor(status, levels = c("Meningkat", "Menurun"))
    ) %>%
    ggplot() +
    # arrow from the baseline score to the final score, coloured by direction
    geom_segment(
      aes(
        x = awal,
        xend = akhir,
        y = fct_reorder(nama_kecamatan, rerata),
        yend = fct_reorder(nama_kecamatan, rerata),
        colour = status
      ),
      arrow = arrow(length = unit(2, "mm")),
      lwd = 1
    ) +
    # midpoint marker
    geom_point(
      aes(
        x = rerata,
        y = fct_reorder(nama_kecamatan, rerata)
      ),
      colour = "#268AFF",
      size = 2
    ) +
    # baseline value label; hjust flips it to the tail side of the arrow
    geom_text(
      aes(
        x = awal,
        y = nama_kecamatan,
        label = round(awal, 1),
        hjust = if_else(status == "Meningkat", 1.2, -0.2)
      ),
      family = "Lato",
      color = "gray25",
      size = 2.5
    ) +
    # final value label on the opposite (head) side of the arrow
    geom_text(
      aes(
        x = akhir,
        y = nama_kecamatan,
        label = round(akhir, 1),
        hjust = if_else(status == "Meningkat", -0.2, 1.2)
      ),
      family = "Lato",
      color = "gray25",
      size = 2.5
    ) +
    # kecamatan name above the midpoint marker
    geom_text(
      aes(
        x = rerata,
        y = nama_kecamatan,
        label = nama_kecamatan,
        vjust = -0.6
      ),
      family = "Arial",
      color = "gray15",
      size = 3.5
    ) +
    labs(
      title = judul,
      subtitle = subjudul,
      x = "Rerata Nilai Ujian",
      y = NULL,
      caption = "Open Data Kota Bandung (data.bandung.go.id)"
    ) +
    # green = improved, red = declined; drop = FALSE keeps both legend entries
    scale_colour_manual(values = c("Meningkat" = "#37DC94", "Menurun" = "#FA5C65"), drop = FALSE) +
    theme(
      legend.background = element_blank(),
      legend.key = element_blank(),
      legend.title = element_blank(),
      legend.position = "bottom"
    )
  # theme(
  #   # panel.background = element_rect(fill = "lightgrey"),
  #   # plot.background = element_rect(fill = "lightgrey"),
  #   # legend.background = element_rect(fill = "lightgrey"),
  #   panel.background = element_blank(),
  #   plot.background = element_blank(),
  #   legend.background = element_blank(),
  #   legend.key = element_blank(),
  #   legend.title = element_blank(),
  #   legend.position = "bottom",
  #   axis.ticks = element_blank(),
  #   axis.title = element_blank(),
  #   axis.text = element_blank(),
  #   panel.grid = element_blank(),
  #   plot.title = element_text(
  #     family = "Manjari",
  #     size = 19
  #   ),
  #   plot.subtitle = element_text(
  #     family = "Manjari",
  #     size = 15
  #   ),
  #   plot.caption = element_text(
  #     family = "Manjari",
  #     size = 9
  #   ),
  #   legend.text = element_text(
  #     family = "Manjari",
  #     size = 9
  #   )
  # )
}
|
/R/grafik_un_smp.R
|
no_license
|
muftiivan/dataviz
|
R
| false
| false
| 3,305
|
r
|
# Dumbbell/arrow chart of the change in mean national exam (UN) scores per
# kecamatan (sub-district) between two years, for one SMP-level subject.
#
# .data       long-format data with columns nama_kecamatan, tahun and one
#             nilai_rerata_<matpel> column per subject
# matpel      subject name; punctuation/whitespace are normalised to "_" and
#             lower-cased so it matches the column-name suffix
# tahun_awal  baseline year; tahun_akhir: comparison year
# judul, subjudul  plot title and subtitle
# Returns a ggplot object.
grafik_un_smp <- function(.data, matpel, tahun_awal, tahun_akhir, judul = "Perubahan Rerata Nilai Ujian Nasional", subjudul = "Nilai Ujian Nasional Tingkat SMP Kota Bandung") {
  # normalise the subject name to the nilai_rerata_* column suffix
  matpel <-
    matpel %>%
    str_replace_all(pattern = "[:punct:]|[:space:]", replacement = "_") %>%
    str_to_lower()
  .data %>%
    select(nama_kecamatan, tahun, contains(matpel)) %>%
    filter(tahun %in% c(tahun_awal, tahun_akhir)) %>%
    # one row per kecamatan; the two years become columns 2 and 3
    spread(key = "tahun", value = str_c("nilai_rerata_", matpel)) %>%
    rename("awal" = 2, "akhir" = 3) %>%
    mutate(
      # midpoint of the two scores; also used to order the y axis
      rerata = (awal + akhir) / 2,
      # direction of change: "Meningkat" = improved, "Menurun" = declined
      status = if_else(akhir - awal > 0, "Meningkat", "Menurun"),
      status = factor(status, levels = c("Meningkat", "Menurun"))
    ) %>%
    ggplot() +
    # arrow from the baseline score to the final score, coloured by direction
    geom_segment(
      aes(
        x = awal,
        xend = akhir,
        y = fct_reorder(nama_kecamatan, rerata),
        yend = fct_reorder(nama_kecamatan, rerata),
        colour = status
      ),
      arrow = arrow(length = unit(2, "mm")),
      lwd = 1
    ) +
    # midpoint marker
    geom_point(
      aes(
        x = rerata,
        y = fct_reorder(nama_kecamatan, rerata)
      ),
      colour = "#268AFF",
      size = 2
    ) +
    # baseline value label; hjust flips it to the tail side of the arrow
    geom_text(
      aes(
        x = awal,
        y = nama_kecamatan,
        label = round(awal, 1),
        hjust = if_else(status == "Meningkat", 1.2, -0.2)
      ),
      family = "Lato",
      color = "gray25",
      size = 2.5
    ) +
    # final value label on the opposite (head) side of the arrow
    geom_text(
      aes(
        x = akhir,
        y = nama_kecamatan,
        label = round(akhir, 1),
        hjust = if_else(status == "Meningkat", -0.2, 1.2)
      ),
      family = "Lato",
      color = "gray25",
      size = 2.5
    ) +
    # kecamatan name above the midpoint marker
    geom_text(
      aes(
        x = rerata,
        y = nama_kecamatan,
        label = nama_kecamatan,
        vjust = -0.6
      ),
      family = "Arial",
      color = "gray15",
      size = 3.5
    ) +
    labs(
      title = judul,
      subtitle = subjudul,
      x = "Rerata Nilai Ujian",
      y = NULL,
      caption = "Open Data Kota Bandung (data.bandung.go.id)"
    ) +
    # green = improved, red = declined; drop = FALSE keeps both legend entries
    scale_colour_manual(values = c("Meningkat" = "#37DC94", "Menurun" = "#FA5C65"), drop = FALSE) +
    theme(
      legend.background = element_blank(),
      legend.key = element_blank(),
      legend.title = element_blank(),
      legend.position = "bottom"
    )
  # theme(
  #   # panel.background = element_rect(fill = "lightgrey"),
  #   # plot.background = element_rect(fill = "lightgrey"),
  #   # legend.background = element_rect(fill = "lightgrey"),
  #   panel.background = element_blank(),
  #   plot.background = element_blank(),
  #   legend.background = element_blank(),
  #   legend.key = element_blank(),
  #   legend.title = element_blank(),
  #   legend.position = "bottom",
  #   axis.ticks = element_blank(),
  #   axis.title = element_blank(),
  #   axis.text = element_blank(),
  #   panel.grid = element_blank(),
  #   plot.title = element_text(
  #     family = "Manjari",
  #     size = 19
  #   ),
  #   plot.subtitle = element_text(
  #     family = "Manjari",
  #     size = 15
  #   ),
  #   plot.caption = element_text(
  #     family = "Manjari",
  #     size = 9
  #   ),
  #   legend.text = element_text(
  #     family = "Manjari",
  #     size = 9
  #   )
  # )
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/V4_T2.6b.R
\docType{data}
\name{V4_T2.6b}
\alias{V4_T2.6b}
\title{Volume 4: Table 2.6b}
\format{A data frame with 23 variables
\describe{
\item{\code{County}}{County}
\item{\code{Age}}{Age}
\item{\code{Total}}{Total}
\item{\code{Male}}{Number of males}
\item{\code{Female}}{Number of females}
\item{\code{StillinSchool_Total}}{Total number of persons still in school / a learning institution.}
\item{\code{StillinSchool_Male}}{Number of males still in school / a learning institution.}
\item{\code{StillinSchool_Female}}{Number of females still in school / a learning institution.}
\item{\code{LeftSchoolAfterC_Total}}{Total number of persons who left school / a learning institution, after completion.}
\item{\code{LeftSchoolAfterC_Male}}{Number of males who left school / a learning institution, after completion.}
\item{\code{LeftSchoolAfterC_Female}}{Number of females who left school / a learning institution, after completion.}
\item{\code{LeftSchoolBeforeC_Total}}{Total number of persons who left school / a learning institution, before completion.}
\item{\code{LeftSchoolBeforeC_Male}}{Number of males who left school / a learning institution, before completion.}
\item{\code{LeftSchoolBeforeC_Female}}{Number of females who left school / a learning institution, before completion.}
\item{\code{NeverbeentoSchool_Total}}{Total number of persons who have never been to a school / a learning institution.}
\item{\code{NeverbeentoSchool_Male}}{Number of males who have never been to a school / a learning institution.}
\item{\code{NeverbeentoSchool_Female}}{Number of females who have never been to a school / a learning institution.}
\item{\code{...}}{The other variables indicate situations where the state of school attendance is not known / not stated.}
}
Intersex population is excluded from the table since it is too small to be distributed at sub-national level.}
\usage{
data(V4_T2.6b)
}
\description{
Table 2.6b: Distribution of Population Age 3 Years and Above by School Attendance Status,
Sex, Special Age Groups and County
}
\keyword{datasets}
|
/man/V4_T2.6b.Rd
|
permissive
|
LucyNjoki/rKenyaCensus
|
R
| false
| true
| 2,141
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/V4_T2.6b.R
\docType{data}
\name{V4_T2.6b}
\alias{V4_T2.6b}
\title{Volume 4: Table 2.6b}
\format{A data frame with 23 variables
\describe{
\item{\code{County}}{County}
\item{\code{Age}}{Age}
\item{\code{Total}}{Total}
\item{\code{Male}}{Number of males}
\item{\code{Female}}{Number of females}
\item{\code{StillinSchool_Total}}{Total number of persons still in school / a learning institution.}
\item{\code{StillinSchool_Male}}{Number of males still in school / a learning institution.}
\item{\code{StillinSchool_Female}}{Number of females still in school / a learning institution.}
\item{\code{LeftSchoolAfterC_Total}}{Total number of persons who left school / a learning institution, after completion.}
\item{\code{LeftSchoolAfterC_Male}}{Number of males who left school / a learning institution, after completion.}
\item{\code{LeftSchoolAfterC_Female}}{Number of females who left school / a learning institution, after completion.}
\item{\code{LeftSchoolBeforeC_Total}}{Total number of persons who left school / a learning institution, before completion.}
\item{\code{LeftSchoolBeforeC_Male}}{Number of males who left school / a learning institution, before completion.}
\item{\code{LeftSchoolBeforeC_Female}}{Number of females who left school / a learning institution, before completion.}
\item{\code{NeverbeentoSchool_Total}}{Total number of persons who have never been to a school / a learning institution.}
\item{\code{NeverbeentoSchool_Male}}{Number of males who have never been to a school / a learning institution.}
\item{\code{NeverbeentoSchool_Female}}{Number of females who have never been to a school / a learning institution.}
\item{\code{...}}{The other variables indicate situations where the state of school attendance is not known / not stated.}
}
Intersex population is excluded from the table since it is too small to be distributed at sub-national level.}
\usage{
data(V4_T2.6b)
}
\description{
Table 2.6b: Distribution of Population Age 3 Years and Above by School Attendance Status,
Sex, Special Age Groups and County
}
\keyword{datasets}
|
#' Dung-derived SOC in year t (DDSOCt)
#'
#' When it comes to soil organic carbon (SOC), there is plant-derived SOC (PDSOC) and dung-derived SOC (DDSOC). Not generally called directly but incorporated in wrapper function.
#'
#' Both of these equations are fairly straight-forward. PDSOCt just takes the estimated ANPPt and BNPPt along with LIGCELL, Fire and Grazing Intensity. DDSOCt takes only ANPPt, LIGCELL and Grazing Intensity (not BNPPt nor Fire).
#'
#' @param ANPPt_est Output of calc_ANPPt_est()
#' @param LIGCELL Lignin and cellulose content of livestock feed for year t (proportion)
#' @param GI Grazing intensity for year t (proportion)
#' @param carbon_frac Carbon fraction of dry matter (proportion). Defaults to
#'   0.45, the constant previously hard-coded in the equation, so existing
#'   callers are unaffected.
#' @return Numeric vector of dung-derived SOC for year t (same length as the
#'   recycled inputs).
#' @seealso [snapr::SNAP] which wraps this function.
#' @seealso [snapr::calc_PDSOCt]
#' @seealso [snapr::calc_ANPPt_est]
#' @export
calc_DDSOCt <- function(ANPPt_est, LIGCELL, GI, carbon_frac = 0.45) {
  # Fail fast on non-numeric input rather than propagating a cryptic error.
  stopifnot(is.numeric(ANPPt_est), is.numeric(LIGCELL),
            is.numeric(GI), is.numeric(carbon_frac))
  # DDSOC = lignin/cellulose fraction * carbon fraction * grazing intensity * ANPP
  LIGCELL * carbon_frac * GI * ANPPt_est
}
|
/R/calc_DDSOCt.R
|
permissive
|
ruan-de-wet/snapr
|
R
| false
| false
| 898
|
r
|
#' Dung-derived SOC in year t (DDSOCt)
#'
#' Computes the dung-derived soil organic carbon input for year t. Soil organic
#' carbon (SOC) has a plant-derived (PDSOC) and a dung-derived (DDSOC)
#' component; this helper is normally reached through the wrapper function
#' rather than called directly.
#'
#' Unlike PDSOCt (which also uses BNPPt and Fire), DDSOCt depends only on the
#' estimated ANPPt, LIGCELL and grazing intensity.
#'
#' @param ANPPt_est Output of calc_ANPPt_est()
#' @param LIGCELL Lignin and cellulose content of livestock feed for year t (proportion)
#' @param GI Grazing intensity for year t (proportion)
#' @seealso [snapr::SNAP] which wraps this function.
#' @seealso [snapr::calc_PDSOCt]
#' @seealso [snapr::calc_ANPPt_est]
#' @export
calc_DDSOCt <- function(ANPPt_est, LIGCELL, GI) {
  # 0.45 is the assumed carbon fraction of dry matter.
  LIGCELL * 0.45 * GI * ANPPt_est
}
|
# This is the code I wrote for Assignment 2 of the Coursera Data Science Specialization R course.
# Its purpose is to cache the inverse of a matrix and retrieve it.
# This first function creates the set, get, setinvmat, and getinvmat functions.
# Creates a special "matrix" object that can cache its inverse.
#
# Returns a list of four accessor functions closing over the matrix `x`
# and its cached inverse `inv`:
#   set(y)          - replace the stored matrix and invalidate the cache
#   get()           - return the stored matrix
#   setinvmat(inv)  - store a computed inverse in the cache
#   getinvmat()     - return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL                          # cached inverse; NULL means "not computed"
  set <- function(y) {
    x <<- y                            # replace the matrix in the enclosing env
    inv <<- NULL                       # previously cached inverse is now stale
  }
  get <- function() x
  # Fixed: parameter was named `solve`, shadowing base::solve inside the closure.
  setinvmat <- function(inverse) inv <<- inverse
  getinvmat <- function() inv
  list(set = set, get = get,
       setinvmat = setinvmat, getinvmat = getinvmat)
}
# This second function checks whether there is cached data before computing the
# inverse of a matrix, and either retrieves that data or computes and stores the new data in the cache.
# Computes the inverse of the special "matrix" returned by makeCacheMatrix().
# If the inverse has already been computed (and the matrix has not changed),
# the cached value is returned with a message instead of recomputing it.
# Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinvmat()
  if (!is.null(cached)) {
    message("getting cached data")       # cache hit: skip the computation
    return(cached)
  }
  # Cache miss: invert the stored matrix and remember the result.
  inverse <- solve(x$get(), ...)
  x$setinvmat(inverse)
  inverse
}
|
/cachematrix.R
|
no_license
|
deneara/ProgrammingAssignment2
|
R
| false
| false
| 2,150
|
r
|
# This is the code I wrote for Assignment 2 of the Coursera Data Science Specialization R course.
# Its purpose is to cache the inverse of a matrix and retrieve it.
# This first function creates the set, get, setinvmat, and getinvmat functions.
makeCacheMatrix <- function(x = matrix()) { # makeCacheMatrix is a function that takes a matrix as an arg
  m <- NULL # m is set as an object that will be defined later
  set <- function(y) { # set is a function that takes the arg y
    x <<- y # sets y as x from the parent envi
    m <<- NULL # resets m to NULL in the parent envi, thus clearing m (clearing the cache)
  }
  get <- function() x # get is a function that returns x (from parent envi)
  setinvmat <- function(solve) m <<- solve # sets m as the inverse of a matrix in the parent envi (note: the parameter name shadows base::solve)
  getinvmat <- function() m # getinvmat returns m from the parent envi
  list(set = set, get = get, # assigns each function as an element within a list within the parent envi
       setinvmat = setinvmat, getinvmat = getinvmat)
}
# This second function checks whether there is cached data before computing the
# inverse of a matrix, and either retrieves that data or computes and stores the new data in the cache.
cacheSolve <- function(x, ...) { # cacheSolve is a function that takes the arg x as well as others
  m <- x$getinvmat() # the function tries to retrieve the inverse of matrix x
  if (!is.null(m)) { # if m is not equal to NULL, then there is a value to retrieve from the cache
    message("getting cached data") # informs the user that the value is retrieved from the cache
    return(m) # returns the retrieved value
  }
  data <- x$get() # if !is.null(m) is FALSE, then the input (x) is retrieved
  m <- solve(data, ...) # the inverse of the input matrix is calculated (solve)
  x$setinvmat(m) # the inverse matrix is then set to the cache
  m # the inverse matrix is returned
}
|
#------------------------------------------------------
# Program name: brahman_angus_CNVR_liftover_v1.R
# Objective: analyse Derek CNVR liftover that will give
# common arsucd coor
# Author: Lloyd Low
# Email add: lloydlow@hotmail.com
#------------------------------------------------------
library(UpSetR)
library(ggplot2)
library(dplyr)
library(readr)
library(tidyr)
library(GenomicFeatures)
library(ComplexHeatmap)
library(circlize)
#reproducing Derek's upset results
# Read the three per-assembly CNVR id lists and draw a frequency-ordered
# UpSet plot of their overlaps (UpSetR, id-level resolution).
# NOTE(review): scan() defaults to numeric input; confirm the *.upset.list
# files contain numeric ids (otherwise what = "character" is required).
dir1 <- "/Users/lloyd/Documents/lloyd_2017/Research/Brahman_Angus/Assembly_version/final_to_correct_20180905/CopyNumberVariation/20190530/cnvr_data/"
upset_angus <- scan(paste0(dir1,"angus.upset.list"),sep="\n")
upset_brahman <- scan(paste0(dir1,"brahman.upset.list"),sep="\n")
upset_arsucd <- scan(paste0(dir1,"arsucd.upset.list"),sep="\n")
listInput <- list(angus=upset_angus,brahman=upset_brahman,hereford=upset_arsucd)
upset(fromList(listInput), order.by = "freq")
############################# Start from lifover modified results, without sex chr and unplaced ###########################
rm(list=ls()) # NOTE(review): wipes the whole workspace; fragile if this script is sourced -- prefer a fresh session
# path to CNV results
dir2 <- "/Users/lloyd/Documents/lloyd_2017/Research/Brahman_Angus/Assembly_version/final_to_correct_20180905/CopyNumberVariation/20190530/cnvr_data/results_liftover/"
# angus
# Read Angus CNVRs lifted over to ARS-UCD coordinates; tab-separated with
# columns chr, start, end, species plus two unlabelled columns.
path1 <- paste0(dir2,"angus_arsucd_modi_coor")
angus.cnvrs.mapped <- read_tsv(path1,col_names = FALSE, col_types = "cddcdd")
colnames(angus.cnvrs.mapped) <- c("chr","start","end","species","unsure1","unsure2")
#filter for size less than 1 mil
angus.cnvrs.mapped_modi <- angus.cnvrs.mapped %>% mutate(size=end - start) %>% filter(size < 1e6)
# unique per-interval id and an unstranded ("*") strand column for GRanges
angus.cnvrs.mapped_modi$unique_name <- paste(angus.cnvrs.mapped_modi$chr,angus.cnvrs.mapped_modi$start,angus.cnvrs.mapped_modi$end,sep = "_")
angus.cnvrs.mapped_modi$strand <- rep("*",nrow(angus.cnvrs.mapped_modi))
# drop the single unplaced-contig interval before building the GRanges
angus.cnvrs.mapped_modi <- angus.cnvrs.mapped_modi %>% filter(chr != "tig00020276_arrow_arrow_40739300_45952718")
cnv_interval_angus <- makeGRangesFromDataFrame(angus.cnvrs.mapped_modi, keep.extra.columns = TRUE,
                                               seqnames.field="chr", start.field="start",
                                               end.field="end", strand.field="strand")
#brahman
# Read Brahman CNVRs lifted over to ARS-UCD coordinates (same layout as angus).
path2 <- paste0(dir2,"brahman_arsucd_modi_coor")
brahman.cnvrs.mapped <- read_tsv(path2,col_names = FALSE, col_types = "cddcdd")
colnames(brahman.cnvrs.mapped) <- c("chr","start","end","species","unsure1","unsure2")
#filter for size less than 1 mil
brahman.cnvrs.mapped_modi <- brahman.cnvrs.mapped %>% mutate(size=end - start) %>% filter(size < 1e6)
brahman.cnvrs.mapped_modi$unique_name <- paste(brahman.cnvrs.mapped_modi$chr,brahman.cnvrs.mapped_modi$start,brahman.cnvrs.mapped_modi$end,sep = "_")
brahman.cnvrs.mapped_modi$strand <- rep("*",nrow(brahman.cnvrs.mapped_modi))
# drop three unplaced contigs and the X chromosome (autosomes only)
brahman.cnvrs.mapped_modi <- brahman.cnvrs.mapped_modi %>% filter(chr != "tig00000831_arrow_arrow_obj") %>%
  filter(chr != "tig00001951_arrow_arrow_obj") %>% filter(chr != "tig00002091_arrow_arrow_obj") %>%
  filter(chr != "X")
cnv_interval_brahman <- makeGRangesFromDataFrame(brahman.cnvrs.mapped_modi, keep.extra.columns = TRUE,
                                                 seqnames.field="chr", start.field="start",
                                                 end.field="end", strand.field="strand")
#arsucd
# Hereford (ARS-UCD) CNVRs are already in reference coordinates (BED layout).
path3 <- paste0(dir2,"arsucd.cnvrs_regions.bed")
arsucd.cnvrs.mapped <- read_tsv(path3,col_names = FALSE, col_types = "cddcdd")
colnames(arsucd.cnvrs.mapped) <- c("chr","start","end","species","unsure1","unsure2")
#filter for size less than 1 mil
arsucd.cnvrs.mapped_modi <- arsucd.cnvrs.mapped %>% mutate(size=end - start) %>% filter(size < 1e6)
arsucd.cnvrs.mapped_modi$unique_name <- paste(arsucd.cnvrs.mapped_modi$chr,arsucd.cnvrs.mapped_modi$start,arsucd.cnvrs.mapped_modi$end,sep = "_")
arsucd.cnvrs.mapped_modi$strand <- rep("*",nrow(arsucd.cnvrs.mapped_modi))
# Keep only autosomes: rows whose chr name parses as a number. as.numeric()
# yields NA (with a coercion warning) for "X" and unplaced contig names.
arsucd.cnvrs.mapped_modi <- arsucd.cnvrs.mapped_modi %>% mutate(selc = as.numeric(chr)) %>%
  filter(!is.na(selc)) %>% dplyr::select(chr:strand)
cnv_interval_arsucd <- makeGRangesFromDataFrame(arsucd.cnvrs.mapped_modi, keep.extra.columns = TRUE,
                                                seqnames.field="chr", start.field="start",
                                                end.field="end", strand.field="strand")
#combine cnv granges as list
listInput_complxhtmap <- list(angus=cnv_interval_angus,brahman=cnv_interval_brahman,hereford=cnv_interval_arsucd)
#make UpSet plot using the function from ComplexHeatmap
# Build the combination matrix at base-pair resolution from the three GRanges.
m = make_comb_mat(listInput_complxhtmap)
set_size(m)
comb_size(m)
UpSet(m)
# Save the final UpSet figure; combinations coloured by intersection degree.
tiff(filename = "FigFinal_Upset_basepair_resolution.tiff",width = 500,height = 300)
UpSet(m, pt_size = unit(5, "mm"), lwd = 3, comb_col = c("red", "blue", "black")[comb_degree(m)])
dev.off()
#On average, 0.5% of each cattle genome was covered by CNV regions (CNVRs)
#mean((set_size(m)/2.7e9)*100)
#[1] 0.51061
#The majority of CNVRs (at least 76% from each assembly) were found to be unique to one assembly
# (comb_size(m)[1:3]/set_size(m))*100
# angus brahman hereford
# 100 010 001
# 76.88974 82.24537 87.84588
# Angus vs Brahman
# comb_size(m)[4]
# 110
# 1345463
# Angus vs Hereford
# comb_size(m)[5]
# 101
# 988764
#region of Angus intersect with Brahman only
# Base pairs shared by Angus AND Brahman but absent from Hereford
# (intersect then setdiff, strand ignored), exported as CSV for follow-up.
Angus_vs_Brahman_intersect_gr <- GenomicRanges::intersect(cnv_interval_angus,cnv_interval_brahman, ignore.strand = TRUE)
Angus_vs_Brahman_intersect_only_gr <- GenomicRanges::setdiff(Angus_vs_Brahman_intersect_gr,cnv_interval_arsucd, ignore.strand = TRUE)
Angus_vs_Brahman_intersect_only_df <- as(Angus_vs_Brahman_intersect_only_gr, "data.frame")
write_csv(Angus_vs_Brahman_intersect_only_df,"/Users/lloyd/Documents/lloyd_2017/Research/Brahman_Angus/Assembly_version/final_to_correct_20180905/CopyNumberVariation/20190530/suspicious_overlap_btwn_brahman_angus_only/Angus_vs_Brahman_intersect_only_df.csv")
# > sessionInfo()
# R version 3.5.3 (2019-03-11)
# Platform: x86_64-apple-darwin15.6.0 (64-bit)
# Running under: macOS Mojave 10.14.5
#
# Matrix products: default
# BLAS: /System/Library/Frameworks/Accelerate.framework/Versions/A/Frameworks/vecLib.framework/Versions/A/libBLAS.dylib
# LAPACK: /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libRlapack.dylib
#
# locale:
# [1] en_AU.UTF-8/en_AU.UTF-8/en_AU.UTF-8/C/en_AU.UTF-8/en_AU.UTF-8
#
# attached base packages:
# [1] grid stats4 parallel stats graphics grDevices utils datasets methods base
#
# other attached packages:
# [1] circlize_0.4.6 ComplexHeatmap_2.1.0 GenomicFeatures_1.34.8 AnnotationDbi_1.44.0 Biobase_2.42.0
# [6] GenomicRanges_1.34.0 GenomeInfoDb_1.18.2 IRanges_2.16.0 S4Vectors_0.20.1 BiocGenerics_0.28.0
# [11] tidyr_0.8.3 readr_1.3.1 dplyr_0.8.1 ggplot2_3.1.1 UpSetR_1.4.0
#
# loaded via a namespace (and not attached):
# [1] Rcpp_1.0.1 lattice_0.20-38 prettyunits_1.0.2 png_0.1-7
# [5] Rsamtools_1.34.1 Biostrings_2.50.2 assertthat_0.2.1 digest_0.6.19
# [9] R6_2.4.0 plyr_1.8.4 RSQLite_2.1.1 httr_1.4.0
# [13] pillar_1.4.1 GlobalOptions_0.1.0 zlibbioc_1.28.0 rlang_0.3.4
# [17] progress_1.2.2 lazyeval_0.2.2 rstudioapi_0.10 blob_1.1.1
# [21] GetoptLong_0.1.7 Matrix_1.2-17 BiocParallel_1.16.6 stringr_1.4.0
# [25] RCurl_1.95-4.12 bit_1.1-14 biomaRt_2.38.0 munsell_0.5.0
# [29] DelayedArray_0.8.0 compiler_3.5.3 rtracklayer_1.42.2 pkgconfig_2.0.2
# [33] shape_1.4.4 tidyselect_0.2.5 SummarizedExperiment_1.12.0 tibble_2.1.3
# [37] gridExtra_2.3 GenomeInfoDbData_1.2.0 matrixStats_0.54.0 XML_3.98-1.20
# [41] crayon_1.3.4 withr_2.1.2 GenomicAlignments_1.18.1 bitops_1.0-6
# [45] gtable_0.3.0 DBI_1.0.0 magrittr_1.5 scales_1.0.0
# [49] stringi_1.4.3 XVector_0.22.0 RColorBrewer_1.1-2 rjson_0.2.20
# [53] tools_3.5.3 bit64_0.9-7 glue_1.3.1 purrr_0.3.2
# [57] hms_0.4.2 yaml_2.2.0 clue_0.3-57 colorspace_1.4-1
# [61] cluster_2.0.9 memoise_1.1.0
#####extra
############################# Start from lifover results ###########################
# # path to CNV results
# dir2 <- "/Users/lloyd/Documents/lloyd_2017/Research/Brahman_Angus/Assembly_version/final_to_correct_20180905/CopyNumberVariation/20190530/cnvr_data/results_liftover/"
#
# # angus
# path1 <- paste0(dir2,"angus_arsucd_coor")
#
# angus.cnvrs.mapped <- read_tsv(path1,col_names = FALSE, col_types = "cddcdd")
# colnames(angus.cnvrs.mapped) <- c("chr","start","end","species","unsure1","unsure2")
#
# #filter for size less than 1 mil
# angus.cnvrs.mapped_modi <- angus.cnvrs.mapped %>% mutate(size=end - start) %>% filter(size < 1e6)
# angus.cnvrs.mapped_modi$unique_name <- paste(angus.cnvrs.mapped_modi$chr,angus.cnvrs.mapped_modi$start,angus.cnvrs.mapped_modi$end,sep = "_")
#
# angus.cnvrs.mapped_modi$strand <- rep("*",nrow(angus.cnvrs.mapped_modi))
#
# cnv_interval_angus <- makeGRangesFromDataFrame(angus.cnvrs.mapped_modi, keep.extra.columns = TRUE,
# seqnames.field="chr", start.field="start",
# end.field="end", strand.field="strand")
#
# #brahman
# path2 <- paste0(dir2,"brahman_arsucd_coor")
#
# brahman.cnvrs.mapped <- read_tsv(path2,col_names = FALSE, col_types = "cddcdd")
# colnames(brahman.cnvrs.mapped) <- c("chr","start","end","species","unsure1","unsure2")
#
# #filter for size less than 1 mil
# brahman.cnvrs.mapped_modi <- brahman.cnvrs.mapped %>% mutate(size=end - start) %>% filter(size < 1e6)
# brahman.cnvrs.mapped_modi$unique_name <- paste(brahman.cnvrs.mapped_modi$chr,brahman.cnvrs.mapped_modi$start,brahman.cnvrs.mapped_modi$end,sep = "_")
#
# brahman.cnvrs.mapped_modi$strand <- rep("*",nrow(brahman.cnvrs.mapped_modi))
#
# cnv_interval_brahman <- makeGRangesFromDataFrame(brahman.cnvrs.mapped_modi, keep.extra.columns = TRUE,
# seqnames.field="chr", start.field="start",
# end.field="end", strand.field="strand")
#
# #arsucd
# path3 <- paste0(dir2,"arsucd.cnvrs_regions.bed")
#
# arsucd.cnvrs.mapped <- read_tsv(path3,col_names = FALSE, col_types = "cddcdd")
# colnames(arsucd.cnvrs.mapped) <- c("chr","start","end","species","unsure1","unsure2")
#
# #filter for size less than 1 mil
# arsucd.cnvrs.mapped_modi <- arsucd.cnvrs.mapped %>% mutate(size=end - start) %>% filter(size < 1e6)
# arsucd.cnvrs.mapped_modi$unique_name <- paste(arsucd.cnvrs.mapped_modi$chr,arsucd.cnvrs.mapped_modi$start,arsucd.cnvrs.mapped_modi$end,sep = "_")
#
# arsucd.cnvrs.mapped_modi$strand <- rep("*",nrow(arsucd.cnvrs.mapped_modi))
#
# cnv_interval_arsucd <- makeGRangesFromDataFrame(arsucd.cnvrs.mapped_modi, keep.extra.columns = TRUE,
# seqnames.field="chr", start.field="start",
# end.field="end", strand.field="strand")
#
# #combine cnv granges as list
# listInput_complxhtmap <- list(angus=cnv_interval_angus,brahman=cnv_interval_brahman,hereford=cnv_interval_arsucd)
#
# #make UpSet plot using the function from ComplexHeatmap
# # install.packages("remotes")
# # remotes::install_github("jokergoo/ComplexHeatmap")
#
# m = make_comb_mat(listInput_complxhtmap)
#
# set_size(m)
#
# comb_size(m)
#
# UpSet(m)
#
# tiff(filename = "FigFinal_Upset_basepair_resolution.tiff",width = 500,height = 300)
# UpSet(m, pt_size = unit(5, "mm"), lwd = 3, comb_col = c("red", "blue", "black")[comb_degree(m)])
# dev.off()
|
/scripts/brahman_angus_CNVR_liftover_v1.R
|
no_license
|
lloydlow/BrahmanAngusAssemblyScripts
|
R
| false
| false
| 12,373
|
r
|
#------------------------------------------------------
# Program name: brahman_angus_CNVR_liftover_v1.R
# Objective: analyse Derek CNVR liftover that will give
# common arsucd coor
# Author: Lloyd Low
# Email add: lloydlow@hotmail.com
#------------------------------------------------------
library(UpSetR)
library(ggplot2)
library(dplyr)
library(readr)
library(tidyr)
library(GenomicFeatures)
library(ComplexHeatmap)
library(circlize)
#reproducing Derek's upset results
dir1 <- "/Users/lloyd/Documents/lloyd_2017/Research/Brahman_Angus/Assembly_version/final_to_correct_20180905/CopyNumberVariation/20190530/cnvr_data/"
upset_angus <- scan(paste0(dir1,"angus.upset.list"),sep="\n")
upset_brahman <- scan(paste0(dir1,"brahman.upset.list"),sep="\n")
upset_arsucd <- scan(paste0(dir1,"arsucd.upset.list"),sep="\n")
listInput <- list(angus=upset_angus,brahman=upset_brahman,hereford=upset_arsucd)
upset(fromList(listInput), order.by = "freq")
############################# Start from lifover modified results, without sex chr and unplaced ###########################
rm(list=ls())
# path to CNV results
dir2 <- "/Users/lloyd/Documents/lloyd_2017/Research/Brahman_Angus/Assembly_version/final_to_correct_20180905/CopyNumberVariation/20190530/cnvr_data/results_liftover/"
# angus
path1 <- paste0(dir2,"angus_arsucd_modi_coor")
angus.cnvrs.mapped <- read_tsv(path1,col_names = FALSE, col_types = "cddcdd")
colnames(angus.cnvrs.mapped) <- c("chr","start","end","species","unsure1","unsure2")
#filter for size less than 1 mil
angus.cnvrs.mapped_modi <- angus.cnvrs.mapped %>% mutate(size=end - start) %>% filter(size < 1e6)
angus.cnvrs.mapped_modi$unique_name <- paste(angus.cnvrs.mapped_modi$chr,angus.cnvrs.mapped_modi$start,angus.cnvrs.mapped_modi$end,sep = "_")
angus.cnvrs.mapped_modi$strand <- rep("*",nrow(angus.cnvrs.mapped_modi))
angus.cnvrs.mapped_modi <- angus.cnvrs.mapped_modi %>% filter(chr != "tig00020276_arrow_arrow_40739300_45952718")
cnv_interval_angus <- makeGRangesFromDataFrame(angus.cnvrs.mapped_modi, keep.extra.columns = TRUE,
seqnames.field="chr", start.field="start",
end.field="end", strand.field="strand")
#brahman
path2 <- paste0(dir2,"brahman_arsucd_modi_coor")
brahman.cnvrs.mapped <- read_tsv(path2,col_names = FALSE, col_types = "cddcdd")
colnames(brahman.cnvrs.mapped) <- c("chr","start","end","species","unsure1","unsure2")
#filter for size less than 1 mil
brahman.cnvrs.mapped_modi <- brahman.cnvrs.mapped %>% mutate(size=end - start) %>% filter(size < 1e6)
brahman.cnvrs.mapped_modi$unique_name <- paste(brahman.cnvrs.mapped_modi$chr,brahman.cnvrs.mapped_modi$start,brahman.cnvrs.mapped_modi$end,sep = "_")
brahman.cnvrs.mapped_modi$strand <- rep("*",nrow(brahman.cnvrs.mapped_modi))
brahman.cnvrs.mapped_modi <- brahman.cnvrs.mapped_modi %>% filter(chr != "tig00000831_arrow_arrow_obj") %>%
filter(chr != "tig00001951_arrow_arrow_obj") %>% filter(chr != "tig00002091_arrow_arrow_obj") %>%
filter(chr != "X")
cnv_interval_brahman <- makeGRangesFromDataFrame(brahman.cnvrs.mapped_modi, keep.extra.columns = TRUE,
seqnames.field="chr", start.field="start",
end.field="end", strand.field="strand")
#arsucd
# Hereford (ARS-UCD) CNVRs — already in reference coordinates, no liftover.
path3 <- paste0(dir2,"arsucd.cnvrs_regions.bed")
arsucd.cnvrs.mapped <- read_tsv(path3,col_names = FALSE, col_types = "cddcdd")
colnames(arsucd.cnvrs.mapped) <- c("chr","start","end","species","unsure1","unsure2")
#filter for size less than 1 mil
arsucd.cnvrs.mapped_modi <- arsucd.cnvrs.mapped %>% mutate(size=end - start) %>% filter(size < 1e6)
arsucd.cnvrs.mapped_modi$unique_name <- paste(arsucd.cnvrs.mapped_modi$chr,arsucd.cnvrs.mapped_modi$start,arsucd.cnvrs.mapped_modi$end,sep = "_")
arsucd.cnvrs.mapped_modi$strand <- rep("*",nrow(arsucd.cnvrs.mapped_modi))
# Keep only numerically named chromosomes: as.numeric() on names like "X"
# yields NA (with a warning), and those rows are filtered out.
arsucd.cnvrs.mapped_modi <- arsucd.cnvrs.mapped_modi %>% mutate(selc = as.numeric(chr)) %>%
  filter(!is.na(selc)) %>% dplyr::select(chr:strand)
cnv_interval_arsucd <- makeGRangesFromDataFrame(arsucd.cnvrs.mapped_modi, keep.extra.columns = TRUE,
                                                seqnames.field="chr", start.field="start",
                                                end.field="end", strand.field="strand")
#combine cnv granges as list
listInput_complxhtmap <- list(angus=cnv_interval_angus,brahman=cnv_interval_brahman,hereford=cnv_interval_arsucd)
#make UpSet plot using the function from ComplexHeatmap
# Combination matrix of overlaps among the three assemblies' CNVR sets.
m = make_comb_mat(listInput_complxhtmap)
set_size(m)
comb_size(m)
UpSet(m)
# Publication figure: larger points/lines, combinations coloured by degree
# (1-set = red, 2-set = blue, 3-set = black).
tiff(filename = "FigFinal_Upset_basepair_resolution.tiff",width = 500,height = 300)
UpSet(m, pt_size = unit(5, "mm"), lwd = 3, comb_col = c("red", "blue", "black")[comb_degree(m)])
dev.off()
#On average, 0.5% of each cattle genome was covered by CNV regions (CNVRs)
#mean((set_size(m)/2.7e9)*100)
#[1] 0.51061
#The majority of CNVRs (at least 76% from each assembly) were found to be unique to one assembly
# (comb_size(m)[1:3]/set_size(m))*100
# angus brahman hereford
# 100 010 001
# 76.88974 82.24537 87.84588
# Angus vs Brahman
# comb_size(m)[4]
# 110
# 1345463
# Angus vs Hereford
# comb_size(m)[5]
# 101
# 988764
#region of Angus intersect with Brahman only
# Intervals shared by Angus and Brahman but absent from Hereford:
# intersect the first two sets, then subtract the third.
Angus_vs_Brahman_intersect_gr <- GenomicRanges::intersect(cnv_interval_angus,cnv_interval_brahman, ignore.strand = TRUE)
Angus_vs_Brahman_intersect_only_gr <- GenomicRanges::setdiff(Angus_vs_Brahman_intersect_gr,cnv_interval_arsucd, ignore.strand = TRUE)
Angus_vs_Brahman_intersect_only_df <- as(Angus_vs_Brahman_intersect_only_gr, "data.frame")
# NOTE(review): hard-coded absolute output path.
write_csv(Angus_vs_Brahman_intersect_only_df,"/Users/lloyd/Documents/lloyd_2017/Research/Brahman_Angus/Assembly_version/final_to_correct_20180905/CopyNumberVariation/20190530/suspicious_overlap_btwn_brahman_angus_only/Angus_vs_Brahman_intersect_only_df.csv")
# > sessionInfo()
# R version 3.5.3 (2019-03-11)
# Platform: x86_64-apple-darwin15.6.0 (64-bit)
# Running under: macOS Mojave 10.14.5
#
# Matrix products: default
# BLAS: /System/Library/Frameworks/Accelerate.framework/Versions/A/Frameworks/vecLib.framework/Versions/A/libBLAS.dylib
# LAPACK: /Library/Frameworks/R.framework/Versions/3.5/Resources/lib/libRlapack.dylib
#
# locale:
# [1] en_AU.UTF-8/en_AU.UTF-8/en_AU.UTF-8/C/en_AU.UTF-8/en_AU.UTF-8
#
# attached base packages:
# [1] grid stats4 parallel stats graphics grDevices utils datasets methods base
#
# other attached packages:
# [1] circlize_0.4.6 ComplexHeatmap_2.1.0 GenomicFeatures_1.34.8 AnnotationDbi_1.44.0 Biobase_2.42.0
# [6] GenomicRanges_1.34.0 GenomeInfoDb_1.18.2 IRanges_2.16.0 S4Vectors_0.20.1 BiocGenerics_0.28.0
# [11] tidyr_0.8.3 readr_1.3.1 dplyr_0.8.1 ggplot2_3.1.1 UpSetR_1.4.0
#
# loaded via a namespace (and not attached):
# [1] Rcpp_1.0.1 lattice_0.20-38 prettyunits_1.0.2 png_0.1-7
# [5] Rsamtools_1.34.1 Biostrings_2.50.2 assertthat_0.2.1 digest_0.6.19
# [9] R6_2.4.0 plyr_1.8.4 RSQLite_2.1.1 httr_1.4.0
# [13] pillar_1.4.1 GlobalOptions_0.1.0 zlibbioc_1.28.0 rlang_0.3.4
# [17] progress_1.2.2 lazyeval_0.2.2 rstudioapi_0.10 blob_1.1.1
# [21] GetoptLong_0.1.7 Matrix_1.2-17 BiocParallel_1.16.6 stringr_1.4.0
# [25] RCurl_1.95-4.12 bit_1.1-14 biomaRt_2.38.0 munsell_0.5.0
# [29] DelayedArray_0.8.0 compiler_3.5.3 rtracklayer_1.42.2 pkgconfig_2.0.2
# [33] shape_1.4.4 tidyselect_0.2.5 SummarizedExperiment_1.12.0 tibble_2.1.3
# [37] gridExtra_2.3 GenomeInfoDbData_1.2.0 matrixStats_0.54.0 XML_3.98-1.20
# [41] crayon_1.3.4 withr_2.1.2 GenomicAlignments_1.18.1 bitops_1.0-6
# [45] gtable_0.3.0 DBI_1.0.0 magrittr_1.5 scales_1.0.0
# [49] stringi_1.4.3 XVector_0.22.0 RColorBrewer_1.1-2 rjson_0.2.20
# [53] tools_3.5.3 bit64_0.9-7 glue_1.3.1 purrr_0.3.2
# [57] hms_0.4.2 yaml_2.2.0 clue_0.3-57 colorspace_1.4-1
# [61] cluster_2.0.9 memoise_1.1.0
#####extra
############################# Start from lifover results ###########################
# # path to CNV results
# dir2 <- "/Users/lloyd/Documents/lloyd_2017/Research/Brahman_Angus/Assembly_version/final_to_correct_20180905/CopyNumberVariation/20190530/cnvr_data/results_liftover/"
#
# # angus
# path1 <- paste0(dir2,"angus_arsucd_coor")
#
# angus.cnvrs.mapped <- read_tsv(path1,col_names = FALSE, col_types = "cddcdd")
# colnames(angus.cnvrs.mapped) <- c("chr","start","end","species","unsure1","unsure2")
#
# #filter for size less than 1 mil
# angus.cnvrs.mapped_modi <- angus.cnvrs.mapped %>% mutate(size=end - start) %>% filter(size < 1e6)
# angus.cnvrs.mapped_modi$unique_name <- paste(angus.cnvrs.mapped_modi$chr,angus.cnvrs.mapped_modi$start,angus.cnvrs.mapped_modi$end,sep = "_")
#
# angus.cnvrs.mapped_modi$strand <- rep("*",nrow(angus.cnvrs.mapped_modi))
#
# cnv_interval_angus <- makeGRangesFromDataFrame(angus.cnvrs.mapped_modi, keep.extra.columns = TRUE,
# seqnames.field="chr", start.field="start",
# end.field="end", strand.field="strand")
#
# #brahman
# path2 <- paste0(dir2,"brahman_arsucd_coor")
#
# brahman.cnvrs.mapped <- read_tsv(path2,col_names = FALSE, col_types = "cddcdd")
# colnames(brahman.cnvrs.mapped) <- c("chr","start","end","species","unsure1","unsure2")
#
# #filter for size less than 1 mil
# brahman.cnvrs.mapped_modi <- brahman.cnvrs.mapped %>% mutate(size=end - start) %>% filter(size < 1e6)
# brahman.cnvrs.mapped_modi$unique_name <- paste(brahman.cnvrs.mapped_modi$chr,brahman.cnvrs.mapped_modi$start,brahman.cnvrs.mapped_modi$end,sep = "_")
#
# brahman.cnvrs.mapped_modi$strand <- rep("*",nrow(brahman.cnvrs.mapped_modi))
#
# cnv_interval_brahman <- makeGRangesFromDataFrame(brahman.cnvrs.mapped_modi, keep.extra.columns = TRUE,
# seqnames.field="chr", start.field="start",
# end.field="end", strand.field="strand")
#
# #arsucd
# path3 <- paste0(dir2,"arsucd.cnvrs_regions.bed")
#
# arsucd.cnvrs.mapped <- read_tsv(path3,col_names = FALSE, col_types = "cddcdd")
# colnames(arsucd.cnvrs.mapped) <- c("chr","start","end","species","unsure1","unsure2")
#
# #filter for size less than 1 mil
# arsucd.cnvrs.mapped_modi <- arsucd.cnvrs.mapped %>% mutate(size=end - start) %>% filter(size < 1e6)
# arsucd.cnvrs.mapped_modi$unique_name <- paste(arsucd.cnvrs.mapped_modi$chr,arsucd.cnvrs.mapped_modi$start,arsucd.cnvrs.mapped_modi$end,sep = "_")
#
# arsucd.cnvrs.mapped_modi$strand <- rep("*",nrow(arsucd.cnvrs.mapped_modi))
#
# cnv_interval_arsucd <- makeGRangesFromDataFrame(arsucd.cnvrs.mapped_modi, keep.extra.columns = TRUE,
# seqnames.field="chr", start.field="start",
# end.field="end", strand.field="strand")
#
# #combine cnv granges as list
# listInput_complxhtmap <- list(angus=cnv_interval_angus,brahman=cnv_interval_brahman,hereford=cnv_interval_arsucd)
#
# #make UpSet plot using the function from ComplexHeatmap
# # install.packages("remotes")
# # remotes::install_github("jokergoo/ComplexHeatmap")
#
# m = make_comb_mat(listInput_complxhtmap)
#
# set_size(m)
#
# comb_size(m)
#
# UpSet(m)
#
# tiff(filename = "FigFinal_Upset_basepair_resolution.tiff",width = 500,height = 300)
# UpSet(m, pt_size = unit(5, "mm"), lwd = 3, comb_col = c("red", "blue", "black")[comb_degree(m)])
# dev.off()
|
# ---------------------------------------------------------------------------- #
parse_BCFstats = function(path){
  # Parse one `bcftools stats` output file into a list of data frames, one
  # per section (SN, ST, AF, QUAL, ...), each prefixed with a `sample`
  # column derived from the file name.
  #
  # path : path to a "<sample>.filt.snps.stats.txt" file.
  #
  # Returns a named list of data.frames keyed by section code.
  #
  # Rewritten in base R: the original loaded stringr inside the function and
  # used section codes as unescaped regular expressions in str_detect();
  # scan(what = 'character') also silently strips quote characters.
  sname = sub('\\.filt\\.snps\\.stats\\.txt$', '', basename(path))
  s = readLines(path)
  # Section codes are the first tab-delimited field of each data line;
  # drop comment headers and the per-sample ID block.
  ind = unique(sub('\t.+', '', s))
  ind = ind[!grepl('#', ind, fixed = TRUE)]
  ind = setdiff(ind, 'ID')
  # First and last line of each section; fixed = TRUE so section codes are
  # matched literally, not as regular expressions.
  wind = lapply(setNames(ind, ind), function(x) {
    range(grep(paste0(x, '\t'), s, fixed = TRUE))
  })
  lout = list()
  for(i in names(wind)){
    message(i)
    d = s[seq(wind[[i]][1], wind[[i]][2])]
    d = sub('^\\s+#', '', d)                    # strip indented header markers
    d = data.frame(do.call(rbind, strsplit(d, '\t', fixed = TRUE)))
    # The first retained row is the column header; drop the "[n]" tags.
    colnames(d) = sub('\\[.+\\]', '', unlist(d[1, ]))
    d = data.frame(d[-1, -1])                   # drop header row + section code
    if(i == 'SN')
      d$value = as.numeric(d$value)
    if(i == 'ST')
      d$count = as.numeric(d$count)
    if(!i %in% c('SN', 'ST'))
      d = suppressWarnings(data.frame(lapply(d, as.numeric)))
    d = data.frame(sample = sname, as.data.frame(d))
    lout[[i]] = d
  }
  return(lout)
}
# ---------------------------------------------------------------------------- #
parse_BCFstats_Files = function(
  path,
  suffix = '.filt.snps.stats.txt'
){
  # Locate every bcftools-stats file under `path` (recursively) and
  # row-bind the parsed per-sample tables, section by section.
  #
  # path   : directory searched for stats files.
  # suffix : file-name pattern identifying stats files.
  #
  # Returns a named list of data.frames, one per stats section.
  stats_files = list.files(path, recursive = TRUE, pattern = suffix,
                           full.names = TRUE)
  if(length(stats_files) == 0)
    stop('There are no input files')
  parsed = suppressMessages(lapply(stats_files, parse_BCFstats))
  sections = names(parsed[[1]])
  names(sections) = sections
  lapply(sections, function(section) {
    do.call(rbind, lapply(parsed, '[[', section))
  })
}
|
/R/Parse_BCFStats.R
|
no_license
|
frenkiboy/MyLib
|
R
| false
| false
| 1,678
|
r
|
# ---------------------------------------------------------------------------- #
parse_BCFstats = function(path){
  # Parse one `bcftools stats` output file into a list of data frames, one
  # per section (SN, ST, AF, QUAL, ...), each prefixed with a `sample`
  # column derived from the file name.
  #
  # path : path to a "<sample>.filt.snps.stats.txt" file.
  #
  # Returns a named list of data.frames keyed by section code.
  #
  # Rewritten in base R: the original loaded stringr inside the function and
  # used section codes as unescaped regular expressions in str_detect();
  # scan(what = 'character') also silently strips quote characters.
  sname = sub('\\.filt\\.snps\\.stats\\.txt$', '', basename(path))
  s = readLines(path)
  # Section codes are the first tab-delimited field of each data line;
  # drop comment headers and the per-sample ID block.
  ind = unique(sub('\t.+', '', s))
  ind = ind[!grepl('#', ind, fixed = TRUE)]
  ind = setdiff(ind, 'ID')
  # First and last line of each section; fixed = TRUE so section codes are
  # matched literally, not as regular expressions.
  wind = lapply(setNames(ind, ind), function(x) {
    range(grep(paste0(x, '\t'), s, fixed = TRUE))
  })
  lout = list()
  for(i in names(wind)){
    message(i)
    d = s[seq(wind[[i]][1], wind[[i]][2])]
    d = sub('^\\s+#', '', d)                    # strip indented header markers
    d = data.frame(do.call(rbind, strsplit(d, '\t', fixed = TRUE)))
    # The first retained row is the column header; drop the "[n]" tags.
    colnames(d) = sub('\\[.+\\]', '', unlist(d[1, ]))
    d = data.frame(d[-1, -1])                   # drop header row + section code
    if(i == 'SN')
      d$value = as.numeric(d$value)
    if(i == 'ST')
      d$count = as.numeric(d$count)
    if(!i %in% c('SN', 'ST'))
      d = suppressWarnings(data.frame(lapply(d, as.numeric)))
    d = data.frame(sample = sname, as.data.frame(d))
    lout[[i]] = d
  }
  return(lout)
}
# ---------------------------------------------------------------------------- #
parse_BCFstats_Files = function(
  path,
  suffix = '.filt.snps.stats.txt'
){
  # Locate every bcftools-stats file under `path` (recursively) and
  # row-bind the parsed per-sample tables, section by section.
  #
  # path   : directory searched for stats files.
  # suffix : file-name pattern identifying stats files.
  #
  # Returns a named list of data.frames, one per stats section.
  stats_files = list.files(path, recursive = TRUE, pattern = suffix,
                           full.names = TRUE)
  if(length(stats_files) == 0)
    stop('There are no input files')
  parsed = suppressMessages(lapply(stats_files, parse_BCFstats))
  sections = names(parsed[[1]])
  names(sections) = sections
  lapply(sections, function(section) {
    do.call(rbind, lapply(parsed, '[[', section))
  })
}
|
# Per-length TE count table: V2 = annotation-run id, V3 = TE length,
# V4 = count of elements at that length.
# NOTE(review): hard-coded absolute path (and an unusual ".R" extension for a
# data file) — only runs on the author's machine.
d<-read.table("/Volumes/Volume_4/analysis/DsimTE/refgen/TEannotation/stat/lengthdistri.R")
# The four annotation runs to compare; one panel each in a 2x2 grid.
ids=c("ssr","g2","g1","g05")
par(mfrow=c(2,2))
avleng<-function(df){
  # Average TE length: mean of df$V3 (length) weighted by df$V4 (count),
  # i.e. sum(V3 * V4) / sum(V4).
  # Replaces the hand-rolled version, which also shadowed base::c with a
  # local variable named `c`.
  weighted.mean(df$V3, df$V4)
}
histable<-function(df){
  # Expand the (length, count) summary back into raw observations:
  # each length df$V3[i] is repeated df$V4[i] times.
  # rep() is vectorized, replacing the original O(n^2) grow-in-a-loop,
  # and also handles a zero-row data frame (returns a length-0 vector,
  # where the original's 1:nrow(df) loop failed).
  rep(df$V3, df$V4)
}
# One panel per annotation run: length-vs-count curve on a log-x axis,
# with a red vertical guide at length 50.
for(id in ids)
{
ss<-d[d$V2==id,]
le=sum(ss$V4)
al<-avleng(ss)
# Truncate (not round) the average length to two decimals for the title.
al<-as.integer(al*100)
al<-as.numeric(al)/100
print(al)
header=paste(id," count=",le," av.leng.=",al,sep="")
plot(ss$V3,ss$V4,type="l",main=header,log="x",xlim=c(1,15000),ylim=c(1,350),xlab="TE length",ylab="count")
lines(c(50,50),c(0,350),col="red")
#histdat<-histable(ss)
#print(histdat)
#bre<-seq(0,30000,100)
#bre<-seq(2,5,0.1)
#bre<-c(0,10^bre)
#print(bre)
#hist.data<-hist(histdat, plot=F,breaks=bre)
#hist.data$counts = log10(hist.data$counts)
#plot(hist.data,xlim=c(0,3000),ylim=c(0,5))
#hist(histable(ss),breaks=bre,xlim=c(0,3000))
}
|
/TE/melsim/lengthDistri.R
|
no_license
|
capoony/popgentools
|
R
| false
| false
| 1,022
|
r
|
# Per-length TE count table: V2 = annotation-run id, V3 = TE length,
# V4 = count of elements at that length.
# NOTE(review): hard-coded absolute path (and an unusual ".R" extension for a
# data file) — only runs on the author's machine.
d<-read.table("/Volumes/Volume_4/analysis/DsimTE/refgen/TEannotation/stat/lengthdistri.R")
# The four annotation runs to compare; one panel each in a 2x2 grid.
ids=c("ssr","g2","g1","g05")
par(mfrow=c(2,2))
avleng<-function(df){
  # Average TE length: mean of df$V3 (length) weighted by df$V4 (count),
  # i.e. sum(V3 * V4) / sum(V4).
  # Replaces the hand-rolled version, which also shadowed base::c with a
  # local variable named `c`.
  weighted.mean(df$V3, df$V4)
}
histable<-function(df){
  # Expand the (length, count) summary back into raw observations:
  # each length df$V3[i] is repeated df$V4[i] times.
  # rep() is vectorized, replacing the original O(n^2) grow-in-a-loop,
  # and also handles a zero-row data frame (returns a length-0 vector,
  # where the original's 1:nrow(df) loop failed).
  rep(df$V3, df$V4)
}
# One panel per annotation run: length-vs-count curve on a log-x axis,
# with a red vertical guide at length 50.
for(id in ids)
{
ss<-d[d$V2==id,]
le=sum(ss$V4)
al<-avleng(ss)
# Truncate (not round) the average length to two decimals for the title.
al<-as.integer(al*100)
al<-as.numeric(al)/100
print(al)
header=paste(id," count=",le," av.leng.=",al,sep="")
plot(ss$V3,ss$V4,type="l",main=header,log="x",xlim=c(1,15000),ylim=c(1,350),xlab="TE length",ylab="count")
lines(c(50,50),c(0,350),col="red")
#histdat<-histable(ss)
#print(histdat)
#bre<-seq(0,30000,100)
#bre<-seq(2,5,0.1)
#bre<-c(0,10^bre)
#print(bre)
#hist.data<-hist(histdat, plot=F,breaks=bre)
#hist.data$counts = log10(hist.data$counts)
#plot(hist.data,xlim=c(0,3000),ylim=c(0,5))
#hist(histable(ss),breaks=bre,xlim=c(0,3000))
}
|
propSum <- function(x){
  # Sum of the proper divisors of x (all divisors of x except x itself).
  # Divisors are enumerated in pairs (d, x/d) for d up to sqrt(x); that
  # total over-counts x itself (paired with 1) and, for perfect squares,
  # counts sqrt(x) twice, so both are subtracted.
  lim <- floor(sqrt(x))
  small <- seq_len(lim)       # seq_len() is safe when lim == 0 (1:0 is not)
  div <- small[x %% small == 0]
  sum(div + x/div) - x - lim * (x == lim^2)
}
# Project Euler 21: sum of all amicable numbers below 10000.
# amicable[i-1] records whether the number i is amicable (the vector is
# offset by one because numbers start at 2).
lim <- 10000
amicable <- rep(NA, (lim-1))
for(i in 2:lim){
  # Fix: the original tested amicable[i] while storing at amicable[i-1],
  # so the memoization never took effect (the final sum was still correct,
  # but every number was recomputed).
  if(is.na(amicable[i-1])){
    a <- propSum(i)
    b <- propSum(a)
    if(i == b && i != a){
      amicable[i-1] <- amicable[b-1] <- TRUE
    }else{
      amicable[i-1] <- FALSE
    }
  }
}
sum(which(amicable == TRUE)+1)
|
/problems/problem021.R
|
no_license
|
parksw3/projectEuler
|
R
| false
| false
| 500
|
r
|
propSum <- function(x){
  # Sum of the proper divisors of x (all divisors of x except x itself).
  # Divisors are enumerated in pairs (d, x/d) for d up to sqrt(x); that
  # total over-counts x itself (paired with 1) and, for perfect squares,
  # counts sqrt(x) twice, so both are subtracted.
  lim <- floor(sqrt(x))
  small <- seq_len(lim)       # seq_len() is safe when lim == 0 (1:0 is not)
  div <- small[x %% small == 0]
  sum(div + x/div) - x - lim * (x == lim^2)
}
# Project Euler 21: sum of all amicable numbers below 10000.
# amicable[i-1] records whether the number i is amicable (the vector is
# offset by one because numbers start at 2).
lim <- 10000
amicable <- rep(NA, (lim-1))
for(i in 2:lim){
  # Fix: the original tested amicable[i] while storing at amicable[i-1],
  # so the memoization never took effect (the final sum was still correct,
  # but every number was recomputed).
  if(is.na(amicable[i-1])){
    a <- propSum(i)
    b <- propSum(a)
    if(i == b && i != a){
      amicable[i-1] <- amicable[b-1] <- TRUE
    }else{
      amicable[i-1] <- FALSE
    }
  }
}
sum(which(amicable == TRUE)+1)
|
select <- function(x, criterion=c("BIC","AIC","CAIC","EBIC"), gamma, scores=FALSE, df.method="active"){
  # Choose the best model from a "fanc" sparse factor-analysis solution path
  # by minimizing an information criterion over the (rho, gamma) grid.
  #
  # x         : object of class "fanc" returned by fanc().
  # criterion : which criterion to minimize; the first element is used.
  # gamma     : optional MC+ shape parameter. When missing, the gamma column
  #             with the smallest criterion value overall is used; Inf
  #             selects the first column; otherwise the closest grid value.
  # scores    : if TRUE, also compute regression factor scores (requires the
  #             data matrix x$x stored during fitting).
  # df.method : "active" counts nonzero loadings as degrees of freedom;
  #             "reparametrization" uses the reparametrized df.
  #
  # Returns a list with loadings, uniquenesses, df, the attained criterion
  # value, optionally Phi / scores / goodness-of-fit indices, and the
  # selected rho and gamma.
  #
  # inherits() replaces class(x) != "fanc": it handles multi-class objects,
  # where the original comparison yields a non-scalar if() condition.
  if(!inherits(x, "fanc")) stop('the class of object "x" must be "fanc"')
  if(!missing(gamma)){
    if(gamma<=1) stop("gamma must be greater than 1")
  }
  if(scores && is.null(x$x)) stop("Data matrix is needed for computing the factor score in fitting procedure by fanc")
  if(is.null(x$AIC)) stop("The model selection criterion was not able to be calculated. Data matrix or the number of observations is needed in fitting procedure by fanc.")
  cand <- c("BIC", "AIC", "CAIC", "EBIC")
  criterion <- criterion[1]
  if(!criterion %in% cand) stop('"criterion" must be "AIC", "BIC, "CAIC" or "EBIC".')
  # Criterion values over the path: rows index rho, columns index gamma.
  if(df.method=="reparametrization"){
    criterion_vec <- switch(criterion, AIC = x$AIC, BIC = x$BIC,
                            CAIC = x$CAIC, EBIC = x$EBIC)
  }
  if(df.method=="active"){
    criterion_vec <- switch(criterion, AIC = x$AIC_dfnonzero, BIC = x$BIC_dfnonzero,
                            CAIC = x$CAIC_dfnonzero, EBIC = x$EBIC_dfnonzero)
  }
  gamma_vec <- x$gamma
  gamma_length <- length(gamma_vec)
  if(missing(gamma)) gamma_index <- which.min(apply(criterion_vec,2,min))
  else if(gamma==Inf) gamma_index <- 1
  else gamma_index <- which.min(abs(gamma-gamma_vec))
  # Criterion values along the rho path for the chosen gamma column.
  if(gamma_length == 1) criterion_vec2 <- c(criterion_vec)
  else criterion_vec2 <- criterion_vec[,gamma_index]
  rho_index <- which.min(criterion_vec2)
  Lambda <- x$loadings[[gamma_index]][[rho_index]]
  diagPsi <- x$uniquenesses[rho_index,,gamma_index]
  if(x$cor.factor){
    Phi <- as.matrix(x$Phi[,,rho_index,gamma_index])
  }
  rho0 <- x$rho[rho_index,gamma_index]
  gamma0 <- gamma_vec[gamma_index]
  criterion_minimum <- min(criterion_vec2)
  if(df.method=="reparametrization") df <- x$df[rho_index,gamma_index]
  if(df.method=="active") df <- x$dfnonzero[rho_index,gamma_index]
  if(scores){
    # Regression factor scores: X Psi^-1 Lambda (Lambda' Psi^-1 Lambda + K)^-1,
    # with K = Phi^-1 for correlated factors, the identity otherwise.
    Lambda_mat <- as.matrix(Lambda)
    diagPsiinvLambda <- matrix(diagPsi^(-1), nrow(Lambda_mat), ncol(Lambda_mat)) * Lambda_mat
    M0 <- crossprod(Lambda_mat, diagPsiinvLambda)
    if(x$cor.factor) M <- M0 + solve(Phi)
    else M <- M0 + diag(x$factors)
    ans_scores <- x$x %*% (diagPsiinvLambda %*% solve(M))
  }
  if(is.null(x$GFI)==FALSE){
    # Goodness-of-fit indices at the selected (rho, gamma); AGFI, CFI and
    # RMSEA depend on the df definition, GFI and SRMR do not.
    if(df.method=="reparametrization"){
      GFI <- x$GFI[rho_index,gamma_index]
      AGFI <- x$AGFI[rho_index,gamma_index]
      CFI <- x$CFI[rho_index,gamma_index]
      RMSEA <- x$RMSEA[rho_index,gamma_index]
      SRMR <- x$SRMR[rho_index,gamma_index]
    }
    if(df.method=="active"){
      GFI <- x$GFI[rho_index,gamma_index]
      AGFI <- x$AGFI_dfnonzero[rho_index,gamma_index]
      CFI <- x$CFI_dfnonzero[rho_index,gamma_index]
      RMSEA <- x$RMSEA_dfnonzero[rho_index,gamma_index]
      SRMR <- x$SRMR[rho_index,gamma_index]
    }
    GOF <- c(GFI,AGFI,CFI,RMSEA,SRMR)
    names(GOF) <- c("GFI","AGFI","CFI","RMSEA","SRMR")
  }
  ans <- list(loadings=Lambda, uniquenesses=diagPsi)
  if(x$cor.factor) ans <- append(ans,list(Phi=Phi))
  if(scores) ans <- append(ans,list(scores=ans_scores))
  ans <- append(ans,list(df=df))
  ans[[criterion]] <- criterion_minimum   # element named "AIC"/"BIC"/"CAIC"/"EBIC"
  if(is.null(x$GFI)==FALSE) ans <- append(ans,list(goodness.of.fit=GOF))
  ans <- append(ans,list(rho=rho0, gamma=gamma0))
  ans
}
|
/R/select.fanc.R
|
no_license
|
keihirose/fanc
|
R
| false
| false
| 3,763
|
r
|
select <- function(x, criterion=c("BIC","AIC","CAIC","EBIC"), gamma, scores=FALSE, df.method="active"){
  # Choose the best model from a "fanc" sparse factor-analysis solution path
  # by minimizing an information criterion over the (rho, gamma) grid.
  #
  # x         : object of class "fanc" returned by fanc().
  # criterion : which criterion to minimize; the first element is used.
  # gamma     : optional MC+ shape parameter. When missing, the gamma column
  #             with the smallest criterion value overall is used; Inf
  #             selects the first column; otherwise the closest grid value.
  # scores    : if TRUE, also compute regression factor scores (requires the
  #             data matrix x$x stored during fitting).
  # df.method : "active" counts nonzero loadings as degrees of freedom;
  #             "reparametrization" uses the reparametrized df.
  #
  # Returns a list with loadings, uniquenesses, df, the attained criterion
  # value, optionally Phi / scores / goodness-of-fit indices, and the
  # selected rho and gamma.
  #
  # inherits() replaces class(x) != "fanc": it handles multi-class objects,
  # where the original comparison yields a non-scalar if() condition.
  if(!inherits(x, "fanc")) stop('the class of object "x" must be "fanc"')
  if(!missing(gamma)){
    if(gamma<=1) stop("gamma must be greater than 1")
  }
  if(scores && is.null(x$x)) stop("Data matrix is needed for computing the factor score in fitting procedure by fanc")
  if(is.null(x$AIC)) stop("The model selection criterion was not able to be calculated. Data matrix or the number of observations is needed in fitting procedure by fanc.")
  cand <- c("BIC", "AIC", "CAIC", "EBIC")
  criterion <- criterion[1]
  if(!criterion %in% cand) stop('"criterion" must be "AIC", "BIC, "CAIC" or "EBIC".')
  # Criterion values over the path: rows index rho, columns index gamma.
  if(df.method=="reparametrization"){
    criterion_vec <- switch(criterion, AIC = x$AIC, BIC = x$BIC,
                            CAIC = x$CAIC, EBIC = x$EBIC)
  }
  if(df.method=="active"){
    criterion_vec <- switch(criterion, AIC = x$AIC_dfnonzero, BIC = x$BIC_dfnonzero,
                            CAIC = x$CAIC_dfnonzero, EBIC = x$EBIC_dfnonzero)
  }
  gamma_vec <- x$gamma
  gamma_length <- length(gamma_vec)
  if(missing(gamma)) gamma_index <- which.min(apply(criterion_vec,2,min))
  else if(gamma==Inf) gamma_index <- 1
  else gamma_index <- which.min(abs(gamma-gamma_vec))
  # Criterion values along the rho path for the chosen gamma column.
  if(gamma_length == 1) criterion_vec2 <- c(criterion_vec)
  else criterion_vec2 <- criterion_vec[,gamma_index]
  rho_index <- which.min(criterion_vec2)
  Lambda <- x$loadings[[gamma_index]][[rho_index]]
  diagPsi <- x$uniquenesses[rho_index,,gamma_index]
  if(x$cor.factor){
    Phi <- as.matrix(x$Phi[,,rho_index,gamma_index])
  }
  rho0 <- x$rho[rho_index,gamma_index]
  gamma0 <- gamma_vec[gamma_index]
  criterion_minimum <- min(criterion_vec2)
  if(df.method=="reparametrization") df <- x$df[rho_index,gamma_index]
  if(df.method=="active") df <- x$dfnonzero[rho_index,gamma_index]
  if(scores){
    # Regression factor scores: X Psi^-1 Lambda (Lambda' Psi^-1 Lambda + K)^-1,
    # with K = Phi^-1 for correlated factors, the identity otherwise.
    Lambda_mat <- as.matrix(Lambda)
    diagPsiinvLambda <- matrix(diagPsi^(-1), nrow(Lambda_mat), ncol(Lambda_mat)) * Lambda_mat
    M0 <- crossprod(Lambda_mat, diagPsiinvLambda)
    if(x$cor.factor) M <- M0 + solve(Phi)
    else M <- M0 + diag(x$factors)
    ans_scores <- x$x %*% (diagPsiinvLambda %*% solve(M))
  }
  if(is.null(x$GFI)==FALSE){
    # Goodness-of-fit indices at the selected (rho, gamma); AGFI, CFI and
    # RMSEA depend on the df definition, GFI and SRMR do not.
    if(df.method=="reparametrization"){
      GFI <- x$GFI[rho_index,gamma_index]
      AGFI <- x$AGFI[rho_index,gamma_index]
      CFI <- x$CFI[rho_index,gamma_index]
      RMSEA <- x$RMSEA[rho_index,gamma_index]
      SRMR <- x$SRMR[rho_index,gamma_index]
    }
    if(df.method=="active"){
      GFI <- x$GFI[rho_index,gamma_index]
      AGFI <- x$AGFI_dfnonzero[rho_index,gamma_index]
      CFI <- x$CFI_dfnonzero[rho_index,gamma_index]
      RMSEA <- x$RMSEA_dfnonzero[rho_index,gamma_index]
      SRMR <- x$SRMR[rho_index,gamma_index]
    }
    GOF <- c(GFI,AGFI,CFI,RMSEA,SRMR)
    names(GOF) <- c("GFI","AGFI","CFI","RMSEA","SRMR")
  }
  ans <- list(loadings=Lambda, uniquenesses=diagPsi)
  if(x$cor.factor) ans <- append(ans,list(Phi=Phi))
  if(scores) ans <- append(ans,list(scores=ans_scores))
  ans <- append(ans,list(df=df))
  ans[[criterion]] <- criterion_minimum   # element named "AIC"/"BIC"/"CAIC"/"EBIC"
  if(is.null(x$GFI)==FALSE) ans <- append(ans,list(goodness.of.fit=GOF))
  ans <- append(ans,list(rho=rho0, gamma=gamma0))
  ans
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.