content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LoadData.R
\name{LoadData}
\alias{LoadData}
\title{LoadData}
\usage{
LoadData(
functionNames = NULL,
custom = FALSE,
filepath = NULL,
interactiveMode = interactive(),
tidy = TRUE,
DaysOld = 30,
minimumpercapitaactivecases = 0,
RiskEval = NULL,
dropNACountry = TRUE,
dropNAall = FALSE,
verbose = TRUE
)
}
\arguments{
\item{functionNames}{Name(s) of the function(s) used for loading data - options are in countrylist. Use NULL to attempt to download all available datasets.}
\item{custom}{If TRUE, allows for use with functions not listed in object countrylist (experimental usage).}
\item{filepath}{Optionally, provide a filepath to save an error csv file to.}
\item{interactiveMode}{Set whether the session is being run interactively. If not and no googledrive oauth token is found, avoid data requiring googledrive auth token.}
\item{tidy}{If TRUE, then perform tidying according to other parameters. If FALSE, then do nothing. (passed to tidy_Data).}
\item{DaysOld}{Set any pInf data more than this many days old to NA. (passed to tidy_Data).}
\item{minimumpercapitaactivecases}{Set any pInf data less than this to NA.(passed to tidy_Data).}
\item{RiskEval}{Set pInf to NA when risk is below RiskEval$minimumRisk (\%) using RiskEval$ascertainmentbias and a maximum group size, RiskEval$maximumN (Note: this setting overwrites minimumpercapitaactivecases). (passed to tidy_Data).}
\item{dropNACountry}{If TRUE, remove rows for countries whose pInf estimates all return NA.(passed to tidy_Data).}
\item{dropNAall}{If TRUE, remove rows for any region whose pInf estimates all return NA. (passed to tidy_Data).}
\item{verbose}{If TRUE, reports on loading progress and returns warnings/errors to console.}
}
\value{
A simple feature returning the date of most recent data (DateReport), a unique region code (geoid), the region name (RegionName) and country name (Country), the number of active cases per capita (pInf) and the regions geometry (geometry).
}
\description{
General framework for loading and tidying data.
}
\examples{
LoadData("LoadUS")
LoadData("LoadUS", dropNAall = TRUE)
LoadData("LoadNewZealand",tidy = FALSE)
LoadData(c("LoadUS","LoadMalaysia"))
\dontrun{
LoadData()
}
}
\seealso{
\code{\link[=tidy_Data]{tidy_Data()}}
}
|
/man/LoadData.Rd
|
permissive
|
sjbeckett/localcovid19now
|
R
| false
| true
| 2,347
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LoadData.R
\name{LoadData}
\alias{LoadData}
\title{LoadData}
\usage{
LoadData(
functionNames = NULL,
custom = FALSE,
filepath = NULL,
interactiveMode = interactive(),
tidy = TRUE,
DaysOld = 30,
minimumpercapitaactivecases = 0,
RiskEval = NULL,
dropNACountry = TRUE,
dropNAall = FALSE,
verbose = TRUE
)
}
\arguments{
\item{functionNames}{Name(s) of the function(s) used for loading data - options are in countrylist. Use NULL to attempt to download all available datasets.}
\item{custom}{If TRUE, allows for use with functions not listed in object countrylist (experimental usage).}
\item{filepath}{Optionally, provide a filepath to save an error csv file to.}
\item{interactiveMode}{Set whether the session is being run interactively. If not and no googledrive oauth token is found, avoid data requiring googledrive auth token.}
\item{tidy}{If TRUE, then perform tidying according to other parameters. If FALSE, then do nothing. (passed to tidy_Data).}
\item{DaysOld}{Set any pInf data more than this many days old to NA. (passed to tidy_Data).}
\item{minimumpercapitaactivecases}{Set any pInf data less than this to NA.(passed to tidy_Data).}
\item{RiskEval}{Set pInf to NA when risk is below RiskEval$minimumRisk (\%) using RiskEval$ascertainmentbias and a maximum group size, RiskEval$maximumN (Note: this setting overwrites minimumpercapitaactivecases). (passed to tidy_Data).}
\item{dropNACountry}{If TRUE, remove rows for countries whose pInf estimates all return NA.(passed to tidy_Data).}
\item{dropNAall}{If TRUE, remove rows for any region whose pInf estimates all return NA. (passed to tidy_Data).}
\item{verbose}{If TRUE, reports on loading progress and returns warnings/errors to console.}
}
\value{
A simple feature returning the date of most recent data (DateReport), a unique region code (geoid), the region name (RegionName) and country name (Country), the number of active cases per capita (pInf) and the regions geometry (geometry).
}
\description{
General framework for loading and tidying data.
}
\examples{
LoadData("LoadUS")
LoadData("LoadUS", dropNAall = TRUE)
LoadData("LoadNewZealand",tidy = FALSE)
LoadData(c("LoadUS","LoadMalaysia"))
\dontrun{
LoadData()
}
}
\seealso{
\code{\link[=tidy_Data]{tidy_Data()}}
}
|
#' @name deleteFromFileRepository
#' @title Delete a Single File from the File Repository
#'
#' @description Deletes a single file from the File Repository based on
#' its document ID.
#'
#' @param rcon A \code{redcapConnection} object.
#' @param doc_id \code{integerish(1)} The document ID of the file to be
#' deleted.
#' @param ... Arguments to pass to other methods.
#' @param refresh \code{logical(1)} When \code{TRUE} (default), the cached
#' File Repository data on \code{rcon} will be refreshed.
#' @param error_handling An option for how to handle errors returned by the API.
#' see \code{\link{redcapError}}
#' @param config \code{list} Additional configuration parameters to pass to
#' \code{\link[httr]{POST}}. These are appended to any parameters in
#' \code{rcon$config}.
#' @param api_param \code{list} Additional API parameters to pass into the
#' body of the API call. This allows users to execute calls with options
#' that may not otherwise be supported by \code{redcapAPI}.
#'
#' @return A \code{data.frame} with columns \code{directory} and
#' \code{filename} giving the File Repository location of the deleted file.
#'
#' @details This method allows you to delete a single file in a project's
#' File Repository. Once deleted, the file will remain in the Recycle Bin
#' folder for up to 30 days.
#'
#' @author Benjamin Nutter
#'
#' @export
# S3 generic: dispatches on the class of `rcon`.
deleteFromFileRepository <- function(rcon,
doc_id,
...){
UseMethod("deleteFromFileRepository")
}
#' @rdname deleteFromFileRepository
#' @export
# Method for a live REDCap API connection: validates inputs, confirms the
# doc_id exists in the cached File Repository, issues the delete call, and
# returns the deleted file's directory and filename as a data.frame.
deleteFromFileRepository.redcapApiConnection <- function(rcon,
doc_id,
...,
refresh = TRUE,
error_handling = getOption("redcap_error_handling"),
config = list(),
api_param = list()){
# Argument Validation ---------------------------------------------
# All checks accumulate into one collection so the caller sees every
# problem in a single report.
coll <- checkmate::makeAssertCollection()
checkmate::assert_class(x = rcon,
classes = "redcapApiConnection",
add = coll)
checkmate::assert_integerish(x = doc_id,
len = 1,
any.missing = FALSE,
add = coll)
checkmate::assert_logical(x = refresh,
len = 1,
add = coll)
error_handling <- checkmate::matchArg(x = error_handling,
choices = c("null", "error"),
.var.name = "error_handling",
add = coll)
checkmate::assert_list(x = config,
names = "named",
add = coll)
checkmate::assert_list(x = api_param,
names = "named",
add = coll)
checkmate::reportAssertions(coll)
# Verify doc_id refers to a file in the cached File Repository listing;
# report the failure in the same style as the argument assertions above.
FileRepo <- rcon$fileRepository()
if (!doc_id %in% FileRepo$doc_id){
coll$push(sprintf("doc_id (%s) not found in the File Repository",
doc_id))
checkmate::reportAssertions(coll)
}
# Get the file path of the file repository file -------------------
file_path <- fileRepositoryPath(doc_id = doc_id,
fileRepo = FileRepo)
# Build the Body List ---------------------------------------------
body <- list(content = "fileRepository",
action = "delete",
returnFormat = "csv",
doc_id = doc_id)
# Drop empty elements so they are not sent in the request body.
body <- body[lengths(body) > 0]
# Make the API Call -----------------------------------------------
response <- makeApiCall(rcon,
body = c(body, api_param),
config = config)
# Non-200 responses are handed to redcapError, which honors the user's
# error_handling choice.
if (response$status_code != 200){
redcapError(response,
error_handling = error_handling)
}
message(sprintf("File deleted: %s", file_path))
# Refresh the cached File Repository ------------------------------
if (refresh && rcon$has_fileRepository()){
rcon$refresh_fileRepository()
}
# Return the directory and filename of the deleted file.
data.frame(directory = dirname(file_path),
filename = basename(file_path),
stringsAsFactors = FALSE)
}
|
/R/deleteFromFileRepository.R
|
no_license
|
nutterb/redcapAPI
|
R
| false
| false
| 4,444
|
r
|
#' @name deleteFromFileRepository
#' @title Delete a Single File from the File Repository
#'
#' @description Deletes a single file from the File Repository based on
#' its document ID.
#'
#' @param rcon A \code{redcapConnection} object.
#' @param doc_id \code{integerish(1)} The document ID of the file to be
#' deleted.
#' @param ... Arguments to pass to other methods.
#' @param refresh \code{logical(1)} When \code{TRUE} (default), the cached
#' File Repository data on \code{rcon} will be refreshed.
#' @param error_handling An option for how to handle errors returned by the API.
#' see \code{\link{redcapError}}
#' @param config \code{list} Additional configuration parameters to pass to
#' \code{\link[httr]{POST}}. These are appended to any parameters in
#' \code{rcon$config}.
#' @param api_param \code{list} Additional API parameters to pass into the
#' body of the API call. This allows users to execute calls with options
#' that may not otherwise be supported by \code{redcapAPI}.
#'
#' @return A \code{data.frame} with columns \code{directory} and
#' \code{filename} giving the File Repository location of the deleted file.
#'
#' @details This method allows you to delete a single file in a project's
#' File Repository. Once deleted, the file will remain in the Recycle Bin
#' folder for up to 30 days.
#'
#' @author Benjamin Nutter
#'
#' @export
# S3 generic: dispatches on the class of `rcon`.
deleteFromFileRepository <- function(rcon,
doc_id,
...){
UseMethod("deleteFromFileRepository")
}
#' @rdname deleteFromFileRepository
#' @export
# Method for a live REDCap API connection: validates inputs, confirms the
# doc_id exists in the cached File Repository, issues the delete call, and
# returns the deleted file's directory and filename as a data.frame.
deleteFromFileRepository.redcapApiConnection <- function(rcon,
doc_id,
...,
refresh = TRUE,
error_handling = getOption("redcap_error_handling"),
config = list(),
api_param = list()){
# Argument Validation ---------------------------------------------
# All checks accumulate into one collection so the caller sees every
# problem in a single report.
coll <- checkmate::makeAssertCollection()
checkmate::assert_class(x = rcon,
classes = "redcapApiConnection",
add = coll)
checkmate::assert_integerish(x = doc_id,
len = 1,
any.missing = FALSE,
add = coll)
checkmate::assert_logical(x = refresh,
len = 1,
add = coll)
error_handling <- checkmate::matchArg(x = error_handling,
choices = c("null", "error"),
.var.name = "error_handling",
add = coll)
checkmate::assert_list(x = config,
names = "named",
add = coll)
checkmate::assert_list(x = api_param,
names = "named",
add = coll)
checkmate::reportAssertions(coll)
# Verify doc_id refers to a file in the cached File Repository listing;
# report the failure in the same style as the argument assertions above.
FileRepo <- rcon$fileRepository()
if (!doc_id %in% FileRepo$doc_id){
coll$push(sprintf("doc_id (%s) not found in the File Repository",
doc_id))
checkmate::reportAssertions(coll)
}
# Get the file path of the file repository file -------------------
file_path <- fileRepositoryPath(doc_id = doc_id,
fileRepo = FileRepo)
# Build the Body List ---------------------------------------------
body <- list(content = "fileRepository",
action = "delete",
returnFormat = "csv",
doc_id = doc_id)
# Drop empty elements so they are not sent in the request body.
body <- body[lengths(body) > 0]
# Make the API Call -----------------------------------------------
response <- makeApiCall(rcon,
body = c(body, api_param),
config = config)
# Non-200 responses are handed to redcapError, which honors the user's
# error_handling choice.
if (response$status_code != 200){
redcapError(response,
error_handling = error_handling)
}
message(sprintf("File deleted: %s", file_path))
# Refresh the cached File Repository ------------------------------
if (refresh && rcon$has_fileRepository()){
rcon$refresh_fileRepository()
}
# Return the directory and filename of the deleted file.
data.frame(directory = dirname(file_path),
filename = basename(file_path),
stringsAsFactors = FALSE)
}
|
###--------------------------------------------------------------------------###
### Author: Arman Oganisian
### Conduct partial pooling analysis
###--------------------------------------------------------------------------###
## Load Packages
# rstan: model compilation/sampling; LaplacesDemon: rbern()/invlogit();
# latex2exp: TeX() axis labels used in the plot below.
library(rstan)
library(LaplacesDemon)
library(latex2exp)
set.seed(66)
####------------------------ Simulate Data ---------------------------------####
N = 500 # sample size
warmup = 1000
iter = 2000
# Number of retained posterior draws per chain.
n_draws = iter - warmup
#simulate standard normal confounder (L), dose (A), and outcome (Y)
W = rnorm(n = N)
Wmat = cbind(1, W)
# Stratum membership V in 1..5 with unequal probabilities (strata 4-5 small).
V = sample(x = 1:5, size = N, replace = T, prob = c(3/10,3/10,2/10,1/10,1/10))
# Stratum-specific intercept shifts in the treatment-assignment model.
bv = c(0, -.5, .5, .5, -.5)
A = rbern(N, prob = invlogit( 0 + 1*W + bv[V] ) )
# Stratum-specific treatment effects in the outcome model.
theta_v = 1 + c(0, -.5, 0, .5, .6)
Y = rbern(N, prob = invlogit( -1 + 1*W + theta_v[V]*A ) )
## sample size in each stratum
n_v = as.numeric(table(V))
## get indices of each stratum
# NOTE(review): ind_list is computed but never used below; stan_data derives
# stratum boundaries from cumsum(n_v) instead.
ind_list = lapply(sort(unique(V)), function(v) c(1:N)[V==v] )
# Observations are sorted by stratum so the Stan model can index contiguous
# blocks via the cumulative `ind` offsets.
stan_data = list(Y=Y[order(V)], A=A[order(V)], V=V[order(V)],
W = Wmat[order(V), ],
Pw = ncol(Wmat),
Pv = length(unique(V)),
N=N, n_v=n_v,
ind = c(0, cumsum(n_v)))
####------------------------ Sample Posterior ---------------------------####
# Requires partial_pool.stan to be present in the working directory.
partial_pool_model = stan_model(file = "partial_pool.stan")
stan_res = sampling(partial_pool_model, data = stan_data,
pars = c("odds_ratio", 'mu', "overall_odds_ratio" ),
warmup = warmup, iter = iter, chains=1, seed=1)
Psi_draws = extract(stan_res, pars='odds_ratio')[[1]]
overall_mean = extract(stan_res, pars='overall_odds_ratio')[[1]]
####------------------- Compute Frequentist Estimates --------------------####
vfac = factor(V)
## estimate outcome model, which we'll then integrate over p(W)
freq_reg = glm(Y ~ W + vfac + A + vfac*A, family=binomial('logit') )
## loop through strata of interest
Psi_freq = numeric(5) ## shell to contain causal odds ratios
for(vf in 1:5){
vval= as.factor(vf)
## standardize over empirical distribution p(W)
p1 = predict(freq_reg, data.frame(W, vfac=vval, A=1 ), type='response')
p0 = predict(freq_reg, data.frame(W, vfac=vval, A=0 ), type='response')
## take mean over empirical distribution among V=v
marg_mean_y1 = mean( p1[V==vf] )
marg_mean_y0 = mean( p0[V==vf] )
## compute causal odds ratio
Psi_freq[vf] = (marg_mean_y1/(1-marg_mean_y1)) / (marg_mean_y0/(1-marg_mean_y0))
}
####------------------- Plot Results --------------------####
v_strata = 1:length(unique(V))
post_mean = colMeans(Psi_draws)
png("ppooling_plot.png", width = 600, height = 500)
plot(post_mean, pch=20, col='blue', ylim=c(0,5),
ylab=TeX("$\\Psi(v)$"), xlab=TeX("$V$"), axes=F )
axis_labs = paste0(v_strata, "\n(n=",table(V),")")
axis(1, at = 1:5, labels = axis_labs, padj = .5 )
axis(2, at = 0:5, labels = 0:5 )
### Plot posterior credible Intervals
# Draw nested credible intervals (99% down to 1%) as a color-gradient band.
colfunc <- colorRampPalette(c("white", "lightblue"))
ci_perc = seq(.99,.01,-.01)
colvec = colfunc(length(ci_perc))
names(colvec) = ci_perc
for(i in ci_perc){
pci = apply(Psi_draws, 2, quantile, probs=c( (1-i)/2, (1+i)/2 ) )
segments(1:5,pci[1,], 1:5, pci[2,], col=colvec[as.character(i)], lwd=10 )
}
###
points(post_mean, col='steelblue', pch=20, lwd=8)
points(Psi_freq, col='black', pch=20, lwd=5)
abline(h= mean(overall_mean), col='steelblue', lty=2)
legend('topleft',
legend = c('Posterior Mean/Credible Band', 'MLE', "Overall Effect" ),
col = c('steelblue', 'black', 'steelblue' ), pch=c(20,20,NA),
lty = c(NA,NA,2), bty='n')
dev.off()
|
/code/CATE_example.R
|
no_license
|
JosueMA/StanCon2020_BayesCausal
|
R
| false
| false
| 3,671
|
r
|
###--------------------------------------------------------------------------###
### Author: Arman Oganisian
### Conduct partial pooling analysis
###--------------------------------------------------------------------------###
## Load Packages
# rstan: model compilation/sampling; LaplacesDemon: rbern()/invlogit();
# latex2exp: TeX() axis labels used in the plot below.
library(rstan)
library(LaplacesDemon)
library(latex2exp)
set.seed(66)
####------------------------ Simulate Data ---------------------------------####
N = 500 # sample size
warmup = 1000
iter = 2000
# Number of retained posterior draws per chain.
n_draws = iter - warmup
#simulate standard normal confounder (L), dose (A), and outcome (Y)
W = rnorm(n = N)
Wmat = cbind(1, W)
# Stratum membership V in 1..5 with unequal probabilities (strata 4-5 small).
V = sample(x = 1:5, size = N, replace = T, prob = c(3/10,3/10,2/10,1/10,1/10))
# Stratum-specific intercept shifts in the treatment-assignment model.
bv = c(0, -.5, .5, .5, -.5)
A = rbern(N, prob = invlogit( 0 + 1*W + bv[V] ) )
# Stratum-specific treatment effects in the outcome model.
theta_v = 1 + c(0, -.5, 0, .5, .6)
Y = rbern(N, prob = invlogit( -1 + 1*W + theta_v[V]*A ) )
## sample size in each stratum
n_v = as.numeric(table(V))
## get indices of each stratum
# NOTE(review): ind_list is computed but never used below; stan_data derives
# stratum boundaries from cumsum(n_v) instead.
ind_list = lapply(sort(unique(V)), function(v) c(1:N)[V==v] )
# Observations are sorted by stratum so the Stan model can index contiguous
# blocks via the cumulative `ind` offsets.
stan_data = list(Y=Y[order(V)], A=A[order(V)], V=V[order(V)],
W = Wmat[order(V), ],
Pw = ncol(Wmat),
Pv = length(unique(V)),
N=N, n_v=n_v,
ind = c(0, cumsum(n_v)))
####------------------------ Sample Posterior ---------------------------####
# Requires partial_pool.stan to be present in the working directory.
partial_pool_model = stan_model(file = "partial_pool.stan")
stan_res = sampling(partial_pool_model, data = stan_data,
pars = c("odds_ratio", 'mu', "overall_odds_ratio" ),
warmup = warmup, iter = iter, chains=1, seed=1)
Psi_draws = extract(stan_res, pars='odds_ratio')[[1]]
overall_mean = extract(stan_res, pars='overall_odds_ratio')[[1]]
####------------------- Compute Frequentist Estimates --------------------####
vfac = factor(V)
## estimate outcome model, which we'll then integrate over p(W)
freq_reg = glm(Y ~ W + vfac + A + vfac*A, family=binomial('logit') )
## loop through strata of interest
Psi_freq = numeric(5) ## shell to contain causal odds ratios
for(vf in 1:5){
vval= as.factor(vf)
## standardize over empirical distribution p(W)
p1 = predict(freq_reg, data.frame(W, vfac=vval, A=1 ), type='response')
p0 = predict(freq_reg, data.frame(W, vfac=vval, A=0 ), type='response')
## take mean over empirical distribution among V=v
marg_mean_y1 = mean( p1[V==vf] )
marg_mean_y0 = mean( p0[V==vf] )
## compute causal odds ratio
Psi_freq[vf] = (marg_mean_y1/(1-marg_mean_y1)) / (marg_mean_y0/(1-marg_mean_y0))
}
####------------------- Plot Results --------------------####
v_strata = 1:length(unique(V))
post_mean = colMeans(Psi_draws)
png("ppooling_plot.png", width = 600, height = 500)
plot(post_mean, pch=20, col='blue', ylim=c(0,5),
ylab=TeX("$\\Psi(v)$"), xlab=TeX("$V$"), axes=F )
axis_labs = paste0(v_strata, "\n(n=",table(V),")")
axis(1, at = 1:5, labels = axis_labs, padj = .5 )
axis(2, at = 0:5, labels = 0:5 )
### Plot posterior credible Intervals
# Draw nested credible intervals (99% down to 1%) as a color-gradient band.
colfunc <- colorRampPalette(c("white", "lightblue"))
ci_perc = seq(.99,.01,-.01)
colvec = colfunc(length(ci_perc))
names(colvec) = ci_perc
for(i in ci_perc){
pci = apply(Psi_draws, 2, quantile, probs=c( (1-i)/2, (1+i)/2 ) )
segments(1:5,pci[1,], 1:5, pci[2,], col=colvec[as.character(i)], lwd=10 )
}
###
points(post_mean, col='steelblue', pch=20, lwd=8)
points(Psi_freq, col='black', pch=20, lwd=5)
abline(h= mean(overall_mean), col='steelblue', lty=2)
legend('topleft',
legend = c('Posterior Mean/Credible Band', 'MLE', "Overall Effect" ),
col = c('steelblue', 'black', 'steelblue' ), pch=c(20,20,NA),
lty = c(NA,NA,2), bty='n')
dev.off()
|
#' @method print cv.relaxed
#' @export
# Print method for relaxed cross-validated glmnet fits.
#
# Prints the originating call, the name of the CV measure, and a two-row
# summary table of the "min" (minimum CV error) and "1se" (one-standard-error
# rule) optima: gamma, lambda, the CV measure, its SE, and the nonzero count.
#
# Fixes relative to the previous version: removes the dead `optlams` local,
# uses `<-` assignment and descriptive names, and documents the logic.
# Behavior and printed output are unchanged.
print.cv.relaxed <- function(x, digits = max(3, getOption("digits") - 3), ...)
{
  cat("\nCall: ", deparse(x$call), "\n\n")
  cat("Measure:", x$name,"\n\n")
  relaxed <- x$relaxed
  # Build one summary row: look up the statlist entry for the chosen gamma,
  # then pull the CV mean/SE at the chosen lambda within that entry.
  stat_row <- function(gamma_opt, lambda_opt, nzero_opt) {
    stats <- relaxed$statlist[[match(gamma_opt, relaxed$gamma)]]
    at <- match(lambda_opt, stats$lambda)
    c(gamma_opt, lambda_opt, stats$cvm[at], stats$cvsd[at], nzero_opt)
  }
  summary_mat <- rbind(
    stat_row(relaxed$gamma.min, relaxed$lambda.min, relaxed$nzero.min),
    stat_row(relaxed$gamma.1se, relaxed$lambda.1se, relaxed$nzero.1se)
  )
  dimnames(summary_mat) <- list(c("min", "1se"),
                                c("Gamma", "Lambda", "Measure", "SE", "Nonzero"))
  out <- data.frame(summary_mat, check.names = FALSE)
  # Reuse the anova print method for compact, aligned numeric formatting.
  class(out) <- c("anova", class(out))
  print(out, digits = digits)
}
|
/R/print.cv.relaxed.R
|
no_license
|
nfultz/glmnet-mirror
|
R
| false
| false
| 831
|
r
|
#' @method print cv.relaxed
#' @export
# Print method for relaxed cross-validated glmnet fits.
#
# Prints the originating call, the name of the CV measure, and a two-row
# summary table of the "min" (minimum CV error) and "1se" (one-standard-error
# rule) optima: gamma, lambda, the CV measure, its SE, and the nonzero count.
#
# Fixes relative to the previous version: removes the dead `optlams` local,
# uses `<-` assignment and descriptive names, and documents the logic.
# Behavior and printed output are unchanged.
print.cv.relaxed <- function(x, digits = max(3, getOption("digits") - 3), ...)
{
  cat("\nCall: ", deparse(x$call), "\n\n")
  cat("Measure:", x$name,"\n\n")
  relaxed <- x$relaxed
  # Build one summary row: look up the statlist entry for the chosen gamma,
  # then pull the CV mean/SE at the chosen lambda within that entry.
  stat_row <- function(gamma_opt, lambda_opt, nzero_opt) {
    stats <- relaxed$statlist[[match(gamma_opt, relaxed$gamma)]]
    at <- match(lambda_opt, stats$lambda)
    c(gamma_opt, lambda_opt, stats$cvm[at], stats$cvsd[at], nzero_opt)
  }
  summary_mat <- rbind(
    stat_row(relaxed$gamma.min, relaxed$lambda.min, relaxed$nzero.min),
    stat_row(relaxed$gamma.1se, relaxed$lambda.1se, relaxed$nzero.1se)
  )
  dimnames(summary_mat) <- list(c("min", "1se"),
                                c("Gamma", "Lambda", "Measure", "SE", "Nonzero"))
  out <- data.frame(summary_mat, check.names = FALSE)
  # Reuse the anova print method for compact, aligned numeric formatting.
  class(out) <- c("anova", class(out))
  print(out, digits = digits)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitSexMix.R
\name{fitSexMix}
\alias{fitSexMix}
\title{simple function for plotting a mixture model for sex from XY stats}
\usage{
fitSexMix(x, ...)
}
\arguments{
\item{x}{a GenomicRatioSet, or an XYstats matrix}
\item{...}{arguments to pass to Mclust}
}
\value{
\if{html}{\out{<div class="sourceCode">}}\preformatted{ an mclust fit object with $sex as fitted sex
}\if{html}{\out{</div>}}
}
\description{
simple function for plotting a mixture model for sex from XY stats
}
|
/man/fitSexMix.Rd
|
no_license
|
ttriche/miser
|
R
| false
| true
| 554
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitSexMix.R
\name{fitSexMix}
\alias{fitSexMix}
\title{simple function for plotting a mixture model for sex from XY stats}
\usage{
fitSexMix(x, ...)
}
\arguments{
\item{x}{a GenomicRatioSet, or an XYstats matrix}
\item{...}{arguments to pass to Mclust}
}
\value{
\if{html}{\out{<div class="sourceCode">}}\preformatted{ an mclust fit object with $sex as fitted sex
}\if{html}{\out{</div>}}
}
\description{
simple function for plotting a mixture model for sex from XY stats
}
|
##### STA243 HW5
### STA 243 HM 5 Tongbi Tu & Yuan Chen
## Problem 1
#(a)
#Implement the cross-validation and generalized cross-validation methods for choosing the smoothing parameter .
#(b)
# use the AICc criterion
# Simulation study comparing CV, GCV, AICc and Cp-style risk criteria for
# choosing the penalty of a penalized cubic truncated-power spline smoother.
library(MASS)
# Global simulation settings and shared random draws.
n = 200
# Fixed equispaced design points on (0, 1).
xi = seq(0.5/n, 199.5/n, by = 1/n)
# Uniform draws used to build random (beta-distributed) designs via F_inv().
Xi = runif(n, 0, 1)
# Shared standard-normal noise vector (default `epsilon` of the y* generators).
e = rnorm(n,0,1)
# Standard normal probability density function.
phi <- function(x) {
  exp(-0.5 * x^2) / sqrt(2 * pi)
}
# Two-bump test curve: a broad peak near 0.35 minus a sharp dip near 0.8.
f <- function(x) {
  peak <- 1.5 * phi((x - 0.35) / 0.15)
  dip <- phi((x - 0.8) / 0.04)
  peak - dip
}
# Noise standard deviation for setting j; grows quadratically from 0.02.
sigmanj <- function(j) {
  base_sd <- 0.02
  base_sd + 0.04 * (j - 1)^2
}
# Quantile (inverse-CDF) transform mapping uniforms to a Beta design density
# whose shape depends on the setting j.
F_inv <- function(Xi, j = 1) {
  shape_a <- (j + 4) / 5
  shape_b <- (11 - j) / 5
  qbeta(Xi, shape1 = shape_a, shape2 = shape_b)
}
# Spatially varying smoothness target: an oscillating curve under a
# sqrt(x(1 - x)) envelope; smaller shift (larger j) means faster oscillation.
fj <- function(x, j = 1) {
  shift <- 2^((9 - 4 * j) / 5)
  envelope <- sqrt(x * (1 - x))
  envelope * sin(2 * pi * (1 + shift) / (x + shift))
}
# Scenario-4 noise variance: square of a standard deviation that is linear
# in x with slope controlled by the setting j.
vj <- function(x, j = 1) {
  sd_curve <- 0.15 * (1 + 0.4 * (2 * j - 7) * (x - 0.5))
  sd_curve^2
}
# Response generators for the four scenarios; each adds scenario-specific
# noise to the true curve. `epsilon` defaults to the global draw `e`.
# Scenario 1: varying noise level sigmanj(j).
yn = function(x, j = 1, epsilon = e){
f(x) + sigmanj(j) * epsilon
}
# Scenario 2: varying design density (x drawn via F_inv), fixed noise sd 0.1.
yd = function(Xji, j = 1, epsilon = e){
f(Xji) + 0.1 * epsilon
}
# Scenario 3: spatially varying smoothness target fj, fixed noise sd 0.2.
ys = function(x, j = 1, epsilon = e){
fj(x, j) + 0.2 * epsilon
}
# Scenario 4: heteroscedastic noise with variance vj(x, j).
yv = function(x, j = 1, epsilon = e){
f(x) + sqrt(vj(x, j)) * epsilon
}
# 30 interior knots for the cubic truncated power basis.
# NOTE(review): `t` shadows base::t here; t(...) calls below still dispatch
# to the base function because R skips non-function bindings when calling.
t = seq(0, 1, length.out = 32)[2:31]
# Design matrix of the basis (1, x, x^2, x^3, (x - t_k)_+^3) evaluated at xi.
X = t(sapply(xi, function(x){
xk = ifelse(x > t, x - t, 0)
c(1, x, x^2, x^3, xk^3)
}))
# Penalty matrix: penalize only the 30 knot coefficients, not the polynomial.
D = diag(c(rep(0, 4), rep(1, 30)))
# Smoother ("hat") matrix for penalty lambda; reads X and D from the globals.
H_lambda = function(lambda = 0){
X %*% solve(t(X) %*% X + lambda * D) %*% t(X)
}
# Unpenalized hat matrix (lambda = 0), used by the risk criterion below.
H = X %*% solve(t(X) %*% X) %*% t(X)
# Leave-one-out cross-validation score.
# NOTE(review): H_lambda(lambda) is recomputed twice per call here (and in
# gcv); hoisting it into a local would halve the cost.
cv = function(y, lambda){
yhat = H_lambda(lambda) %*% y
h = diag(H_lambda(lambda))
sum(((y-yhat)/(1-h))^2)
}
# Generalized cross-validation: replaces leverages with their average tr(H)/n.
gcv = function(y, lambda){
yhat = H_lambda(lambda) %*% y
h = sum(diag(H_lambda(lambda)))/200
sum(((y-yhat)/(1-h))^2)
}
# Corrected AIC (AICc) with effective degrees of freedom tr(H_lambda).
aicc = function(y, lambda){
yhat = H_lambda(lambda) %*% y
tr = sum(diag(H_lambda(lambda)))
log(sum((y-yhat)^2)) + 2*(tr + 1)/(200 - tr - 2)
}
# Cp-style unbiased risk estimate; sigma^2 is estimated from the lambda = 0 fit.
risk = function(y, lambda){
yhat = H %*% y
rss = sum((y - yhat)^2)
sum((y - H_lambda(lambda) %*% y)^2) + rss/(200 - sum(diag(H))) * (2* sum(diag(H_lambda(lambda))) - 200)
}
# Squared error of the penalized fit against the true function on xi.
mse = function(f, lambda, y){
sum((f(xi) - H_lambda(lambda) %*% y)^2)
}
# One replicate: draw fresh noise, pick lambda by each criterion, and return
# each criterion's MSE relative to the oracle (MSE-minimizing) lambda.
# NOTE(review): the default `f = f` is self-referential and would error if
# `f` were not supplied explicitly; all call sites below do supply it.
algorithm = function(j = 1, x = xi, f_y = yn, f = f){
e = rnorm(200,0,1)
y = f_y(x, j, e)
lam = sapply(c(cv, gcv, aicc, risk), function(method) optimize(function(l) method(y, l), c(0, 10))$minimum)
mse_min = optimize(function(l) mse(f, l, y), c(0, 10))$objective
sapply(lam, function(l) mse(f, l, y))/mse_min
}
# 200 replicates for each scenario x setting; each element is a 4 x 200 matrix
# of relative MSEs (rows: CV, GCV, AICc, Cp).
r1 = lapply(1:6, function(j) sapply(1:200, function(i) algorithm(j, xi, yn, f)))
r2 = lapply(1:6, function(j){
xi = sort(F_inv(Xi, j))
X = t(sapply(xi, function(x){
xk = ifelse(x > t, x - t, 0)
c(1, x, x^2, x^3, xk^3)
}))
H = X %*% ginv(t(X) %*% X) %*% t(X)
# NOTE(review): the local X and H above do NOT affect H_lambda()/algorithm(),
# which resolve X and H lexically in the global environment — the
# random-design basis computed here is silently ignored; confirm intent.
sapply(1:200, function(i) algorithm(j, xi, yd, f))
})
r3 = lapply(1:6, function(j) sapply(1:200, function(i) algorithm(j, xi, ys, function(x) fj(x, j))))
r4 = lapply(1:6, function(j) sapply(1:200, function(i) algorithm(j, xi, yv, f)))
# For each scenario: plot the data with the true curve next to boxplots of
# the log relative MSE per selection criterion.
par(mar=c(1,1,1,1))
par(mfrow = c(3, 4))
sapply(1:6, function(j){
plot(xi, yn(xi, j), pch = 1, main = paste0("j = ", j),cex = 0.1,ylab = '',xlab = '')
lines(xi, f(xi))
boxplot(t(log(r1[[j]])), xaxt = 'n',ylab = 'r', xlab = 'criteria')
axis(1, at = 1:4 , labels = c('CV','GCV','AICc','Cp'))
})
sapply(1:6, function(j){
plot(F_inv(Xi, j), yd(F_inv(Xi, j), j), pch = 1, main = paste0("j = ", j),cex = 0.1,ylab = '',xlab = '')
lines(sort(F_inv(Xi, j)), f(sort(F_inv(Xi, j))))
boxplot(t(log(r2[[j]])), xaxt = 'n',ylab = 'r', xlab = 'criteria')
axis(1, at = 1:4 , labels = c('CV','GCV','AICc','Cp'))
})
sapply(1:6, function(j){
plot(xi, ys(xi, j), pch = 1, main = paste0("j = ", j),cex = 0.1,ylab = '',xlab = '')
lines(xi, fj(xi, j))
boxplot(t(log(r3[[j]])), xaxt = 'n',ylab = 'r', xlab = 'criteria')
axis(1, at = 1:4 , labels = c('CV','GCV','AICc','Cp'))
})
sapply(1:6, function(j){
plot(xi, yv(xi, j), pch = 1, main = paste0("j = ", j),cex = 0.1,ylab = '',xlab = '')
lines(xi, f(xi))
boxplot(t(log(r4[[j]])), xaxt = 'n',ylab = 'r', xlab = 'criteria')
axis(1, at = 1:4 , labels = c('CV','GCV','AICc','Cp'))
})
###generate data
# NOTE(review): hard-coded setwd() makes this script non-portable; prefer
# running from the project directory or using relative paths.
setwd("~/Desktop/STA_2017_spring/STA243")
# Standard normal density (redefines the phi() from the first implementation).
phi=function(x) {1/sqrt(2*pi)*exp(-x^2/2)}
# True regression function for scenario i: the spatial scenario (i == 3)
# uses the oscillating fj-style curve; all other scenarios share the
# two-bump curve.
f_fun=function(x,i,j)
{
if(i==3)
{
sqrt(x*(1-x))*sin(2*pi*(1+2^((9-4*j)/5))/(x+2^((9-4*j)/5)))
}
else
{
1.5*phi((x-0.35)/0.15)-phi((x-0.8)/0.04)
}
}
# Scenario 1: equispaced design, noise sd growing quadratically with j.
generatenoisedata = function(j,err) {
sigma = 0.02 + 0.04*(j-1)^2
x = (c(1:200)-0.5)/200
y = f_fun(x,1,j) + sigma*err
return(data.frame(x,y))
}
# Scenario 2: beta-distributed random design, fixed noise sd 0.1.
generatedensitydata = function(j,err) {
sigma = 0.1
X = sort(runif(200,0,1))
x= qbeta(X,(j+4)/5, (11-j)/5)
y = f_fun(x,2,j) +sigma*err
return(data.frame(x,y))
}
# Scenario 3: spatially varying smoothness target, fixed noise sd 0.2.
generatespatialdata=function(j,err)
{
x=((1:200)-0.5)/200
sigma=0.2
y=f_fun(x,3,j)+sigma*err
data.frame(x,y)
}
# Scenario 4: heteroscedastic noise, sd = |0.15 (1 + 0.4 (2j - 7)(x - 0.5))|.
generatevariancedata=function(j,err)
{
x=((1:200)-0.5)/200
y=f_fun(x,4,j)+abs(0.15*(1+0.4*(2*j-7)*(x-0.5)))*err
data.frame(x,y)
}
# Generate all 4 scenarios x 6 settings reusing one shared noise vector.
generatealldata=function()
{
err=rnorm(200,0,1)
noisedata = lapply(1:6,function(j) generatenoisedata(j,err))
densitydata=lapply(1:6,function(j) generatedensitydata(j,err))
spatialdata=lapply(1:6,function(j) generatespatialdata(j,err))
variancedata=lapply(1:6,function(j) generatevariancedata(j,err))
output=list()
output$noisedata=noisedata
output$densitydata=densitydata
output$spatialdata=spatialdata
output$variancedata=variancedata
output
}
# Interior knots and penalty matrix for the second implementation.
# NOTE(review): `t` again shadows base::t (as in the first implementation);
# t(...) calls below still resolve to the base function.
t=(1:30)/31
D=diag(c(rep(0,4),rep(1,30)))
# Smoother (hat) matrix built from a truncated power basis on data$x.
# NOTE(review): this redefines H, which was a matrix in the first
# implementation above; here it is a function of (data, lambda).
H=function(data,lambda)
{
# Positive-part helper: (u)_+ = max(u, 0), applied elementwise.
sig=function(x)
{
x[x<0]=0
x
}
x=data$x
x_mat=data.frame(int=1,x,x^2,x^3)
for(i in 1:length(t))
{
x_mat=cbind(x_mat,sig(x-t[i])^3)
}
x_mat=as.matrix(x_mat)
x_mat%*%solve(t(x_mat)%*%x_mat+lambda*D)%*%t(x_mat)
}
# Leave-one-out cross-validation score (mean form).
CV=function(lambda,data)
{
H_mat=H(data,lambda)
f_hat=H_mat%*%data$y
1/200*(sum(((data$y-as.numeric(f_hat))/(1-diag(H_mat)))^2))
}
# Generalized cross-validation score.
GCV = function(lambda, data) {
H_mat=H(data,lambda)
f_hat=as.numeric(H_mat%*%data$y)
1/200*sum((data$y-f_hat)^2)/((1-1/200*sum(diag(H_mat)))^2)
}
# AICc criterion; note 198 - tr equals n - tr - 2 with n = 200, matching the
# first implementation's aicc().
# NOTE(review): this masks stats::AIC for the remainder of the session.
AIC=function(lambda,data)
{
H_mat=H(data,lambda)
f_hat=H_mat%*%data$y
tr=sum(diag(H_mat))
log(sum((data$y-f_hat)^2))+2*(tr+1)/(198-tr)
}
# Cp-style risk estimate; sigma^2 estimated with 200 - 34 residual degrees of
# freedom (34 = number of basis columns).
RISK=function(lambda,data)
{
H_mat=H(data,lambda)
f_hat=H_mat%*%data$y
MSE=sum((data$y-f_hat)^2)/(200-34)
sum((data$y-f_hat)^2)+MSE*(2*sum(diag(H_mat))-200)
}
# Squared distance between the fitted curve and the true curve ftrue.
deviate=function(lambda,data,ftrue)
{
H_mat=H(data,lambda)
f_hat=H_mat%*%data$y
sum((ftrue-f_hat)^2)
}
# Main simulation: 200 replicates x 4 scenarios x 6 settings; for each, pick
# lambda by each criterion and record the MSE ratio r relative to the oracle.
# NOTE(review): growing `output` with rbind() inside the loop is O(n^2);
# collecting rows in a list and binding once at the end would be much faster.
output=data.frame()
for(k in 1:200)
{
dataset=generatealldata()
for(i in 1:4)
{
for(j in 1:6)
{
data=dataset[[i]][[j]]
ftrue=f_fun(data$x,i,j)
# Oracle: best achievable deviation over lambda in (0, 1).
mindevi=optimize(deviate,c(0,1),data,ftrue,maximum=FALSE)$objective
cv_opt=optimize(CV,c(0,1),data,maximum=FALSE)$minimum
cv_f_hat=H(data,cv_opt)%*%data$y
cv_r=sum((ftrue-cv_f_hat)^2)/mindevi
print(data.frame(k,i,j,method="cv",r=cv_r))
output=rbind(output,data.frame(k,i,j,method="cv",r=cv_r))
gcv_opt=optimize(GCV,c(0,1),data,maximum=FALSE)$minimum
gcv_f_hat=H(data,gcv_opt)%*%data$y
gcv_r=sum((ftrue-gcv_f_hat)^2)/mindevi
print(data.frame(k,i,j,method="gcv",r=gcv_r))
output=rbind(output,data.frame(k,i,j,method="gcv",r=gcv_r))
aic_opt=optimize(AIC,c(0,1),data,maximum=FALSE)$minimum
aic_f_hat=H(data,aic_opt)%*%data$y
aic_r=sum((ftrue-aic_f_hat)^2)/mindevi
print(data.frame(k,i,j,method="aic",r=aic_r))
output=rbind(output,data.frame(k,i,j,method="aic",r=aic_r))
risk_opt=optimize(RISK,c(0,1),data,maximum=FALSE)$minimum
risk_f_hat=H(data,risk_opt)%*%data$y
risk_r=sum((ftrue-risk_f_hat)^2)/mindevi
print(data.frame(k,i,j,method="risk",r=risk_r))
output=rbind(output,data.frame(k,i,j,method="risk",r=risk_r))
}
}
}
write.csv(output,'output.csv')
output=read.csv('output.csv')
# NOTE(review): `names` masks base::names from here on.
names=c("noise data","density data","spatial data","variance data")
dataset=generatealldata()
par(mfrow=c(3,4))
# For each scenario, plot the simulated data with the true curve alongside
# boxplots of log(r) per criterion (rows with r < 1 — possible from
# optimize()'s tolerance on the oracle — are dropped by the filter).
for(i in 1:4)
{
for(j in 1:6)
{
print(plot(dataset[[i]][[j]],main=paste('j=',j)))
print(lines(x=dataset[[i]][[j]]$x,y=f_fun(dataset[[i]][[j]]$x,i,j),col='red'))
print(boxplot(log(r)~method,data=output[output$r>=1&output$i==i&output$j==j,]))
}
print(title(names[i],outer=TRUE,line=-1))
}
|
/STA243_HW5_simulation study.R
|
no_license
|
anhnguyendepocen/Computational_Statistics
|
R
| false
| false
| 8,159
|
r
|
##### STA243 HW5
### STA 243 HM 5 Tongbi Tu & Yuan Chen
## Problem 1
#(a)
#Implement the cross-validation and generalized cross-validation methods for choosing the smoothing parameter .
#(b)
# use the AICc criterion
# MASS is needed for ginv() (used for the density-design hat matrix below).
library(MASS)
# Sample size and fixed equispaced design on (0, 1).
n = 200
xi = seq(0.5/n, 199.5/n, by = 1/n)
# Uniform draws used to build the random (density) design via a Beta quantile.
Xi = runif(n, 0, 1)
# Shared standard normal error vector (default noise for the y-generators).
e = rnorm(n,0,1)
# Standard normal density (equivalent to stats::dnorm(x) with defaults).
phi <- function(x) {
  inv_sqrt_2pi <- 1 / sqrt(2 * pi)
  inv_sqrt_2pi * exp(-0.5 * x^2)
}
# True bimodal test curve: a wide positive bump centered at 0.35 minus a
# narrow dip centered at 0.8 (scaled normal densities).
f <- function(x) {
  bump <- 1.5 * phi((x - 0.35) / 0.15)
  dip <- phi((x - 0.8) / 0.04)
  bump - dip
}
# Noise level for the "noise" scenario: grows quadratically in j
# (j = 1 -> 0.02, j = 6 -> 1.02).
sigmanj <- function(j) {
  base_sd <- 0.02
  base_sd + 0.04 * (j - 1) * (j - 1)
}
# Beta((j+4)/5, (11-j)/5) quantile function: maps Unif(0,1) draws Xi to the
# random design points for the "density" scenario.
F_inv <- function(Xi, j = 1) {
  a <- (j + 4) / 5
  b <- (11 - j) / 5
  stats::qbeta(Xi, shape1 = a, shape2 = b)
}
# Spatially inhomogeneous true curve for the "spatial" scenario: a damped
# sine whose local frequency is controlled by the shift 2^((9 - 4j)/5).
fj <- function(x, j = 1) {
  shift <- 2^((9 - 4 * j) / 5)
  sqrt(x * (1 - x)) * sin(2 * pi * (1 + shift) / (x + shift))
}
# Variance function for the heteroscedastic scenario: squared linear-in-x
# standard deviation whose slope sign/size depends on j.
vj <- function(x, j = 1) {
  sd_x <- 0.15 * (1 + 0.4 * (2 * j - 7) * (x - 0.5))
  sd_x^2
}
# Response generators for the four scenarios. Each adds noise to the relevant
# true curve; `epsilon` defaults to the global error vector e.
# Scenario 1 (noise): homoscedastic noise, level sigmanj(j).
yn = function(x, j = 1, epsilon = e){
f(x) + sigmanj(j) * epsilon
}
# Scenario 2 (density): fixed noise level 0.1; Xji are the Beta-design points.
yd = function(Xji, j = 1, epsilon = e){
f(Xji) + 0.1 * epsilon
}
# Scenario 3 (spatial): true curve fj depends on j; fixed noise level 0.2.
ys = function(x, j = 1, epsilon = e){
fj(x, j) + 0.2 * epsilon
}
# Scenario 4 (variance): heteroscedastic noise with variance vj(x, j).
yv = function(x, j = 1, epsilon = e){
f(x) + sqrt(vj(x, j)) * epsilon
}
# 30 interior knots on (0, 1). NOTE(review): this shadows base::t for value
# lookups; calls like t(X) still resolve to the transpose function.
t = seq(0, 1, length.out = 32)[2:31]
# Truncated-power cubic spline design matrix: columns 1, x, x^2, x^3 and
# (x - t_k)_+^3 for each knot (200 x 34).
X = t(sapply(xi, function(x){
xk = ifelse(x > t, x - t, 0)
c(1, x, x^2, x^3, xk^3)
}))
# Penalty matrix: only the 30 knot coefficients are penalized.
D = diag(c(rep(0, 4), rep(1, 30)))
# Penalized least-squares hat ("smoother") matrix for a given lambda.
H_lambda = function(lambda = 0){
X %*% solve(t(X) %*% X + lambda * D) %*% t(X)
}
# Unpenalized OLS hat matrix (used by the risk criterion's variance estimate).
H = X %*% solve(t(X) %*% X) %*% t(X)
# Leave-one-out cross-validation score via the hat-matrix shortcut.
# Fix: H_lambda(lambda) was evaluated twice per call; it builds and solves a
# 200 x 200 system, so hoist it into a single evaluation.
cv = function(y, lambda){
  Hl <- H_lambda(lambda)
  yhat <- Hl %*% y
  h <- diag(Hl)
  sum(((y - yhat) / (1 - h))^2)
}
# Generalized cross-validation score (trace of the smoother replaces the
# individual leverages). Fix: H_lambda(lambda) was evaluated twice; hoist it.
gcv = function(y, lambda){
  Hl <- H_lambda(lambda)
  yhat <- Hl %*% y
  h <- sum(diag(Hl)) / 200
  sum(((y - yhat) / (1 - h))^2)
}
# Corrected AIC criterion: log(RSS) + 2(tr + 1)/(n - tr - 2) with n = 200
# and tr the effective degrees of freedom of the smoother.
# Fix: H_lambda(lambda) was evaluated twice; hoist it.
aicc = function(y, lambda){
  Hl <- H_lambda(lambda)
  yhat <- Hl %*% y
  tr <- sum(diag(Hl))
  log(sum((y - yhat)^2)) + 2 * (tr + 1) / (200 - tr - 2)
}
# Mallows-Cp-type unbiased risk estimate. sigma^2 is estimated from the RSS
# of the unpenalized OLS spline fit (global hat matrix H), with residual
# df = 200 - tr(H). Fix: H_lambda(lambda) was evaluated twice; hoist it.
risk = function(y, lambda){
  Hl <- H_lambda(lambda)
  ols_rss <- sum((y - H %*% y)^2)
  sigma2 <- ols_rss / (200 - sum(diag(H)))
  sum((y - Hl %*% y)^2) + sigma2 * (2 * sum(diag(Hl)) - 200)
}
# Squared-error loss of the penalized fit (smoothing parameter lambda)
# against the true curve f evaluated on the global design grid xi.
mse <- function(f, lambda, y) {
  fitted <- H_lambda(lambda) %*% y
  sum((f(xi) - fitted)^2)
}
# One simulation replicate: draw fresh noise, generate y from generator f_y,
# select lambda by each criterion (cv, gcv, aicc, risk) and return each
# criterion's MSE relative to the oracle-optimal MSE (a vector of 4 ratios,
# each >= 1 up to optimizer tolerance).
# NOTE(review): the default `f = f` is self-referential -- evaluating it
# would raise a "promise already under evaluation" error. All callers below
# pass f explicitly, so the default is never exercised; confirm before
# relying on it.
algorithm = function(j = 1, x = xi, f_y = yn, f = f){
e = rnorm(200,0,1)
y = f_y(x, j, e)
lam = sapply(c(cv, gcv, aicc, risk), function(method) optimize(function(l) method(y, l), c(0, 10))$minimum)
mse_min = optimize(function(l) mse(f, l, y), c(0, 10))$objective
sapply(lam, function(l) mse(f, l, y))/mse_min
}
# 200 replicates per setting j for each scenario; each r* is a list of 4 x 200
# matrices of MSE ratios.
r1 = lapply(1:6, function(j) sapply(1:200, function(i) algorithm(j, xi, yn, f)))
r2 = lapply(1:6, function(j){
# Random (Beta-quantile) design for the density scenario.
xi = sort(F_inv(Xi, j))
# NOTE(review): these local X and H shadow the globals, but the criteria
# (cv/gcv/aicc/risk) close over the GLOBAL X via H_lambda, so these local
# assignments appear to have no effect on the selection -- confirm intent.
X = t(sapply(xi, function(x){
xk = ifelse(x > t, x - t, 0)
c(1, x, x^2, x^3, xk^3)
}))
H = X %*% ginv(t(X) %*% X) %*% t(X)
sapply(1:200, function(i) algorithm(j, xi, yd, f))
})
# Spatial scenario: the true curve depends on j, so f is bound per j here.
r3 = lapply(1:6, function(j) sapply(1:200, function(i) algorithm(j, xi, ys, function(x) fj(x, j))))
r4 = lapply(1:6, function(j) sapply(1:200, function(i) algorithm(j, xi, yv, f)))
# For each scenario: 6 panels of (example data + true curve) paired with
# boxplots of the log MSE ratios per criterion.
par(mar=c(1,1,1,1))
par(mfrow = c(3, 4))
# Noise scenario.
sapply(1:6, function(j){
plot(xi, yn(xi, j), pch = 1, main = paste0("j = ", j),cex = 0.1,ylab = '',xlab = '')
lines(xi, f(xi))
boxplot(t(log(r1[[j]])), xaxt = 'n',ylab = 'r', xlab = 'criteria')
axis(1, at = 1:4 , labels = c('CV','GCV','AICc','Cp'))
})
# Density scenario: plotted on the random Beta design.
sapply(1:6, function(j){
plot(F_inv(Xi, j), yd(F_inv(Xi, j), j), pch = 1, main = paste0("j = ", j),cex = 0.1,ylab = '',xlab = '')
lines(sort(F_inv(Xi, j)), f(sort(F_inv(Xi, j))))
boxplot(t(log(r2[[j]])), xaxt = 'n',ylab = 'r', xlab = 'criteria')
axis(1, at = 1:4 , labels = c('CV','GCV','AICc','Cp'))
})
# Spatial scenario: true curve fj depends on j.
sapply(1:6, function(j){
plot(xi, ys(xi, j), pch = 1, main = paste0("j = ", j),cex = 0.1,ylab = '',xlab = '')
lines(xi, fj(xi, j))
boxplot(t(log(r3[[j]])), xaxt = 'n',ylab = 'r', xlab = 'criteria')
axis(1, at = 1:4 , labels = c('CV','GCV','AICc','Cp'))
})
# Variance (heteroscedastic) scenario.
sapply(1:6, function(j){
plot(xi, yv(xi, j), pch = 1, main = paste0("j = ", j),cex = 0.1,ylab = '',xlab = '')
lines(xi, f(xi))
boxplot(t(log(r4[[j]])), xaxt = 'n',ylab = 'r', xlab = 'criteria')
axis(1, at = 1:4 , labels = c('CV','GCV','AICc','Cp'))
})
###generate data
# NOTE(review): hard-coded absolute setwd() is machine-specific and breaks
# portability; prefer running from the project directory or relative paths.
setwd("~/Desktop/STA_2017_spring/STA243")
# Standard normal density (re-defined here; same quantity as above).
phi=function(x) {1/sqrt(2*pi)*exp(-x^2/2)}
# True regression function for scenario i:
#   i == 3 (spatial): variably-smooth sine curve whose frequency depends on j;
#   otherwise: the bimodal bump curve (independent of j).
f_fun <- function(x, i, j) {
  if (i == 3) {
    shift <- 2^((9 - 4 * j) / 5)
    sqrt(x * (1 - x)) * sin(2 * pi * (1 + shift) / (x + shift))
  } else {
    1.5 * phi((x - 0.35) / 0.15) - phi((x - 0.8) / 0.04)
  }
}
# Scenario 1 ("noise"): equispaced design, homoscedastic noise whose level
# grows with j.
generatenoisedata <- function(j, err) {
  x <- (seq_len(200) - 0.5) / 200
  sigma <- 0.02 + 0.04 * (j - 1)^2
  data.frame(x = x, y = f_fun(x, 1, j) + sigma * err)
}
# Scenario 2 ("density"): random design from a Beta((j+4)/5, (11-j)/5)
# quantile transform of sorted uniforms; fixed noise level 0.1.
generatedensitydata <- function(j, err) {
  u <- sort(runif(200, 0, 1))
  x <- qbeta(u, (j + 4) / 5, (11 - j) / 5)
  data.frame(x = x, y = f_fun(x, 2, j) + 0.1 * err)
}
# Scenario 3 ("spatial"): equispaced design, j-dependent true curve, fixed
# noise level 0.2.
generatespatialdata <- function(j, err) {
  x <- (seq_len(200) - 0.5) / 200
  data.frame(x = x, y = f_fun(x, 3, j) + 0.2 * err)
}
# Scenario 4 ("variance"): equispaced design with heteroscedastic noise whose
# standard deviation varies linearly in x.
generatevariancedata <- function(j, err) {
  x <- (seq_len(200) - 0.5) / 200
  sdev <- abs(0.15 * (1 + 0.4 * (2 * j - 7) * (x - 0.5)))
  data.frame(x = x, y = f_fun(x, 4, j) + sdev * err)
}
# Draw one shared N(0,1) error vector and build all four scenario families,
# each a list over j = 1..6 of 200-row data frames.
generatealldata <- function() {
  err <- rnorm(200, 0, 1)
  list(
    noisedata    = lapply(1:6, function(j) generatenoisedata(j, err)),
    densitydata  = lapply(1:6, function(j) generatedensitydata(j, err)),
    spatialdata  = lapply(1:6, function(j) generatespatialdata(j, err)),
    variancedata = lapply(1:6, function(j) generatevariancedata(j, err))
  )
}
# 30 interior knots and the penalty matrix (only knot coefficients penalized).
# NOTE: naming the knot vector `t` shadows base::t for value lookups; calls
# t(x_mat) still resolve to the transpose function.
t=(1:30)/31
D=diag(c(rep(0,4),rep(1,30)))
# Penalized least-squares hat matrix for data$x with smoothing parameter
# lambda, using the truncated-power cubic spline basis
# {1, x, x^2, x^3, (x - t_k)_+^3}.
# Fix: the basis was grown one column at a time with cbind() in a loop
# (repeated copies); build all knot columns in a single vectorized step.
H <- function(data, lambda) {
  x <- data$x
  trunc3 <- sapply(t, function(knot) pmax(x - knot, 0)^3)
  x_mat <- cbind(1, x, x^2, x^3, trunc3)
  x_mat %*% solve(t(x_mat) %*% x_mat + lambda * D) %*% t(x_mat)
}
# Leave-one-out cross-validation score for smoothing parameter lambda,
# computed with the hat-matrix shortcut (no refitting).
CV <- function(lambda, data) {
  smoother <- H(data, lambda)
  fitted <- as.numeric(smoother %*% data$y)
  loo_resid <- (data$y - fitted) / (1 - diag(smoother))
  sum(loo_resid^2) / 200
}
# Generalized cross-validation score: RSS scaled by (1 - tr(H)/n)^2.
GCV <- function(lambda, data) {
  smoother <- H(data, lambda)
  fitted <- as.numeric(smoother %*% data$y)
  rss <- sum((data$y - fitted)^2)
  denom <- (1 - sum(diag(smoother)) / 200)^2
  rss / (200 * denom)
}
# Corrected AIC criterion: log(RSS) + 2(edf + 1)/(198 - edf), with edf the
# trace of the smoother. NOTE: this definition masks stats::AIC for the rest
# of the script.
AIC <- function(lambda, data) {
  smoother <- H(data, lambda)
  fitted <- smoother %*% data$y
  edf <- sum(diag(smoother))
  log(sum((data$y - fitted)^2)) + 2 * (edf + 1) / (198 - edf)
}
# Cp-style risk estimate: RSS + sigma^2 * (2 tr(H) - n). The variance is
# estimated from the current fit's RSS divided by 200 - 34 (n minus the
# number of basis functions; hard-coded for n = 200, 34 columns).
RISK <- function(lambda, data) {
  smoother <- H(data, lambda)
  fitted <- smoother %*% data$y
  rss <- sum((data$y - fitted)^2)
  sigma2 <- rss / (200 - 34)
  rss + sigma2 * (2 * sum(diag(smoother)) - 200)
}
# Oracle criterion: squared deviation of the penalized fit from the true
# curve ftrue; minimized over lambda to get the benchmark denominator.
deviate <- function(lambda, data, ftrue) {
  smoother <- H(data, lambda)
  fitted <- smoother %*% data$y
  resid <- ftrue - fitted
  sum(resid^2)
}
# Full simulation: 200 Monte-Carlo replicates x 4 scenarios (i) x 6 settings
# (j). For each cell, lambda is selected by each criterion and scored by
# r = SSE(criterion's lambda) / SSE(oracle lambda); smaller is better.
# Refactor: the four identical cv/gcv/aic/risk stanzas are collapsed into one
# loop over a named list of criteria (row order in `output` is unchanged:
# cv, gcv, aic, risk per cell), and `output` is bound once at the end instead
# of growing with rbind() inside the loop (which is O(n^2) in copies).
criteria <- list(cv = CV, gcv = GCV, aic = AIC, risk = RISK)
rows <- list()
for (k in 1:200) {
  dataset <- generatealldata()
  for (i in 1:4) {
    for (j in 1:6) {
      data <- dataset[[i]][[j]]
      ftrue <- f_fun(data$x, i, j)
      # Oracle: smallest achievable squared deviation from the true curve.
      mindevi <- optimize(deviate, c(0, 1), data, ftrue, maximum = FALSE)$objective
      for (m in names(criteria)) {
        opt <- optimize(criteria[[m]], c(0, 1), data, maximum = FALSE)$minimum
        f_hat <- H(data, opt) %*% data$y
        row <- data.frame(k, i, j, method = m, r = sum((ftrue - f_hat)^2) / mindevi)
        print(row)
        rows[[length(rows) + 1L]] <- row
      }
    }
  }
}
output <- do.call(rbind, rows)
# Snapshot the simulation results to CSV and reload (so the long loop above
# does not need to be re-run to redo the plots).
write.csv(output,'output.csv')
output=read.csv('output.csv')
# Panel titles for the four scenarios. NOTE(review): this masks base::names
# for value lookups; names(...) as a function call still works.
names=c("noise data","density data","spatial data","variance data")
# One fresh draw of every scenario, used only for the example scatter panels.
dataset=generatealldata()
par(mfrow=c(3,4))
for(i in 1:4)
{
for(j in 1:6)
{
# Example data set with the true curve overlaid in red.
print(plot(dataset[[i]][[j]],main=paste('j=',j)))
print(lines(x=dataset[[i]][[j]]$x,y=f_fun(dataset[[i]][[j]]$x,i,j),col='red'))
# Boxplot of log inefficiency r per criterion. The r >= 1 filter presumably
# drops ratios just below 1 caused by optimizer tolerance -- confirm.
print(boxplot(log(r)~method,data=output[output$r>=1&output$i==i&output$j==j,]))
}
print(title(names[i],outer=TRUE,line=-1))
}
|
#' Number of states getter
#'
#' @param x An object with a \code{nstates} method (e.g. an HMM).
#' @return The number of states (length of \code{x$states$names}).
nstates <- function(x) {
UseMethod("nstates")
}
#' @rdname nstates
nstates.HMM <- function(x) return(length(x$states$names))
#' Number of transition parameters getter
#'
#' @param x An object with a \code{ntransitions} method (e.g. an HMM).
#' @return The number of transitions (columns of \code{x$transitions}).
ntransitions <- function(x) {
UseMethod("ntransitions")
}
#' @rdname ntransitions
ntransitions.HMM <- function(x) return(ncol(x$transitions))
#' Number of constraints getter
#'
#' @param x An object with a \code{nconstraints} method (e.g. an HMM).
#' @return The number of constraints (rows of \code{x$constraints}).
nconstraints <- function(x) {
UseMethod("nconstraints")
}
#' @rdname nconstraints
nconstraints.HMM <- function(x) return(nrow(x$constraints))
#' Matrix of constraints getter
#'
#' @param x An object with a \code{constraints} method (e.g. an HMM).
#' @return The \code{constraints} field of the object.
constraints <- function(x) {
UseMethod("constraints")
}
#' @rdname constraints
constraints.HMM <- function(x) return(x$constraints)
#' Probability of transitions parameters getter
#'
#' @param x An object with a \code{ptransition} method (e.g. an HMM).
#' @return The transition parameters as a plain numeric vector.
ptransition <- function(x) {
UseMethod("ptransition")
}
#' @rdname ptransition
ptransition.HMM <- function(x)
return(as.numeric(x$parameters$transitions))
#' Initial state parameters getter
#'
#' @param x An object with an \code{istates} method (e.g. an HMM).
#' @return The \code{parameters$states} field of the object.
istates <- function(x) {
UseMethod("istates")
}
#' @rdname istates
istates.HMM <- function(x) return(x$parameters$states)
#' Transitions getter
#'
#' @param x An object with a \code{transitions} method (e.g. an HMM).
#' @return The \code{transitions} field of the object.
transitions <- function(x) {
UseMethod("transitions")
}
#' @rdname transitions
transitions.HMM <- function(x) return(x$transitions)
#' Reduced parameters getter
#'
#' @param x An object with an \code{rparams} method (e.g. an HMM).
#' @return The \code{parameters$reducedparams$params} field of the object.
rparams <- function(x) {
UseMethod("rparams")
}
#' @rdname rparams
rparams.HMM <- function(x) return(x$parameters$reducedparams$params)
#' Transformation matrix getter
#'
#' @param x An object with a \code{gettransmatrix} method (e.g. an HMM).
#' @return The \code{parameters$reducedparams$transmatrix} field of the object.
gettransmatrix <- function(x) {
UseMethod("gettransmatrix")
}
#' @rdname gettransmatrix
gettransmatrix.HMM <- function(x)
return(x$parameters$reducedparams$transmatrix)
#' Emissions matrix getter
#'
#' @param x An object with an \code{emissions} method (e.g. an HMM).
#' @return The \code{emissions} field of the object.
emissions <- function(x) {
UseMethod("emissions")
}
#' @rdname emissions
emissions.HMM <- function(x) return(x$emissions)
#' Transitions matrix getter.
#'
#' Returns the transitions matrix from a HMM.
#'
#' The HMM object contains three fields. States contains information
#' about the states. Transitions contains a list of matrices with
#' two rows. Each column represents a transition with non-zero
#' probability. Transitions in the same matrix have same probability.
#' Emissions is a matrix with the emission probabilities. These are
#' considered fixed.
#'
#' @param x An HMM object.
#'
#' @return The transition matrix of the HMM.
#'
#' @examples
#' HMM(1L, list(matrix(c(1L,1L), nrow = 2)), EM = matrix(1, nrow = 1))
getTM <- function(x) {
  UseMethod("getTM")
}
#' @rdname getTM
getTM.HMM <- function(x) {
  if (is.null(x$parameters$transitions))
    stop(paste0("[destim::getTM] Parameters of the model are ",
                "required to get the transitions matrix"))
  TM <- matrix(0, nrow = nstates(x), ncol = nstates(x))
  # Fix: seq_len() instead of 1:ntransitions(x). With zero transitions,
  # 1:0 iterates c(1, 0) and indexes out of bounds.
  for (i in seq_len(ntransitions(x)))
    TM[x$transitions[1L, i],
       x$transitions[2L, i]] <- x$parameters$transitions[i]
  return(TM)
}
|
/MobileNetworkDataSimulationTemplate/code/src/destim/R/getters.R
|
no_license
|
Lorencrack3/TFG-Lorenzo
|
R
| false
| false
| 2,796
|
r
|
#' Number of states getter
#'
nstates <- function(x) {
UseMethod("nstates")
}
#' @rdname nstates
nstates.HMM <- function(x) return(length(x$states$names))
#' Number of transition parameters getter
#'
ntransitions <- function(x) {
UseMethod("ntransitions")
}
#' @rdname ntransitions
ntransitions.HMM <- function(x) return(ncol(x$transitions))
#' Number of constraints getter
#'
nconstraints <- function(x) {
UseMethod("nconstraints")
}
#' @rdname nconstraints
nconstraints.HMM <- function(x) return(nrow(x$constraints))
#' Matrix of constraints getter
#'
constraints <- function(x) {
UseMethod("constraints")
}
#' @rdname constraints
constraints.HMM <- function(x) return(x$constraints)
#' Probability of transitions parameters getter
#'
ptransition <- function(x) {
UseMethod("ptransition")
}
#' @rdname ptransition
ptransition.HMM <- function(x)
return(as.numeric(x$parameters$transitions))
#' Initial state parameters getter
#'
istates <- function(x) {
UseMethod("istates")
}
#' @rdname istates
istates.HMM <- function(x) return(x$parameters$states)
#' Transitions getter
#'
transitions <- function(x) {
UseMethod("transitions")
}
#' @rdname transitions
transitions.HMM <- function(x) return(x$transitions)
#' Reduced parameters getter
rparams <- function(x) {
UseMethod("rparams")
}
#' @rdname rparams
rparams.HMM <- function(x) return(x$parameters$reducedparams$params)
#' Transformation matrix getter
gettransmatrix <- function(x) {
UseMethod("gettransmatrix")
}
#' @rdname gettransmatrix
gettransmatrix.HMM <- function(x)
return(x$parameters$reducedparams$transmatrix)
#' Emissions matrix getter
#'
emissions <- function(x) {
UseMethod("emissions")
}
#' @rdname emissions
emissions.HMM <- function(x) return(x$emissions)
#' Transitions matrix getter.
#'
#' Returns the transitions matrix from a HMM.
#'
#' The HMM object contains three fields. States contains information
#' about the states. Transitions contains a list of matrices with
#' two rows. Each column represents a transition with non-zero
#' probability. Transitions in the same matrix have same probability.
#' Emissions is a matrix with the emission probabilities. These are
#' considered fixed.
#'
#' @param x An HMM object.
#'
#' @return The transition matrix of the HMM.
#'
#' @examples
#' HMM(1L, list(matrix(c(1L,1L), nrow = 2)), EM = matrix(1, nrow = 1))
getTM <- function(x) {
  UseMethod("getTM")
}
#' @rdname getTM
getTM.HMM <- function(x) {
  if (is.null(x$parameters$transitions))
    stop(paste0("[destim::getTM] Parameters of the model are ",
                "required to get the transitions matrix"))
  TM <- matrix(0, nrow = nstates(x), ncol = nstates(x))
  # Fix: seq_len() instead of 1:ntransitions(x). With zero transitions,
  # 1:0 iterates c(1, 0) and indexes out of bounds.
  for (i in seq_len(ntransitions(x)))
    TM[x$transitions[1L, i],
       x$transitions[2L, i]] <- x$parameters$transitions[i]
  return(TM)
}
|
# Getting the Bang and Olufsen data from the lmerTest-package:
library(lmerTest) # (Udviklet af os)
data(TVbo)
# Each of 8 assessors scored each of 12 combinations 2 times
# Let's look at only a single picture and one of the two reps:
# And let us look at the sharpness
TVbosubset <- subset(TVbo,Picture==1 & Repeat==1)[,c(1, 2, 9)]
TVbosubset
sharp <- matrix(TVbosubset$Sharpness, nrow=8, byrow=T)
colnames(sharp) <- c("TV3", "TV2", "TV1")
rownames(sharp) <- c("Person 1", "Person 2", "Person 3",
"Person 4", "Person 5", "Person 6",
"Person 7", "Person 8")
sharp
# library(xtable)
# xtable(sharp)
par(mfrow=c(1,2))
with(TVbosubset, plot(Sharpness~TVset))
with(TVbosubset, plot(Sharpness~Assessor))
anova(lm(Sharpness ~ Assessor + TVset, data = TVbosubset))
####################################
## Input data and plot
## Observations
y <- c(2.8, 3.6, 3.4, 2.3,
5.5, 6.3, 6.1, 5.7,
5.8, 8.3, 6.9, 6.1)
## treatments (Groups, varieties)
treatm <- factor(c(1, 1, 1, 1,
2, 2, 2, 2,
3, 3, 3, 3))
## blocks (persons, fields)
block <- factor(c(1, 2, 3, 4,
1, 2, 3, 4,
1, 2, 3, 4))
## for later formulas
(k <- length(unique(treatm)))
(l <- length(unique(block)))
y
treatm
block
cbind(y, treatm, block)
## Plots
par(mfrow=c(1,2))
## Plot histogramms by treatments
plot(treatm, y, xlab="Treatments", ylab="y")
## Plot histograms by blocks
plot(block, y, xlab="Blocks", ylab="y")
####################################
## Compute estimates of parameters in the model
## Sample mean
(muHat <- mean(y))
## Sample mean for each treatment
(alphaHat <- tapply(y, treatm, mean) - muHat)
## Sample mean for each Block
(betaHat <- tapply(y, block, mean) - muHat)
################################
## Find the total variation, Sum of Squares, SST
## SST for the example
(SST <- sum( (y - muHat)^2 ))
################################
## Find the variability explained by the treatments
## The Sum of Squares for treatment SS(Tr) for the example
(SSTr <- l * sum(alphaHat^2))
################################
## Find the variability explained by the blocks
## The Sum of Squares for the blocks SS(Bl) for the example
(SSBl <- k * sum(betaHat^2))
################################
## Find the variability left after model fit, SSE
## The Sum of Squares for the residuals for the example
(SSE <- SST - SSTr - SSBl)
################################
## Plot the F distribution and see the critical value for treatments
par(mfrow=c(1,1))
## Remember, this is "under H0" (that is we compute as if H0 is true):
## Sequence for plot
xseq <- seq(0, 10, by=0.1)
## Plot the density of the F distribution
plot(xseq, df(xseq, df1=k-1, df2=(k-1)*(l-1)), type="l")
##The critical value for significance level 5 %
cr <- qf(0.95, df1=k-1, df2=(k-1)*(l-1))
## Mark it in the plot
abline(v=cr, col="red")
## The value of the test statistic
(Ftr <- (SSTr/(k-1)) / (SSE/((k-1)*(l-1))))
## The p-value hence is:
(1 - pf(Ftr, df1=k-1, df2=(k-1)*(l-1)))
################################
## Plot the F distribution and see the critical value
## Remember, this is "under H0" (that is we compute as if H0 is true):
## Sequence for plot
xseq <- seq(0, 10, by=0.1)
## Plot the density of the F distribution
plot(xseq, df(xseq, df1=l-1, df2=(k-1)*(l-1)), type="l")
##The critical value for significance level 5 %
cr <- qf(0.95, df1=l-1, df2=(k-1)*(l-1))
## Mark it in the plot
abline(v=cr, col="red")
## The value of the test statistic
(Fbl <- (SSBl/(l-1)) / (SSE/((k-1)*(l-1))))
## The p-value hence is:
(1 - pf(Fbl, df1=l-1, df2=(k-1)*(l-1)))
################################
## All this can be found using anova() and lm()
anova(lm(y ~ treatm + block))
#### Simulate the sampling-distribution under the null hypothesis:
## Eg without the TVset effect: (but WITH person-effect)
## Sample mean
(muHat <- mean(TVbosubset$Sharpness))
## Sample mean for each assessor(blok)
(betaHat <- tapply(TVbosubset$Sharpness, TVbosubset$Assessor, mean) - muHat)
Sblock <- factor(rep(1:8, 3))
Streatm <- factor(rep(1:3, c(8, 8, 8)))
n <- 1000
F_sims <- rep(0, n)
for (i in 1:n){
ysim <- muHat + rep(betaHat, 3) + rnorm(24)
F_sims[i] <- anova(lm(ysim ~ Streatm + Sblock))[1, 4]
}
par(mfrow=c(1,1))
## Plot the simulated F-statistics AND the real F-distribution:
hist(F_sims, freq=FALSE)
lines(seq(0, 10, by=0.01), df(seq(0, 10, by=0.01), 2, 14))
################################
## Check assumption of homogeneous variance
## Save the fit (the first fit is immediately overwritten; it is kept only to
## show the generic-example formulation next to the TVbo one)
fit <- lm(y ~ treatm + block)
fit <- lm(Sharpness ~ Assessor + TVset, data = TVbosubset)
## Box plot of residuals by treatment
par(mfrow=c(1,2))
## Fix: a stray positional `y` (the 12-observation vector from the generic
## example above) was passed into plot() of the 24-row TVbo data; it appears
## to be a copy-paste leftover (compare the Assessor plot below) and is
## removed here.
with(TVbosubset, plot(TVset, fit$residuals, xlab="Treatment"))
## Box plot of residuals by block (assessor)
with(TVbosubset, plot(Assessor, fit$residuals, xlab="Block"))
################################
## Check the assumption of normality of residuals
## qq-normal plot of residuals
qqnorm(fit$residuals)
qqline(fit$residuals)
## Or with a Wally plot (MESS package)
require(MESS)
qqwrap <- function(x, y, ...) {
  qqnorm(y, main = "", ...)
  qqline(y)
}
## Can we see a deviating qq-norm plot?
wallyplot(fit$residuals, FUN = qqwrap)
|
/snippets/week11.R
|
no_license
|
andreasharmuth/statistics
|
R
| false
| false
| 5,161
|
r
|
# Getting the Bang and Olufsen data from the lmerTest-package:
library(lmerTest) # (Udviklet af os)
data(TVbo)
# Each of 8 assessors scored each of 12 combinations 2 times
# Let's look at only a single picture and one of the two reps:
# And let us look at the sharpness
TVbosubset <- subset(TVbo,Picture==1 & Repeat==1)[,c(1, 2, 9)]
TVbosubset
sharp <- matrix(TVbosubset$Sharpness, nrow=8, byrow=T)
colnames(sharp) <- c("TV3", "TV2", "TV1")
rownames(sharp) <- c("Person 1", "Person 2", "Person 3",
"Person 4", "Person 5", "Person 6",
"Person 7", "Person 8")
sharp
# library(xtable)
# xtable(sharp)
par(mfrow=c(1,2))
with(TVbosubset, plot(Sharpness~TVset))
with(TVbosubset, plot(Sharpness~Assessor))
anova(lm(Sharpness ~ Assessor + TVset, data = TVbosubset))
####################################
## Input data and plot
## Observations
y <- c(2.8, 3.6, 3.4, 2.3,
5.5, 6.3, 6.1, 5.7,
5.8, 8.3, 6.9, 6.1)
## treatments (Groups, varieties)
treatm <- factor(c(1, 1, 1, 1,
2, 2, 2, 2,
3, 3, 3, 3))
## blocks (persons, fields)
block <- factor(c(1, 2, 3, 4,
1, 2, 3, 4,
1, 2, 3, 4))
## for later formulas
(k <- length(unique(treatm)))
(l <- length(unique(block)))
y
treatm
block
cbind(y, treatm, block)
## Plots
par(mfrow=c(1,2))
## Plot histogramms by treatments
plot(treatm, y, xlab="Treatments", ylab="y")
## Plot histograms by blocks
plot(block, y, xlab="Blocks", ylab="y")
####################################
## Compute estimates of parameters in the model
## Sample mean
(muHat <- mean(y))
## Sample mean for each treatment
(alphaHat <- tapply(y, treatm, mean) - muHat)
## Sample mean for each Block
(betaHat <- tapply(y, block, mean) - muHat)
################################
## Find the total variation, Sum of Squares, SST
## SST for the example
(SST <- sum( (y - muHat)^2 ))
################################
## Find the variability explained by the treatments
## The Sum of Squares for treatment SS(Tr) for the example
(SSTr <- l * sum(alphaHat^2))
################################
## Find the variability explained by the blocks
## The Sum of Squares for the blocks SS(Bl) for the example
(SSBl <- k * sum(betaHat^2))
################################
## Find the variability left after model fit, SSE
## The Sum of Squares for the residuals for the example
(SSE <- SST - SSTr - SSBl)
################################
## Plot the F distribution and see the critical value for treatments
par(mfrow=c(1,1))
## Remember, this is "under H0" (that is we compute as if H0 is true):
## Sequence for plot
xseq <- seq(0, 10, by=0.1)
## Plot the density of the F distribution
plot(xseq, df(xseq, df1=k-1, df2=(k-1)*(l-1)), type="l")
##The critical value for significance level 5 %
cr <- qf(0.95, df1=k-1, df2=(k-1)*(l-1))
## Mark it in the plot
abline(v=cr, col="red")
## The value of the test statistic
(Ftr <- (SSTr/(k-1)) / (SSE/((k-1)*(l-1))))
## The p-value hence is:
(1 - pf(Ftr, df1=k-1, df2=(k-1)*(l-1)))
################################
## Plot the F distribution and see the critical value
## Remember, this is "under H0" (that is we compute as if H0 is true):
## Sequence for plot
xseq <- seq(0, 10, by=0.1)
## Plot the density of the F distribution
plot(xseq, df(xseq, df1=l-1, df2=(k-1)*(l-1)), type="l")
##The critical value for significance level 5 %
cr <- qf(0.95, df1=l-1, df2=(k-1)*(l-1))
## Mark it in the plot
abline(v=cr, col="red")
## The value of the test statistic
(Fbl <- (SSBl/(l-1)) / (SSE/((k-1)*(l-1))))
## The p-value hence is:
(1 - pf(Fbl, df1=l-1, df2=(k-1)*(l-1)))
################################
## All this can be found using anova() and lm()
anova(lm(y ~ treatm + block))
#### Simulate the sampling-distribution under the null hypothesis:
## Eg without the TVset effect: (but WITH person-effect)
## Sample mean
(muHat <- mean(TVbosubset$Sharpness))
## Sample mean for each assessor(blok)
(betaHat <- tapply(TVbosubset$Sharpness, TVbosubset$Assessor, mean) - muHat)
Sblock <- factor(rep(1:8, 3))
Streatm <- factor(rep(1:3, c(8, 8, 8)))
n <- 1000
F_sims <- rep(0, n)
for (i in 1:n){
ysim <- muHat + rep(betaHat, 3) + rnorm(24)
F_sims[i] <- anova(lm(ysim ~ Streatm + Sblock))[1, 4]
}
par(mfrow=c(1,1))
## Plot the simulated F-statistics AND the real F-distribution:
hist(F_sims, freq=FALSE)
lines(seq(0, 10, by=0.01), df(seq(0, 10, by=0.01), 2, 14))
################################
## Check assumption of homogeneous variance
## Save the fit (the first fit is immediately overwritten; it is kept only to
## show the generic-example formulation next to the TVbo one)
fit <- lm(y ~ treatm + block)
fit <- lm(Sharpness ~ Assessor + TVset, data = TVbosubset)
## Box plot of residuals by treatment
par(mfrow=c(1,2))
## Fix: a stray positional `y` (the 12-observation vector from the generic
## example above) was passed into plot() of the 24-row TVbo data; it appears
## to be a copy-paste leftover (compare the Assessor plot below) and is
## removed here.
with(TVbosubset, plot(TVset, fit$residuals, xlab="Treatment"))
## Box plot of residuals by block (assessor)
with(TVbosubset, plot(Assessor, fit$residuals, xlab="Block"))
################################
## Check the assumption of normality of residuals
## qq-normal plot of residuals
qqnorm(fit$residuals)
qqline(fit$residuals)
## Or with a Wally plot (MESS package)
require(MESS)
qqwrap <- function(x, y, ...) {
  qqnorm(y, main = "", ...)
  qqline(y)
}
## Can we see a deviating qq-norm plot?
wallyplot(fit$residuals, FUN = qqwrap)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_reads.R
\name{write_reads}
\alias{write_reads}
\title{write sequencing reads to disk}
\usage{
write_reads(reads, fname, readlen, paired = TRUE, gzip, offset = 1L)
}
\arguments{
\item{reads}{DNAStringSet representing sequencing reads}
\item{fname}{file path/prefix specifying where sequencing reads should be
written. Should not contain ".fasta" (this is appended automatically).}
\item{readlen}{maximum length of the reads in \code{reads}.}
\item{paired}{If \code{TRUE}, reads are assumed to be in pairs: i.e., read 1
and read 2 in \code{reads} are the left and right mate (respectively) of a
read pair; same with read 3 and read 4, etc. The odd-numbered reads are
written to \code{fname_1.fasta} and the even-numbered
reads are written to \code{fname_2.fasta}. If \code{FALSE}, reads are
assumed to be single-end and just one file, \code{fname.fasta}, is written.}
\item{gzip}{If \code{TRUE}, gzip the output fasta files.}
\item{offset}{An integer greater than or equal to 1 at which to start
assigning read numbers.}
}
\value{
No return, but FASTA file(s) containing the sequences in \code{reads}
are written to \code{fname.fasta} (if \code{paired} is FALSE) or
\code{fname_1.fasta} and \code{fname_2.fasta} if \code{paired} is TRUE.
}
\description{
given a DNAStringSet representing simulated sequencing reads, write FASTA
files to disk representing the simulated reads.
}
\details{
The \code{\link{get_reads}} function returns a DNAStringSet object
representing sequencing reads that can be directly passed to
\code{write_reads}. If output other than that from \code{get_reads} is used
and \code{paired} is \code{TRUE}, make sure \code{reads} is ordered
properly (i.e., that mate pairs appear together and that the left mate
appears first).
}
\examples{
library(Biostrings)
data(srPhiX174) # pretend srPhiX174 represents a DNAStringSet of *reads*
readlen = unique(width(srPhiX174)) #35
write_reads(srPhiX174, fname='./srPhiX174', readlen=readlen, paired=FALSE,
gzip=FALSE)
## If the file is too big, you can subset it and write it in chunks.
## Here we split our 'reads' into two chunks and save them to the same file.
write_reads(srPhiX174[1:100], fname='./srPhiX174-offset', readlen=readlen,
paired=FALSE, gzip=FALSE, offset = 1L)
write_reads(srPhiX174[101:length(srPhiX174)], fname='./srPhiX174-offset',
readlen=readlen, paired=FALSE, gzip=FALSE, offset = 101L)
## We can verify that we get the same results
srPhi <- readDNAStringSet('./srPhiX174.fasta')
srPhiOffset <- readDNAStringSet('./srPhiX174-offset.fasta')
identical(srPhi, srPhiOffset)
}
\seealso{
\code{\link{get_reads}}
}
|
/man/write_reads.Rd
|
no_license
|
Christina-hshi/polyester-LC
|
R
| false
| true
| 2,718
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_reads.R
\name{write_reads}
\alias{write_reads}
\title{write sequencing reads to disk}
\usage{
write_reads(reads, fname, readlen, paired = TRUE, gzip, offset = 1L)
}
\arguments{
\item{reads}{DNAStringSet representing sequencing reads}
\item{fname}{file path/prefix specifying where sequencing reads should be
written. Should not contain ".fasta" (this is appended automatically).}
\item{readlen}{maximum length of the reads in \code{reads}.}
\item{paired}{If \code{TRUE}, reads are assumed to be in pairs: i.e., read 1
and read 2 in \code{reads} are the left and right mate (respectively) of a
read pair; same with read 3 and read 4, etc. The odd-numbered reads are
written to \code{fname_1.fasta} and the even-numbered
reads are written to \code{fname_2.fasta}. If \code{FALSE}, reads are
assumed to be single-end and just one file, \code{fname.fasta}, is written.}
\item{gzip}{If \code{TRUE}, gzip the output fasta files.}
\item{offset}{An integer greater than or equal to 1 at which to start
assigning read numbers.}
}
\value{
No return, but FASTA file(s) containing the sequences in \code{reads}
are written to \code{fname.fasta} (if \code{paired} is FALSE) or
\code{fname_1.fasta} and \code{fname_2.fasta} if \code{paired} is TRUE.
}
\description{
given a DNAStringSet representing simulated sequencing reads, write FASTA
files to disk representing the simulated reads.
}
\details{
The \code{\link{get_reads}} function returns a DNAStringSet object
representing sequencing reads that can be directly passed to
\code{write_reads}. If output other than that from \code{get_reads} is used
and \code{paired} is \code{TRUE}, make sure \code{reads} is ordered
properly (i.e., that mate pairs appear together and that the left mate
appears first).
}
\examples{
library(Biostrings)
data(srPhiX174) # pretend srPhiX174 represents a DNAStringSet of *reads*
readlen = unique(width(srPhiX174)) #35
write_reads(srPhiX174, fname='./srPhiX174', readlen=readlen, paired=FALSE,
gzip=FALSE)
## If the file is too big, you can subset it and write it in chunks.
## Here we split our 'reads' into two chunks and save them to the same file.
write_reads(srPhiX174[1:100], fname='./srPhiX174-offset', readlen=readlen,
paired=FALSE, gzip=FALSE, offset = 1L)
write_reads(srPhiX174[101:length(srPhiX174)], fname='./srPhiX174-offset',
readlen=readlen, paired=FALSE, gzip=FALSE, offset = 101L)
## We can verify that we get the same results
srPhi <- readDNAStringSet('./srPhiX174.fasta')
srPhiOffset <- readDNAStringSet('./srPhiX174-offset.fasta')
identical(srPhi, srPhiOffset)
}
\seealso{
\code{\link{get_reads}}
}
|
# Implied theoretical cell probabilities under bivariate normality for two
# ordinal variables with thresholds th.y1 (rows) and th.y2 (columns) and
# polychoric correlation rho. Returns an (nth.y1 + 1) x (nth.y2 + 1) matrix.
pc_PI <- function(rho, th.y1, th.y2) {
  nth.y1 <- length(th.y1); nth.y2 <- length(th.y2)
  pth.y1 <- stats::pnorm(th.y1); pth.y2 <- stats::pnorm(th.y2)
  # Special case rho == 0: cells are products of the marginal probabilities.
  if (rho == 0.0) {
    rowPI <- diff(c(0, pth.y1, 1))
    colPI <- diff(c(0, pth.y2, 1))
    return(outer(rowPI, colPI))
  }
  # Evaluate the bivariate normal CDF at every (th.y1[i], th.y2[j]) pair in a
  # single vectorized pbivnorm call.
  upper.y <- rep(th.y2, times = rep.int(nth.y1, nth.y2))
  # Fix: original had rep(th.y1, times = ceiling(length(upper.y))/nth.y1),
  # applying ceiling() before the division. Harmless only because
  # length(upper.y) is an exact multiple of nth.y1; the intended count is
  # simply nth.y2.
  upper.x <- rep(th.y1, times = nth.y2)
  BI <- pbivnorm::pbivnorm(x = upper.x, y = upper.y, rho = rho)
  dim(BI) <- c(nth.y1, nth.y2)
  # Border with the marginal CDFs (and 0/1 corners) so that first differences
  # of the CDF grid give cell probabilities.
  BI <- rbind(0, BI, pth.y2, deparse.level = 0)
  BI <- cbind(0, BI, c(0, pth.y1, 1), deparse.level = 0)
  nr <- nrow(BI); nc <- ncol(BI)
  PI <- BI[-1L, -1L] - BI[-1L, -nc] - BI[-nr, -1L] + BI[-nr, -nc]
  # Guard: clamp zero/negative cells arising from floating-point error.
  PI[PI < .Machine$double.eps] <- .Machine$double.eps
  PI
}
|
/R/pc_PI.R
|
no_license
|
njaalf/discnorm
|
R
| false
| false
| 1,110
|
r
|
# Implied theoretical cell probabilities under bivariate normality for two
# ordinal variables with thresholds th.y1 (rows) and th.y2 (columns) and
# polychoric correlation rho. Returns an (nth.y1 + 1) x (nth.y2 + 1) matrix.
pc_PI <- function(rho, th.y1, th.y2) {
  nth.y1 <- length(th.y1); nth.y2 <- length(th.y2)
  pth.y1 <- stats::pnorm(th.y1); pth.y2 <- stats::pnorm(th.y2)
  # Special case rho == 0: cells are products of the marginal probabilities.
  if (rho == 0.0) {
    rowPI <- diff(c(0, pth.y1, 1))
    colPI <- diff(c(0, pth.y2, 1))
    return(outer(rowPI, colPI))
  }
  # Evaluate the bivariate normal CDF at every (th.y1[i], th.y2[j]) pair in a
  # single vectorized pbivnorm call.
  upper.y <- rep(th.y2, times = rep.int(nth.y1, nth.y2))
  # Fix: original had rep(th.y1, times = ceiling(length(upper.y))/nth.y1),
  # applying ceiling() before the division. Harmless only because
  # length(upper.y) is an exact multiple of nth.y1; the intended count is
  # simply nth.y2.
  upper.x <- rep(th.y1, times = nth.y2)
  BI <- pbivnorm::pbivnorm(x = upper.x, y = upper.y, rho = rho)
  dim(BI) <- c(nth.y1, nth.y2)
  # Border with the marginal CDFs (and 0/1 corners) so that first differences
  # of the CDF grid give cell probabilities.
  BI <- rbind(0, BI, pth.y2, deparse.level = 0)
  BI <- cbind(0, BI, c(0, pth.y1, 1), deparse.level = 0)
  nr <- nrow(BI); nc <- ncol(BI)
  PI <- BI[-1L, -1L] - BI[-1L, -nc] - BI[-nr, -1L] + BI[-nr, -nc]
  # Guard: clamp zero/negative cells arising from floating-point error.
  PI[PI < .Machine$double.eps] <- .Machine$double.eps
  PI
}
|
#' Bind all list members by column
#'
#' Combines every element of the list column-wise, delegating to
#' \code{list.do} with \code{cbind} so all elements are bound in one call.
#'
#' @param .data \code{list}
#' @name list.cbind
#' @export
#' @examples
#' \dontrun{
#' x <- list(data.frame(i=1:5,x=rnorm(5)),
#'    data.frame(y=rnorm(5),z=rnorm(5)))
#' list.cbind(x)
#' }
list.cbind <- function(.data) {
  list.do(.data, cbind)
}
|
/R/list.cbind.R
|
permissive
|
timelyportfolio/rlist
|
R
| false
| false
| 286
|
r
|
#' Bind all list members by column
#'
#' Combines every element of the list column-wise, delegating to
#' \code{list.do} with \code{cbind} so all elements are bound in one call.
#'
#' @param .data \code{list}
#' @name list.cbind
#' @export
#' @examples
#' \dontrun{
#' x <- list(data.frame(i=1:5,x=rnorm(5)),
#'    data.frame(y=rnorm(5),z=rnorm(5)))
#' list.cbind(x)
#' }
list.cbind <- function(.data) {
  list.do(.data, cbind)
}
|
# Bivariate Plots Section
# Explores pairwise relationships in the Prosper loan data: payment vs.
# income/APR/term, and breakdowns by occupation and loan status.
ld <- read.csv('prosperLoanData.csv')
# Keep only the columns of interest (data.frame() prefixes each with "ld.").
ld2 <- data.frame(ld$ListingCreationDate,ld$ListingCategory..numeric., ld$IncomeRange, ld$CreditGrade, ld$Term, ld$LoanStatus, ld$ClosedDate, ld$BorrowerAPR,ld$LenderYield, ld$BorrowerState, ld$Occupation, ld$MonthlyLoanPayment, ld$StatedMonthlyIncome, ld$ProsperRating..Alpha., ld$ListingCategory..numeric.)
library(ggplot2)
# Categories used in the grouped box plots below.
occupations <- c("Professional", "Computer Programmer", "Executive", "Teacher",
                 "Sales - Retail", "Administrative Assistant")
statuses <- c("Completed", "Current", "Chargedoff", "Defaulted",
              "Past Due (1-15 days)", "Past Due (31-60 days)")
# Monthly payment vs stated income (axes trimmed at the 99th percentile).
ggplot(aes(x = ld.StatedMonthlyIncome,y = ld.MonthlyLoanPayment), data = ld2) +
  geom_point(color = "blue", alpha = 0.2, position = 'jitter') +
  xlim(0, quantile(ld2$ld.StatedMonthlyIncome, 0.99)) +
  ylim(0, quantile(ld2$ld.MonthlyLoanPayment, 0.99)) +
  geom_smooth(color = "red")
# Monthly payment vs borrower APR.
ggplot(aes(x = ld.BorrowerAPR,
           y = ld.MonthlyLoanPayment), data = ld2) +
  geom_point(alpha = 0.2, position = 'jitter') +
  coord_cartesian(xlim = c(0, 0.6)) +
  geom_smooth(color = "red")
# Monthly payment vs loan term (terms are 12/36/60 months).
ggplot(aes(x = ld.Term,
           y = ld.MonthlyLoanPayment), data = ld2) +
  geom_point() +
  scale_x_continuous(breaks = c(12,36,60))
# Lender yield vs stated income.
ggplot(aes(x = ld.StatedMonthlyIncome,
           y = ld.LenderYield), data = ld2) +
  geom_point(alpha = 0.2, position = 'jitter') +
  xlim(0, quantile(ld2$ld.StatedMonthlyIncome, 0.99)) +
  ylim(0, 0.4) +
  geom_smooth()
# Payment burden: fraction of stated monthly income going to the loan payment.
ld2$ld.MonthlyPaymentOfIncome <- ld2$ld.MonthlyLoanPayment/ld2$ld.StatedMonthlyIncome
# Fix: the original subset conditions mixed & and | without parentheses;
# because & binds tighter than |, the !is.na()/income-cap guards applied only
# to the FIRST occupation in the chain. %in% expresses the intended filter
# (and is FALSE for NA, so no separate !is.na() check is needed).
ggplot(aes(x = ld.Occupation,
           y = ld.MonthlyPaymentOfIncome),
       data = subset(ld2, ld.Occupation %in% occupations)) +
  geom_boxplot() +
  coord_cartesian(ylim = c(0,0.5))
ggplot(aes(x = ld.LoanStatus,
           y = ld.MonthlyLoanPayment),
       data = subset(ld2, ld.LoanStatus %in% statuses)) +
  geom_boxplot()
ggplot(aes(x = ld.Occupation,
           y = ld.MonthlyLoanPayment),
       data = subset(ld2, ld.Occupation %in% occupations)) +
  geom_boxplot()
table(ld2$ld.Occupation)
# Income by occupation; the < 30000 cap now applies to ALL listed occupations
# (the original precedence bug applied it to "Professional" only).
ggplot(aes(x = ld.Occupation,
           y = ld.StatedMonthlyIncome),
       data = subset(ld2, ld.StatedMonthlyIncome < 30000 & ld.Occupation %in% occupations)) +
  geom_boxplot() +
  ylim(0,20000)
|
/bivariate.R
|
no_license
|
mpiplani/Prosper-Loan-Data-Exploration
|
R
| false
| false
| 2,901
|
r
|
# Bivariate Plots Section
# Explores pairwise relationships in the Prosper loan data: payment vs.
# income/APR/term, and breakdowns by occupation and loan status.
ld <- read.csv('prosperLoanData.csv')
# Keep only the columns of interest (data.frame() prefixes each with "ld.").
ld2 <- data.frame(ld$ListingCreationDate,ld$ListingCategory..numeric., ld$IncomeRange, ld$CreditGrade, ld$Term, ld$LoanStatus, ld$ClosedDate, ld$BorrowerAPR,ld$LenderYield, ld$BorrowerState, ld$Occupation, ld$MonthlyLoanPayment, ld$StatedMonthlyIncome, ld$ProsperRating..Alpha., ld$ListingCategory..numeric.)
library(ggplot2)
# Categories used in the grouped box plots below.
occupations <- c("Professional", "Computer Programmer", "Executive", "Teacher",
                 "Sales - Retail", "Administrative Assistant")
statuses <- c("Completed", "Current", "Chargedoff", "Defaulted",
              "Past Due (1-15 days)", "Past Due (31-60 days)")
# Monthly payment vs stated income (axes trimmed at the 99th percentile).
ggplot(aes(x = ld.StatedMonthlyIncome,y = ld.MonthlyLoanPayment), data = ld2) +
  geom_point(color = "blue", alpha = 0.2, position = 'jitter') +
  xlim(0, quantile(ld2$ld.StatedMonthlyIncome, 0.99)) +
  ylim(0, quantile(ld2$ld.MonthlyLoanPayment, 0.99)) +
  geom_smooth(color = "red")
# Monthly payment vs borrower APR.
ggplot(aes(x = ld.BorrowerAPR,
           y = ld.MonthlyLoanPayment), data = ld2) +
  geom_point(alpha = 0.2, position = 'jitter') +
  coord_cartesian(xlim = c(0, 0.6)) +
  geom_smooth(color = "red")
# Monthly payment vs loan term (terms are 12/36/60 months).
ggplot(aes(x = ld.Term,
           y = ld.MonthlyLoanPayment), data = ld2) +
  geom_point() +
  scale_x_continuous(breaks = c(12,36,60))
# Lender yield vs stated income.
ggplot(aes(x = ld.StatedMonthlyIncome,
           y = ld.LenderYield), data = ld2) +
  geom_point(alpha = 0.2, position = 'jitter') +
  xlim(0, quantile(ld2$ld.StatedMonthlyIncome, 0.99)) +
  ylim(0, 0.4) +
  geom_smooth()
# Payment burden: fraction of stated monthly income going to the loan payment.
ld2$ld.MonthlyPaymentOfIncome <- ld2$ld.MonthlyLoanPayment/ld2$ld.StatedMonthlyIncome
# Fix: the original subset conditions mixed & and | without parentheses;
# because & binds tighter than |, the !is.na()/income-cap guards applied only
# to the FIRST occupation in the chain. %in% expresses the intended filter
# (and is FALSE for NA, so no separate !is.na() check is needed).
ggplot(aes(x = ld.Occupation,
           y = ld.MonthlyPaymentOfIncome),
       data = subset(ld2, ld.Occupation %in% occupations)) +
  geom_boxplot() +
  coord_cartesian(ylim = c(0,0.5))
ggplot(aes(x = ld.LoanStatus,
           y = ld.MonthlyLoanPayment),
       data = subset(ld2, ld.LoanStatus %in% statuses)) +
  geom_boxplot()
ggplot(aes(x = ld.Occupation,
           y = ld.MonthlyLoanPayment),
       data = subset(ld2, ld.Occupation %in% occupations)) +
  geom_boxplot()
table(ld2$ld.Occupation)
# Income by occupation; the < 30000 cap now applies to ALL listed occupations
# (the original precedence bug applied it to "Professional" only).
ggplot(aes(x = ld.Occupation,
           y = ld.StatedMonthlyIncome),
       data = subset(ld2, ld.StatedMonthlyIncome < 30000 & ld.Occupation %in% occupations)) +
  geom_boxplot() +
  ylim(0,20000)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/polar_data.R
\docType{data}
\name{polar_data}
\alias{polar_data}
\title{Example data for polarMap function}
\format{Data frame with example data from four sites in London in 2009.}
\source{
\code{polar_data} was compiled from data using the \code{importAURN} function from the \code{openair} package with meteorological data from the \code{worldmet} package.
}
\description{
The \code{polar_data} dataset is provided as an example dataset as part of the \code{openairmaps}
package. The dataset contains hourly measurements of air pollutant
concentrations, location and meteorological data.
}
\details{
\code{polar_data} is supplied with the \code{openairmaps} package as an example
dataset for use with documented examples.
}
\examples{
#basic structure
head(polar_data)
}
\keyword{datasets}
|
/man/polar_data.Rd
|
no_license
|
bodartv/openairmaps
|
R
| false
| true
| 874
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/polar_data.R
\docType{data}
\name{polar_data}
\alias{polar_data}
\title{Example data for polarMap function}
\format{Data frame with example data from four sites in London in 2009.}
\source{
\code{polar_data} was compiled from data using the \code{importAURN} function from the \code{openair} package with meteorological data from the \code{worldmet} package.
}
\description{
The \code{polar_data} dataset is provided as an example dataset as part of the \code{openairmaps}
package. The dataset contains hourly measurements of air pollutant
concentrations, location and meteorological data.
}
\details{
\code{polar_data} is supplied with the \code{openairmaps} package as an example
dataset for use with documented examples.
}
\examples{
#basic structure
head(polar_data)
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.lightsail_operations.R
\name{reboot_instance}
\alias{reboot_instance}
\title{Restarts a specific instance}
\usage{
reboot_instance(instanceName)
}
\arguments{
\item{instanceName}{[required] The name of the instance to reboot.}
}
\description{
Restarts a specific instance.
}
\details{
The \code{reboot instance} operation supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the \href{https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags}{Lightsail Dev Guide}.
}
\section{Accepted Parameters}{
\preformatted{reboot_instance(
instanceName = "string"
)
}
}
|
/service/paws.lightsail/man/reboot_instance.Rd
|
permissive
|
CR-Mercado/paws
|
R
| false
| true
| 765
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.lightsail_operations.R
\name{reboot_instance}
\alias{reboot_instance}
\title{Restarts a specific instance}
\usage{
reboot_instance(instanceName)
}
\arguments{
\item{instanceName}{[required] The name of the instance to reboot.}
}
\description{
Restarts a specific instance.
}
\details{
The \code{reboot instance} operation supports tag-based access control via resource tags applied to the resource identified by instanceName. For more information, see the \href{https://lightsail.aws.amazon.com/ls/docs/en/articles/amazon-lightsail-controlling-access-using-tags}{Lightsail Dev Guide}.
}
\section{Accepted Parameters}{
\preformatted{reboot_instance(
instanceName = "string"
)
}
}
|
# SI2 index for a single population grouping.
#
# Args:
#   dd:  numeric matrix/data frame; rows are observations, columns are units;
#        dd[k, i] is the count for unit i in observation k.
#   ind: grouping vector (one entry per row of dd) assigning each observation
#        to a class; coerced to a factor.
#
# Prints, and invisibly returns, a one-column matrix "si" with one row per
# factor level: the mean fraction of OTHER classes each class co-occurs with,
# relative to its own active units.
SI2_onepop <- function(dd, ind) {
  fls <- as.factor(ind)
  m <- length(dd[1, ])       # number of units (columns of dd)
  a <- length(levels(fls))   # number of classes
  # popdd[i, j]: total of unit i summed over the rows belonging to class j.
  popdd <- array(0, dim = c(m, a))
  colnames(popdd) <- levels(fls)
  rownames(popdd) <- colnames(dd)
  for (i in seq_len(m)) {
    popdd[i, ] <- tapply(dd[, i], fls, sum)
  }
  # f[j]: number of units with a non-zero total in class j
  # (equivalent to the original per-column length(popdd[,j][popdd[,j]!=0])).
  f <- as.matrix(colSums(popdd != 0))
  # popaij[i, j]: number of units active (> 0) in BOTH class i and class j.
  # crossprod on the logical activity matrix yields exactly these pairwise
  # co-occurrence counts, replacing the original double loop.
  popaij <- crossprod(popdd > 0)
  # Co-occurrences of each class with the OTHER classes (diagonal removed).
  poptime <- as.matrix(diag(popaij))
  ej <- as.matrix(colSums(popaij) - poptime)
  efj <- ej / f
  # Average over the a - 1 other classes.
  # (Unused locals from the original -- popa and n -- were dropped.)
  si <- efj / (a - 1)
  colnames(si) <- c("si")
  # print() displays the result and returns it invisibly, preserving the
  # original behaviour (visible output plus an assignable return value).
  print(si)
}
|
/R/SI2_onepop.r
|
no_license
|
cran/flower
|
R
| false
| false
| 712
|
r
|
# SI2 index for a single population grouping.
#
# Args:
#   dd:  numeric matrix/data frame; rows are observations, columns are units;
#        dd[k, i] is the count for unit i in observation k.
#   ind: grouping vector (one entry per row of dd) assigning each observation
#        to a class; coerced to a factor.
#
# Prints, and invisibly returns, a one-column matrix "si" with one row per
# factor level: the mean fraction of OTHER classes each class co-occurs with,
# relative to its own active units.
SI2_onepop <- function(dd, ind) {
  fls <- as.factor(ind)
  m <- length(dd[1, ])       # number of units (columns of dd)
  a <- length(levels(fls))   # number of classes
  # popdd[i, j]: total of unit i summed over the rows belonging to class j.
  popdd <- array(0, dim = c(m, a))
  colnames(popdd) <- levels(fls)
  rownames(popdd) <- colnames(dd)
  for (i in seq_len(m)) {
    popdd[i, ] <- tapply(dd[, i], fls, sum)
  }
  # f[j]: number of units with a non-zero total in class j
  # (equivalent to the original per-column length(popdd[,j][popdd[,j]!=0])).
  f <- as.matrix(colSums(popdd != 0))
  # popaij[i, j]: number of units active (> 0) in BOTH class i and class j.
  # crossprod on the logical activity matrix yields exactly these pairwise
  # co-occurrence counts, replacing the original double loop.
  popaij <- crossprod(popdd > 0)
  # Co-occurrences of each class with the OTHER classes (diagonal removed).
  poptime <- as.matrix(diag(popaij))
  ej <- as.matrix(colSums(popaij) - poptime)
  efj <- ej / f
  # Average over the a - 1 other classes.
  # (Unused locals from the original -- popa and n -- were dropped.)
  si <- efj / (a - 1)
  colnames(si) <- c("si")
  # print() displays the result and returns it invisibly, preserving the
  # original behaviour (visible output plus an assignable return value).
  print(si)
}
|
# Problem 1
# The area of a triangle is 0.5 * base * height.
TrangleArea <- function(b, h){
  # Return the area directly; no intermediates needed.
  0.5 * b * h
}
TrangleArea(10, 9)  # example
# Problem 2
# Hand-rolled absolute value for numeric vectors.
myabs <- function(a){
  # Flip the sign of negative entries via vectorized logical indexing
  # instead of an element-by-element loop.
  out <- a
  neg <- out < 0
  out[neg] <- -out[neg]
  out
}
myabs(5)
myabs(-2.3)
myvector <- c(1.1, 2, 0, -4.3, 9, -12)
myabs(myvector)
# wooohooooo!!!!
#Problem 3
# First n Fibonacci-style terms starting from seed s (second term fixed at 1).
#
# Args:
#   n: how many terms to generate (>= 1).
#   s: value of the first term.
#
# Returns: numeric vector of length n.
#
# Fix: the original iterated over seq(3, n), which counts DOWN when n < 3
# (seq(3, 2) is c(3, 2)), indexing past the vector and referencing fib[0];
# the recurrence is now guarded so n = 1 and n = 2 work.
FibNumFunc <- function(n,s) {
  Fibonaccinumbers<- rep(0,n)
  Fibonaccinumbers[1] <- s
  if (n >= 2) Fibonaccinumbers[2] <- 1
  if (n >= 3) {
    for (i in 3:n){
      Fibonaccinumbers[i]<-Fibonaccinumbers[i-1] + Fibonaccinumbers[i-2]
    }
  }
  return(Fibonaccinumbers)
}
FibNumFunc(12,0)
# Problem 4
# Squared difference between x and y (vectorized; recycles as usual).
MyNumberFunc <- function(x, y) {
  (x - y)^2
}
MyNumberFunc(3, 5)  # example
Vec1 <- c(2, 4, 6)
MyNumberFunc(Vec1, 4)
# Problem 4b
# Hand-rolled arithmetic mean: sum of elements over the element count.
Whatisthemeaningofthis <- function(x) {
  sum(x) / length(x)
}
# Demonstrate the hand-rolled mean on a small vector.
Vec1 <- c(5, 15, 10)
Whatisthemeaningofthis(Vec1)
# NOTE(review): `x` is undefined at this point in the script, so the next
# two lines error when run top-to-bottom -- presumably a leftover from an
# earlier draft; confirm which object was intended.
Xvector <- as.vector(t(x))
Whatisthemeaningofthis(Xvector)
#Problem 4c
# Interactively pick the lab data file, then flatten it into one numeric
# vector (t() coerces the data frame to a matrix first).
Dataforlab07 <- read.csv(file.choose())
Dataforlab07 <- as.vector(t(Dataforlab07))
# Total sum of squares: sum of squared deviations of A from its mean.
#
# Args:
#   A: numeric vector.
#
# Returns: scalar sum of squared deviations.
#
# Relies on the helpers defined above: Whatisthemeaningofthis (mean) and
# MyNumberFunc (squared difference).
# Fix: the original loop evaluated MyNumberFunc(Totalmean, A) -- the FULL
# vector -- on every iteration just to take element i, making it O(n^2).
# MyNumberFunc is vectorized, so a single call suffices.
Totalsumofsquare <- function(A){
  Totalmean <- Whatisthemeaningofthis(A)
  Answer <- sum(MyNumberFunc(Totalmean, A))
  return(Answer)
}
# Total sum of squares for the loaded lab data.
Totalsumofsquare(Dataforlab07)
# Wow, this took me a lot longer than I thought
|
/Labs/Lab07.Carter.R
|
no_license
|
JavanCarter/CompBioLabsAndHomework
|
R
| false
| false
| 1,870
|
r
|
# Problem 1
# The area of a triangle is 0.5 * base * height.
TrangleArea <- function(b, h){
  base <- b
  height <- h
  TriangleArea <- base * height * 0.5
  return(TriangleArea)
}
TrangleArea(10,9) # example
# Problem 2
# Hand-rolled absolute value for numeric vectors.
myabs <- function(a){
  x <- a
  for (i in seq_along(x)){ # loop so the function handles whole vectors
    if (x[i] < 0){ # flip negative entries to their absolute value
      x[i] <- x[i] * -1
    }
  }
  return(x)
}
myabs(5)
myabs(-2.3)
myvector <- c(1.1, 2, 0, -4.3, 9, -12)
myabs(myvector)
# wooohooooo!!!!
# Problem 3
# First n Fibonacci-style terms with seed s (second term fixed at 1).
# Fix: the original looped over seq(3, n), which counts DOWN for n < 3 and
# indexed out of bounds; the recurrence is now guarded.
FibNumFunc <- function(n,s) {
  Fibonaccinumbers <- rep(0, n)
  Fibonaccinumbers[1] <- s
  if (n >= 2) Fibonaccinumbers[2] <- 1
  if (n >= 3) {
    for (i in 3:n){
      Fibonaccinumbers[i] <- Fibonaccinumbers[i-1] + Fibonaccinumbers[i-2]
    }
  }
  return(Fibonaccinumbers)
}
FibNumFunc(12,0)
# Problem 4
# Squared difference between x and y (vectorized).
MyNumberFunc <- function(x, y) {
  answer <- (x - y)^2
  return(answer)
}
MyNumberFunc(3,5) # example
Vec1 <- c(2, 4, 6)
MyNumberFunc(Vec1, 4)
# Problem 4b
# Hand-rolled arithmetic mean.
Whatisthemeaningofthis <- function(x) {
  answer <- sum(x)
  answer1 <- answer / length(x)
  return(answer1)
}
Vec1 <- c(5, 15, 10)
Whatisthemeaningofthis(Vec1)
# NOTE(review): `x` is undefined at this point, so the next two lines error
# when the script runs top-to-bottom -- presumably a leftover; confirm.
Xvector <- as.vector(t(x))
Whatisthemeaningofthis(Xvector)
# Problem 4c
# Interactively pick the lab data file, then flatten it into one vector.
Dataforlab07 <- read.csv(file.choose())
Dataforlab07 <- as.vector(t(Dataforlab07))
# Total sum of squares around the mean.
# Fix: the original recomputed the FULL squared-deviation vector inside the
# loop (O(n^2)); MyNumberFunc is vectorized, so one call suffices.
Totalsumofsquare <- function(A){
  Totalmean <- Whatisthemeaningofthis(A)
  Answer <- sum(MyNumberFunc(Totalmean, A))
  return(Answer)
}
Totalsumofsquare(Dataforlab07)
# Wow, this took me a lot longer than I thought
|
# Analysis of Flemish baby-name counts around artists' chart breakthroughs.
library(data.table)
library(ggplot2)
# NOTE(review): httr/XML/rvest are not used in this part of the script --
# presumably needed by a scraping step elsewhere; confirm before removing.
library(httr)
library(XML)
library(rvest)
# NOTE(review): rm(list=ls()) wipes the user's whole workspace; avoid in
# shared scripts (kept here to preserve the script's behaviour).
rm(list=ls())
##########################################
# Theme ##################################
##########################################
# Shared ggplot2 theme: white background, dashed grey grid, bold titles and
# 45-degree x-axis labels; applied to every plot below.
t <- theme(plot.title = element_text(face="bold", margin=margin(t = 15, r = 0, b = 15, l = 0, unit = "pt")),
axis.text.x = element_text(size=10,color='#000000',angle=45,hjust=1),
axis.text.y = element_text(size=10,color='#000000'),
axis.title.x = element_text(face="bold", size=10,color='#000000',margin=margin(t = 10, r = 0, b = 0, l = 0, unit = "pt")),
axis.title.y = element_text(face="bold", size=10,color='#000000',margin=margin(t = 0, r = 10, b = 0, l = 0, unit = "pt")),
panel.background = element_rect(fill='#ffffff', color='#a5a5a5',size=0.5),
panel.ontop = F,
panel.grid.major = element_line(color='#a5a5a5', linetype='dashed',size=0.2),
panel.grid.minor = element_line(color='#a5a5a5', linetype='dashed', size=0),
legend.text = element_text(size=10,color='#000000'),
legend.title = element_text(face='bold',size=10,color='#000000'),
legend.box.background = element_rect(fill='#ffffff', color='#ffffff', size=1.5),
strip.text = element_text(size=10,color='#000000', face='bold'),
strip.background = element_rect(colour = NA, fill = '#ffffff'))
# Colour palette used by the faceted plot.
pal <- c('#E02128','#ff0072','#1B2C3F','#3e21aa','#2672A4','#43A39E','#EB9E0F','#333745','#8f8f8f','#515151','#000000')
# Baby-name counts per year/region/gender; keep Flanders only.
names <- fread('names.csv',stringsAsFactors=F)
names <- names[order(year, region, gender, count)]
names <- names[region=='Flanders']
# Chart hits plus a pseudonym lookup, joined so each hit row carries the
# artist's first name (name1).
hits <- fread('hits.csv', stringsAsFactors=F)
pseudo <- fread('pseudo.csv', stringsAsFactors=F)
hits <- merge(x=hits,y=pseudo,by='artist_name',all.y=T)
rm(pseudo)
# Ad-hoc exploratory subsets (inspected interactively, not plotted below).
britney <- names[gender == 'female' & year == 2000]
britney <- britney[order(-count)]
susan <- names[gender == 'female' & name %in% c('Susan','Suzanne','Suzanna')]
# First chart year per artist/first-name pair, then attach to the name
# counts; year.x/year.y from the merge become year/start_year.
hits <- hits[,.(year=min(year)), by=.(artist_name,name1)]
names <- merge(names,hits,by.x='name',by.y='name1',all.y=T)
setnames(names,c('year.x','year.y'),c('year','start_year'))
# Years relative to the artist's breakthrough, plus a facet label.
names <- names[,relative_year := year - start_year]
names <- names[,label := paste0(name,' (',artist_name,')')]
# Candidate artist lists; only `selection` is actually used below.
big_selection <- c('Aaliyah','Alana Dante','Alicia Keys','Anouk','Belle Perez',
'Brahim','Britney Spears','Emilia','Spice Girls','Jennifer Lopez',
'Jessica Simpson','Kylie Minogue','Leona Lewis','Lily Allen','Lady Linn And Her Magnificent Seven',
'Natalia','Paris Hilton','Rihanna','Ronan Keating','Shakira','Shania Twain','Tonya')
selection <- c('Aaliyah','Alana Dante','Belle Perez','Anouk','Emilia','Lily Allen','Paris Hilton','Rihanna','Shakira','Shania Twain','Ronan Keating','Britney Spears')
names <- names[artist_name %in% selection]
# Plot 1: yearly counts of babies named Britney.
g <- ggplot(names[name=='Britney'],aes(x=as.character(year),y=count)) +
geom_bar(stat='identity', fill='#ff0072') +
t +
xlab('year') +
ylab('Britneys born') +
ggtitle('Plot 1: Babies given the name Britney')
g
ggsave('britney.png',g,width=48.8,height=27.4, units='cm')
# Plot 2: name counts by year relative to breakthrough (vertical line at 0),
# one facet per artist; Ronan and Britney are shown separately.
g1 <- ggplot(names[!(name %in% c('Ronan','Britney'))],aes(x=relative_year,y=count,fill=label, label=year)) +
geom_bar(stat='identity') +
geom_text(size=3,angle=90,nudge_y=5) +
geom_vline(xintercept=0, size=1.5) +
t +
scale_fill_manual(values=pal,name='name') +
facet_wrap(~label,ncol=5) +
xlim(-10,20) +
xlab('relative year') +
ylab('babies born with given name') +
ggtitle('Plot 2: Names given for a selection of artists') +
theme(legend.position="none")
g1
ggsave('artists.png',g1,width=48.8,height=27.4, units='cm')
# Plot 3: yearly counts of babies named Ronan.
g2 <- ggplot(names[name=='Ronan'],aes(x=as.character(year),y=count)) +
geom_bar(stat='identity', fill='#1B2C3F') +
t +
xlab('year') +
ylab('Ronans born') +
ggtitle('Plot 3: Babies given the name Ronan')
g2
ggsave('ronan.png',g2,width=48.8,height=27.4, units='cm')
|
/analysis.R
|
no_license
|
RoelPi/names
|
R
| false
| false
| 4,133
|
r
|
# Analysis of Flemish baby-name counts around artists' chart breakthroughs.
library(data.table)
library(ggplot2)
# NOTE(review): httr/XML/rvest are not used in this part of the script --
# presumably needed by a scraping step elsewhere; confirm before removing.
library(httr)
library(XML)
library(rvest)
# NOTE(review): rm(list=ls()) wipes the user's whole workspace; avoid in
# shared scripts (kept here to preserve the script's behaviour).
rm(list=ls())
##########################################
# Theme ##################################
##########################################
# Shared ggplot2 theme: white background, dashed grey grid, bold titles and
# 45-degree x-axis labels; applied to every plot below.
t <- theme(plot.title = element_text(face="bold", margin=margin(t = 15, r = 0, b = 15, l = 0, unit = "pt")),
axis.text.x = element_text(size=10,color='#000000',angle=45,hjust=1),
axis.text.y = element_text(size=10,color='#000000'),
axis.title.x = element_text(face="bold", size=10,color='#000000',margin=margin(t = 10, r = 0, b = 0, l = 0, unit = "pt")),
axis.title.y = element_text(face="bold", size=10,color='#000000',margin=margin(t = 0, r = 10, b = 0, l = 0, unit = "pt")),
panel.background = element_rect(fill='#ffffff', color='#a5a5a5',size=0.5),
panel.ontop = F,
panel.grid.major = element_line(color='#a5a5a5', linetype='dashed',size=0.2),
panel.grid.minor = element_line(color='#a5a5a5', linetype='dashed', size=0),
legend.text = element_text(size=10,color='#000000'),
legend.title = element_text(face='bold',size=10,color='#000000'),
legend.box.background = element_rect(fill='#ffffff', color='#ffffff', size=1.5),
strip.text = element_text(size=10,color='#000000', face='bold'),
strip.background = element_rect(colour = NA, fill = '#ffffff'))
# Colour palette used by the faceted plot.
pal <- c('#E02128','#ff0072','#1B2C3F','#3e21aa','#2672A4','#43A39E','#EB9E0F','#333745','#8f8f8f','#515151','#000000')
# Baby-name counts per year/region/gender; keep Flanders only.
names <- fread('names.csv',stringsAsFactors=F)
names <- names[order(year, region, gender, count)]
names <- names[region=='Flanders']
# Chart hits plus a pseudonym lookup, joined so each hit row carries the
# artist's first name (name1).
hits <- fread('hits.csv', stringsAsFactors=F)
pseudo <- fread('pseudo.csv', stringsAsFactors=F)
hits <- merge(x=hits,y=pseudo,by='artist_name',all.y=T)
rm(pseudo)
# Ad-hoc exploratory subsets (inspected interactively, not plotted below).
britney <- names[gender == 'female' & year == 2000]
britney <- britney[order(-count)]
susan <- names[gender == 'female' & name %in% c('Susan','Suzanne','Suzanna')]
# First chart year per artist/first-name pair, then attach to the name
# counts; year.x/year.y from the merge become year/start_year.
hits <- hits[,.(year=min(year)), by=.(artist_name,name1)]
names <- merge(names,hits,by.x='name',by.y='name1',all.y=T)
setnames(names,c('year.x','year.y'),c('year','start_year'))
# Years relative to the artist's breakthrough, plus a facet label.
names <- names[,relative_year := year - start_year]
names <- names[,label := paste0(name,' (',artist_name,')')]
# Candidate artist lists; only `selection` is actually used below.
big_selection <- c('Aaliyah','Alana Dante','Alicia Keys','Anouk','Belle Perez',
'Brahim','Britney Spears','Emilia','Spice Girls','Jennifer Lopez',
'Jessica Simpson','Kylie Minogue','Leona Lewis','Lily Allen','Lady Linn And Her Magnificent Seven',
'Natalia','Paris Hilton','Rihanna','Ronan Keating','Shakira','Shania Twain','Tonya')
selection <- c('Aaliyah','Alana Dante','Belle Perez','Anouk','Emilia','Lily Allen','Paris Hilton','Rihanna','Shakira','Shania Twain','Ronan Keating','Britney Spears')
names <- names[artist_name %in% selection]
# Plot 1: yearly counts of babies named Britney.
g <- ggplot(names[name=='Britney'],aes(x=as.character(year),y=count)) +
geom_bar(stat='identity', fill='#ff0072') +
t +
xlab('year') +
ylab('Britneys born') +
ggtitle('Plot 1: Babies given the name Britney')
g
ggsave('britney.png',g,width=48.8,height=27.4, units='cm')
# Plot 2: name counts by year relative to breakthrough (vertical line at 0),
# one facet per artist; Ronan and Britney are shown separately.
g1 <- ggplot(names[!(name %in% c('Ronan','Britney'))],aes(x=relative_year,y=count,fill=label, label=year)) +
geom_bar(stat='identity') +
geom_text(size=3,angle=90,nudge_y=5) +
geom_vline(xintercept=0, size=1.5) +
t +
scale_fill_manual(values=pal,name='name') +
facet_wrap(~label,ncol=5) +
xlim(-10,20) +
xlab('relative year') +
ylab('babies born with given name') +
ggtitle('Plot 2: Names given for a selection of artists') +
theme(legend.position="none")
g1
ggsave('artists.png',g1,width=48.8,height=27.4, units='cm')
# Plot 3: yearly counts of babies named Ronan.
g2 <- ggplot(names[name=='Ronan'],aes(x=as.character(year),y=count)) +
geom_bar(stat='identity', fill='#1B2C3F') +
t +
xlab('year') +
ylab('Ronans born') +
ggtitle('Plot 3: Babies given the name Ronan')
g2
ggsave('ronan.png',g2,width=48.8,height=27.4, units='cm')
|
########## Tidy Tuesday: Super Bowl Ads##########
##### Created by: Nikolas Yousefi #############
##### Updated on: 2021-03-02 ###############
### Load libraries #####################
library(tidyverse)
library(here)
library(tidytuesdayR)
library(ggeasy)
library(ghibli)
### Load data ##########################
youtube <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-03-02/youtube.csv')
View(youtube)
### Data and Graph ##########
youtube_clean <- youtube %>%
  drop_na(like_count) %>% # Dropping all NAs in the like count column
  select(funny, show_product_quickly, patriotic, celebrity, danger, animals, use_sex, like_count) # Choosing my groups of interest
youtube_longer <- youtube_clean %>%
  rename("Animals" = animals,
         "Celebrity" = celebrity,
         "Danger" = danger,
         "Funny" = funny,
         "Patriotic" = patriotic,
         "Shows product quickly" = show_product_quickly,
         "Uses sex" = use_sex) %>% # Capitalizing and removing underscores from the categories
  pivot_longer(cols = c(1:7),
               names_to = "variables",
               values_to = "video_inclusion") %>% # Pivoting longer to make it easier to visualize the data
  filter(video_inclusion == TRUE) %>% # Removing all FALSE results so only TRUE ones remain
  group_by(variables, video_inclusion) %>% # Grouping by aforementioned categories
  summarise(mean_likes = mean(like_count)) %>% # Taking the averages of the like counts per category
  ungroup() # Drop the residual grouping so downstream code sees a plain tibble
# Fix: the original chained ggsave() onto the plot with `+`, which relies on
# ggplot2 implicitly saving "the last plot" while the chain is still being
# built -- fragile, and an error in current ggplot2. Build the plot, print
# it, then save it explicitly via the `plot` argument.
superbowl_plot <- ggplot(youtube_longer, aes(x=variables,
                                             y=mean_likes,
                                             fill = video_inclusion)) + # Setting up the axes and fill in of the graph
  geom_col(show.legend = FALSE) + # Setting up a column graph and removing the legend
  coord_flip()+ # Flipping the coordinates so the X variables are on the Y axis and vice versa
  theme_bw() + # Choosing the basic black/white theme
  scale_fill_manual(values = ghibli_palette("MarnieDark2")[c(4)])+ # Selecting my color of choice for the columns
  labs(x = "Video Characteristics",
       y = "Average Likes", # Labeling my axes
       title = "Average Youtube likes per types of Super Bowl ads",
       caption = "data from rfordatascience/tidytuesday")+ # Labeling the axes and adding a title to the graph
  ggeasy::easy_center_title() # Centering the title
superbowl_plot # Display the plot
ggsave(here("TT_2021-03-02", "Output", "TT_SuperBowl_2021-03-02.png"),
       plot = superbowl_plot,
       width = 10, height = 6) # Saving the output to the desired folder at a specific size
|
/TT_2021-03-02/Scripts/TT_SuperBowlAds_2021-03-02_Script.R
|
no_license
|
nayousefi/TidyTuesday
|
R
| false
| false
| 2,494
|
r
|
########## Tidy Tuesday: Super Bowl Ads##########
##### Created by: Nikolas Yousefi #############
##### Updated on: 2021-03-02 ###############
### Load libraries #####################
library(tidyverse)
library(here)
library(tidytuesdayR)
library(ggeasy)
library(ghibli)
### Load data ##########################
youtube <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-03-02/youtube.csv')
View(youtube)
### Data and Graph ##########
youtube_clean <- youtube %>%
  drop_na(like_count) %>% # Dropping all NAs in the like count column
  select(funny, show_product_quickly, patriotic, celebrity, danger, animals, use_sex, like_count) # Choosing my groups of interest
youtube_longer <- youtube_clean %>%
  rename("Animals" = animals,
         "Celebrity" = celebrity,
         "Danger" = danger,
         "Funny" = funny,
         "Patriotic" = patriotic,
         "Shows product quickly" = show_product_quickly,
         "Uses sex" = use_sex) %>% # Capitalizing and removing underscores from the categories
  pivot_longer(cols = c(1:7),
               names_to = "variables",
               values_to = "video_inclusion") %>% # Pivoting longer to make it easier to visualize the data
  filter(video_inclusion == TRUE) %>% # Removing all FALSE results so only TRUE ones remain
  group_by(variables, video_inclusion) %>% # Grouping by aforementioned categories
  summarise(mean_likes = mean(like_count)) %>% # Taking the averages of the like counts per category
  ungroup() # Drop the residual grouping so downstream code sees a plain tibble
# Fix: the original chained ggsave() onto the plot with `+`, which relies on
# ggplot2 implicitly saving "the last plot" while the chain is still being
# built -- fragile, and an error in current ggplot2. Build the plot, print
# it, then save it explicitly via the `plot` argument.
superbowl_plot <- ggplot(youtube_longer, aes(x=variables,
                                             y=mean_likes,
                                             fill = video_inclusion)) + # Setting up the axes and fill in of the graph
  geom_col(show.legend = FALSE) + # Setting up a column graph and removing the legend
  coord_flip()+ # Flipping the coordinates so the X variables are on the Y axis and vice versa
  theme_bw() + # Choosing the basic black/white theme
  scale_fill_manual(values = ghibli_palette("MarnieDark2")[c(4)])+ # Selecting my color of choice for the columns
  labs(x = "Video Characteristics",
       y = "Average Likes", # Labeling my axes
       title = "Average Youtube likes per types of Super Bowl ads",
       caption = "data from rfordatascience/tidytuesday")+ # Labeling the axes and adding a title to the graph
  ggeasy::easy_center_title() # Centering the title
superbowl_plot # Display the plot
ggsave(here("TT_2021-03-02", "Output", "TT_SuperBowl_2021-03-02.png"),
       plot = superbowl_plot,
       width = 10, height = 6) # Saving the output to the desired folder at a specific size
|
# Descriptive statistics for the MSIS-5503 class data: central tendency,
# dispersion, percentiles, box plot and histogram of Income.
# Clear the Environment
# NOTE(review): rm(list=ls()) and the hard-coded setwd() make this script
# machine-specific and destructive to the user's workspace; kept as-is to
# preserve behaviour, but prefer relative paths / project files.
rm(list=ls())
# Read csv file as a DataFrame
#
setwd("C:\\Users\\sarathy\\Documents\\2019-Teaching\\Fall2019\\Fall2019-MSIS5503\\MSIS-5503-Data")
df <- read.table('ClassData.csv',
                 header = TRUE, sep = ',')
#Assign variable names to DataFrame Column objects
id <- df$ID
name <- df$Name
age <- df$Age
gender <-df$Gender
education <- df$Education
creditscore <-df$CreditScore
income <- as.numeric(df$Income)
networth <-as.numeric(df$NetWorth)
sales <-as.numeric(df$Sales)
# Mean of Income
print(paste("The mean of Income is: ", round(mean(income), 2)))
# Median of Income
print(paste("The median of Income is: ", round(median(income), 2)))
# Frequency table to obtain mode
table(income)
# Mean of Income for Males
print(paste("The mean of Male Income is: ", round(mean(income[gender=="M"]), 2)))
# Mean of Income for Females
print(paste("The mean of Female Income is: ", round(mean(income[gender=="F"]), 2)))
# Measures of dispersion: sum of squared deviations from the mean.
# Fix: the original hard-coded 10 observations (vector("numeric", 10),
# for (i in 1:10), and a divisor of 9); generalised to however many rows
# the data file actually has.
n_obs <- length(income)
deviation <- vector("numeric", n_obs)
sq_deviation <- vector("numeric", n_obs)
sum_sq_deviation = 0
for (i in seq_along(income)) {
  deviation[i] <- income[i] - mean(income)
  sq_deviation[i] <- deviation[i]^2
  sum_sq_deviation <- sq_deviation[i] + sum_sq_deviation
}
print(paste("The sum of squared deviations from mean is ", sum_sq_deviation))
# Sample variance and sd divide by n - 1.
print(paste("The sample variance is ", sum_sq_deviation/(n_obs - 1)))
print(paste("The sample variance using var() function is ", var(income)))
print(paste("The sample standard deviation is ", sqrt(sum_sq_deviation/(n_obs - 1))))
print(paste("The sample standard deviation using sd() is ", sd(income)))
# print sorted income to see the percentiles
print("Sorted Income")
print(income[order(income)])
quantile(income, probs = seq(0, 1, 0.05), na.rm = FALSE, names = TRUE, type = 2)
# Interquartile range and Tukey fences for outlier limits.
income_iqr <- IQR(income, type = 2)
print(income_iqr)
p25 <- quantile(income, probs = 0.25, na.rm=FALSE, names = TRUE, type=2)
p75 <- quantile(income, probs = 0.75, na.rm=FALSE, names = TRUE, type=2)
print(paste(min(income)," p25 ",p25, median(income), " p75 ",p75, " ", max(income)))
llimit <- p25 - 1.5*income_iqr
ulimit <- p75 + 1.5*income_iqr
print(paste("Lower limit ", llimit, " Upper limit ", ulimit))
print(boxplot.stats(income)$stats)
# Box plot of income (in thousands), annotated with the five-number summary.
boxplot(income/1000, main="Box Plot for Income",
        xlab="Income (in thousands) dollars",
        border="blue",
        col="green",
        horizontal = TRUE)
text(x=boxplot.stats(income/1000)$stats, labels = boxplot.stats(income/1000)$stats, y = 1.25)
# Histogram of income (in thousands), binned in 100k steps up to 500k.
hist(income/1000, main="Histogram of Income in 1000's dollars",
     xlab="Income in 1000's dollars",
     border="blue",
     col="green",
     xlim=c(0,500),
     las=2,
     breaks=seq(0,500,100))
|
/inferential_stats/Descriptive Statistics/DescriptiveStat.R
|
no_license
|
pickle-donut/RScripts
|
R
| false
| false
| 2,696
|
r
|
# Descriptive statistics for the MSIS-5503 class data: central tendency,
# dispersion, percentiles, box plot and histogram of Income.
# Clear the Environment
# NOTE(review): rm(list=ls()) and the hard-coded setwd() make this script
# machine-specific and destructive to the user's workspace; kept as-is to
# preserve behaviour, but prefer relative paths / project files.
rm(list=ls())
# Read csv file as a DataFrame
#
setwd("C:\\Users\\sarathy\\Documents\\2019-Teaching\\Fall2019\\Fall2019-MSIS5503\\MSIS-5503-Data")
df <- read.table('ClassData.csv',
                 header = TRUE, sep = ',')
#Assign variable names to DataFrame Column objects
id <- df$ID
name <- df$Name
age <- df$Age
gender <-df$Gender
education <- df$Education
creditscore <-df$CreditScore
income <- as.numeric(df$Income)
networth <-as.numeric(df$NetWorth)
sales <-as.numeric(df$Sales)
# Mean of Income
print(paste("The mean of Income is: ", round(mean(income), 2)))
# Median of Income
print(paste("The median of Income is: ", round(median(income), 2)))
# Frequency table to obtain mode
table(income)
# Mean of Income for Males
print(paste("The mean of Male Income is: ", round(mean(income[gender=="M"]), 2)))
# Mean of Income for Females
print(paste("The mean of Female Income is: ", round(mean(income[gender=="F"]), 2)))
# Measures of dispersion: sum of squared deviations from the mean.
# Fix: the original hard-coded 10 observations (vector("numeric", 10),
# for (i in 1:10), and a divisor of 9); generalised to however many rows
# the data file actually has.
n_obs <- length(income)
deviation <- vector("numeric", n_obs)
sq_deviation <- vector("numeric", n_obs)
sum_sq_deviation = 0
for (i in seq_along(income)) {
  deviation[i] <- income[i] - mean(income)
  sq_deviation[i] <- deviation[i]^2
  sum_sq_deviation <- sq_deviation[i] + sum_sq_deviation
}
print(paste("The sum of squared deviations from mean is ", sum_sq_deviation))
# Sample variance and sd divide by n - 1.
print(paste("The sample variance is ", sum_sq_deviation/(n_obs - 1)))
print(paste("The sample variance using var() function is ", var(income)))
print(paste("The sample standard deviation is ", sqrt(sum_sq_deviation/(n_obs - 1))))
print(paste("The sample standard deviation using sd() is ", sd(income)))
# print sorted income to see the percentiles
print("Sorted Income")
print(income[order(income)])
quantile(income, probs = seq(0, 1, 0.05), na.rm = FALSE, names = TRUE, type = 2)
# Interquartile range and Tukey fences for outlier limits.
income_iqr <- IQR(income, type = 2)
print(income_iqr)
p25 <- quantile(income, probs = 0.25, na.rm=FALSE, names = TRUE, type=2)
p75 <- quantile(income, probs = 0.75, na.rm=FALSE, names = TRUE, type=2)
print(paste(min(income)," p25 ",p25, median(income), " p75 ",p75, " ", max(income)))
llimit <- p25 - 1.5*income_iqr
ulimit <- p75 + 1.5*income_iqr
print(paste("Lower limit ", llimit, " Upper limit ", ulimit))
print(boxplot.stats(income)$stats)
# Box plot of income (in thousands), annotated with the five-number summary.
boxplot(income/1000, main="Box Plot for Income",
        xlab="Income (in thousands) dollars",
        border="blue",
        col="green",
        horizontal = TRUE)
text(x=boxplot.stats(income/1000)$stats, labels = boxplot.stats(income/1000)$stats, y = 1.25)
# Histogram of income (in thousands), binned in 100k steps up to 500k.
hist(income/1000, main="Histogram of Income in 1000's dollars",
     xlab="Income in 1000's dollars",
     border="blue",
     col="green",
     xlim=c(0,500),
     las=2,
     breaks=seq(0,500,100))
|
# Plot 3 of the Electric Power Consumption assignment: energy sub-metering
# over 1-2 Feb 2007, three series on one chart, saved to figure/plot3.png.
library(data.table)
#check whether the data file exists or not
dir.list <- dir()
total.matches <- length(dir.list[dir.list == 'household_power_consumption.txt'])
#download file if not available
if(total.matches == 0) {
url <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
destfile <- 'household_power_consumption.zip'
# NOTE(review): method='curl' requires a curl binary on PATH (typical on
# macOS/Linux); confirm the target platform before relying on this.
download.file(url, destfile, method='curl')
unzip(destfile)
}
#read the file as data.table (fread auto-detects the field separator)
dt = fread('household_power_consumption.txt')
#subset data on following dates: 1st and 2nd Feb, 2007
dts = subset(dt, Date %in% c('1/2/2007','2/2/2007'))
#convert Sub_metering_x columns to numeric
# (non-numeric missing-value markers -- presumably '?' in this data set,
# confirm -- become NA with a coercion warning)
dts$Sub_metering_1 = as.numeric(dts$Sub_metering_1)
dts$Sub_metering_2 = as.numeric(dts$Sub_metering_2)
dts$Sub_metering_3 = as.numeric(dts$Sub_metering_3)
#convert Date to date column: combine Date and Time into a POSIXct timestamp
dts$DateTime = as.POSIXct( paste(dts$Date,dts$Time),tz='GMT',format='%d/%m/%Y %H:%M:%S')
#open a png file device (the figure/ directory must already exist)
png(filename='figure/plot3.png',width=480, height=480, units='px')
#plot: draw an empty frame first, then one line per sub-meter
plot(dts$DateTime,dts$Sub_metering_1, type='n', xlab='', ylab='Energy sub metering')
points(dts$DateTime,dts$Sub_metering_1,col='black',type='l')
points(dts$DateTime,dts$Sub_metering_2,col='red',type='l')
points(dts$DateTime,dts$Sub_metering_3,col='blue',type='l')
legend(x='topright',lwd=2,legend=c('Sub_metering_1','Sub_metering_2','Sub_metering_3'), col=c('black','red','blue'))
#close the device
dev.off()
|
/rcode/plot3.R
|
no_license
|
moizmuhammad/ExData_Plotting1
|
R
| false
| false
| 1,447
|
r
|
library(data.table)
#check whether the data file exists or not
dir.list <- dir()
total.matches <- length(dir.list[dir.list == 'household_power_consumption.txt'])
#download file if not available
if(total.matches == 0) {
url <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
destfile <- 'household_power_consumption.zip'
download.file(url, destfile, method='curl')
unzip(destfile)
}
#read the file as data.table
dt = fread('household_power_consumption.txt')
#subset data on following dates: 1st and 2nd Feb, 2007
dts = subset(dt, Date %in% c('1/2/2007','2/2/2007'))
#convert Sub_metering_x columns to numeric
dts$Sub_metering_1 = as.numeric(dts$Sub_metering_1)
dts$Sub_metering_2 = as.numeric(dts$Sub_metering_2)
dts$Sub_metering_3 = as.numeric(dts$Sub_metering_3)
#convert Date to date column
dts$DateTime = as.POSIXct( paste(dts$Date,dts$Time),tz='GMT',format='%d/%m/%Y %H:%M:%S')
#open a png file device
png(filename='figure/plot3.png',width=480, height=480, units='px')
#plot
plot(dts$DateTime,dts$Sub_metering_1, type='n', xlab='', ylab='Energy sub metering')
points(dts$DateTime,dts$Sub_metering_1,col='black',type='l')
points(dts$DateTime,dts$Sub_metering_2,col='red',type='l')
points(dts$DateTime,dts$Sub_metering_3,col='blue',type='l')
legend(x='topright',lwd=2,legend=c('Sub_metering_1','Sub_metering_2','Sub_metering_3'), col=c('black','red','blue'))
#close the device
dev.off()
|
library(shiny)
library(rCharts)
library(rMaps)
shinyServer(function(input, output) {
output$myplot<- renderChart2({
d1 <- ichoropleth(
Population ~ name,
labels = TRUE,
data = info1,
pal = input$pal,
ncuts = input$ncuts,
legend = TRUE,
# animate = 'year',
# play = TRUE,
)
d1$set(
geographyConfig = list(
dataUrl = "https://dl.dropboxusercontent.com/u/13661419/states2.json",
highlightFillColor = 'orange',
highlightBorderColor = 'white',
highlightBorderWidth = 1.5,
popupOnHover = TRUE,
popupTemplate = "#! function(geography, data){
return '<div class=hoverinfo>' + geography.properties.name +
': ' + data.Population+ '</div>';
} !#"
),
scope = 'nuts2wgs2',
height = 1050,
legend = TRUE,
setProjection = '#! function( element, options ) {
var projection, path;
projection = d3.geo.mercator()
.scale(480)
.center([29.34034978813841, 65.012062015793])
path = d3.geo.path().projection( projection );
return {path: path, projection: projection};
} !#'
)
d1
})
}
)
|
/server.R
|
no_license
|
Arevaju/shiny-maps
|
R
| false
| false
| 1,283
|
r
|
library(shiny)
library(rCharts)
library(rMaps)
shinyServer(function(input, output) {
output$myplot<- renderChart2({
d1 <- ichoropleth(
Population ~ name,
labels = TRUE,
data = info1,
pal = input$pal,
ncuts = input$ncuts,
legend = TRUE,
# animate = 'year',
# play = TRUE,
)
d1$set(
geographyConfig = list(
dataUrl = "https://dl.dropboxusercontent.com/u/13661419/states2.json",
highlightFillColor = 'orange',
highlightBorderColor = 'white',
highlightBorderWidth = 1.5,
popupOnHover = TRUE,
popupTemplate = "#! function(geography, data){
return '<div class=hoverinfo>' + geography.properties.name +
': ' + data.Population+ '</div>';
} !#"
),
scope = 'nuts2wgs2',
height = 1050,
legend = TRUE,
setProjection = '#! function( element, options ) {
var projection, path;
projection = d3.geo.mercator()
.scale(480)
.center([29.34034978813841, 65.012062015793])
path = d3.geo.path().projection( projection );
return {path: path, projection: projection};
} !#'
)
d1
})
}
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_frame_functions.R
\name{df_winsorise}
\alias{df_winsorise}
\title{Winsorsise data}
\usage{
df_winsorise(x, variables = NULL, z = NULL, rz = NULL, centile = NULL)
}
\arguments{
\item{x}{Data frame}
\item{variables}{Names of variables to winsorsise. If not given, defaults to all numeric variables.}
\item{z}{Standard z score to winsorise to (mean+sd)}
\item{rz}{Robust z score to winsorise to (median+mad)}
\item{centile}{Centile to winsorsise to}
}
\value{
Data frame with the same variables as input.
}
\description{
Winsorsise data
}
\examples{
iris[1, 1] = 100
df_winsorise(iris, z = 2)
df_winsorise(iris, rz = 2)
df_winsorise(iris, centile = .99)
}
|
/man/df_winsorise.Rd
|
permissive
|
Deleetdk/kirkegaard
|
R
| false
| true
| 740
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_frame_functions.R
\name{df_winsorise}
\alias{df_winsorise}
\title{Winsorsise data}
\usage{
df_winsorise(x, variables = NULL, z = NULL, rz = NULL, centile = NULL)
}
\arguments{
\item{x}{Data frame}
\item{variables}{Names of variables to winsorsise. If not given, defaults to all numeric variables.}
\item{z}{Standard z score to winsorise to (mean+sd)}
\item{rz}{Robust z score to winsorise to (median+mad)}
\item{centile}{Centile to winsorsise to}
}
\value{
Data frame with the same variables as input.
}
\description{
Winsorsise data
}
\examples{
iris[1, 1] = 100
df_winsorise(iris, z = 2)
df_winsorise(iris, rz = 2)
df_winsorise(iris, centile = .99)
}
|
library(devtools)
install.packages("optparse")
devtools::install_github("l-gorman/rhomis-R-package")
|
/R/setup.R
|
no_license
|
ilri/rhomis-server-R-scripts
|
R
| false
| false
| 101
|
r
|
library(devtools)
install.packages("optparse")
devtools::install_github("l-gorman/rhomis-R-package")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/polygonal-2d.R
\name{plotable_tess}
\alias{plotable_tess}
\title{Create Plotable Tesselation from a Point Pattern.}
\usage{
plotable_tess(points)
}
\arguments{
\item{points}{2D point pattern object created by package
\code{\link{spatstat}}. It must have a window defined using a
binary mask.}
}
\value{
A simple features (\code{sf}) polygons dataframe. May be plotted
with \code{\link{geom_sf}}.
}
\description{
\code{plotable_tess} returns a dataframe of points defining the edges of the
tessellation constructed by \code{\link{polydeclust2d}}. These points can be
used to visualize the tessellations.
}
\examples{
library(ggplot2)
# Tessellation with a mask only may produce excessive edge weights.
dec <- polydeclust2d(samples$x, samples$y, mask = mask, estdomain = FALSE)
samples_dec <- cbind(samples, dec$weights)
ptess <- plotable_tess(dec$ppp)
ggplot() +
geom_raster(data = mask, aes(x, y), fill = "lightblue") +
geom_sf(data = ptess, fill = NA) +
geom_point(data = samples_dec, aes(x, y, size = weight))
# Using the `ripras` option reduces excessive edge weights.
dec <- polydeclust2d(samples$x, samples$y, mask = mask)
samples_dec <- cbind(samples, dec$weights)
ptess <- plotable_tess(dec$ppp)
ggplot() +
geom_raster(data = mask, aes(x, y), fill = "lightblue") +
geom_sf(data = ptess, fill = NA) +
geom_point(data = samples_dec, aes(x, y, size = weight))
}
|
/man/plotable_tess.Rd
|
no_license
|
alex-trueman/declustr
|
R
| false
| true
| 1,504
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/polygonal-2d.R
\name{plotable_tess}
\alias{plotable_tess}
\title{Create Plotable Tesselation from a Point Pattern.}
\usage{
plotable_tess(points)
}
\arguments{
\item{points}{2D point pattern object created by package
\code{\link{spatstat}}. It must have a window defined using a
binary mask.}
}
\value{
A simple features (\code{sf}) polygons dataframe. May be plotted
with \code{\link{geom_sf}}.
}
\description{
\code{plotable_tess} returns a dataframe of points defining the edges of the
tessellation constructed by \code{\link{polydeclust2d}}. These points can be
used to visualize the tessellations.
}
\examples{
library(ggplot2)
# Tessellation with a mask only may produce excessive edge weights.
dec <- polydeclust2d(samples$x, samples$y, mask = mask, estdomain = FALSE)
samples_dec <- cbind(samples, dec$weights)
ptess <- plotable_tess(dec$ppp)
ggplot() +
geom_raster(data = mask, aes(x, y), fill = "lightblue") +
geom_sf(data = ptess, fill = NA) +
geom_point(data = samples_dec, aes(x, y, size = weight))
# Using the `ripras` option reduces excessive edge weights.
dec <- polydeclust2d(samples$x, samples$y, mask = mask)
samples_dec <- cbind(samples, dec$weights)
ptess <- plotable_tess(dec$ppp)
ggplot() +
geom_raster(data = mask, aes(x, y), fill = "lightblue") +
geom_sf(data = ptess, fill = NA) +
geom_point(data = samples_dec, aes(x, y, size = weight))
}
|
#fit LBA to incong and none trials
setwd("C:/Users/toelch/Dropbox/DMC_Europe_2016-update/")
setwd("C:/Users/ulf/Dropbox/DMC_Europe_2016-update/")
# Current working directory must be set to the top-level folder
# containing the dmc and tutorial subfolders
source ("tutorial/file_utils.R")
load_model ("lba_B.R")
#prepare data
help.data<-subset(my.data,participant==unique(my.data$participant)[5])
help.data<-subset(help.data,norm3!="ONLY"&norm3!="SAME")
S<-ifelse(help.data$correct=="left","s1","s2")
SI<-ifelse(help.data$social==0,"none",
ifelse(help.data$social3=="valid",paste(help.data$correct,"SI",sep="_"),
paste(ifelse(help.data$correct=="left","right","left"),"SI",sep="_")))
R<-ifelse(help.data$key_resp_direction.keys=="left","r1","r2")
RT<-help.data$key_resp_direction.rt
data.model<-data.frame(S=S,R=R,RT=RT,SI=SI)
rm(S,R,RT,SI)
# Model flags
match.map <- list(M=list(s1="r1", s2="r2"))
responses <- c("r1","r2")
#
# models and priors
## SP ----
factors <- list(S=c("s1","s2"),SI=c("none","left_SI","right_SI"))
p.map <- list(A="1",B=c("SI","R"),mean_v="M",sd_v="1",t0="1",st0="1")
const <- c(st0=0,sd_v=1)
model.1<-model.dmc(p.map,factors,responses,match.map,const)
p.prior <- prior.p.dmc(
dists = c("tnorm","tnorm","tnorm","tnorm","tnorm","tnorm","tnorm","tnorm","tnorm","beta"),
p1=c(A=.3,
B.none.r1=0.3,B.none.r2=0.3,B.left_SI.r1=0.3,B.left_SI.r2=0.3,B.right_SI.r1=0.3,B.right_SI.r2=0.3,
mean_v.true=1,mean_v.false=0,t0=1),
p2=c(1,1,1,1,1,1,1,3,3,1),lower=c(0,0,0,0,0,0,0,NA,NA,.1),upper=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,1)
)
data.model <- data.model.dmc(data.model,model.1)
plot.cell.density(data.cell=data.model[data.model$S=="s1",],C="r1",xlim=c(0,2))
plot.cell.density(data.cell=data.model[data.model$S=="s2",],C="r2",xlim=c(0,2))
par(mfcol=c(2,3)); for (i in names(p.prior)) plot.prior(i,p.prior)
samples <- samples.dmc(nmc=400,p.prior,data.model)
samples <- run.dmc(samples, report = 25, cores=4,p.migrate=.05)
plot.dmc(samples,layout=c(3,4))
summary.dmc(samples)
samples2 <- run.dmc(samples.dmc(nmc=1000,samples=samples),
cores=4,report=25)
plot.dmc(samples2,layout=c(3,4),smooth=FALSE)
summary.dmc(samples)
|
/fit_LBA_with_SP_NONE_giter.R
|
no_license
|
sprocketsullivan/metaDPrime
|
R
| false
| false
| 2,237
|
r
|
#fit LBA to incong and none trials
setwd("C:/Users/toelch/Dropbox/DMC_Europe_2016-update/")
setwd("C:/Users/ulf/Dropbox/DMC_Europe_2016-update/")
# Current working directory must be set to the top-level folder
# containing the dmc and tutorial subfolders
source ("tutorial/file_utils.R")
load_model ("lba_B.R")
#prepare data
help.data<-subset(my.data,participant==unique(my.data$participant)[5])
help.data<-subset(help.data,norm3!="ONLY"&norm3!="SAME")
S<-ifelse(help.data$correct=="left","s1","s2")
SI<-ifelse(help.data$social==0,"none",
ifelse(help.data$social3=="valid",paste(help.data$correct,"SI",sep="_"),
paste(ifelse(help.data$correct=="left","right","left"),"SI",sep="_")))
R<-ifelse(help.data$key_resp_direction.keys=="left","r1","r2")
RT<-help.data$key_resp_direction.rt
data.model<-data.frame(S=S,R=R,RT=RT,SI=SI)
rm(S,R,RT,SI)
# Model flags
match.map <- list(M=list(s1="r1", s2="r2"))
responses <- c("r1","r2")
#
# models and priors
## SP ----
factors <- list(S=c("s1","s2"),SI=c("none","left_SI","right_SI"))
p.map <- list(A="1",B=c("SI","R"),mean_v="M",sd_v="1",t0="1",st0="1")
const <- c(st0=0,sd_v=1)
model.1<-model.dmc(p.map,factors,responses,match.map,const)
p.prior <- prior.p.dmc(
dists = c("tnorm","tnorm","tnorm","tnorm","tnorm","tnorm","tnorm","tnorm","tnorm","beta"),
p1=c(A=.3,
B.none.r1=0.3,B.none.r2=0.3,B.left_SI.r1=0.3,B.left_SI.r2=0.3,B.right_SI.r1=0.3,B.right_SI.r2=0.3,
mean_v.true=1,mean_v.false=0,t0=1),
p2=c(1,1,1,1,1,1,1,3,3,1),lower=c(0,0,0,0,0,0,0,NA,NA,.1),upper=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,1)
)
data.model <- data.model.dmc(data.model,model.1)
plot.cell.density(data.cell=data.model[data.model$S=="s1",],C="r1",xlim=c(0,2))
plot.cell.density(data.cell=data.model[data.model$S=="s2",],C="r2",xlim=c(0,2))
par(mfcol=c(2,3)); for (i in names(p.prior)) plot.prior(i,p.prior)
samples <- samples.dmc(nmc=400,p.prior,data.model)
samples <- run.dmc(samples, report = 25, cores=4,p.migrate=.05)
plot.dmc(samples,layout=c(3,4))
summary.dmc(samples)
samples2 <- run.dmc(samples.dmc(nmc=1000,samples=samples),
cores=4,report=25)
plot.dmc(samples2,layout=c(3,4),smooth=FALSE)
summary.dmc(samples)
|
#### Exploring Economic Variables related to Entrepreneurship ####
install.packages("tidycensus") # tidycensus is great for working with population characteristics, but not all available APIs
install.packages("censusapi") # censusapi package allows us to access all available APIs provided by the Census Bureau
install.packages("bea.R") # Bureau of Economic Analysis data access
library(tidycensus)
library(tidyverse)
library(censusapi)
library(bea.R)
library(ggplot2)
library(gridExtra)
## To interact with the API you need to get your API key from http://api.census.gov/data/key_signup.html ##
## A good tutorial to start off by the developer Kyle Walker is at https://walker-data.com/tidycensus/articles/basic-usage.html ##
## Getting started with censusapi package:
# https://github.com/hrecht/censusapi
# https://hrecht.github.io/censusapi/articles/getting-started.html
## After getting your key, activate it and set it up in tidycensus to begin querying ##
census_api_key("9001b546c2d77876a089119664dc25a4235eea37", install = T, overwrite = T)
# Add key to .Renviron
Sys.setenv(CENSUS_KEY="9001b546c2d77876a089119664dc25a4235eea37")
# Reload .Renviron
readRenviron("~/.Renviron")
# Check to see that the expected key is output in your R console
Sys.getenv("CENSUS_KEY")
#### Explore datasets ####
## County Business Pattern dataset
## Business Dynamic Statistics (Timeseries)
## Look at all available API endpoints ##
csapis <- listCensusApis()
View(csapis)
#### CBP Dataset ####
## County Business Pattern dataset variables and geography
View(listCensusMetadata(name = "cbp", vintage = 2017, type = "variables"))
View(listCensusMetadata(name = "cbp", vintage = 2016, type = "variables"))
listCensusMetadata(name = "cbp", vintage = 2017, type = "geography")
listCensusMetadata(name = "cbp", vintage = 2016, type = "geography")
cbp2016_var <- listCensusMetadata(name = "cbp", vintage = 2016, type = "variables")$name
## Now lets get the data of three states at the county level ##
cbp2017_txksme <- getCensus(name = "cbp",
vintage = 2017,
vars = c("STATE","COUNTY","GEO_ID","CD","LFO","NAICS2017","INDGROUP","INDLEVEL","SECTOR","SUBSECTOR","ESTAB","EMPSZES","EMP","EMP_N","PAYANN","PAYANN_N","PAYQTR1","PAYQTR1_N"),
region = "county:*",
regionin = "state:20,23,48")
cbp2018_txksme <- getCensus(name = "cbp",
vintage = 2018,
vars = c("EMP","EMP_N","EMPSZES","ESTAB","GEO_ID","INDGROUP","INDLEVEL","LFO","NAICS2017","PAYANN","PAYQTR1","SECTOR"),
region = "county:*",
regionin = "state:20,23,48")
## Drop unnecessary columns and add few additional ones that would be useful ##
cbp2017_txksme <- cbp2017_txksme %>%
select(-c("STATE","COUNTY")) %>%
mutate(state_name = case_when(state == "48" ~ "Texas",
state == "20" ~ "Kansas",
state == "23" ~ "Maine",
TRUE ~ state),
county_FIPS = paste(state, county, sep = ""))
cbp2017_txksme <- cbp2017_txksme %>%
select(-c("CD","PAYANN_N","PAYQTR1_N")) %>%
mutate(EMP = as.numeric(EMP),
EMPSZES = as.numeric(EMPSZES),
ESTAB = as.numeric(ESTAB),
PAYANN = as.numeric(PAYANN),
PAYQTR1 = as.numeric(PAYQTR1))
cbp2018_txksme <- cbp2018_txksme %>%
mutate(state.name = case_when(state == "48" ~ "Texas",
state == "20" ~ "Kansas",
state == "23" ~ "Maine",
TRUE ~ state),
county_FIPS = paste(state, county, sep = ""),
EMP = as.numeric(EMP),
EMPSZES = as.numeric(EMPSZES),
ESTAB = as.numeric(ESTAB),
PAYANN = as.numeric(PAYANN),
PAYQTR1 = as.numeric(PAYQTR1))
cbp2017_tx <- cbp2017_txksme %>% filter(state_name == "Texas")
cbp2018_tx <- cbp2018_txksme %>% filter(state.name == "Texas")
str(cbp2017_tx)
str(cbp2018_tx)
#### BDS Dataset ####
## Business Dynamic Statistics dataset variables and geography
bds_vars <- listCensusMetadata(name = "timeseries/bds/firms", type = "variables")
View(listCensusMetadata(name = "timeseries/bds/firms", type = "variables"))
View(listCensusMetadata(name = "timeseries/bds/firms", type = "geography"))
bds2016 <- getCensus(name = "timeseries/bds/firms",
vars = c("estabs_entry","estabs_entry_rate","firms","fsize","metro","sic1","state","year","year2"),
region = "state:20,23,48",
time = 2014)
#### Nonemployer Statistics ####
nonemp2018 <- getCensus(name = "nonemp",
vintage = 2018,
vars = c("GEO_ID","INDGROUP","INDLEVEL","LFO","NAICS2017","NESTAB","NRCPTOT","RCPSZES","SECTOR"),
region = "county:*",
regionin = "state:20,23,48"
)
nonemp2017 <- getCensus(name = "nonemp",
vintage = 2017,
vars = c("GEO_ID","INDGROUP","INDLEVEL","LFO","NAICS2017","NESTAB","NRCPTOT","RCPSZES","SECTOR"),
region = "county:*",
regionin = "state:20,23,48")
str(nonemp2017)
nonemp2017 <- nonemp2017 %>%
mutate(NESTAB = as.numeric(NESTAB),
NRCPTOT = as.numeric(NRCPTOT),
RCPSZES = as.numeric(RCPSZES))
nonemp2018 <- nonemp2018 %>%
mutate(NESTAB = as.numeric(NESTAB),
NRCPTOT = as.numeric(NRCPTOT),
RCPSZES = as.numeric(RCPSZES))
#### Aggregating CBP Dataset ####
## Explore descriptive statistics ##
summary(cbp2017_tx[c("ESTAB","EMP","EMPSZES","PAYANN","PAYQTR1")])
summary(cbp2018_tx[c("ESTAB","EMP","EMPSZES","PAYANN","PAYQTR1")])
cbp2017_tx_agg <- cbp2017_tx %>% group_by(county_FIPS) %>%
summarise(avg_estab_2017 = mean(ESTAB),
avg_emp_2017 = mean(EMP),
avg_empszes_2017 = mean(EMPSZES),
avg_payann_2017 = mean(PAYANN),
avg_payqtr1_2017 = mean(PAYQTR1),
total_estab_2017 = sum(ESTAB),
total_emp_2017 = sum(EMP),
pctnonemp_2017 = sum(EMP == 0) / n(),
pctsmallent_2017 = sum(EMP > 0 & EMP <= 10) / n(),
pctsmall_50_2017 = sum(EMP > 0 & EMP <= 50) / n(),
total_2017 = n())
cbp2018_tx_agg <- cbp2018_tx %>% group_by(county_FIPS) %>%
summarise(avg_estab_2018 = mean(ESTAB),
avg_emp_2018 = mean(EMP),
avg_empszes_2018 = mean(EMPSZES),
avg_payann_2018 = mean(PAYANN),
avg_payqtr1_2018 = mean(PAYQTR1),
total_estab_2018 = sum(ESTAB),
total_emp_2018 = sum(EMP),
pctnonemp_2018 = sum(EMP == 0) / n(),
pctsmallent_2018 = sum(EMP > 0 & EMP <= 10) / n(),
pctsmall_50_2018 = sum(EMP > 0 & EMP <= 50) / n(),
total_2018 = n())
str(cbp2017_tx_agg)
str(cbp2018_tx_agg)
summary(cbp2017_tx_agg[c("pctnonemp_2017","pctsmallent_2017","pctsmall_50_2017")])
summary(cbp2018_tx_agg[c("pctnonemp_2018","pctsmallent_2018","pctsmall_50_2018")])
#### Aggregating Nonemployer Statistics ####
summary(nonemp2017[c("NESTAB","NRCPTOT","RCPSZES")])
# Filter Texas and non-farm industries (Based on NAICS sector definitions, I'm going to filter out sector "11": Agriculture, Forestry, Fishing, and Hunting)
nonemp2017_tx <- nonemp2017 %>%
mutate(state.name = case_when(state == "48" ~ "Texas",
state == "20" ~ "Kansas",
state == "23" ~ "Maine",
TRUE ~ state),
county_FIPS = paste(state, county, sep = "")) %>%
filter(state.name == "Texas" & SECTOR != "11")
nonemp2018_tx <- nonemp2018 %>%
mutate(state.name = case_when(state == "48" ~ "Texas",
state == "20" ~ "Kansas",
state == "23" ~ "Maine",
TRUE ~ state),
county_FIPS = paste(state, county, sep = "")) %>%
filter(state.name == "Texas"& SECTOR != "11")
# Group by county and calculate variables
# Get the total employment and establishment numbers of 2017 from Economic Census data
totalemp_est_2017 <- getCensus(name = "ecnbasic",
vintage = 2017,
vars = c("EMP","ESTAB","FIRM","GEO_ID","NAICS2017","SECTOR"),
region = "county:*",
regionin = "state:20,23,48")
# Filter and aggregate sum
totalemp_est_2017_tx <- totalemp_est_2017 %>%
mutate(state.name = case_when(state == "48" ~ "Texas",
state == "20" ~ "Kansas",
state == "23" ~ "Maine",
TRUE ~ state),
county_FIPS = paste(state, county, sep = ""),
EMP = as.numeric(EMP), ESTAB = as.numeric(ESTAB), FIRM = as.numeric(FIRM)) %>%
filter(state.name == "Texas") %>%
group_by(county_FIPS) %>%
summarise(EMP = sum(EMP, na.rm = T),
ESTAB = sum(ESTAB, na.rm = T),
FIRM = sum(FIRM, na.rm = T))
nonemp2017_tx_agg <- nonemp2017_tx %>%
group_by(county_FIPS) %>%
summarise(avg_nestab_2017 = mean(NESTAB, na.rm = T),
avg_nrcptot_2017 = mean(NRCPTOT, na.rm = T),
median_nestab_2017 = median(NESTAB),
totalnest_2017 = sum(NESTAB),
total_ne_2017 = n())
nonemp2018_tx_agg <- nonemp2018 %>%
group_by(county_FIPS) %>%
summarise(avg_nestab_2018 = mean(NESTAB, na.rm = T),
avg_nrcptot_2018 = mean(NRCPTOT, na.rm = T),
median_nestab_2018 = median(NESTAB),
totalnest_2018 = sum(NESTAB),
total_ne_2018 = n())
## Merge the two datasets from 2017 and 2018 ##
nonemp_tx <- left_join(nonemp2017_tx_agg, nonemp2018_tx_agg, by = "county_FIPS")
## Merge the two datasets from 2017 and 2018 ##
cbp_tx <- left_join(cbp2017_tx_agg, cbp2018_tx_agg, by = "county_FIPS")
str(cbp_tx)
# Calculate change variables
cbp_tx <- cbp_tx %>%
mutate(estab_change_2017_2018 = avg_estab_2018 - avg_estab_2017,
emp_change_2017_2018 = avg_emp_2018 - avg_emp_2017,
empsze_change_2017_2018 = avg_empszes_2018 - avg_empszes_2017,
nonemp_change_2017_2018 = pctnonemp_2018 - pctnonemp_2017,
smallbz_change_2017_2018 = pctsmallent_2018 - pctsmallent_2017,
smallbz50_change_2017_2018 = pctsmall_50_2018 - pctsmall_50_2017)
str(cbp_tx)
## Merge to the combined dataset ##
str(tx_bb_entrepreneur_merged)
str(cbp_tx)
tx_bb_entrepreneur_merged_v2 <- tx_bb_entrepreneur_merged %>%
mutate(FIPS = as.character(FIPS)) %>%
left_join(., cbp_tx, by = c("FIPS" = "county_FIPS"))
#### Descriptive Exploration of the Entrepreneurship Variables ####
tx_bb_entrepreneur_merged_v2 %>%
select(IRR2010, pct_proprietors_employment_2017, venturedensitynov18, highlyactive_vdnov18, pctnonemp_2018,
pctsmallent_2018, pctsmall_50_2018) %>% PerformanceAnalytics::chart.Correlation(histogram = T)
|
/Codes/R/05_Entrepreneurship_measure_explore.R
|
permissive
|
jwroycechoi/broadband-entrepreneurship
|
R
| false
| false
| 11,148
|
r
|
#### Exploring Economic Variables related to Entrepreneurship ####
install.packages("tidycensus") # tidycensus is great for working with population characteristics, but not all available APIs
install.packages("censusapi") # censusapi package allows us to access all available APIs provided by the Census Bureau
install.packages("bea.R") # Bureau of Economic Analysis data access
library(tidycensus)
library(tidyverse)
library(censusapi)
library(bea.R)
library(ggplot2)
library(gridExtra)
## To interact with the API you need to get your API key from http://api.census.gov/data/key_signup.html ##
## A good tutorial to start off by the developer Kyle Walker is at https://walker-data.com/tidycensus/articles/basic-usage.html ##
## Getting started with censusapi package:
# https://github.com/hrecht/censusapi
# https://hrecht.github.io/censusapi/articles/getting-started.html
## After getting your key, activate it and set it up in tidycensus to begin querying ##
census_api_key("9001b546c2d77876a089119664dc25a4235eea37", install = T, overwrite = T)
# Add key to .Renviron
Sys.setenv(CENSUS_KEY="9001b546c2d77876a089119664dc25a4235eea37")
# Reload .Renviron
readRenviron("~/.Renviron")
# Check to see that the expected key is output in your R console
Sys.getenv("CENSUS_KEY")
#### Explore datasets ####
## County Business Pattern dataset
## Business Dynamic Statistics (Timeseries)
## Look at all available API endpoints ##
csapis <- listCensusApis()
View(csapis)
#### CBP Dataset ####
## County Business Pattern dataset variables and geography
View(listCensusMetadata(name = "cbp", vintage = 2017, type = "variables"))
View(listCensusMetadata(name = "cbp", vintage = 2016, type = "variables"))
listCensusMetadata(name = "cbp", vintage = 2017, type = "geography")
listCensusMetadata(name = "cbp", vintage = 2016, type = "geography")
cbp2016_var <- listCensusMetadata(name = "cbp", vintage = 2016, type = "variables")$name
## Now lets get the data of three states at the county level ##
cbp2017_txksme <- getCensus(name = "cbp",
vintage = 2017,
vars = c("STATE","COUNTY","GEO_ID","CD","LFO","NAICS2017","INDGROUP","INDLEVEL","SECTOR","SUBSECTOR","ESTAB","EMPSZES","EMP","EMP_N","PAYANN","PAYANN_N","PAYQTR1","PAYQTR1_N"),
region = "county:*",
regionin = "state:20,23,48")
cbp2018_txksme <- getCensus(name = "cbp",
vintage = 2018,
vars = c("EMP","EMP_N","EMPSZES","ESTAB","GEO_ID","INDGROUP","INDLEVEL","LFO","NAICS2017","PAYANN","PAYQTR1","SECTOR"),
region = "county:*",
regionin = "state:20,23,48")
## Drop unnecessary columns and add few additional ones that would be useful ##
cbp2017_txksme <- cbp2017_txksme %>%
select(-c("STATE","COUNTY")) %>%
mutate(state_name = case_when(state == "48" ~ "Texas",
state == "20" ~ "Kansas",
state == "23" ~ "Maine",
TRUE ~ state),
county_FIPS = paste(state, county, sep = ""))
cbp2017_txksme <- cbp2017_txksme %>%
select(-c("CD","PAYANN_N","PAYQTR1_N")) %>%
mutate(EMP = as.numeric(EMP),
EMPSZES = as.numeric(EMPSZES),
ESTAB = as.numeric(ESTAB),
PAYANN = as.numeric(PAYANN),
PAYQTR1 = as.numeric(PAYQTR1))
cbp2018_txksme <- cbp2018_txksme %>%
mutate(state.name = case_when(state == "48" ~ "Texas",
state == "20" ~ "Kansas",
state == "23" ~ "Maine",
TRUE ~ state),
county_FIPS = paste(state, county, sep = ""),
EMP = as.numeric(EMP),
EMPSZES = as.numeric(EMPSZES),
ESTAB = as.numeric(ESTAB),
PAYANN = as.numeric(PAYANN),
PAYQTR1 = as.numeric(PAYQTR1))
cbp2017_tx <- cbp2017_txksme %>% filter(state_name == "Texas")
cbp2018_tx <- cbp2018_txksme %>% filter(state.name == "Texas")
str(cbp2017_tx)
str(cbp2018_tx)
#### BDS Dataset ####
## Business Dynamic Statistics dataset variables and geography
bds_vars <- listCensusMetadata(name = "timeseries/bds/firms", type = "variables")
View(listCensusMetadata(name = "timeseries/bds/firms", type = "variables"))
View(listCensusMetadata(name = "timeseries/bds/firms", type = "geography"))
bds2016 <- getCensus(name = "timeseries/bds/firms",
vars = c("estabs_entry","estabs_entry_rate","firms","fsize","metro","sic1","state","year","year2"),
region = "state:20,23,48",
time = 2014)
#### Nonemployer Statistics ####
nonemp2018 <- getCensus(name = "nonemp",
vintage = 2018,
vars = c("GEO_ID","INDGROUP","INDLEVEL","LFO","NAICS2017","NESTAB","NRCPTOT","RCPSZES","SECTOR"),
region = "county:*",
regionin = "state:20,23,48"
)
nonemp2017 <- getCensus(name = "nonemp",
vintage = 2017,
vars = c("GEO_ID","INDGROUP","INDLEVEL","LFO","NAICS2017","NESTAB","NRCPTOT","RCPSZES","SECTOR"),
region = "county:*",
regionin = "state:20,23,48")
str(nonemp2017)
nonemp2017 <- nonemp2017 %>%
mutate(NESTAB = as.numeric(NESTAB),
NRCPTOT = as.numeric(NRCPTOT),
RCPSZES = as.numeric(RCPSZES))
nonemp2018 <- nonemp2018 %>%
mutate(NESTAB = as.numeric(NESTAB),
NRCPTOT = as.numeric(NRCPTOT),
RCPSZES = as.numeric(RCPSZES))
#### Aggregating CBP Dataset ####
## Explore descriptive statistics ##
summary(cbp2017_tx[c("ESTAB","EMP","EMPSZES","PAYANN","PAYQTR1")])
summary(cbp2018_tx[c("ESTAB","EMP","EMPSZES","PAYANN","PAYQTR1")])
cbp2017_tx_agg <- cbp2017_tx %>% group_by(county_FIPS) %>%
summarise(avg_estab_2017 = mean(ESTAB),
avg_emp_2017 = mean(EMP),
avg_empszes_2017 = mean(EMPSZES),
avg_payann_2017 = mean(PAYANN),
avg_payqtr1_2017 = mean(PAYQTR1),
total_estab_2017 = sum(ESTAB),
total_emp_2017 = sum(EMP),
pctnonemp_2017 = sum(EMP == 0) / n(),
pctsmallent_2017 = sum(EMP > 0 & EMP <= 10) / n(),
pctsmall_50_2017 = sum(EMP > 0 & EMP <= 50) / n(),
total_2017 = n())
cbp2018_tx_agg <- cbp2018_tx %>% group_by(county_FIPS) %>%
summarise(avg_estab_2018 = mean(ESTAB),
avg_emp_2018 = mean(EMP),
avg_empszes_2018 = mean(EMPSZES),
avg_payann_2018 = mean(PAYANN),
avg_payqtr1_2018 = mean(PAYQTR1),
total_estab_2018 = sum(ESTAB),
total_emp_2018 = sum(EMP),
pctnonemp_2018 = sum(EMP == 0) / n(),
pctsmallent_2018 = sum(EMP > 0 & EMP <= 10) / n(),
pctsmall_50_2018 = sum(EMP > 0 & EMP <= 50) / n(),
total_2018 = n())
str(cbp2017_tx_agg)
str(cbp2018_tx_agg)
summary(cbp2017_tx_agg[c("pctnonemp_2017","pctsmallent_2017","pctsmall_50_2017")])
summary(cbp2018_tx_agg[c("pctnonemp_2018","pctsmallent_2018","pctsmall_50_2018")])
#### Aggregating Nonemployer Statistics ####
summary(nonemp2017[c("NESTAB","NRCPTOT","RCPSZES")])
# Filter Texas and non-farm industries (Based on NAICS sector definitions, I'm going to filter out sector "11": Agriculture, Forestry, Fishing, and Hunting)
nonemp2017_tx <- nonemp2017 %>%
mutate(state.name = case_when(state == "48" ~ "Texas",
state == "20" ~ "Kansas",
state == "23" ~ "Maine",
TRUE ~ state),
county_FIPS = paste(state, county, sep = "")) %>%
filter(state.name == "Texas" & SECTOR != "11")
nonemp2018_tx <- nonemp2018 %>%
mutate(state.name = case_when(state == "48" ~ "Texas",
state == "20" ~ "Kansas",
state == "23" ~ "Maine",
TRUE ~ state),
county_FIPS = paste(state, county, sep = "")) %>%
filter(state.name == "Texas"& SECTOR != "11")
# Group by county and calculate variables
# Get the total employment and establishment numbers of 2017 from Economic Census data
totalemp_est_2017 <- getCensus(name = "ecnbasic",
vintage = 2017,
vars = c("EMP","ESTAB","FIRM","GEO_ID","NAICS2017","SECTOR"),
region = "county:*",
regionin = "state:20,23,48")
# Filter and aggregate sum
totalemp_est_2017_tx <- totalemp_est_2017 %>%
mutate(state.name = case_when(state == "48" ~ "Texas",
state == "20" ~ "Kansas",
state == "23" ~ "Maine",
TRUE ~ state),
county_FIPS = paste(state, county, sep = ""),
EMP = as.numeric(EMP), ESTAB = as.numeric(ESTAB), FIRM = as.numeric(FIRM)) %>%
filter(state.name == "Texas") %>%
group_by(county_FIPS) %>%
summarise(EMP = sum(EMP, na.rm = T),
ESTAB = sum(ESTAB, na.rm = T),
FIRM = sum(FIRM, na.rm = T))
nonemp2017_tx_agg <- nonemp2017_tx %>%
group_by(county_FIPS) %>%
summarise(avg_nestab_2017 = mean(NESTAB, na.rm = T),
avg_nrcptot_2017 = mean(NRCPTOT, na.rm = T),
median_nestab_2017 = median(NESTAB),
totalnest_2017 = sum(NESTAB),
total_ne_2017 = n())
nonemp2018_tx_agg <- nonemp2018 %>%
group_by(county_FIPS) %>%
summarise(avg_nestab_2018 = mean(NESTAB, na.rm = T),
avg_nrcptot_2018 = mean(NRCPTOT, na.rm = T),
median_nestab_2018 = median(NESTAB),
totalnest_2018 = sum(NESTAB),
total_ne_2018 = n())
## Merge the two datasets from 2017 and 2018 ##
nonemp_tx <- left_join(nonemp2017_tx_agg, nonemp2018_tx_agg, by = "county_FIPS")
## Merge the two datasets from 2017 and 2018 ##
cbp_tx <- left_join(cbp2017_tx_agg, cbp2018_tx_agg, by = "county_FIPS")
str(cbp_tx)
# Calculate change variables
cbp_tx <- cbp_tx %>%
mutate(estab_change_2017_2018 = avg_estab_2018 - avg_estab_2017,
emp_change_2017_2018 = avg_emp_2018 - avg_emp_2017,
empsze_change_2017_2018 = avg_empszes_2018 - avg_empszes_2017,
nonemp_change_2017_2018 = pctnonemp_2018 - pctnonemp_2017,
smallbz_change_2017_2018 = pctsmallent_2018 - pctsmallent_2017,
smallbz50_change_2017_2018 = pctsmall_50_2018 - pctsmall_50_2017)
str(cbp_tx)
## Merge to the combined dataset ##
str(tx_bb_entrepreneur_merged)
str(cbp_tx)
tx_bb_entrepreneur_merged_v2 <- tx_bb_entrepreneur_merged %>%
mutate(FIPS = as.character(FIPS)) %>%
left_join(., cbp_tx, by = c("FIPS" = "county_FIPS"))
#### Descriptive Exploration of the Entrepreneurship Variables ####
tx_bb_entrepreneur_merged_v2 %>%
select(IRR2010, pct_proprietors_employment_2017, venturedensitynov18, highlyactive_vdnov18, pctnonemp_2018,
pctsmallent_2018, pctsmall_50_2018) %>% PerformanceAnalytics::chart.Correlation(histogram = T)
|
## Train an XGBoost classifier on the Springleaf data and write a submission.
## NOTE(review): wiping the workspace inside a script is discouraged; kept for
## parity with the original workflow (duplicate rm(objects()) line removed).
rm(list = ls(all.names = TRUE))
#dev.off()
########################################################################
## This script merges the train_set data, with other relevant tables
########################################################################
########################################################################
## Run Path definition file ##
########################################################################
RScriptPath <- '~/Stat/Stat_Competitions/Kaggle_Springleaf_2015Oct/RScripts_Springleaf/'
Filename.Header <- paste0(RScriptPath, 'HeaderFile_Springleaf.R')
source(Filename.Header)
source(paste0(RScriptPath, 'fn_Library_Springleaf.R'))
RPlotPath <- '~/Stat/Stat_Competitions/Kaggle_Springleaf_2015Oct/Plots/'
DataPath <- '~/Stat/Stat_Competitions/Kaggle_Springleaf_2015Oct/Data/'
RDataPath <- '~/Stat/Stat_Competitions/Kaggle_Springleaf_2015Oct/RData/'
########################################################################
set.seed(1)  # reproducible model fit
cat("reading the train and test data\n")
Filename_train <- paste0(DataPath, 'train.csv')
train <- readr::read_csv(Filename_train)
Filename_test <- paste0(DataPath, 'test.csv')
test <- readr::read_csv(Filename_test)
## BUG FIX: '2:ncol(train)-1' parses as '(2:ncol(train)) - 1' = 1:(ncol-1),
## which kept the leading ID column as a feature.  Parenthesize so that both
## the first (ID) and last (target) columns are excluded.
feature.names <- names(train)[2:(ncol(train) - 1)]
cat("assuming text variables are categorical & replacing them with numeric ids\n")
for (f in feature.names) {
  if (is.character(train[[f]])) {
    # One shared level set so train/test get consistent integer codes.
    levels <- unique(c(train[[f]], test[[f]]))
    train[[f]] <- as.integer(factor(train[[f]], levels = levels))
    test[[f]] <- as.integer(factor(test[[f]], levels = levels))
  }
}
cat("replacing missing values with -1\n")
train[is.na(train)] <- -1
test[is.na(test)] <- -1
cat("training a XGBoost classifier\n")
clf <- xgboost(data = data.matrix(train[, feature.names]),
               label = train$target,
               nrounds = 40,
               objective = "binary:logistic",
               eval_metric = "auc")
gc()
cat("making predictions in batches due to 8GB memory limitation\n")
submission <- data.frame(ID = test$ID)
submission$target <- NA
# Score in chunks of 10k rows to bound peak memory.
for (rows in split(seq_len(nrow(test)), ceiling(seq_len(nrow(test)) / 10000))) {
  submission[rows, "target"] <- predict(clf, data.matrix(test[rows, feature.names]))
}
cat("saving the submission file\n")
Filename_submission <- paste0(RDataPath, "xgboost_submission_2.csv")
write_csv(submission, Filename_submission)
gc()
|
/RS2_XGBoost.R
|
no_license
|
snandi/Rscripts_Springleaf
|
R
| false
| false
| 2,462
|
r
|
## Train an XGBoost classifier on the Springleaf data and write a submission.
## NOTE(review): wiping the workspace inside a script is discouraged; kept for
## parity with the original workflow (duplicate rm(objects()) line removed).
rm(list = ls(all.names = TRUE))
#dev.off()
########################################################################
## This script merges the train_set data, with other relevant tables
########################################################################
########################################################################
## Run Path definition file ##
########################################################################
RScriptPath <- '~/Stat/Stat_Competitions/Kaggle_Springleaf_2015Oct/RScripts_Springleaf/'
Filename.Header <- paste0(RScriptPath, 'HeaderFile_Springleaf.R')
source(Filename.Header)
source(paste0(RScriptPath, 'fn_Library_Springleaf.R'))
RPlotPath <- '~/Stat/Stat_Competitions/Kaggle_Springleaf_2015Oct/Plots/'
DataPath <- '~/Stat/Stat_Competitions/Kaggle_Springleaf_2015Oct/Data/'
RDataPath <- '~/Stat/Stat_Competitions/Kaggle_Springleaf_2015Oct/RData/'
########################################################################
set.seed(1)  # reproducible model fit
cat("reading the train and test data\n")
Filename_train <- paste0(DataPath, 'train.csv')
train <- readr::read_csv(Filename_train)
Filename_test <- paste0(DataPath, 'test.csv')
test <- readr::read_csv(Filename_test)
## BUG FIX: '2:ncol(train)-1' parses as '(2:ncol(train)) - 1' = 1:(ncol-1),
## which kept the leading ID column as a feature.  Parenthesize so that both
## the first (ID) and last (target) columns are excluded.
feature.names <- names(train)[2:(ncol(train) - 1)]
cat("assuming text variables are categorical & replacing them with numeric ids\n")
for (f in feature.names) {
  if (is.character(train[[f]])) {
    # One shared level set so train/test get consistent integer codes.
    levels <- unique(c(train[[f]], test[[f]]))
    train[[f]] <- as.integer(factor(train[[f]], levels = levels))
    test[[f]] <- as.integer(factor(test[[f]], levels = levels))
  }
}
cat("replacing missing values with -1\n")
train[is.na(train)] <- -1
test[is.na(test)] <- -1
cat("training a XGBoost classifier\n")
clf <- xgboost(data = data.matrix(train[, feature.names]),
               label = train$target,
               nrounds = 40,
               objective = "binary:logistic",
               eval_metric = "auc")
gc()
cat("making predictions in batches due to 8GB memory limitation\n")
submission <- data.frame(ID = test$ID)
submission$target <- NA
# Score in chunks of 10k rows to bound peak memory.
for (rows in split(seq_len(nrow(test)), ceiling(seq_len(nrow(test)) / 10000))) {
  submission[rows, "target"] <- predict(clf, data.matrix(test[rows, feature.names]))
}
cat("saving the submission file\n")
Filename_submission <- paste0(RDataPath, "xgboost_submission_2.csv")
write_csv(submission, Filename_submission)
gc()
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Thin R wrappers around compiled routines registered by the 'unbiasedmcmc'
# package; each forwards its arguments unchanged via .Call().  All semantics
# live in the C++ sources.  Group notes below are inferred from routine names
# only -- confirm against src/ before relying on them.
# --- Bayesian lasso: presumably full-conditional draws (single and coupled chains).
blassoconditional <- function(Y, X, XtY, XtX, tau2, sigma2) {
.Call('_unbiasedmcmc_blassoconditional', PACKAGE = 'unbiasedmcmc', Y, X, XtY, XtX, tau2, sigma2)
}
blassoconditional_coupled <- function(Y, X, XtY, XtX, tau21, tau22, sigma21, sigma22) {
.Call('_unbiasedmcmc_blassoconditional_coupled', PACKAGE = 'unbiasedmcmc', Y, X, XtY, XtX, tau21, tau22, sigma21, sigma22)
}
# --- Coupled-chain post-processing: presumably converts coupled chains (lag, k, m)
# --- into empirical-measure estimators.
c_chains_to_measure_as_list_ <- function(c_chains, k, m) {
.Call('_unbiasedmcmc_c_chains_to_measure_as_list_', PACKAGE = 'unbiasedmcmc', c_chains, k, m)
}
estimator_bin_ <- function(c_chains, component, lower, upper, k, m, lag) {
.Call('_unbiasedmcmc_estimator_bin_', PACKAGE = 'unbiasedmcmc', c_chains, component, lower, upper, k, m, lag)
}
# --- Inverse-Gaussian sampling (marginal and coupled variants).
rinvgaussian_c <- function(n, mu, lambda) {
.Call('_unbiasedmcmc_rinvgaussian_c', PACKAGE = 'unbiasedmcmc', n, mu, lambda)
}
rinvgaussian_coupled_c <- function(mu1, mu2, lambda1, lambda2) {
.Call('_unbiasedmcmc_rinvgaussian_coupled_c', PACKAGE = 'unbiasedmcmc', mu1, mu2, lambda1, lambda2)
}
# --- Ising model: lattice sum and (coupled) Gibbs sweeps.
ising_sum_ <- function(state) {
.Call('_unbiasedmcmc_ising_sum_', PACKAGE = 'unbiasedmcmc', state)
}
ising_gibbs_sweep_ <- function(state, proba_beta) {
.Call('_unbiasedmcmc_ising_gibbs_sweep_', PACKAGE = 'unbiasedmcmc', state, proba_beta)
}
ising_coupled_gibbs_sweep_ <- function(state1, state2, proba_beta) {
.Call('_unbiasedmcmc_ising_coupled_gibbs_sweep_', PACKAGE = 'unbiasedmcmc', state1, state2, proba_beta)
}
# --- Helpers that, from their names, look like Polya-Gamma / logistic-regression
# --- machinery (w samplers, X*beta products, log-cosh) -- verify in src/.
sigma_ <- function(X, w) {
.Call('_unbiasedmcmc_sigma_', PACKAGE = 'unbiasedmcmc', X, w)
}
m_sigma_function_ <- function(omega, X, invB, KTkappaplusinvBtimesb) {
.Call('_unbiasedmcmc_m_sigma_function_', PACKAGE = 'unbiasedmcmc', omega, X, invB, KTkappaplusinvBtimesb)
}
logcosh <- function(x) {
.Call('_unbiasedmcmc_logcosh', PACKAGE = 'unbiasedmcmc', x)
}
xbeta_ <- function(X, beta) {
.Call('_unbiasedmcmc_xbeta_', PACKAGE = 'unbiasedmcmc', X, beta)
}
w_rejsamplerC <- function(beta1, beta2, X) {
.Call('_unbiasedmcmc_w_rejsamplerC', PACKAGE = 'unbiasedmcmc', beta1, beta2, X)
}
w_max_couplingC <- function(beta1, beta2, X) {
.Call('_unbiasedmcmc_w_max_couplingC', PACKAGE = 'unbiasedmcmc', beta1, beta2, X)
}
# --- Multivariate normal sampling / density, plus maximal and reflection
# --- couplings of MVN proposals.
fast_rmvnorm_ <- function(nsamples, mean, covariance) {
.Call('_unbiasedmcmc_fast_rmvnorm_', PACKAGE = 'unbiasedmcmc', nsamples, mean, covariance)
}
fast_rmvnorm_cholesky_ <- function(nsamples, mean, cholesky) {
.Call('_unbiasedmcmc_fast_rmvnorm_cholesky_', PACKAGE = 'unbiasedmcmc', nsamples, mean, cholesky)
}
fast_dmvnorm_ <- function(x, mean, covariance) {
.Call('_unbiasedmcmc_fast_dmvnorm_', PACKAGE = 'unbiasedmcmc', x, mean, covariance)
}
fast_dmvnorm_cholesky_inverse_ <- function(x, mean, cholesky_inverse) {
.Call('_unbiasedmcmc_fast_dmvnorm_cholesky_inverse_', PACKAGE = 'unbiasedmcmc', x, mean, cholesky_inverse)
}
rmvnorm_max_coupling_ <- function(mu1, mu2, Sigma1, Sigma2) {
.Call('_unbiasedmcmc_rmvnorm_max_coupling_', PACKAGE = 'unbiasedmcmc', mu1, mu2, Sigma1, Sigma2)
}
rmvnorm_max_coupling_cholesky <- function(mu1, mu2, Cholesky1, Cholesky2, Cholesky_inverse1, Cholesky_inverse2) {
.Call('_unbiasedmcmc_rmvnorm_max_coupling_cholesky', PACKAGE = 'unbiasedmcmc', mu1, mu2, Cholesky1, Cholesky2, Cholesky_inverse1, Cholesky_inverse2)
}
rmvnorm_reflection_max_coupling_ <- function(mu1, mu2, Sigma_chol, inv_Sigma_chol) {
.Call('_unbiasedmcmc_rmvnorm_reflection_max_coupling_', PACKAGE = 'unbiasedmcmc', mu1, mu2, Sigma_chol, inv_Sigma_chol)
}
# --- Remaining helpers: presumably the propensity-score and variable-selection
# --- examples shipped with the package.
beta2e_ <- function(beta, C) {
.Call('_unbiasedmcmc_beta2e_', PACKAGE = 'unbiasedmcmc', beta, C)
}
cut_in_fifth_ <- function(x) {
.Call('_unbiasedmcmc_cut_in_fifth_', PACKAGE = 'unbiasedmcmc', x)
}
propensity_module2_loglik2_ <- function(theta1s, theta2s, X, C, Y) {
.Call('_unbiasedmcmc_propensity_module2_loglik2_', PACKAGE = 'unbiasedmcmc', theta1s, theta2s, X, C, Y)
}
prune_measure_ <- function(df) {
.Call('_unbiasedmcmc_prune_measure_', PACKAGE = 'unbiasedmcmc', df)
}
sample_pair01 <- function(selection) {
.Call('_unbiasedmcmc_sample_pair01', PACKAGE = 'unbiasedmcmc', selection)
}
marginal_likelihood_c_2 <- function(selection, X, Y, Y2, g) {
.Call('_unbiasedmcmc_marginal_likelihood_c_2', PACKAGE = 'unbiasedmcmc', selection, X, Y, Y2, g)
}
|
/R/RcppExports.R
|
no_license
|
shizelong1985/unbiasedmcmc
|
R
| false
| false
| 4,417
|
r
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Thin R wrappers around compiled routines registered by the 'unbiasedmcmc'
# package; each forwards its arguments unchanged via .Call().  All semantics
# live in the C++ sources.  Group notes below are inferred from routine names
# only -- confirm against src/ before relying on them.
# --- Bayesian lasso: presumably full-conditional draws (single and coupled chains).
blassoconditional <- function(Y, X, XtY, XtX, tau2, sigma2) {
.Call('_unbiasedmcmc_blassoconditional', PACKAGE = 'unbiasedmcmc', Y, X, XtY, XtX, tau2, sigma2)
}
blassoconditional_coupled <- function(Y, X, XtY, XtX, tau21, tau22, sigma21, sigma22) {
.Call('_unbiasedmcmc_blassoconditional_coupled', PACKAGE = 'unbiasedmcmc', Y, X, XtY, XtX, tau21, tau22, sigma21, sigma22)
}
# --- Coupled-chain post-processing: presumably converts coupled chains (lag, k, m)
# --- into empirical-measure estimators.
c_chains_to_measure_as_list_ <- function(c_chains, k, m) {
.Call('_unbiasedmcmc_c_chains_to_measure_as_list_', PACKAGE = 'unbiasedmcmc', c_chains, k, m)
}
estimator_bin_ <- function(c_chains, component, lower, upper, k, m, lag) {
.Call('_unbiasedmcmc_estimator_bin_', PACKAGE = 'unbiasedmcmc', c_chains, component, lower, upper, k, m, lag)
}
# --- Inverse-Gaussian sampling (marginal and coupled variants).
rinvgaussian_c <- function(n, mu, lambda) {
.Call('_unbiasedmcmc_rinvgaussian_c', PACKAGE = 'unbiasedmcmc', n, mu, lambda)
}
rinvgaussian_coupled_c <- function(mu1, mu2, lambda1, lambda2) {
.Call('_unbiasedmcmc_rinvgaussian_coupled_c', PACKAGE = 'unbiasedmcmc', mu1, mu2, lambda1, lambda2)
}
# --- Ising model: lattice sum and (coupled) Gibbs sweeps.
ising_sum_ <- function(state) {
.Call('_unbiasedmcmc_ising_sum_', PACKAGE = 'unbiasedmcmc', state)
}
ising_gibbs_sweep_ <- function(state, proba_beta) {
.Call('_unbiasedmcmc_ising_gibbs_sweep_', PACKAGE = 'unbiasedmcmc', state, proba_beta)
}
ising_coupled_gibbs_sweep_ <- function(state1, state2, proba_beta) {
.Call('_unbiasedmcmc_ising_coupled_gibbs_sweep_', PACKAGE = 'unbiasedmcmc', state1, state2, proba_beta)
}
# --- Helpers that, from their names, look like Polya-Gamma / logistic-regression
# --- machinery (w samplers, X*beta products, log-cosh) -- verify in src/.
sigma_ <- function(X, w) {
.Call('_unbiasedmcmc_sigma_', PACKAGE = 'unbiasedmcmc', X, w)
}
m_sigma_function_ <- function(omega, X, invB, KTkappaplusinvBtimesb) {
.Call('_unbiasedmcmc_m_sigma_function_', PACKAGE = 'unbiasedmcmc', omega, X, invB, KTkappaplusinvBtimesb)
}
logcosh <- function(x) {
.Call('_unbiasedmcmc_logcosh', PACKAGE = 'unbiasedmcmc', x)
}
xbeta_ <- function(X, beta) {
.Call('_unbiasedmcmc_xbeta_', PACKAGE = 'unbiasedmcmc', X, beta)
}
w_rejsamplerC <- function(beta1, beta2, X) {
.Call('_unbiasedmcmc_w_rejsamplerC', PACKAGE = 'unbiasedmcmc', beta1, beta2, X)
}
w_max_couplingC <- function(beta1, beta2, X) {
.Call('_unbiasedmcmc_w_max_couplingC', PACKAGE = 'unbiasedmcmc', beta1, beta2, X)
}
# --- Multivariate normal sampling / density, plus maximal and reflection
# --- couplings of MVN proposals.
fast_rmvnorm_ <- function(nsamples, mean, covariance) {
.Call('_unbiasedmcmc_fast_rmvnorm_', PACKAGE = 'unbiasedmcmc', nsamples, mean, covariance)
}
fast_rmvnorm_cholesky_ <- function(nsamples, mean, cholesky) {
.Call('_unbiasedmcmc_fast_rmvnorm_cholesky_', PACKAGE = 'unbiasedmcmc', nsamples, mean, cholesky)
}
fast_dmvnorm_ <- function(x, mean, covariance) {
.Call('_unbiasedmcmc_fast_dmvnorm_', PACKAGE = 'unbiasedmcmc', x, mean, covariance)
}
fast_dmvnorm_cholesky_inverse_ <- function(x, mean, cholesky_inverse) {
.Call('_unbiasedmcmc_fast_dmvnorm_cholesky_inverse_', PACKAGE = 'unbiasedmcmc', x, mean, cholesky_inverse)
}
rmvnorm_max_coupling_ <- function(mu1, mu2, Sigma1, Sigma2) {
.Call('_unbiasedmcmc_rmvnorm_max_coupling_', PACKAGE = 'unbiasedmcmc', mu1, mu2, Sigma1, Sigma2)
}
rmvnorm_max_coupling_cholesky <- function(mu1, mu2, Cholesky1, Cholesky2, Cholesky_inverse1, Cholesky_inverse2) {
.Call('_unbiasedmcmc_rmvnorm_max_coupling_cholesky', PACKAGE = 'unbiasedmcmc', mu1, mu2, Cholesky1, Cholesky2, Cholesky_inverse1, Cholesky_inverse2)
}
rmvnorm_reflection_max_coupling_ <- function(mu1, mu2, Sigma_chol, inv_Sigma_chol) {
.Call('_unbiasedmcmc_rmvnorm_reflection_max_coupling_', PACKAGE = 'unbiasedmcmc', mu1, mu2, Sigma_chol, inv_Sigma_chol)
}
# --- Remaining helpers: presumably the propensity-score and variable-selection
# --- examples shipped with the package.
beta2e_ <- function(beta, C) {
.Call('_unbiasedmcmc_beta2e_', PACKAGE = 'unbiasedmcmc', beta, C)
}
cut_in_fifth_ <- function(x) {
.Call('_unbiasedmcmc_cut_in_fifth_', PACKAGE = 'unbiasedmcmc', x)
}
propensity_module2_loglik2_ <- function(theta1s, theta2s, X, C, Y) {
.Call('_unbiasedmcmc_propensity_module2_loglik2_', PACKAGE = 'unbiasedmcmc', theta1s, theta2s, X, C, Y)
}
prune_measure_ <- function(df) {
.Call('_unbiasedmcmc_prune_measure_', PACKAGE = 'unbiasedmcmc', df)
}
sample_pair01 <- function(selection) {
.Call('_unbiasedmcmc_sample_pair01', PACKAGE = 'unbiasedmcmc', selection)
}
marginal_likelihood_c_2 <- function(selection, X, Y, Y2, g) {
.Call('_unbiasedmcmc_marginal_likelihood_c_2', PACKAGE = 'unbiasedmcmc', selection, X, Y, Y2, g)
}
|
## Kobe Bryant shot-selection: random-forest model on made/missed shots.
## NOTE(review): setwd() in a script is discouraged; kept because read.csv
## below relies on this working directory.
setwd("E:/Computer Engg/Machine learning/RScripts")
train <- read.csv("data.csv")
## Rows with a known outcome are training data; NA outcomes are to predict.
train_data <- train[!is.na(train$shot_made_flag), ]
test_data <- train[is.na(train$shot_made_flag), ]
## Drop identifier / redundant columns from the training set only;
## test_data keeps shot_id because the submission file needs it.
train_data$game_event_id <- NULL
train_data$game_id <- NULL
train_data$team_id <- NULL
train_data$team_name <- NULL
train_data$game_date <- NULL
train_data$matchup <- NULL
train_data$shot_id <- NULL
## BUG FIX: the original called as.numeric(factor(x)) on train and test
## separately, so the same category could map to different integer codes in
## the two sets.  Build one shared level set from the full data instead.
categorical_cols <- c("action_type", "combined_shot_type", "season",
                      "shot_type", "shot_zone_area", "shot_zone_basic",
                      "shot_zone_range", "opponent")
for (col in categorical_cols) {
  shared_levels <- sort(unique(as.character(train[[col]])))
  train_data[[col]] <- as.numeric(factor(as.character(train_data[[col]]),
                                         levels = shared_levels))
  test_data[[col]] <- as.numeric(factor(as.character(test_data[[col]]),
                                        levels = shared_levels))
}
library(randomForest)
## Feature engineering: Euclidean shot distance and total seconds remaining.
train_data$dist <- sqrt(train_data$loc_x^2 + train_data$loc_y^2)
test_data$dist <- sqrt(test_data$loc_x^2 + test_data$loc_y^2)
train_data$timeleft <- train_data$minutes_remaining * 60 + train_data$seconds_remaining
test_data$timeleft <- test_data$minutes_remaining * 60 + test_data$seconds_remaining
train_data$seconds_remaining <- NULL
test_data$seconds_remaining <- NULL
train_data$minutes_remaining <- NULL
test_data$minutes_remaining <- NULL
## shot_made_flag is numeric 0/1, so this fits a regression forest; the
## predictions are therefore in [0, 1], which is what the submission expects.
forest <- randomForest(shot_made_flag ~ ., data = train_data,
                       importance = TRUE, ntree = 800)
my_prediction <- predict(forest, test_data)
answer <- data.frame(shot_id = test_data$shot_id, shot_made_flag = my_prediction)
write.csv(answer, file = "check.csv", row.names = FALSE)
|
/Kaggle/BasketBallgoal/bb.R
|
no_license
|
PrajwalaTM/Machine-Learning
|
R
| false
| false
| 2,398
|
r
|
## Kobe Bryant shot-selection: random-forest model on made/missed shots.
## NOTE(review): setwd() in a script is discouraged; kept because read.csv
## below relies on this working directory.
setwd("E:/Computer Engg/Machine learning/RScripts")
train <- read.csv("data.csv")
## Rows with a known outcome are training data; NA outcomes are to predict.
train_data <- train[!is.na(train$shot_made_flag), ]
test_data <- train[is.na(train$shot_made_flag), ]
## Drop identifier / redundant columns from the training set only;
## test_data keeps shot_id because the submission file needs it.
train_data$game_event_id <- NULL
train_data$game_id <- NULL
train_data$team_id <- NULL
train_data$team_name <- NULL
train_data$game_date <- NULL
train_data$matchup <- NULL
train_data$shot_id <- NULL
## BUG FIX: the original called as.numeric(factor(x)) on train and test
## separately, so the same category could map to different integer codes in
## the two sets.  Build one shared level set from the full data instead.
categorical_cols <- c("action_type", "combined_shot_type", "season",
                      "shot_type", "shot_zone_area", "shot_zone_basic",
                      "shot_zone_range", "opponent")
for (col in categorical_cols) {
  shared_levels <- sort(unique(as.character(train[[col]])))
  train_data[[col]] <- as.numeric(factor(as.character(train_data[[col]]),
                                         levels = shared_levels))
  test_data[[col]] <- as.numeric(factor(as.character(test_data[[col]]),
                                        levels = shared_levels))
}
library(randomForest)
## Feature engineering: Euclidean shot distance and total seconds remaining.
train_data$dist <- sqrt(train_data$loc_x^2 + train_data$loc_y^2)
test_data$dist <- sqrt(test_data$loc_x^2 + test_data$loc_y^2)
train_data$timeleft <- train_data$minutes_remaining * 60 + train_data$seconds_remaining
test_data$timeleft <- test_data$minutes_remaining * 60 + test_data$seconds_remaining
train_data$seconds_remaining <- NULL
test_data$seconds_remaining <- NULL
train_data$minutes_remaining <- NULL
test_data$minutes_remaining <- NULL
## shot_made_flag is numeric 0/1, so this fits a regression forest; the
## predictions are therefore in [0, 1], which is what the submission expects.
forest <- randomForest(shot_made_flag ~ ., data = train_data,
                       importance = TRUE, ntree = 800)
my_prediction <- predict(forest, test_data)
answer <- data.frame(shot_id = test_data$shot_id, shot_made_flag = my_prediction)
write.csv(answer, file = "check.csv", row.names = FALSE)
|
% Generated by roxygen2 (4.1.0.9001): do not edit by hand
% Please edit documentation in R/makenames_MSF.R
\name{makenames_MSF}
\alias{makenames_MSF}
\title{Create an expression with special characters (like Greek symbols) from a common dermatology gene name.}
\usage{
makenames_MSF(vs)
}
\arguments{
\item{vs}{A gene name.}
}
\description{
very useful for title and axis in plots.
}
\examples{
makenames_MSF('IL17')
}
|
/man/makenames_MSF.Rd
|
no_license
|
mssm-msf-2019/BiostatsALL
|
R
| false
| false
| 454
|
rd
|
% Generated by roxygen2 (4.1.0.9001): do not edit by hand
% Please edit documentation in R/makenames_MSF.R
\name{makenames_MSF}
\alias{makenames_MSF}
\title{Create an expression with special characters (like Greek symbols) from a common dermatology gene name.}
\usage{
makenames_MSF(vs)
}
\arguments{
\item{vs}{A gene name.}
}
\description{
very useful for title and axis in plots.
}
\examples{
makenames_MSF('IL17')
}
|
# Load the Advertising data set (TV/radio/newspaper spend and sales).
ad = read.csv("data/Advertising.csv")
#' Multiple linear regression via the normal equations.
#'
#' Computes the OLS coefficients beta_hat = (X'X)^{-1} X'y, prints RSS, TSS,
#' R^2 and the residual standard error, and returns the coefficients.
#'
#' @param x Design matrix (include a column of 1s for the intercept).
#' @param y Response vector or one-column matrix.
#' @return The coefficient vector (p x 1 matrix), invisibly.
mlr <- function(x, y) {
  beta_hat <- solve(t(x) %*% x) %*% t(x) %*% y
  y_hat <- x %*% beta_hat
  Y <- y - mean(y)        # centered response, for the total sum of squares
  e <- y - y_hat          # residuals
  rss <- sum(e^2)
  tss <- sum(Y^2)
  R_squared <- 1 - rss / tss
  # BUG FIX: residual SE must use n - p degrees of freedom.  The original
  # divided by length(x) - 2, but length() of a matrix is n * p, which
  # silently inflated the denominator.
  RSE <- sqrt(rss / (nrow(x) - ncol(x)))
  cat(rss, '\n', tss, '\n', R_squared, '\n', RSE, '\n')
  # BUG FIX: the original returned cat()'s NULL, so `beta_hat = mlr(X, y)`
  # at the call site was always NULL.  Return the coefficients invisibly.
  invisible(beta_hat)
}
# Design matrix with an explicit intercept column for the hand-rolled fit.
X = as.matrix(cbind(1, ad$TV, ad$radio, ad$newspaper))
y = as.matrix(ad$sales)
beta_hat = mlr(X, y)
# Cross-check against R's built-in least-squares fit.
mod1 <- lm(sales ~ ., data = ad)
# Opens the help page for matrix multiplication.
?"%*%"
|
/data-analytics/5_multiple_linear_regression..R
|
permissive
|
rhtrajssm/course-repo
|
R
| false
| false
| 552
|
r
|
# Load the Advertising data set (TV/radio/newspaper spend and sales).
ad = read.csv("data/Advertising.csv")
#' Multiple linear regression via the normal equations.
#'
#' Computes the OLS coefficients beta_hat = (X'X)^{-1} X'y, prints RSS, TSS,
#' R^2 and the residual standard error, and returns the coefficients.
#'
#' @param x Design matrix (include a column of 1s for the intercept).
#' @param y Response vector or one-column matrix.
#' @return The coefficient vector (p x 1 matrix), invisibly.
mlr <- function(x, y) {
  beta_hat <- solve(t(x) %*% x) %*% t(x) %*% y
  y_hat <- x %*% beta_hat
  Y <- y - mean(y)        # centered response, for the total sum of squares
  e <- y - y_hat          # residuals
  rss <- sum(e^2)
  tss <- sum(Y^2)
  R_squared <- 1 - rss / tss
  # BUG FIX: residual SE must use n - p degrees of freedom; length() of a
  # matrix is n * p, so the original's length(x) - 2 was wrong.
  RSE <- sqrt(rss / (nrow(x) - ncol(x)))
  cat(rss, '\n', tss, '\n', R_squared, '\n', RSE, '\n')
  # BUG FIX: return the coefficients (the original returned cat()'s NULL,
  # making the assignment below always NULL).
  invisible(beta_hat)
}
# Design matrix with an explicit intercept column for the hand-rolled fit.
X = as.matrix(cbind(1, ad$TV, ad$radio, ad$newspaper))
y = as.matrix(ad$sales)
beta_hat = mlr(X, y)
# Cross-check against R's built-in least-squares fit.
mod1 <- lm(sales ~ ., data = ad)
# Opens the help page for matrix multiplication.
?"%*%"
|
#' Convert a Spatial* object's bounding box to a one-rectangle SpatialPolygons
#'
#' @param sp A Spatial* object (anything \code{sp::bbox} accepts).
#' @return A \code{SpatialPolygons} object containing a single rectangle that
#'   covers the bounding box of \code{sp}, in the same CRS as \code{sp}.
#' @export
bbox_to_sp <- function(sp) {
  bb <- bbox(sp)
  # Corner coordinates; the ring is closed by repeating the first corner.
  x <- c(bb[1, 1], bb[1, 1], bb[1, 2], bb[1, 2], bb[1, 1])
  y <- c(bb[2, 1], bb[2, 2], bb[2, 2], bb[2, 1], bb[2, 1])
  ring <- Polygon(cbind(x, y))
  polys <- Polygons(list(ring), "p1")
  SpatialPolygons(list(polys), 1L, proj4string = CRS(proj4string(sp)))
}
|
/R/bbox_to_sp.R
|
permissive
|
jhollist/miscPackage
|
R
| false
| false
| 393
|
r
|
#' Convert a Spatial* object's bounding box to a one-rectangle SpatialPolygons
#'
#' @param sp A Spatial* object (anything \code{sp::bbox} accepts).
#' @return A \code{SpatialPolygons} object containing a single rectangle that
#'   covers the bounding box of \code{sp}, in the same CRS as \code{sp}.
#' @export
bbox_to_sp <- function(sp) {
  bb <- bbox(sp)
  # Corner coordinates; the ring is closed by repeating the first corner.
  x <- c(bb[1, 1], bb[1, 1], bb[1, 2], bb[1, 2], bb[1, 1])
  y <- c(bb[2, 1], bb[2, 2], bb[2, 2], bb[2, 1], bb[2, 1])
  ring <- Polygon(cbind(x, y))
  polys <- Polygons(list(ring), "p1")
  SpatialPolygons(list(polys), 1L, proj4string = CRS(proj4string(sp)))
}
|
# Copyright 2020 Observational Health Data Sciences and Informatics
#
# This file is part of SkeletonExistingPredictionModelStudy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Extracts covariates based on measurements
#'
#' @details
#' This extracts measurement values for a concept set of measurement concept ids
#'
#' @param connection The database connection
#' @param oracleTempSchema The temp schema if using oracle
#' @param cdmDatabaseSchema The schema of the OMOP CDM data
#' @param cdmVersion version of the OMOP CDM data
#' @param cohortTable the table name that contains the target population cohort
#' @param rowIdField string representing the unique identifier in the target population cohort
#' @param aggregated whether the covariate should be aggregated
#' @param cohortId cohort id for the target population cohort
#' @param covariateSettings settings for the covariate cohorts and time periods
#'
#' @return
#' The models will now be in the package
#'
#' @export
getMeasurementCovariateData <- function(connection,
oracleTempSchema = NULL,
cdmDatabaseSchema,
cdmVersion = "5",
cohortTable = "#cohort_person",
rowIdField = "row_id",
aggregated,
cohortId,
covariateSettings) {
# to get table 1 - take source values and then map them - dont map in SQL
# Some SQL to construct the covariate:
# The {@x}?{...}:{...} tokens are SqlRender conditionals: they toggle the
# age / ln(age) interaction and the log transform of the measurement value.
# Both the (possibly transformed) value and the untouched raw value are
# selected so that table 1 can be computed on the source scale.
sql <- paste("select c.@row_id_field AS row_id, measurement_concept_id, unit_concept_id,",
"{@lnAgeInteraction}?{LOG(YEAR(c.cohort_start_date)-p.year_of_birth)*}:{{@ageInteraction}?{(YEAR(c.cohort_start_date)-p.year_of_birth)*}}",
"{@lnValue}?{LOG(value_as_number)}:{value_as_number} as value_as_number,",
"measurement_date, abs(datediff(dd, measurement_date, c.cohort_start_date)) as index_time,value_as_number raw_value, YEAR(c.cohort_start_date)-p.year_of_birth as age",
"from @cdm_database_schema.measurement m inner join @cohort_temp_table c on c.subject_id = m.person_id",
"and measurement_date >= dateadd(day, @startDay, cohort_start_date) and ",
"measurement_date <= dateadd(day, @endDay, cohort_start_date)",
"inner join @cdm_database_schema.person p on p.person_id=c.subject_id",
"where m.measurement_concept_id in (@concepts) {@lnValue}?{ and value_as_number >0 }"
)
# Substitute the parameters, then translate to the connection's SQL dialect.
sql <- SqlRender::render(sql,
cohort_temp_table = cohortTable,
row_id_field = rowIdField,
startDay=covariateSettings$startDay,
endDay=covariateSettings$endDay,
concepts = paste(covariateSettings$conceptSet, collapse = ','),
cdm_database_schema = cdmDatabaseSchema,
ageInteraction = covariateSettings$ageInteraction,
lnAgeInteraction = covariateSettings$lnAgeInteraction,
lnValue = covariateSettings$lnValue
)
sql <- SqlRender::translate(sql, targetDialect = attr(connection, "dbms"),
oracleTempSchema = oracleTempSchema)
# Retrieve the covariate:
covariates <- DatabaseConnector::querySql(connection, sql)
# Convert colum names to camelCase:
colnames(covariates) <- SqlRender::snakeCaseToCamelCase(colnames(covariates))
# map data:
# Drop rows with no numeric value, then apply the user-supplied mapping
# (e.g. unit harmonisation).
# NOTE(review): scaleMap defaults to NULL in createMeasurementCovariateSettings;
# this call errors if it was never set -- confirm callers always provide it.
covariates <- covariates[!is.na(covariates$valueAsNumber),]
covariates <- covariateSettings$scaleMap(covariates)
# aggregate data:
# Collapse multiple measurements per subject to one value per rowId.
if(covariateSettings$aggregateMethod == 'max'){
covariates <- covariates %>% dplyr::group_by(rowId) %>%
dplyr::summarize(covariateValue = max(valueAsNumber),
covariateValueSource = max(rawValue))
} else if(covariateSettings$aggregateMethod == 'min'){
covariates <- covariates %>% dplyr::group_by(rowId) %>%
dplyr::summarize(covariateValue = min(valueAsNumber),
covariateValueSource = min(rawValue))
} else if(covariateSettings$aggregateMethod == 'mean'){
covariates <- covariates %>% dplyr::group_by(rowId) %>%
dplyr::summarize(covariateValue = mean(valueAsNumber),
covariateValueSource = mean(rawValue))
} else if(covariateSettings$aggregateMethod == 'median'){
covariates <- covariates %>% dplyr::group_by(rowId) %>%
dplyr::summarize(covariateValue = median(valueAsNumber),
covariateValueSource = median(rawValue))
} else{
# Default ('recent'): keep the measurement(s) closest to the index date
# (indexTime is an absolute day difference), averaging any ties.
last <- covariates %>% dplyr::group_by(rowId) %>%
dplyr::summarize(lastTime = min(indexTime))
covariates <- merge(covariates,last,
by.x = c('rowId','indexTime'),
by.y = c('rowId','lastTime') )
covariates <- covariates %>% dplyr::group_by(rowId) %>%
dplyr::summarize(covariateValue = mean(valueAsNumber),
covariateValueSource = mean(rawValue))
}
# add covariateID:
covariates$covariateId <- covariateSettings$covariateId
#=================
# CALCULATE TABLE 1 Measurement info
# Summary statistics on the raw (source-scale) values, for reporting.
table1 <- covariates %>% dplyr::group_by(covariateId) %>%
dplyr::summarize(meanValue = mean(covariateValueSource),
sdValue = sd(covariateValueSource),
count = length(covariateValueSource))
table1 <- as.data.frame(table1)
covariates <- covariates %>% dplyr::select(rowId, covariateId, covariateValue)
#=================
# impute missing - add age here to be able to input age interaction
sql <- paste("select distinct c.@row_id_field AS row_id ",
", YEAR(c.cohort_start_date)-p.year_of_birth as age",
"from @cohort_temp_table c",
"inner join @cdm_database_schema.person p on p.person_id=c.subject_id")
sql <- SqlRender::render(sql, cohort_temp_table = cohortTable,
row_id_field = rowIdField,
cdm_database_schema = cdmDatabaseSchema)
sql <- SqlRender::translate(sql, targetDialect = attr(connection, "dbms"),
oracleTempSchema = oracleTempSchema)
# Retrieve the covariate:
ppl <- DatabaseConnector::querySql(connection, sql)
colnames(ppl) <- SqlRender::snakeCaseToCamelCase(colnames(ppl))
# Cohort members with no measurement in the window get the imputation value,
# transformed consistently with the settings.
# NOTE(review): if lnValue is TRUE and imputationValue is 0 (its default),
# log(0) = -Inf is imputed -- confirm callers set a positive value.
missingPlp <- ppl[!ppl$rowId%in%covariates$rowId,]
if(length(missingPlp$rowId)>0){
if(covariateSettings$lnValue){
covariateSettings$imputationValue <- log(covariateSettings$imputationValue)
}
if(covariateSettings$ageInteraction){
covVal <- missingPlp$age*covariateSettings$imputationValue
} else if(covariateSettings$lnAgeInteraction){
covVal <- log(missingPlp$age)*covariateSettings$imputationValue
} else{
covVal <- covariateSettings$imputationValue
}
extraData <- data.frame(rowId = missingPlp$rowId,
covariateId = covariateSettings$covariateId,
covariateValue = covVal)
covariates <- rbind(covariates, extraData[,colnames(covariates)])
}
# Construct covariate reference:
covariateRef <- data.frame(covariateId = covariateSettings$covariateId,
covariateName = paste('Measurement during day',
covariateSettings$startDay,
'through',
covariateSettings$endDay,
'days relative to index:',
ifelse(covariateSettings$lnValue, 'log(', ''),
covariateSettings$covariateName,
ifelse(covariateSettings$lnValue, ')', ''),
ifelse(covariateSettings$ageInteraction, ' X Age', ''),
ifelse(covariateSettings$lnAgeInteraction, ' X ln(Age)', '')
),
analysisId = covariateSettings$analysisId,
conceptId = 0)
analysisRef <- data.frame(analysisId = covariateSettings$analysisId,
analysisName = "measurement covariate",
domainId = "measurement covariate",
startDay = covariateSettings$startDay,
endDay = covariateSettings$endDay,
isBinary = "N",
missingMeansZero = "Y")
# NOTE(review): 'sql' was overwritten above, so metaData records the
# person/age query, not the covariate-extraction query.
metaData <- list(sql = sql, call = match.call(), table1 = table1)
result <- Andromeda::andromeda(covariates = covariates,
covariateRef = covariateRef,
analysisRef = analysisRef)
attr(result, "metaData") <- metaData
class(result) <- "CovariateData"
return(result)
}
#' Create settings for the measurement-based covariate builder
#'
#' @param covariateName    Human-readable name used in the covariate reference.
#' @param conceptSet       Vector of measurement concept ids to extract.
#' @param startDay,endDay  Window in days relative to the index date.
#' @param scaleMap         Function applied to the extracted values
#'   (e.g. unit harmonisation); called as \code{scaleMap(covariates)}.
#' @param aggregateMethod  One of 'max', 'min', 'mean', 'median'; any other
#'   value means "most recent measurement".
#' @param imputationValue  Value imputed for subjects with no measurement.
#' @param ageInteraction   Multiply the value by age at index?
#' @param lnAgeInteraction Multiply the value by ln(age) at index?
#' @param lnValue          Use ln(value) instead of the raw value?
#' @param covariateId      Id assigned to the resulting covariate.
#' @param analysisId       Analysis id recorded in the analysis reference.
#' @return An object of class \code{covariateSettings} whose "fun" attribute
#'   points at \code{getMeasurementCovariateData}.
createMeasurementCovariateSettings <- function(covariateName, conceptSet,
                                               startDay = -30, endDay = 0,
                                               scaleMap = NULL, aggregateMethod = 'recent',
                                               imputationValue = 0,
                                               ageInteraction = FALSE,   # FALSE, not F: F is reassignable
                                               lnAgeInteraction = FALSE,
                                               lnValue = FALSE,
                                               covariateId = 1466,
                                               analysisId = 466) {
  # Bundle the arguments; the "fun" attribute names the extraction function
  # to dispatch to when these settings are used.
  covariateSettings <- list(covariateName = covariateName,
                            conceptSet = conceptSet,
                            startDay = startDay,
                            endDay = endDay,
                            scaleMap = scaleMap,
                            aggregateMethod = aggregateMethod,
                            imputationValue = imputationValue,
                            ageInteraction = ageInteraction,
                            lnAgeInteraction = lnAgeInteraction,
                            lnValue = lnValue,
                            covariateId = covariateId,
                            analysisId = analysisId)
  attr(covariateSettings, "fun") <- "EmcDementiaPredictionTable1::getMeasurementCovariateData"
  class(covariateSettings) <- "covariateSettings"
  return(covariateSettings)
}
|
/R/MeasurementCovariateCode.R
|
no_license
|
mi-erasmusmc/EmcDementiaPredictionTable1
|
R
| false
| false
| 11,226
|
r
|
# Copyright 2020 Observational Health Data Sciences and Informatics
#
# This file is part of SkeletonExistingPredictionModelStudy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Extracts covariates based on measurements
#'
#' @details
#' This extracts measurement values for a concept set of measurement concept ids
#'
#' @param connection The database connection
#' @param oracleTempSchema The temp schema if using oracle
#' @param cdmDatabaseSchema The schema of the OMOP CDM data
#' @param cdmVersion version of the OMOP CDM data
#' @param cohortTable the table name that contains the target population cohort
#' @param rowIdField string representing the unique identifier in the target population cohort
#' @param aggregated whether the covariate should be aggregated
#' @param cohortId cohort id for the target population cohort
#' @param covariateSettings settings for the covariate cohorts and time periods
#'
#' @return
#' The models will now be in the package
#'
#' @export
getMeasurementCovariateData <- function(connection,
                                        oracleTempSchema = NULL,
                                        cdmDatabaseSchema,
                                        cdmVersion = "5",
                                        cohortTable = "#cohort_person",
                                        rowIdField = "row_id",
                                        aggregated,
                                        cohortId,
                                        covariateSettings) {
  # to get table 1 - take source values and then map them - dont map in SQL
  # Some SQL to construct the covariate:
  # Pulls raw measurement rows for the concept set within the
  # [startDay, endDay] window relative to cohort start. Via SqlRender
  # templating the value is optionally log-transformed (lnValue) and/or
  # multiplied by age or log(age) (ageInteraction / lnAgeInteraction).
  sql <- paste("select c.@row_id_field AS row_id, measurement_concept_id, unit_concept_id,",
               "{@lnAgeInteraction}?{LOG(YEAR(c.cohort_start_date)-p.year_of_birth)*}:{{@ageInteraction}?{(YEAR(c.cohort_start_date)-p.year_of_birth)*}}",
               "{@lnValue}?{LOG(value_as_number)}:{value_as_number} as value_as_number,",
               "measurement_date, abs(datediff(dd, measurement_date, c.cohort_start_date)) as index_time,value_as_number raw_value, YEAR(c.cohort_start_date)-p.year_of_birth as age",
               "from @cdm_database_schema.measurement m inner join @cohort_temp_table c on c.subject_id = m.person_id",
               "and measurement_date >= dateadd(day, @startDay, cohort_start_date) and ",
               "measurement_date <= dateadd(day, @endDay, cohort_start_date)",
               "inner join @cdm_database_schema.person p on p.person_id=c.subject_id",
               "where m.measurement_concept_id in (@concepts) {@lnValue}?{ and value_as_number >0 }"
  )
  sql <- SqlRender::render(sql,
                           cohort_temp_table = cohortTable,
                           row_id_field = rowIdField,
                           startDay=covariateSettings$startDay,
                           endDay=covariateSettings$endDay,
                           concepts = paste(covariateSettings$conceptSet, collapse = ','),
                           cdm_database_schema = cdmDatabaseSchema,
                           ageInteraction = covariateSettings$ageInteraction,
                           lnAgeInteraction = covariateSettings$lnAgeInteraction,
                           lnValue = covariateSettings$lnValue
  )
  sql <- SqlRender::translate(sql, targetDialect = attr(connection, "dbms"),
                              oracleTempSchema = oracleTempSchema)
  # Retrieve the covariate:
  covariates <- DatabaseConnector::querySql(connection, sql)
  # Convert colum names to camelCase:
  colnames(covariates) <- SqlRender::snakeCaseToCamelCase(colnames(covariates))
  # map data:
  # Drop rows without a value, then apply the user-supplied scaleMap function
  # (e.g. unit harmonisation) to the retrieved measurement values.
  covariates <- covariates[!is.na(covariates$valueAsNumber),]
  covariates <- covariateSettings$scaleMap(covariates)
  # aggregate data:
  # Collapse multiple measurements per subject into a single value using the
  # configured method; the fall-through branch ('recent' or anything else)
  # keeps the measurement(s) closest to index (smallest absolute day
  # difference) and averages ties.
  if(covariateSettings$aggregateMethod == 'max'){
    covariates <- covariates %>% dplyr::group_by(rowId) %>%
      dplyr::summarize(covariateValue = max(valueAsNumber),
                       covariateValueSource = max(rawValue))
  } else if(covariateSettings$aggregateMethod == 'min'){
    covariates <- covariates %>% dplyr::group_by(rowId) %>%
      dplyr::summarize(covariateValue = min(valueAsNumber),
                       covariateValueSource = min(rawValue))
  } else if(covariateSettings$aggregateMethod == 'mean'){
    covariates <- covariates %>% dplyr::group_by(rowId) %>%
      dplyr::summarize(covariateValue = mean(valueAsNumber),
                       covariateValueSource = mean(rawValue))
  } else if(covariateSettings$aggregateMethod == 'median'){
    covariates <- covariates %>% dplyr::group_by(rowId) %>%
      dplyr::summarize(covariateValue = median(valueAsNumber),
                       covariateValueSource = median(rawValue))
  } else{
    last <- covariates %>% dplyr::group_by(rowId) %>%
      dplyr::summarize(lastTime = min(indexTime))
    covariates <- merge(covariates,last,
                        by.x = c('rowId','indexTime'),
                        by.y = c('rowId','lastTime') )
    covariates <- covariates %>% dplyr::group_by(rowId) %>%
      dplyr::summarize(covariateValue = mean(valueAsNumber),
                       covariateValueSource = mean(rawValue))
  }
  # add covariateID:
  covariates$covariateId <- covariateSettings$covariateId
  #=================
  # CALCULATE TABLE 1 Measurement info
  # Summary statistics of the (scaled) source values for reporting purposes.
  table1 <- covariates %>% dplyr::group_by(covariateId) %>%
    dplyr::summarize(meanValue = mean(covariateValueSource),
                     sdValue = sd(covariateValueSource),
                     count = length(covariateValueSource))
  table1 <- as.data.frame(table1)
  covariates <- covariates %>% dplyr::select(rowId, covariateId, covariateValue)
  #=================
  # impute missing - add age here to be able to input age interaction
  # Fetch every subject in the cohort (with age) so subjects without any
  # measurement can be given the configured imputation value.
  sql <- paste("select distinct c.@row_id_field AS row_id ",
               ", YEAR(c.cohort_start_date)-p.year_of_birth as age",
               "from @cohort_temp_table c",
               "inner join @cdm_database_schema.person p on p.person_id=c.subject_id")
  sql <- SqlRender::render(sql, cohort_temp_table = cohortTable,
                           row_id_field = rowIdField,
                           cdm_database_schema = cdmDatabaseSchema)
  sql <- SqlRender::translate(sql, targetDialect = attr(connection, "dbms"),
                              oracleTempSchema = oracleTempSchema)
  # Retrieve the covariate:
  ppl <- DatabaseConnector::querySql(connection, sql)
  colnames(ppl) <- SqlRender::snakeCaseToCamelCase(colnames(ppl))
  missingPlp <- ppl[!ppl$rowId%in%covariates$rowId,]
  if(length(missingPlp$rowId)>0){
    # The imputation value is transformed the same way as the observed values.
    # NOTE(review): if both ageInteraction and lnAgeInteraction are TRUE only
    # the plain age interaction is applied here - confirm this is intended.
    if(covariateSettings$lnValue){
      covariateSettings$imputationValue <- log(covariateSettings$imputationValue)
    }
    if(covariateSettings$ageInteraction){
      covVal <- missingPlp$age*covariateSettings$imputationValue
    } else if(covariateSettings$lnAgeInteraction){
      covVal <- log(missingPlp$age)*covariateSettings$imputationValue
    } else{
      covVal <- covariateSettings$imputationValue
    }
    extraData <- data.frame(rowId = missingPlp$rowId,
                            covariateId = covariateSettings$covariateId,
                            covariateValue = covVal)
    covariates <- rbind(covariates, extraData[,colnames(covariates)])
  }
  # Construct covariate reference:
  covariateRef <- data.frame(covariateId = covariateSettings$covariateId,
                             covariateName = paste('Measurement during day',
                                                   covariateSettings$startDay,
                                                   'through',
                                                   covariateSettings$endDay,
                                                   'days relative to index:',
                                                   ifelse(covariateSettings$lnValue, 'log(', ''),
                                                   covariateSettings$covariateName,
                                                   ifelse(covariateSettings$lnValue, ')', ''),
                                                   ifelse(covariateSettings$ageInteraction, ' X Age', ''),
                                                   ifelse(covariateSettings$lnAgeInteraction, ' X ln(Age)', '')
                             ),
                             analysisId = covariateSettings$analysisId,
                             conceptId = 0)
  analysisRef <- data.frame(analysisId = covariateSettings$analysisId,
                            analysisName = "measurement covariate",
                            domainId = "measurement covariate",
                            startDay = covariateSettings$startDay,
                            endDay = covariateSettings$endDay,
                            isBinary = "N",
                            missingMeansZero = "Y")
  # NOTE(review): metaData$sql records the imputation query because `sql` was
  # overwritten above; the covariate-extraction SQL is not retained.
  metaData <- list(sql = sql, call = match.call(), table1 = table1)
  result <- Andromeda::andromeda(covariates = covariates,
                                 covariateRef = covariateRef,
                                 analysisRef = analysisRef)
  attr(result, "metaData") <- metaData
  class(result) <- "CovariateData"
  return(result)
}
#' Create settings for a measurement covariate
#'
#' @param covariateName Name used to label the covariate.
#' @param conceptSet Vector of measurement concept ids to extract.
#' @param startDay Start of the measurement window relative to index (default -30).
#' @param endDay End of the measurement window relative to index (default 0).
#' @param scaleMap Function applied to the extracted values (e.g. unit mapping).
#' @param aggregateMethod One of 'max', 'min', 'mean', 'median' or 'recent' (default).
#' @param imputationValue Value used for subjects without any measurement (default 0).
#' @param ageInteraction If TRUE, multiply the value by age at index.
#' @param lnAgeInteraction If TRUE, multiply the value by log(age) at index.
#' @param lnValue If TRUE, use the natural log of the measurement value.
#' @param covariateId Id assigned to the covariate (default 1466).
#' @param analysisId Analysis id used in the covariate reference (default 466).
#'
#' @return An object of class 'covariateSettings' consumed by
#'   \code{getMeasurementCovariateData}.
createMeasurementCovariateSettings <- function(covariateName, conceptSet,
                                               startDay = -30, endDay = 0,
                                               scaleMap = NULL, aggregateMethod = 'recent',
                                               imputationValue = 0,
                                               ageInteraction = FALSE,
                                               lnAgeInteraction = FALSE,
                                               lnValue = FALSE,
                                               covariateId = 1466,
                                               analysisId = 466
) {
  # Bundle everything into a plain named list; downstream code accesses by name.
  # (Defaults use TRUE/FALSE rather than the reassignable T/F shorthand.)
  covariateSettings <- list(covariateName = covariateName,
                            conceptSet = conceptSet,
                            startDay = startDay,
                            endDay = endDay,
                            scaleMap = scaleMap,
                            aggregateMethod = aggregateMethod,
                            imputationValue = imputationValue,
                            ageInteraction = ageInteraction,
                            lnAgeInteraction = lnAgeInteraction,
                            lnValue = lnValue,
                            covariateId = covariateId,
                            analysisId = analysisId
  )
  # FeatureExtraction dispatches on this attribute to find the extraction function.
  attr(covariateSettings, "fun") <- "EmcDementiaPredictionTable1::getMeasurementCovariateData"
  class(covariateSettings) <- "covariateSettings"
  return(covariateSettings)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/avaible_syllable_funs.R
\name{print.available}
\alias{print.available}
\title{Prints an available Object.}
\usage{
\method{print}{available}(x, ...)
}
\arguments{
\item{x}{The available object}
\item{\ldots}{ignored}
}
\description{
Prints an available object.
}
|
/man/print.available.Rd
|
no_license
|
trinker/syllable
|
R
| false
| true
| 343
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/avaible_syllable_funs.R
\name{print.available}
\alias{print.available}
\title{Prints an available Object.}
\usage{
\method{print}{available}(x, ...)
}
\arguments{
\item{x}{The available object}
\item{\ldots}{ignored}
}
\description{
Prints an available object.
}
|
library(shiny)
source("src/metric_correlations.R")
library(shinydashboard)
library(vistime)
library(plotly)
# UI: three tabs - metric interplay scatter, per-tile metric maps, and an
# event timeline per host machine.
ui = tabsetPanel(
  tabPanel("Performance Metric Interplay", fluidPage(
    sidebarLayout(
      sidebarPanel(
        helpText("Explore the interaction between two performance metrics for a chosen virtual machine(s)"),
        selectInput("serial",
                    label = "Choose the host machine",
                    choices = unique(all_data$gpuSerial),
                    selected = unique(all_data$gpuSerial)[1]),
        selectInput("var1",
                    label = "Choose the first variable",
                    choices = c("tempC",
                                "powerDraw",
                                "GpuUtilPerc",
                                "MemUtilPerc",
                                "runtime"),
                    selected = "tempC"),
        selectInput("var2",
                    label = "Choose the second variable",
                    choices = c("tempC",
                                "powerDraw",
                                "GpuUtilPerc",
                                "MemUtilPerc",
                                "runtime"),
                    selected = "runtime")
      ),
      mainPanel(
        infoBoxOutput("corr"),
        plotOutput("metric_variation")
      )
    )
  )),
  tabPanel("Tile Variation", fluidPage(
    sidebarLayout(
      sidebarPanel(
        helpText("Visualise the variation of each performance metric over the image tiles"),
        selectInput("metric",
                    label = "Select a performance metric",
                    choices = c("tempC",
                                "powerDraw",
                                "GpuUtilPerc",
                                "MemUtilPerc",
                                "runtime"),
                    selected = "tempC"),
        helpText("Final terapixel image for comparison:"),
        # Trailing comma after img() removed: it created an empty argument.
        img(src = "full_terapixel_image.png", height = 210, width = 210)
      ),
      mainPanel(
        plotOutput("tile_variation")
      )
    )
  )),
  tabPanel("Event Timeline", fluidPage(
    sidebarLayout(
      sidebarPanel(width = 6,
                   selectInput("host",
                               label = "Choose the host machine",
                               choices = unique(app_wide$hostname),
                               selected = unique(app_wide$hostname)[1]
                   ),
                   # The original passed both a positional "Range:" and a named
                   # label =; the stray "Range:" silently filled sliderInput's
                   # `round` argument, so it is dropped here.
                   sliderInput("time_int", timeFormat = "%H:%M:%S",
                               label = "Select Time Interval",
                               min = min(app_wide$START), max = max(app_wide$STOP),
                               value = c(min(app_wide$START), min(app_wide$START) + 180),
                               step = 60)
      ),
      mainPanel(width = 6,
                plotlyOutput("dominant_events"))
    )
  )))
# Server: wires the three reactive outputs to the inputs defined in `ui`.
server = function(input, output) {
  # Scatter of the two chosen metrics for one GPU, labelled by task number.
  output$metric_variation = renderPlot({
    machine_data <- filter(all_data, gpuSerial == input$serial)
    ggplot(machine_data, aes_string(x = input$var1, y = input$var2)) +
      geom_point() +
      geom_text(aes(label = task_no), vjust = 1.2)
  })
  # Spatial map of a metric over the level-12 tiles, diverging around the mean.
  output$tile_variation = renderPlot({
    metric_mean <- mean(unlist(all_data[, input$metric]))
    ggplot(filter(all_data, level == 12),
           aes_string(x = "x", y = "y", colour = input$metric)) +
      geom_point() +
      scale_color_gradient2(midpoint = metric_mean, low = "blue", mid = "white",
                            high = "red", space = "Lab", guide = "colourbar")
  })
  # Pearson correlation between the two chosen metrics for the selected GPU.
  output$corr = renderInfoBox({
    selected_rows <- all_data[all_data$gpuSerial == input$serial, ]
    coefficient <- round(cor(unlist(selected_rows[, input$var1]),
                             unlist(selected_rows[, input$var2])), 2)
    infoBox("Correlation Coefficient", coefficient,
            color = "blue", fill = TRUE)
  })
  # Timeline of per-host events (excluding TotalRender) inside the slider window.
  output$dominant_events = renderPlotly({
    events <- filter(app_wide,
                     hostname == input$host & eventName != "TotalRender" &
                       START >= input$time_int[1] & STOP <= input$time_int[2])
    vistime(events,
            col.group = "eventName", col.start = "START", col.end = "STOP",
            col.event = "eventName", show_labels = FALSE)
  })
}
shinyApp(ui = ui, server = server)
|
/Terapixel Project/CSC8634-Results-App.R
|
no_license
|
LukeBattle/Terapixel
|
R
| false
| false
| 4,354
|
r
|
library(shiny)
source("src/metric_correlations.R")
library(shinydashboard)
library(vistime)
library(plotly)
# UI: three tabs - metric interplay scatter, per-tile metric maps, and an
# event timeline per host machine.
ui = tabsetPanel(
  tabPanel("Performance Metric Interplay", fluidPage(
    sidebarLayout(
      sidebarPanel(
        helpText("Explore the interaction between two performance metrics for a chosen virtual machine(s)"),
        selectInput("serial",
                    label = "Choose the host machine",
                    choices = unique(all_data$gpuSerial),
                    selected = unique(all_data$gpuSerial)[1]),
        selectInput("var1",
                    label = "Choose the first variable",
                    choices = c("tempC",
                                "powerDraw",
                                "GpuUtilPerc",
                                "MemUtilPerc",
                                "runtime"),
                    selected = "tempC"),
        selectInput("var2",
                    label = "Choose the second variable",
                    choices = c("tempC",
                                "powerDraw",
                                "GpuUtilPerc",
                                "MemUtilPerc",
                                "runtime"),
                    selected = "runtime")
      ),
      mainPanel(
        infoBoxOutput("corr"),
        plotOutput("metric_variation")
      )
    )
  )),
  tabPanel("Tile Variation", fluidPage(
    sidebarLayout(
      sidebarPanel(
        helpText("Visualise the variation of each performance metric over the image tiles"),
        selectInput("metric",
                    label = "Select a performance metric",
                    choices = c("tempC",
                                "powerDraw",
                                "GpuUtilPerc",
                                "MemUtilPerc",
                                "runtime"),
                    selected = "tempC"),
        helpText("Final terapixel image for comparison:"),
        # Trailing comma after img() removed: it created an empty argument.
        img(src = "full_terapixel_image.png", height = 210, width = 210)
      ),
      mainPanel(
        plotOutput("tile_variation")
      )
    )
  )),
  tabPanel("Event Timeline", fluidPage(
    sidebarLayout(
      sidebarPanel(width = 6,
                   selectInput("host",
                               label = "Choose the host machine",
                               choices = unique(app_wide$hostname),
                               selected = unique(app_wide$hostname)[1]
                   ),
                   # The original passed both a positional "Range:" and a named
                   # label =; the stray "Range:" silently filled sliderInput's
                   # `round` argument, so it is dropped here.
                   sliderInput("time_int", timeFormat = "%H:%M:%S",
                               label = "Select Time Interval",
                               min = min(app_wide$START), max = max(app_wide$STOP),
                               value = c(min(app_wide$START), min(app_wide$START) + 180),
                               step = 60)
      ),
      mainPanel(width = 6,
                plotlyOutput("dominant_events"))
    )
  )))
# Server: wires the three reactive outputs to the inputs defined in `ui`.
server = function(input, output) {
  # Scatter of the two chosen metrics for one GPU, labelled by task number.
  output$metric_variation = renderPlot({
    machine_data <- filter(all_data, gpuSerial == input$serial)
    ggplot(machine_data, aes_string(x = input$var1, y = input$var2)) +
      geom_point() +
      geom_text(aes(label = task_no), vjust = 1.2)
  })
  # Spatial map of a metric over the level-12 tiles, diverging around the mean.
  output$tile_variation = renderPlot({
    metric_mean <- mean(unlist(all_data[, input$metric]))
    ggplot(filter(all_data, level == 12),
           aes_string(x = "x", y = "y", colour = input$metric)) +
      geom_point() +
      scale_color_gradient2(midpoint = metric_mean, low = "blue", mid = "white",
                            high = "red", space = "Lab", guide = "colourbar")
  })
  # Pearson correlation between the two chosen metrics for the selected GPU.
  output$corr = renderInfoBox({
    selected_rows <- all_data[all_data$gpuSerial == input$serial, ]
    coefficient <- round(cor(unlist(selected_rows[, input$var1]),
                             unlist(selected_rows[, input$var2])), 2)
    infoBox("Correlation Coefficient", coefficient,
            color = "blue", fill = TRUE)
  })
  # Timeline of per-host events (excluding TotalRender) inside the slider window.
  output$dominant_events = renderPlotly({
    events <- filter(app_wide,
                     hostname == input$host & eventName != "TotalRender" &
                       START >= input$time_int[1] & STOP <= input$time_int[2])
    vistime(events,
            col.group = "eventName", col.start = "START", col.end = "STOP",
            col.event = "eventName", show_labels = FALSE)
  })
}
shinyApp(ui = ui, server = server)
|
# Read the household power dataset ("?" marks missing values), keep only the
# two target days, build a combined date-time column and draw the Global
# Active Power line chart.
col_types <- c("character", "character", "double", "double", "double",
               "double", "double", "double", "numeric")
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                   colClasses = col_types, na.strings = "?")
data_s <- subset(data, Date %in% c("1/2/2007", "2/2/2007"))
data_s$Date <- as.Date(data_s$Date, format = "%d/%m/%Y")
data_s$DateTime <- strptime(paste(data_s$Date, data_s$Time),
                            format = "%Y-%m-%d %H:%M:%S")
head(data_s$DateTime)
plot(data_s$DateTime, data_s$Global_active_power, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")
|
/ExData_Plotting1-master/Plot2.R
|
no_license
|
luw517/Data-Science-Specialization
|
R
| false
| false
| 535
|
r
|
# Read the household power dataset ("?" marks missing values), keep only the
# two target days, build a combined date-time column and draw the Global
# Active Power line chart.
col_types <- c("character", "character", "double", "double", "double",
               "double", "double", "double", "numeric")
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                   colClasses = col_types, na.strings = "?")
data_s <- subset(data, Date %in% c("1/2/2007", "2/2/2007"))
data_s$Date <- as.Date(data_s$Date, format = "%d/%m/%Y")
data_s$DateTime <- strptime(paste(data_s$Date, data_s$Time),
                            format = "%Y-%m-%d %H:%M:%S")
head(data_s$DateTime)
plot(data_s$DateTime, data_s$Global_active_power, type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")
|
\name{statsr-package}
\alias{statsr-package}
\alias{statsr}
\docType{package}
\title{
A short title line describing what the package does
}
\description{
A more detailed description of what the package does. A length
of about one to five lines is recommended.
}
\details{
This section should provide a more detailed overview of how to use the
package, including the most important functions.
}
\author{
Who wrote it, email optional.
Maintainer: Your Name <your@email.com>
}
\references{
This optional section can contain literature or other references for
background information.
}
% Optionally other standard keywords, one per line,
% from the file KEYWORDS in the R documentation.
\keyword{ package }
\seealso{
Optional links to other man pages
}
\examples{
}
|
/man/statsr-package.Rd
|
permissive
|
daesik82/statsr
|
R
| false
| false
| 780
|
rd
|
\name{statsr-package}
\alias{statsr-package}
\alias{statsr}
\docType{package}
\title{
A short title line describing what the package does
}
\description{
A more detailed description of what the package does. A length
of about one to five lines is recommended.
}
\details{
This section should provide a more detailed overview of how to use the
package, including the most important functions.
}
\author{
Who wrote it, email optional.
Maintainer: Your Name <your@email.com>
}
\references{
This optional section can contain literature or other references for
background information.
}
% Optionally other standard keywords, one per line,
% from the file KEYWORDS in the R documentation.
\keyword{ package }
\seealso{
Optional links to other man pages
}
\examples{
}
|
#' @rdname CST_RainFARM
#' @title RainFARM stochastic precipitation downscaling of a CSTools object
#'
#' @author Jost von Hardenberg - ISAC-CNR, \email{j.vonhardenberg@isac.cnr.it}
#'
#' @description This function implements the RainFARM stochastic precipitation
#' downscaling method and accepts a CSTools object (an object of the class
#' 's2dv_cube' as provided by `CST_Load`) as input.
#' Adapted for climate downscaling and including orographic correction
#' as described in Terzago et al. 2018.
#' @references Terzago, S. et al. (2018). NHESS 18(11), 2825-2840.
#' http://doi.org/10.5194/nhess-18-2825-2018 ;
#' D'Onofrio et al. (2014), J of Hydrometeorology 15, 830-843; Rebora et. al. (2006), JHM 7, 724.
#' @param data An object of the class 's2dv_cube' as returned by `CST_Load`,
#' containing the spatial precipitation fields to downscale.
#' The data object is expected to have an element named \code{$data} with at least two
#' spatial dimensions named "lon" and "lat" and one or more dimensions over which
#' to compute average spectral slopes (unless specified with parameter \code{slope}),
#' which can be specified by parameter \code{time_dim}.
#' The number of longitudes and latitudes in the input data is expected to be even and the same. If not
#' the function will perform a subsetting to ensure this condition.
#' @param weights Matrix with climatological weights which can be obtained using
#' the \code{CST_RFWeights} function. If \code{weights=1.} (default) no weights are used.
#' The names of these dimensions must be at least 'lon' and 'lat'.
#' @param nf Refinement factor for downscaling (the output resolution is increased by this factor).
#' @param slope Prescribed spectral slope. The default is \code{slope=0.}
#' meaning that the slope is determined automatically over the dimensions specified by \code{time_dim}. A 1D array with named dimension can be provided (see details and examples)
#' @param kmin First wavenumber for spectral slope (default: \code{kmin=1}).
#' @param nens Number of ensemble members to produce (default: \code{nens=1}).
#' @param fglob Logical to conserve global precipitation over the domain (default: FALSE).
#' @param fsmooth Logical to conserve precipitation with a smoothing kernel (default: TRUE).
#' @param time_dim String or character array with name(s) of dimension(s)
#' (e.g. "ftime", "sdate", "member" ...) over which to compute spectral slopes.
#' If a character array of dimension names is provided, the spectral slopes
#' will be computed as an average over all elements belonging to those dimensions.
#' If omitted one of c("ftime", "sdate", "time") is searched and the first one with more
#' than one element is chosen.
#' @param verbose Logical for verbose output (default: FALSE).
#' @param drop_realization_dim Logical to remove the "realization" stochastic ensemble dimension,
#' needed for saving data through function CST_SaveData (default: FALSE)
#' with the following behaviour if set to TRUE:
#'
#' 1) if \code{nens==1}: the dimension is dropped;
#'
#' 2) if \code{nens>1} and a "member" dimension exists:
#' the "realization" and "member" dimensions are compacted (multiplied) and the resulting dimension is named "member";
#'
#' 3) if \code{nens>1} and a "member" dimension does not exist: the "realization" dimension is renamed to "member".
#' @param nprocs The number of parallel processes to spawn for the use for parallel computation in multiple cores. (default: 1)
#'
#' @return CST_RainFARM() returns a downscaled CSTools object (i.e., of the
#' class 's2dv_cube').
#' If \code{nens>1} an additional dimension named "realization" is added to the
#' \code{$data} array after the "member" dimension (unless
#' \code{drop_realization_dim=TRUE} is specified).
#' The ordering of the remaining dimensions in the \code{$data} element of the input object is maintained.
#' @details When the parameters 'slope' and 'weights' have a seasonal dependency, a dimension name must match between these parameters and the input data in parameter 'data'. See example 2 below where weights and slope vary with the 'sdate' dimension.
#' @import multiApply
#' @import rainfarmr
#' @examples
#' #Example 1: using CST_RainFARM for a CSTools object
#' nf <- 8 # Choose a downscaling by factor 8
#' exp <- 1 : (2 * 3 * 4 * 8 * 8)
#' dim(exp) <- c(dataset = 1, member = 2, sdate = 3, ftime = 4, lat = 8, lon = 8)
#' lon <- seq(10, 13.5, 0.5)
#' dim(lon) <- c(lon = length(lon))
#' lat <- seq(40, 43.5, 0.5)
#' dim(lat) <- c(lat = length(lat))
#' data <- list(data = exp, lon = lon, lat = lat)
#' # Create a test array of weights
#' ww <- array(1., dim = c(lon = 8 * nf, lat = 8 * nf))
#' res <- CST_RainFARM(data, nf = nf, weights = ww, nens=3)
#' str(res)
#' #List of 3
#' # $ data: num [1, 1:2, 1:3, 1:3, 1:4, 1:64, 1:64] 260 553 281 278 143 ...
#' # $ lon : num [1:64] 9.78 9.84 9.91 9.97 10.03 ...
#' # $ lat : num [1:64] 39.8 39.8 39.9 40 40 ...
#' dim(res$data)
#' # dataset member realization sdate ftime lat lon
#' # 1 2 3 3 4 64 64
#'
#' # Example 2:
#' slo <- array(c(0.1, 0.5, 0.7), c(sdate= 3))
#' wei <- array(rnorm(8 * 8 * 3), c(lon = 8, lat = 8, sdate = 3))
#' res <- CST_RainFARM(lonlat_prec,
#' weights = wei, slope = slo, nf = 2)
#' @export
CST_RainFARM <- function(data, weights = 1., slope = 0, nf, kmin = 1,
                         nens = 1, fglob = FALSE, fsmooth = TRUE,
                         nprocs = 1, time_dim = NULL, verbose = FALSE,
                         drop_realization_dim = FALSE) {
  # Keep every lon/lat attribute except the first (the values themselves),
  # so they can be restored on the refined coordinate vectors afterwards.
  lon_attrs <- attributes(data$lon)[-1]
  lat_attrs <- attributes(data$lat)[-1]
  # Delegate the actual downscaling to the array-based RainFARM().
  downscaled <- RainFARM(data$data, data$lon, data$lat,
                         nf = nf, weights = weights, nens, slope, kmin, fglob,
                         fsmooth, nprocs, time_dim, lon_dim = "lon",
                         lat_dim = "lat", drop_realization_dim, verbose)
  # Replace data and coordinates in the s2dv_cube, restoring the attributes.
  data$data <- downscaled$data
  data$lon <- downscaled$lon
  attributes(data$lon) <- lon_attrs
  data$lat <- downscaled$lat
  attributes(data$lat) <- lat_attrs
  return(data)
}
#' @rdname RainFARM
#' @title RainFARM stochastic precipitation downscaling (reduced version)
#' @author Jost von Hardenberg - ISAC-CNR, \email{j.vonhardenberg@isac.cnr.it}
#' @description This function implements the RainFARM stochastic precipitation downscaling method
#' and accepts in input an array with named dims ("lon", "lat")
#' and one or more dimension (such as "ftime", "sdate" or "time")
#' over which to average automatically determined spectral slopes.
#' Adapted for climate downscaling and including orographic correction.
#' References:
#' Terzago, S. et al. (2018). NHESS 18(11), 2825-2840. http://doi.org/10.5194/nhess-18-2825-2018,
#' D'Onofrio et al. (2014), J of Hydrometeorology 15, 830-843; Rebora et. al. (2006), JHM 7, 724.
#' @param data Precipitation array to downscale.
#' The input array is expected to have at least two dimensions named "lon" and "lat" by default
#' (these default names can be changed with the \code{lon_dim} and \code{lat_dim} parameters)
#' and one or more dimensions over which to average these slopes,
#' which can be specified by parameter \code{time_dim}.
#' The number of longitudes and latitudes in the input data is expected to be even and the same. If not
#' the function will perform a subsetting to ensure this condition.
#' @param lon Vector or array of longitudes.
#' @param lat Vector or array of latitudes.
#' @param weights multi-dimensional array with climatological weights which can be obtained using
#' the \code{CST_RFWeights} function. If \code{weights=1.} (default) no weights are used.
#' The names of these dimensions must be at least 'lon' and 'lat'.
#' @param nf Refinement factor for downscaling (the output resolution is increased by this factor).
#' @param slope Prescribed spectral slope. The default is \code{slope=0.}
#' meaning that the slope is determined automatically over the dimensions specified by \code{time_dim}. A 1D array with named dimension can be provided (see details and examples)
#' @param kmin First wavenumber for spectral slope (default: \code{kmin=1}).
#' @param nens Number of ensemble members to produce (default: \code{nens=1}).
#' @param fglob Logical to conserve global precipitation over the domain (default: FALSE)
#' @param fsmooth Logical to conserve precipitation with a smoothing kernel (default: TRUE)
#' @param time_dim String or character array with name(s) of time dimension(s)
#' (e.g. "ftime", "sdate", "time" ...) over which to compute spectral slopes.
#' If a character array of dimension names is provided, the spectral slopes
#' will be computed over all elements belonging to those dimensions.
#' If omitted one of c("ftime", "sdate", "time")
#' is searched and the first one with more than one element is chosen.
#' @param lon_dim Name of lon dimension ("lon" by default).
#' @param lat_dim Name of lat dimension ("lat" by default).
#' @param verbose logical for verbose output (default: FALSE).
#' @param drop_realization_dim Logical to remove the "realization" stochastic ensemble dimension (default: FALSE)
#' with the following behaviour if set to TRUE:
#'
#' 1) if \code{nens==1}: the dimension is dropped;
#'
#' 2) if \code{nens>1} and a "member" dimension exists:
#' the "realization" and "member" dimensions are compacted (multiplied) and the resulting dimension is named "member";
#'
#' 3) if \code{nens>1} and a "member" dimension does not exist: the "realization" dimension is renamed to "member".
#'
#' @param nprocs The number of parallel processes to spawn for the use for parallel computation in multiple cores. (default: 1)
#' @return RainFARM() returns a list containing the fine-scale longitudes, latitudes
#' and the sequence of \code{nens} downscaled fields.
#' If \code{nens>1} an additional dimension named "realization" is added to the output array
#' after the "member" dimension (if it exists and unless \code{drop_realization_dim=TRUE} is specified).
#' The ordering of the remaining dimensions in the \code{exp} element of the input object is maintained.
#' @details When the parameters 'slope' and 'weights' have a seasonal dependency, a dimension name must match between these parameters and the input data in parameter 'data'. See example 2 below where weights and slope vary with the 'sdate' dimension.
#' @import multiApply
#' @importFrom s2dverification Subset
#' @importFrom abind abind
#' @export
#' @examples
#' # Example for the 'reduced' RainFARM function
#' nf <- 8 # Choose a downscaling by factor 8
#' nens <- 3 # Number of ensemble members
#' # create a test array with dimension 8x8 and 20 timesteps
#' # or provide your own read from a netcdf file
#' pr <- rnorm(8 * 8 * 20)
#' dim(pr) <- c(lon = 8, lat = 8, ftime = 20)
#' lon_mat <- seq(10, 13.5, 0.5) # could also be a 2d matrix
#' lat_mat <- seq(40, 43.5, 0.5)
#' # Create a test array of weights
#' ww <- array(1., dim = c(lon = 8 * nf, lat = 8 * nf))
#' # or create proper weights using an external fine-scale climatology file
#' # Specify a weightsfn filename if you wish to save the weights
#' \dontrun{
#' ww <- CST_RFWeights("./worldclim.nc", nf, lon = lon_mat, lat = lat_mat,
#' fsmooth = TRUE)
#' }
#' # downscale using weights (ww=1. means do not use weights)
#' res <- RainFARM(pr, lon_mat, lat_mat, nf,
#' fsmooth = TRUE, fglob = FALSE,
#' weights = ww, nens = 2, verbose = TRUE)
#' str(res)
#' #List of 3
#' # $ data: num [1:3, 1:20, 1:64, 1:64] 0.186 0.212 0.138 3.748 0.679 ...
#' # $ lon : num [1:64] 9.78 9.84 9.91 9.97 10.03 ...
#' # $ lat : num [1:64] 39.8 39.8 39.9 40 40 ...
#' dim(res$data)
#' # lon lat ftime realization
#' # 64 64 20 2
#' # Example 2:
#' slo <- array(c(0.1, 0.5, 0.7), c(sdate= 3))
#' wei <- array(rnorm(8*8*3), c(lon = 8, lat = 8, sdate = 3))
#' res <- RainFARM(lonlat_prec$data, lon = lonlat_prec$lon,
#' lat = lonlat_prec$lat, weights = wei, slope = slo, nf = 2)
RainFARM <- function(data, lon, lat, nf, weights = 1., nens = 1,
                     slope = 0, kmin = 1, fglob = FALSE, fsmooth = TRUE,
                     nprocs = 1, time_dim = NULL, lon_dim = "lon", lat_dim = "lat",
                     drop_realization_dim = FALSE, verbose = FALSE) {
  # Ensure input grid is square and with even dimensions
  if ( (dim(data)[lon_dim] != dim(data)[lat_dim]) |
       (dim(data)[lon_dim] %% 2 == 1)) {
    warning("Warning: input data are expected to be on a square grid",
            " with an even number of pixels per side.")
    # Cut both spatial dimensions down to the largest common even size.
    nmin <- min(dim(data)[lon_dim], dim(data)[lat_dim])
    nmin <- floor(nmin / 2) * 2
    data <- .subset(data, lat_dim, 1:nmin)
    data <- .subset(data, lon_dim, 1:nmin)
    # Coordinates may be 2-d matrices or plain vectors; cut accordingly.
    if (length(dim(lon)) == 2) {
      lon <- lon[1:nmin, 1:nmin]
      lat <- lat[1:nmin, 1:nmin]
    } else {
      lon <- lon[1:nmin]
      lat <- lat[1:nmin]
    }
    warning("The input data have been cut to the range.")
    warning(paste0("lon: [", lon[1], ", ", lon[length(lon)], "] ",
                   " lat: [", lat[1], ", ", lat[length(lat)], "]"))
  }
  # Validate weights: if not a scalar, it must carry named 'lon'/'lat' dims.
  if (length(dim(weights)) > 0) {
    if (length(names(dim(weights))) == 0) {
      stop("Parameter 'weights' must have dimension names when it is not a scalar.")
    } else {
      if (length(which(names(dim(weights)) == 'lon')) > 0 &
          length(which(names(dim(weights)) == 'lat')) > 0) {
        lonposw <- which(names(dim(weights)) == 'lon')
        latposw <- which(names(dim(weights)) == 'lat')
      } else {
        stop("Parameter 'weights' must have dimension names 'lon' and 'lat' when",
             " it is not a scalar.")
      }
    }
  }
  # The weights grid must match the refined (nf-times finer) output grid.
  if (!(length(dim(weights)) == 0)) {
    if (!(dim(weights)[lonposw] == dim(data)[lon_dim] * nf) &
        !(dim(weights)[latposw] == dim(data)[lat_dim] * nf)) {
      stop(paste("The dimensions of the weights matrix (", dim(weights)[1],
                 "x", dim(weights)[2] ,
                 ") are not consistent with the size of the data (",
                 dim(data)[lon_dim], ") and the refinement factor (", nf, ")"))
    }
  }
  # Check/detect time_dim
  # If not supplied, pick the first of ftime/sdate/time with length > 1.
  if (is.null(time_dim)) {
    time_dim_names <- c("ftime", "sdate", "time")
    time_dim_num <- which(time_dim_names %in% names(dim(data)))
    if (length(time_dim_num) > 0) {
      # Find time dimension with length > 1
      ilong <- which(dim(data)[time_dim_names[time_dim_num]] > 1)
      if (length(ilong) > 0) {
        time_dim <- time_dim_names[time_dim_num[ilong[1]]]
      } else {
        stop("No time dimension longer than one found.")
      }
    } else {
      stop("Could not automatically detect a target time dimension ",
           "in the provided data in 'data'.")
    }
    warning(paste("Selected time dim:", time_dim))
  }
  # Check if slope is an array
  #if (length(slope) > 1) {
  #  warning("Parameter 'slope' has length > 1 and only the first ",
  #          "element will be used.")
  #  slope <- as.numeric(slope[1])
  #}
  # Perform common calls
  # Compute the refined (nf-times finer) output coordinate vectors.
  r <- lon_lat_fine(lon, lat, nf)
  lon_f <- r$lon
  lat_f <- r$lat
  # reorder and group time_dim together at the end
  cdim0 <- dim(data)
  imask <- names(cdim0) %in% time_dim
  data <- .aperm2(data, c(which(!imask), which(imask)))
  cdim <- dim(data)
  ind <- 1:length(which(!imask))
  # compact (multiply) time_dim dimensions
  dim(data) <- c(cdim[ind], rainfarm_samples = prod(cdim[-ind]))
  # Repeatedly apply .RainFARM
  # Three call shapes depending on whether weights and/or slope are arrays
  # (arrays are matched to the data along their shared named dimensions).
  if (length(weights) == 1 & length(slope) == 1) {
    result <- Apply(data, c(lon_dim, lat_dim, "rainfarm_samples"), .RainFARM,
                    weights, slope, nf, nens, kmin,
                    fglob, fsmooth, ncores = nprocs, verbose,
                    split_factor = "greatest")$output1
  } else if (length(slope) == 1 & length(weights) > 1 ) {
    result <- Apply(list(data, weights),
                    list(c(lon_dim, lat_dim, "rainfarm_samples"),
                         c(lonposw, latposw)),
                    .RainFARM, slope = slope,
                    nf = nf, nens = nens, kmin = kmin,
                    fglob = fglob, fsmooth = fsmooth, ncores = nprocs,
                    verbose = verbose,
                    split_factor = "greatest")$output1
  } else {
    result <- Apply(list(data, weights, slope),
                    list(c(lon_dim, lat_dim, "rainfarm_samples"),
                         c(lonposw, latposw), NULL),
                    fun = .RainFARM,
                    nf = nf, nens = nens, kmin = kmin,
                    fglob = fglob, fsmooth = fsmooth, ncores = nprocs,
                    verbose = verbose,
                    split_factor = "greatest")$output1
  }
  # result has dims: lon, lat, rainfarm_samples, realization, other dims
  # Expand back rainfarm_samples to compacted dims
  dim(result) <- c(dim(result)[1:2], cdim[-ind], dim(result)[-(1:3)])
  # Reorder as it was in original data
  # + realization dim after member if it exists
  ienspos <- which(names(cdim0) == "member")
  if (length(ienspos) == 0) ienspos <- length(names(cdim0))
  iorder <- sapply(c(names(cdim0)[1:ienspos], "realization",
                     names(cdim0)[-(1:ienspos)]),
                   grep, names(dim(result)))
  ndim <- names(dim(result))
  result <- aperm(result, iorder)
  # R < 3.2.3 compatibility fix
  names(dim(result)) <- ndim[iorder]
  if (drop_realization_dim) {
    cdim <- dim(result)
    if (nens == 1) {
      # Single realization: simply drop the dimension.
      dim(result) <- cdim[-which(names(cdim) == "realization")[1]]
    } else if ("member" %in% names(cdim)) {
      # compact member and realization dimension if member dim exists,
      # else rename realization to member
      ind <- which(names(cdim) %in% c("member", "realization"))
      dim(result) <- c(cdim[1:(ind[1] - 1)], cdim[ind[1]] * cdim[ind[2]],
                       cdim[(ind[2] + 1):length(cdim)])
    } else {
      ind <- which(names(cdim) %in% "realization")
      names(dim(result))[ind] <- "member"
    }
  }
  return(list(data = result, lon = lon_f, lat = lat_f))
}
#' Atomic RainFARM
#' @param pr Precipitation array to downscale with dimensions (lon, lat, time).
#' @param weights Matrix with climatological weights which can be obtained using
#' the \code{CST_RFWeights} function (default: \code{weights=1.} i.e. no weights).
#' @param slope Prescribed spectral slope (default: \code{slope=0.},
#' meaning that the slope is determined automatically over the dimensions specified by \code{time_dim}).
#' @param nf Refinement factor for downscaling (the output resolution is increased by this factor).
#' @param kmin First wavenumber for spectral slope (default: \code{kmin=1}).
#' @param nens Number of ensemble members to produce (default: \code{nens=1}).
#' @param fglob Logical to conserve global precipitation over the domain (default: FALSE).
#' @param fsmooth Logical to conserve precipitation with a smoothing kernel (default: TRUE).
#' @param verbose Logical for verbose output (default: FALSE).
#' @return .RainFARM returns a downscaled array with dimensions (lon, lat, time, realization)
#' @noRd
.RainFARM <- function(pr, weights, slope, nf, nens, kmin,
                      fglob, fsmooth, verbose) {
  # Identify samples (third dim) containing NAs and temporarily remove them;
  # they are re-inserted as NA fields after downscaling.
  # NOTE(review): NA detection only inspects pixel [1, 1] of each sample -
  # assumes missing values affect whole fields; confirm.
  posna <- NULL
  if (any(is.na(pr))) {
    posna <- unlist(lapply(1:dim(pr)['rainfarm_samples'],
                    function(x){!is.na(pr[1, 1, x])}))
    pr <- Subset(pr, 'rainfarm_samples', posna)
  }
  # slope == 0 means "estimate": fit the spectral slope from the data.
  if (slope == 0) {
    fxp <- fft2d(pr)
    sx <- fitslope(fxp, kmin = kmin)
  } else {
    sx <- slope
  }
  # Output: refined lon/lat grid, same number of samples, nens realizations.
  result_dims <- c(dim(pr)[1] * nf, dim(pr)[2] * nf, dim(pr)[3],
                   realization = nens)
  r <- array(dim = result_dims)
  # Each realization is an independent stochastic downscaling of all samples.
  for (i in 1:nens) {
    r[, , , i] <- rainfarm(pr, sx, nf, weights, fglob = fglob,
                           fsmooth = fsmooth, verbose = verbose)
  }
  # restoring NA values in their position:
  # Rebuild a full-length array and copy each downscaled field back into the
  # position of its original (non-NA) sample; removed samples stay NA.
  if (!is.null(posna)) {
    pos <- which(posna == FALSE)
    dimdata <- dim(r)
    xdim <- which(names(dimdata) == 'rainfarm_samples')
    dimdata[xdim] <- dimdata[xdim] + length(pos)
    new <- array(NA, dimdata)
    posT <- which(posna == TRUE)
    i = 1
    # lapply is used purely for its side effect (<<- writes into `new`).
    invisible(lapply(posT, function(x) {
      new[,,x,] <<- r[,,i,]
      i <<- i + 1
    }))
    #names(dim(r)) <- names(result_dims)
    warning("Missing values found in the samples.")
    r <- new
  }
  return(r)
}
# Subset an n-dimensional array along a single named dimension while
# preserving the names of the dim attribute (lost on R <= 3.2).
# Derived from Stack Overflow issue
# https://stackoverflow.com/questions/14500707/select-along-one-of-n-dimensions-in-array
.subset <- function(field, dim_name, range, drop = FALSE) {
  dim_names <- names(dim(field))
  target <- which(dim_names %in% dim_name)
  # Build the argument list for `[`: bquote() yields an empty symbol,
  # i.e. a missing argument, so this is field[, , range, , ...] assembled
  # programmatically with 'range' in the target slot.
  slice_args <- rep(list(bquote()), length(dim(field)))
  slice_args[[target]] <- range
  out <- do.call("[", c(list(field), slice_args, list(drop = drop)))
  # Needed for R <= 3.2: restore dimension names after subsetting.
  names(dim(out)) <- dim_names
  out
}
|
/R/CST_RainFARM.R
|
no_license
|
rpkgs/CSTools
|
R
| false
| false
| 21,124
|
r
|
#' @rdname CST_RainFARM
#' @title RainFARM stochastic precipitation downscaling of a CSTools object
#'
#' @author Jost von Hardenberg - ISAC-CNR, \email{j.vonhardenberg@isac.cnr.it}
#'
#' @description This function implements the RainFARM stochastic precipitation
#' downscaling method and accepts a CSTools object (an object of the class
#' 's2dv_cube' as provided by `CST_Load`) as input.
#' Adapted for climate downscaling and including orographic correction
#' as described in Terzago et al. 2018.
#' @references Terzago, S. et al. (2018). NHESS 18(11), 2825-2840.
#' http://doi.org/10.5194/nhess-18-2825-2018 ;
#' D'Onofrio et al. (2014), J of Hydrometeorology 15, 830-843; Rebora et. al. (2006), JHM 7, 724.
#' @param data An object of the class 's2dv_cube' as returned by `CST_Load`,
#' containing the spatial precipitation fields to downscale.
#' The data object is expected to have an element named \code{$data} with at least two
#' spatial dimensions named "lon" and "lat" and one or more dimensions over which
#' to compute average spectral slopes (unless specified with parameter \code{slope}),
#' which can be specified by parameter \code{time_dim}.
#' The number of longitudes and latitudes in the input data is expected to be even and the same. If not
#' the function will perform a subsetting to ensure this condition.
#' @param weights Matrix with climatological weights which can be obtained using
#' the \code{CST_RFWeights} function. If \code{weights=1.} (default) no weights are used.
#' The names of these dimensions must be at least 'lon' and 'lat'.
#' @param nf Refinement factor for downscaling (the output resolution is increased by this factor).
#' @param slope Prescribed spectral slope. The default is \code{slope=0.}
#' meaning that the slope is determined automatically over the dimensions specified by \code{time_dim}. A 1D array with named dimension can be provided (see details and examples)
#' @param kmin First wavenumber for spectral slope (default: \code{kmin=1}).
#' @param nens Number of ensemble members to produce (default: \code{nens=1}).
#' @param fglob Logical to conserve global precipitation over the domain (default: FALSE).
#' @param fsmooth Logical to conserve precipitation with a smoothing kernel (default: TRUE).
#' @param time_dim String or character array with name(s) of dimension(s)
#' (e.g. "ftime", "sdate", "member" ...) over which to compute spectral slopes.
#' If a character array of dimension names is provided, the spectral slopes
#' will be computed as an average over all elements belonging to those dimensions.
#' If omitted one of c("ftime", "sdate", "time") is searched and the first one with more
#' than one element is chosen.
#' @param verbose Logical for verbose output (default: FALSE).
#' @param drop_realization_dim Logical to remove the "realization" stochastic ensemble dimension,
#' needed for saving data through function CST_SaveData (default: FALSE)
#' with the following behaviour if set to TRUE:
#'
#' 1) if \code{nens==1}: the dimension is dropped;
#'
#' 2) if \code{nens>1} and a "member" dimension exists:
#' the "realization" and "member" dimensions are compacted (multiplied) and the resulting dimension is named "member";
#'
#' 3) if \code{nens>1} and a "member" dimension does not exist: the "realization" dimension is renamed to "member".
#' @param nprocs The number of parallel processes to spawn for the use for parallel computation in multiple cores. (default: 1)
#'
#' @return CST_RainFARM() returns a downscaled CSTools object (i.e., of the
#' class 's2dv_cube').
#' If \code{nens>1} an additional dimension named "realization" is added to the
#' \code{$data} array after the "member" dimension (unless
#' \code{drop_realization_dim=TRUE} is specified).
#' The ordering of the remaining dimensions in the \code{$data} element of the input object is maintained.
#' @details When parameters 'slope' and 'weights' present a seasonality dependency, a dimension name should match between these parameters and the input data in parameter 'data'. See example 2 below where weights and slope vary with the 'sdate' dimension.
#' @import multiApply
#' @import rainfarmr
#' @examples
#' #Example 1: using CST_RainFARM for a CSTools object
#' nf <- 8 # Choose a downscaling by factor 8
#' exp <- 1 : (2 * 3 * 4 * 8 * 8)
#' dim(exp) <- c(dataset = 1, member = 2, sdate = 3, ftime = 4, lat = 8, lon = 8)
#' lon <- seq(10, 13.5, 0.5)
#' dim(lon) <- c(lon = length(lon))
#' lat <- seq(40, 43.5, 0.5)
#' dim(lat) <- c(lat = length(lat))
#' data <- list(data = exp, lon = lon, lat = lat)
#' # Create a test array of weights
#' ww <- array(1., dim = c(lon = 8 * nf, lat = 8 * nf))
#' res <- CST_RainFARM(data, nf = nf, weights = ww, nens=3)
#' str(res)
#' #List of 3
#' # $ data: num [1, 1:2, 1:3, 1:3, 1:4, 1:64, 1:64] 260 553 281 278 143 ...
#' # $ lon : num [1:64] 9.78 9.84 9.91 9.97 10.03 ...
#' # $ lat : num [1:64] 39.8 39.8 39.9 40 40 ...
#' dim(res$data)
#' # dataset member realization sdate ftime lat lon
#' # 1 2 3 3 4 64 64
#'
#' # Example 2:
#' slo <- array(c(0.1, 0.5, 0.7), c(sdate= 3))
#' wei <- array(rnorm(8 * 8 * 3), c(lon = 8, lat = 8, sdate = 3))
#' res <- CST_RainFARM(lonlat_prec,
#' weights = wei, slope = slo, nf = 2)
#' @export
CST_RainFARM <- function(data, weights = 1., slope = 0, nf, kmin = 1,
                         nens = 1, fglob = FALSE, fsmooth = TRUE,
                         nprocs = 1, time_dim = NULL, verbose = FALSE,
                         drop_realization_dim = FALSE) {
  # Downscale the $data field of the 's2dv_cube' object, then splice the
  # refined field and fine-scale coordinates back into the object.
  # All arguments are forwarded by name to the array-level RainFARM().
  downscaled <- RainFARM(data$data, data$lon, data$lat,
                         nf = nf, weights = weights, nens = nens,
                         slope = slope, kmin = kmin, fglob = fglob,
                         fsmooth = fsmooth, nprocs = nprocs,
                         time_dim = time_dim, lon_dim = "lon",
                         lat_dim = "lat",
                         drop_realization_dim = drop_realization_dim,
                         verbose = verbose)
  # Preserve the coordinate attributes other than the first one (the
  # first attribute holds the old 'dim' and must reflect the new length).
  lon_attrs <- attributes(data$lon)[-1]
  lat_attrs <- attributes(data$lat)[-1]
  data$data <- downscaled$data
  data$lon <- downscaled$lon
  attributes(data$lon) <- lon_attrs
  data$lat <- downscaled$lat
  attributes(data$lat) <- lat_attrs
  data
}
#' @rdname RainFARM
#' @title RainFARM stochastic precipitation downscaling (reduced version)
#' @author Jost von Hardenberg - ISAC-CNR, \email{j.vonhardenberg@isac.cnr.it}
#' @description This function implements the RainFARM stochastic precipitation downscaling method
#' and accepts in input an array with named dims ("lon", "lat")
#' and one or more dimension (such as "ftime", "sdate" or "time")
#' over which to average automatically determined spectral slopes.
#' Adapted for climate downscaling and including orographic correction.
#' References:
#' Terzago, S. et al. (2018). NHESS 18(11), 2825-2840. http://doi.org/10.5194/nhess-18-2825-2018,
#' D'Onofrio et al. (2014), J of Hydrometeorology 15, 830-843; Rebora et. al. (2006), JHM 7, 724.
#' @param data Precipitation array to downscale.
#' The input array is expected to have at least two dimensions named "lon" and "lat" by default
#' (these default names can be changed with the \code{lon_dim} and \code{lat_dim} parameters)
#' and one or more dimensions over which to average these slopes,
#' which can be specified by parameter \code{time_dim}.
#' The number of longitudes and latitudes in the input data is expected to be even and the same. If not
#' the function will perform a subsetting to ensure this condition.
#' @param lon Vector or array of longitudes.
#' @param lat Vector or array of latitudes.
#' @param weights multi-dimensional array with climatological weights which can be obtained using
#' the \code{CST_RFWeights} function. If \code{weights=1.} (default) no weights are used.
#' The names of these dimensions must be at least 'lon' and 'lat'.
#' @param nf Refinement factor for downscaling (the output resolution is increased by this factor).
#' @param slope Prescribed spectral slope. The default is \code{slope=0.}
#' meaning that the slope is determined automatically over the dimensions specified by \code{time_dim}. A 1D array with named dimension can be provided (see details and examples)
#' @param kmin First wavenumber for spectral slope (default: \code{kmin=1}).
#' @param nens Number of ensemble members to produce (default: \code{nens=1}).
#' @param fglob Logical to conserve global precipitation over the domain (default: FALSE)
#' @param fsmooth Logical to conserve precipitation with a smoothing kernel (default: TRUE)
#' @param time_dim String or character array with name(s) of time dimension(s)
#' (e.g. "ftime", "sdate", "time" ...) over which to compute spectral slopes.
#' If a character array of dimension names is provided, the spectral slopes
#' will be computed over all elements belonging to those dimensions.
#' If omitted one of c("ftime", "sdate", "time")
#' is searched and the first one with more than one element is chosen.
#' @param lon_dim Name of lon dimension ("lon" by default).
#' @param lat_dim Name of lat dimension ("lat" by default).
#' @param verbose logical for verbose output (default: FALSE).
#' @param drop_realization_dim Logical to remove the "realization" stochastic ensemble dimension (default: FALSE)
#' with the following behaviour if set to TRUE:
#'
#' 1) if \code{nens==1}: the dimension is dropped;
#'
#' 2) if \code{nens>1} and a "member" dimension exists:
#' the "realization" and "member" dimensions are compacted (multiplied) and the resulting dimension is named "member";
#'
#' 3) if \code{nens>1} and a "member" dimension does not exist: the "realization" dimension is renamed to "member".
#'
#' @param nprocs The number of parallel processes to spawn for the use for parallel computation in multiple cores. (default: 1)
#' @return RainFARM() returns a list containing the fine-scale longitudes, latitudes
#' and the sequence of \code{nens} downscaled fields.
#' If \code{nens>1} an additional dimension named "realization" is added to the output array
#' after the "member" dimension (if it exists and unless \code{drop_realization_dim=TRUE} is specified).
#' The ordering of the remaining dimensions in the \code{exp} element of the input object is maintained.
#' @details When parameters 'slope' and 'weights' present a seasonality dependency, a dimension name should match between these parameters and the input data in parameter 'data'. See example 2 below where weights and slope vary with the 'sdate' dimension.
#' @import multiApply
#' @importFrom s2dverification Subset
#' @importFrom abind abind
#' @export
#' @examples
#' # Example for the 'reduced' RainFARM function
#' nf <- 8 # Choose a downscaling by factor 8
#' nens <- 3 # Number of ensemble members
#' # create a test array with dimension 8x8 and 20 timesteps
#' # or provide your own read from a netcdf file
#' pr <- rnorm(8 * 8 * 20)
#' dim(pr) <- c(lon = 8, lat = 8, ftime = 20)
#' lon_mat <- seq(10, 13.5, 0.5) # could also be a 2d matrix
#' lat_mat <- seq(40, 43.5, 0.5)
#' # Create a test array of weights
#' ww <- array(1., dim = c(lon = 8 * nf, lat = 8 * nf))
#' # or create proper weights using an external fine-scale climatology file
#' # Specify a weightsfn filename if you wish to save the weights
#' \dontrun{
#' ww <- CST_RFWeights("./worldclim.nc", nf, lon = lon_mat, lat = lat_mat,
#' fsmooth = TRUE)
#' }
#' # downscale using weights (ww=1. means do not use weights)
#' res <- RainFARM(pr, lon_mat, lat_mat, nf,
#' fsmooth = TRUE, fglob = FALSE,
#' weights = ww, nens = 2, verbose = TRUE)
#' str(res)
#' #List of 3
#' # $ data: num [1:3, 1:20, 1:64, 1:64] 0.186 0.212 0.138 3.748 0.679 ...
#' # $ lon : num [1:64] 9.78 9.84 9.91 9.97 10.03 ...
#' # $ lat : num [1:64] 39.8 39.8 39.9 40 40 ...
#' dim(res$data)
#' # lon lat ftime realization
#' # 64 64 20 2
#' # Example 2:
#' slo <- array(c(0.1, 0.5, 0.7), c(sdate= 3))
#' wei <- array(rnorm(8*8*3), c(lon = 8, lat = 8, sdate = 3))
#' res <- RainFARM(lonlat_prec$data, lon = lonlat_prec$lon,
#' lat = lonlat_prec$lat, weights = wei, slope = slo, nf = 2)
RainFARM <- function(data, lon, lat, nf, weights = 1., nens = 1,
                     slope = 0, kmin = 1, fglob = FALSE, fsmooth = TRUE,
                     nprocs = 1, time_dim = NULL, lon_dim = "lon", lat_dim = "lat",
                     drop_realization_dim = FALSE, verbose = FALSE) {
  # Pipeline: (1) trim the grid to an even square, (2) validate 'weights',
  # (3) detect the time dimension(s), (4) flatten all time dimensions into
  # a single 'rainfarm_samples' axis, (5) run the atomic .RainFARM over
  # every lon x lat x samples slice via multiApply::Apply, (6) restore the
  # original dimension order and optionally drop/merge 'realization'.
  # Ensure input grid is square and with even dimensions
  if ( (dim(data)[lon_dim] != dim(data)[lat_dim]) |
       (dim(data)[lon_dim] %% 2 == 1)) {
    warning("Warning: input data are expected to be on a square grid",
            " with an even number of pixels per side.")
    # Cut both spatial dims (and the lon/lat coordinates) to the largest
    # even square that fits the data.
    nmin <- min(dim(data)[lon_dim], dim(data)[lat_dim])
    nmin <- floor(nmin / 2) * 2
    data <- .subset(data, lat_dim, 1:nmin)
    data <- .subset(data, lon_dim, 1:nmin)
    if (length(dim(lon)) == 2) {
      lon <- lon[1:nmin, 1:nmin]
      lat <- lat[1:nmin, 1:nmin]
    } else {
      lon <- lon[1:nmin]
      lat <- lat[1:nmin]
    }
    warning("The input data have been cut to the range.")
    warning(paste0("lon: [", lon[1], ", ", lon[length(lon)], "] ",
                   " lat: [", lat[1], ", ", lat[length(lat)], "]"))
  }
  # When 'weights' is an array it must carry named 'lon'/'lat' dimensions;
  # their positions are stored for the Apply margins used below.
  if (length(dim(weights)) > 0) {
    if (length(names(dim(weights))) == 0) {
      stop("Parameter 'weights' must have dimension names when it is not a scalar.")
    } else {
      if (length(which(names(dim(weights)) == 'lon')) > 0 &
          length(which(names(dim(weights)) == 'lat')) > 0) {
        lonposw <- which(names(dim(weights)) == 'lon')
        latposw <- which(names(dim(weights)) == 'lat')
      } else {
        stop("Parameter 'weights' must have dimension names 'lon' and 'lat' when",
             " it is not a scalar.")
      }
    }
  }
  # NOTE(review): with '&' this guard only fails when BOTH the lon and lat
  # sizes of 'weights' disagree with dim(data) * nf; a single mismatched
  # dimension slips through silently. '|' was probably intended — confirm.
  if (!(length(dim(weights)) == 0)) {
    if (!(dim(weights)[lonposw] == dim(data)[lon_dim] * nf) &
        !(dim(weights)[latposw] == dim(data)[lat_dim] * nf)) {
      stop(paste("The dimensions of the weights matrix (", dim(weights)[1],
                 "x", dim(weights)[2] ,
                 ") are not consistent with the size of the data (",
                 dim(data)[lon_dim], ") and the refinement factor (", nf, ")"))
    }
  }
  # Check/detect time_dim
  if (is.null(time_dim)) {
    # Fall back to the first of ftime/sdate/time with more than one element.
    time_dim_names <- c("ftime", "sdate", "time")
    time_dim_num <- which(time_dim_names %in% names(dim(data)))
    if (length(time_dim_num) > 0) {
      # Find time dimension with length > 1
      ilong <- which(dim(data)[time_dim_names[time_dim_num]] > 1)
      if (length(ilong) > 0) {
        time_dim <- time_dim_names[time_dim_num[ilong[1]]]
      } else {
        stop("No time dimension longer than one found.")
      }
    } else {
      stop("Could not automatically detect a target time dimension ",
           "in the provided data in 'data'.")
    }
    warning(paste("Selected time dim:", time_dim))
  }
  # Check if slope is an array
  #if (length(slope) > 1) {
  #  warning("Parameter 'slope' has length > 1 and only the first ",
  #          "element will be used.")
  #  slope <- as.numeric(slope[1])
  #}
  # Perform common calls
  # Fine-scale coordinates for the nf-times refined output grid.
  r <- lon_lat_fine(lon, lat, nf)
  lon_f <- r$lon
  lat_f <- r$lat
  # reorder and group time_dim together at the end
  # (.aperm2 is a package-internal permutation helper defined elsewhere —
  # presumably an aperm() that preserves dimension names; confirm.)
  cdim0 <- dim(data)
  imask <- names(cdim0) %in% time_dim
  data <- .aperm2(data, c(which(!imask), which(imask)))
  cdim <- dim(data)
  ind <- 1:length(which(!imask))
  # compact (multiply) time_dim dimensions
  dim(data) <- c(cdim[ind], rainfarm_samples = prod(cdim[-ind]))
  # Repeatedly apply .RainFARM
  if (length(weights) == 1 & length(slope) == 1) {
    # Scalar weights and scalar slope: pass both as plain extra arguments.
    result <- Apply(data, c(lon_dim, lat_dim, "rainfarm_samples"), .RainFARM,
                    weights, slope, nf, nens, kmin,
                    fglob, fsmooth, ncores = nprocs, verbose,
                    split_factor = "greatest")$output1
  } else if (length(slope) == 1 & length(weights) > 1 ) {
    # Array weights: iterate jointly over data slices and weight fields.
    result <- Apply(list(data, weights),
                    list(c(lon_dim, lat_dim, "rainfarm_samples"),
                         c(lonposw, latposw)),
                    .RainFARM, slope = slope,
                    nf = nf, nens = nens, kmin = kmin,
                    fglob = fglob, fsmooth = fsmooth, ncores = nprocs,
                    verbose = verbose,
                    split_factor = "greatest")$output1
  } else {
    # Both weights and slope vary per slice.
    # NOTE(review): if 'slope' has length > 1 while 'weights' is a scalar,
    # 'lonposw'/'latposw' were never assigned above and this branch fails
    # with an "object not found" error — this case needs its own branch.
    result <- Apply(list(data, weights, slope),
                    list(c(lon_dim, lat_dim, "rainfarm_samples"),
                         c(lonposw, latposw), NULL),
                    fun = .RainFARM,
                    nf = nf, nens = nens, kmin = kmin,
                    fglob = fglob, fsmooth = fsmooth, ncores = nprocs,
                    verbose = verbose,
                    split_factor = "greatest")$output1
  }
  # result has dims: lon, lat, rainfarm_samples, realization, other dims
  # Expand back rainfarm_samples to compacted dims
  dim(result) <- c(dim(result)[1:2], cdim[-ind], dim(result)[-(1:3)])
  # Reorder as it was in original data
  # + realization dim after member if it exists
  ienspos <- which(names(cdim0) == "member")
  if (length(ienspos) == 0) ienspos <- length(names(cdim0))
  iorder <- sapply(c(names(cdim0)[1:ienspos], "realization",
                     names(cdim0)[-(1:ienspos)]),
                   grep, names(dim(result)))
  ndim <- names(dim(result))
  result <- aperm(result, iorder)
  # R < 3.2.3 compatibility fix
  names(dim(result)) <- ndim[iorder]
  if (drop_realization_dim) {
    cdim <- dim(result)
    if (nens == 1) {
      # Single realization: drop the length-1 'realization' axis.
      dim(result) <- cdim[-which(names(cdim) == "realization")[1]]
    } else if ("member" %in% names(cdim)) {
      # compact member and realization dimension if member dim exists,
      # else rename realization to member
      ind <- which(names(cdim) %in% c("member", "realization"))
      dim(result) <- c(cdim[1:(ind[1] - 1)], cdim[ind[1]] * cdim[ind[2]],
                       cdim[(ind[2] + 1):length(cdim)])
    } else {
      ind <- which(names(cdim) %in% "realization")
      names(dim(result))[ind] <- "member"
    }
  }
  return(list(data = result, lon = lon_f, lat = lat_f))
}
#' Atomic RainFARM
#'
#' Downscale one (lon, lat, rainfarm_samples) slice with the rainfarmr
#' stochastic generator, producing \code{nens} stochastic realizations.
#' @param pr Precipitation array to downscale with dimensions (lon, lat, time).
#' @param weights Matrix with climatological weights which can be obtained using
#' the \code{CST_RFWeights} function (default: \code{weights=1.} i.e. no weights).
#' @param slope Prescribed spectral slope (default: \code{slope=0.}, meaning
#' that the slope is determined automatically over the dimensions specified by
#' \code{time_dim}).
#' @param nf Refinement factor for downscaling (the output resolution is increased by this factor).
#' @param kmin First wavenumber for spectral slope (default: \code{kmin=1}).
#' @param nens Number of ensemble members to produce (default: \code{nens=1}).
#' @param fglob Logical to conserve global precipitation over the domain (default: FALSE).
#' @param fsmooth Logical to conserve precipitation with a smoothing kernel (default: TRUE).
#' @param verbose Logical for verbose output (default: FALSE).
#' @return .RainFARM returns a downscaled array with dimensions (lon, lat, time, realization)
#' @noRd
.RainFARM <- function(pr, weights, slope, nf, nens, kmin,
                      fglob, fsmooth, verbose) {
  # Time steps containing NA are removed before downscaling; their
  # positions are remembered in 'posna' and restored afterwards.
  # NOTE(review): only pr[1, 1, x] is inspected, which assumes an NA at
  # one grid point means the whole field at that step is NA — confirm.
  posna <- NULL
  if (any(is.na(pr))) {
    posna <- unlist(lapply(1:dim(pr)['rainfarm_samples'],
                    function(x){!is.na(pr[1, 1, x])}))
    pr <- Subset(pr, 'rainfarm_samples', posna)
  }
  # slope == 0 is the sentinel for "estimate the spectral slope from the
  # data" via the rainfarmr fft2d/fitslope helpers.
  if (slope == 0) {
    fxp <- fft2d(pr)
    sx <- fitslope(fxp, kmin = kmin)
  } else {
    sx <- slope
  }
  # Output grid is nf times finer in lon/lat, plus one 'realization'
  # dimension holding the stochastic ensemble members.
  result_dims <- c(dim(pr)[1] * nf, dim(pr)[2] * nf, dim(pr)[3],
                   realization = nens)
  r <- array(dim = result_dims)
  for (i in 1:nens) {
    r[, , , i] <- rainfarm(pr, sx, nf, weights, fglob = fglob,
                           fsmooth = fsmooth, verbose = verbose)
  }
  # restoring NA values in their position:
  if (!is.null(posna)) {
    pos <- which(posna == FALSE)
    dimdata <- dim(r)
    xdim <- which(names(dimdata) == 'rainfarm_samples')
    dimdata[xdim] <- dimdata[xdim] + length(pos)
    # Allocate the full-length array (all original time steps, NA-filled)
    # and copy each downscaled slice back to its original position:
    # 'i' walks the downscaled slices while 'x' walks the kept positions.
    new <- array(NA, dimdata)
    posT <- which(posna == TRUE)
    i = 1
    invisible(lapply(posT, function(x) {
      new[,,x,] <<- r[,,i,]
      i <<- i + 1
    }))
    #names(dim(r)) <- names(result_dims)
    warning("Missing values found in the samples.")
    r <- new
  }
  return(r)
}
# Subset an n-dimensional array along a single named dimension while
# preserving the names of the dim attribute (lost on R <= 3.2).
# Derived from Stack Overflow issue
# https://stackoverflow.com/questions/14500707/select-along-one-of-n-dimensions-in-array
.subset <- function(field, dim_name, range, drop = FALSE) {
  dim_names <- names(dim(field))
  target <- which(dim_names %in% dim_name)
  # Build the argument list for `[`: bquote() yields an empty symbol,
  # i.e. a missing argument, so this is field[, , range, , ...] assembled
  # programmatically with 'range' in the target slot.
  slice_args <- rep(list(bquote()), length(dim(field)))
  slice_args[[target]] <- range
  out <- do.call("[", c(list(field), slice_args, list(drop = drop)))
  # Needed for R <= 3.2: restore dimension names after subsetting.
  names(dim(out)) <- dim_names
  out
}
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): this file is regenerated by Rcpp::compileAttributes();
# edit the roxygen in the C++ sources, not here, or changes will be lost.
#' Factor Loading Curve Sampling Algorithm
#'
#' Sample the factor loading curve basis coefficients subject to
#' an orthonormality constraint.
#'
#' @param BtY \code{J x T} matrix \code{B.t()*Y} for basis matrix B
#' @param Beta \code{T x K} matrix of factors
#' @param Psi \code{J x K} matrix of previous factor loading curve coefficients
#' @param BtB \code{J x J} matrix of \code{B.t()*B}
#' @param Omega \code{J x J} prior precision/penalty matrix
#' @param lambda \code{K}-dimensional vector of prior precisions
#' @param sigmat2 \code{T}-dimensional vector of time-dependent observation error variances
#' @return Psi \code{J x K} matrix of (orthogonal) factor loading curve coefficients
#'
#' @note This function uses \code{Rcpp} for computational efficiency.
#'
#' @useDynLib dfosr
#' @import Rcpp
#' @export
sampleFLC <- function(BtY, Beta, Psi, BtB, Omega, lambda, sigmat2) {
    # Dispatch to the compiled C++ routine registered by the dfosr package.
    .Call('_dfosr_sampleFLC', PACKAGE = 'dfosr', BtY, Beta, Psi, BtB, Omega, lambda, sigmat2)
}
#' Factor Loading Curve Sampling Algorithm
#'
#' Sample the factor loading curve basis coefficients subject to
#' an orthonormality constraint for the special case in which \code{BtB = diag(J)}.
#'
#' @param BtY \code{J x T} matrix \code{B.t()*Y} for basis matrix B
#' @param Beta \code{T x K} matrix of factors
#' @param Psi \code{J x K} matrix of previous factor loading curve coefficients
#' @param Omega \code{J x J} prior precision/penalty matrix
#' @param lambda \code{K}-dimensional vector of prior precisions
#' @param sigmat2 \code{T}-dimensional vector of time-dependent observation error variances
#' @return Psi \code{J x K} matrix of (orthogonal) factor loading curve coefficients
#'
#' @note This function uses \code{Rcpp} for computational efficiency.
#'
#' @useDynLib dfosr
#' @import Rcpp
#' @export
sampleFLC_orthog <- function(BtY, Beta, Psi, Omega, lambda, sigmat2) {
    # Orthogonal-basis special case: no BtB argument needed.
    .Call('_dfosr_sampleFLC_orthog', PACKAGE = 'dfosr', BtY, Beta, Psi, Omega, lambda, sigmat2)
}
#' Factor Loading Curve Sampling Algorithm
#'
#' Sample the factor loading curve basis coefficients subject to
#' an orthonormality constraint with an additional matrix of (orthogonal) constraints
#' as an input.
#'
#' @param BtY \code{J x T} matrix \code{B.t()*Y} for basis matrix B
#' @param Beta \code{T x K} matrix of factors
#' @param Psi \code{J x K} matrix of previous factor loading curve coefficients
#' @param BtB \code{J x J} matrix of \code{B.t()*B}
#' @param Omega \code{J x J} prior precision/penalty matrix
#' @param BtCon \code{J x Jc} matrix of additional constraints, pre-multiplied by B.t()
#' @param lambda \code{K}-dimensional vector of prior precisions
#' @param sigmat2 \code{T}-dimensional vector of time-dependent observation error variances
#' @return Psi \code{J x K} matrix of (orthogonal) factor loading curve coefficients
#'
#' @note This function uses \code{Rcpp} for computational efficiency.
#'
#' @useDynLib dfosr
#' @import Rcpp
#' @export
sampleFLC_cons <- function(BtY, Beta, Psi, BtB, Omega, BtCon, lambda, sigmat2) {
    # Constrained variant: BtCon carries the extra orthogonality constraints.
    .Call('_dfosr_sampleFLC_cons', PACKAGE = 'dfosr', BtY, Beta, Psi, BtB, Omega, BtCon, lambda, sigmat2)
}
#' Factor Loading Curve Sampling Algorithm for K=1
#'
#' Sample the factor loading curve basis coefficients for K=1 factor subject to
#' an additional matrix of (orthogonal) constraints as an input.
#'
#' @param BtY \code{J x T} matrix \code{B.t()*Y} for basis matrix B
#' @param Beta \code{T x 1} matrix of factors
#' @param Psi \code{J x 1} matrix of previous factor loading curve coefficients
#' @param BtB \code{J x J} matrix of \code{B.t()*B}
#' @param Omega \code{J x J} prior precision/penalty matrix
#' @param BtCon \code{J x Jc} matrix of additional constraints, pre-multiplied by B.t()
#' @param lambda \code{1}-dimensional vector of prior precisions
#' @param sigmat2 \code{T}-dimensional vector of time-dependent observation error variances
#' @return Psi \code{J x 1} matrix of factor loading curve coefficients
#'
#' @note This function uses \code{Rcpp} for computational efficiency.
#'
#' @useDynLib dfosr
#' @import Rcpp
#' @export
sampleFLC_cons_1 <- function(BtY, Beta, Psi, BtB, Omega, BtCon, lambda, sigmat2) {
    # Single-factor (K = 1) constrained variant.
    .Call('_dfosr_sampleFLC_cons_1', PACKAGE = 'dfosr', BtY, Beta, Psi, BtB, Omega, BtCon, lambda, sigmat2)
}
#' Factor Loading Curve Sampling Algorithm for K=1
#'
#' Sample the factor loading curve basis coefficients for K=1 factor.
#'
#' @param BtY \code{J x T} matrix \code{B.t()*Y} for basis matrix B
#' @param Beta \code{T x 1} matrix of factors
#' @param Psi \code{J x 1} matrix of previous factor loading curve coefficients
#' @param BtB \code{J x J} matrix of \code{B.t()*B}
#' @param Omega \code{J x J} prior precision/penalty matrix
#' @param lambda \code{1}-dimensional vector of prior precisions
#' @param sigmat2 \code{T}-dimensional vector of time-dependent observation error variances
#' @return Psi \code{J x 1} matrix of factor loading curve coefficients
#'
#' @note This function uses \code{Rcpp} for computational efficiency.
#'
#' @useDynLib dfosr
#' @import Rcpp
#' @export
sampleFLC_1 <- function(BtY, Beta, Psi, BtB, Omega, lambda, sigmat2) {
    # Single-factor (K = 1) unconstrained variant.
    .Call('_dfosr_sampleFLC_1', PACKAGE = 'dfosr', BtY, Beta, Psi, BtB, Omega, lambda, sigmat2)
}
|
/R/RcppExports.R
|
no_license
|
drkowal/dfosr
|
R
| false
| false
| 5,360
|
r
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): this file is regenerated by Rcpp::compileAttributes();
# edit the roxygen in the C++ sources, not here, or changes will be lost.
#' Factor Loading Curve Sampling Algorithm
#'
#' Sample the factor loading curve basis coefficients subject to
#' an orthonormality constraint.
#'
#' @param BtY \code{J x T} matrix \code{B.t()*Y} for basis matrix B
#' @param Beta \code{T x K} matrix of factors
#' @param Psi \code{J x K} matrix of previous factor loading curve coefficients
#' @param BtB \code{J x J} matrix of \code{B.t()*B}
#' @param Omega \code{J x J} prior precision/penalty matrix
#' @param lambda \code{K}-dimensional vector of prior precisions
#' @param sigmat2 \code{T}-dimensional vector of time-dependent observation error variances
#' @return Psi \code{J x K} matrix of (orthogonal) factor loading curve coefficients
#'
#' @note This function uses \code{Rcpp} for computational efficiency.
#'
#' @useDynLib dfosr
#' @import Rcpp
#' @export
sampleFLC <- function(BtY, Beta, Psi, BtB, Omega, lambda, sigmat2) {
    # Dispatch to the compiled C++ routine registered by the dfosr package.
    .Call('_dfosr_sampleFLC', PACKAGE = 'dfosr', BtY, Beta, Psi, BtB, Omega, lambda, sigmat2)
}
#' Factor Loading Curve Sampling Algorithm
#'
#' Sample the factor loading curve basis coefficients subject to
#' an orthonormality constraint for the special case in which \code{BtB = diag(J)}.
#'
#' @param BtY \code{J x T} matrix \code{B.t()*Y} for basis matrix B
#' @param Beta \code{T x K} matrix of factors
#' @param Psi \code{J x K} matrix of previous factor loading curve coefficients
#' @param Omega \code{J x J} prior precision/penalty matrix
#' @param lambda \code{K}-dimensional vector of prior precisions
#' @param sigmat2 \code{T}-dimensional vector of time-dependent observation error variances
#' @return Psi \code{J x K} matrix of (orthogonal) factor loading curve coefficients
#'
#' @note This function uses \code{Rcpp} for computational efficiency.
#'
#' @useDynLib dfosr
#' @import Rcpp
#' @export
sampleFLC_orthog <- function(BtY, Beta, Psi, Omega, lambda, sigmat2) {
    # Orthogonal-basis special case: no BtB argument needed.
    .Call('_dfosr_sampleFLC_orthog', PACKAGE = 'dfosr', BtY, Beta, Psi, Omega, lambda, sigmat2)
}
#' Factor Loading Curve Sampling Algorithm
#'
#' Sample the factor loading curve basis coefficients subject to
#' an orthonormality constraint with an additional matrix of (orthogonal) constraints
#' as an input.
#'
#' @param BtY \code{J x T} matrix \code{B.t()*Y} for basis matrix B
#' @param Beta \code{T x K} matrix of factors
#' @param Psi \code{J x K} matrix of previous factor loading curve coefficients
#' @param BtB \code{J x J} matrix of \code{B.t()*B}
#' @param Omega \code{J x J} prior precision/penalty matrix
#' @param BtCon \code{J x Jc} matrix of additional constraints, pre-multiplied by B.t()
#' @param lambda \code{K}-dimensional vector of prior precisions
#' @param sigmat2 \code{T}-dimensional vector of time-dependent observation error variances
#' @return Psi \code{J x K} matrix of (orthogonal) factor loading curve coefficients
#'
#' @note This function uses \code{Rcpp} for computational efficiency.
#'
#' @useDynLib dfosr
#' @import Rcpp
#' @export
sampleFLC_cons <- function(BtY, Beta, Psi, BtB, Omega, BtCon, lambda, sigmat2) {
    # Constrained variant: BtCon carries the extra orthogonality constraints.
    .Call('_dfosr_sampleFLC_cons', PACKAGE = 'dfosr', BtY, Beta, Psi, BtB, Omega, BtCon, lambda, sigmat2)
}
#' Factor Loading Curve Sampling Algorithm for K=1
#'
#' Sample the factor loading curve basis coefficients for K=1 factor subject to
#' an additional matrix of (orthogonal) constraints as an input.
#'
#' @param BtY \code{J x T} matrix \code{B.t()*Y} for basis matrix B
#' @param Beta \code{T x 1} matrix of factors
#' @param Psi \code{J x 1} matrix of previous factor loading curve coefficients
#' @param BtB \code{J x J} matrix of \code{B.t()*B}
#' @param Omega \code{J x J} prior precision/penalty matrix
#' @param BtCon \code{J x Jc} matrix of additional constraints, pre-multiplied by B.t()
#' @param lambda \code{1}-dimensional vector of prior precisions
#' @param sigmat2 \code{T}-dimensional vector of time-dependent observation error variances
#' @return Psi \code{J x 1} matrix of factor loading curve coefficients
#'
#' @note This function uses \code{Rcpp} for computational efficiency.
#'
#' @useDynLib dfosr
#' @import Rcpp
#' @export
sampleFLC_cons_1 <- function(BtY, Beta, Psi, BtB, Omega, BtCon, lambda, sigmat2) {
    # Single-factor (K = 1) constrained variant.
    .Call('_dfosr_sampleFLC_cons_1', PACKAGE = 'dfosr', BtY, Beta, Psi, BtB, Omega, BtCon, lambda, sigmat2)
}
#' Factor Loading Curve Sampling Algorithm for K=1
#'
#' Sample the factor loading curve basis coefficients for K=1 factor.
#'
#' @param BtY \code{J x T} matrix \code{B.t()*Y} for basis matrix B
#' @param Beta \code{T x 1} matrix of factors
#' @param Psi \code{J x 1} matrix of previous factor loading curve coefficients
#' @param BtB \code{J x J} matrix of \code{B.t()*B}
#' @param Omega \code{J x J} prior precision/penalty matrix
#' @param lambda \code{1}-dimensional vector of prior precisions
#' @param sigmat2 \code{T}-dimensional vector of time-dependent observation error variances
#' @return Psi \code{J x 1} matrix of factor loading curve coefficients
#'
#' @note This function uses \code{Rcpp} for computational efficiency.
#'
#' @useDynLib dfosr
#' @import Rcpp
#' @export
sampleFLC_1 <- function(BtY, Beta, Psi, BtB, Omega, lambda, sigmat2) {
    # Single-factor (K = 1) unconstrained variant.
    .Call('_dfosr_sampleFLC_1', PACKAGE = 'dfosr', BtY, Beta, Psi, BtB, Omega, lambda, sigmat2)
}
|
library(limma)
library(edgeR)
library(ggplot2)
library(gplots)
library(Rtsne)
library(org.Rn.eg.db)
library(AnnotationDbi)
library(ComplexHeatmap)
# Directory holding the expression matrices exported from GEO
# (genes as row names, samples as columns).
data_dir <- "D:\\work\\Rheumatoid arthritis\\data"
# GEO series used in this analysis.
gse_ids <- c("GSE93272", "GSE45291", "GSE74143", "GSE65010", "GSE15573",
             "GSE61635", "GSE65391", "GSE138458", "GSE143272", "GSE113469",
             "GSE50772", "GSE55457")
# Read each series into a variable named after its accession
# (GSE93272, GSE45291, ...), replacing the original 12 copy-pasted
# read.csv() calls with a single loop over the accession list.
for (gse in gse_ids) {
  assign(gse, read.csv(file = paste0(data_dir, "\\", gse, ".csv"),
                       row.names = 1, header = TRUE, sep = ","))
}
combine_index<-intersect(intersect(intersect(intersect(intersect(intersect(intersect(intersect(intersect(intersect(intersect(rownames(GSE93272), rownames(GSE45291)), rownames(GSE74143)), rownames(GSE65010)), rownames(GSE15573)), rownames(GSE61635)), rownames(GSE65391)), rownames(GSE138458)), rownames(GSE143272)), rownames(GSE113469)), rownames(GSE50772)), rownames(GSE55457))
GSE93272<-GSE93272[combine_index, ]
GSE45291<-GSE45291[combine_index, ]
GSE74143<-GSE74143[combine_index, ]
GSE65010<-GSE65010[combine_index, ]
GSE15573<-GSE15573[combine_index, ]
GSE61635<-GSE61635[combine_index, ]
GSE65391<-GSE65391[combine_index, ]
GSE138458<-GSE138458[combine_index, ]
GSE143272<-GSE143272[combine_index, ]
GSE113469<-GSE113469[combine_index, ]
GSE50772<-GSE50772[combine_index, ]
GSE55457<-GSE55457[combine_index, ]
data<-cbind(GSE93272, GSE45291, GSE74143, GSE65010, GSE15573, GSE61635, GSE65391, GSE138458, GSE143272, GSE113469, GSE50772)
labels<-c()
for (i in 1:ncol(data)) {
if (i <= 697){
labels[i]<-substring(colnames(data)[i], 4, 5)
}
if (i > 697){
labels[i]<-substring(colnames(data)[i], 4, 6)
}
}
colors<-rainbow(length(unique(labels)))
names(colors)<-unique(labels)
tsne<- Rtsne(t(data), dims = 2, perplexity=30, verbose=TRUE, max_iter = 500)
plot(tsne$Y, main="Raw", col=colors[labels], type = 'p', pch = 19, cex = 1.4, cex.axis = 2, cex.main = 2, font.axis = 2, xlab = '', ylab = '')
legend('bottomleft', title = 'Batch', pch = 16, legend = unique(labels), col = colors, ncol = 3, cex = 1.35, text.font = 2, pt.cex = 2.5)
batch<-c()
status<-c()
for (i in 1:ncol(data)) {
status[i]<-substring(colnames(data)[i], 1, 2)
if (i <= 697){
index<-substring(colnames(data)[i], 4, 5)
if (index == 'B1'){
batch[i]<-1
}
if (index == 'B2'){
batch[i]<-2
}
if (index == 'B3'){
batch[i]<-3
}
if (index == 'B4'){
batch[i]<-4
}
if (index == 'B5'){
batch[i]<-5
}
if (index == 'B6'){
batch[i]<-6
}
if (index == 'B7'){
batch[i]<-7
}
if (index == 'B8'){
batch[i]<-8
}
if (index == 'B9'){
batch[i]<-9
}
}
if (i > 697){
index<-substring(colnames(data)[i], 4, 6)
if (index == 'B10'){
batch[i]<-10
}
if (index == 'B11'){
batch[i]<-11
}
}
}
status<-as.factor(status)
design<-model.matrix(~status)
data_norm<-as.data.frame(removeBatchEffect(data, batch = batch, design = design))
tsne<-Rtsne(t(data_norm), dims = 2, perplexity=30, verbose=TRUE, max_iter = 500)
plot(tsne$Y, main="Norm", col=colors[labels], type = 'p', pch = 19, cex = 1.4, cex.axis = 2, cex.main = 2, font.axis = 2, xlab = '', ylab = '')
legend('bottomleft', title = 'Batch', pch = 16, legend = unique(labels), col = colors, ncol = 3, cex = 1.35, text.font = 2, pt.cex = 2.5)
write.csv(data_norm, file = 'D:\\work\\Rheumatoid arthritis\\data\\combine.csv')
write.csv(GSE55457, file = 'D:\\work\\Rheumatoid arthritis\\data\\test.csv')
|
/Rheumatoid Arthritis-Code/R/data_preprocessing.R
|
no_license
|
fdgey34/RA-CODE
|
R
| false
| false
| 4,694
|
r
|
# GEO expression batch-correction pipeline: load 12 datasets, align them on
# their shared genes, merge 11 into a training matrix (GSE55457 is the held
# out test set), inspect batches with t-SNE before/after
# limma::removeBatchEffect, and persist the results.
library(limma)
library(edgeR)
library(ggplot2)
library(gplots)
library(Rtsne)
library(org.Rn.eg.db)
library(AnnotationDbi)
library(ComplexHeatmap)
# Every CSV stores genes as row names and samples as columns.
GSE93272 <- read.csv(file = 'D:\\work\\Rheumatoid arthritis\\data\\GSE93272.csv', row.names = 1, header = TRUE, sep = ',')
GSE45291 <- read.csv(file = 'D:\\work\\Rheumatoid arthritis\\data\\GSE45291.csv', row.names = 1, header = TRUE, sep = ',')
GSE74143 <- read.csv(file = 'D:\\work\\Rheumatoid arthritis\\data\\GSE74143.csv', row.names = 1, header = TRUE, sep = ',')
GSE65010 <- read.csv(file = 'D:\\work\\Rheumatoid arthritis\\data\\GSE65010.csv', row.names = 1, header = TRUE, sep = ',')
GSE15573 <- read.csv(file = 'D:\\work\\Rheumatoid arthritis\\data\\GSE15573.csv', row.names = 1, header = TRUE, sep = ',')
GSE61635 <- read.csv(file = 'D:\\work\\Rheumatoid arthritis\\data\\GSE61635.csv', row.names = 1, header = TRUE, sep = ',')
GSE65391 <- read.csv(file = 'D:\\work\\Rheumatoid arthritis\\data\\GSE65391.csv', row.names = 1, header = TRUE, sep = ',')
GSE138458 <- read.csv(file = 'D:\\work\\Rheumatoid arthritis\\data\\GSE138458.csv', row.names = 1, header = TRUE, sep = ',')
GSE143272 <- read.csv(file = 'D:\\work\\Rheumatoid arthritis\\data\\GSE143272.csv', row.names = 1, header = TRUE, sep = ',')
GSE113469 <- read.csv(file = 'D:\\work\\Rheumatoid arthritis\\data\\GSE113469.csv', row.names = 1, header = TRUE, sep = ',')
GSE50772 <- read.csv(file = 'D:\\work\\Rheumatoid arthritis\\data\\GSE50772.csv', row.names = 1, header = TRUE, sep = ',')
GSE55457 <- read.csv(file = 'D:\\work\\Rheumatoid arthritis\\data\\GSE55457.csv', row.names = 1, header = TRUE, sep = ',')
dataset_names <- c("GSE93272", "GSE45291", "GSE74143", "GSE65010",
                   "GSE15573", "GSE61635", "GSE65391", "GSE138458",
                   "GSE143272", "GSE113469", "GSE50772", "GSE55457")
# Genes common to all datasets, then subset each dataset to that gene set
# (identical row order everywhere afterwards).
combine_index <- Reduce(intersect, lapply(dataset_names, function(nm) rownames(get(nm))))
for (nm in dataset_names) {
  assign(nm, get(nm)[combine_index, ])
}
# Training matrix: everything except the held-out GSE55457.
data <- cbind(GSE93272, GSE45291, GSE74143, GSE65010, GSE15573, GSE61635,
              GSE65391, GSE138458, GSE143272, GSE113469, GSE50772)
# Batch label starts at character 4 of each column name; columns 1..697 use
# single-digit ids ("B1".."B9"), later columns two-digit ids ("B10", "B11").
labels <- ifelse(seq_len(ncol(data)) <= 697,
                 substring(colnames(data), 4, 5),
                 substring(colnames(data), 4, 6))
colors <- rainbow(length(unique(labels)))
names(colors) <- unique(labels)
# Raw-data t-SNE, coloured by batch.
tsne <- Rtsne(t(data), dims = 2, perplexity = 30, verbose = TRUE, max_iter = 500)
plot(tsne$Y, main = "Raw", col = colors[labels], type = 'p', pch = 19,
     cex = 1.4, cex.axis = 2, cex.main = 2, font.axis = 2, xlab = '', ylab = '')
legend('bottomleft', title = 'Batch', pch = 16, legend = unique(labels),
       col = colors, ncol = 3, cex = 1.35, text.font = 2, pt.cex = 2.5)
# Disease status is the first two characters of each column name; the
# numeric batch id is the digits after the leading "B" ("B11" -> 11).
status <- as.factor(substring(colnames(data), 1, 2))
batch <- as.numeric(substring(labels, 2))
design <- model.matrix(~status)
# Batch correction, keeping the status effect protected through `design`.
data_norm <- as.data.frame(removeBatchEffect(data, batch = batch, design = design))
# Post-correction t-SNE to verify the batches are now mixed.
tsne <- Rtsne(t(data_norm), dims = 2, perplexity = 30, verbose = TRUE, max_iter = 500)
plot(tsne$Y, main = "Norm", col = colors[labels], type = 'p', pch = 19,
     cex = 1.4, cex.axis = 2, cex.main = 2, font.axis = 2, xlab = '', ylab = '')
legend('bottomleft', title = 'Batch', pch = 16, legend = unique(labels),
       col = colors, ncol = 3, cex = 1.35, text.font = 2, pt.cex = 2.5)
write.csv(data_norm, file = 'D:\\work\\Rheumatoid arthritis\\data\\combine.csv')
write.csv(GSE55457, file = 'D:\\work\\Rheumatoid arthritis\\data\\test.csv')
|
library(httr)
library(tidyverse)
library(xml2)
library(rvest)
library(stringr)
library(rebus)
library(lubridate)
library(ggthemes)
library(ggdark)
library(scales)
library(extrafont)
library(shiny)
library(shinythemes)
library(reactable)
library(ggplot2)
library(extrafont)
#library(ggchicklet)
library(ballr)
#############################################################
### Player bio details, one row per player (de-duplicated).
nba_bio_2021 <- read_csv("https://raw.githubusercontent.com/katiesegreti/NBA/main/nba_bios.csv") %>% unique()
# Current team per player (snapshot from 2021-03-02); "BKN" is normalised
# to "BRK" to match basketball-reference team codes. TODO: refresh soon.
nba_player_teams <- read_csv("https://raw.githubusercontent.com/katiesegreti/NBA/main/NBA_player_teams_0302.csv") %>% select(player, current_team) %>%
  mutate(current_team = if_else(current_team == "BKN", "BRK", current_team))
### Past-season per-game stats. "2016-17"-style labels are collapsed into a
### single ending-year number (e.g. 2017) before the column selection.
player_history <- read_csv("https://raw.githubusercontent.com/katiesegreti/NBA/main/nba_history_013021.csv") %>%
  mutate(season = as.numeric(paste0(substr(season, 1, 2), substr(season, 6, 7)))) %>%
  select(player, pos, age, tm, g, gs, mp, fg, fga, fgpercent, x3p, x3pa, x3ppercent, x2p, x2pa, x2ppercent, efgpercent, ft, fta, ftpercent, orb, drb, trb, ast, stl, blk, tov, pf, pts, link, season)
### Shooting data currently disabled (WNBA leftover kept for reference).
# wnba_shooting <- read_csv("https://raw.githubusercontent.com/katiesegreti/WNBA/master/WNBA_2020_shooting_players.csv") %>%
#   mutate( player = as.factor(player),
#           tm = as.factor(tm),
#           zone = as.factor(zone),
#           stat = as.factor(stat),
#           shots = as.factor(shots)
#   )
######################################################
############### TODO: consolidate the number of datasets
######################################################
# In-progress 2021 season per-game stats, scraped fresh via ballr on each
# app start.
current_season <- NBAPerGameStatistics(season = 2021)
# Current-season rows (tagged season = 2021) stacked on top of history;
# the "<season> <team>" key becomes the x axis of the career charts.
today_with_history <- current_season %>%
  mutate(season = 2021) %>%
  select(player, pos, age, tm, g, gs, mp, fg, fga, fgpercent, x3p, x3pa, x3ppercent,
         x2p, x2pa, x2ppercent, efgpercent, ft, fta, ftpercent, orb, drb, trb, ast,
         stl, blk, tov, pf, pts, link, season) %>%
  bind_rows(player_history) %>%
  mutate(season_team = paste0(season, " ", tm)) %>% unique()
# Bio-join left over from the WNBA version (disabled).
# wnba_today <- current_stats %>%
#   left_join(wnba_bio_2020)
#
# wnba_today1 <- wnba_today %>%
#   mutate(season_team = paste0(season, " ", tm))
# Rows per player in 2021: a player traded mid-season has one row per team
# plus a combined "TOT" row, so n > 1 flags multi-team players.
player_counts_2021 <- today_with_history %>% filter(season == 2021) %>%
  count(player)
# Most recent team code for one player, as a one-row data frame
# (player, team). Taking the last `tm` entry means players traded
# mid-season resolve to their latest listed team.
get_teams <- function(playername) {
  team_codes <- current_season %>%
    filter(player == playername) %>%
    pull(tm)
  data.frame(player = playername, team = last(team_codes))
}
# 2021 season, one row per player: keep the single-team rows, and for
# multi-team players keep only the combined "TOT" row. `hilite` is a
# chart flag initialised to 0 (not highlighted).
players_2021 <- today_with_history %>%
  filter(season == 2021) %>%
  left_join(player_counts_2021) %>%
  filter(n == 1 | tm == "TOT") %>%
  mutate(hilite = 0)
# Resolve each player's current team: start from the last team listed in
# current_season (get_teams), then override with the snapshot in
# nba_player_teams when available.
#player_teams <- players_2021$player %>% map(function(x) get_teams(x)) %>% bind_rows()
player_teams <- players_2021$player %>% map(function(x) get_teams(x)) %>% bind_rows() %>%
  left_join(nba_player_teams, by = "player") %>%
  mutate(team = if_else(!is.na(current_team), current_team, team)) %>% select(player, team)
# players_2021a <- today_with_history %>%
#   filter(season == 2021) %>%
#   mutate(hilite = 0)
# Same as players_2021 but with `tm` replaced by the resolved current team;
# used by the team-comparison charts.
players_2021a <- players_2021 %>%
  left_join(player_teams) %>%
  mutate(tm = team) %>%
  select(-team)
# NBA team brand colors, keyed by basketball-reference three-letter codes.
ATL <- "#e03a3e"
BRK <- "#000000"
BOS <- "#008348"
CHO <- "#00788c"
CHI <- "#ce1141"
CLE <- "#6f263d"
DAL <- "#bbc4ca"
DEN <- "#fec524"
DET <- "#c8102e"
GSW <- "#fdb927"
HOU <- "#ce1141"
IND <- "#fdbb30"
LAC <- "#1d428a"
LAL <- "#552583"
MEM <- "#5d76a9"
MIA <- "#98002e"
MIL <- "#00471b"
MIN <- "#78be20"
NOP <- "#b4975a"
NYK <- "#f58426"
OKC <- "#007ac1"
ORL <- "#c4ced4"
PHI <- "#006bb6"
PHO <- "#1d1160"
POR <- "#e03a3e"
SAC <- "#5a2b81"
SAS <- "#000000"
TOR <- "#ce1141"
UTA <- "#00471b"
WAS <- "#002b5c"
# Named palette used with scale_fill_manual() in the charts below.
team_colors <- c("ATL" = ATL, "BRK" = BRK , "BOS" = BOS, "CHO" = CHO,
"CHI" = CHI, "CLE" = CLE, "DAL" = DAL, "DEN" = DEN, "DET" = DET,
"GSW" = GSW, "HOU" = HOU, "IND" = IND, "LAC" = LAC, "LAL" = LAL,
"MEM" = MEM, "MIA" = MIA, "MIL" = MIL, "MIN" = MIN, "NOP" = NOP,
"NYK" = NYK, "OKC" = OKC, "ORL" = ORL, "PHI" = PHI, "PHO" = PHO,
"POR" = POR, "SAC" = SAC, "SAS" = SAS, "TOR" = TOR, "UTA" = UTA, "WAS" = WAS)
# Full-name -> code map for the team selector (leading "" = empty choice).
team_names <- c("", "Atlanta Hawks" = "ATL", "Brooklyn Nets" = "BRK",
"Boston Celtics" = "BOS", "Charlotte Hornets" = "CHO", "Chicago Bulls" = "CHI",
"Cleveland Caveliers" = "CLE", "Dallas Mavericks" = "DAL", "Denver Nuggets" = "DEN",
"Detroit Pistons" = "DET", "Golden State Warriors" = "GSW", "Houston Rockets" = "HOU",
"Indiana Pacers" = "IND", "Los Angeles Clippers" = "LAC", "Los Angeles Lakers" = "LAL",
"Memphis Grizzlies" = "MEM", "Miami Heat" = "MIA", "Milwaukee Bucks" = "MIL",
"Minnesota Timberwolves" = "MIN", "New Orleans Pelicans" = "NOP", "New York Knicks" = "NYK",
"Oklahoma City Thunder" = "OKC", "Orlando Magic" = "ORL", "Philadelphia 76ers" = "PHI",
"Phoenix Suns" = "PHO", "Portland Trailblazers" = "POR", "Sacramento Kings" = "SAC",
"San Antonio Spurs" = "SAS", "Toronto Raptors" = "TOR", "Utah Jazz" = "UTA", "Washington Wizards" = "WAS"
)
# App accent colors: bars are darkslateblue, highlights/labels are crimson.
darkslateblue <- "#2e3c81"
crimson <- "#e73a3c"
# Light (non-dark) theme shared by every chart in this file.
bg_color <- "#f2f2f2"
nr_theme <- theme_wsj() +
  theme(
    text = element_text(family = "Arial"),
    panel.background = element_rect(fill = bg_color),
    plot.background = element_rect(fill = bg_color),
    legend.position = "none",
    axis.line.x.bottom = element_blank(),
    axis.ticks.x.bottom = element_blank(),
    axis.text.x = element_text(face = "plain", family = "Arial"),
    panel.grid.major.y = element_line( colour = darkslateblue),
    plot.title = element_text(size = 22, family = "Arial"),
    legend.background = element_rect(fill = bg_color),
    plot.subtitle = element_text(size = 18, family = "Arial")
  )
# Style for a frozen (sticky) first column in reactable tables.
sticky_style <- list(position = "sticky", left = 0, background = "#fff", zIndex = 1,
                     borderRight = "1px solid #eee")
# write a function to make avg_point charts by season for a player
# Bar chart of a player's average points for each season/team stint.
avgpoint_chart <- function(player_name) {
  # Career rows for this player, excluding the combined "TOT" rows.
  career <- today_with_history %>%
    filter(player == player_name & tm != "TOT")
  # Numeric season labels recovered from the "<season> <team>" axis keys.
  season_lbls <- career$season_team %>%
    map(function(st) str_split(st, " ")[[1]][1]) %>%
    unlist() %>%
    as.numeric() %>%
    sort()
  # Season count, kept for possible label-size tuning (currently unused).
  n_seasons <- length(season_lbls)
  peak <- max(career$pts)
  ggplot(career, aes(x = season_team, y = pts)) +
    geom_col(fill = darkslateblue) +
    scale_x_discrete(labels = season_lbls) +
    # Value label floats just above each bar; team badge sits inside it.
    geom_label(aes(label = paste0(round(pts, 1)),
                   y = pts + peak / 25), size = 4, fill = crimson) +
    geom_text(aes(label = paste(g, "\n games")), y = peak / 6, color = "white", size = 3) +
    geom_label(aes(label = tm, fill = tm), y = peak / 15, size = 4, color = "white") +
    scale_fill_manual(values = team_colors) +
    labs(title = paste0("Average Points: ", player_name),
         x = "season",
         y = "") +
    nr_theme
}
#fg % chart
# Bar chart of a player's field-goal percentage for each season/team stint.
fgpct_chart <- function(player_name) {
  # Career rows for this player, excluding the combined "TOT" rows.
  career <- today_with_history %>%
    filter(player == player_name & tm != "TOT")
  # Numeric season labels recovered from the "<season> <team>" axis keys.
  season_lbls <- career$season_team %>%
    map(function(st) str_split(st, " ")[[1]][1]) %>%
    unlist() %>%
    as.numeric() %>%
    sort()
  peak <- max(career$fgpercent)
  ggplot(career, aes(x = season_team, y = fgpercent)) +
    geom_col(fill = darkslateblue) +
    scale_y_continuous(labels = scales::percent_format(accuracy = 5L)) +
    scale_x_discrete(labels = season_lbls) +
    # Percentage label floats above the bar; team badge sits inside it.
    geom_label(aes(label = paste0(round(fgpercent, 2) * 100, "%"),
                   y = fgpercent + peak / 25), size = 4, fill = crimson) +
    geom_text(aes(label = paste0(g, "\n games")), y = peak / 6, color = "white", size = 3) +
    geom_label(aes(label = tm, fill = tm), y = peak / 15, size = 4, color = "white") +
    scale_fill_manual(values = team_colors) +
    labs(title = paste0("Field Goal Percentage: ", player_name),
         x = "",
         y = "") +
    nr_theme
}
#3 point % chart
# Bar chart of a player's 3-point percentage for each season/team stint.
#
# player_name: player whose career is plotted. Returns a ggplot object.
threepct_chart <- function(player_name) {
  # Career rows for this player, excluding the combined "TOT" rows.
  df <- today_with_history %>% filter(player == player_name & tm != "TOT")
  # Numeric season labels recovered from the "<season> <team>" axis keys.
  season_labs = df$season_team %>% map(function(x) str_split(x, " ")[[1]][1]) %>% unlist() %>% as.numeric() %>% sort()
  # FIX: x3ppercent is NA for any season with zero 3-point attempts, so a
  # plain max() would return NA and break every label position below.
  max_value = max(df$x3ppercent, na.rm = TRUE)
  df %>% ggplot(aes(x = season_team, y = x3ppercent)) +
    geom_col(fill = darkslateblue) +
    #geom_chicklet(radius = grid::unit(2, 'mm'), fill = darkslateblue) +
    scale_y_continuous(labels = scales::percent_format(accuracy = 5L)) +
    scale_x_discrete(labels = season_labs) +
    geom_label(aes(label = paste0(x3ppercent * 100, "%"),
                   y = x3ppercent + max_value / 25 ), size = 4, fill = crimson) +
    geom_text(aes(label = paste0(g,"\n games")), y = max_value / 6, color = "white", size = 3) +
    geom_label(aes(label = tm, fill = tm), y = max_value / 15, size = 4, color = "white") +
    scale_fill_manual(values = team_colors) +
    labs(title = paste0("3 Point Percentage: ", player_name),
         x = "",
         y = "") +
    nr_theme
}
###### SO MY DATASETS TO USE WITH CHARTS AND TABLES ARE today_with_history and players_2021
##### today_with_history for stats history charts
##### players_2021 with only current season data for league compare charts
#rebound chart
# Bar chart of a player's average rebounds for each season/team stint.
rebound_chart <- function(player_name) {
  # Career rows for this player, excluding the combined "TOT" rows.
  career <- today_with_history %>%
    filter(player == player_name & tm != "TOT")
  # Numeric season labels recovered from the "<season> <team>" axis keys.
  season_lbls <- career$season_team %>%
    map(function(st) str_split(st, " ")[[1]][1]) %>%
    unlist() %>%
    as.numeric() %>%
    sort()
  peak <- max(career$trb)
  ggplot(career, aes(x = season_team, y = trb)) +
    geom_col(fill = darkslateblue) +
    scale_x_discrete(labels = season_lbls) +
    # Value label floats above the bar; team badge sits inside it.
    geom_label(aes(label = paste0(round(trb, 1), ""),
                   y = trb + peak / 25), size = 4, fill = crimson) +
    geom_text(aes(label = paste0(g, "\n games")), y = peak / 6, color = "white", size = 3) +
    geom_label(aes(label = tm, fill = tm), y = peak / 15, size = 4, color = "white") +
    scale_fill_manual(values = team_colors) +
    labs(title = paste0("Average Rebounds: ", player_name),
         x = "season",
         y = "") +
    nr_theme
}
#assist chart
# Bar chart of a player's average assists for each season/team stint.
assist_chart <- function(player_name) {
  # Career rows for this player, excluding the combined "TOT" rows.
  career <- today_with_history %>%
    filter(player == player_name & tm != "TOT")
  # Numeric season labels recovered from the "<season> <team>" axis keys.
  season_lbls <- career$season_team %>%
    map(function(st) str_split(st, " ")[[1]][1]) %>%
    unlist() %>%
    as.numeric() %>%
    sort()
  peak <- max(career$ast)
  ggplot(career, aes(x = season_team, y = ast)) +
    geom_col(fill = darkslateblue) +
    scale_x_discrete(labels = season_lbls) +
    # Value label floats above the bar; team badge sits inside it.
    geom_label(aes(label = paste0(round(ast, 1), ""),
                   y = ast + peak / 25), size = 4, fill = crimson) +
    geom_text(aes(label = paste0(g, "\n games")), y = peak / 6, color = "white", size = 3) +
    geom_label(aes(label = tm, fill = tm), y = peak / 15, color = "white", size = 4) +
    scale_fill_manual(values = team_colors) +
    labs(title = paste0("Average Assists: ", player_name),
         x = "season",
         y = "") +
    nr_theme
}
#block chart
# Bar chart of a player's average blocks for each season/team stint.
block_chart <- function(player_name) {
  # Career rows for this player, excluding the combined "TOT" rows.
  career <- today_with_history %>%
    filter(player == player_name & tm != "TOT")
  # Numeric season labels recovered from the "<season> <team>" axis keys.
  season_lbls <- career$season_team %>%
    map(function(st) str_split(st, " ")[[1]][1]) %>%
    unlist() %>%
    as.numeric() %>%
    sort()
  peak <- max(career$blk)
  ggplot(career, aes(x = season_team, y = blk)) +
    geom_col(fill = darkslateblue) +
    scale_x_discrete(labels = season_lbls) +
    # Value label floats above the bar; team badge sits inside it.
    geom_label(aes(label = paste0(round(blk, 1), ""),
                   y = blk + peak / 25), size = 4, fill = crimson) +
    geom_text(aes(label = paste0(g, "\n games")), y = peak / 6, color = "white", size = 3) +
    geom_label(aes(label = tm, fill = tm), y = peak / 15, size = 4, color = "white") +
    scale_fill_manual(values = team_colors) +
    labs(title = paste0("Average Blocks: ", player_name),
         x = "season",
         y = "") +
    nr_theme
}
#steal chart
# Bar chart of a player's average steals for each season/team stint.
steal_chart <- function(player_name) {
  # Career rows for this player, excluding the combined "TOT" rows.
  career <- today_with_history %>%
    filter(player == player_name & tm != "TOT")
  # Numeric season labels recovered from the "<season> <team>" axis keys.
  season_lbls <- career$season_team %>%
    map(function(st) str_split(st, " ")[[1]][1]) %>%
    unlist() %>%
    as.numeric() %>%
    sort()
  peak <- max(career$stl)
  ggplot(career, aes(x = season_team, y = stl)) +
    geom_col(fill = darkslateblue) +
    scale_x_discrete(labels = season_lbls) +
    # Value label floats above the bar; team badge sits inside it.
    geom_label(aes(label = paste0(round(stl, 1), ""),
                   y = stl + peak / 25), size = 4, fill = crimson) +
    geom_text(aes(label = paste0(g, "\n games")), y = peak / 6, color = "white", size = 3) +
    geom_label(aes(label = tm, fill = tm), y = peak / 15, size = 4, color = "white") +
    scale_fill_manual(values = team_colors) +
    labs(title = paste0("Average Steals: ", player_name),
         x = "season",
         y = "") +
    nr_theme
}
#team comparison chart for pts
# Horizontal bars comparing a player's scoring average to teammates;
# the named player's bar is drawn in crimson.
team_compare_player_pts <- function(player_name, team, team_fullname) {
  roster <- players_2021a %>%
    filter(tm == team) %>%
    mutate(hilite = as.factor(if_else(player == player_name, 1, 0)))
  top <- max(roster$pts, na.rm = TRUE)
  ggplot(roster, aes(x = reorder(player, pts), y = pts, fill = hilite)) +
    geom_col(width = .7) +
    geom_text(aes(label = pts), y = top / 25, color = "white") +
    scale_fill_manual(values = c(darkslateblue, crimson)) +
    coord_flip() +
    labs(title = player_name,
         subtitle = paste("Average points compared to", team_fullname),
         x = "",
         y = "") +
    nr_theme
}
#league comparison chart for pts
# Where a player's scoring average sits relative to the whole league:
# one thin bar per player, the named player drawn wide in crimson.
league_compare_player_pts <- function(player_name) {
  league <- players_2021 %>%
    mutate(hilite = as.factor(if_else(player == player_name, 1, 0)))
  ggplot(league, aes(x = reorder(player, pts), y = pts, fill = hilite)) +
    geom_col(aes(width = if_else(hilite == 1, 2, 0.3))) +
    scale_fill_manual(values = c(darkslateblue, crimson)) +
    scale_x_discrete(labels = "") +
    labs(title = player_name,
         subtitle = "Average points compared to all NBA",
         x = "",
         y = "") +
    nr_theme +
    theme(axis.title.x = element_blank(),
          axis.text.x = element_blank(),
          axis.ticks.x = element_blank())
}
#team comparison chart for fg%
# Horizontal bars comparing a player's field-goal percentage to teammates;
# fgpercent >= 1 rows (bad data) are excluded.
team_compare_player_fg <- function(player_name, team, team_fullname) {
  roster <- players_2021a %>%
    filter(tm == team, fgpercent < 1) %>%
    mutate(hilite = as.factor(if_else(player == player_name, 1, 0)))
  top <- max(roster$fgpercent, na.rm = TRUE)
  ggplot(roster, aes(x = reorder(player, fgpercent), y = fgpercent, fill = hilite)) +
    geom_col(width = .7) +
    geom_text(aes(label = paste0(fgpercent * 100, "%")), y = top / 25, color = "white") +
    scale_fill_manual(values = c(darkslateblue, crimson)) +
    scale_y_continuous(labels = percent) +
    coord_flip() +
    labs(title = player_name,
         subtitle = paste0("Field goal percentage compared to ", team_fullname),
         x = "",
         y = "") +
    nr_theme
}
#league comparison chart for fg%
# Where a player's field-goal percentage sits relative to the whole league;
# the named player is drawn wide in crimson, others as thin bars.
league_compare_player_fg <- function(player_name) {
  league <- players_2021 %>%
    filter(fgpercent < 1) %>%
    mutate(hilite = as.factor(if_else(player == player_name, 1, 0)))
  ggplot(league, aes(x = reorder(player, fgpercent), y = fgpercent, fill = hilite)) +
    geom_col(aes(width = if_else(hilite == 1, 2, 0.3))) +
    scale_fill_manual(values = c(darkslateblue, crimson)) +
    scale_x_discrete(labels = "") +
    scale_y_continuous(labels = percent) +
    labs(title = player_name,
         subtitle = "Field goal percentage compared to all NBA",
         x = "",
         y = "") +
    nr_theme +
    theme(axis.title.x = element_blank(),
          axis.text.x = element_blank(),
          axis.ticks.x = element_blank())
}
#team comparison chart for 3p%
# Horizontal bars comparing a player's 3-point percentage to teammates;
# x3ppercent >= 1 rows (bad data) are excluded.
team_compare_player_3p <- function(player_name, team, team_fullname) {
  roster <- players_2021a %>%
    filter(tm == team & x3ppercent < 1) %>%
    mutate(hilite = as.factor(if_else(player == player_name, 1, 0)))
  top <- max(roster$x3ppercent, na.rm = TRUE)
  ggplot(roster, aes(x = reorder(player, x3ppercent), y = x3ppercent, fill = hilite)) +
    geom_col(width = .7) +
    geom_text(aes(label = paste0(x3ppercent * 100, "%")), y = top / 25, color = "white") +
    scale_fill_manual(values = c(darkslateblue, crimson)) +
    scale_y_continuous(labels = percent) +
    coord_flip() +
    labs(title = player_name,
         subtitle = paste0("3 point percentage compared to ", team_fullname),
         x = "",
         y = "") +
    nr_theme
}
#league comparison chart for 3p%
# Where a player's 3-point percentage sits relative to the whole league;
# 0% and >= 100% rows are excluded, named player drawn wide in crimson.
league_compare_player_3p <- function(player_name) {
  league <- players_2021 %>%
    filter(x3ppercent < 1 & x3ppercent > 0) %>%
    mutate(hilite = as.factor(if_else(player == player_name, 1, 0)))
  ggplot(league, aes(x = reorder(player, x3ppercent), y = x3ppercent, fill = hilite)) +
    geom_col(aes(width = if_else(hilite == 1, 2, 0.3))) +
    scale_fill_manual(values = c(darkslateblue, crimson)) +
    scale_x_discrete(labels = "") +
    scale_y_continuous(labels = percent) +
    labs(title = player_name,
         subtitle = "3 point percentage compared to all NBA",
         x = "",
         y = "") +
    nr_theme +
    theme(axis.title.x = element_blank(),
          axis.text.x = element_blank(),
          axis.ticks.x = element_blank())
}
#team comparison for rebounds
# Horizontal bars comparing a player's average rebounds to teammates;
# the named player's bar is drawn in crimson.
team_compare_player_rebounds <- function(player_name, team, team_fullname) {
  roster <- players_2021a %>%
    filter(tm == team) %>%
    mutate(hilite = as.factor(if_else(player == player_name, 1, 0)))
  top <- max(roster$trb, na.rm = TRUE)
  ggplot(roster, aes(x = reorder(player, trb), y = trb, fill = hilite)) +
    geom_col(width = .7) +
    geom_text(aes(label = trb), y = top / 25, color = "white") +
    scale_fill_manual(values = c(darkslateblue, crimson)) +
    coord_flip() +
    labs(title = player_name,
         subtitle = paste0("Average rebounds compared to ", team_fullname),
         x = "",
         y = "") +
    nr_theme
}
#league comparison chart for rebounds
# Where a player's rebounding average sits relative to the whole league;
# the named player is drawn wide in crimson, others as thin bars.
league_compare_player_rebounds <- function(player_name) {
  league <- players_2021 %>%
    mutate(hilite = as.factor(if_else(player == player_name, 1, 0)))
  ggplot(league, aes(x = reorder(player, trb), y = trb, fill = hilite)) +
    geom_col(aes(width = if_else(hilite == 1, 2, 0.3))) +
    scale_fill_manual(values = c(darkslateblue, crimson)) +
    scale_x_discrete(labels = "") +
    labs(title = player_name,
         subtitle = "Average rebounds compared to all NBA",
         x = "",
         y = "") +
    nr_theme +
    theme(axis.title.x = element_blank(),
          axis.text.x = element_blank(),
          axis.ticks.x = element_blank())
}
#team comparison chart for assists
# Horizontal bars comparing a player's average assists to teammates.
#
# player_name: player to highlight in crimson; team: three-letter code used
# to filter players_2021a; team_fullname: full name for the subtitle.
# Returns a ggplot object.
team_compare_player_assists <- function(player_name, team, team_fullname) {
  df <- players_2021a %>%
    filter(tm == team ) %>%
    mutate(hilite = if_else(player == player_name, 1, 0)) %>%
    mutate(hilite = as.factor(hilite))
  max_value <- max(df$ast, na.rm = TRUE)
  df %>% ggplot(aes(x = reorder(player, ast), y = ast, fill = hilite)) +
    geom_col(width = .7) +
    # BUG FIX: the bar label was `trb` (rebounds), copy-pasted from the
    # rebounds chart; this chart plots assists, so label with `ast`.
    geom_text(aes(label = ast), y = max_value / 25, color = "white") +
    scale_fill_manual(values = c(darkslateblue, crimson)) +
    coord_flip() +
    labs(title = player_name,
         subtitle = paste0("Average assists compared to ", team_fullname),
         x = "",
         y = "") +
    nr_theme
}
#league comparison chart for assists
# Where a player's assist average sits relative to the whole league;
# the named player is drawn wide in crimson, others as thin bars.
league_compare_player_assists <- function(player_name) {
  league <- players_2021 %>%
    mutate(hilite = as.factor(if_else(player == player_name, 1, 0)))
  ggplot(league, aes(x = reorder(player, ast), y = ast, fill = hilite)) +
    geom_col(aes(width = if_else(hilite == 1, 2, 0.3))) +
    scale_fill_manual(values = c(darkslateblue, crimson)) +
    scale_x_discrete(labels = "") +
    labs(title = player_name,
         subtitle = "Average assists compared to all NBA",
         x = "",
         y = "") +
    nr_theme +
    theme(axis.title.x = element_blank(),
          axis.text.x = element_blank(),
          axis.ticks.x = element_blank())
}
#team comparison chart for blocks
# Horizontal bars comparing a player's average blocks to teammates.
#
# player_name: player to highlight in crimson; team: three-letter code used
# to filter players_2021a; team_fullname: full name for the subtitle.
# Returns a ggplot object.
team_compare_player_blocks <- function(player_name, team, team_fullname) {
  df <- players_2021a %>%
    filter(tm == team ) %>%
    mutate(hilite = if_else(player == player_name, 1, 0)) %>%
    mutate(hilite = as.factor(hilite))
  # FIX: na.rm = TRUE added for consistency with the pts/fg/trb/ast
  # variants — a single NA in blk would make max() NA and break the labels.
  max_value <- max(df$blk, na.rm = TRUE)
  df %>% ggplot(aes(x = reorder(player, blk), y = blk, fill = hilite)) +
    geom_col(width = .7) +
    geom_text(aes(label = blk), y = max_value / 25, color = "white") +
    scale_fill_manual(values = c(darkslateblue, crimson)) +
    coord_flip() +
    labs(title = player_name,
         subtitle = paste0("Average blocks compared to ", team_fullname),
         x = "",
         y = "") +
    nr_theme
}
#league comparison chart for blocks
# Where a player's block average sits relative to the whole league.
#
# player_name: player to highlight in crimson. Returns a ggplot object.
league_compare_player_blocks <- function(player_name) {
  players_2021 %>%
    mutate(hilite = if_else(player == player_name, 1, 0)) %>%
    mutate(hilite = as.factor(hilite)) %>%
    ggplot(aes(x = reorder(player, blk), y = blk, fill = hilite)) +
    # FIX: was geom_col(width = .7), which rendered the highlighted player
    # at the same width as everyone else; every other league_compare_*
    # chart uses this wide-bar highlight pattern, so match it here.
    geom_col(aes(width = if_else(hilite == 1, 2, 0.3))) +
    scale_fill_manual(values = c(darkslateblue, crimson)) +
    scale_x_discrete(labels = "") +
    labs(title = player_name,
         subtitle = "Average blocks compared to all NBA",
         x = "",
         y = "") +
    nr_theme + theme(
      axis.title.x=element_blank(),
      axis.text.x=element_blank(),
      axis.ticks.x=element_blank()
    )
}
#team comparison chart for steals
# Horizontal bars comparing a player's average steals to teammates.
#
# player_name: player to highlight in crimson; team: three-letter code used
# to filter players_2021a; team_fullname: full name for the subtitle.
# Returns a ggplot object.
team_compare_player_steals <- function(player_name, team, team_fullname) {
  df <- players_2021a %>%
    filter(tm == team ) %>%
    mutate(hilite = if_else(player == player_name, 1, 0)) %>%
    mutate(hilite = as.factor(hilite))
  # FIX: na.rm = TRUE added for consistency with the other team_compare_*
  # variants — a single NA in stl would make max() NA and break the chart.
  max_value <- max(df$stl, na.rm = TRUE)
  df %>% ggplot(aes(x = reorder(player, stl), y = stl, fill = hilite)) +
    geom_col(width = .7) +
    # FIX: value label added — every sibling team-compare chart prints the
    # stat value inside the bar; this one was missing it.
    geom_text(aes(label = stl), y = max_value / 25, color = "white") +
    scale_fill_manual(values = c(darkslateblue, crimson)) +
    coord_flip() +
    labs(title = player_name,
         subtitle = paste0("Average steals compared to ", team_fullname),
         x = "",
         y = "") +
    nr_theme
}
#league comparison chart for steals
# Where a player's steal average sits relative to the whole league.
#
# player_name: player to highlight in crimson. Returns a ggplot object.
league_compare_player_steals <- function(player_name) {
  players_2021 %>%
    mutate(hilite = if_else(player == player_name, 1, 0)) %>%
    mutate(hilite = as.factor(hilite)) %>%
    ggplot(aes(x = reorder(player, stl), y = stl, fill = hilite)) +
    # FIX: was geom_col(size = 10) — `size` controls the bar BORDER line
    # width, not bar width, so the highlighted player did not stand out.
    # Use the wide-bar highlight pattern shared by all league_compare_*.
    geom_col(aes(width = if_else(hilite == 1, 2, 0.3))) +
    scale_fill_manual(values = c(darkslateblue, crimson)) +
    scale_x_discrete(labels = "") +
    labs(title = player_name,
         subtitle = "Average steals compared to all NBA",
         x = "",
         y = "") +
    nr_theme + theme(
      axis.title.x=element_blank(),
      axis.text.x=element_blank(),
      axis.ticks.x=element_blank()
    )
}
#make it a function that takes player name
# shooting_chart <- function(player_name) {
# df <- wnba_shooting %>%
# filter(player == toupper(player_name) ) %>%
# mutate(shots = ordered(shots, levels = c("missed", "made")))
# max_fga <- max(df$FGA, na.rm = TRUE)
# df %>%
# ggplot(aes(x = reorder(zone, FGA), y = value, fill = shots)) +
# geom_col(width = .7) +
# geom_text(aes(label = ifelse(value > 1, value, ""), y = ifelse(shots == "made", value / 2, (FGA - value / 2)))) +
# geom_label(aes(label = ifelse(FGA > 0, paste0(percent, "%"), "-"),
# y = ifelse(FGA > 0, FGA + max_fga * 0.05, 1)),
# fill = "#385097", color = "white") +
# scale_x_discrete(labels = c("right" = "right corner 3", "restricted" = "restricted area",
# "paint" = "in the paint", "mid" = "mid range", "left" = "left corner 3",
# "break" = "above the break 3")) +
# scale_fill_manual(values = c("grey", "#dd1f22")) +
# coord_flip() +
# nyl_theme + theme(
# legend.position = "top"
# ) +
# labs(
# title = paste0("Shooting by Zone - ", player_name),
# subtitle = "Field Goal Attempts - 2020 Season",
# x = "",
# y = ""
# )
# }
|
/global.R
|
no_license
|
katiesegreti/NBA
|
R
| false
| false
| 26,006
|
r
|
library(httr)
library(tidyverse)
library(xml2)
library(rvest)
library(stringr)
library(rebus)
library(lubridate)
library(ggthemes)
library(ggdark)
library(scales)
library(extrafont)
library(shiny)
library(shinythemes)
library(reactable)
library(ggplot2)
library(extrafont)
#library(ggchicklet)
library(ballr)
#############################################################
# ---- Data loading (runs once at app start; all sources are remote) ----
### read in bio details
# Player bio table scraped to the author's GitHub repo; unique() drops rows
# duplicated by repeated scrapes.
nba_bio_2021 <- read_csv("https://raw.githubusercontent.com/katiesegreti/NBA/main/nba_bios.csv") %>% unique()
#update bios for early march and fix this soon
# Current team per player (snapshot dated 03/02). Basketball-reference uses
# "BRK" for Brooklyn while this file uses "BKN", so normalize the code.
nba_player_teams <- read_csv("https://raw.githubusercontent.com/katiesegreti/NBA/main/NBA_player_teams_0302.csv") %>% select(player, current_team) %>%
mutate(current_team = if_else(current_team == "BKN", "BRK", current_team))
### read in history
# Career per-game history. The substr() pair collapses a "YYYY-YY" season
# string into its ending year (e.g. "2019-20" -> 2020) — assumes that exact
# format; TODO confirm against the CSV.
player_history <- read_csv("https://raw.githubusercontent.com/katiesegreti/NBA/main/nba_history_013021.csv") %>%
mutate(season = as.numeric(paste0(substr(season, 1, 2), substr(season, 6, 7)))) %>%
select(player, pos, age, tm, g, gs, mp, fg, fga, fgpercent, x3p, x3pa, x3ppercent, x2p, x2pa, x2ppercent, efgpercent, ft, fta, ftpercent, orb, drb, trb, ast, stl, blk, tov, pf, pts, link, season)
### read in shooting data
# (WNBA shooting-zone data from an earlier version of this app; kept for
# reference but currently unused.)
# wnba_shooting <- read_csv("https://raw.githubusercontent.com/katiesegreti/WNBA/master/WNBA_2020_shooting_players.csv") %>%
# mutate( player = as.factor(player),
# tm = as.factor(tm),
# zone = as.factor(zone),
# stat = as.factor(stat),
# shots = as.factor(shots)
# )
######################################################
###############CONSOLIDATE THE NUMBER OF DATASETS!!!!!
######################################################
# get the current 2020 stats
#every day, get the current season stats.
# Live per-game stats for the in-progress 2020-21 season via the {ballr}
# basketball-reference scraper.
current_season <- NBAPerGameStatistics(season = 2021)
#combine today's 2020 stats with history (also combine season and team into a column for chart making)
#COMBINE TODAY'S DATA (2021 SEASON) WITH HISTORY
# season_team ("YYYY TM") is the per-bar x value used by every history chart.
today_with_history <- current_season %>%
mutate(season = 2021) %>%
select(player, pos, age, tm, g, gs, mp, fg, fga, fgpercent, x3p, x3pa, x3ppercent,
x2p, x2pa, x2ppercent, efgpercent, ft, fta, ftpercent, orb, drb, trb, ast,
stl, blk, tov, pf, pts, link, season) %>%
bind_rows(player_history) %>%
mutate(season_team = paste0(season, " ", tm)) %>% unique()
#JOIN THE LATEST STATS WITH BIO DETAILS
# wnba_today <- current_stats %>%
# left_join(wnba_bio_2020)
#
# wnba_today1 <- wnba_today %>%
# mutate(season_team = paste0(season, " ", tm))
#filter to TOT for players on >1 team in 2021
# Rows-per-player count used below to spot traded players (they appear once
# per team plus a combined "TOT" row).
player_counts_2021 <- today_with_history %>% filter(season == 2021) %>%
count(player)
# Look up one player's team code in the current-season stats (relies on the
# global `current_season`); for traded players the last listed row wins.
# Returns a one-row data.frame with columns player and team.
get_teams <- function(playername) {
  player_rows <- current_season %>%
    filter(player == playername) %>%
    select(tm)
  team_code <- last(player_rows$tm)
  data.frame(player = playername, team = team_code)
}
#filter to "TOT" for players who were on more than one team
# Current-season (2021) slice with one row per player: single-team players
# keep their row; traded players keep only the combined "TOT" row.
# `hilite` is a placeholder flag the chart functions overwrite.
players_2021 <- today_with_history %>%
filter(season == 2021) %>%
left_join(player_counts_2021) %>%
filter(n == 1 | tm == "TOT") %>%
mutate(hilite = 0)
#get the current team for each player
#
#player_teams <- players_2021$player %>% map(function(x) get_teams(x)) %>% bind_rows()
# Resolve each player's current team: start from the last team in the scraped
# stats (see get_teams) and override with the curated nba_player_teams
# snapshot where available.
player_teams <- players_2021$player %>% map(function(x) get_teams(x)) %>% bind_rows() %>%
left_join(nba_player_teams, by = "player") %>%
mutate(team = if_else(!is.na(current_team), current_team, team)) %>% select(player, team)
# players_2021a <- today_with_history %>%
# filter(season == 2021) %>%
# mutate(hilite = 0)
# Same as players_2021 but with tm replaced by the resolved current team —
# this is the dataset the team_compare_* charts use.
players_2021a <- players_2021 %>%
left_join(player_teams) %>%
mutate(tm = team) %>%
select(-team)
# nba team colors
# One hex brand color per basketball-reference team code. Note some teams
# intentionally share a hex value (e.g. CHI/HOU/TOR all use "#ce1141",
# MIL/UTA use "#00471b", ATL/POR use "#e03a3e").
ATL <- "#e03a3e"
BRK <- "#000000"
BOS <- "#008348"
CHO <- "#00788c"
CHI <- "#ce1141"
CLE <- "#6f263d"
DAL <- "#bbc4ca"
DEN <- "#fec524"
DET <- "#c8102e"
GSW <- "#fdb927"
HOU <- "#ce1141"
IND <- "#fdbb30"
LAC <- "#1d428a"
LAL <- "#552583"
MEM <- "#5d76a9"
MIA <- "#98002e"
MIL <- "#00471b"
MIN <- "#78be20"
NOP <- "#b4975a"
NYK <- "#f58426"
OKC <- "#007ac1"
ORL <- "#c4ced4"
PHI <- "#006bb6"
PHO <- "#1d1160"
POR <- "#e03a3e"
SAC <- "#5a2b81"
SAS <- "#000000"
TOR <- "#ce1141"
UTA <- "#00471b"
WAS <- "#002b5c"
# Named vector mapping team code -> hex color; fed to scale_fill_manual()
# so team-badge labels pick up their brand color.
team_colors <- c("ATL" = ATL, "BRK" = BRK , "BOS" = BOS, "CHO" = CHO,
"CHI" = CHI, "CLE" = CLE, "DAL" = DAL, "DEN" = DEN, "DET" = DET,
"GSW" = GSW, "HOU" = HOU, "IND" = IND, "LAC" = LAC, "LAL" = LAL,
"MEM" = MEM, "MIA" = MIA, "MIL" = MIL, "MIN" = MIN, "NOP" = NOP,
"NYK" = NYK, "OKC" = OKC, "ORL" = ORL, "PHI" = PHI, "PHO" = PHO,
"POR" = POR, "SAC" = SAC, "SAS" = SAS, "TOR" = TOR, "UTA" = UTA, "WAS" = WAS)
# Display-name -> team-code lookup (e.g. for a selectInput); the leading ""
# entry provides a blank default choice.
team_names <- c("", "Atlanta Hawks" = "ATL", "Brooklyn Nets" = "BRK",
                "Boston Celtics" = "BOS", "Charlotte Hornets" = "CHO", "Chicago Bulls" = "CHI",
                # FIX: typo "Caveliers" -> "Cavaliers"
                "Cleveland Cavaliers" = "CLE", "Dallas Mavericks" = "DAL", "Denver Nuggets" = "DEN",
                "Detroit Pistons" = "DET", "Golden State Warriors" = "GSW", "Houston Rockets" = "HOU",
                "Indiana Pacers" = "IND", "Los Angeles Clippers" = "LAC", "Los Angeles Lakers" = "LAL",
                "Memphis Grizzlies" = "MEM", "Miami Heat" = "MIA", "Milwaukee Bucks" = "MIL",
                "Minnesota Timberwolves" = "MIN", "New Orleans Pelicans" = "NOP", "New York Knicks" = "NYK",
                "Oklahoma City Thunder" = "OKC", "Orlando Magic" = "ORL", "Philadelphia 76ers" = "PHI",
                # FIX: the official franchise name is "Trail Blazers" (two words)
                "Phoenix Suns" = "PHO", "Portland Trail Blazers" = "POR", "Sacramento Kings" = "SAC",
                "San Antonio Spurs" = "SAS", "Toronto Raptors" = "TOR", "Utah Jazz" = "UTA", "Washington Wizards" = "WAS"
)
#colors
# App-wide accent colors: navy for ordinary bars, crimson for the
# highlighted player.
darkslateblue <- "#2e3c81"
crimson <- "#e73a3c"
#non-dark theme
bg_color <- "#f2f2f2"
# Shared ggplot theme for every chart: WSJ base theme with a light grey
# background, no legend, Arial text, and navy horizontal grid lines.
nr_theme <- theme_wsj() +
theme(
text = element_text(family = "Arial"),
panel.background = element_rect(fill = bg_color),
plot.background = element_rect(fill = bg_color),
legend.position = "none",
axis.line.x.bottom = element_blank(),
axis.ticks.x.bottom = element_blank(),
axis.text.x = element_text(face = "plain", family = "Arial"),
panel.grid.major.y = element_line( colour = darkslateblue),
plot.title = element_text(size = 22, family = "Arial"),
legend.background = element_rect(fill = bg_color),
plot.subtitle = element_text(size = 18, family = "Arial")
)
#sticky style for reactable
# Inline CSS for a left-pinned (sticky) first column in reactable tables.
sticky_style <- list(position = "sticky", left = 0, background = "#fff", zIndex = 1,
borderRight = "1px solid #eee")
# write a function to make avg_point charts by season for a player
# Per-season average-points chart: one navy bar per season/team with a
# crimson value label, games-played text, and a team badge colored via
# team_colors. Returns a ggplot object.
# Relies on globals: today_with_history, team_colors, darkslateblue,
# crimson, nr_theme.
avgpoint_chart <- function(player_name) {
  # Drop "TOT" rows so traded players show one bar per team.
  df <- today_with_history %>% filter(player == player_name & tm != "TOT")
  # season_team is "YYYY TM"; keep only the year for the x-axis labels.
  season_labs = df$season_team %>% map(function(x) str_split(x, " ")[[1]][1]) %>% unlist() %>% as.numeric() %>% sort()
  # FIX: na.rm = TRUE — a single NA season made max_value NA, which broke
  # every label y-position below. (The unused n_seasons local was removed.)
  max_value = max(df$pts, na.rm = TRUE)
  df %>% ggplot(aes(x = season_team, y = pts)) +
    geom_col(fill = darkslateblue) +
    scale_x_discrete(labels = season_labs) +
    geom_label(aes(label = paste0(round(pts, 1)),
                   y = pts + max_value / 25 ), size = 4, fill = crimson) +
    geom_text(aes(label = paste(g,"\n games")), y = max_value / 6, color = "white", size = 3) +
    geom_label(aes(label = tm, fill = tm), y = max_value / 15, size = 4, color = "white") +
    scale_fill_manual(values = team_colors) +
    labs(title = paste0("Average Points: ", player_name),
         x = "season",
         y = "") +
    nr_theme
}
#fg % chart
# Per-season field-goal-percentage chart for one player; same layout as
# avgpoint_chart. Returns a ggplot object.
# Relies on globals: today_with_history, team_colors, darkslateblue,
# crimson, nr_theme.
fgpct_chart <- function(player_name) {
  # Drop "TOT" rows so traded players show one bar per team.
  df <- today_with_history %>% filter(player == player_name & tm != "TOT")
  # season_team is "YYYY TM"; keep only the year for the x-axis labels.
  season_labs = df$season_team %>% map(function(x) str_split(x, " ")[[1]][1]) %>% unlist() %>% as.numeric() %>% sort()
  # FIX: na.rm = TRUE — a single NA season made max_value NA and broke all
  # the label y-positions below.
  max_value = max(df$fgpercent, na.rm = TRUE)
  df %>% ggplot(aes(x = season_team, y = fgpercent)) +
    geom_col(fill = darkslateblue) +
    scale_y_continuous(labels = scales::percent_format(accuracy = 5L)) +
    scale_x_discrete(labels = season_labs) +
    geom_label(aes(label = paste0(round(fgpercent, 2) * 100, "%"),
                   y = fgpercent + max_value / 25 ), size = 4, fill = crimson) +
    geom_text(aes(label = paste0(g,"\n games")), y = max_value / 6, color = "white", size = 3) +
    geom_label(aes(label = tm, fill = tm), y = max_value / 15, size = 4, color = "white") +
    scale_fill_manual(values = team_colors) +
    labs(title = paste0("Field Goal Percentage: ", player_name),
         x = "",
         y = "") +
    nr_theme
}
#3 point % chart (header previously said "fg % chart" — copy/paste)
# Per-season three-point-percentage chart for one player; same layout as
# fgpct_chart. Returns a ggplot object.
# Relies on globals: today_with_history, team_colors, darkslateblue,
# crimson, nr_theme.
threepct_chart <- function(player_name) {
  # Drop "TOT" rows so traded players show one bar per team.
  df <- today_with_history %>% filter(player == player_name & tm != "TOT")
  season_labs = df$season_team %>% map(function(x) str_split(x, " ")[[1]][1]) %>% unlist() %>% as.numeric() %>% sort()
  # FIX: na.rm = TRUE — a single NA season made max_value NA and broke all
  # the label y-positions below.
  max_value = max(df$x3ppercent, na.rm = TRUE)
  df %>% ggplot(aes(x = season_team, y = x3ppercent)) +
    geom_col(fill = darkslateblue) +
    scale_y_continuous(labels = scales::percent_format(accuracy = 5L)) +
    scale_x_discrete(labels = season_labs) +
    # NOTE(review): unlike fgpct_chart this label is not round()ed —
    # presumably intentional (source data is 3-decimal); confirm.
    geom_label(aes(label = paste0(x3ppercent * 100, "%"),
                   y = x3ppercent + max_value / 25 ), size = 4, fill = crimson) +
    geom_text(aes(label = paste0(g,"\n games")), y = max_value / 6, color = "white", size = 3) +
    geom_label(aes(label = tm, fill = tm), y = max_value / 15, size = 4, color = "white") +
    scale_fill_manual(values = team_colors) +
    labs(title = paste0("3 Point Percentage: ", player_name),
         x = "",
         y = "") +
    nr_theme
}
###### SO MY DATASETS TO USE WITH CHARTS AND TABLES ARE today_with_history and players_2021
##### today_with_history for stats history charts
##### players_2021 with only current season data for league compare charts
#rebound chart
# Per-season average-rebounds chart for one player; same layout as
# avgpoint_chart. Returns a ggplot object.
# Relies on globals: today_with_history, team_colors, darkslateblue,
# crimson, nr_theme.
rebound_chart <- function(player_name) {
  # Drop "TOT" rows so traded players show one bar per team.
  df <- today_with_history %>% filter(player == player_name & tm != "TOT")
  season_labs = df$season_team %>% map(function(x) str_split(x, " ")[[1]][1]) %>% unlist() %>% as.numeric() %>% sort()
  # FIX: na.rm = TRUE — a single NA season made max_value NA and broke all
  # the label y-positions below.
  max_value = max(df$trb, na.rm = TRUE)
  df %>% ggplot(aes(x = season_team, y = trb)) +
    geom_col(fill = darkslateblue) +
    scale_x_discrete(labels = season_labs) +
    geom_label(aes(label = paste0(round(trb, 1),""),
                   y = trb + max_value / 25 ), size = 4, fill = crimson) +
    geom_text(aes(label = paste0(g,"\n games")), y = max_value / 6, color = "white", size = 3) +
    geom_label(aes(label = tm, fill = tm), y = max_value / 15, size = 4, color = "white") +
    scale_fill_manual(values = team_colors) +
    labs(title = paste0("Average Rebounds: ", player_name),
         x = "season",
         y = "") +
    nr_theme
}
#assist chart
# Per-season average-assists chart for one player; same layout as
# avgpoint_chart. Returns a ggplot object.
# Relies on globals: today_with_history, team_colors, darkslateblue,
# crimson, nr_theme.
assist_chart <- function(player_name) {
  # Drop "TOT" rows so traded players show one bar per team.
  df <- today_with_history %>% filter(player == player_name & tm != "TOT")
  season_labs = df$season_team %>% map(function(x) str_split(x, " ")[[1]][1]) %>% unlist() %>% as.numeric() %>% sort()
  # FIX: na.rm = TRUE — a single NA season made max_value NA and broke all
  # the label y-positions below.
  max_value = max(df$ast, na.rm = TRUE)
  df %>% ggplot(aes(x = season_team, y = ast)) +
    geom_col(fill = darkslateblue) +
    scale_x_discrete(labels = season_labs) +
    geom_label(aes(label = paste0(round(ast, 1),""),
                   y = ast + max_value / 25 ), size = 4, fill = crimson) +
    geom_text(aes(label = paste0(g,"\n games")), y = max_value / 6, color = "white", size = 3) +
    geom_label(aes(label = tm, fill = tm), y = max_value / 15, color = "white", size = 4) +
    scale_fill_manual(values = team_colors) +
    labs(title = paste0("Average Assists: ", player_name),
         x = "season",
         y = "") +
    nr_theme
}
#block chart
# Per-season average-blocks chart for one player; same layout as
# avgpoint_chart. Returns a ggplot object.
# Relies on globals: today_with_history, team_colors, darkslateblue,
# crimson, nr_theme.
block_chart <- function(player_name) {
  # Drop "TOT" rows so traded players show one bar per team.
  df <- today_with_history %>% filter(player == player_name & tm != "TOT")
  season_labs = df$season_team %>% map(function(x) str_split(x, " ")[[1]][1]) %>% unlist() %>% as.numeric() %>% sort()
  # FIX: na.rm = TRUE — a single NA season made max_value NA and broke all
  # the label y-positions below.
  max_value = max(df$blk, na.rm = TRUE)
  df %>% ggplot(aes(x = season_team, y = blk)) +
    geom_col(fill = darkslateblue) +
    scale_x_discrete(labels = season_labs) +
    geom_label(aes(label = paste0(round(blk, 1),""),
                   y = blk + max_value / 25 ), size = 4, fill = crimson) +
    geom_text(aes(label = paste0(g,"\n games")), y = max_value / 6, color = "white", size = 3) +
    geom_label(aes(label = tm, fill = tm), y = max_value / 15, size = 4, color = "white") +
    scale_fill_manual(values = team_colors) +
    labs(title = paste0("Average Blocks: ", player_name),
         x = "season",
         y = "") +
    nr_theme
}
#steal chart
# Per-season average-steals chart for one player; same layout as
# avgpoint_chart. Returns a ggplot object.
# Relies on globals: today_with_history, team_colors, darkslateblue,
# crimson, nr_theme.
steal_chart <- function(player_name) {
  # Drop "TOT" rows so traded players show one bar per team.
  df <- today_with_history %>% filter(player == player_name & tm != "TOT")
  season_labs = df$season_team %>% map(function(x) str_split(x, " ")[[1]][1]) %>% unlist() %>% as.numeric() %>% sort()
  # FIX: na.rm = TRUE — a single NA season made max_value NA and broke all
  # the label y-positions below.
  max_value = max(df$stl, na.rm = TRUE)
  df %>% ggplot(aes(x = season_team, y = stl)) +
    geom_col(fill = darkslateblue) +
    scale_x_discrete(labels = season_labs) +
    geom_label(aes(label = paste0(round(stl, 1),""),
                   y = stl + max_value / 25 ), size = 4, fill = crimson) +
    geom_text(aes(label = paste0(g,"\n games")), y = max_value / 6, color = "white", size = 3) +
    geom_label(aes(label = tm, fill = tm), y = max_value / 15, size = 4, color = "white") +
    scale_fill_manual(values = team_colors) +
    labs(title = paste0("Average Steals: ", player_name),
         x = "season",
         y = "") +
    nr_theme
}
#team comparison chart for pts
# Horizontal bar chart of average points for every player on `team`,
# highlighting `player_name` in crimson. Returns a ggplot object.
# Relies on globals: players_2021a, darkslateblue, crimson, nr_theme.
team_compare_player_pts <- function(player_name, team, team_fullname) {
  roster <- players_2021a %>%
    filter(tm == team) %>%
    mutate(hilite = as.factor(if_else(player == player_name, 1, 0)))
  top_pts <- max(roster$pts, na.rm = TRUE)
  base_plot <- ggplot(roster, aes(x = reorder(player, pts), y = pts, fill = hilite))
  base_plot +
    geom_col(width = .7) +
    geom_text(aes(label = pts), y = top_pts / 25, color = "white") +
    scale_fill_manual(values = c(darkslateblue, crimson)) +
    coord_flip() +
    labs(title = player_name,
         subtitle = paste("Average points compared to", team_fullname),
         x = "",
         y = "") +
    nr_theme
}
#league comparison chart for pts
# League-wide average-points chart: every player's bar is thin; the selected
# player's bar is wider and crimson. Player names on the x axis are
# suppressed. Returns a ggplot object.
# Relies on globals: players_2021, darkslateblue, crimson, nr_theme.
league_compare_player_pts <- function(player_name) {
players_2021 %>%
mutate(hilite = if_else(player == player_name, 1, 0)) %>%
mutate(hilite = as.factor(hilite)) %>%
ggplot(aes(x = reorder(player, pts), y = pts, fill = hilite)) +
# width is mapped per-row on purpose: highlighted bar 2, others 0.3
geom_col(aes(width = if_else(hilite == 1, 2, 0.3))) +
scale_fill_manual(values = c(darkslateblue, crimson)) +
scale_x_discrete(labels = "") +
labs(title = player_name,
subtitle = "Average points compared to all NBA",
x = "",
y = "") +
nr_theme + theme(
axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank()
)
}
#team comparison chart for fg%
# Horizontal bar chart of field-goal percentage for everyone on `team`,
# highlighting `player_name` in crimson. fgpercent < 1 drops bad/degenerate
# rows (a player with 1-for-1 shooting would otherwise dominate the axis).
# Relies on globals: players_2021a, darkslateblue, crimson, nr_theme.
team_compare_player_fg <- function(player_name, team, team_fullname) {
df <- players_2021a %>%
filter(tm == team, fgpercent < 1 ) %>%
mutate(hilite = if_else(player == player_name, 1, 0)) %>%
mutate(hilite = as.factor(hilite))
max_value <- max(df$fgpercent, na.rm = TRUE)
df %>% ggplot(aes(x = reorder(player, fgpercent), y = fgpercent, fill = hilite)) +
geom_col(width = .7) +
# value label near the bar base
geom_text(aes(label = paste0(fgpercent * 100, "%")), y = max_value / 25, color = "white") +
scale_fill_manual(values = c(darkslateblue, crimson)) +
scale_y_continuous(labels = percent) +
coord_flip() +
labs(title = player_name,
subtitle = paste0("Field goal percentage compared to ", team_fullname),
x = "",
y = "") +
nr_theme
}
#league comparison chart for fg%
# League-wide field-goal-percentage chart with `player_name` widened and
# highlighted in crimson; x labels suppressed. fgpercent < 1 drops
# degenerate rows. Returns a ggplot object.
# Relies on globals: players_2021, darkslateblue, crimson, nr_theme.
league_compare_player_fg <- function(player_name) {
players_2021 %>% filter(fgpercent < 1) %>%
mutate(hilite = if_else(player == player_name, 1, 0)) %>%
mutate(hilite = as.factor(hilite)) %>%
ggplot(aes(x = reorder(player, fgpercent), y = fgpercent, fill = hilite)) +
geom_col(aes(width = if_else(hilite == 1, 2, 0.3))) +
scale_fill_manual(values = c(darkslateblue, crimson)) +
scale_x_discrete(labels = "") +
scale_y_continuous(labels = percent) +
labs(title = player_name,
subtitle = "Field goal percentage compared to all NBA",
x = "",
y = "") +
nr_theme + theme(
axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank()
)
}
#team comparison chart for 3p%
# Horizontal bar chart of three-point percentage for everyone on `team`,
# highlighting `player_name` in crimson. x3ppercent < 1 drops degenerate
# rows. Returns a ggplot object.
# Relies on globals: players_2021a, darkslateblue, crimson, nr_theme.
team_compare_player_3p <- function(player_name, team, team_fullname) {
df <- players_2021a %>%
filter(tm == team & x3ppercent < 1) %>%
mutate(hilite = if_else(player == player_name, 1, 0)) %>%
mutate(hilite = as.factor(hilite))
max_value = max(df$x3ppercent, na.rm = TRUE)
df %>% ggplot(aes(x = reorder(player, x3ppercent), y = x3ppercent, fill = hilite)) +
geom_col(width = .7) +
# value label near the bar base
geom_text(aes(label = paste0(x3ppercent * 100, "%")), y = max_value / 25, color = "white") +
scale_fill_manual(values = c(darkslateblue, crimson)) +
scale_y_continuous(labels = percent) +
coord_flip() +
labs(title = player_name,
subtitle = paste0("3 point percentage compared to ", team_fullname),
x = "",
y = "") +
nr_theme
}
#league comparison chart for 3p%
# League-wide three-point-percentage chart with `player_name` widened and
# highlighted in crimson; x labels suppressed. The 0 < x3ppercent < 1 filter
# drops non-shooters and degenerate rows. Returns a ggplot object.
# Relies on globals: players_2021, darkslateblue, crimson, nr_theme.
league_compare_player_3p <- function(player_name) {
players_2021 %>% filter(x3ppercent < 1 & x3ppercent > 0) %>%
mutate(hilite = if_else(player == player_name, 1, 0)) %>%
mutate(hilite = as.factor(hilite)) %>%
ggplot(aes(x = reorder(player, x3ppercent), y = x3ppercent, fill = hilite)) +
geom_col(aes(width = if_else(hilite == 1, 2, 0.3))) +
scale_fill_manual(values = c(darkslateblue, crimson)) +
scale_x_discrete(labels = "") +
scale_y_continuous(labels = percent) +
labs(title = player_name,
subtitle = "3 point percentage compared to all NBA",
x = "",
y = "") +
nr_theme + theme(
axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank()
)
}
#team comparison for rebounds
# Horizontal bar chart of average rebounds for everyone on `team`,
# highlighting `player_name` in crimson. Returns a ggplot object.
# Relies on globals: players_2021a, darkslateblue, crimson, nr_theme.
team_compare_player_rebounds <- function(player_name, team, team_fullname) {
df <- players_2021a %>%
filter(tm == team ) %>%
mutate(hilite = if_else(player == player_name, 1, 0)) %>%
mutate(hilite = as.factor(hilite))
max_value <- max(df$trb, na.rm = TRUE)
df %>% ggplot(aes(x = reorder(player, trb), y = trb, fill = hilite)) +
geom_col(width = .7) +
# value label near the bar base
geom_text(aes(label = trb), y = max_value / 25, color = "white") +
scale_fill_manual(values = c(darkslateblue, crimson)) +
coord_flip() +
labs(title = player_name,
subtitle = paste0("Average rebounds compared to ", team_fullname),
x = "",
y = "") +
nr_theme
}
#league comparison chart for rebounds
# League-wide average-rebounds chart with `player_name` widened and
# highlighted in crimson; x labels suppressed. Returns a ggplot object.
# Relies on globals: players_2021, darkslateblue, crimson, nr_theme.
league_compare_player_rebounds <- function(player_name) {
players_2021 %>%
mutate(hilite = if_else(player == player_name, 1, 0)) %>%
mutate(hilite = as.factor(hilite)) %>%
ggplot(aes(x = reorder(player, trb), y = trb, fill = hilite)) +
geom_col(aes(width = if_else(hilite == 1, 2, 0.3))) +
scale_fill_manual(values = c(darkslateblue, crimson)) +
scale_x_discrete(labels = "") +
labs(title = player_name,
subtitle = "Average rebounds compared to all NBA",
x = "",
y = "") +
nr_theme + theme(
axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank()
)
}
#team comparison chart for assists
# Horizontal bar chart of average assists for everyone on `team`,
# highlighting `player_name` in crimson. Returns a ggplot object.
# Relies on globals: players_2021a, darkslateblue, crimson, nr_theme.
team_compare_player_assists <- function(player_name, team, team_fullname) {
  df <- players_2021a %>%
    filter(tm == team) %>%
    mutate(hilite = if_else(player == player_name, 1, 0)) %>%
    mutate(hilite = as.factor(hilite))
  max_value <- max(df$ast, na.rm = TRUE)
  df %>% ggplot(aes(x = reorder(player, ast), y = ast, fill = hilite)) +
    geom_col(width = .7) +
    # FIX: label assists (ast), not rebounds (trb) — copy/paste bug from the
    # rebounds chart.
    geom_text(aes(label = ast), y = max_value / 25, color = "white") +
    scale_fill_manual(values = c(darkslateblue, crimson)) +
    coord_flip() +
    labs(title = player_name,
         subtitle = paste0("Average assists compared to ", team_fullname),
         x = "",
         y = "") +
    nr_theme
}
#league comparison chart for assists
# League-wide average-assists chart with `player_name` widened and
# highlighted in crimson; x labels suppressed. Returns a ggplot object.
# Relies on globals: players_2021, darkslateblue, crimson, nr_theme.
league_compare_player_assists <- function(player_name) {
players_2021 %>%
mutate(hilite = if_else(player == player_name, 1, 0)) %>%
mutate(hilite = as.factor(hilite)) %>%
ggplot(aes(x = reorder(player, ast), y = ast, fill = hilite)) +
geom_col(aes(width = if_else(hilite == 1, 2, 0.3))) +
scale_fill_manual(values = c(darkslateblue, crimson)) +
scale_x_discrete(labels = "") +
labs(title = player_name,
subtitle = "Average assists compared to all NBA",
x = "",
y = "") +
nr_theme + theme(
axis.title.x=element_blank(),
axis.text.x=element_blank(),
axis.ticks.x=element_blank()
)
}
#team comparison chart for blocks
# Horizontal bar chart of average blocks for everyone on `team`,
# highlighting `player_name` in crimson. Returns a ggplot object.
# Relies on globals: players_2021a, darkslateblue, crimson, nr_theme.
team_compare_player_blocks <- function(player_name, team, team_fullname) {
  df <- players_2021a %>%
    filter(tm == team) %>%
    mutate(hilite = if_else(player == player_name, 1, 0)) %>%
    mutate(hilite = as.factor(hilite))
  # FIX: na.rm = TRUE (the sibling charts already do this) — one NA blk
  # value made max_value NA and broke the label y-position.
  max_value <- max(df$blk, na.rm = TRUE)
  df %>% ggplot(aes(x = reorder(player, blk), y = blk, fill = hilite)) +
    geom_col(width = .7) +
    geom_text(aes(label = blk), y = max_value / 25, color = "white") +
    scale_fill_manual(values = c(darkslateblue, crimson)) +
    coord_flip() +
    labs(title = player_name,
         subtitle = paste0("Average blocks compared to ", team_fullname),
         x = "",
         y = "") +
    nr_theme
}
#league comparison chart for blocks
# League-wide average-blocks chart with `player_name` highlighted in
# crimson; x labels suppressed. Returns a ggplot object.
# Relies on globals: players_2021, darkslateblue, crimson, nr_theme.
league_compare_player_blocks <- function(player_name) {
  players_2021 %>%
    mutate(hilite = if_else(player == player_name, 1, 0)) %>%
    mutate(hilite = as.factor(hilite)) %>%
    ggplot(aes(x = reorder(player, blk), y = blk, fill = hilite)) +
    # FIX: use the same hilite-dependent width mapping as every other
    # league_compare_* chart so the selected player's bar stands out among
    # hundreds of thin bars (was a uniform width = .7).
    geom_col(aes(width = if_else(hilite == 1, 2, 0.3))) +
    scale_fill_manual(values = c(darkslateblue, crimson)) +
    scale_x_discrete(labels = "") +
    labs(title = player_name,
         subtitle = "Average blocks compared to all NBA",
         x = "",
         y = "") +
    nr_theme + theme(
      axis.title.x = element_blank(),
      axis.text.x = element_blank(),
      axis.ticks.x = element_blank()
    )
}
#team comparison chart for steals
# Horizontal bar chart of average steals for everyone on `team`,
# highlighting `player_name` in crimson. Returns a ggplot object.
# Relies on globals: players_2021a, darkslateblue, crimson, nr_theme.
# NOTE(review): unlike its sibling charts this one draws no geom_text value
# labels — presumably unfinished; the dead `max_value` local that would have
# supported them has been removed.
team_compare_player_steals <- function(player_name, team, team_fullname) {
  df <- players_2021a %>%
    filter(tm == team) %>%
    mutate(hilite = if_else(player == player_name, 1, 0)) %>%
    mutate(hilite = as.factor(hilite))
  df %>% ggplot(aes(x = reorder(player, stl), y = stl, fill = hilite)) +
    geom_col(width = .7) +
    scale_fill_manual(values = c(darkslateblue, crimson)) +
    coord_flip() +
    labs(title = player_name,
         subtitle = paste0("Average steals compared to ", team_fullname),
         x = "",
         y = "") +
    nr_theme
}
#league comparison chart for steals
# League-wide average-steals chart with `player_name` highlighted in
# crimson; x labels suppressed. Returns a ggplot object.
# Relies on globals: players_2021, darkslateblue, crimson, nr_theme.
league_compare_player_steals <- function(player_name) {
  players_2021 %>%
    mutate(hilite = if_else(player == player_name, 1, 0)) %>%
    mutate(hilite = as.factor(hilite)) %>%
    ggplot(aes(x = reorder(player, stl), y = stl, fill = hilite)) +
    # FIX: `size` does not control bar width; use the hilite-dependent width
    # mapping every other league_compare_* chart uses so the selected
    # player's bar is actually visible.
    geom_col(aes(width = if_else(hilite == 1, 2, 0.3))) +
    scale_fill_manual(values = c(darkslateblue, crimson)) +
    scale_x_discrete(labels = "") +
    labs(title = player_name,
         subtitle = "Average steals compared to all NBA",
         x = "",
         y = "") +
    nr_theme + theme(
      axis.title.x = element_blank(),
      axis.text.x = element_blank(),
      axis.ticks.x = element_blank()
    )
}
#make it a function that takes player name
# shooting_chart <- function(player_name) {
# df <- wnba_shooting %>%
# filter(player == toupper(player_name) ) %>%
# mutate(shots = ordered(shots, levels = c("missed", "made")))
# max_fga <- max(df$FGA, na.rm = TRUE)
# df %>%
# ggplot(aes(x = reorder(zone, FGA), y = value, fill = shots)) +
# geom_col(width = .7) +
# geom_text(aes(label = ifelse(value > 1, value, ""), y = ifelse(shots == "made", value / 2, (FGA - value / 2)))) +
# geom_label(aes(label = ifelse(FGA > 0, paste0(percent, "%"), "-"),
# y = ifelse(FGA > 0, FGA + max_fga * 0.05, 1)),
# fill = "#385097", color = "white") +
# scale_x_discrete(labels = c("right" = "right corner 3", "restricted" = "restricted area",
# "paint" = "in the paint", "mid" = "mid range", "left" = "left corner 3",
# "break" = "above the break 3")) +
# scale_fill_manual(values = c("grey", "#dd1f22")) +
# coord_flip() +
# nyl_theme + theme(
# legend.position = "top"
# ) +
# labs(
# title = paste0("Shooting by Zone - ", player_name),
# subtitle = "Field Goal Attempts - 2020 Season",
# x = "",
# y = ""
# )
# }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.cloudhsmv2_operations.R
\name{restore_backup}
\alias{restore_backup}
\title{Restores a specified AWS CloudHSM backup that is in the PENDING_DELETION state}
\usage{
restore_backup(BackupId)
}
\arguments{
\item{BackupId}{[required] The ID of the backup to be restored. To find the ID of a backup, use the DescribeBackups operation.}
}
\description{
Restores a specified AWS CloudHSM backup that is in the \code{PENDING_DELETION} state. For more information on deleting a backup, see DeleteBackup.
}
\section{Accepted Parameters}{
\preformatted{restore_backup(
BackupId = "string"
)
}
}
|
/service/paws.cloudhsmv2/man/restore_backup.Rd
|
permissive
|
CR-Mercado/paws
|
R
| false
| true
| 670
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.cloudhsmv2_operations.R
\name{restore_backup}
\alias{restore_backup}
\title{Restores a specified AWS CloudHSM backup that is in the PENDING_DELETION state}
\usage{
restore_backup(BackupId)
}
\arguments{
\item{BackupId}{[required] The ID of the backup to be restored. To find the ID of a backup, use the DescribeBackups operation.}
}
\description{
Restores a specified AWS CloudHSM backup that is in the \code{PENDING_DELETION} state. For more information on deleting a backup, see DeleteBackup.
}
\section{Accepted Parameters}{
\preformatted{restore_backup(
BackupId = "string"
)
}
}
|
## Setup ----
# Builds monthly rainfall (mm) and temperature (degC) rasters from
# ERA-Interim NetCDF files, plus soil available-water capacity, as inputs
# for a Palmer Drought Severity Index calculation (cszang/pdsi).
# NOTE(review): setwd() + rm(list = ls()) mutate the calling session; prefer
# project-relative paths and a fresh R session per run.
setwd("/data/Data")
rm(list = ls())
packages <- c("raster", "ncdf4", "rgdal", "data.table", "devtools")
# Install any listed package that is missing (the %in% test matches names
# against the installed.packages() matrix), then attach them all.
if(max(!packages %in% installed.packages())>=1)install.packages(packages[!packages %in% installed.packages()])
lapply(packages, require, character.only = TRUE)
install_github("cszang/pdsi")
# (on Linux)
library(pdsi)
pdsi::build_linux_binary()
## Rainfall ----
ERArain <- stack("/data/Data/ERA_Interim_rainfall.nc")
# chars 2-8 of the layer names give "YYYY.MM" — assumes names like
# "X1990.01...." (TODO confirm against the NetCDF).
months <- substr(names(ERArain), 2, 8)
# Keep layers through 2013 only.
indices <- which(as.integer(substr(months, 1, 4))<=2013)
months <- months[indices]
ERArain <- subset(ERArain, indices)
new_z <- unique(months)
# Sum each consecutive pair of layers into one monthly value — assumes the
# file stores exactly two sub-monthly slices per month (TODO confirm).
indices <- rep(seq_len(nlayers(ERArain)/2), each=2)
ERArain <- stackApply(ERArain, indices, sum)
# x1000: presumably metres -> millimetres (ERA-Interim precip units); verify.
ERArain <- calc(ERArain, function(x){x*1000})
names(ERArain) <- as.character(new_z)
ERArain <- setZ(ERArain, new_z)
## Temperature ----
# Same 2013 cutoff as rainfall, but no pairwise aggregation here.
ERAtemp <- stack("/data/Data/ERA_Interim_temp.nc")
months <- substr(names(ERAtemp), 2, 8)
indices <- which(as.integer(substr(months, 1, 4))<=2013)
months <- months[indices]
ERAtemp <- subset(ERAtemp, indices)
# Kelvin -> Celsius.
ERAtemp <- calc(ERAtemp, function(x){x-273.15})
## AWC ----
# Soil available-water capacity, reprojected/resampled onto the ERA grid.
AWC <- raster("ISRIC_available_water.tif")
AWC <- projectRaster(AWC, ERArain)
# plot(AWC)
# density(AWC)
## Make data table ----
# Flatten rasters into per-cell structures for the PDSI input table.
ERArain <- rasterToPolygons(ERArain)
ERAtemp <- as.matrix(ERAtemp)
latitude <- coordinates(ERArain)[, 2]
awc <- as.vector(AWC)
## TAKE TOO LONG :(
# (Cell-by-cell data.table assembly below was abandoned as too slow; a
# vectorized reshape would be the fix.)
# for (i in 1:dim(ERAtemp)[1]){
# if(i == 1){
# x <- ERAtemp[i,]
# names <- names(x)
# climate <- data.table(cell = rep_len(i, dim(ERAtemp)[2]),
# year = as.integer(substr(names, 2, 5)),
# month = as.integer(substr(names, 7, 8)),
# temp = x,
# rain = ERArain@data[i,])
# cat(paste("Finished cell", i))
# }else{
# x <- ERAtemp[i,]
# names <- names(x)
# newcell <- data.table(cell = rep_len(i, dim(ERAtemp)[2]),
# year = as.integer(substr(names, 2, 5)),
# month = as.integer(substr(names, 7, 8)),
# temp = x,
# rain = ERArain@data[i,])
# climate <- rbind(climate, newcell)
# if(i %% 500 == 0) {
# cat(paste("Finished cell", i))
# }
# }}
|
/Rscripts/Weather/Palmer_drought.R
|
no_license
|
XPTG6/weathershocks
|
R
| false
| false
| 2,306
|
r
|
## Setup ----
setwd("/data/Data")
rm(list = ls())
packages <- c("raster", "ncdf4", "rgdal", "data.table", "devtools")
if(max(!packages %in% installed.packages())>=1)install.packages(packages[!packages %in% installed.packages()])
lapply(packages, require, character.only = TRUE)
install_github("cszang/pdsi")
# (on Linux)
library(pdsi)
pdsi::build_linux_binary()
## Rainfall ----
ERArain <- stack("/data/Data/ERA_Interim_rainfall.nc")
months <- substr(names(ERArain), 2, 8)
indices <- which(as.integer(substr(months, 1, 4))<=2013)
months <- months[indices]
ERArain <- subset(ERArain, indices)
new_z <- unique(months)
indices <- rep(seq_len(nlayers(ERArain)/2), each=2)
ERArain <- stackApply(ERArain, indices, sum)
ERArain <- calc(ERArain, function(x){x*1000})
names(ERArain) <- as.character(new_z)
ERArain <- setZ(ERArain, new_z)
## Temperature ----
ERAtemp <- stack("/data/Data/ERA_Interim_temp.nc")
months <- substr(names(ERAtemp), 2, 8)
indices <- which(as.integer(substr(months, 1, 4))<=2013)
months <- months[indices]
ERAtemp <- subset(ERAtemp, indices)
ERAtemp <- calc(ERAtemp, function(x){x-273.15})
## AWC ----
AWC <- raster("ISRIC_available_water.tif")
AWC <- projectRaster(AWC, ERArain)
# plot(AWC)
# density(AWC)
## Make data table ----
ERArain <- rasterToPolygons(ERArain)
ERAtemp <- as.matrix(ERAtemp)
latitude <- coordinates(ERArain)[, 2]
awc <- as.vector(AWC)
## TAKE TOO LONG :(
# for (i in 1:dim(ERAtemp)[1]){
# if(i == 1){
# x <- ERAtemp[i,]
# names <- names(x)
# climate <- data.table(cell = rep_len(i, dim(ERAtemp)[2]),
# year = as.integer(substr(names, 2, 5)),
# month = as.integer(substr(names, 7, 8)),
# temp = x,
# rain = ERArain@data[i,])
# cat(paste("Finished cell", i))
# }else{
# x <- ERAtemp[i,]
# names <- names(x)
# newcell <- data.table(cell = rep_len(i, dim(ERAtemp)[2]),
# year = as.integer(substr(names, 2, 5)),
# month = as.integer(substr(names, 7, 8)),
# temp = x,
# rain = ERArain@data[i,])
# climate <- rbind(climate, newcell)
# if(i %% 500 == 0) {
# cat(paste("Finished cell", i))
# }
# }}
|
library(shiny)
# Shiny server with a single output, `arcFrame`: a static ArcGIS Online web
# map embedded as a raw HTML iframe (no reactive inputs are used).
shinyServer(
function(input, output) {
output$arcFrame <- renderUI({
# NOTE(review): the .embed-container CSS rule below is defined but never
# referenced by the iframe — presumably left over from the responsive
# variant kept in comments at the bottom of this file; confirm before
# removing.
HTML('
<style>
.embed-container {
position: relative;
padding-bottom: 80%;
height: 0;
max-width: 100%;
}
</style>
<iframe
width="500"
height="400"
frameborder="0"
scrolling="no"
marginheight="0"
marginwidth="0"
title="provPrepTest"
src="//charlotte.maps.arcgis.com/apps/Embed/index.html?webmap=19da1da27f8a4a6ea508bdd9b10e44a4&extent=-80.7557,35.1872,-80.6,35.3118&zoom=true&scale=true&legendlayers=true&disable_scroll=true&theme=light">
</iframe>
')
})
})
## output$arcFrame <- renderUI({
## HTML('
## <style>
## .embed-container iframe , .embed-container object, .embed-container iframe {
## position: absolute;
## top: 0;
## left: 0;
## width: 100%;
## height: 100%;
## }
## small {
## position: absolute;
## z-index: 40;
## bottom: 0;
## margin-bottom: -15px;
## }
## </style>
## <div class="embed-container">
## <iframe
## width="500"
## height="400"
## frameborder="0"
## scrolling="no"
## marginheight="0"
## marginwidth="0"
## title="provPrepTest"
## src="//charlotte.maps.arcgis.com/apps/Embed/index.html?webmap=19da1da27f8a4a6ea508bdd9b10e44a4&extent=-80.7557,35.1872,-80.6,35.3118&zoom=true&scale=true&legendlayers=true&disable_scroll=true&theme=light">
## </iframe>
## </div>
## ')
## })
## shinyServer(
## function(input, output) {
## output$arcFrame <- renderUI({
## map <- tags$iframe(src="//charlotte.maps.arcgis.com/apps/Embed/index.html?webmap=19da1da27f8a4a6ea508bdd9b10e44a4&extent=-80.7557,35.1872,-80.6,35.3118&zoom=true&scale=true&legendlayers=true&disable_scroll=true&theme=light",
## height=600,
## width=500)
## print(map)
## })
## }
## )
|
/soArc/server.r
|
no_license
|
joelnc/ShinyProjects
|
R
| false
| false
| 2,339
|
r
|
library(shiny)
shinyServer(
function(input, output) {
output$arcFrame <- renderUI({
HTML('
<style>
.embed-container {
position: relative;
padding-bottom: 80%;
height: 0;
max-width: 100%;
}
</style>
<iframe
width="500"
height="400"
frameborder="0"
scrolling="no"
marginheight="0"
marginwidth="0"
title="provPrepTest"
src="//charlotte.maps.arcgis.com/apps/Embed/index.html?webmap=19da1da27f8a4a6ea508bdd9b10e44a4&extent=-80.7557,35.1872,-80.6,35.3118&zoom=true&scale=true&legendlayers=true&disable_scroll=true&theme=light">
</iframe>
')
})
})
## output$arcFrame <- renderUI({
## HTML('
## <style>
## .embed-container iframe , .embed-container object, .embed-container iframe {
## position: absolute;
## top: 0;
## left: 0;
## width: 100%;
## height: 100%;
## }
## small {
## position: absolute;
## z-index: 40;
## bottom: 0;
## margin-bottom: -15px;
## }
## </style>
## <div class="embed-container">
## <iframe
## width="500"
## height="400"
## frameborder="0"
## scrolling="no"
## marginheight="0"
## marginwidth="0"
## title="provPrepTest"
## src="//charlotte.maps.arcgis.com/apps/Embed/index.html?webmap=19da1da27f8a4a6ea508bdd9b10e44a4&extent=-80.7557,35.1872,-80.6,35.3118&zoom=true&scale=true&legendlayers=true&disable_scroll=true&theme=light">
## </iframe>
## </div>
## ')
## })
## shinyServer(
## function(input, output) {
## output$arcFrame <- renderUI({
## map <- tags$iframe(src="//charlotte.maps.arcgis.com/apps/Embed/index.html?webmap=19da1da27f8a4a6ea508bdd9b10e44a4&extent=-80.7557,35.1872,-80.6,35.3118&zoom=true&scale=true&legendlayers=true&disable_scroll=true&theme=light",
## height=600,
## width=500)
## print(map)
## })
## }
## )
|
# Plot PROSPECT-5 leaf spectra from an MCMC posterior: an uncertainty ribbon
# built from posterior draws plus the MAP spectrum (dashed), for a Liana PFT
# and a Tree PFT fit jointly.
rm(list = ls())

# Attach the packages this script actually uses (it previously relied on
# them being pre-loaded in the interactive session).
library(dplyr)
library(ggplot2)
library(BayesianTools)

# Candidate posterior files; only the Sanchez fit is active below.
best_sets <- c("~/data/RTM/current_samples_Kalacska_priors_all.rds",
               "~/data/RTM/current_samples_Marvin.rds",
               "~/data/RTM/current_samples_Sanchez_all.rds",
               "~/data/RTM/current_samples_Foster_all.rds")
best_sets <- c("~/data/RTM/current_samples_Sanchez_all.rds")
best_set <- readRDS(best_sets)

Nensemble = 250
ensemble <- getSample(best_set, numSamples = Nensemble)

# Split a full parameter vector into its Liana and Tree halves; the first
# two entries are skipped (presumably error/nuisance parameters — TODO
# confirm against the fitting script).
split_params <- function(theta) {
  half <- (length(theta) - 2) / 2
  list(liana = theta[3:(2 + half)],
       tree = theta[(3 + half):length(theta)])
}

# Run PROSPECT-5 for one PFT's parameters and return a long data frame with
# the spectrum, wavelength, and PFT label.
prospect_spectrum <- function(params, pft_name) {
  p5 <- c(params['Nlayers'], params['Cab'], params['Car'], params['Cw'], params['Cm'])
  run <- PEcAnRTM::prospect(p5, version = "5")
  as.data.frame(run) %>% mutate(wl = PEcAnRTM::wavelengths(run),
                                pft = pft_name)
}

all.spectra <- data.frame()
# FIX: (1) iterate over the rows actually returned — getSample() may return
# fewer than Nensemble, and the old max(Nensemble, nrow(ensemble)) could
# index past the end; (2) ACCUMULATE into all.spectra — the old loop
# overwrote `test.plot` with the always-empty all.spectra plus the current
# draw, so only the LAST draw survived and the "ensemble" ribbon collapsed
# to a single spectrum.
for (i in seq_len(min(Nensemble, nrow(ensemble)))) {
  parts <- split_params(ensemble[i, ])
  all.spectra <- rbind(all.spectra,
                       rbind(prospect_spectrum(parts$liana, "Liana"),
                             prospect_spectrum(parts$tree, "Tree")) %>%
                         mutate(run = i))
}
# Per-wavelength, per-PFT ribbon bounds across the whole ensemble.
group.plot <- all.spectra %>% group_by(wl, pft) %>% summarise(r_m = mean(reflectance),
                                                             r_min = min(reflectance),
                                                             r_max = max(reflectance))

# Maximum a posteriori spectrum for each PFT.
MAP_samples <- MAP(best_set)$parametersMAP
map_parts <- split_params(MAP_samples)
best.run <- rbind(prospect_spectrum(map_parts$liana, "Liana"),
                  prospect_spectrum(map_parts$tree, "Tree"))

ggplot() +
  geom_line(data = best.run, aes(x = wl, y = reflectance, color = pft), linetype = 2) +
  geom_ribbon(data = group.plot, aes(x = wl, ymin = r_min, ymax = r_max, color = pft, fill = pft)) +
  theme_bw()
# ggplot() +
#   geom_line(data = all.spectra, aes(x = wl, y = transmittance)) +
#   theme_bw()
|
/scripts/plot_spectra_from_posterior_canopy.R
|
no_license
|
femeunier/LianaAlbedo
|
R
| false
| false
| 2,801
|
r
|
## Plot PROSPECT-5 leaf spectra sampled from a calibration posterior:
## ensemble envelope plus MAP spectrum for Liana and Tree PFTs.
## Relies on PEcAnRTM, BayesianTools (getSample/MAP), dplyr and ggplot2
## being attached by the calling session -- there are no library() calls.
rm(list = ls())
best_sets <- c("~/data/RTM/current_samples_Kalacska_priors_all.rds",
"~/data/RTM/current_samples_Marvin.rds",
"~/data/RTM/current_samples_Sanchez_all.rds",
"~/data/RTM/current_samples_Foster_all.rds")
# Only the Sanchez file is actually read; the 4-element vector above is dead.
best_sets <- c("~/data/RTM/current_samples_Sanchez_all.rds")
best_set <- readRDS(best_sets)
Nensemble = 250
ensemble <- getSample(best_set,numSamples = Nensemble)
all.spectra <- data.frame()
# NOTE(review): max(Nensemble, nrow(ensemble)) over-indexes `ensemble` if
# getSample() returns fewer rows than requested; seq_len(nrow(ensemble))
# (or min()) was probably intended -- confirm.
for (i in seq(1,max(Nensemble,nrow(ensemble)))){
temp.param <- ensemble[i,]
# The first two entries are skipped; the rest is split into a liana half
# and a tree half of equal length (layout assumed -- TODO confirm).
Liana_param <- temp.param[3:(2+((length(temp.param)-2)/2))]
best_set_L<- c(Liana_param['Nlayers'],Liana_param['Cab'],Liana_param['Car'],Liana_param['Cw'],Liana_param['Cm'])
best_run_L <- PEcAnRTM::prospect(best_set_L, version = "5")
Tree_param <- temp.param[(3+((length(temp.param)-2)/2)):length(temp.param)]
best_set_T<- c(Tree_param['Nlayers'],Tree_param['Cab'],Tree_param['Car'],Tree_param['Cw'],Tree_param['Cm'])
best_run_T <- PEcAnRTM::prospect(best_set_T, version = "5")
# NOTE(review): `all.spectra` is never reassigned, so after the loop
# `test.plot` holds only the final iteration's spectra and the min/max
# "envelope" below is degenerate. An accumulating
# `all.spectra <- rbind(all.spectra, ...)` was likely intended -- confirm.
test.plot <- rbind(all.spectra,
rbind(as.data.frame(best_run_L) %>% mutate(wl = PEcAnRTM::wavelengths(best_run_L),
pft = "Liana"),
as.data.frame(best_run_T) %>% mutate(wl = PEcAnRTM::wavelengths(best_run_T),
pft = "Tree")) %>% mutate(run = i))
}
# Per-wavelength mean/min/max reflectance by PFT.
group.plot <- test.plot %>% group_by(wl,pft) %>% summarise(r_m = mean(reflectance),
r_min = min(reflectance),
r_max = max(reflectance))
# MAP parameter vector, split the same way as the ensemble rows.
MAP_samples <- MAP(best_set)$parametersMAP
Liana_param <- MAP_samples[3:(2+((length(MAP_samples)-2)/2))]
best_set_L<- c(Liana_param['Nlayers'],Liana_param['Cab'],Liana_param['Car'],Liana_param['Cw'],Liana_param['Cm'])
best_run_L <- PEcAnRTM::prospect(best_set_L, version = "5")
Tree_param <- MAP_samples[(3+((length(MAP_samples)-2)/2)):length(MAP_samples)]
best_set_T<- c(Tree_param['Nlayers'],Tree_param['Cab'],Tree_param['Car'],Tree_param['Cw'],Tree_param['Cm'])
best_run_T <- PEcAnRTM::prospect(best_set_T, version = "5")
best.run <- rbind(as.data.frame(best_run_L) %>% mutate(wl = PEcAnRTM::wavelengths(best_run_L),
pft = "Liana"),
as.data.frame(best_run_T) %>% mutate(wl = PEcAnRTM::wavelengths(best_run_T),
pft = "Tree"))
# MAP spectrum (dashed line) over the ensemble ribbon.
ggplot() +
geom_line(data = best.run,aes(x = wl,y = reflectance,color = pft),linetype = 2) +
geom_ribbon(data = group.plot,aes(x = wl,ymin = r_min,ymax = r_max,color = pft,fill = pft)) +
theme_bw()
# ggplot() +
#   geom_line(data = test.plot,aes(x = wl,y = transmittance)) +
#   theme_bw()
|
##' Function \code{calc_spread}
##'
##' Computes the spread for each bond of a bond portfolio (PTF): for every
##' row, the spread is solved (via \code{uniroot}) so that the model price
##' from \code{calcul_vm_obligation} equals the observed market value.
##'
##' @name calc_spread
##' @docType methods
##' @param obligation an object of class \code{\link{Obligation}}; its
##'   \code{ptf} slot holds one row per bond.
##' @param yield_curve a \code{numeric} vector of yields passed through to
##'   \code{calcul_vm_obligation}.
##' @return a \code{numeric} vector with one spread per portfolio row.
##' @author Damien Tichit for Sia Partners
##' @export
##' @include Obligation-class.R
##'
setGeneric(name = "calc_spread", def = function(obligation, yield_curve) {standardGeneric("calc_spread")})
setMethod(
f = "calc_spread",
signature = c(obligation = "Obligation", yield_curve = "numeric"),
definition = function(obligation, yield_curve){
## ###########################
## Extract the data
## ###########################
# Pull portfolio columns by name (.subset2 = fast, no-dispatch [[ access)
name_ptf_oblig <- names(obligation@ptf)
nominal_ptf <- .subset2(obligation@ptf, which(name_ptf_oblig == "nominal"))
remboursement_ptf <- .subset2(obligation@ptf, which(name_ptf_oblig == "remboursement"))
vm_ptf <- .subset2(obligation@ptf, which(name_ptf_oblig == "valeur_marche"))
coupon_ptf <- .subset2(obligation@ptf, which(name_ptf_oblig == "coupon"))
maturite_ptf <- .subset2(obligation@ptf, which(name_ptf_oblig == "maturite"))
dur_det_ptf <- .subset2(obligation@ptf, which(name_ptf_oblig == "duree_detention"))
# Residual maturity of each bond = maturity minus holding period
mat_res_ptf <- maturite_ptf - dur_det_ptf
## ###########################
## Compute the spreads
## ###########################
# For each bond, find the spread in [-1, 1] that zeroes
# (model price - market value).
spread <- sapply(1L:nrow(obligation@ptf), function(id) {
uniroot(f = function(x)
calcul_vm_obligation(nominal = nominal_ptf[id], coupon = coupon_ptf[id], mat_res = mat_res_ptf[id], remboursement = remboursement_ptf[id], spread = x, yield = yield_curve) - vm_ptf[id],
interval = c(-1, 1), tol = .Machine$double.eps^0.5)$root
})
# Output
return(spread)
}
)
|
/R/Obligation-calc_spread.R
|
no_license
|
DTichit/ALModel
|
R
| false
| false
| 2,078
|
r
|
##' Function \code{calc_spread}
##'
##' Computes the spread for each bond of a bond portfolio (PTF): for every
##' row, the spread is solved (via \code{uniroot}) so that the model price
##' from \code{calcul_vm_obligation} equals the observed market value.
##'
##' @name calc_spread
##' @docType methods
##' @param obligation an object of class \code{\link{Obligation}}; its
##'   \code{ptf} slot holds one row per bond.
##' @param yield_curve a \code{numeric} vector of yields passed through to
##'   \code{calcul_vm_obligation}.
##' @return a \code{numeric} vector with one spread per portfolio row
##'   (zero-length for an empty portfolio).
##' @author Damien Tichit for Sia Partners
##' @export
##' @include Obligation-class.R
##'
setGeneric(name = "calc_spread", def = function(obligation, yield_curve) {standardGeneric("calc_spread")})
setMethod(
f = "calc_spread",
signature = c(obligation = "Obligation", yield_curve = "numeric"),
definition = function(obligation, yield_curve){
## ###########################
## Extract the data
## ###########################
# Hoist the slot access; pull columns by name
# (.subset2 = fast, no-dispatch [[ access).
ptf <- obligation@ptf
name_ptf_oblig <- names(ptf)
nominal_ptf <- .subset2(ptf, which(name_ptf_oblig == "nominal"))
remboursement_ptf <- .subset2(ptf, which(name_ptf_oblig == "remboursement"))
vm_ptf <- .subset2(ptf, which(name_ptf_oblig == "valeur_marche"))
coupon_ptf <- .subset2(ptf, which(name_ptf_oblig == "coupon"))
maturite_ptf <- .subset2(ptf, which(name_ptf_oblig == "maturite"))
dur_det_ptf <- .subset2(ptf, which(name_ptf_oblig == "duree_detention"))
# Residual maturity of each bond = maturity minus holding period
mat_res_ptf <- maturite_ptf - dur_det_ptf
## ###########################
## Compute the spreads
## ###########################
# For each bond, find the spread in [-1, 1] that zeroes
# (model price - market value).
# vapply() replaces sapply() for a type-stable numeric result, and
# seq_len() replaces 1L:nrow(), which misbehaves on an empty portfolio.
spread <- vapply(seq_len(nrow(ptf)), function(id) {
uniroot(f = function(x)
calcul_vm_obligation(nominal = nominal_ptf[id], coupon = coupon_ptf[id], mat_res = mat_res_ptf[id], remboursement = remboursement_ptf[id], spread = x, yield = yield_curve) - vm_ptf[id],
interval = c(-1, 1), tol = .Machine$double.eps^0.5)$root
}, numeric(1))
# Output
return(spread)
}
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/user_info.R
\name{user_contributions}
\alias{user_contributions}
\title{Retrieve user contributions}
\usage{
user_contributions(language = NULL, project = NULL, domain = NULL,
username, properties = c("ids", "title", "timestamp", "comment",
"parsedcomment", "size", "sizediff", "flags", "tags"), mainspace = FALSE,
limit = 50, clean_response = FALSE, ...)
}
\arguments{
\item{language}{The language code of the project you wish to query,
if appropriate.}
\item{project}{The project you wish to query ("wikiquote"), if appropriate.
Should be provided in conjunction with \code{language}.}
\item{domain}{as an alternative to a \code{language} and \code{project} combination,
you can also provide a domain ("rationalwiki.org") to the URL constructor, allowing
for the querying of non-Wikimedia MediaWiki instances.}
\item{username}{The username of the user whose contributions you want to retrieve.
Due to limitations at the API end, you can only retrieve edits for one user at a time.}
\item{properties}{The metadata you want associated with each edit. Potential metadata includes "ids"
(the revision ID of the revision, which can be passed into \code{\link{revision_content}}),
"title" (the name of the page that was edited), "timestamp", "comment" (the edit summary associated
with the revision), "parsedcomment" (the same, but parsed, generating HTML from any wikitext
in that comment), "size" (the size, in uncompressed bytes, of the edit), "sizediff" (the size
delta between this edit, and the last edit to the page), "flags" (whether the revision was
'minor' or not), and "tags" (any tags associated with the revision).}
\item{mainspace}{A boolean flag; FALSE retrieves all of the most recent contributions, while
TRUE limits the retrieved contributions to those in the 'mainspace' - in other words, edits to
actual articles. Set to FALSE by default}
\item{limit}{The number of edits to be retrieved. 50 is the maximum for logged-out API users,
and putting in more than 50 will generate a warning.}
\item{clean_response}{whether to do some basic sanitising of the resulting data structure.
Set to FALSE by default.}
\item{...}{further arguments to pass to httr's GET.}
}
\description{
Retrieves metadata associated with the most recent contributions by a
specified user.
}
\examples{
#Retrieve the timestamps of a user's recent contributions to the English-language Wikipedia
contribs <- user_contributions("en", "wikipedia", username = "Ironholds",
properties = "timestamp")
#Retrieve the timestamps of a user's recent contributions to a non-Wikimedia wiki.
rw_contribs <- user_contributions(domain = "rationalwiki.org", username = "David Gerard",
properties = "ids", limit = 1)
}
\seealso{
\code{\link{user_information}} for information about a specific user (or group of users),
and \code{recent_changes} for non-user-specific recent actions.
}
|
/man/user_contributions.Rd
|
permissive
|
cran/WikipediR
|
R
| false
| true
| 3,039
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/user_info.R
\name{user_contributions}
\alias{user_contributions}
\title{Retrieve user contributions}
\usage{
user_contributions(language = NULL, project = NULL, domain = NULL,
username, properties = c("ids", "title", "timestamp", "comment",
"parsedcomment", "size", "sizediff", "flags", "tags"), mainspace = FALSE,
limit = 50, clean_response = FALSE, ...)
}
\arguments{
\item{language}{The language code of the project you wish to query,
if appropriate.}
\item{project}{The project you wish to query ("wikiquote"), if appropriate.
Should be provided in conjunction with \code{language}.}
\item{domain}{as an alternative to a \code{language} and \code{project} combination,
you can also provide a domain ("rationalwiki.org") to the URL constructor, allowing
for the querying of non-Wikimedia MediaWiki instances.}
\item{username}{The username of the user whose contributions you want to retrieve.
Due to limitations at the API end, you can only retrieve edits for one user at a time.}
\item{properties}{The metadata you want associated with each edit. Potential metadata includes "ids"
(the revision ID of the revision, which can be passed into \code{\link{revision_content}}),
"title" (the name of the page that was edited), "timestamp", "comment" (the edit summary associated
with the revision), "parsedcomment" (the same, but parsed, generating HTML from any wikitext
in that comment), "size" (the size, in uncompressed bytes, of the edit), "sizediff" (the size
delta between this edit, and the last edit to the page), "flags" (whether the revision was
'minor' or not), and "tags" (any tags associated with the revision).}
\item{mainspace}{A boolean flag; FALSE retrieves all of the most recent contributions, while
TRUE limits the retrieved contributions to those in the 'mainspace' - in other words, edits to
actual articles. Set to FALSE by default}
\item{limit}{The number of edits to be retrieved. 50 is the maximum for logged-out API users,
and putting in more than 50 will generate a warning.}
\item{clean_response}{whether to do some basic sanitising of the resulting data structure.
Set to FALSE by default.}
\item{...}{further arguments to pass to httr's GET.}
}
\description{
Retrieves metadata associated with the most recent contributions by a
specified user.
}
\examples{
#Retrieve the timestamps of a user's recent contributions to the English-language Wikipedia
contribs <- user_contributions("en", "wikipedia", username = "Ironholds",
properties = "timestamp")
#Retrieve the timestamps of a user's recent contributions to a non-Wikimedia wiki.
rw_contribs <- user_contributions(domain = "rationalwiki.org", username = "David Gerard",
properties = "ids", limit = 1)
}
\seealso{
\code{\link{user_information}} for information about a specific user (or group of users),
and \code{recent_changes} for non-user-specific recent actions.
}
|
## Exploratory analysis of the pseudo-Facebook sample (Udacity EDA lesson 1):
## histograms and frequency polygons of birthdays, friend counts, tenure,
## age and likes, mostly split by gender.
getwd()
list.files()
pf <- read.csv('pseudo_facebook.tsv', sep= '\t')
#install.packages('ggplot2')
library('ggplot2')
library(ggthemes)
theme_set(theme_minimal(24))
names(pf)
# Birthday day-of-month distribution (qplot, then equivalent ggplot form)
qplot(x = dob_day, data = pf) +
scale_x_continuous(breaks=1:31)
ggplot(aes(x = dob_day), data = pf) +
geom_histogram(binwidth = 1) +
scale_x_continuous(breaks = 1:31)
ggplot(aes(x = dob_day), data = pf) +
geom_histogram(binwidth = 1) +
scale_x_continuous(breaks = 1:31) +
facet_wrap(~dob_month, ncol = 3)
#friend counts
# NOTE: scale_x_continuous(limits=) removes out-of-range observations
# before the stat is computed (with a warning).
ggplot(aes(x = friend_count), data = pf) +
geom_histogram() +
scale_x_continuous(limits = c(0, 1000))
#friend counts with binwidth and breaks
ggplot(aes(x = friend_count), data = pf) +
geom_histogram(binwidth = 25) +
scale_x_continuous(limits = c(0, 1000), breaks = seq(0, 1000, 50))
#genders friend counts
ggplot(aes(x = friend_count), data = pf) +
geom_histogram() +
scale_x_continuous(limits = c(0, 1000), breaks = seq(0, 1000, 50)) +
facet_wrap(~gender)
#remove NA genders from the graph
ggplot(aes(x = friend_count), data = subset(pf, !is.na(gender))) +
geom_histogram() +
scale_x_continuous(limits = c(0, 1000), breaks = seq(0, 1000, 50)) +
facet_wrap(~gender)
#table for genders friend counts
table(pf$gender)
by(pf$friend_count, pf$gender, summary)
#tenure plot (tenure is in days; second plot rescales to years)
ggplot(aes(x = tenure), data = pf) +
geom_histogram(binwidth = 30, color = 'black', fill = '#099DD9')
ggplot(aes(x = tenure/365), data = pf) +
geom_histogram(binwidth = .25, color = 'black', fill = '#F79420')
#labeling plots
ggplot(aes(x = tenure / 365), data = pf) +
geom_histogram(color = 'black', fill = '#F79420') +
scale_x_continuous(breaks = seq(1, 7, 1), limits = c(0, 7)) +
xlab('Number of years using Facebook') +
ylab('Number of users in sample')
#User ages
ggplot(aes(x = age), data = pf) +
geom_histogram(binwidth = 1, fill = '#5760AB') +
scale_x_continuous(breaks = seq(0, 113, 5))
library(gridExtra)
# Log-scaled friend-count histogram
ggplot(aes(x = friend_count), data = pf) +
geom_histogram() +
scale_x_log10()
#Frequency Polygons
# NOTE(review): the ..count.. syntax is superseded by after_stat(count)
# in current ggplot2; kept as written for compatibility.
ggplot(aes(x = friend_count, y = ..count../sum(..count..)),
data = subset(pf, !is.na(gender))) +
geom_freqpoly(aes(color = gender), binwidth=10) +
scale_x_continuous(limits = c(0, 1000), breaks = seq(0, 1000, 50)) +
xlab('Friend Count') +
ylab('Proportion of users with that friend count')
ggplot(aes(x = www_likes), data = subset(pf, !is.na(gender))) +
geom_freqpoly(aes(color = gender)) +
scale_x_log10()
#Likes on the web, total per gender
by(pf$www_likes, pf$gender, sum)
|
/lesson1.R
|
no_license
|
m3hm3taydin/Explore-and-Summarize-Data
|
R
| false
| false
| 2,525
|
r
|
## Exploratory analysis of the pseudo-Facebook sample (Udacity EDA lesson 1):
## histograms and frequency polygons of birthdays, friend counts, tenure,
## age and likes, mostly split by gender.
getwd()
list.files()
pf <- read.csv('pseudo_facebook.tsv', sep= '\t')
#install.packages('ggplot2')
library('ggplot2')
library(ggthemes)
theme_set(theme_minimal(24))
names(pf)
# Birthday day-of-month distribution (qplot, then equivalent ggplot form)
qplot(x = dob_day, data = pf) +
scale_x_continuous(breaks=1:31)
ggplot(aes(x = dob_day), data = pf) +
geom_histogram(binwidth = 1) +
scale_x_continuous(breaks = 1:31)
ggplot(aes(x = dob_day), data = pf) +
geom_histogram(binwidth = 1) +
scale_x_continuous(breaks = 1:31) +
facet_wrap(~dob_month, ncol = 3)
#friend counts
# NOTE: scale_x_continuous(limits=) removes out-of-range observations
# before the stat is computed (with a warning).
ggplot(aes(x = friend_count), data = pf) +
geom_histogram() +
scale_x_continuous(limits = c(0, 1000))
#friend counts with binwidth and breaks
ggplot(aes(x = friend_count), data = pf) +
geom_histogram(binwidth = 25) +
scale_x_continuous(limits = c(0, 1000), breaks = seq(0, 1000, 50))
#genders friend counts
ggplot(aes(x = friend_count), data = pf) +
geom_histogram() +
scale_x_continuous(limits = c(0, 1000), breaks = seq(0, 1000, 50)) +
facet_wrap(~gender)
#remove NA genders from the graph
ggplot(aes(x = friend_count), data = subset(pf, !is.na(gender))) +
geom_histogram() +
scale_x_continuous(limits = c(0, 1000), breaks = seq(0, 1000, 50)) +
facet_wrap(~gender)
#table for genders friend counts
table(pf$gender)
by(pf$friend_count, pf$gender, summary)
#tenure plot (tenure is in days; second plot rescales to years)
ggplot(aes(x = tenure), data = pf) +
geom_histogram(binwidth = 30, color = 'black', fill = '#099DD9')
ggplot(aes(x = tenure/365), data = pf) +
geom_histogram(binwidth = .25, color = 'black', fill = '#F79420')
#labeling plots
ggplot(aes(x = tenure / 365), data = pf) +
geom_histogram(color = 'black', fill = '#F79420') +
scale_x_continuous(breaks = seq(1, 7, 1), limits = c(0, 7)) +
xlab('Number of years using Facebook') +
ylab('Number of users in sample')
#User ages
ggplot(aes(x = age), data = pf) +
geom_histogram(binwidth = 1, fill = '#5760AB') +
scale_x_continuous(breaks = seq(0, 113, 5))
library(gridExtra)
# Log-scaled friend-count histogram
ggplot(aes(x = friend_count), data = pf) +
geom_histogram() +
scale_x_log10()
#Frequency Polygons
# NOTE(review): the ..count.. syntax is superseded by after_stat(count)
# in current ggplot2; kept as written for compatibility.
ggplot(aes(x = friend_count, y = ..count../sum(..count..)),
data = subset(pf, !is.na(gender))) +
geom_freqpoly(aes(color = gender), binwidth=10) +
scale_x_continuous(limits = c(0, 1000), breaks = seq(0, 1000, 50)) +
xlab('Friend Count') +
ylab('Proportion of users with that friend count')
ggplot(aes(x = www_likes), data = subset(pf, !is.na(gender))) +
geom_freqpoly(aes(color = gender)) +
scale_x_log10()
#Likes on the web, total per gender
by(pf$www_likes, pf$gender, sum)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/redshift_operations.R
\name{redshift_describe_snapshot_schedules}
\alias{redshift_describe_snapshot_schedules}
\title{Returns a list of snapshot schedules}
\usage{
redshift_describe_snapshot_schedules(ClusterIdentifier,
ScheduleIdentifier, TagKeys, TagValues, Marker, MaxRecords)
}
\arguments{
\item{ClusterIdentifier}{The unique identifier for the cluster whose snapshot schedules you want
to view.}
\item{ScheduleIdentifier}{A unique identifier for a snapshot schedule.}
\item{TagKeys}{The key value for a snapshot schedule tag.}
\item{TagValues}{The value corresponding to the key of the snapshot schedule tag.}
\item{Marker}{A value that indicates the starting point for the next set of response
records in a subsequent request. If a value is returned in a response,
you can retrieve the next set of records by providing this returned
marker value in the \code{marker} parameter and retrying the command. If the
\code{marker} field is empty, all response records have been retrieved for
the request.}
\item{MaxRecords}{The maximum number of response records to return in each call. If the
number of remaining response records exceeds the specified \code{MaxRecords}
value, a value is returned in a \code{marker} field of the response. You can
retrieve the next set of records by retrying the command with the
returned \code{marker} value.}
}
\value{
A list with the following syntax:\preformatted{list(
SnapshotSchedules = list(
list(
ScheduleDefinitions = list(
"string"
),
ScheduleIdentifier = "string",
ScheduleDescription = "string",
Tags = list(
list(
Key = "string",
Value = "string"
)
),
NextInvocations = list(
as.POSIXct(
"2015-01-01"
)
),
AssociatedClusterCount = 123,
AssociatedClusters = list(
list(
ClusterIdentifier = "string",
ScheduleAssociationState = "MODIFYING"|"ACTIVE"|"FAILED"
)
)
)
),
Marker = "string"
)
}
}
\description{
Returns a list of snapshot schedules.
}
\section{Request syntax}{
\preformatted{svc$describe_snapshot_schedules(
ClusterIdentifier = "string",
ScheduleIdentifier = "string",
TagKeys = list(
"string"
),
TagValues = list(
"string"
),
Marker = "string",
MaxRecords = 123
)
}
}
\keyword{internal}
|
/cran/paws.database/man/redshift_describe_snapshot_schedules.Rd
|
permissive
|
TWarczak/paws
|
R
| false
| true
| 2,439
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/redshift_operations.R
\name{redshift_describe_snapshot_schedules}
\alias{redshift_describe_snapshot_schedules}
\title{Returns a list of snapshot schedules}
\usage{
redshift_describe_snapshot_schedules(ClusterIdentifier,
ScheduleIdentifier, TagKeys, TagValues, Marker, MaxRecords)
}
\arguments{
\item{ClusterIdentifier}{The unique identifier for the cluster whose snapshot schedules you want
to view.}
\item{ScheduleIdentifier}{A unique identifier for a snapshot schedule.}
\item{TagKeys}{The key value for a snapshot schedule tag.}
\item{TagValues}{The value corresponding to the key of the snapshot schedule tag.}
\item{Marker}{A value that indicates the starting point for the next set of response
records in a subsequent request. If a value is returned in a response,
you can retrieve the next set of records by providing this returned
marker value in the \code{marker} parameter and retrying the command. If the
\code{marker} field is empty, all response records have been retrieved for
the request.}
\item{MaxRecords}{The maximum number of response records to return in each call. If the
number of remaining response records exceeds the specified \code{MaxRecords}
value, a value is returned in a \code{marker} field of the response. You can
retrieve the next set of records by retrying the command with the
returned \code{marker} value.}
}
\value{
A list with the following syntax:\preformatted{list(
SnapshotSchedules = list(
list(
ScheduleDefinitions = list(
"string"
),
ScheduleIdentifier = "string",
ScheduleDescription = "string",
Tags = list(
list(
Key = "string",
Value = "string"
)
),
NextInvocations = list(
as.POSIXct(
"2015-01-01"
)
),
AssociatedClusterCount = 123,
AssociatedClusters = list(
list(
ClusterIdentifier = "string",
ScheduleAssociationState = "MODIFYING"|"ACTIVE"|"FAILED"
)
)
)
),
Marker = "string"
)
}
}
\description{
Returns a list of snapshot schedules.
}
\section{Request syntax}{
\preformatted{svc$describe_snapshot_schedules(
ClusterIdentifier = "string",
ScheduleIdentifier = "string",
TagKeys = list(
"string"
),
TagValues = list(
"string"
),
Marker = "string",
MaxRecords = 123
)
}
}
\keyword{internal}
|
library(forecast)
#http://www.statmethods.net/advstats/timeseries.html
# Fit exponential-smoothing (Holt-Winters) models to a time-series history.
#
# Args:
#   thists: numeric vector (or ts) of historical values to model.
# Returns:
#   Predicted next value. NOTE: still the original placeholder 0 -- the
#   fitted models are not yet used for prediction (TODO).
regressionthists <- function(thists){
  tomorrowsvalue <- 0
  # BUG FIX: the original read the global `adjustedmatrix` and ignored its
  # `thists` argument entirely; model the data actually passed in.
  timeseriesversion <- ts(thists)
  # simple exponential - models level
  fit <- HoltWinters(timeseriesversion, beta=FALSE, gamma=FALSE)
  # double exponential - models level and trend
  fit2 <- HoltWinters(timeseriesversion, gamma=FALSE)
  # triple exponential - models level, trend, and seasonal components.
  # HoltWinters() errors on non-seasonal series ("less than 2 periods"),
  # so only fit the seasonal model when the input carries seasonality.
  if (frequency(timeseriesversion) > 1) {
    fit3 <- HoltWinters(timeseriesversion)
  }
  return(tomorrowsvalue)
}
|
/predictiveanalytics/regressionfuntions.R
|
permissive
|
keithaumiller/unicorninvesting
|
R
| false
| false
| 514
|
r
|
library(forecast)
#http://www.statmethods.net/advstats/timeseries.html
# Fit exponential-smoothing (Holt-Winters) models to a time-series history.
# NOTE(review): the `thists` argument is never used -- the function models
# the global `adjustedmatrix` instead, which must exist in the caller's
# environment or this errors. The fitted models are also unused and the
# return value is a hard-coded 0 (placeholder?). Confirm intent.
regressionthists <- function(thists){
tomorrowsvalue=0
timeseriesversion = ts(adjustedmatrix[,])
# simple exponential - models level
fit <- HoltWinters(timeseriesversion, beta=FALSE, gamma=FALSE)
# double exponential - models level and trend
fit2 <- HoltWinters(timeseriesversion, gamma=FALSE)
# triple exponential - models level, trend, and seasonal components
# NOTE(review): HoltWinters() with seasonality errors on a frequency-1
# series (ts() default) -- this line likely fails; verify.
fit3 <- HoltWinters(timeseriesversion)
return(tomorrowsvalue)
}
|
## Cab-fare prediction: load packages and the raw training data, coerce
## column types, and flag degenerate rows before missing-value analysis.
## NOTE(review): rm(list=ls()), setwd() to a machine-specific path, and
## install.packages() at run time are script anti-patterns -- kept as-is.
rm(list=ls(all=T))
setwd("C:/Users/Lenovo/Documents/LM/EdWisor/Projects/Project 1")
getwd()
###############################LOAD LIBRARIES ################################
x=c("ggplot2", "DMwR", "corrgram", "Hmisc", "rpart", "randomForest", "geosphere")
install.packages(x)
lapply(x, require, character.only = TRUE)
rm(x)
################ LOAD THE GIVEN TRAIN DATA ##############################
# Column 2 (pickup_datetime, presumably -- confirm) is dropped on read.
train= read.csv("train_cab.csv", header = T)[,-2]
########Check data shape ########
str(train)
#################convert fare to Numeric type and Passenger to Integer type###################
train$fare_amount=as.numeric(as.character(train$fare_amount))
train$passenger_count=as.integer(train$passenger_count)
############## Drop rows with identical pickup and dropoff coordinates
train=subset(train, !(train$pickup_longitude==train$dropoff_longitude & train$pickup_latitude==train$dropoff_latitude))
########Treat zero values (invalid coordinates/fares) as missing
train[train==0]= NA
#######################Missing Value Analysis ##############################
#########function to calculate missing values ###################
# Summarise missing values per column of a data frame: prints a table of
# NA counts and percentages per variable, and returns a bar chart of the
# percentages (a ggplot object, auto-printed at top level).
#
# Args:
#   data: data.frame to profile.
missingvalue <- function(data){
  missing_value = data.frame(apply(data, 2 , function(x){sum(is.na(x))}))
  colnames(missing_value)="Missing_Value_count"
  # BUG FIX: percentages were computed against the global `train` row count
  # (nrow(train)); use the row count of the data actually passed in.
  missing_value$percentage=apply(missing_value , 1 , function(x){x/nrow(data)*100})
  missing_value = cbind(row.names(missing_value), missing_value)
  row.names(missing_value)=NULL
  colnames(missing_value)[1]="Variables"
  print(missing_value)
  ###########plot Missing Values#######################
  library(ggplot2)
  ggplot(data = missing_value, aes(x=reorder(Variables , -percentage),y = percentage))+
    geom_bar(stat = "identity",fill = "blue")+xlab("Variables")+
    ggtitle("Missing Values") + theme_bw()
}
##################Calculate Missing Values########################
missingvalue(train)
###########As passenger_count is a categorical variable, impute it with the mode#########
##########calculate mode - create function ###########
# Most frequent value of a vector (statistical mode). Ties are broken by
# first appearance in the data; factors are converted via their labels so
# the result is always numeric. NOTE: shadows base::mode() in this script.
mode <- function(data) {
  distinct_vals <- unique(data)
  freq <- tabulate(match(data, distinct_vals))
  modal_val <- distinct_vals[which.max(freq)]
  as.numeric(as.character(modal_val))
}
# Inspect the modal passenger count before imputing with it
mode(train$passenger_count)
#################################IMPUTATION###########################
#impute missing passenger_count with the mode
train$passenger_count[is.na(train$passenger_count)] = mode(train$passenger_count)
################Choose a suitable imputation method for the other variables ###########
# The commented block below records a one-off experiment: a known cell was
# blanked and re-imputed by mean / median / KNN to compare accuracy.
# ####Taking a subset of data
# #train[40,1]= 17.5 #######Data noted to compare ####Actual value
#
# ###Mean method
# train$fare_amount[is.na(train$fare_amount)] = mean(train$fare_amount, na.rm = T)
#
# #Mean= 15.12488
#
# ####Median Method
#
# train$fare_amount[is.na(train$fare_amount)] = median(train$fare_amount, na.rm = T)
#
# #Median= 8.5
#
# ######KNN Method
#
# train = knnImputation(train, k = 5)
# #KNN= 15.90051
########Keep a backup copy of the data ###########
df=train
# Drop rows whose target (fare_amount, column 1) is missing
train=train[complete.cases(train[,1]),]
#As KNN gave the value closest to the actual value, use KNN for imputation
library(DMwR)
train=knnImputation(train, k=5)
missingvalue(train)
#####################OUTLIER ANALYSIS############################################
df=train
############outliers in fare_amount
#Treat negative fares, and fares above 30 (domain cutoff -- confirm), as NA
train$fare_amount=ifelse(train$fare_amount<0, NA, train$fare_amount)
train$fare_amount=ifelse(train$fare_amount>30,NA, train$fare_amount)
#################outliers in passenger_count
##################all values greater than 8 are converted to NA
unique(train$passenger_count)
##################Convert more than 8 to NA ##########
# NOTE(review): this if() would error on an NA passenger_count; it relies on
# the mode imputation above having removed all NAs first.
for (i in 1:nrow(train)){
if (as.integer(train$passenger_count[i]) > 8){
train$passenger_count[i]=NA
}
}
#######################Outliers in location points ##########
#range of the locations
range(train$pickup_longitude)
range(train$pickup_latitude)
range(train$dropoff_longitude)
range(train$dropoff_latitude)
# Boxplot each coordinate column (2:5) against fare_amount; plots are
# assigned into gn1..gn4 via assign().
cnames=colnames(train[,c(2:5)])
for (i in 1:length(cnames))
{
assign(paste0("gn",i), ggplot(aes_string(y = (cnames[i]), x = "fare_amount"), data = train)+
stat_boxplot(geom = "errorbar", width = 0.5) +
geom_boxplot(outlier.colour="red", fill = "grey" ,outlier.shape=18,
outlier.size=1, notch=FALSE) +
theme(legend.position="bottom")+
labs(y=cnames[i],x="y")+
ggtitle(paste("Box plot of fare amount",cnames[i])))
}
############# Plotting plots together
gridExtra::grid.arrange(gn1, gn2, ncol=2)
gridExtra::grid.arrange(gn3, gn4, ncol=2)
#Replace all boxplot outliers in the coordinate columns with NA and impute
#create NA on outliers
for(i in cnames){
val = train[,i][train[,i] %in% boxplot.stats(train[,i])$out]
print(length(val))
train[,i][train[,i] %in% val] = NA
}
missingvalue(train)
##########replace missing passenger_count with the mode
mode(train$passenger_count)
train$passenger_count[is.na(train$passenger_count)] = mode(train$passenger_count)
# Drop rows whose fare_amount is now NA (the >30 / <0 values above)
train=train[complete.cases(train[,1]), ]
#replace all other missing values with the column mean
train$fare_amount[is.na(train$fare_amount)] = mean(train$fare_amount, na.rm=T)
train$pickup_longitude[is.na(train$pickup_longitude)] = mean(train$pickup_longitude, na.rm=T)
train$pickup_latitude[is.na(train$pickup_latitude)] = mean(train$pickup_latitude, na.rm=T)
train$dropoff_longitude[is.na(train$dropoff_longitude)] = mean(train$dropoff_longitude, na.rm=T)
train$dropoff_latitude[is.na(train$dropoff_latitude)] = mean(train$dropoff_latitude, na.rm=T)
missingvalue(train)
#now convert passenger_count into a factor
train$passenger_count=as.factor(train$passenger_count)
#########################FEATURE SCALING/ENGINEERING######################
df=train
#create new variable: great-circle pickup->dropoff distance
library(geosphere)
train$dist= distHaversine(cbind(train$pickup_longitude, train$pickup_latitude), cbind(train$dropoff_longitude,train$dropoff_latitude))
#the output is in metres; change it to kms
train$dist=as.numeric(train$dist)/1000
df=train
train=df
###########################################CORRELATION ANALYSIS ####################################
library(corrgram)
# Column 6 (passenger_count factor) is excluded from the numeric correlations
corrgram(train[,-6], order = F,
upper.panel=panel.pie, text.panel=panel.txt, main = "Correlation Plot")
#####correlation between the numeric variables
num_cor=round(cor(train[,-6]), 3)
#Eliminate rows where pickup and dropoff locations are identical (if any)
train=subset(train, !(train$pickup_longitude==train$dropoff_longitude & train$pickup_latitude==train$dropoff_latitude))
#######remove unnecessary variables
# NOTE(review): `abc` is never defined above, so rm() warns on it -- confirm.
rm(abc,df,gn1,gn2,gn3,gn4,cnames,i,val)
########################## MODEL DEVELOPMENT ###################################################3
#create sampling and divide data into 80% train / 20% test
set.seed(123)
train_index = sample(1:nrow(train), 0.8 * nrow(train))
train1 = train[train_index,]#do not add column if already removed
test1 = train[-train_index,]#do not add column if already removed
########### Define MAPE - the error metric used to score the models ################
# Mean Absolute Percentage Error between actuals y and predictions yhat.
# NOTE: undefined (Inf/NaN) when any actual value is zero.
MAPE <- function(y, yhat) {
  pct_err <- abs((y - yhat) / y) * 100
  mean(pct_err)
}
############################################Decision Tree#####################################
library(rpart)
fit = rpart(fare_amount ~. , data = train1, method = "anova", minsplit=5)
summary(fit)
predictions_DT = predict(fit, test1[,-1])
MAPE(test1[,1], predictions_DT)
write.csv(predictions_DT, "DT_R_PRed5.csv", row.names = F)
#Error 27.75005
#Accuracy 73.25
########################################Random Forest###############################################
library(randomForest)
RF_model = randomForest(fare_amount ~. , train1, importance = TRUE, ntree=100)
RF_Predictions = predict(RF_model, test1[,-1])
MAPE(test1[,1], RF_Predictions)
importance(RF_model, type = 1)
#error 22.50844 for n=100
#accuracy = 77.50
######################################Linear Regression###########################################################
lm_model = lm(fare_amount ~. , data = train1)
summary(lm_model)
predictions_LR = predict(lm_model, test1[,-1])
MAPE(test1[,1], predictions_LR)
#error 26.12016
#Accuracy 73.88
#####################################KNN Implementation############################################################
library(class)
# NOTE(review): class::knn is a *classifier*; passing the continuous target
# fare_amount treats every distinct fare as a class label. A regression
# variant (e.g. FNN::knn.reg) is the usual choice here -- confirm intent.
KNN_Predictions = knn(train1[, 2:7], test1[, 2:7], train1$fare_amount, k = 1)
#convert the values into numeric
# knn() returns a factor of "class" labels; recover numeric fares via character.
KNN_Predictions=as.numeric(as.character((KNN_Predictions)))
#Calculate MAPE
MAPE(test1[,1], KNN_Predictions)
#error 33.7978
#Accuracy = 66.21
##############Model Selection and Final Tuning##########################
#Random Forest with using mtry = 2 that is fixing only two variables to split at each tree node
RF_model = randomForest(fare_amount ~. , train1, importance = TRUE, ntree=200, mtry=2)
RF_Predictions = predict(RF_model, test1[,-1])
MAPE(test1[,1], RF_Predictions)
importance(RF_model, type = 1)
#error 22.38 for n=100
#Accuracy 77.7
rm(a, num_cor,pre, i)
###################################Predict VAlues in Test Data###################
pred_data=read.csv("test.csv", header= T)[,-1]
#########create distance variable
pred_data=subset(pred_data, !(pred_data$pickup_longitude==pred_data$dropoff_longitude & pred_data$pickup_latitude==pred_data$dropoff_latitude))
pred_data[pred_data==0]= NA
# COnnvert Data into proper data types
str(pred_data)
pred_data$passenger_count=as.factor(pred_data$passenger_count)
#calculate distance
pred_data$dist= distHaversine(cbind(pred_data$pickup_longitude, pred_data$pickup_latitude), cbind(pred_data$dropoff_longitude,pred_data$dropoff_latitude))
#the output is in metres, Change it to kms
pred_data$dist=as.numeric(pred_data$dist)/1000
# Create the target variable placeholder so the prediction frame has the
# same variables as the training frame (fare_amount ends up as column 7).
pred_data$fare_amount=0
pred_data=pred_data[,c(1,2,3,4,5,6,7)]
#Random Forest -- retrain on the full cleaned training set with tuned mtry.
RF_model = randomForest(fare_amount ~. , train, importance = TRUE, ntree=200, mtry=2)
# Bug fix: fare_amount is the LAST column of pred_data here (it was the
# FIRST column of train), so pred_data[,-1] dropped the predictor
# pickup_longitude while keeping the dummy target. Pass the whole frame:
# predict() selects predictors from the model formula and ignores the
# placeholder fare_amount column.
pred_data$fare_amount = predict(RF_model, pred_data)
write.csv(pred_data, "Predicted_Data.csv", row.names = F)
|
/Project_Cab_Fare.R
|
no_license
|
ranjankumarhashedin/Cab-Fare-Prediction
|
R
| false
| false
| 10,381
|
r
|
rm(list=ls(all=T))
setwd("C:/Users/Lenovo/Documents/LM/EdWisor/Projects/Project 1")
getwd()
###############################LOAD LIBRARIES ################################
x=c("ggplot2", "DMwR", "corrgram", "Hmisc", "rpart", "randomForest", "geosphere")
install.packages(x)
lapply(x, require, character.only = TRUE)
rm(x)
################ LOAD THE GIVEN TRAIN DATA ##############################
train= read.csv("train_cab.csv", header = T)[,-2]
########Check data shape ########
str(train)
#################convert fare to Numeric type and Passenger to Integer type###################
train$fare_amount=as.numeric(as.character(train$fare_amount))
train$passenger_count=as.integer(train$passenger_count)
############## Eliminate cells with same pickup and dropoff location
train=subset(train, !(train$pickup_longitude==train$dropoff_longitude & train$pickup_latitude==train$dropoff_latitude))
########replace "0's" with NA
train[train==0]= NA
#######################Missing Value Analysis ##############################
#########function to calculate missing values ###################
# Summarize per-column missing values of a data frame.
# Prints a table of NA counts and percentages per variable and returns a
# ggplot2 bar chart of the percentages, ordered descending.
#
# Args:
#   data: a data.frame (or matrix-like object accepted by apply()).
# Returns: the ggplot object (last expression).
missingvalue= function(data){
  # Count NAs in each column.
  missing_value = data.frame(apply(data, 2 , function(x){sum(is.na(x))}))
  colnames(missing_value)="Missing_Value_count"
  # Percentage relative to the row count of the *input* data.
  # (Bug fix: previously divided by nrow(train), a global object, so the
  # percentages were wrong for any argument other than train.)
  missing_value$percentage=apply(missing_value , 1 , function(x){x/nrow(data)*100})
  missing_value = cbind(row.names(missing_value), missing_value)
  row.names(missing_value)=NULL
  colnames(missing_value)[1]="Variables"
  print(missing_value)
  ###########plot Missing Values#######################
  library(ggplot2)
  ggplot(data = missing_value, aes(x=reorder(Variables , -percentage),y = percentage))+
    geom_bar(stat = "identity",fill = "blue")+xlab("Variables")+
    ggtitle("Missing Values") + theme_bw()
}
##################Calculate Missing Values########################
missingvalue(train)
###########As PAssenger_count is a categorical Variable , so we will use mode for Imputation#########
##########calculate mode - create function ###########
# Statistical mode of a vector/factor: the most frequent value, coerced to
# numeric. Ties break toward the value encountered first in `data`; NA is
# counted as a candidate value like any other.
# NOTE: this masks base::mode() (the storage-mode query) while it is defined.
mode= function(data){
distinct_vals <- unique(data)
counts <- tabulate(match(data, distinct_vals))
as.numeric(as.character(distinct_vals[which.max(counts)]))
}
mode(train$passenger_count)
#################################IMPUTATION###########################
#impute with the mode
train$passenger_count[is.na(train$passenger_count)] = mode(train$passenger_count)
################Choose for suitable method for imputation of missing values for other variables ###########
# ####Taking a subset of data
# #train[40,1]= 17.5 #######Data noted to compare ####Actual value
#
# ###Mean method
# train$fare_amount[is.na(train$fare_amount)] = mean(train$fare_amount, na.rm = T)
#
# #Mean= 15.12488
#
# ####Median Method
#
# train$fare_amount[is.na(train$fare_amount)] = median(train$fare_amount, na.rm = T)
#
# #Median= 8.5
#
# ######KNN Method
#
# train = knnImputation(train, k = 5)
# #KNN= 15.90051
########Saving the data in df set ###########
df=train
train=train[complete.cases(train[,1]),]
#As KNN is giving the value closest to Actual Value, We choose KNN for missing value imputation
library(DMwR)
train=knnImputation(train, k=5)
missingvalue(train)
#####################OUTLIER ANALYSIS############################################
df=train
############outliers in fare_amount
#Remove negative values from 'fare_amount'
train$fare_amount=ifelse(train$fare_amount<0, NA, train$fare_amount)
train$fare_amount=ifelse(train$fare_amount>30,NA, train$fare_amount)
#################outliers in passenger_count
##################all values greater than 8 are converted to NA
unique(train$passenger_count)
##################Convert more then 8 to NA ##########
for (i in 1:nrow(train)){
if (as.integer(train$passenger_count[i]) > 8){
train$passenger_count[i]=NA
}
}
#######################Outliers in location points ##########
#range of the locations
range(train$pickup_longitude)
range(train$pickup_latitude)
range(train$dropoff_longitude)
range(train$dropoff_latitude)
cnames=colnames(train[,c(2:5)])
for (i in 1:length(cnames))
{
assign(paste0("gn",i), ggplot(aes_string(y = (cnames[i]), x = "fare_amount"), data = train)+
stat_boxplot(geom = "errorbar", width = 0.5) +
geom_boxplot(outlier.colour="red", fill = "grey" ,outlier.shape=18,
outlier.size=1, notch=FALSE) +
theme(legend.position="bottom")+
labs(y=cnames[i],x="y")+
ggtitle(paste("Box plot of fare amount",cnames[i])))
}
############# Plotting plots together
gridExtra::grid.arrange(gn1, gn2, ncol=2)
gridExtra::grid.arrange(gn3, gn4, ncol=2)
#Replace all outliers with NA and impute
#create NA on outliers
for(i in cnames){
val = train[,i][train[,i] %in% boxplot.stats(train[,i])$out]
print(length(val))
train[,i][train[,i] %in% val] = NA
}
missingvalue(train)
##########replace missing value with mode
mode(train$passenger_count)
train$passenger_count[is.na(train$passenger_count)] = mode(train$passenger_count)
train=train[complete.cases(train[,1]), ]
#replace all other missing value with mean
train$fare_amount[is.na(train$fare_amount)] = mean(train$fare_amount, na.rm=T)
train$pickup_longitude[is.na(train$pickup_longitude)] = mean(train$pickup_longitude, na.rm=T)
train$pickup_latitude[is.na(train$pickup_latitude)] = mean(train$pickup_latitude, na.rm=T)
train$dropoff_longitude[is.na(train$dropoff_longitude)] = mean(train$dropoff_longitude, na.rm=T)
train$dropoff_latitude[is.na(train$dropoff_latitude)] = mean(train$dropoff_latitude, na.rm=T)
missingvalue(train)
#now convert Passenger_count into factor
train$passenger_count=as.factor(train$passenger_count)
#########################FEATURE SCALING/ENGINEERING######################
df=train
#create new variable
library(geosphere)
train$dist= distHaversine(cbind(train$pickup_longitude, train$pickup_latitude), cbind(train$dropoff_longitude,train$dropoff_latitude))
#the output is in metres, Change it to kms
train$dist=as.numeric(train$dist)/1000
df=train
train=df
###########################################CORRELATION AMALYSIS ####################################
library(corrgram)
corrgram(train[,-6], order = F,
upper.panel=panel.pie, text.panel=panel.txt, main = "Correlation Plot")
#####correlation between the numeric variables
num_cor=round(cor(train[,-6]), 3)
#Eliminate the pickup and dropoff locations if same (if any)
train=subset(train, !(train$pickup_longitude==train$dropoff_longitude & train$pickup_latitude==train$dropoff_latitude))
#######remove unnecessary variables
rm(abc,df,gn1,gn2,gn3,gn4,cnames,i,val)
########################## MODEL DEVELOPMENT ###################################################3
#create sampling and divide data into train and test
set.seed(123)
train_index = sample(1:nrow(train), 0.8 * nrow(train))
train1 = train[train_index,]#do not add column if already removed
test1 = train[-train_index,]#do not add column if already removed
########### Define Mape - The error matrix to calculate the error and accuracy ################
# Mean Absolute Percentage Error (MAPE) between observed y and predicted yhat.
# Returns the average of |y - yhat| / y expressed in percent.
# Note: undefined (Inf/NaN) when any element of y is zero.
MAPE = function(y, yhat){
  pct_err <- abs((y - yhat) / y) * 100
  mean(pct_err)
}
############################################Decision Tree#####################################
library(rpart)
fit = rpart(fare_amount ~. , data = train1, method = "anova", minsplit=5)
summary(fit)
predictions_DT = predict(fit, test1[,-1])
MAPE(test1[,1], predictions_DT)
write.csv(predictions_DT, "DT_R_PRed5.csv", row.names = F)
#Error 27.75005
#Accuracy 73.25
########################################Random Forest###############################################
library(randomForest)
RF_model = randomForest(fare_amount ~. , train1, importance = TRUE, ntree=100)
RF_Predictions = predict(RF_model, test1[,-1])
MAPE(test1[,1], RF_Predictions)
importance(RF_model, type = 1)
#error 22.50844 for n=100
#accuracy = 77.50
######################################Linear Regression###########################################################
lm_model = lm(fare_amount ~. , data = train1)
summary(lm_model)
predictions_LR = predict(lm_model, test1[,-1])
MAPE(test1[,1], predictions_LR)
#error 26.12016
#Accuracy 73.88
#####################################KNN Implementation############################################################
library(class)
# NOTE(review): class::knn is a *classifier*; passing the continuous target
# fare_amount treats every distinct fare as a class label. A regression
# variant (e.g. FNN::knn.reg) is the usual choice here -- confirm intent.
KNN_Predictions = knn(train1[, 2:7], test1[, 2:7], train1$fare_amount, k = 1)
#convert the values into numeric
# knn() returns a factor of "class" labels; recover numeric fares via character.
KNN_Predictions=as.numeric(as.character((KNN_Predictions)))
#Calculate MAPE
MAPE(test1[,1], KNN_Predictions)
#error 33.7978
#Accuracy = 66.21
##############Model Selection and Final Tuning##########################
#Random Forest with using mtry = 2 that is fixing only two variables to split at each tree node
RF_model = randomForest(fare_amount ~. , train1, importance = TRUE, ntree=200, mtry=2)
RF_Predictions = predict(RF_model, test1[,-1])
MAPE(test1[,1], RF_Predictions)
importance(RF_model, type = 1)
#error 22.38 for n=100
#Accuracy 77.7
rm(a, num_cor,pre, i)
###################################Predict VAlues in Test Data###################
pred_data=read.csv("test.csv", header= T)[,-1]
#########create distance variable
pred_data=subset(pred_data, !(pred_data$pickup_longitude==pred_data$dropoff_longitude & pred_data$pickup_latitude==pred_data$dropoff_latitude))
pred_data[pred_data==0]= NA
# COnnvert Data into proper data types
str(pred_data)
pred_data$passenger_count=as.factor(pred_data$passenger_count)
#calculate distance
pred_data$dist= distHaversine(cbind(pred_data$pickup_longitude, pred_data$pickup_latitude), cbind(pred_data$dropoff_longitude,pred_data$dropoff_latitude))
#the output is in metres, Change it to kms
pred_data$dist=as.numeric(pred_data$dist)/1000
# Create the target variable placeholder so the prediction frame has the
# same variables as the training frame (fare_amount ends up as column 7).
pred_data$fare_amount=0
pred_data=pred_data[,c(1,2,3,4,5,6,7)]
#Random Forest -- retrain on the full cleaned training set with tuned mtry.
RF_model = randomForest(fare_amount ~. , train, importance = TRUE, ntree=200, mtry=2)
# Bug fix: fare_amount is the LAST column of pred_data here (it was the
# FIRST column of train), so pred_data[,-1] dropped the predictor
# pickup_longitude while keeping the dummy target. Pass the whole frame:
# predict() selects predictors from the model formula and ignores the
# placeholder fare_amount column.
pred_data$fare_amount = predict(RF_model, pred_data)
write.csv(pred_data, "Predicted_Data.csv", row.names = F)
|
library(smrdfortran)
library(SMRD)
test = 2
if(test == 1){
ndist1 = 1
ndist2 = 3
beta0 = 30.27241
beta1 = -5.100121
stress = 270
sigma = 0.2894549
ugamma = 5.365834
sgamma = 0.03140004
w = logb(5000)
}
if(test == 2){
ndist1 = 1
ndist2 = 2
beta0 = 9.3562771160
beta1 = -2.9350357450
sigma = 0.0002000000
ugamma = 3.8630272310
sgamma = 0.1183292607
stress = c(58.00, 60.00, 50.00, 57.75, 66.13, 48.04,
55.16, 51.54, 62.76, 47.50, 48.00, 55.58)
w = c(16.11809565, 16.11809565, 20.72326584, 20.72326584,
11.75529108, 12.20936721, 16.11809565, 11.66641021,
11.83384887, 16.11809565, 16.11809565, 12.21484406)
}
if(test == 3){
j = 1
ndist1 = 1
ndist2 = 1
beta0 = 9.3562771160
beta1 = -2.9350357450
sigma = 0.0002000000
ugamma = 3.8630272310
sgamma = 0.1183292607
stress = c(58.00, 60.00, 50.00, 57.75, 66.13, 48.04,
55.16, 51.54, 62.76, 47.50, 48.00, 55.58)
stress = stress[j]
w = c(16.11809565, 16.11809565, 20.72326584, 20.72326584,
11.75529108, 12.20936721, 16.11809565, 11.66641021,
11.83384887, 16.11809565, 16.11809565, 12.21484406)
w = w[j]
}
debug1 = F
if(!exists("kprint")) kprint = 0
max.length <- max(length(beta0), length(beta1), length(sigma),
length(ugamma), length(sgamma), length(stress), length(w))
beta0 <- SMRD:::expand.vec(beta0, max.length)
beta1 <- SMRD:::expand.vec(beta1, max.length)
sigma <- SMRD:::expand.vec(sigma, max.length)
ugamma <- SMRD:::expand.vec(ugamma, max.length)
sgamma <- SMRD:::expand.vec(sgamma, max.length)
stress <- SMRD:::expand.vec(stress, max.length)
w <- SMRD:::expand.vec(w, max.length)
if (debug1) browser()
zout <- .Fortran("sxpdf3", as.integer(ndist1), as.integer(ndist2),
as.double(beta0), as.double(beta1), as.double(stress),
as.double(sigma), as.double(ugamma), as.double(sgamma),
as.double(w), as.integer(max.length), answer = double(max.length),
ier = integer(max.length))
new = SMRD:::SXPDF3(as.integer(ndist1),
as.integer(ndist2),
as.double(beta0),
as.double(beta1),
as.double(stress),
as.double(sigma),
as.double(ugamma),
as.double(sgamma),
as.double(w),
as.integer(max.length),
answer = double(max.length),
ier = integer(max.length),
as.integer(kprint))
|
/inst/test_objs/SXPDF3_test.R
|
no_license
|
anhnguyendepocen/SMRD
|
R
| false
| false
| 2,700
|
r
|
library(smrdfortran)
library(SMRD)
test = 2
if(test == 1){
ndist1 = 1
ndist2 = 3
beta0 = 30.27241
beta1 = -5.100121
stress = 270
sigma = 0.2894549
ugamma = 5.365834
sgamma = 0.03140004
w = logb(5000)
}
if(test == 2){
ndist1 = 1
ndist2 = 2
beta0 = 9.3562771160
beta1 = -2.9350357450
sigma = 0.0002000000
ugamma = 3.8630272310
sgamma = 0.1183292607
stress = c(58.00, 60.00, 50.00, 57.75, 66.13, 48.04,
55.16, 51.54, 62.76, 47.50, 48.00, 55.58)
w = c(16.11809565, 16.11809565, 20.72326584, 20.72326584,
11.75529108, 12.20936721, 16.11809565, 11.66641021,
11.83384887, 16.11809565, 16.11809565, 12.21484406)
}
if(test == 3){
j = 1
ndist1 = 1
ndist2 = 1
beta0 = 9.3562771160
beta1 = -2.9350357450
sigma = 0.0002000000
ugamma = 3.8630272310
sgamma = 0.1183292607
stress = c(58.00, 60.00, 50.00, 57.75, 66.13, 48.04,
55.16, 51.54, 62.76, 47.50, 48.00, 55.58)
stress = stress[j]
w = c(16.11809565, 16.11809565, 20.72326584, 20.72326584,
11.75529108, 12.20936721, 16.11809565, 11.66641021,
11.83384887, 16.11809565, 16.11809565, 12.21484406)
w = w[j]
}
debug1 = F
if(!exists("kprint")) kprint = 0
max.length <- max(length(beta0), length(beta1), length(sigma),
length(ugamma), length(sgamma), length(stress), length(w))
beta0 <- SMRD:::expand.vec(beta0, max.length)
beta1 <- SMRD:::expand.vec(beta1, max.length)
sigma <- SMRD:::expand.vec(sigma, max.length)
ugamma <- SMRD:::expand.vec(ugamma, max.length)
sgamma <- SMRD:::expand.vec(sgamma, max.length)
stress <- SMRD:::expand.vec(stress, max.length)
w <- SMRD:::expand.vec(w, max.length)
if (debug1) browser()
zout <- .Fortran("sxpdf3", as.integer(ndist1), as.integer(ndist2),
as.double(beta0), as.double(beta1), as.double(stress),
as.double(sigma), as.double(ugamma), as.double(sgamma),
as.double(w), as.integer(max.length), answer = double(max.length),
ier = integer(max.length))
new = SMRD:::SXPDF3(as.integer(ndist1),
as.integer(ndist2),
as.double(beta0),
as.double(beta1),
as.double(stress),
as.double(sigma),
as.double(ugamma),
as.double(sgamma),
as.double(w),
as.integer(max.length),
answer = double(max.length),
ier = integer(max.length),
as.integer(kprint))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scatter.R
\name{scatterServer}
\alias{scatterServer}
\title{scatterServer: shiny module server for scatterplot.}
\usage{
scatterServer(id, data, data_label, data_varStruct = NULL, nfactor.limit = 10)
}
\arguments{
\item{id}{id}
\item{data}{Reactive data}
\item{data_label}{Reactive data label}
\item{data_varStruct}{Reactive List of variable structure, Default: NULL}
\item{nfactor.limit}{nlevels limit in factor variable, Default: 10}
}
\value{
Shiny module server for scatterplot.
}
\description{
Shiny module server for scatterplot.
}
\details{
Shiny module server for scatterplot.
}
\examples{
library(shiny)
library(ggplot2)
library(ggpubr)
ui <- fluidPage(
sidebarLayout(
sidebarPanel(
scatterUI("scatter")
),
mainPanel(
plotOutput("scatter_plot"),
ggplotdownUI("scatter")
)
)
)
server <- function(input, output, session) {
data <- reactive(mtcars)
data.label <- reactive(jstable::mk.lev(mtcars))
out_scatter <- scatterServer("scatter",
data = data, data_label = data.label,
data_varStruct = NULL
)
output$scatter_plot <- renderPlot({
print(out_scatter())
})
}
}
|
/man/scatterServer.Rd
|
permissive
|
jinseob2kim/jsmodule
|
R
| false
| true
| 1,216
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scatter.R
\name{scatterServer}
\alias{scatterServer}
\title{scatterServer: shiny module server for scatterplot.}
\usage{
scatterServer(id, data, data_label, data_varStruct = NULL, nfactor.limit = 10)
}
\arguments{
\item{id}{id}
\item{data}{Reactive data}
\item{data_label}{Reactive data label}
\item{data_varStruct}{Reactive List of variable structure, Default: NULL}
\item{nfactor.limit}{nlevels limit in factor variable, Default: 10}
}
\value{
Shiny module server for scatterplot.
}
\description{
Shiny module server for scatterplot.
}
\details{
Shiny module server for scatterplot.
}
\examples{
library(shiny)
library(ggplot2)
library(ggpubr)
ui <- fluidPage(
sidebarLayout(
sidebarPanel(
scatterUI("scatter")
),
mainPanel(
plotOutput("scatter_plot"),
ggplotdownUI("scatter")
)
)
)
server <- function(input, output, session) {
data <- reactive(mtcars)
data.label <- reactive(jstable::mk.lev(mtcars))
out_scatter <- scatterServer("scatter",
data = data, data_label = data.label,
data_varStruct = NULL
)
output$scatter_plot <- renderPlot({
print(out_scatter())
})
}
}
|
#############################################################################################
###### master script part 1 - setting up and creating or loading of wide data table #########
#############################################################################################
# This script sets up a run of the post-processing analysis
# installing the R-package:
rm(list = ls())
setwd("~/pkg/paper/PostprocessingSST/")
options(max.print = 1e3)
#install.packages('devtools')
library(devtools)
devtools::install_github('ClaudioHeinrich/pp.sst')
library(pp.sst)
#start timer:
time_s1 = proc.time()
# choose an abbreviation for this run and give it a description, see the README file for more details.
name_abbr = "pp.sst/Full"
description = 'Working on the full data set.'
#### specify your directories: ###
# Directory for derived datasets, this should change when you change name_abbr
save_dir = file.path('~','SST','Derived', name_abbr)
dir.create(save_dir,recursive = TRUE,showWarnings = FALSE)
# Directory for plots, this should change when you change name_abbr
plot_dir = file.path('~','SST','Figures', name_abbr)
dir.create(plot_dir, recursive = TRUE, showWarnings = FALSE)
# choose the area of the globe to consider: Reduce this to a smaller window when testing scripts.
lat_box = c(0, 65)
lon_box = c(-90, 40)
# a couple of parameters:
ens_size = 9 # size of forecast ensemble
training_years = 1985:2000
validation_years = 2001:2016
months = 1:2
mc_cores = 6 # number of cores for parallelization
### subset data set ###
# note that some example data DT is included in the package, check the documentation by typing ?DT.
# If you are interested in getting access to the full dataset considered in the paper please contact the authors.
DT = DT[Lon >= lon_box[1] & Lon <= lon_box[2] & Lat >= lat_box[1] & Lat <= lat_box[2]][month %in% months][year %in% c(training_years,validation_years)]
# tidy up DT:
setkey(x = DT,year,month,Lon,Lat)
DT[, YM := 12*year + month]
DT = DT[order(year,month,Lon,Lat)]
setcolorder(DT,c("year","month",'Lon','Lat','YM','grid_id','SST_bar','Ens_bar','Ens_sd'))
#### time, update script counter, save ####
time_s1 = proc.time() - time_s1
script_counter = 1
# Bug fix: paste0(save_dir, "setup.RData") dropped the path separator,
# writing e.g. ".../Fullsetup.RData" next to (not inside) save_dir;
# file.path() inserts the separator correctly.
save.image(file = file.path(save_dir, "setup.RData"))
|
/pp.sst/scripts/01.master.setup.R
|
permissive
|
jasa-acs/Multivariate-Postprocessing-Methods-for-High-Dimensional-Seasonal-Weather-Forecasts
|
R
| false
| false
| 2,291
|
r
|
#############################################################################################
###### master script part 1 - setting up and creating or loading of wide data table #########
#############################################################################################
# This script sets up a run of the post-processing analysis
# installing the R-package:
rm(list = ls())
setwd("~/pkg/paper/PostprocessingSST/")
options(max.print = 1e3)
#install.packages('devtools')
library(devtools)
devtools::install_github('ClaudioHeinrich/pp.sst')
library(pp.sst)
#start timer:
time_s1 = proc.time()
# choose an abbreviation for this run and give it a description, see the README file for more details.
name_abbr = "pp.sst/Full"
description = 'Working on the full data set.'
#### specify your directories: ###
# Directory for derived datasets, this should change when you change name_abbr
save_dir = file.path('~','SST','Derived', name_abbr)
dir.create(save_dir,recursive = TRUE,showWarnings = FALSE)
# Directory for plots, this should change when you change name_abbr
plot_dir = file.path('~','SST','Figures', name_abbr)
dir.create(plot_dir, recursive = TRUE, showWarnings = FALSE)
# choose the area of the globe to consider: Reduce this to a smaller window when testing scripts.
lat_box = c(0, 65)
lon_box = c(-90, 40)
# a couple of parameters:
ens_size = 9 # size of forecast ensemble
training_years = 1985:2000
validation_years = 2001:2016
months = 1:2
mc_cores = 6 # number of cores for parallelization
### subset data set ###
# note that some example data DT is included in the package, check the documentation by typing ?DT.
# If you are interested in getting access to the full dataset considered in the paper please contact the authors.
DT = DT[Lon >= lon_box[1] & Lon <= lon_box[2] & Lat >= lat_box[1] & Lat <= lat_box[2]][month %in% months][year %in% c(training_years,validation_years)]
# tidy up DT:
setkey(x = DT,year,month,Lon,Lat)
DT[, YM := 12*year + month]
DT = DT[order(year,month,Lon,Lat)]
setcolorder(DT,c("year","month",'Lon','Lat','YM','grid_id','SST_bar','Ens_bar','Ens_sd'))
#### time, update script counter, save ####
time_s1 = proc.time() - time_s1
script_counter = 1
# Bug fix: paste0(save_dir, "setup.RData") dropped the path separator,
# writing e.g. ".../Fullsetup.RData" next to (not inside) save_dir;
# file.path() inserts the separator correctly.
save.image(file = file.path(save_dir, "setup.RData"))
|
library(Rcpp)
# diffc(b, deg, freq): residual vector for a degree-based model --
#   diff[i] = deg[i] - sum_j freq[j] / (1 + exp(-b[i] - b[j]))
# A root (diff == 0) matches the model's expected degrees to the observed
# ones. Compiled inline via Rcpp; the C++ source below is a string literal
# and is left untouched. O(n^2) per evaluation in the number of unique degrees.
cppFunction('
NumericVector diffc(NumericVector b, NumericVector deg, NumericVector freq)
{
int n=b.size();
NumericVector diff(n);
int i,j;
for(i=0;i<n;i++)
{
diff[i]=deg[i];
for(j=0;j<n;j++)
{
diff[i]-=freq[j]/(1+exp(-b[i]-b[j]));
}
}
return diff;
}
')
library(rootSolve)
# Solve for the node parameters b of the degree model: finds a root of diffc
# (compiled above via Rcpp) with rootSolve::multiroot.
#   deg:  unique degree values; freq: their observed frequencies.
# Returns the multiroot() result list (root estimates, estim.precis, ...).
solvebeta=function(deg,freq)
{
# Standard configuration-model style initial guess: log(k / sqrt(2m)).
bg=log(deg/sqrt(sum(deg*freq)))#bguess
zo=multiroot(f=diffc,deg=deg,freq=freq,start=bg)
# NOTE(review): retries from fresh random starts until precision < 1e-3;
# this can loop forever if the solver never converges -- consider a retry cap.
while(zo$estim.precis>1e-3)zo=multiroot(f=diffc,deg=deg,freq=freq,start=runif(length(deg),-0.1,0.1))
return(zo)
}
# Experiment driver: for each tail exponent gamma and each sampled degree
# sequence Sq, read the sequence from disk, solve for the beta parameters,
# write per-sample (deg, b, freq) tables, and record solver precisions.
N=316
gamma=2.0
Sq=0
# NOTE(review): `{}` evaluates to NULL here; c() or vector() is the
# conventional empty accumulator.
rlist={}
Sqlist={}
errlist={}
for(gamma in seq(2.0,3.5,0.5))
{
for(Sq in 1:1000)
{
# Input file: one degree per line.
fin=sprintf("degseq_N%dr%.1fSq%d.txt",N,gamma,Sq)
d=read.table(fin)$V1
# Collapse the raw sequence to (degree, frequency) pairs.
# NOTE(review): `t` masks base::t() (transpose) from here on.
t=as.data.frame(table(d))
deg=as.numeric(as.vector(t$d))
freq=t$Freq
s=solvebeta(deg,freq)
b=s[[1]]     # root estimates
err=s[[4]]   # estim.precis -- assumed 4th element of the multiroot result; confirm
# NOTE(review): growing vectors with c() inside a 4000-iteration loop is
# O(n^2); preallocate or collect in a list if this becomes slow.
rlist=c(rlist,gamma)
Sqlist=c(Sqlist,Sq)
errlist=c(errlist,err)
fout=sprintf("dbroot_N%dr%.1fSq%d.txt",N,gamma,Sq)
write.table(data.frame(deg,b,freq),fout,row.names=F,col.names=F)
}
}
# Summary of solver precisions across all (gamma, Sq) runs.
fout="errlist.txt"
write.table(data.frame(rlist,Sqlist,errlist),file=fout,row.names=F)
|
/WeibinResults/seq316_dbroot_z/solve_root.R
|
no_license
|
Erich-McMillan/physics-research
|
R
| false
| false
| 1,123
|
r
|
library(Rcpp)
# diffc(b, deg, freq): residual vector for a degree-based model --
#   diff[i] = deg[i] - sum_j freq[j] / (1 + exp(-b[i] - b[j]))
# A root (diff == 0) matches the model's expected degrees to the observed
# ones. Compiled inline via Rcpp; the C++ source below is a string literal
# and is left untouched. O(n^2) per evaluation in the number of unique degrees.
cppFunction('
NumericVector diffc(NumericVector b, NumericVector deg, NumericVector freq)
{
int n=b.size();
NumericVector diff(n);
int i,j;
for(i=0;i<n;i++)
{
diff[i]=deg[i];
for(j=0;j<n;j++)
{
diff[i]-=freq[j]/(1+exp(-b[i]-b[j]));
}
}
return diff;
}
')
library(rootSolve)
# Solve for the node parameters b of the degree model: finds a root of diffc
# (compiled above via Rcpp) with rootSolve::multiroot.
#   deg:  unique degree values; freq: their observed frequencies.
# Returns the multiroot() result list (root estimates, estim.precis, ...).
solvebeta=function(deg,freq)
{
# Standard configuration-model style initial guess: log(k / sqrt(2m)).
bg=log(deg/sqrt(sum(deg*freq)))#bguess
zo=multiroot(f=diffc,deg=deg,freq=freq,start=bg)
# NOTE(review): retries from fresh random starts until precision < 1e-3;
# this can loop forever if the solver never converges -- consider a retry cap.
while(zo$estim.precis>1e-3)zo=multiroot(f=diffc,deg=deg,freq=freq,start=runif(length(deg),-0.1,0.1))
return(zo)
}
# Experiment driver: for each tail exponent gamma and each sampled degree
# sequence Sq, read the sequence from disk, solve for the beta parameters,
# write per-sample (deg, b, freq) tables, and record solver precisions.
N=316
gamma=2.0
Sq=0
# NOTE(review): `{}` evaluates to NULL here; c() or vector() is the
# conventional empty accumulator.
rlist={}
Sqlist={}
errlist={}
for(gamma in seq(2.0,3.5,0.5))
{
for(Sq in 1:1000)
{
# Input file: one degree per line.
fin=sprintf("degseq_N%dr%.1fSq%d.txt",N,gamma,Sq)
d=read.table(fin)$V1
# Collapse the raw sequence to (degree, frequency) pairs.
# NOTE(review): `t` masks base::t() (transpose) from here on.
t=as.data.frame(table(d))
deg=as.numeric(as.vector(t$d))
freq=t$Freq
s=solvebeta(deg,freq)
b=s[[1]]     # root estimates
err=s[[4]]   # estim.precis -- assumed 4th element of the multiroot result; confirm
# NOTE(review): growing vectors with c() inside a 4000-iteration loop is
# O(n^2); preallocate or collect in a list if this becomes slow.
rlist=c(rlist,gamma)
Sqlist=c(Sqlist,Sq)
errlist=c(errlist,err)
fout=sprintf("dbroot_N%dr%.1fSq%d.txt",N,gamma,Sq)
write.table(data.frame(deg,b,freq),fout,row.names=F,col.names=F)
}
}
# Summary of solver precisions across all (gamma, Sq) runs.
fout="errlist.txt"
write.table(data.frame(rlist,Sqlist,errlist),file=fout,row.names=F)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{nycc_cd_23}
\alias{nycc_cd_23}
\title{2023-2033 Council District Shapefile}
\format{
## `nycc_cd_23`
An sf collection with 51 rows and 7 columns w/ multipolygon geography:
\describe{
\item{coun_dist}{Council District}
\item{Shape_Leng, Shape_Area}{Length and Area of Council District}
\item{lab_coord}{Coordinates in Matrix Form}
\item{lab_x lab_y}{Longitude and Latitude}
\item{geography}{Multipolygon of Council District}
}
}
\source{
<https://s-media.nyc.gov/agencies/dcp/assets/files/zip/data-tools/bytes/nycc_23b.zip>
}
\usage{
nycc_cd_23
}
\description{
2023-2033 Council District Shapefile
}
\keyword{datasets}
|
/man/nycc_cd_23.Rd
|
permissive
|
NewYorkCityCouncil/councildown
|
R
| false
| true
| 734
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{nycc_cd_23}
\alias{nycc_cd_23}
\title{2023-2033 Council District Shapefile}
\format{
## `nycc_cd_23`
An sf collection with 51 rows and 7 columns w/ multipolygon geography:
\describe{
\item{coun_dist}{Council District}
\item{Shape_Leng, Shape_Area}{Length and Area of Council District}
\item{lab_coord}{Coordinates in Matrix Form}
\item{lab_x lab_y}{Longitude and Latitude}
\item{geography}{Multipolygon of Council District}
}
}
\source{
<https://s-media.nyc.gov/agencies/dcp/assets/files/zip/data-tools/bytes/nycc_23b.zip>
}
\usage{
nycc_cd_23
}
\description{
2023-2033 Council District Shapefile
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/insert_econ_datastore.R
\name{insert_econ_datastore}
\alias{insert_econ_datastore}
\title{Inserts economy datastore data}
\usage{
insert_econ_datastore(log = "")
}
\arguments{
\item{log}{Log file to save output to; defaults to \code{""} (no log file).}
}
\value{
Logical - TRUE for worked ok.
}
\description{
Inserts economy datastore data
}
\examples{
insert_econ_datastore()
}
|
/man/insert_econ_datastore.Rd
|
permissive
|
joeheywood/resdata
|
R
| false
| true
| 424
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/insert_econ_datastore.R
\name{insert_econ_datastore}
\alias{insert_econ_datastore}
\title{Inserts economy datastore data}
\usage{
insert_econ_datastore(log = "")
}
\arguments{
\item{log}{Log file to save output to; defaults to \code{""} (no log file).}
}
\value{
Logical - TRUE for worked ok.
}
\description{
Inserts economy datastore data
}
\examples{
insert_econ_datastore()
}
|
# tswge exercises: psi-weight expansions for several ARMA models.
library(tswge)
# psi-weights for simple MA(1) model (theta) X(t)=(1-.8B)a(t)
psi.weights.wge(theta=.8,lag.max=5)
# psi-weights for simple AR(1) model (phi) (1-.8B)X(t)=a(t)
psi.weights.wge(phi=.8,lag.max=5) #note that psi(j)=.8j
# psi-weights for ARMA(2,1) model (1-1.2B+.6B2)X(t)=(1-.5B)a(t)
psi.weights.wge(phi=c(1.2,-.6),theta=c(.5),lag.max=5)
#5.7.3 Check
# psi-weights for simple AR(2) model X(t)-1.95X(t-1)+1.9X(t-2)=a_t
# NOTE(review): phi=c(-.7) specifies an AR(1), not the AR(2) described in
# the comment above -- the stated model would be phi=c(1.95,-1.9). Confirm.
psi.weights.wge(phi=c(-.7),lag.max=4)
# NOTE(review): fore.arma.wge() is called with no arguments and will error;
# this looks like an unfinished stub.
fore.arma.wge()
|
/Unit05/psi weights.R
|
no_license
|
cmadding/MSDS6373
|
R
| false
| false
| 485
|
r
|
# tswge exercises: psi-weight expansions for several ARMA models.
library(tswge)
# psi-weights for simple MA(1) model (theta) X(t)=(1-.8B)a(t)
psi.weights.wge(theta=.8,lag.max=5)
# psi-weights for simple AR(1) model (phi) (1-.8B)X(t)=a(t)
psi.weights.wge(phi=.8,lag.max=5) #note that psi(j)=.8j
# psi-weights for ARMA(2,1) model (1-1.2B+.6B2)X(t)=(1-.5B)a(t)
psi.weights.wge(phi=c(1.2,-.6),theta=c(.5),lag.max=5)
#5.7.3 Check
# psi-weights for simple AR(2) model X(t)-1.95X(t-1)+1.9X(t-2)=a_t
# NOTE(review): phi=c(-.7) specifies an AR(1), not the AR(2) described in
# the comment above -- the stated model would be phi=c(1.95,-1.9). Confirm.
psi.weights.wge(phi=c(-.7),lag.max=4)
# NOTE(review): fore.arma.wge() is called with no arguments and will error;
# this looks like an unfinished stub.
fore.arma.wge()
|
# Extracted SensusR package example: plot the CDF of inter-reading time
# lags for the accelerometer data bundled with the package.
library(SensusR)
### Name: sensus.plot.lag.cdf
### Title: Plot the CDF of inter-reading time lags.
### Aliases: sensus.plot.lag.cdf
### ** Examples
# Locate the example JSON data shipped inside the installed package.
data.path = system.file("extdata", "example-data", package="SensusR")
data = sensus.read.json.files(data.path)
sensus.plot.lag.cdf(data$AccelerometerDatum)
|
/data/genthat_extracted_code/SensusR/examples/sensus.plot.lag.cdf.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 311
|
r
|
# Extracted SensusR package example: plot the CDF of inter-reading time
# lags for the accelerometer data bundled with the package.
library(SensusR)
### Name: sensus.plot.lag.cdf
### Title: Plot the CDF of inter-reading time lags.
### Aliases: sensus.plot.lag.cdf
### ** Examples
# Locate the example JSON data shipped inside the installed package.
data.path = system.file("extdata", "example-data", package="SensusR")
data = sensus.read.json.files(data.path)
sensus.plot.lag.cdf(data$AccelerometerDatum)
|
# Simulation comparing the sampling variance of the median vs. the mean for
# samples of signed Exp(lambda) draws (a symmetric, Laplace-like variate).
# Side effects: dump()s its own source to a hard-coded Windows path and
# prints three summary lines.
meanmed <-
function(n=10,lambda=1,nSim=100000)
{
# NOTE(review): hard-coded absolute path; this fails on non-Windows systems
# or when c:\StatBook does not exist -- consider removing or parameterizing.
dump("meanmed","c:\\StatBook\\meanmed.r")
# Random +/-1 signs times Exp(lambda) magnitudes, arranged as nSim rows of
# n observations each (one simulated sample per row).
s=sample(x=c(-1,1),replace=T,size=n*nSim,prob=c(.5,.5))
Y=matrix(s*rexp(n=n*nSim,rate=lambda),ncol=n)
# Row-wise sample median and mean across the n observations.
med=apply(X=Y,MARGIN=1,FUN=median)
mea=apply(X=Y,MARGIN=1,FUN=mean)
print(paste("Variance of median=",var(med)))
print(paste("Variance of mean=",var(mea)))
# Ratio > 1 means the median has the smaller variance for this distribution.
print(paste("Ratio=",var(mea)/var(med)))
}
|
/RcodeData/meanmed.r
|
no_license
|
PepSalehi/advancedstatistics
|
R
| false
| false
| 404
|
r
|
# Simulation comparing the sampling variance of the median vs. the mean for
# samples of signed Exp(lambda) draws (a symmetric, Laplace-like variate).
# Side effects: dump()s its own source to a hard-coded Windows path and
# prints three summary lines.
meanmed <-
function(n=10,lambda=1,nSim=100000)
{
# NOTE(review): hard-coded absolute path; this fails on non-Windows systems
# or when c:\StatBook does not exist -- consider removing or parameterizing.
dump("meanmed","c:\\StatBook\\meanmed.r")
# Random +/-1 signs times Exp(lambda) magnitudes, arranged as nSim rows of
# n observations each (one simulated sample per row).
s=sample(x=c(-1,1),replace=T,size=n*nSim,prob=c(.5,.5))
Y=matrix(s*rexp(n=n*nSim,rate=lambda),ncol=n)
# Row-wise sample median and mean across the n observations.
med=apply(X=Y,MARGIN=1,FUN=median)
mea=apply(X=Y,MARGIN=1,FUN=mean)
print(paste("Variance of median=",var(med)))
print(paste("Variance of mean=",var(mea)))
# Ratio > 1 means the median has the smaller variance for this distribution.
print(paste("Ratio=",var(mea)/var(med)))
}
|
# Regression exercise: verify ANOVA sums of squares (SSR, SSRes, SST) by
# direct matrix computation against lm()/anova() output.
library(dplyr)
data <- read.csv('./aplicada2/datos_ex2')
# Sum of squared responses, first with an explicit loop...
acum <- 0
for (y_i in data$y) {
acum <- y_i^2 + acum
}
# ...then the equivalent quadratic form y'y.
t(data$y)%*%data$y
# Full model with all predictors.
m <- lm( y ~ ., data=data)
beta <- as.matrix(coefficients(m))
# Design matrix with the intercept column (x0 = 1) moved to position 1.
X <- data %>% select( -y ) %>% mutate(x0 = 1)
X <- as.matrix(X[,c(5,1,2,3,4)])
Y <- as.matrix(data %>% select( y ))
# Correction term n * ybar^2.
n_y_barra <- (sum(Y)^2)/length(Y)
# Regression and residual sums of squares via the normal equations.
ssr <- t(beta)%*%t(X)%*%Y - n_y_barra
ssres <- t(Y)%*%Y - t(beta)%*%t(X)%*%Y
sst <- ssr + ssres
tano <- anova(m)
ssr_an <- sum(tano$`Sum Sq`)
# Simple regression on x1 only; compare SSR/SST by hand vs. anova().
mx1 <- lm( y ~ x1 , data = data)
y_hat <- predict( mx1, data=data)
sr <- y_hat -mean(Y)
st <- Y -mean(Y)
st <- t(st) %*% st
st
t(Y)%*%Y - n_y_barra
sr <- t(sr) %*% sr
sr
# NOTE(review): this assignment masks base::t() (transpose); any later
# transpose calls in this session would break.
t <- anova(mx1)
t$`Sum Sq`[1]
|
/e.R
|
no_license
|
andrscyv/aplicada2
|
R
| false
| false
| 691
|
r
|
# Regression exercise: verify ANOVA sums of squares (SSR, SSRes, SST) by
# direct matrix computation against lm()/anova() output.
library(dplyr)
data <- read.csv('./aplicada2/datos_ex2')
# Sum of squared responses, first with an explicit loop...
acum <- 0
for (y_i in data$y) {
acum <- y_i^2 + acum
}
# ...then the equivalent quadratic form y'y.
t(data$y)%*%data$y
# Full model with all predictors.
m <- lm( y ~ ., data=data)
beta <- as.matrix(coefficients(m))
# Design matrix with the intercept column (x0 = 1) moved to position 1.
X <- data %>% select( -y ) %>% mutate(x0 = 1)
X <- as.matrix(X[,c(5,1,2,3,4)])
Y <- as.matrix(data %>% select( y ))
# Correction term n * ybar^2.
n_y_barra <- (sum(Y)^2)/length(Y)
# Regression and residual sums of squares via the normal equations.
ssr <- t(beta)%*%t(X)%*%Y - n_y_barra
ssres <- t(Y)%*%Y - t(beta)%*%t(X)%*%Y
sst <- ssr + ssres
tano <- anova(m)
ssr_an <- sum(tano$`Sum Sq`)
# Simple regression on x1 only; compare SSR/SST by hand vs. anova().
mx1 <- lm( y ~ x1 , data = data)
y_hat <- predict( mx1, data=data)
sr <- y_hat -mean(Y)
st <- Y -mean(Y)
st <- t(st) %*% st
st
t(Y)%*%Y - n_y_barra
sr <- t(sr) %*% sr
sr
# NOTE(review): this assignment masks base::t() (transpose); any later
# transpose calls in this session would break.
t <- anova(mx1)
t$`Sum Sq`[1]
|
require(bio.survey)
require(bio.lobster)
require(bio.groundfish)
p = bio.lobster::load.environment()
p$libs = NULL
ff = "LFA35-38Assessment"
fp1 = file.path(project.datadirectory('bio.lobster'),"analysis",ff)
fpf1 = file.path(project.figuredirectory('bio.lobster'),ff)
dir.create(fpf1,showWarnings=F)
dir.create(fp1,showWarnings=F)
p$yrs = 1970:2020
p1 = p
# Configure plotting parameters in `p` and draw a stratified-analysis figure
# of commercial biomass for the given LFA; returns the series plotted.
# NOTE(review): this function looks broken as written:
#  - `aout` is written to CSV, aggregated, and plotted but is never defined
#    inside the function and is not an argument -- it must come from the
#    calling/global environment. TODO confirm where `aout` should be built.
#  - `p$write.csv(...)` indexes `write.csv` out of the list `p` (NULL),
#    which errors; almost certainly meant to be a plain `write.csv(...)` call.
#  - the `survey` argument is accepted but never used.
stratifiedAnalysesCommercial = function( p=p1, survey,lfa, fpf = fpf1, fp = fp1,f=ff,wd=10,ht=8){
p$write.csv(aout,file=file.path(fpf, paste(lfa,'DFOCommB.csv')))
p$add.reference.lines = F
# Plot window spans the estimation years configured upstream in p.
p$time.series.start.year = p$years.to.estimate[1]
p$time.series.end.year = p$years.to.estimate[length(p$years.to.estimate)]
p$metric = 'weights' #weights
p$measure = 'stratified.total' #'stratified.total'
p$figure.title = ""
p$reference.measure = 'median' # mean, geomean
p$file.name = file.path(f,paste(lfa,'DFOrestratifiedtotalweightscommercial.png',sep=""))
p$y.maximum = NULL # NULL # if ymax is too high for one year
p$show.truncated.weights = F #if using ymax and want to show the weights that are cut off as values on figure
p$legend = FALSE
# Overlay a 3-year running median (running mean disabled).
p$running.median = T
p$running.length = 3
p$running.mean = F #can only have rmedian or rmean
p$error.polygon=F
p$error.bars=T
require(bio.lobster)
p$ylim2 = NULL
# Observed lobster counts per year (computed but only used via xx if at all).
xx = aggregate(ObsLobs~yr,data=aout,FUN=sum)
names(xx) =c('x','y')
p$ylim=NULL
ref.out= figure.stratified.analysis(x=aout,out.dir = 'bio.lobster', p=p,wd=wd,ht=ht)
return(aout)
}
aout = stratifiedAnalysesCommercial(survey='DFO',lfa='LFA35-38')
write.csv(aout,file.path(fp1,'LFA3538CommercialB.csv'))
|
/inst/Assessments/LFA35-38Assessment/3.stratifiedAnalysisCommercial.r
|
no_license
|
LobsterScience/bio.lobster
|
R
| false
| false
| 1,673
|
r
|
require(bio.survey)
require(bio.lobster)
require(bio.groundfish)
p = bio.lobster::load.environment()
p$libs = NULL
ff = "LFA35-38Assessment"
fp1 = file.path(project.datadirectory('bio.lobster'),"analysis",ff)
fpf1 = file.path(project.figuredirectory('bio.lobster'),ff)
dir.create(fpf1,showWarnings=F)
dir.create(fp1,showWarnings=F)
p$yrs = 1970:2020
p1 = p
# Configure plotting parameters in `p` and draw a stratified-analysis figure
# of commercial biomass for the given LFA; returns the series plotted.
# NOTE(review): this function looks broken as written:
#  - `aout` is written to CSV, aggregated, and plotted but is never defined
#    inside the function and is not an argument -- it must come from the
#    calling/global environment. TODO confirm where `aout` should be built.
#  - `p$write.csv(...)` indexes `write.csv` out of the list `p` (NULL),
#    which errors; almost certainly meant to be a plain `write.csv(...)` call.
#  - the `survey` argument is accepted but never used.
stratifiedAnalysesCommercial = function( p=p1, survey,lfa, fpf = fpf1, fp = fp1,f=ff,wd=10,ht=8){
p$write.csv(aout,file=file.path(fpf, paste(lfa,'DFOCommB.csv')))
p$add.reference.lines = F
# Plot window spans the estimation years configured upstream in p.
p$time.series.start.year = p$years.to.estimate[1]
p$time.series.end.year = p$years.to.estimate[length(p$years.to.estimate)]
p$metric = 'weights' #weights
p$measure = 'stratified.total' #'stratified.total'
p$figure.title = ""
p$reference.measure = 'median' # mean, geomean
p$file.name = file.path(f,paste(lfa,'DFOrestratifiedtotalweightscommercial.png',sep=""))
p$y.maximum = NULL # NULL # if ymax is too high for one year
p$show.truncated.weights = F #if using ymax and want to show the weights that are cut off as values on figure
p$legend = FALSE
# Overlay a 3-year running median (running mean disabled).
p$running.median = T
p$running.length = 3
p$running.mean = F #can only have rmedian or rmean
p$error.polygon=F
p$error.bars=T
require(bio.lobster)
p$ylim2 = NULL
# Observed lobster counts per year (computed but only used via xx if at all).
xx = aggregate(ObsLobs~yr,data=aout,FUN=sum)
names(xx) =c('x','y')
p$ylim=NULL
ref.out= figure.stratified.analysis(x=aout,out.dir = 'bio.lobster', p=p,wd=wd,ht=ht)
return(aout)
}
aout = stratifiedAnalysesCommercial(survey='DFO',lfa='LFA35-38')
write.csv(aout,file.path(fp1,'LFA3538CommercialB.csv'))
|
# Install packages
#install.packages("xts")
library("xts")
#install.packages("TTR")
library("TTR")
library("plotrix")
library("hydroTSM")
library("zoo")
library("tsbox")
# Load the data file (CSV exported from Excel)
SAmbrogio<-read.csv2(file="c:/Users/Marianna/Documents/Universita/Tesi/R - S. Ambrogio/NoOutliers_S.Ambrogio.CSV", header=TRUE, sep=";")
# Build the time series
## Create the vector of daily dates (1277 days starting 2015-01-01)
datestotal<-seq(as.Date("2015-01-01"), length=1277, by="days")
## Create the time series (all years)
# NOTE(review): columns 31 and 29 are assumed to hold the inflow total-N and
# total-P series respectively -- confirm against the source spreadsheet.
tsNtotINtotal_original<-xts(x=SAmbrogio[,31], order.by=datestotal)
tsPtotINtotal_original<-xts(x=SAmbrogio[,29], order.by=datestotal)
# Linearly interpolate over missing N values (used for axes/year markers)
tsNtotINtotal_spazi<-na.approx(xts(x=SAmbrogio[,31], order.by=datestotal))
# Keep only the dates on which both N and P are observed
tsNtotINtotal<-tsNtotINtotal_original[intersect(which(!is.na(tsPtotINtotal_original)),which(!is.na(tsNtotINtotal_original)))]
tsPtotINtotal<-tsPtotINtotal_original[intersect(which(!is.na(tsPtotINtotal_original)),which(!is.na(tsNtotINtotal_original)))]
# P/N ratio on complete cases, and on the raw series (NAs propagate)
rapportoP_N<-tsPtotINtotal/tsNtotINtotal
rapportoP_N_NA<-tsPtotINtotal_original/tsNtotINtotal_original
# 31-day centred rolling mean of the raw P/N ratio
MArapportoP_N<-na.omit(rollapply(rapportoP_N_NA, width=31, FUN=function(x) mean(x, na.rm=TRUE), by=1, by.column=TRUE, fill=NA, align="center"))
mediaP_N<-mean(rapportoP_N)
## Yearly subsets of the interpolated N series (2015)
tsNtotIN2015<-tsNtotINtotal_spazi["2015"]
## (2016)
tsNtotIN2016<-tsNtotINtotal_spazi["2016"]
## (2017)
tsNtotIN2017<-tsNtotINtotal_spazi["2017"]
## (2018)
tsNtotIN2018<-tsNtotINtotal_spazi["2018"]
# Plot the time series
windows(width = 16,height = 9)
par(mar=c(6,6,4,4),mgp=c(4,1,0)) # margins and axis-label distance
# Empty frame first; series and annotations are layered on afterwards
plot(as.zoo(rapportoP_N),type="n",xlab="Mesi",ylab=expression(paste("P"[tot-IN],"/N"[tot-IN]," [ - ]")),yaxt="n",xaxt="n",yaxs="i",xaxs="i",cex.lab=1.2,ylim=c(0,0.4),col="grey")
drawTimeAxis(as.zoo(tsNtotINtotal_spazi), tick.tstep ="months", lab.tstep ="months",las=2,lab.fmt="%m")
axis(side=2,at=seq(from = 0,to = 0.4,by = 0.05),las=2,format(seq(from = 0,to = 0.4,by = 0.05), big.mark = ".", decimal.mark = ","))
grid(nx=NA,ny=8,col="grey")
lines(as.zoo(rapportoP_N),col="darkslategrey")
# LOESS trend line
lines(as.zoo(ts_trend(rapportoP_N)),lwd=2)
# Linear regression of the ratio on time, drawn dashed in red
a<-lm(rapportoP_N~index(rapportoP_N))
abline(a,col="red",lwd=2,lty=5)
# Percent change implied by the linear fit over the full period
perc_a<--(1-coredata(a$fitted.values[length(a$fitted.values)])/coredata(a$fitted.values[1]))*100
# Vertical separators at each year boundary, with year labels
abline(v=index(tsNtotIN2016[1,]),lwd=2)
abline(v=index(tsNtotIN2017[1,]),lwd=2)
abline(v=index(tsNtotIN2018[1,]),lwd=2)
text(x=index(tsNtotIN2015[182,]),y=0.375,label="2015")
text(x=index(tsNtotIN2016[182,]),y=0.375,label="2016")
text(x=index(tsNtotIN2017[182,]),y=0.375,label="2017")
text(x=index(tsNtotIN2018[90,]),y=0.375,label="2018")
text(x=index(tsNtotIN2016[181,]),y=0.225, label=paste("Variazione = ",format(round(perc_a,digits=1),decimal.mark = ","),"%"),col="red")
legend(x=index(tsNtotIN2015[45,]),y=0.35, c(expression(paste("P"[tot-IN],"/N"[tot-IN])),"Regressione","LOESS"),col=c("darkslategrey","red","black"),lty=c(1,5,1),lwd=c(1,2,2),bg="white")
# DETRENDING: subtract the LOESS trend from the ratio
rapportoP_NDET<-rapportoP_N-(ts_trend(rapportoP_N))
mediaDET<-mean(rapportoP_NDET)
# SEASONALITY: weekly medians of the detrended ratio, then autocorrelation
rapportoP_NAGG<-apply.weekly(rapportoP_NDET,median)
windows(width = 16,height = 9)
par(mar=c(6,6,4,4),mgp=c(3,1,0),cex.main=2)
options(OutDec= ",")
acf(rapportoP_NAGG,lag.max = 60, main=expression(paste("P"[tot-IN],"/N"[tot-IN])),yaxt="n", ci.col="black",cex.lab=1.2)
axis(side=2,las=2)
|
/Master Thesis/R - S. Ambrogio/S.Ambrogio Trend and Seasonality/SA _TS IN/15. SA - TS_Ptotin_Ntotin.R
|
no_license
|
maricorsi17/University-Projects
|
R
| false
| false
| 3,481
|
r
|
# Install packages
#install.packages("xts")
library("xts")
#install.packages("TTR")
library("TTR")
library("plotrix")
library("hydroTSM")
library("zoo")
library("tsbox")
# Load the data file (CSV exported from Excel)
SAmbrogio<-read.csv2(file="c:/Users/Marianna/Documents/Universita/Tesi/R - S. Ambrogio/NoOutliers_S.Ambrogio.CSV", header=TRUE, sep=";")
# Build the time series
## Create the vector of daily dates (1277 days starting 2015-01-01)
datestotal<-seq(as.Date("2015-01-01"), length=1277, by="days")
## Create the time series (all years)
# NOTE(review): columns 31 and 29 are assumed to hold the inflow total-N and
# total-P series respectively -- confirm against the source spreadsheet.
tsNtotINtotal_original<-xts(x=SAmbrogio[,31], order.by=datestotal)
tsPtotINtotal_original<-xts(x=SAmbrogio[,29], order.by=datestotal)
# Linearly interpolate over missing N values (used for axes/year markers)
tsNtotINtotal_spazi<-na.approx(xts(x=SAmbrogio[,31], order.by=datestotal))
# Keep only the dates on which both N and P are observed
tsNtotINtotal<-tsNtotINtotal_original[intersect(which(!is.na(tsPtotINtotal_original)),which(!is.na(tsNtotINtotal_original)))]
tsPtotINtotal<-tsPtotINtotal_original[intersect(which(!is.na(tsPtotINtotal_original)),which(!is.na(tsNtotINtotal_original)))]
# P/N ratio on complete cases, and on the raw series (NAs propagate)
rapportoP_N<-tsPtotINtotal/tsNtotINtotal
rapportoP_N_NA<-tsPtotINtotal_original/tsNtotINtotal_original
# 31-day centred rolling mean of the raw P/N ratio
MArapportoP_N<-na.omit(rollapply(rapportoP_N_NA, width=31, FUN=function(x) mean(x, na.rm=TRUE), by=1, by.column=TRUE, fill=NA, align="center"))
mediaP_N<-mean(rapportoP_N)
## Yearly subsets of the interpolated N series (2015)
tsNtotIN2015<-tsNtotINtotal_spazi["2015"]
## (2016)
tsNtotIN2016<-tsNtotINtotal_spazi["2016"]
## (2017)
tsNtotIN2017<-tsNtotINtotal_spazi["2017"]
## (2018)
tsNtotIN2018<-tsNtotINtotal_spazi["2018"]
# Plot the time series
windows(width = 16,height = 9)
par(mar=c(6,6,4,4),mgp=c(4,1,0)) # margins and axis-label distance
# Empty frame first; series and annotations are layered on afterwards
plot(as.zoo(rapportoP_N),type="n",xlab="Mesi",ylab=expression(paste("P"[tot-IN],"/N"[tot-IN]," [ - ]")),yaxt="n",xaxt="n",yaxs="i",xaxs="i",cex.lab=1.2,ylim=c(0,0.4),col="grey")
drawTimeAxis(as.zoo(tsNtotINtotal_spazi), tick.tstep ="months", lab.tstep ="months",las=2,lab.fmt="%m")
axis(side=2,at=seq(from = 0,to = 0.4,by = 0.05),las=2,format(seq(from = 0,to = 0.4,by = 0.05), big.mark = ".", decimal.mark = ","))
grid(nx=NA,ny=8,col="grey")
lines(as.zoo(rapportoP_N),col="darkslategrey")
# LOESS trend line
lines(as.zoo(ts_trend(rapportoP_N)),lwd=2)
# Linear regression of the ratio on time, drawn dashed in red
a<-lm(rapportoP_N~index(rapportoP_N))
abline(a,col="red",lwd=2,lty=5)
# Percent change implied by the linear fit over the full period
perc_a<--(1-coredata(a$fitted.values[length(a$fitted.values)])/coredata(a$fitted.values[1]))*100
# Vertical separators at each year boundary, with year labels
abline(v=index(tsNtotIN2016[1,]),lwd=2)
abline(v=index(tsNtotIN2017[1,]),lwd=2)
abline(v=index(tsNtotIN2018[1,]),lwd=2)
text(x=index(tsNtotIN2015[182,]),y=0.375,label="2015")
text(x=index(tsNtotIN2016[182,]),y=0.375,label="2016")
text(x=index(tsNtotIN2017[182,]),y=0.375,label="2017")
text(x=index(tsNtotIN2018[90,]),y=0.375,label="2018")
text(x=index(tsNtotIN2016[181,]),y=0.225, label=paste("Variazione = ",format(round(perc_a,digits=1),decimal.mark = ","),"%"),col="red")
legend(x=index(tsNtotIN2015[45,]),y=0.35, c(expression(paste("P"[tot-IN],"/N"[tot-IN])),"Regressione","LOESS"),col=c("darkslategrey","red","black"),lty=c(1,5,1),lwd=c(1,2,2),bg="white")
# DETRENDING: subtract the LOESS trend from the ratio
rapportoP_NDET<-rapportoP_N-(ts_trend(rapportoP_N))
mediaDET<-mean(rapportoP_NDET)
# SEASONALITY: weekly medians of the detrended ratio, then autocorrelation
rapportoP_NAGG<-apply.weekly(rapportoP_NDET,median)
windows(width = 16,height = 9)
par(mar=c(6,6,4,4),mgp=c(3,1,0),cex.main=2)
options(OutDec= ",")
acf(rapportoP_NAGG,lag.max = 60, main=expression(paste("P"[tot-IN],"/N"[tot-IN])),yaxt="n", ci.col="black",cex.lab=1.2)
axis(side=2,las=2)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/Tcells.R
\name{Tcells}
\alias{Tcells}
\title{Tcells: an R package to estimate cell dynamics with mixed models.}
\usage{
Tcells()
}
\description{
Collection of pdMat, varClasses and corClasses objects and function to
fit cell dynamic models with mixed models using \code{\link[nlme]{lme}} in the package 'nlme'.
}
\seealso{
% The links that previously appeared here pointed at the author's local
% R help server (http://127.0.0.1:...) and cannot resolve on other
% machines; the document they referenced is the "Plans" vignette:
\code{vignette("Plans", package = "Tcells")}
To see a paper with some of the theory behind this package, use the command:\cr
\code{vignette("Tcells")}
}
|
/man/Tcells.Rd
|
no_license
|
gmonette/Tcells
|
R
| false
| false
| 644
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/Tcells.R
\name{Tcells}
\alias{Tcells}
\title{Tcells: an R package to estimate cell dynamics with mixed models.}
\usage{
Tcells()
}
\description{
Collection of pdMat, varClasses and corClasses objects and function to
fit cell dynamic models with mixed models using \code{\link[nlme]{lme}} in the package 'nlme'.
}
\seealso{
% The links that previously appeared here pointed at the author's local
% R help server (http://127.0.0.1:...) and cannot resolve on other
% machines; the document they referenced is the "Plans" vignette:
\code{vignette("Plans", package = "Tcells")}
To see a paper with some of the theory behind this package, use the command:\cr
\code{vignette("Tcells")}
}
|
# Ian Castillo Rosales
# R Programming - Assignment 3
# 30062014
# Return the name of the hospital in state `estado` holding rank `num`
# (a whole number, "best", or "worst") for 30-day mortality from `resultado`
# ("heart attack", "heart failure" or "pneumonia").  A rank beyond the
# number of hospitals in the state yields NA.  Stops with "invalid outcome",
# "invalid state" or "invalid num" on bad arguments (scalars expected).
rankhospital <- function(estado, resultado, num) {
  ## Read the data (all columns as character; the rate column is converted
  ## to numeric after filtering)
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  data <- data[c(2, 7, 11, 17, 23)]
  names(data) <- c("name", "state", "heart attack", "heart failure", "pneumonia")
  ## Validate the outcome
  res_validos <- c("heart attack", "heart failure", "pneumonia")
  if (!(resultado %in% res_validos)) {
    stop("invalid outcome")
  }
  ## Validate the state
  if (!(estado %in% unique(data$state))) {
    stop("invalid state")
  }
  ## Validate num: must be "best", "worst", or a whole number.  Checking
  ## is.numeric() first avoids the error the original raised when num was
  ## any other character string (e.g. "foo" %% 1 is undefined).
  valid_rank <- identical(num, "best") || identical(num, "worst") ||
    (is.numeric(num) && num %% 1 == 0)
  if (!valid_rank) {
    stop("invalid num")
  }
  ## Keep rows for the requested state with an available outcome value
  data <- data[data$state == estado & data[[resultado]] != "Not Available", ]
  ## Convert the outcome to numeric and sort by rate, breaking ties by name
  data[[resultado]] <- as.numeric(data[[resultado]])
  data <- data[order(data[[resultado]], data$name), ]
  ## Resolve the requested rank (which.min/which.max keep the original
  ## tie-breaking behaviour on the sorted data)
  vals <- data[[resultado]]
  if (identical(num, "best")) {
    rowNum <- which.min(vals)
  } else if (identical(num, "worst")) {
    rowNum <- which.max(vals)
  } else {
    rowNum <- num
  }
  ## Hospital name at the requested rank (NA if out of range)
  data$name[rowNum]
}
rankhospital("TX", "heart failure", 4)
rankhospital("MD", "heart attack", "worst")
rankhospital("MN", "heart attack", 5000)
|
/R Programming/ProgrammingAssignment3/rankhospital.R
|
no_license
|
donelianc/coursera-data-science
|
R
| false
| false
| 1,782
|
r
|
# Ian Castillo Rosales
# R Programming - Assignment 3
# 30062014
# Return the name of the hospital in state `estado` holding rank `num`
# (a whole number, "best", or "worst") for 30-day mortality from `resultado`
# ("heart attack", "heart failure" or "pneumonia").  A rank beyond the
# number of hospitals in the state yields NA.  Stops with "invalid outcome",
# "invalid state" or "invalid num" on bad arguments (scalars expected).
rankhospital <- function(estado, resultado, num) {
  ## Read the data (all columns as character; the rate column is converted
  ## to numeric after filtering)
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  data <- data[c(2, 7, 11, 17, 23)]
  names(data) <- c("name", "state", "heart attack", "heart failure", "pneumonia")
  ## Validate the outcome
  res_validos <- c("heart attack", "heart failure", "pneumonia")
  if (!(resultado %in% res_validos)) {
    stop("invalid outcome")
  }
  ## Validate the state
  if (!(estado %in% unique(data$state))) {
    stop("invalid state")
  }
  ## Validate num: must be "best", "worst", or a whole number.  Checking
  ## is.numeric() first avoids the error the original raised when num was
  ## any other character string (e.g. "foo" %% 1 is undefined).
  valid_rank <- identical(num, "best") || identical(num, "worst") ||
    (is.numeric(num) && num %% 1 == 0)
  if (!valid_rank) {
    stop("invalid num")
  }
  ## Keep rows for the requested state with an available outcome value
  data <- data[data$state == estado & data[[resultado]] != "Not Available", ]
  ## Convert the outcome to numeric and sort by rate, breaking ties by name
  data[[resultado]] <- as.numeric(data[[resultado]])
  data <- data[order(data[[resultado]], data$name), ]
  ## Resolve the requested rank (which.min/which.max keep the original
  ## tie-breaking behaviour on the sorted data)
  vals <- data[[resultado]]
  if (identical(num, "best")) {
    rowNum <- which.min(vals)
  } else if (identical(num, "worst")) {
    rowNum <- which.max(vals)
  } else {
    rowNum <- num
  }
  ## Hospital name at the requested rank (NA if out of range)
  data$name[rowNum]
}
rankhospital("TX", "heart failure", 4)
rankhospital("MD", "heart attack", "worst")
rankhospital("MN", "heart attack", 5000)
|
## 36106 - Data Algorithms and Meaning
## Assignment 2 Part A: Linear Regression
##
## Mutaz Abu Ghazaleh
## 13184383
##
## linear model for location 1 industry 1
## Library
library(dplyr)
library(ggplot2)
library(readr)
library(tidyr)
library(lubridate)
library(scales)
library(RcppRoll) # used to calculate rolling mean
library(broom)
library(caret)
setwd("c:/mdsi/dam/at2a")
#### Task 3 - lm ####
# For industry = 1 and location = 1, train a linear regression model
# with monthly_amount as the target.
# Remember that time is very important in this model,
# so be sure to include a variable for the time sequence (this can simply be a 1 for the
# first month, 2 for the second month, etc.)
# Note: this code file represents trials and the final outcome after
# experimenting with different options for feature engineering and fit formula.
#### 1. feature engineering: load featurised data set ####
# Load the prepared, feature-engineered transaction file.
# NOTE(review): the col_types list fixes the column types (date, two factors,
# doubles/integers) but the column names/meanings are not visible here --
# confirm against transactions_features.csv.
df_features <- read_csv("./transactions_features.csv",
col_types = list(
readr::col_date(format=""),
readr::col_factor(levels = NULL),
readr::col_factor(levels = NULL),
readr::col_double(),
readr::col_integer(),
readr::col_factor(levels=NULL),
readr::col_integer(),
readr::col_double(),
readr::col_double()
))
#### functions ####
# print RMSE, RSE and AdjR2 for model
model_summary <- function(mod) {
  # Report fit quality for a linear model: adjusted R-squared, RMSE
  # (computed from the residuals via broom::augment) and RSE are printed.
  fit_stats <- summary(mod)
  adj_r2 <- fit_stats$adj.r.squared
  rse <- fit_stats$sigma
  # Residuals as a data frame, one row per observation.
  residual_frame <- augment(mod)
  rmse <- sqrt(mean(residual_frame$.resid ^ 2))
  print(paste0("Adj R-sqr: ", adj_r2))
  print(paste0("RMSE: ", rmse))
  print(paste0("RSE: ", rse))
}
# fit model for based on formula for a given industry amd location
fit_model <- function (df, formula, ind=1, loc=1){
  # Fit a linear model on the subset for one industry/location pair,
  # print the formula and fit metrics, and return the fitted model.
  subset_df <- filter(df, industry == ind, location == loc)
  fitted_mod <- lm(formula = formula, data = subset_df)
  print(formula)
  model_summary(fitted_mod)
  return(fitted_mod)
}
# cross validate model with out-ofsample and print average out-of-sample RMSE
fit_model_cv <- function (df, formula, ind=1, loc=1){
  # Cross-validate the model via caret (15-fold CV) for one
  # industry/location pair; print CV results and final-model metrics,
  # then return the caret train object.
  subset_df <- filter(df, industry == ind, location == loc)
  cv_control <- trainControl(method = "cv", number = 15, verboseIter = FALSE)
  cv_fit <- train(formula, subset_df, method = "lm", trControl = cv_control)
  print(formula)
  print("cross validation")
  print(cv_fit$results)
  print("final model")
  model_summary(cv_fit$finalModel)
  return(cv_fit)
}
#### 3. create the model using lm() (evaluate different features) ####
# Model and compare with different features/formulas ####
# Baseline: mean monthly amount for industry 1 / location 1.
df_features %>% filter(industry==1, location==1) %>% summarise(mean = mean(monthly_mean))
# mod1: year and month (categorical)
fit_model(df_features, ind=1,loc=1, formula = monthly_mean ~ year + month) -> mod1
# mod2: year and monthn (numerical)
fit_model(df_features, ind=1,loc=1, formula = monthly_mean ~ year + monthn) -> mod2
# mod3: year, month and lagged features (m3 and m6)
fit_model(df_features, ind=1,loc=1, formula = monthly_mean ~ year + month + m3+m6) -> mod3
#### cross validation ####
# Dropped mod2; only cross-validating the mod1 and mod3 formulas,
# i.e. with and without the lagged features.
fit_model_cv(df_features, ind=1,loc=1, formula = monthly_mean ~ year + month) -> mod1.cv
fit_model_cv(df_features, ind=1,loc=1, formula = monthly_mean ~ year + month + m3+m6) -> mod3.cv
#### predict december 2016
# Create an out-of-sample prediction for December 2016 using mod1.
predict(mod1,
data.frame(year=2016, month=factor(12)))
|
/at2a/03 - modelling.R
|
no_license
|
mutazag/DAM
|
R
| false
| false
| 4,095
|
r
|
## 36106 - Data Algorithms and Meaning
## Assignment 2 Part A: Linear Regression
##
## Mutaz Abu Ghazaleh
## 13184383
##
## linear model for location 1 industry 1
## Library
library(dplyr)
library(ggplot2)
library(readr)
library(tidyr)
library(lubridate)
library(scales)
library(RcppRoll) # used to calculate rolling mean
library(broom)
library(caret)
setwd("c:/mdsi/dam/at2a")
#### Task 3 - lm ####
# For industry = 1 and location = 1, train a linear regression model
# with monthly_amount as the target.
# Remember that time is very important in this model,
# so be sure to include a variable for the time sequence (this can simply be a 1 for the
# first month, 2 for the second month, etc.)
# Note: this code file represents trials and the final outcome after
# experimenting with different options for feature engineering and fit formula.
#### 1. feature engineering: load featurised data set ####
# Load the prepared, feature-engineered transaction file.
# NOTE(review): the col_types list fixes the column types (date, two factors,
# doubles/integers) but the column names/meanings are not visible here --
# confirm against transactions_features.csv.
df_features <- read_csv("./transactions_features.csv",
col_types = list(
readr::col_date(format=""),
readr::col_factor(levels = NULL),
readr::col_factor(levels = NULL),
readr::col_double(),
readr::col_integer(),
readr::col_factor(levels=NULL),
readr::col_integer(),
readr::col_double(),
readr::col_double()
))
#### functions ####
# print RMSE, RSE and AdjR2 for model
model_summary <- function(mod) {
  # Report fit quality for a linear model: adjusted R-squared, RMSE
  # (computed from the residuals via broom::augment) and RSE are printed.
  fit_stats <- summary(mod)
  adj_r2 <- fit_stats$adj.r.squared
  rse <- fit_stats$sigma
  # Residuals as a data frame, one row per observation.
  residual_frame <- augment(mod)
  rmse <- sqrt(mean(residual_frame$.resid ^ 2))
  print(paste0("Adj R-sqr: ", adj_r2))
  print(paste0("RMSE: ", rmse))
  print(paste0("RSE: ", rse))
}
# fit model for based on formula for a given industry amd location
fit_model <- function (df, formula, ind=1, loc=1){
  # Fit a linear model on the subset for one industry/location pair,
  # print the formula and fit metrics, and return the fitted model.
  subset_df <- filter(df, industry == ind, location == loc)
  fitted_mod <- lm(formula = formula, data = subset_df)
  print(formula)
  model_summary(fitted_mod)
  return(fitted_mod)
}
# cross validate model with out-ofsample and print average out-of-sample RMSE
fit_model_cv <- function (df, formula, ind=1, loc=1){
  # Cross-validate the model via caret (15-fold CV) for one
  # industry/location pair; print CV results and final-model metrics,
  # then return the caret train object.
  subset_df <- filter(df, industry == ind, location == loc)
  cv_control <- trainControl(method = "cv", number = 15, verboseIter = FALSE)
  cv_fit <- train(formula, subset_df, method = "lm", trControl = cv_control)
  print(formula)
  print("cross validation")
  print(cv_fit$results)
  print("final model")
  model_summary(cv_fit$finalModel)
  return(cv_fit)
}
#### 3. create the model using lm() (evaluate different features) ####
# Model and compare with different features/formulas ####
# Baseline: mean monthly amount for industry 1 / location 1.
df_features %>% filter(industry==1, location==1) %>% summarise(mean = mean(monthly_mean))
# mod1: year and month (categorical)
fit_model(df_features, ind=1,loc=1, formula = monthly_mean ~ year + month) -> mod1
# mod2: year and monthn (numerical)
fit_model(df_features, ind=1,loc=1, formula = monthly_mean ~ year + monthn) -> mod2
# mod3: year, month and lagged features (m3 and m6)
fit_model(df_features, ind=1,loc=1, formula = monthly_mean ~ year + month + m3+m6) -> mod3
#### cross validation ####
# Dropped mod2; only cross-validating the mod1 and mod3 formulas,
# i.e. with and without the lagged features.
fit_model_cv(df_features, ind=1,loc=1, formula = monthly_mean ~ year + month) -> mod1.cv
fit_model_cv(df_features, ind=1,loc=1, formula = monthly_mean ~ year + month + m3+m6) -> mod3.cv
#### predict december 2016
# Create an out-of-sample prediction for December 2016 using mod1.
predict(mod1,
data.frame(year=2016, month=factor(12)))
|
# ======== Packages required =========
# Build a HISAT2 genome index from a reference FASTA via Rhisat2,
# driven by Unix-style command-line options.
# Generous JVM heap / CMS collector settings for any rJava-based tooling.
options(java.parameters = c("-XX:+UseConcMarkSweepGC", "-Xmx16384m"))
# Rcran: install any missing CRAN dependencies.
packages<-c('Xmisc')
for (package in packages){
if(package %in% rownames(installed.packages()) == FALSE) {
install.packages(package)}
}
# Bioconductor: install any missing Bioconductor dependencies.
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
bio.packages<-c("Rhisat2")
for (bio.package in bio.packages){
if(bio.package %in% rownames(installed.packages()) == FALSE) {
BiocManager::install(bio.package)}
}
# === setting environment ===
# Command-line interface.  NOTE(review): Xmisc::ArgumentParser appears to
# assign each parsed option (dir, ref, i, od) directly into the calling
# environment, which is how the bare names used below come into scope --
# confirm against the Xmisc documentation.  Note that 'dir' and 'i' shadow
# base R functions of the same name.
parser <- Xmisc::ArgumentParser$new()
parser$add_usage('Rsubread_featureCount_cl.R [options]')
parser$add_description('An executable R script parsing arguments from Unix-like command line.')
parser$add_argument('--h',type='logical', action='store_true', help='Print the help page')
parser$add_argument('--help',type='logical',action='store_true',help='Print the help page')
parser$add_argument('--dir', type = 'character', default = getwd(), help = '"directory",Enter your working directory')
parser$add_argument('--ref', type = 'character', default = 'genome.fa', help = '"reference",Enter your reference filename (eg. *.fa), not compressed!')
parser$add_argument('--i', type = 'character', default = 'hg', help = '"index",Enter your index prefix')
parser$add_argument('--od', type = 'character', default = getwd(), help = '"outdir",Enter your output directory')
parser$helpme()
# === variables ====
# Normalise Windows-style backslashes to forward slashes.
dirPath <- dir
dirPath <-gsub ('\\\\','/',dirPath)
od <-gsub ('\\\\','/',od)
# Validate that both the working and the output directory exist;
# exit with a message otherwise.
if (dir.exists(dirPath) && dir.exists(od)){
setwd(dirPath)
cat(paste0("Setting ",dirPath," as the working directory\n"))
} else if (!dir.exists(dirPath) && !dir.exists(od)) {
cat("Both directories are not existing!\n")
quit()
} else if (!dir.exists(dirPath)){
cat("Reference directory is not existing!\n")
quit()
} else if (!dir.exists(od)){
cat("Output directory is not existing!\n")
quit()
}
# ======
# Build the HISAT2 index from the reference FASTA into the output directory.
Rhisat2::hisat2_build(references = ref, outdir = od, prefix = i, force = TRUE, strict = TRUE, execute = TRUE)
|
/Rhisat2_index_cl.R
|
no_license
|
plague1981/RNAseq_Hisat2_Stringtie_ballgown
|
R
| false
| false
| 2,070
|
r
|
# ======== Packages required =========
# Build a HISAT2 genome index from a reference FASTA via Rhisat2,
# driven by Unix-style command-line options.
# Generous JVM heap / CMS collector settings for any rJava-based tooling.
options(java.parameters = c("-XX:+UseConcMarkSweepGC", "-Xmx16384m"))
# Rcran: install any missing CRAN dependencies.
packages<-c('Xmisc')
for (package in packages){
if(package %in% rownames(installed.packages()) == FALSE) {
install.packages(package)}
}
# Bioconductor: install any missing Bioconductor dependencies.
if (!requireNamespace("BiocManager", quietly = TRUE))
install.packages("BiocManager")
bio.packages<-c("Rhisat2")
for (bio.package in bio.packages){
if(bio.package %in% rownames(installed.packages()) == FALSE) {
BiocManager::install(bio.package)}
}
# === setting environment ===
# Command-line interface.  NOTE(review): Xmisc::ArgumentParser appears to
# assign each parsed option (dir, ref, i, od) directly into the calling
# environment, which is how the bare names used below come into scope --
# confirm against the Xmisc documentation.  Note that 'dir' and 'i' shadow
# base R functions of the same name.
parser <- Xmisc::ArgumentParser$new()
parser$add_usage('Rsubread_featureCount_cl.R [options]')
parser$add_description('An executable R script parsing arguments from Unix-like command line.')
parser$add_argument('--h',type='logical', action='store_true', help='Print the help page')
parser$add_argument('--help',type='logical',action='store_true',help='Print the help page')
parser$add_argument('--dir', type = 'character', default = getwd(), help = '"directory",Enter your working directory')
parser$add_argument('--ref', type = 'character', default = 'genome.fa', help = '"reference",Enter your reference filename (eg. *.fa), not compressed!')
parser$add_argument('--i', type = 'character', default = 'hg', help = '"index",Enter your index prefix')
parser$add_argument('--od', type = 'character', default = getwd(), help = '"outdir",Enter your output directory')
parser$helpme()
# === variables ====
# Normalise Windows-style backslashes to forward slashes.
dirPath <- dir
dirPath <-gsub ('\\\\','/',dirPath)
od <-gsub ('\\\\','/',od)
# Validate that both the working and the output directory exist;
# exit with a message otherwise.
if (dir.exists(dirPath) && dir.exists(od)){
setwd(dirPath)
cat(paste0("Setting ",dirPath," as the working directory\n"))
} else if (!dir.exists(dirPath) && !dir.exists(od)) {
cat("Both directories are not existing!\n")
quit()
} else if (!dir.exists(dirPath)){
cat("Reference directory is not existing!\n")
quit()
} else if (!dir.exists(od)){
cat("Output directory is not existing!\n")
quit()
}
# ======
# Build the HISAT2 index from the reference FASTA into the output directory.
Rhisat2::hisat2_build(references = ref, outdir = od, prefix = i, force = TRUE, strict = TRUE, execute = TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions.R
\name{spp654}
\alias{spp654}
\title{species function}
\usage{
spp654(a, b, c, d, e)
}
\arguments{
\item{a}{environmental parameter}
\item{b}{environmental parameter}
\item{c}{environmental parameter}
\item{d}{environmental parameter}
\item{e}{environmental parameter}
}
\description{
species function
}
\examples{
# spp654() takes five required environmental parameters (a-e); calling it
# with no arguments, as the generated example did, would fail at check time.
\dontrun{
spp654(a, b, c, d, e)
}
}
\keyword{function}
\keyword{species}
|
/man/spp654.Rd
|
permissive
|
Djeppschmidt/Model.Microbiome
|
R
| false
| true
| 456
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions.R
\name{spp654}
\alias{spp654}
\title{species function}
\usage{
spp654(a, b, c, d, e)
}
\arguments{
\item{a}{environmental parameter}
\item{b}{environmental parameter}
\item{c}{environmental parameter}
\item{d}{environmental parameter}
\item{e}{environmental parameter}
}
\description{
species function
}
\examples{
# spp654() takes five required environmental parameters (a-e); calling it
# with no arguments, as the generated example did, would fail at check time.
\dontrun{
spp654(a, b, c, d, e)
}
}
\keyword{function}
\keyword{species}
|
library(dplyr)
source("fun.R")

# Advent of Code 2020, day 10.

# --- Part 1 ---
# Verify against the two worked examples first.
t1 <- read.csv("test1.txt", header = FALSE)[[1]]
r1_test <- table(get_differences(t1))
r1_test["1"] == 7 & r1_test["3"] == 5

t2 <- read.csv("test2.txt", header = FALSE)[[1]]
r2_test <- table(get_differences(t2))
r2_test["1"] == 22 & r2_test["3"] == 10

# Solve on the real input: count of 1-jolt gaps times count of 3-jolt gaps.
inp <- read.csv("input.txt", header = FALSE)[[1]]
r1 <- table(get_differences(inp))
r1["1"] * r1["3"]
# 2244

# --- Part 2 ---
# Verify against the worked examples.
r2_t1 <- count_combinations(get_differences(t1))
r2_t1 == 8
r2_t2 <- count_combinations(get_differences(t2))
r2_t2 == 19208

# Solve on the real input; print without scientific notation (large count).
r2 <- count_combinations(get_differences(inp))
print(format(r2, scientific = FALSE))
# 3947645370368
|
/Day10/day10.R
|
no_license
|
Darius-Jaraminas/advent_of_code_2020
|
R
| false
| false
| 755
|
r
|
library(dplyr)
source("fun.R")

# Advent of Code 2020, day 10.

# --- Part 1 ---
# Verify against the two worked examples first.
t1 <- read.csv("test1.txt", header = FALSE)[[1]]
r1_test <- table(get_differences(t1))
r1_test["1"] == 7 & r1_test["3"] == 5

t2 <- read.csv("test2.txt", header = FALSE)[[1]]
r2_test <- table(get_differences(t2))
r2_test["1"] == 22 & r2_test["3"] == 10

# Solve on the real input: count of 1-jolt gaps times count of 3-jolt gaps.
inp <- read.csv("input.txt", header = FALSE)[[1]]
r1 <- table(get_differences(inp))
r1["1"] * r1["3"]
# 2244

# --- Part 2 ---
# Verify against the worked examples.
r2_t1 <- count_combinations(get_differences(t1))
r2_t1 == 8
r2_t2 <- count_combinations(get_differences(t2))
r2_t2 == 19208

# Solve on the real input; print without scientific notation (large count).
r2 <- count_combinations(get_differences(inp))
print(format(r2, scientific = FALSE))
# 3947645370368
|
### Title: Pool Results of Imputed DV Simulation
### Author: Kyle M. Lang
### Created: 2015-11-16
### Modified: 2019-11-05
### Reads per-replication RDS outputs for every simulation condition and
### method, pools them into one nested list, and records how many complete
### replications each condition produced.

rm(list = ls(all = TRUE)) # NOTE: clears the workspace; kept for script parity

source("../supportFunctions.R")

## Directories and bookkeeping:
plotDir  <- "../../plots/study2/"
outDir   <- "../../output/study2/"
saveDir  <- "../../results/study2/"
saveDate <- format(Sys.time(), "%Y%m%d")
nReps    <- 500

## Define levels of variable simulation parameters:
nVec  <- c(500, 250, 100)
pmVec <- c(0.1, 0.2, 0.4)
r2Vec <- c(0.15, 0.3, 0.6)
clVec <- c(0.0, 0.1, 0.3, 0.5)
apVec <- c(1.0, 0.75, 0.5, 0.25, 0.0)

## One row per fully-crossed simulation condition:
condMat <- expand.grid(ap = apVec,
                       cl = clVec,
                       pm = pmVec,
                       n  = nVec,
                       r2 = r2Vec)

outList2 <- list()
reps     <- rep(0, nrow(condMat))

for(i in seq_len(nrow(condMat))) {
    ## Condition-specific file-name core shared by all four output types:
    fnCore <- paste0("_n",  condMat[i, "n"],
                     "_rs", condMat[i, "r2"],
                     "_cl", condMat[i, "cl"],
                     "_ap", condMat[i, "ap"],
                     "_pm", condMat[i, "pm"])

    outList <- list()
    for(rp in seq_len(nReps)) {
        ## One file per method: complete data, listwise deletion, MI, MI-DV.
        ## (Vectorized construction replaces four near-identical paste0 calls.)
        fileNames <- paste0(outDir,
                            c("compOut", "ldOut", "miOut", "midOut"),
                            fnCore, "_rep", rp, ".rds")
        names(fileNames) <- c("comp", "ld", "mi", "mid")

        ## Pool a replication only when all four method files exist:
        if(all(file.exists(fileNames))) {
            reps[i] <- reps[i] + 1
            outList$comp[[rp]] <- readRDS(fileNames["comp"])
            outList$ld[[rp]]   <- readRDS(fileNames["ld"])
            outList$mi[[rp]]   <- readRDS(fileNames["mi"])
            outList$mid[[rp]]  <- readRDS(fileNames["mid"])
        }
    }# END for(rp in seq_len(nReps))

    outList2[[i]] <- outList
}# END for(i in seq_len(nrow(condMat)))

saveRDS(reps, file = paste0(saveDir, "impDvSimRepCounts-", saveDate, ".rds"))
saveRDS(outList2, file = paste0(saveDir, "impDvSimOutList-", saveDate, ".rds"))

## Sanity check: were any conditions short of the full 500 replications?
any(reps < 500)
|
/code/analysis/pre2019/poolResults.R
|
permissive
|
kylelang/impDvSim
|
R
| false
| false
| 2,522
|
r
|
### Title: Pool Results of Imputed DV Simulation
### Author: Kyle M. Lang
### Created: 2015-11-16
### Modified: 2019-11-05
### Reads per-replication RDS outputs for every simulation condition and
### method, pools them into one nested list, and records how many complete
### replications each condition produced.

rm(list = ls(all = TRUE)) # NOTE: clears the workspace; kept for script parity

source("../supportFunctions.R")

## Directories and bookkeeping:
plotDir  <- "../../plots/study2/"
outDir   <- "../../output/study2/"
saveDir  <- "../../results/study2/"
saveDate <- format(Sys.time(), "%Y%m%d")
nReps    <- 500

## Define levels of variable simulation parameters:
nVec  <- c(500, 250, 100)
pmVec <- c(0.1, 0.2, 0.4)
r2Vec <- c(0.15, 0.3, 0.6)
clVec <- c(0.0, 0.1, 0.3, 0.5)
apVec <- c(1.0, 0.75, 0.5, 0.25, 0.0)

## One row per fully-crossed simulation condition:
condMat <- expand.grid(ap = apVec,
                       cl = clVec,
                       pm = pmVec,
                       n  = nVec,
                       r2 = r2Vec)

outList2 <- list()
reps     <- rep(0, nrow(condMat))

for(i in seq_len(nrow(condMat))) {
    ## Condition-specific file-name core shared by all four output types:
    fnCore <- paste0("_n",  condMat[i, "n"],
                     "_rs", condMat[i, "r2"],
                     "_cl", condMat[i, "cl"],
                     "_ap", condMat[i, "ap"],
                     "_pm", condMat[i, "pm"])

    outList <- list()
    for(rp in seq_len(nReps)) {
        ## One file per method: complete data, listwise deletion, MI, MI-DV.
        ## (Vectorized construction replaces four near-identical paste0 calls.)
        fileNames <- paste0(outDir,
                            c("compOut", "ldOut", "miOut", "midOut"),
                            fnCore, "_rep", rp, ".rds")
        names(fileNames) <- c("comp", "ld", "mi", "mid")

        ## Pool a replication only when all four method files exist:
        if(all(file.exists(fileNames))) {
            reps[i] <- reps[i] + 1
            outList$comp[[rp]] <- readRDS(fileNames["comp"])
            outList$ld[[rp]]   <- readRDS(fileNames["ld"])
            outList$mi[[rp]]   <- readRDS(fileNames["mi"])
            outList$mid[[rp]]  <- readRDS(fileNames["mid"])
        }
    }# END for(rp in seq_len(nReps))

    outList2[[i]] <- outList
}# END for(i in seq_len(nrow(condMat)))

saveRDS(reps, file = paste0(saveDir, "impDvSimRepCounts-", saveDate, ".rds"))
saveRDS(outList2, file = paste0(saveDir, "impDvSimOutList-", saveDate, ".rds"))

## Sanity check: were any conditions short of the full 500 replications?
any(reps < 500)
|
library(trialr)
library(dplyr)
library(loo)
# Code ----
#' Class of models fit by \pkg{trialr} using the EffTox design.
#'
#' @name efftox_fit-class
#' @aliases efftox_fit
#' @docType class
#'
#' @details
#' See \code{methods(class = "efftox_fit")} for an overview of available
#' methods.
#'
#' @slot version A list containing the versions of \pkg{trialr} and
#' \pkg{rstan} that were used when the model was fit.
#' @slot dose_indices A vector of integers representing the dose-levels under
#' consideration.
#' @slot recommended_dose An integer representing the dose-level recommended
#' for the next patient or cohort; or \code{NA} stopping is recommended.
#' @slot prob_eff The posterior mean probabilities of efficacy at doses 1:n;
#' a vector of numbers between 0 and 1.
#' @slot prob_tox The posterior mean probabilities of toxicity at doses 1:n;
#' a vector of numbers between 0 and 1.
#' @slot prob_acc_eff The posterior mean probabilities that efficacy at the
#' doses is acceptable, i.e. that it exceeds the minimum acceptable efficacy
#' threshold; a vector of numbers between 0 and 1.
#' @slot prob_acc_tox The posterior mean probabilities that toxicity at the
#' doses is acceptable, i.e. that it is less than the maximum toxicity
#' threshold; a vector of numbers between 0 and 1.
#' @slot utility The utilities of doses 1:n, calculated by plugging the
#' posterior mean probabilities of efficacy and toxicity into the utility
#' formula, as advocated by Thall & Cook. Contrast to \code{post_utility};
#' a vector of numbers.
#' @slot post_utility The posterior mean utilities of doses 1:n, calculated
#' from the posterior distributions of the utilities. This is in contrast to
#' \code{utility}, which uses plug-in posterior means of efficacy and toxicity,
#' as advocated by Thall & Cook; a vector of numbers.
#' @slot acceptable A vector of logical values to indicate whether doses 1:n
#' are acceptable, according to the rules for acceptable efficacy & toxicity,
#' and rules on not skipping untested doses.
#' @slot fit An object of class \code{\link[rstan:stanfit]{stanfit}},
#' containing the posterior samples.
#'
#' @seealso
#' \code{\link{stan_efftox}}
#' \code{\link{stan_efftox_demo}}
#' \code{\link{efftox_process}}
efftox_fit <- function(dose_indices, recommended_dose, prob_eff, prob_tox,
                       prob_acc_eff, prob_acc_tox, utility, post_utility,
                       acceptable, fit) {
  # Constructor for objects of class "efftox_fit".
  # Record the package versions used, for reproducibility.
  version <- list(
    trialr = utils::packageVersion("trialr"),
    rstan = utils::packageVersion("rstan")
  )
  # Assemble the named slots explicitly (equivalent to the nlist() shortcut).
  obj <- list(
    dose_indices = dose_indices,
    recommended_dose = recommended_dose,
    prob_eff = prob_eff,
    prob_tox = prob_tox,
    prob_acc_eff = prob_acc_eff,
    prob_acc_tox = prob_acc_tox,
    utility = utility,
    post_utility = post_utility,
    acceptable = acceptable,
    fit = fit,
    version = version
  )
  class(obj) <- "efftox_fit"
  obj
}
#' Fit an EffTox model
#'
#' Fit an EffTox model using Stan for full Bayesian inference.
#'
#' @param outcome_str A string representing the outcomes observed hitherto.
#' See \code{\link{efftox_parse_outcomes}} for a description of syntax and
#' examples. Alternatively, you may provide \code{doses_given}, \code{eff} and
#' \code{tox} parameters. See Details.
#' @param real_doses A vector of numbers.The doses under investigation. They
#' should be ordered from lowest to highest and be in consistent units.
#' E.g., #' to conduct a dose-finding trial of doses 10mg, 20mg and 50mg, use
#' c(10, 20, 50).
#' @param efficacy_hurdle Minimum acceptable efficacy probability.
#' A number between 0 and 1.
#' @param toxicity_hurdle Maximum acceptable toxicity probability.
#' A number between 0 and 1.
#' @param p_e Certainty required to infer a dose is acceptable with regards to
#' being probably efficacious; a number between 0 and 1.
#' @param p_t Certainty required to infer a dose is acceptable with regards to
#' being probably tolerable; a number between 0 and 1.
#' @param eff0 Efficacy probability required when toxicity is impossible;
#' a number between 0 and 1 (see Details).
#' @param tox1 Toxicity probability permitted when efficacy is guaranteed;
#' a number between 0 and 1 (see Details).
#' @param eff_star Efficacy probability of an equi-utility third point (see
#' Details).
#' @param tox_star Toxicity probability of an equi-utility third point (see
#' Details).
#' @param alpha_mean The prior normal mean of the intercept term in the toxicity
#' logit model. A number.
#' @param alpha_sd The prior normal standard deviation of the intercept term in
#' the toxicity logit model. A number.
#' @param beta_mean The prior normal mean of the slope term in the toxicity
#' logit model. A number.
#' @param beta_sd The prior normal standard deviation of the slope term in the
#' toxicity logit model. A number.
#' @param gamma_mean The prior mean of the intercept term in the efficacy logit model. A number.
#' @param gamma_sd The prior standard deviation of the intercept term in the efficacy logit model. A number.
#' @param zeta_mean The prior mean of the slope term in the efficacy logit model. A number.
#' @param zeta_sd The prior standard deviation of the slope term in the efficacy logit model. A number.
#' @param eta_mean The prior mean of the squared term coefficient in the efficacy logit model. A number.
#' @param eta_sd The prior standard deviation of the squared term coefficient in the efficacy logit model. A number.
#' @param psi_mean The prior mean of the association term in the combined efficacy-toxicity model. A number.
#' @param psi_sd The prior standard deviation of the association term in the combined efficacy-toxicity model. A number.
#' @param doses_given A optional vector of dose-levels given to patients
#' 1:num_patients, where 1=lowest dose, 2=second dose, etc. Only required when
#' \code{outcome_str} is not provided.
#' @param eff An optional vector of efficacy outcomes for patients
#' 1:num_patients, where 1=efficacy and 0=no efficacy. Only required when
#' \code{outcome_str} is not provided.
#' @param tox An optional vector of toxicity outcomes for patients
#' 1:num_patients, where 1=toxicity and 0=no toxicity. Only required when
#' \code{outcome_str} is not provided.
#' @param ... Extra parameters are passed to \code{rstan::sampling}. Commonly
#' used options are \code{iter}, \code{chains}, \code{warmup}, \code{cores},
#' \code{control}. \code{\link[rstan:sampling]{sampling}}.
#'
#' @details
#' The quickest and easiest way to fit an EffTox model to some observed outcomes
#' is to describe the outcomes using \pkg{trialr}'s syntax for efficacy-toxicity
#' dose-finding outcomes. See \code{\link{efftox_parse_outcomes}} for full
#' details and examples.
#'
#' Utility or attractiveness scores are calculated in EffTox using L^p norms.
#' Imagine the first quadrant of a scatter plot with prob_eff along the x-axis
#' and prob_tox along the y-axis.
#' The point (1, 0) (i.e. guaranteed efficacy & no toxicity) is the holy grail.
#' The neutral contour intersects the points (eff0, 0), (1, tox1) and
#' (eff_star, tox_star). A unique curve intersects these three points and
#' identifies a value for p, the exponent in the L^p norm. On this neutral-
#' utility contour, scores are equal to zero. A family of curves with different
#' utility scores is defined that are "parallel" to this neutral curve.
#' Points with probabilities of efficacy and toxicity that are nearer to (1, 0)
#' will yield greater scores, and vice-versa.
#'
#' @return An object of class \code{\link{efftox_fit}}
#'
#' @author Kristian Brock \email{kristian.brock@@gmail.com}
#'
#' @references
#' Thall, P., & Cook, J. (2004). Dose-Finding Based on Efficacy-Toxicity
#' Trade-Offs. Biometrics, 60(3), 684–693.
#'
#' Thall, P., Herrick, R., Nguyen, H., Venier, J., & Norris, J. (2014).
#' Effective sample size for computing prior hyperparameters in Bayesian
#' phase I-II dose-finding. Clinical Trials, 11(6), 657–666.
#' https://doi.org/10.1177/1740774514547397
#'
#' Brock, K., Billingham, L., Copland, M., Siddique, S., Sirovica, M., &
#' Yap, C. (2017). Implementing the EffTox dose-finding design in the
#' Matchpoint trial. BMC Medical Research Methodology, 17(1), 112.
#' https://doi.org/10.1186/s12874-017-0381-x
#'
#' @seealso
#' \code{\link{efftox_fit}}
#' \code{\link{stan_efftox_demo}}
#' \code{\link{efftox_process}}
#'
#' @export
#'
#' @examples
#' \dontrun{
#' # This model is presented in Thall et al. (2014)
#' mod1 <- stan_efftox('1N 2E 3B',
#' real_doses = c(1.0, 2.0, 4.0, 6.6, 10.0),
#' efficacy_hurdle = 0.5, toxicity_hurdle = 0.3,
#' p_e = 0.1, p_t = 0.1,
#' eff0 = 0.5, tox1 = 0.65,
#' eff_star = 0.7, tox_star = 0.25,
#' alpha_mean = -7.9593, alpha_sd = 3.5487,
#' beta_mean = 1.5482, beta_sd = 3.5018,
#' gamma_mean = 0.7367, gamma_sd = 2.5423,
#' zeta_mean = 3.4181, zeta_sd = 2.4406,
#' eta_mean = 0, eta_sd = 0.2,
#' psi_mean = 0, psi_sd = 1, seed = 123)
#'
#' # Shorthand for the above is:
#' mod2 <- stan_efftox_demo('1N 2E 3B', seed = 123)
#'
#' # the seed is passed to the Stan sampler. The usual Stan sampler params like
#' # cores, iter, chains etc are passed on too via the ellipsis operator.
#' }
stan_efftox <- function(outcome_str = NULL,
                        real_doses, efficacy_hurdle, toxicity_hurdle, p_e, p_t,
                        eff0, tox1, eff_star, tox_star,
                        alpha_mean, alpha_sd, beta_mean, beta_sd,
                        gamma_mean, gamma_sd, zeta_mean, zeta_sd,
                        eta_mean, eta_sd, psi_mean, psi_sd,
                        doses_given = NULL,
                        eff = NULL,
                        tox = NULL,
                        ...) {
  # Derive p, the exponent of the L^p norm, from the three fixed points on
  # the neutral-utility contour.
  p <- efftox_solve_p(eff0, tox1, eff_star, tox_star)
  # Data object passed to Stan: doses, hurdles, utility-contour and priors.
  dat <- nlist(real_doses, num_doses = length(real_doses),
               efficacy_hurdle, toxicity_hurdle,
               p_e, p_t, p, eff0, tox1, eff_star, tox_star,
               alpha_mean, alpha_sd, beta_mean, beta_sd, gamma_mean, gamma_sd,
               zeta_mean, zeta_sd, eta_mean, eta_sd, psi_mean, psi_sd
  )
  # Add outcomes, either from the vector triplet or from the outcome string.
  if(is.null(outcome_str)) {
    # Bug fix: these checks previously referenced undefined objects
    # `efficacy` and `toxicity` instead of the `eff` and `tox` parameters,
    # so the vector-input path always failed with "object not found".
    if(length(doses_given) != length(eff))
      stop('doses_given and eff vectors should have same length')
    if(length(tox) != length(eff))
      stop('tox and eff vectors should have same length')
    dat$doses <- doses_given
    dat$eff <- eff
    dat$tox <- tox
    dat$num_patients <- length(doses_given)
  } else {
    outcomes_df <- efftox_parse_outcomes(outcome_str, as.list = TRUE)
    dat$num_patients <- outcomes_df$num_patients
    dat$doses <- outcomes_df$doses
    dat$eff <- outcomes_df$eff
    dat$tox <- outcomes_df$tox
  }
  # Fit the model with Stan; extra args (seed, chains, iter, ...) forwarded.
  samp <- rstan::sampling(stanmodels$EffTox, data = dat, ...)
  # Post-process posterior samples into a dose decision (efftox_fit object).
  decision <- efftox_process(dat, samp)
  return(decision)
}
#' Fit the EffTox model presented in Thall et al. (2014)
#'
#' Fit the EffTox model presented in Thall et al. (2014) using Stan for full
#' Bayesian inference.
#'
#' @param outcome_str A string representing the outcomes observed hitherto.
#' See \code{\link{efftox_parse_outcomes}} for a description of syntax and
#' examples. Alternatively, you may provide \code{doses_given}, \code{eff} and
#' \code{tox} parameters. See Details.
#' @param ... Extra parameters are passed to \code{rstan::sampling}. Commonly
#' used options are \code{iter}, \code{chains}, \code{warmup}, \code{cores},
#' \code{control}. \code{\link[rstan:sampling]{sampling}}.
#'
#' @return An object of class \code{\link{efftox_fit}}
#'
#' @author Kristian Brock \email{kristian.brock@@gmail.com}
#'
#' @references
#' Thall, P., & Cook, J. (2004). Dose-Finding Based on Efficacy-Toxicity
#' Trade-Offs. Biometrics, 60(3), 684–693.
#'
#' Thall, P., Herrick, R., Nguyen, H., Venier, J., & Norris, J. (2014).
#' Effective sample size for computing prior hyperparameters in Bayesian
#' phase I-II dose-finding. Clinical Trials, 11(6), 657–666.
#' https://doi.org/10.1177/1740774514547397
#'
#' Brock, K., Billingham, L., Copland, M., Siddique, S., Sirovica, M., &
#' Yap, C. (2017). Implementing the EffTox dose-finding design in the
#' Matchpoint trial. BMC Medical Research Methodology, 17(1), 112.
#' https://doi.org/10.1186/s12874-017-0381-x
#'
#' @seealso
#' \code{\link{efftox_fit}}
#' \code{\link{stan_efftox}}
#' \code{\link{efftox_process}}
#'
#' @export
#'
#' @examples
#' \dontrun{
#' # This model is presented in Thall et al. (2014)
#' mod2 <- stan_efftox_demo('1N 2E 3B', seed = 123)
#'
#' # The seed is passed to the Stan sampler. The usual Stan sampler params like
#' # cores, iter, chains etc are passed on too via the ellipsis operator.
#' }
stan_efftox_demo <- function(outcome_str, ...) {
  # Doses, hurdles and priors from Thall et al. (2014).
  # Fix: the exponent p is derived inside stan_efftox via efftox_solve_p,
  # so it must not be supplied here. Previously `p = 0.9773632` matched no
  # formal argument of stan_efftox and leaked through `...` into
  # rstan::sampling as an unknown argument.
  stan_efftox(outcome_str,
              real_doses = c(1.0, 2.0, 4.0, 6.6, 10.0),
              efficacy_hurdle = 0.5, toxicity_hurdle = 0.3,
              p_e = 0.1, p_t = 0.1,
              eff0 = 0.5, tox1 = 0.65,
              eff_star = 0.7, tox_star = 0.25,
              alpha_mean = -7.9593, alpha_sd = 3.5487,
              beta_mean = 1.5482, beta_sd = 3.5018,
              gamma_mean = 0.7367, gamma_sd = 2.5423,
              zeta_mean = 3.4181, zeta_sd = 2.4406,
              eta_mean = 0, eta_sd = 0.2,
              psi_mean = 0, psi_sd = 1, ...)
}
# Print method for efftox_fit objects: shows the per-dose analysis table and
# the dose recommendation (or a stopping advice).
# Fix: S3 print methods must accept `...` to match the generic's signature,
# and conventionally return their argument invisibly.
print.efftox_fit <- function(x, ...) {
  # Per-dose posterior summaries as a data frame.
  df <- efftox_analysis_to_df(x)
  print(df)
  if(sum(x$acceptable) == 0) {
    # No dose satisfies the efficacy/toxicity acceptability rules.
    cat('The model advocates stopping.')
  } else {
    cat(paste0('The model recommends selecting dose-level ',
               x$recommended_dose, '.'))
  }
  invisible(x)
}
# Coerce an efftox_fit to a data frame by delegating to the contained
# stanfit; extra arguments (e.g. pars) are forwarded to that method.
as.data.frame.efftox_fit <- function(x, ...) {
  as.data.frame(x[["fit"]], ...)
}
# Plot posterior quantities of an efftox_fit by delegating to the stanfit
# plot method; defaults to the dose utilities.
plot.efftox_fit <- function(x, pars = 'utility', ...) {
  plot(x[["fit"]], pars = pars, ...)
}
# Usage ----
# Demo fit using the Thall et al. (2014) parameterisation; the seed is
# forwarded to the Stan sampler for reproducibility.
mod1 <- stan_efftox_demo('1N 2E 3B', seed = 123)
names(mod1)
print(mod1)
# Posterior samples as data frames: all parameters, then utility only.
as.data.frame(mod1) %>% head
as.data.frame(mod1, pars = c('utility')) %>% head
# Default plot shows dose utilities; second call shows efficacy probs.
plot(mod1)
plot(mod1, pars = 'prob_eff')
# Same model via the fully-specified interface.
# NOTE(review): stan_efftox has no `p` formal (p is computed internally),
# so `p = 0.9773632` below falls into `...` — confirm intended.
mod2 <- stan_efftox('1N 2E 3B',
                    real_doses = c(1.0, 2.0, 4.0, 6.6, 10.0),
                    efficacy_hurdle = 0.5, toxicity_hurdle = 0.3,
                    p_e = 0.1, p_t = 0.1,
                    p = 0.9773632, eff0 = 0.5, tox1 = 0.65,
                    eff_star = 0.7, tox_star = 0.25,
                    alpha_mean = -7.9593, alpha_sd = 3.5487,
                    beta_mean = 1.5482, beta_sd = 3.5018,
                    gamma_mean = 0.7367, gamma_sd = 2.5423,
                    zeta_mean = 3.4181, zeta_sd = 2.4406,
                    eta_mean = 0, eta_sd = 0.2,
                    psi_mean = 0, psi_sd = 1, seed = 123)
# chains = 6, iter = 5000
mod2
# Hand-built data list + direct Stan sampling, then post-processing.
dat <- efftox_parameters_demo()
dat$num_patients <- 3
dat$eff <- c(0, 1, 1)
dat$tox <- c(0, 0, 1)
dat$doses <- c(1, 2, 3)
samp3 <- rstan::sampling(stanmodels$EffTox, data = dat, seed = 123)
mod3 <- efftox_process(dat, samp3)
mod3
mod1
mod2
mod3
# Larger trial with sampler options passed through the ellipsis.
mod4 <- stan_efftox_demo('1NNN 2ENN 3BTE', chains = 6, iter = 5000)
mod4
# Escalating toxicity-only cohorts; the design should eventually stop.
stan_efftox_demo('1TT')
stan_efftox_demo('1TT 2TT')
stan_efftox_demo('1TT 2TT 3TT')
stan_efftox_demo('1TT 2TT 3TT 4TT') # Stops, at last
|
/Scratch/developing-stan_efftox.R
|
no_license
|
statwonk/trialr
|
R
| false
| false
| 15,510
|
r
|
library(trialr)
library(dplyr)
library(loo)
# Code ----
#' Class of models fit by \pkg{trialr} using the EffTox design.
#'
#' @name efftox_fit-class
#' @aliases efftox_fit
#' @docType class
#'
#' @details
#' See \code{methods(class = "efftox_fit")} for an overview of available
#' methods.
#'
#'
#' @slot dose_indices A vector of integers representing the dose-levels under
#' consideration.
#' @slot recommended_dose An integer representing the dose-level recommended
#' for the next patient or cohort; or \code{NA} stopping is recommended.
#' @slot prob_eff The posterior mean probabilities of efficacy at doses 1:n;
#' a vector of numbers between 0 and 1.
#' @slot prob_tox The posterior mean probabilities of toxicity at doses 1:n;
#' a vector of numbers between 0 and 1.
#' @slot prob_acc_eff The posterior mean probabilities that efficacy at the
#' doses is acceptable, i.e. that it exceeds the minimum acceptable efficacy
#' threshold; a vector of numbers between 0 and 1.
#' @slot prob_acc_tox The posterior mean probabilities that toxicity at the
#' doses is acceptable, i.e. that it is less than the maximum toxicity
#' threshold; a vector of numbers between 0 and 1.
#' @slot utility The utilities of doses 1:n, calculated by plugging the
#' posterior mean probabilities of efficacy and toxicity into the utility
#' formula, as advocated by Thall & Cook. Contrast to \code{post_utility};
#' a vector of numbers.
#' @slot post_utility The posterior mean utilities of doses 1:n, calculated
#' from the posterior distributions of the utilities. This is in contrast to
#' \code{utility}, which uses plug-in posterior means of efficacy and toxicity,
#' as advocated by Thall & Cook; a vector of numbers.
#' @slot acceptable A vector of logical values to indicate whether doses 1:n
#' are acceptable, according to the rules for acceptable efficacy & toxicity,
#' and rules on not skipping untested doses.
#' @slot fit An object of class \code{\link[rstan:stanfit]{stanfit}},
#' containing the posterior samples.
#'
#' @seealso
#' \code{\link{stan_efftox}}
#' \code{\link{stan_efftox_demo}}
#' \code{\link{efftox_process}}
# Constructor for the "efftox_fit" S3 class: bundles the posterior dose
# summaries, the dose recommendation and the underlying stanfit object into
# a single classed list.
efftox_fit <- function(dose_indices, recommended_dose, prob_eff, prob_tox,
                       prob_acc_eff, prob_acc_tox, utility, post_utility,
                       acceptable, fit) {
  # Record the package versions so a saved fit documents how it was produced.
  version <- list(
    trialr = utils::packageVersion("trialr"),
    rstan = utils::packageVersion("rstan")
  )
  out <- list(
    dose_indices = dose_indices,
    recommended_dose = recommended_dose,
    prob_eff = prob_eff,
    prob_tox = prob_tox,
    prob_acc_eff = prob_acc_eff,
    prob_acc_tox = prob_acc_tox,
    utility = utility,
    post_utility = post_utility,
    acceptable = acceptable,
    fit = fit,
    version = version
  )
  class(out) <- "efftox_fit"
  out
}
#' Fit an EffTox model
#'
#' Fit an EffTox model using Stan for full Bayesian inference.
#'
#' @param outcome_str A string representing the outcomes observed hitherto.
#' See \code{\link{efftox_parse_outcomes}} for a description of syntax and
#' examples. Alternatively, you may provide \code{doses_given}, \code{eff} and
#' \code{tox} parameters. See Details.
#' @param real_doses A vector of numbers.The doses under investigation. They
#' should be ordered from lowest to highest and be in consistent units.
#' E.g., #' to conduct a dose-finding trial of doses 10mg, 20mg and 50mg, use
#' c(10, 20, 50).
#' @param efficacy_hurdle Minimum acceptable efficacy probability.
#' A number between 0 and 1.
#' @param toxicity_hurdle Maximum acceptable toxicity probability.
#' A number between 0 and 1.
#' @param p_e Certainty required to infer a dose is acceptable with regards to
#' being probably efficacious; a number between 0 and 1.
#' @param p_t Certainty required to infer a dose is acceptable with regards to
#' being probably tolerable; a number between 0 and 1.
#' @param eff0 Efficacy probability required when toxicity is impossible;
#' a number between 0 and 1 (see Details).
#' @param tox1 Toxicity probability permitted when efficacy is guaranteed;
#' a number between 0 and 1 (see Details).
#' @param eff_star Efficacy probability of an equi-utility third point (see
#' Details).
#' @param tox_star Toxicity probability of an equi-utility third point (see
#' Details).
#' @param alpha_mean The prior normal mean of the intercept term in the toxicity
#' logit model. A number.
#' @param alpha_sd The prior normal standard deviation of the intercept term in
#' the toxicity logit model. A number.
#' @param beta_mean The prior normal mean of the slope term in the toxicity
#' logit model. A number.
#' @param beta_sd The prior normal standard deviation of the slope term in the
#' toxicity logit model. A number.
#' @param gamma_mean The prior mean of the intercept term in the efficacy logit model. A number.
#' @param gamma_sd The prior standard deviation of the intercept term in the efficacy logit model. A number.
#' @param zeta_mean The prior mean of the slope term in the efficacy logit model. A number.
#' @param zeta_sd The prior standard deviation of the slope term in the efficacy logit model. A number.
#' @param eta_mean The prior mean of the squared term coefficient in the efficacy logit model. A number.
#' @param eta_sd The prior standard deviation of the squared term coefficient in the efficacy logit model. A number.
#' @param psi_mean The prior mean of the association term in the combined efficacy-toxicity model. A number.
#' @param psi_sd The prior standard deviation of the association term in the combined efficacy-toxicity model. A number.
#' @param doses_given A optional vector of dose-levels given to patients
#' 1:num_patients, where 1=lowest dose, 2=second dose, etc. Only required when
#' \code{outcome_str} is not provided.
#' @param eff An optional vector of efficacy outcomes for patients
#' 1:num_patients, where 1=efficacy and 0=no efficacy. Only required when
#' \code{outcome_str} is not provided.
#' @param tox An optional vector of toxicity outcomes for patients
#' 1:num_patients, where 1=toxicity and 0=no toxicity. Only required when
#' \code{outcome_str} is not provided.
#' @param ... Extra parameters are passed to \code{rstan::sampling}. Commonly
#' used options are \code{iter}, \code{chains}, \code{warmup}, \code{cores},
#' \code{control}. \code{\link[rstan:sampling]{sampling}}.
#'
#' @details
#' The quickest and easiest way to fit an EffTox model to some observed outcomes
#' is to describe the outcomes using \pkg{trialr}'s syntax for efficacy-toxicity
#' dose-finding outcomes. See \code{\link{efftox_parse_outcomes}} for full
#' details and examples.
#'
#' Utility or attractiveness scores are calculated in EffTox using L^p norms.
#' Imagine the first quadrant of a scatter plot with prob_eff along the x-axis
#' and prob_tox along the y-axis.
#' The point (1, 0) (i.e. guaranteed efficacy & no toxicity) is the holy grail.
#' The neutral contour intersects the points (eff0, 0), (1, tox1) and
#' (eff_star, tox_star). A unique curve intersects these three points and
#' identifies a value for p, the exponent in the L^p norm. On this neutral-
#' utility contour, scores are equal to zero. A family of curves with different
#' utility scores is defined that are "parallel" to this neutral curve.
#' Points with probabilities of efficacy and toxicity that are nearer to (1, 0)
#' will yield greater scores, and vice-versa.
#'
#' @return An object of class \code{\link{efftox_fit}}
#'
#' @author Kristian Brock \email{kristian.brock@@gmail.com}
#'
#' @references
#' Thall, P., & Cook, J. (2004). Dose-Finding Based on Efficacy-Toxicity
#' Trade-Offs. Biometrics, 60(3), 684–693.
#'
#' Thall, P., Herrick, R., Nguyen, H., Venier, J., & Norris, J. (2014).
#' Effective sample size for computing prior hyperparameters in Bayesian
#' phase I-II dose-finding. Clinical Trials, 11(6), 657–666.
#' https://doi.org/10.1177/1740774514547397
#'
#' Brock, K., Billingham, L., Copland, M., Siddique, S., Sirovica, M., &
#' Yap, C. (2017). Implementing the EffTox dose-finding design in the
#' Matchpoint trial. BMC Medical Research Methodology, 17(1), 112.
#' https://doi.org/10.1186/s12874-017-0381-x
#'
#' @seealso
#' \code{\link{efftox_fit}}
#' \code{\link{stan_efftox_demo}}
#' \code{\link{efftox_process}}
#'
#' @export
#'
#' @examples
#' \dontrun{
#' # This model is presented in Thall et al. (2014)
#' mod1 <- stan_efftox('1N 2E 3B',
#' real_doses = c(1.0, 2.0, 4.0, 6.6, 10.0),
#' efficacy_hurdle = 0.5, toxicity_hurdle = 0.3,
#' p_e = 0.1, p_t = 0.1,
#' eff0 = 0.5, tox1 = 0.65,
#' eff_star = 0.7, tox_star = 0.25,
#' alpha_mean = -7.9593, alpha_sd = 3.5487,
#' beta_mean = 1.5482, beta_sd = 3.5018,
#' gamma_mean = 0.7367, gamma_sd = 2.5423,
#' zeta_mean = 3.4181, zeta_sd = 2.4406,
#' eta_mean = 0, eta_sd = 0.2,
#' psi_mean = 0, psi_sd = 1, seed = 123)
#'
#' # Shorthand for the above is:
#' mod2 <- stan_efftox_demo('1N 2E 3B', seed = 123)
#'
#' # the seed is passed to the Stan sampler. The usual Stan sampler params like
#' # cores, iter, chains etc are passed on too via the ellipsis operator.
#' }
stan_efftox <- function(outcome_str = NULL,
                        real_doses, efficacy_hurdle, toxicity_hurdle, p_e, p_t,
                        eff0, tox1, eff_star, tox_star,
                        alpha_mean, alpha_sd, beta_mean, beta_sd,
                        gamma_mean, gamma_sd, zeta_mean, zeta_sd,
                        eta_mean, eta_sd, psi_mean, psi_sd,
                        doses_given = NULL,
                        eff = NULL,
                        tox = NULL,
                        ...) {
  # Derive p, the exponent of the L^p norm, from the three fixed points on
  # the neutral-utility contour.
  p <- efftox_solve_p(eff0, tox1, eff_star, tox_star)
  # Data object passed to Stan: doses, hurdles, utility-contour and priors.
  dat <- nlist(real_doses, num_doses = length(real_doses),
               efficacy_hurdle, toxicity_hurdle,
               p_e, p_t, p, eff0, tox1, eff_star, tox_star,
               alpha_mean, alpha_sd, beta_mean, beta_sd, gamma_mean, gamma_sd,
               zeta_mean, zeta_sd, eta_mean, eta_sd, psi_mean, psi_sd
  )
  # Add outcomes, either from the vector triplet or from the outcome string.
  if(is.null(outcome_str)) {
    # Bug fix: these checks previously referenced undefined objects
    # `efficacy` and `toxicity` instead of the `eff` and `tox` parameters,
    # so the vector-input path always failed with "object not found".
    if(length(doses_given) != length(eff))
      stop('doses_given and eff vectors should have same length')
    if(length(tox) != length(eff))
      stop('tox and eff vectors should have same length')
    dat$doses <- doses_given
    dat$eff <- eff
    dat$tox <- tox
    dat$num_patients <- length(doses_given)
  } else {
    outcomes_df <- efftox_parse_outcomes(outcome_str, as.list = TRUE)
    dat$num_patients <- outcomes_df$num_patients
    dat$doses <- outcomes_df$doses
    dat$eff <- outcomes_df$eff
    dat$tox <- outcomes_df$tox
  }
  # Fit the model with Stan; extra args (seed, chains, iter, ...) forwarded.
  samp <- rstan::sampling(stanmodels$EffTox, data = dat, ...)
  # Post-process posterior samples into a dose decision (efftox_fit object).
  decision <- efftox_process(dat, samp)
  return(decision)
}
#' Fit the EffTox model presented in Thall et al. (2014)
#'
#' Fit the EffTox model presented in Thall et al. (2014) using Stan for full
#' Bayesian inference.
#'
#' @param outcome_str A string representing the outcomes observed hitherto.
#' See \code{\link{efftox_parse_outcomes}} for a description of syntax and
#' examples. Alternatively, you may provide \code{doses_given}, \code{eff} and
#' \code{tox} parameters. See Details.
#' @param ... Extra parameters are passed to \code{rstan::sampling}. Commonly
#' used options are \code{iter}, \code{chains}, \code{warmup}, \code{cores},
#' \code{control}. \code{\link[rstan:sampling]{sampling}}.
#'
#' @return An object of class \code{\link{efftox_fit}}
#'
#' @author Kristian Brock \email{kristian.brock@@gmail.com}
#'
#' @references
#' Thall, P., & Cook, J. (2004). Dose-Finding Based on Efficacy-Toxicity
#' Trade-Offs. Biometrics, 60(3), 684–693.
#'
#' Thall, P., Herrick, R., Nguyen, H., Venier, J., & Norris, J. (2014).
#' Effective sample size for computing prior hyperparameters in Bayesian
#' phase I-II dose-finding. Clinical Trials, 11(6), 657–666.
#' https://doi.org/10.1177/1740774514547397
#'
#' Brock, K., Billingham, L., Copland, M., Siddique, S., Sirovica, M., &
#' Yap, C. (2017). Implementing the EffTox dose-finding design in the
#' Matchpoint trial. BMC Medical Research Methodology, 17(1), 112.
#' https://doi.org/10.1186/s12874-017-0381-x
#'
#' @seealso
#' \code{\link{efftox_fit}}
#' \code{\link{stan_efftox}}
#' \code{\link{efftox_process}}
#'
#' @export
#'
#' @examples
#' \dontrun{
#' # This model is presented in Thall et al. (2014)
#' mod2 <- stan_efftox_demo('1N 2E 3B', seed = 123)
#'
#' # The seed is passed to the Stan sampler. The usual Stan sampler params like
#' # cores, iter, chains etc are passed on too via the ellipsis operator.
#' }
stan_efftox_demo <- function(outcome_str, ...) {
  # Doses, hurdles and priors from Thall et al. (2014).
  # Fix: the exponent p is derived inside stan_efftox via efftox_solve_p,
  # so it must not be supplied here. Previously `p = 0.9773632` matched no
  # formal argument of stan_efftox and leaked through `...` into
  # rstan::sampling as an unknown argument.
  stan_efftox(outcome_str,
              real_doses = c(1.0, 2.0, 4.0, 6.6, 10.0),
              efficacy_hurdle = 0.5, toxicity_hurdle = 0.3,
              p_e = 0.1, p_t = 0.1,
              eff0 = 0.5, tox1 = 0.65,
              eff_star = 0.7, tox_star = 0.25,
              alpha_mean = -7.9593, alpha_sd = 3.5487,
              beta_mean = 1.5482, beta_sd = 3.5018,
              gamma_mean = 0.7367, gamma_sd = 2.5423,
              zeta_mean = 3.4181, zeta_sd = 2.4406,
              eta_mean = 0, eta_sd = 0.2,
              psi_mean = 0, psi_sd = 1, ...)
}
# Print method for efftox_fit objects: shows the per-dose analysis table and
# the dose recommendation (or a stopping advice).
# Fix: S3 print methods must accept `...` to match the generic's signature,
# and conventionally return their argument invisibly.
print.efftox_fit <- function(x, ...) {
  # Per-dose posterior summaries as a data frame.
  df <- efftox_analysis_to_df(x)
  print(df)
  if(sum(x$acceptable) == 0) {
    # No dose satisfies the efficacy/toxicity acceptability rules.
    cat('The model advocates stopping.')
  } else {
    cat(paste0('The model recommends selecting dose-level ',
               x$recommended_dose, '.'))
  }
  invisible(x)
}
# Coerce an efftox_fit to a data frame by delegating to the contained
# stanfit; extra arguments (e.g. pars) are forwarded to that method.
as.data.frame.efftox_fit <- function(x, ...) {
  as.data.frame(x[["fit"]], ...)
}
# Plot posterior quantities of an efftox_fit by delegating to the stanfit
# plot method; defaults to the dose utilities.
plot.efftox_fit <- function(x, pars = 'utility', ...) {
  plot(x[["fit"]], pars = pars, ...)
}
# Usage ----
# Demo fit using the Thall et al. (2014) parameterisation; the seed is
# forwarded to the Stan sampler for reproducibility.
mod1 <- stan_efftox_demo('1N 2E 3B', seed = 123)
names(mod1)
print(mod1)
# Posterior samples as data frames: all parameters, then utility only.
as.data.frame(mod1) %>% head
as.data.frame(mod1, pars = c('utility')) %>% head
# Default plot shows dose utilities; second call shows efficacy probs.
plot(mod1)
plot(mod1, pars = 'prob_eff')
# Same model via the fully-specified interface.
# NOTE(review): stan_efftox has no `p` formal (p is computed internally),
# so `p = 0.9773632` below falls into `...` — confirm intended.
mod2 <- stan_efftox('1N 2E 3B',
                    real_doses = c(1.0, 2.0, 4.0, 6.6, 10.0),
                    efficacy_hurdle = 0.5, toxicity_hurdle = 0.3,
                    p_e = 0.1, p_t = 0.1,
                    p = 0.9773632, eff0 = 0.5, tox1 = 0.65,
                    eff_star = 0.7, tox_star = 0.25,
                    alpha_mean = -7.9593, alpha_sd = 3.5487,
                    beta_mean = 1.5482, beta_sd = 3.5018,
                    gamma_mean = 0.7367, gamma_sd = 2.5423,
                    zeta_mean = 3.4181, zeta_sd = 2.4406,
                    eta_mean = 0, eta_sd = 0.2,
                    psi_mean = 0, psi_sd = 1, seed = 123)
# chains = 6, iter = 5000
mod2
# Hand-built data list + direct Stan sampling, then post-processing.
dat <- efftox_parameters_demo()
dat$num_patients <- 3
dat$eff <- c(0, 1, 1)
dat$tox <- c(0, 0, 1)
dat$doses <- c(1, 2, 3)
samp3 <- rstan::sampling(stanmodels$EffTox, data = dat, seed = 123)
mod3 <- efftox_process(dat, samp3)
mod3
mod1
mod2
mod3
# Larger trial with sampler options passed through the ellipsis.
mod4 <- stan_efftox_demo('1NNN 2ENN 3BTE', chains = 6, iter = 5000)
mod4
# Escalating toxicity-only cohorts; the design should eventually stop.
stan_efftox_demo('1TT')
stan_efftox_demo('1TT 2TT')
stan_efftox_demo('1TT 2TT 3TT')
stan_efftox_demo('1TT 2TT 3TT 4TT') # Stops, at last
|
# Original code sourced from https://www.r-bloggers.com/exploratory-factor-analysis-in-r/
# Exploratory factor analysis of the psych package's bfi personality data.

# Fix: install psych only when it is missing, instead of unconditionally
# re-installing it on every run; then load it.
if (!requireNamespace("psych", quietly = TRUE)) {
  install.packages("psych")
}
library(psych)

# Load the bfi dataset shipped with psych.
bfi_data <- bfi
# Remove rows with missing values and keep only complete cases.
bfi_data <- bfi_data[complete.cases(bfi_data), ]
# Create the correlation matrix from bfi_data.
bfi_cor <- cor(bfi_data)
# Factor analysis of the correlation matrix with 6 factors.
factors_data <- fa(r = bfi_cor, nfactors = 6)
# Print the factor loadings and model analysis.
factors_data
|
/EFA Intro.R
|
no_license
|
ashishgalande/MR-Sample-Code
|
R
| false
| false
| 546
|
r
|
# Original code sourced from https://www.r-bloggers.com/exploratory-factor-analysis-in-r/
# Exploratory factor analysis of the psych package's bfi personality data.

# Fix: install psych only when it is missing, instead of unconditionally
# re-installing it on every run; then load it.
if (!requireNamespace("psych", quietly = TRUE)) {
  install.packages("psych")
}
library(psych)

# Load the bfi dataset shipped with psych.
bfi_data <- bfi
# Remove rows with missing values and keep only complete cases.
bfi_data <- bfi_data[complete.cases(bfi_data), ]
# Create the correlation matrix from bfi_data.
bfi_cor <- cor(bfi_data)
# Factor analysis of the correlation matrix with 6 factors.
factors_data <- fa(r = bfi_cor, nfactors = 6)
# Print the factor loadings and model analysis.
factors_data
|
\name{qdensity}
\alias{qdensity}
\title{Draw a univariate density plot}
\usage{
qdensity(x, data, binwidth = NULL, main = "", xlim = NULL, ylim = NULL, xlab = NULL,
ylab = NULL)
}
\arguments{
\item{x}{variable name which designates variable
displayed on the horizontal axis}
\item{data}{a mutaframe created by \code{\link{qdata}}}
\item{main}{the main title}
\item{xlim}{a numeric vector of length 2 (like
\code{c(x0, x1)}) for x-axis limits; it will be
calculated from the data limits if not specified
(\code{NULL}). Note when \code{x0 > x1}, the axis
direction will be reversed (i.e. from larger values to
small values)}
\item{ylim}{y-axis limits; similar to \code{xlim}}
\item{xlab}{x-axis title}
\item{ylab}{y-axis title}
\item{binwidth}{the bin width (\code{range(x) / bins} by
default)}
}
\description{
Draw a univariate density plot, with a rug plot underneath.
}
\details{
Common interactions are documented in \code{\link{common_key_press}}.
Specific interactions include: Arrow \code{Up}/\code{Down} in-/de-creases
size of points; Arrow \code{Left}/\code{Right} de-/in-creases binwidth for
density; Key \code{Z} toggles zoom on/off (default is off); mouse click &
drag will specify a zoom window.
Note there are two short tickmarks in the plot denoting the binwidth.
}
\examples{
library(cranvas)
### (1) ames housing data
qames <- qdata(ameshousing)
qdensity(saleprice, data = qames)
### (2) tennis data
qtennis <- qdata(tennis)
qdensity(first.serve.pct, data = qtennis)
qdensity(second.serve.pts, data = qtennis)
qdensity(serve.speed, data = qtennis)
record_selector(name, data = qtennis)
### (3) pollen data
if (require("animation")) {
data(pollen, package = "animation")
qpollen <- qdata(pollen)
print(qdensity(RIDGE, data = qpollen))
}
### (4) flea (with colors)
data(flea, package = "tourr")
qflea <- qdata(flea, color = species)
qdensity(tars1, data = qflea)
qdensity(tars2, data = qflea)
qdensity(aede1, data = qflea)
qdensity(aede3, data = qflea)
cranvas_off()
}
\seealso{
Other plots: \code{\link{qbar}}; \code{\link{qboxplot}};
\code{\link{qhist}}, \code{\link{qspine}};
\code{\link{qmval}}; \code{\link{qparallel}};
\code{\link{qtime}}
}
|
/man/qdensity.Rd
|
no_license
|
eejd/cranvas
|
R
| false
| false
| 2,231
|
rd
|
\name{qdensity}
\alias{qdensity}
\title{Draw a univariate density plot}
\usage{
qdensity(x, data, binwidth = NULL, main = "", xlim = NULL, ylim = NULL, xlab = NULL,
ylab = NULL)
}
\arguments{
\item{x}{variable name which designates variable
displayed on the horizontal axis}
\item{data}{a mutaframe created by \code{\link{qdata}}}
\item{main}{the main title}
\item{xlim}{a numeric vector of length 2 (like
\code{c(x0, x1)}) for x-axis limits; it will be
calculated from the data limits if not specified
(\code{NULL}). Note when \code{x0 > x1}, the axis
direction will be reversed (i.e. from larger values to
small values)}
\item{ylim}{y-axis limits; similar to \code{xlim}}
\item{xlab}{x-axis title}
\item{ylab}{y-axis title}
\item{binwidth}{the bin width (\code{range(x) / bins} by
default)}
}
\description{
Draw a univariate density plot, with a rug plot underneath.
}
\details{
Common interactions are documented in \code{\link{common_key_press}}.
Specific interactions include: Arrow \code{Up}/\code{Down} in-/de-creases
size of points; Arrow \code{Left}/\code{Right} de-/in-creases binwidth for
density; Key \code{Z} toggles zoom on/off (default is off); mouse click &
drag will specify a zoom window.
Note there are two short tickmarks in the plot denoting the binwidth.
}
\examples{
library(cranvas)
### (1) ames housing data
qames <- qdata(ameshousing)
qdensity(saleprice, data = qames)
### (2) tennis data
qtennis <- qdata(tennis)
qdensity(first.serve.pct, data = qtennis)
qdensity(second.serve.pts, data = qtennis)
qdensity(serve.speed, data = qtennis)
record_selector(name, data = qtennis)
### (3) pollen data
if (require("animation")) {
data(pollen, package = "animation")
qpollen <- qdata(pollen)
print(qdensity(RIDGE, data = qpollen))
}
### (4) flea (with colors)
data(flea, package = "tourr")
qflea <- qdata(flea, color = species)
qdensity(tars1, data = qflea)
qdensity(tars2, data = qflea)
qdensity(aede1, data = qflea)
qdensity(aede3, data = qflea)
cranvas_off()
}
\seealso{
Other plots: \code{\link{qbar}}; \code{\link{qboxplot}};
\code{\link{qhist}}, \code{\link{qspine}};
\code{\link{qmval}}; \code{\link{qparallel}};
\code{\link{qtime}}
}
|
# Plot 1: histogram of Global Active Power for 2007-02-01 .. 2007-02-02.
# NOTE(review): setwd() with a machine-specific absolute path is kept to
# preserve behaviour, but makes the script non-portable — prefer running
# from the project directory with relative paths.
setwd('E:/MOOCs/Coursera/Data Science - Specialization/Exploratory Data Analysis/Course Project 1')
filename <- 'household_power_consumption.txt'
colNames <- c("date", "time", "global_active_power", "global_reactive_power", "voltage", "global_intensity", "sub_metering1", "sub_metering2", "sub_metering3")
# Fix: use <- for assignment (was `=`). First two columns are character,
# the remaining seven numeric.
colClasses <- c("character", "character", rep("numeric", 7))
# "?" encodes missing values in this dataset.
df <- read.table(filename, header=TRUE, sep=";", col.names=colNames, colClasses=colClasses, na.strings="?")
df$date <- as.Date(df$date, format="%d/%m/%Y")
# Keep only the two target days.
df <- df[df$date >= as.Date("2007-02-01") & df$date<=as.Date("2007-02-02"),]
# Render the histogram to a 480x480 PNG and close the device.
png(filename="plot1.png", width=480, height=480, units="px")
hist(df$global_active_power, col="red", xlab="Global Active Power (kilowatts)", main="Global Active Power")
dev.off()
|
/ExploratoryDataAnalysis/CourseProject1/plot1.R
|
no_license
|
mjimcua/datasciencecoursera-1
|
R
| false
| false
| 785
|
r
|
# Plot 1: histogram of Global Active Power for 2007-02-01 .. 2007-02-02.
# NOTE(review): setwd() with a machine-specific absolute path is kept to
# preserve behaviour, but makes the script non-portable — prefer running
# from the project directory with relative paths.
setwd('E:/MOOCs/Coursera/Data Science - Specialization/Exploratory Data Analysis/Course Project 1')
filename <- 'household_power_consumption.txt'
colNames <- c("date", "time", "global_active_power", "global_reactive_power", "voltage", "global_intensity", "sub_metering1", "sub_metering2", "sub_metering3")
# Fix: use <- for assignment (was `=`). First two columns are character,
# the remaining seven numeric.
colClasses <- c("character", "character", rep("numeric", 7))
# "?" encodes missing values in this dataset.
df <- read.table(filename, header=TRUE, sep=";", col.names=colNames, colClasses=colClasses, na.strings="?")
df$date <- as.Date(df$date, format="%d/%m/%Y")
# Keep only the two target days.
df <- df[df$date >= as.Date("2007-02-01") & df$date<=as.Date("2007-02-02"),]
# Render the histogram to a 480x480 PNG and close the device.
png(filename="plot1.png", width=480, height=480, units="px")
hist(df$global_active_power, col="red", xlab="Global Active Power (kilowatts)", main="Global Active Power")
dev.off()
|
#Carregar bibliotecas
options(max.print = 99999999)
#Carrega functions
library(tools)
source(file_path_as_absolute("functions.R"))
#Configuracoes
DATABASE <- "icwsm-2016"
clearConsole();
dadosQ1 <- query("SELECT id, q1 as resposta, textParser, textoParserEmoticom as textoCompleto, hashtags FROM tweets WHERE situacao = 'S'")
#dadosQ2 <- query("SELECT id, q2 as resposta, textParser, hashtags, emoticonPos, emoticonNeg FROM tweets WHERE situacao = 'S' AND q1 = '1' AND q2 IS NOT NULL")
#dadosQ3 <- query("SELECT id, q3 as resposta, textParser, hashtags, emoticonPos, emoticonNeg FROM tweets WHERE situacao = 'S' AND q2 = '1' AND q3 IS NOT NULL")
dados <- dadosQ1
dados$resposta[is.na(dados$resposta)] <- 0
dados$resposta <- as.factor(dados$resposta)
clearConsole()
if (!require("text2vec")) {
install.packages("text2vec")
}
library(text2vec)
library(data.table)
library(SnowballC)
setDT(dados)
setkey(dados, id)
# Tokenize a character vector, then Porter-stem each token (English).
# Returns a list of character vectors, one per input document — the shape
# expected by text2vec's `itoken(tokenizer = ...)`.
stem_tokenizer1 <- function(x) {
  lapply(word_tokenizer(x), SnowballC::wordStem, language = "en")
}
prep_fun = tolower
tok_fun = word_tokenizer
it_train = itoken(dados$textParser,
preprocessor = prep_fun,
#tokenizer = stem_tokenizer1,
tokenizer = tok_fun,
ids = dados$id,
progressbar = TRUE)
stop_words = tm::stopwords("en")
#vocab = create_vocabulary(it_train, ngram = c(1L, 1L), stopwords = stop_words)
vocab = create_vocabulary(it_train, stopwords = stop_words)
vocab
vectorizer = vocab_vectorizer(vocab)
#pruned_vocab = prune_vocabulary(vocab,
# term_count_min = 25#,
# #doc_proportion_max = 0.99,
# #doc_proportion_min = 0.0001
# )
#print("Resultado Final")
#initFileLog("mais_teste.txt")
#view(pruned_vocab)
#finishFileLog("mais_teste.txt")
#inspect(pruned_vocab)
#dump(as.data.frame(pruned_vocab), "testes/mais.csv")
#vectorizer = vocab_vectorizer(pruned_vocab)
dtm_train_texto = create_dtm(it_train, vectorizer)
it_train = itoken(dados$hashtags,
preprocessor = prep_fun,
tokenizer = tok_fun,
#tokenizer = stem_tokenizer1,
ids = dados$id,
progressbar = TRUE)
vocabHashTags = create_vocabulary(it_train)
#pruned_vocab_hash = prune_vocabulary(vocabHashTags,
# term_count_min = 3,
# doc_proportion_max = 0.99,
# doc_proportion_min = 0.0001)
#pruned_vocab_hash
vectorizerHashTags = vocab_vectorizer(vocabHashTags)
dtm_train_hash_tags = create_dtm(it_train, vectorizerHashTags)
dataTexto <- as.matrix(dtm_train_texto)
dataFrameTexto <- as.data.frame(as.matrix(dtm_train_texto))
dataFrameHash <- as.data.frame(as.matrix(dtm_train_hash_tags))
clearConsole()
if (!require("pacman")) install.packages("pacman")
pacman::p_load_current_gh("trinker/lexicon", "trinker/sentimentr")
if (!require("pacman")) install.packages("pacman")
pacman::p_load(sentimentr)
# Sentiment features: average sentiment of the full text and of the hashtags,
# plus coarse 5-level codes in {-2, -1, 0, 1, 2}.
sentiments <- sentiment_by(dados$textoCompleto)
dados$sentiment <- sentiments$ave_sentiment
sentimentsHash <- sentiment_by(dados$hashtags)
dados$sentimentH <- sentimentsHash$ave_sentiment
# FIX: was `dados$sentimentHdados$emotiom <- 0` — two statements fused by a
# typo; initialize the text-sentiment code column to 0 (neutral).
dados$emotiom <- 0
# FIX: assign the broad band (< 0) before the narrow band (< -0.5); in the
# original order the -1 assignment always overwrote -2, so -2 never survived
# (the positive side already does 1 then 2, confirming the intended scheme).
dados$emotiom[sentiments$ave_sentiment < 0] <- -1
dados$emotiom[sentiments$ave_sentiment < -0.5] <- -2
dados$emotiom[sentiments$ave_sentiment > 0] <- 1
dados$emotiom[sentiments$ave_sentiment > 0.5] <- 2
# Same 5-level coding for hashtag sentiment (same ordering fix applied).
sentiments <- sentiment_by(dados$hashtags)
dados$hashEmo <- 0
dados$hashEmo[sentiments$ave_sentiment < 0] <- -1
dados$hashEmo[sentiments$ave_sentiment < -0.5] <- -2
dados$hashEmo[sentiments$ave_sentiment > 0] <- 1
dados$hashEmo[sentiments$ave_sentiment > 0.5] <- 2
# Persist one row's sentiment scores back to the `tweets` table.
# `x` is a row vector from apply(): x[1] = id, x[2] = sentiment,
# x[3] = sentimentH.
# NOTE(review): the SQL is built by string concatenation — acceptable for
# numeric scores, but switch to a parameterized query if inputs can ever be
# non-numeric.
fore <- function(x) {
  sql <- paste0(
    "UPDATE `tweets` SET sentiment = ", x[2],
    ", sentimentH = ", x[3],
    " WHERE id = ", x[1]
  )
  query(sql)
}
apply(subset(dados, select = c(id, sentiment, sentimentH)), 1, fore)
if (!require("doMC")) {
install.packages("doMC")
}
library(doMC)
registerDoMC(4)
if (!require("rowr")) {
install.packages("rowr")
}
library(rowr)
maFinal <- cbind.fill(dados, dataFrameTexto)
maFinal <- cbind.fill(maFinal, dataFrameHash)
maFinal <- subset(maFinal, select = -c(textParser, id, hashtags))
save(maFinal, file="denovo_99_completo.Rda")
#dump(maFinal, "testes/maFinal_no.csv")
#maFinal = read.csv("testes/tweet_data_my.csv", header = TRUE)
#maFinal = read.csv("testes/maFinal_no.csv", header = TRUE)
library(tools)
library(caret)
if (!require("doMC")) {
install.packages("doMC")
}
library(doMC)
registerDoMC(4)
print("Treinando")
fit <- train(x = subset(maFinal, select = -c(resposta)),
y = maFinal$resposta,
method = "svmRadial",
trControl = trainControl(method = "cv", number = 5)
)
fit
if (!require("mlbench")) {
install.packages("mlbench")
}
library(mlbench)
importance <- varImp(fit, scale=FALSE)
head(importance)
#print(importance)
plot(importance, top = 40)
#C Accuracy Kappa
#0.25 0.5603493 0
#0.50 0.5603493 0
#1.00 0.5603493 0
#C RMSE Rsquared
#0.25 0.6012984 0.001059074
#0.50 0.6011936 0.001059074
#1.00 0.6011833 0.001676222
#C RMSE Rsquared
#0.25 0.6011149 0.001161257
#0.50 0.6009995 0.001161257
#1.00 0.6009580 0.001144398
#install.packages("tm")
#install.packages("Rstem")
#install.packages("sentimentr")
#Recall
#https://www.r-bloggers.com/sentiment-analysis-with-machine-learning-in-r/
if (!require("pacman")) install.packages("pacman")
pacman::p_load_current_gh("trinker/lexicon", "trinker/sentimentr")
if (!require("pacman")) install.packages("pacman")
pacman::p_load(sentimentr)
sentiment(maFinal$pg)
a <- sentiment_by(dadosQ1$textParser)
a
a$ave_sentiment[1:10]
summary(a$ave_sentiment)
#dadosQ1
#(out <- with(presidential_debates_2012, sentiment_by(dialogue, list(person, time))))
#(out <- with(dadosQ1, sentiment_by(dadosQ1$textParser, list(id))))
#plot(out)
if (!require("rJava")) {
library(rJava)
}
if (!require("NLP")) {
install.packages(c("NLP", "openNLP", "RWeka", "qdap"))
}
install.packages("openNLPmodels.en",
repos = "http://datacube.wu.ac.at/",
type = "source")
library(RWeka)
library(NLP)
library(openNLP)
library(magrittr)
save(dados, file="teste.Rda")
bora <- as.String(dados$textoCompleto[12])
bora
#bio_annotations <- annotate(bora, list(sent_ann, word_ann))
#bio_annotations
#class(bio_annotations)
#head(bio_annotations)
#bio_doc <- AnnotatedPlainTextDocument(bora, bio_annotations)
#bio_doc
#sents(bio_doc) %>% head(2)
word_ann <- Maxent_Word_Token_Annotator()
sent_ann <- Maxent_Sent_Token_Annotator()
person_ann <- Maxent_Entity_Annotator(kind = "person")
location_ann <- Maxent_Entity_Annotator(kind = "location")
organization_ann <- Maxent_Entity_Annotator(kind = "organization")
money_ann <- Maxent_Entity_Annotator(kind = "money")
pipeline <- list(sent_ann,
word_ann,
person_ann,
location_ann,
organization_ann,
money_ann)
bio_annotations <- annotate(bora, pipeline)
bio_doc <- AnnotatedPlainTextDocument(bora, bio_annotations)
# Extract entities from an AnnotatedPlainTextDocument
# Extract entity strings from an AnnotatedPlainTextDocument.
# When `kind` is supplied (e.g. "person", "location"), return only
# annotations whose "kind" feature matches; otherwise return every
# annotation typed "entity".
entities <- function(doc, kind) {
  txt <- doc$content
  # First (and only) annotation set attached to the document.
  ann <- annotations(doc)[[1]]
  if (hasArg(kind)) {
    kinds <- sapply(ann$features, `[[`, "kind")
    return(txt[ann[kinds == kind]])
  }
  txt[ann[ann$type == "entity"]]
}
bora
teste <- entities(bio_doc, kind = "person")
ncol(teste)
teste
teste <- entities(bio_doc, kind = "location")
nrow(teste)
teste2 <- as.data.frame(teste)
entities(bio_doc, kind = "organization")
entities(bio_doc, kind = "money")
#com analise de sentimentos das palabras e hashtags
#C Accuracy Kappa
#0.25 0.5603493 0
#0.50 0.5603493 0
#1.00 0.5603493 0
library(NLP)
library(openNLP)
library(magrittr)
filenames <- Sys.glob("files/*.txt")
filenames
texts <- filenames %>%
lapply(readLines) %>%
lapply(paste0, collapse = " ") %>%
lapply(as.String)
names(texts) <- basename(filenames)
str(texts, max.level = 1)
# Run an openNLP annotation pipeline over `doc` and wrap the result
# together with the text as an AnnotatedPlainTextDocument.
annotate_entities <- function(doc, annotation_pipeline) {
  AnnotatedPlainTextDocument(doc, annotate(doc, annotation_pipeline))
}
itinerants_pipeline <- list(
Maxent_Sent_Token_Annotator(),
Maxent_Word_Token_Annotator(),
Maxent_Entity_Annotator(kind = "person"),
Maxent_Entity_Annotator(kind = "location")
)
texts_annotated <- as.String(texts) %>%
lapply(annotate_entities, itinerants_pipeline)
# Extract entity strings from an AnnotatedPlainTextDocument.
# NOTE(review): verbatim redefinition of the `entities` function declared
# earlier in this script — redundant but harmless.
entities <- function(doc, kind) {
  s <- doc$content
  # First (and only) annotation set attached to the document.
  a <- annotations(doc)[[1]]
  if(hasArg(kind)) {
    # Keep only annotations whose "kind" feature matches the request.
    k <- sapply(a$features, `[[`, "kind")
    s[a[k == kind]]
  } else {
    # Default: every annotation typed as an entity.
    s[a[a$type == "entity"]]
  }
}
places <- texts_annotated %>%
lapply(entities, kind = "location")
people <- texts_annotated %>%
lapply(entities, kind = "person")
places %>%
sapply(length)
places %>%
lapply(unique) %>%
sapply(length)
people %>%
sapply(length)
if (!require("ggmap")) {
install.packages("ggmap")
}
library(ggmap)
places[["cartwright-peter.txt"]]
people[["cartwright-peter.txt"]]
people
all_places <- union(places[["pratt-parley.txt"]], places[["cartwright-peter.txt"]]) %>% union(places[["lee-jarena.txt"]])
all_places
#https://rpubs.com/lmullen/nlp-chapter
|
/icwsm-2016/teste.R
|
no_license
|
MarcosGrzeca/R-testes
|
R
| false
| false
| 9,388
|
r
|
#Carregar bibliotecas
options(max.print = 99999999)
#Carrega functions
library(tools)
source(file_path_as_absolute("functions.R"))
#Configuracoes
DATABASE <- "icwsm-2016"
clearConsole();
dadosQ1 <- query("SELECT id, q1 as resposta, textParser, textoParserEmoticom as textoCompleto, hashtags FROM tweets WHERE situacao = 'S'")
#dadosQ2 <- query("SELECT id, q2 as resposta, textParser, hashtags, emoticonPos, emoticonNeg FROM tweets WHERE situacao = 'S' AND q1 = '1' AND q2 IS NOT NULL")
#dadosQ3 <- query("SELECT id, q3 as resposta, textParser, hashtags, emoticonPos, emoticonNeg FROM tweets WHERE situacao = 'S' AND q2 = '1' AND q3 IS NOT NULL")
dados <- dadosQ1
dados$resposta[is.na(dados$resposta)] <- 0
dados$resposta <- as.factor(dados$resposta)
clearConsole()
if (!require("text2vec")) {
install.packages("text2vec")
}
library(text2vec)
library(data.table)
library(SnowballC)
setDT(dados)
setkey(dados, id)
stem_tokenizer1 =function(x) {
tokens = word_tokenizer(x)
lapply(tokens, SnowballC::wordStem, language="en")
}
prep_fun = tolower
tok_fun = word_tokenizer
it_train = itoken(dados$textParser,
preprocessor = prep_fun,
#tokenizer = stem_tokenizer1,
tokenizer = tok_fun,
ids = dados$id,
progressbar = TRUE)
stop_words = tm::stopwords("en")
#vocab = create_vocabulary(it_train, ngram = c(1L, 1L), stopwords = stop_words)
vocab = create_vocabulary(it_train, stopwords = stop_words)
vocab
vectorizer = vocab_vectorizer(vocab)
#pruned_vocab = prune_vocabulary(vocab,
# term_count_min = 25#,
# #doc_proportion_max = 0.99,
# #doc_proportion_min = 0.0001
# )
#print("Resultado Final")
#initFileLog("mais_teste.txt")
#view(pruned_vocab)
#finishFileLog("mais_teste.txt")
#inspect(pruned_vocab)
#dump(as.data.frame(pruned_vocab), "testes/mais.csv")
#vectorizer = vocab_vectorizer(pruned_vocab)
dtm_train_texto = create_dtm(it_train, vectorizer)
it_train = itoken(dados$hashtags,
preprocessor = prep_fun,
tokenizer = tok_fun,
#tokenizer = stem_tokenizer1,
ids = dados$id,
progressbar = TRUE)
vocabHashTags = create_vocabulary(it_train)
#pruned_vocab_hash = prune_vocabulary(vocabHashTags,
# term_count_min = 3,
# doc_proportion_max = 0.99,
# doc_proportion_min = 0.0001)
#pruned_vocab_hash
vectorizerHashTags = vocab_vectorizer(vocabHashTags)
dtm_train_hash_tags = create_dtm(it_train, vectorizerHashTags)
dataTexto <- as.matrix(dtm_train_texto)
dataFrameTexto <- as.data.frame(as.matrix(dtm_train_texto))
dataFrameHash <- as.data.frame(as.matrix(dtm_train_hash_tags))
clearConsole()
if (!require("pacman")) install.packages("pacman")
pacman::p_load_current_gh("trinker/lexicon", "trinker/sentimentr")
if (!require("pacman")) install.packages("pacman")
pacman::p_load(sentimentr)
# Sentiment features: average sentiment of the full text and of the hashtags,
# plus coarse 5-level codes in {-2, -1, 0, 1, 2}.
sentiments <- sentiment_by(dados$textoCompleto)
dados$sentiment <- sentiments$ave_sentiment
sentimentsHash <- sentiment_by(dados$hashtags)
dados$sentimentH <- sentimentsHash$ave_sentiment
# FIX: was `dados$sentimentHdados$emotiom <- 0` — two statements fused by a
# typo; initialize the text-sentiment code column to 0 (neutral).
dados$emotiom <- 0
# FIX: assign the broad band (< 0) before the narrow band (< -0.5); in the
# original order the -1 assignment always overwrote -2, so -2 never survived
# (the positive side already does 1 then 2, confirming the intended scheme).
dados$emotiom[sentiments$ave_sentiment < 0] <- -1
dados$emotiom[sentiments$ave_sentiment < -0.5] <- -2
dados$emotiom[sentiments$ave_sentiment > 0] <- 1
dados$emotiom[sentiments$ave_sentiment > 0.5] <- 2
# Same 5-level coding for hashtag sentiment (same ordering fix applied).
sentiments <- sentiment_by(dados$hashtags)
dados$hashEmo <- 0
dados$hashEmo[sentiments$ave_sentiment < 0] <- -1
dados$hashEmo[sentiments$ave_sentiment < -0.5] <- -2
dados$hashEmo[sentiments$ave_sentiment > 0] <- 1
dados$hashEmo[sentiments$ave_sentiment > 0.5] <- 2
fore <- function(x) {
query(paste("UPDATE `tweets` SET sentiment = ", x[2], ", sentimentH = ", x[3], " WHERE id = ", x[1], "", sep=""));
}
apply(subset(dados, select = c(id, sentiment, sentimentH)), 1, fore)
if (!require("doMC")) {
install.packages("doMC")
}
library(doMC)
registerDoMC(4)
if (!require("rowr")) {
install.packages("rowr")
}
library(rowr)
maFinal <- cbind.fill(dados, dataFrameTexto)
maFinal <- cbind.fill(maFinal, dataFrameHash)
maFinal <- subset(maFinal, select = -c(textParser, id, hashtags))
save(maFinal, file="denovo_99_completo.Rda")
#dump(maFinal, "testes/maFinal_no.csv")
#maFinal = read.csv("testes/tweet_data_my.csv", header = TRUE)
#maFinal = read.csv("testes/maFinal_no.csv", header = TRUE)
library(tools)
library(caret)
if (!require("doMC")) {
install.packages("doMC")
}
library(doMC)
registerDoMC(4)
print("Treinando")
fit <- train(x = subset(maFinal, select = -c(resposta)),
y = maFinal$resposta,
method = "svmRadial",
trControl = trainControl(method = "cv", number = 5)
)
fit
if (!require("mlbench")) {
install.packages("mlbench")
}
library(mlbench)
importance <- varImp(fit, scale=FALSE)
head(importance)
#print(importance)
plot(importance, top = 40)
#C Accuracy Kappa
#0.25 0.5603493 0
#0.50 0.5603493 0
#1.00 0.5603493 0
#C RMSE Rsquared
#0.25 0.6012984 0.001059074
#0.50 0.6011936 0.001059074
#1.00 0.6011833 0.001676222
#C RMSE Rsquared
#0.25 0.6011149 0.001161257
#0.50 0.6009995 0.001161257
#1.00 0.6009580 0.001144398
#install.packages("tm")
#install.packages("Rstem")
#install.packages("sentimentr")
#Recall
#https://www.r-bloggers.com/sentiment-analysis-with-machine-learning-in-r/
if (!require("pacman")) install.packages("pacman")
pacman::p_load_current_gh("trinker/lexicon", "trinker/sentimentr")
if (!require("pacman")) install.packages("pacman")
pacman::p_load(sentimentr)
sentiment(maFinal$pg)
a <- sentiment_by(dadosQ1$textParser)
a
a$ave_sentiment[1:10]
summary(a$ave_sentiment)
#dadosQ1
#(out <- with(presidential_debates_2012, sentiment_by(dialogue, list(person, time))))
#(out <- with(dadosQ1, sentiment_by(dadosQ1$textParser, list(id))))
#plot(out)
if (!require("rJava")) {
library(rJava)
}
if (!require("NLP")) {
install.packages(c("NLP", "openNLP", "RWeka", "qdap"))
}
install.packages("openNLPmodels.en",
repos = "http://datacube.wu.ac.at/",
type = "source")
library(RWeka)
library(NLP)
library(openNLP)
library(magrittr)
save(dados, file="teste.Rda")
bora <- as.String(dados$textoCompleto[12])
bora
#bio_annotations <- annotate(bora, list(sent_ann, word_ann))
#bio_annotations
#class(bio_annotations)
#head(bio_annotations)
#bio_doc <- AnnotatedPlainTextDocument(bora, bio_annotations)
#bio_doc
#sents(bio_doc) %>% head(2)
word_ann <- Maxent_Word_Token_Annotator()
sent_ann <- Maxent_Sent_Token_Annotator()
person_ann <- Maxent_Entity_Annotator(kind = "person")
location_ann <- Maxent_Entity_Annotator(kind = "location")
organization_ann <- Maxent_Entity_Annotator(kind = "organization")
money_ann <- Maxent_Entity_Annotator(kind = "money")
pipeline <- list(sent_ann,
word_ann,
person_ann,
location_ann,
organization_ann,
money_ann)
bio_annotations <- annotate(bora, pipeline)
bio_doc <- AnnotatedPlainTextDocument(bora, bio_annotations)
# Extract entities from an AnnotatedPlainTextDocument
entities <- function(doc, kind) {
s <- doc$content
a <- annotations(doc)[[1]]
if(hasArg(kind)) {
k <- sapply(a$features, `[[`, "kind")
s[a[k == kind]]
} else {
s[a[a$type == "entity"]]
}
}
bora
teste <- entities(bio_doc, kind = "person")
ncol(teste)
teste
teste <- entities(bio_doc, kind = "location")
nrow(teste)
teste2 <- as.data.frame(teste)
entities(bio_doc, kind = "organization")
entities(bio_doc, kind = "money")
#com analise de sentimentos das palabras e hashtags
#C Accuracy Kappa
#0.25 0.5603493 0
#0.50 0.5603493 0
#1.00 0.5603493 0
library(NLP)
library(openNLP)
library(magrittr)
filenames <- Sys.glob("files/*.txt")
filenames
texts <- filenames %>%
lapply(readLines) %>%
lapply(paste0, collapse = " ") %>%
lapply(as.String)
names(texts) <- basename(filenames)
str(texts, max.level = 1)
annotate_entities <- function(doc, annotation_pipeline) {
annotations <- annotate(doc, annotation_pipeline)
AnnotatedPlainTextDocument(doc, annotations)
}
itinerants_pipeline <- list(
Maxent_Sent_Token_Annotator(),
Maxent_Word_Token_Annotator(),
Maxent_Entity_Annotator(kind = "person"),
Maxent_Entity_Annotator(kind = "location")
)
texts_annotated <- as.String(texts) %>%
lapply(annotate_entities, itinerants_pipeline)
entities <- function(doc, kind) {
s <- doc$content
a <- annotations(doc)[[1]]
if(hasArg(kind)) {
k <- sapply(a$features, `[[`, "kind")
s[a[k == kind]]
} else {
s[a[a$type == "entity"]]
}
}
places <- texts_annotated %>%
lapply(entities, kind = "location")
people <- texts_annotated %>%
lapply(entities, kind = "person")
places %>%
sapply(length)
places %>%
lapply(unique) %>%
sapply(length)
people %>%
sapply(length)
if (!require("ggmap")) {
install.packages("ggmap")
}
library(ggmap)
places[["cartwright-peter.txt"]]
people[["cartwright-peter.txt"]]
people
all_places <- union(places[["pratt-parley.txt"]], places[["cartwright-peter.txt"]]) %>% union(places[["lee-jarena.txt"]])
all_places
#https://rpubs.com/lmullen/nlp-chapter
|
find_EWSR1_FLI1_fusions <- function(
  breakpoints,
  GOI_list,
  patient_df,
  dilution_df
) {
  # Detect EWSR1 fusions with FLI1 and other ETS genes of interest (GOI)
  # from per-sample breakpoint ranges, annotate detection counts onto
  # patient_df and dilution_df, and return the cleaned fusion ranges.
  #
  # Args:
  #   breakpoints: named list of breakpoint range objects, one per sample.
  #   GOI_list:    named list of GOI range objects (EWSR1, FLI1, ERG, ...).
  #   patient_df / dilution_df: data frames with a `Sample` column to annotate.
  # Returns: list(fusion_nos = list(patient_df, dilution_df),
  #               GOI_fusions = per-GOI, per-sample fusion ranges).
  #
  # NOTE(review): depends on globals `func_dir` and `binary_calls` set by
  # the calling pipeline — confirm both exist before calling.
  find_fusions <- dget(paste0(func_dir, "find_fusions.R"))
  all_GOI <- c(
    GOI_list$EWSR1,
    GOI_list$FLI1,
    GOI_list$ERG,
    GOI_list$ETV1,
    GOI_list$ETV4,
    GOI_list$FEV
  )
  # Count one fusion result; NA placeholders and empty hits count as 0.
  # (Extracted: this closure appeared twice, verbatim, in the original.)
  count_fusions <- function(y) {
    if (length(y) > 0) {
      if (!is.na(y)) {
        return(length(y))
      } else {
        return(0)
      }
    } else {
      return(0)
    }
  }
  # Find breakpoint overlaps with EWSR1 and drop samples with no result:
  EWSR1_fusions <- lapply(breakpoints, find_fusions, GOI_list$EWSR1)
  EWSR1_fusions <- EWSR1_fusions[!is.na(EWSR1_fusions)]
  print("EWSR1 fusions per sample:")
  print(sapply(EWSR1_fusions, length))
  # Build the partner-side ("join") ranges for each sample.
  # FIX: the original used exists("join_ranges") to choose between creating
  # and appending; inside a function exists() also matches any `join_ranges`
  # left in the global workspace, silently corrupting the result. Preallocate
  # the list instead. (Also removed `seqnams`/`coord`, which were computed
  # but never used.)
  join_ranges <- vector("list", length(EWSR1_fusions))
  for (i in seq_along(EWSR1_fusions)) {
    if (length(EWSR1_fusions[[i]]) > 0) {
      join_ranges[[i]] <- GRanges(
        seqnames = EWSR1_fusions[[i]]$join_chr,
        ranges = IRanges(
          start = EWSR1_fusions[[i]]$join_coord,
          end = EWSR1_fusions[[i]]$join_coord
        ),
        strand = "*",
        join_chr = "chr22",
        join_coord = start(EWSR1_fusions[[i]])
      )
    } else {
      # NA placeholder keeps sample alignment for downstream counting.
      join_ranges[[i]] <- NA
    }
  }
  names(join_ranges) <- names(EWSR1_fusions)
  # Identify EWSR1 fusions with each GOI:
  GOI_fusions <- lapply(GOI_list, function(x) {
    fusions <- lapply(join_ranges, find_fusions, x)
    names(fusions) <- names(join_ranges)
    return(fusions)
  })
  # Count GOI fusions per sample; strip run prefixes/suffixes from names so
  # they match the `Sample` columns:
  fusion_nos <- lapply(GOI_fusions, function(x) {
    counts <- sapply(x, count_fusions)
    names(counts) <- gsub(
      "_.*$", "",
      gsub("409_", "", names(x))
    )
    return(counts)
  })
  # Match FLI1 fusion counts to samples in patient_df (missing samples -> 0):
  m <- match(patient_df$Sample, names(fusion_nos$FLI1))
  patient_df$Detected_FLI1_EWSR1_fusions <- fusion_nos$FLI1[m]
  patient_df$Detected_FLI1_EWSR1_fusions[is.na(patient_df$Detected_FLI1_EWSR1_fusions)] <- 0
  # ... and likewise for dilution_df:
  m <- match(dilution_df$Sample, names(fusion_nos$FLI1))
  dilution_df$Detected_FLI1_EWSR1_fusions <- fusion_nos$FLI1[m]
  dilution_df$Detected_FLI1_EWSR1_fusions[is.na(dilution_df$Detected_FLI1_EWSR1_fusions)] <- 0
  # Optionally binarise counts to "yes"/"no".
  # FIX: the original first replaced zeros with "no" and then tested
  # as.numeric(column) > 0; once "no" was present, as.numeric produced NAs
  # and the NA logical subscript made the assignment error whenever any
  # sample had zero fusions. Derive yes/no from the numeric counts instead.
  if (binary_calls) {
    patient_df$Detected_FLI1_EWSR1_fusions <-
      ifelse(as.numeric(patient_df$Detected_FLI1_EWSR1_fusions) > 0, "yes", "no")
    colnames(patient_df) <- gsub("fusions", "fusion", colnames(patient_df))
    dilution_df$Detected_FLI1_EWSR1_fusions <-
      ifelse(as.numeric(dilution_df$Detected_FLI1_EWSR1_fusions) > 0, "yes", "no")
    colnames(dilution_df) <- gsub("fusions", "fusion", colnames(dilution_df))
  }
  # Identify false-positive fusions: join ranges falling outside all GOI.
  # (Same preallocation fix as for join_ranges above.)
  false_fusions <- vector("list", length(join_ranges))
  for (i in seq_along(join_ranges)) {
    false_fusions[[i]] <- find_fusions(
      query_coord = join_ranges[[i]],
      subject_coord = all_GOI,
      invert = TRUE
    )
  }
  names(false_fusions) <- names(join_ranges)
  # Count false fusions per sample:
  false_fusion_nos <- sapply(false_fusions, count_fusions)
  names(false_fusion_nos) <- gsub(
    "_.*$", "",
    gsub("409_", "", names(false_fusions))
  )
  # Match false-fusion counts to samples in patient_df:
  m <- match(patient_df$Sample, names(false_fusion_nos))
  patient_df$False_EWSR1_fusions <- false_fusion_nos[m]
  patient_df$False_EWSR1_fusions[is.na(patient_df$False_EWSR1_fusions)] <- 0
  # ... and in dilution_df:
  m <- match(dilution_df$Sample, names(false_fusion_nos))
  dilution_df$False_EWSR1_fusions <- false_fusion_nos[m]
  dilution_df$False_EWSR1_fusions[is.na(dilution_df$False_EWSR1_fusions)] <- 0
  # Clean up GOI_fusions: drop the EWSR1 self-entry, NA placeholders, and
  # samples left with no fusions.
  GOI_fusions <- GOI_fusions[!(names(GOI_fusions) %in% "EWSR1")]
  GOI_fusions <- lapply(GOI_fusions, function(x) {
    cleaned <- lapply(x, function(y) y[!is.na(y)])
    return(cleaned[lengths(cleaned) != 0])
  })
  return(
    list(
      fusion_nos = list(
        patient_df = patient_df,
        dilution_df = dilution_df
      ),
      GOI_fusions = GOI_fusions
    )
  )
}
|
/functions/find_EWSR1_FLI1_fusions.R
|
no_license
|
james-torpy/ewing_ctDNA
|
R
| false
| false
| 5,788
|
r
|
find_EWSR1_FLI1_fusions <- function(
breakpoints,
GOI_list,
patient_df,
dilution_df
) {
# load function and define GOI regions:
find_fusions <- dget(paste0(func_dir, "find_fusions.R"))
all_GOI <- c(
GOI_list$EWSR1,
GOI_list$FLI1,
GOI_list$ERG,
GOI_list$ETV1,
GOI_list$ETV4,
GOI_list$FEV
)
# find breakpoint overlaps with EWSR1:
EWSR1_fusions <- lapply(breakpoints, find_fusions, GOI_list$EWSR1)
# remove NAs:
EWSR1_fusions <- EWSR1_fusions[!is.na(EWSR1_fusions)]
# count ranges:
print("EWSR1 fusions per sample:")
print(sapply(EWSR1_fusions, length))
# determine whether joining ranges overlap FLI1, ETV1 or ERG:
for (i in 1:length(EWSR1_fusions)) {
if (length(EWSR1_fusions[[i]]) > 0) {
seqnams <- gsub(
":.*$", "",
gsub("^.*chr", "chr", EWSR1_fusions[[i]]$join)
)
coord <- as.numeric(
gsub(
"[^0-9.-]", "",
gsub("^.*chr.*:", "", EWSR1_fusions[[i]]$join)
)
)
if (!exists("join_ranges")) {
join_ranges <- list(
GRanges(
seqnames = EWSR1_fusions[[i]]$join_chr,
ranges = IRanges(
start = EWSR1_fusions[[i]]$join_coord,
end = EWSR1_fusions[[i]]$join_coord
),
strand = "*",
join_chr = "chr22",
join_coord = start(EWSR1_fusions[[i]])
)
)
} else {
join_ranges[[i]] <- GRanges(
seqnames = EWSR1_fusions[[i]]$join_chr,
ranges = IRanges(
start = EWSR1_fusions[[i]]$join_coord,
end = EWSR1_fusions[[i]]$join_coord
),
strand = "*",
join_chr = "chr22",
join_coord = start(EWSR1_fusions[[i]])
)
}
} else {
if (!exists("join_ranges")) {
join_ranges <- list(NA)
} else {
join_ranges[[i]] <- NA
}
}
}
names(join_ranges) <- names(EWSR1_fusions)
# identify EWSR1 fusions with GOI:
GOI_fusions <- lapply(GOI_list, function(x) {
for (i in 1:length(join_ranges)) {
if (i==1) {
fusions <- list(find_fusions(join_ranges[[i]], x))
} else {
fusions[[i]] <- find_fusions(join_ranges[[i]], x)
}
}
names(fusions) <- names(join_ranges)
return(fusions)
})
# count GOI fusions:
fusion_nos <- lapply(GOI_fusions, function(x) {
lengths <- sapply(x, function(y) {
if (length(y) > 0) {
if (!is.na(y)) {
return(length(y))
} else {
return(0)
}
} else {
return(0)
}
})
names(lengths) <- gsub(
"_.*$", "",
gsub("409_", "", names(x))
)
return(lengths)
})
# match fusion detections with sample numbers and add to patient_df:
m <- match(patient_df$Sample, names(fusion_nos$FLI1))
patient_df$Detected_FLI1_EWSR1_fusions = fusion_nos$FLI1[m]
patient_df$Detected_FLI1_EWSR1_fusions[is.na(patient_df$Detected_FLI1_EWSR1_fusions)] <- 0
# match fusion detections with sample numbers and add to patient_df:
m <- match(dilution_df$Sample, names(fusion_nos$FLI1))
dilution_df$Detected_FLI1_EWSR1_fusions = fusion_nos$FLI1[m]
dilution_df$Detected_FLI1_EWSR1_fusions[is.na(dilution_df$Detected_FLI1_EWSR1_fusions)] <- 0
# change EWSR1 fusions to yes/no if necessary:
if (binary_calls) {
patient_df$Detected_FLI1_EWSR1_fusions[as.numeric(patient_df$Detected_FLI1_EWSR1_fusions) == 0] <- "no"
patient_df$Detected_FLI1_EWSR1_fusions[as.numeric(patient_df$Detected_FLI1_EWSR1_fusions) > 0] <- "yes"
colnames(patient_df) <- gsub("fusions", "fusion", colnames(patient_df))
dilution_df$Detected_FLI1_EWSR1_fusions[as.numeric(dilution_df$Detected_FLI1_EWSR1_fusions) == 0] <- "no"
dilution_df$Detected_FLI1_EWSR1_fusions[as.numeric(dilution_df$Detected_FLI1_EWSR1_fusions) > 0] <- "yes"
colnames(dilution_df) <- gsub("fusions", "fusion", colnames(dilution_df))
}
# identify false positive fusions:
for (i in 1:length(join_ranges)) {
if (i==1) {
false_fusions <- list(
find_fusions(
query_coord = join_ranges[[i]],
subject_coord = all_GOI,
invert = T
)
)
} else {
false_fusions[[i]] <- find_fusions(
query_coord = join_ranges[[i]],
subject_coord = all_GOI,
invert = T
)
}
}
names(false_fusions) <- names(join_ranges)
# count GOI fusions:
false_fusion_nos <- sapply(false_fusions, function(y) {
if (length(y) > 0) {
if (!is.na(y)) {
return(length(y))
} else {
return(0)
}
} else {
return(0)
}
})
names(false_fusion_nos) <- gsub(
"_.*$", "",
gsub("409_", "", names(false_fusions))
)
# match false fusion detections with sample numbers and add to patient_df:
m <- match(patient_df$Sample, names(false_fusion_nos))
patient_df$False_EWSR1_fusions = false_fusion_nos[m]
patient_df$False_EWSR1_fusions[is.na(patient_df$False_EWSR1_fusions)] <- 0
# match fusion detections with sample numbers and add to dilution_df:
m <- match(dilution_df$Sample, names(false_fusion_nos))
dilution_df$False_EWSR1_fusions = false_fusion_nos[m]
dilution_df$False_EWSR1_fusions[is.na(dilution_df$False_EWSR1_fusions)] <- 0
# clean up GOI_fusions:
GOI_fusions <- GOI_fusions[!(names(GOI_fusions) %in% "EWSR1")]
GOI_fusions <- lapply(GOI_fusions, function(x) {
temp <- lapply(x, function(y) y[!is.na(y)])
return(temp[lengths(temp) != 0])
})
return(
list(
fusion_nos = list(
patient_df = patient_df,
dilution_df = dilution_df
),
GOI_fusions = GOI_fusions
)
)
}
|
# Plot 3: household energy sub-metering (three series) over 1-2 Feb 2007.
# Read the file: semicolon-separated, '?' marks missing values.
# FIX: spell out header = TRUE (T is an ordinary, reassignable variable).
data <- read.table("household_power_consumption.txt",
                   header = TRUE,
                   sep = ";",
                   na.strings = "?")
# Keep only the two analysis days.
usable <- subset(data, Date %in% c("1/2/2007", "2/2/2007"))
# Combine date and time strings, then parse; %R is shorthand for %H:%M.
usable$datetime <- paste(usable$Date, usable$Time)
usable$datetime <- strptime(usable$datetime, "%d/%m/%Y %R")
# Open the PNG device (defaults to 480x480 px).
png("plot3.png")
# Base series: sub-metering 1 in black.
plot(usable$datetime,
     usable$Sub_metering_1,
     type = "l",
     ylab = "Energy sub metering",
     xlab = "")
# Overlay sub-metering 2 in red.
lines(usable$datetime,
      usable$Sub_metering_2,
      type = "l",
      col = "red")
# Overlay sub-metering 3 in blue.
lines(usable$datetime,
      usable$Sub_metering_3,
      type = "l",
      col = "blue")
# Legend matching the three series colours.
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"),
       lwd = 1)
# Flush the plot to disk and close the device.
dev.off()
|
/plot3.R
|
no_license
|
kanwarujjaval/ExData_Plotting1
|
R
| false
| false
| 831
|
r
|
#read file
data <- read.table("household_power_consumption.txt",
header=T,
sep=";",
na.strings="?")
#extract subset data
usable <- subset(data, Date %in% c("1/2/2007","2/2/2007"))
#convert date and time from string to Date
usable$datetime <- paste(usable$Date, usable$Time)
usable$datetime <- strptime(usable$datetime, "%d/%m/%Y %R")
#open png device
png("plot3.png")
plot(usable$datetime,
usable$Sub_metering_1,
type="l",
ylab="Energy sub metering",
xlab="")
#add metering2 data
lines(usable$datetime,
usable$Sub_metering_2,
type="l",
col="red")
#add metering3 data
lines(usable$datetime,
usable$Sub_metering_3,
type="l",
col="blue")
#add legend
legend("topright",
legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
col=c("black","red","blue"),
lwd=1)
#put to png and close the png dev
dev.off()
|
# install.packages("randtoolbox")
library(ggplot2, quietly = TRUE)
library(randtoolbox, quietly = TRUE)
## library(tools, quietly=TRUE)
## library(scales, quietly=TRUE)

greyscale <- TRUE
n <- 400

# --- Sobol (quasi-random) sequence: n points in the unit square -----------
sobol_points <- sobol(n, dim = 2)
sobol_frame <- data.frame(x = sobol_points[, 1], y = sobol_points[, 2])

# Scatter plot of the Sobol points, axis labels suppressed.
pdf("2D-sobol-sequence.pdf", width = 5, height = 5)
print(
  ggplot(sobol_frame, aes(x, y)) +
    geom_point() +
    ylab("") +
    xlab("")
)
dev.off()

# --- Mersenne Twister (pseudo-random) sequence, fixed seed ----------------
set.generator("MersenneTwister", initialization = "init2002", resolution = 53, seed = 12345)
mersenne_frame <- data.frame(x = runif(n), y = runif(n))

# Matching scatter plot for the pseudo-random points.
pdf("2D-mersenne-sequence.pdf", width = 5, height = 5)
print(
  ggplot(mersenne_frame, aes(x, y)) +
    geom_point() +
    ylab("") +
    xlab("")
)
dev.off()
|
/tex/report/graphics/quasi-random-graphs.R
|
no_license
|
HIPERFIT/vectorprogramming
|
R
| false
| false
| 989
|
r
|
#install.packages("randtoolbox")
library(ggplot2, quietly=TRUE)
library(randtoolbox, quietly=TRUE)
## library(tools, quietly=TRUE)
## library(scales, quietly=TRUE)
greyscale <- TRUE
n <- 400
# Generate Sobol sequence
sobol_sequence <- sobol(n, dim = 2)
sobol_frame <- data.frame(x=sobol_sequence[,1],y=sobol_sequence[,2])
# Plot Sobol sequence
pdf("2D-sobol-sequence.pdf", width=5, height=5)
sobol_plot <- ggplot(data = sobol_frame, aes(x,y))
sobol_plot <- sobol_plot + geom_point()
sobol_plot <- sobol_plot + ylab("") + xlab("")
print(sobol_plot)
dev.off()
# Generate MersenneTwister sequence
set.generator("MersenneTwister", initialization="init2002", resolution=53, seed=12345)
mersenne_frame <- data.frame(x=runif(n), y=runif(n))
# Plot
pdf("2D-mersenne-sequence.pdf", width=5, height=5)
mersenne_plot <- ggplot(data = mersenne_frame, aes(x,y))
mersenne_plot <- mersenne_plot + geom_point()
mersenne_plot <- mersenne_plot + ylab("") + xlab("")
print(mersenne_plot)
dev.off()
|
#!/usr/bin/env Rscript
# Aggregate water-column community data to taxonomic order level and fix
# the plotting order of the `order` factor.
#library(scales)
library(ggplot2)
library(phyloseq)
library(tidyverse)
# NOTE(review): hard-coded server path and workspace file — this script only
# runs inside the original analysis environment.
setwd("/data/projects/glyphosate/reads/mothur_processed/")
load("glyphosate_mothur_in_phyloseq.RData")
# get data per water OTU, setting threshold for samples and clusters
community_subset_water <- droplevels(subset(mothur_ra_melt_mean, days > 40
	& Abundance > 0.15 & habitat == "water"))
# Sum abundances per day/order/nucleic-acid/treatment; the spread/gather
# round-trip inserts explicit 0 rows for order/sample combinations that are
# absent (NOTE(review): spread/gather are superseded by pivot_wider/longer).
community_subset_water %>%
	group_by(new_day, order, nucleic_acid, treatment) %>%
	summarise(Abundance2 = sum(Abundance)) %>%
	ungroup() %>%
	# Replace missing values by 0
	spread(key = order, value = Abundance2) %>%
	replace(., is.na(.), 0) %>%
	gather(key = order, value = Abundance2, -c(new_day, nucleic_acid, treatment)) ->
	order_sums_water
# Fix factor levels so orders plot grouped by parent taxon.
order_sums_water$order <- factor(order_sums_water$order,
	levels = c(
		# alphaproteos
		"Caulobacterales", #
		"Rhizobiales", #
		"Rhodobacterales", #
		"Rhodospirillales", #
		"Sneathiellales", #
		"Sphingomonadales", #
		"Parvibaculales",
		"Thalassobaculales",
		# gammaproteos
		"Aeromonadales", #
		"Alteromonadales", #
		"Betaproteobacteriales", #
		"Gammaproteobacteria_Incertae_Sedis",
		"Oceanospirillales", #
		"Pseudomonadales", #
		"Xanthomonadales", #
		# Actinobacteria
		"Corynebacteriales",
		# Bacteroidia
		"Bacillales", #
		"Bacteroidia_unclassified",
		"Chitinophagales", #
		"Flavobacteriales", #
		"Sphingobacteriales",
		# Planctomycetacia
		"Planctomycetales", #
		"OM190_or", #
		# Verrucomicrobia
		"Verrucomicrobiales" #
	))
# assign specific colour to make plot distuingishable
fill_values_water <- c("Aeromonadales" = "green",
"Alteromonadales" = "#e6194B",
"Bacillales" = "red",
"Bacteroidia_unclassified" = "maroon2",
"Betaproteobacteriales" = "#3cb44b",
"Caulobacterales" = "#ffe119",
"Chitinophagales" = "#4363d8",
"Corynebacteriales" = "darkblue",
"Cytophagales" = "blue",
"Flavobacteriales" = "#f58231",
"Gammaproteobacteria_Incertae_Sedis" = "black",
"Gaiellales" = "black",
"Oceanospirillales" = "maroon4",
"OM190_or" = "grey80",
"Opitutales" = "yellow",
"Sneathiellales" = "#42d4f4",
"Parvibaculales" = "#f032e6",
"Planctomycetales" = "yellow",
"Pseudomonadales" = "#fabebe",
"Rhizobiales" = "#469990",
"Rhodobacterales" = "#000000",
"Rhodospirillales" = "#9A6324",
"Sphingobacteriales" = "#fffac8",
"Sphingomonadales" = "#800000",
"Thalassobaculales" = "#a9a9a9",
"Verrucomicrobiales" = "turquoise1",
"Xanthomonadales" = "orange"
)
# sort and rename factor levels
order_sums_water$treatment <- factor(order_sums_water$treatment,
levels = c("glyph", "control"))
levels(order_sums_water$treatment) <- c("Treatment", "Control")
order_sums_water$nucleic_acid <- factor(order_sums_water$nucleic_acid,
levels = c("dna", "cdna"))
levels(order_sums_water$nucleic_acid) <- c("16S rRNA gene", "16S rRNA")
# plot an array of 4 geom_areas
water_areas <- ggplot(order_sums_water, aes(x = new_day, y = Abundance2, fill = order)) +
geom_area(stat = "identity") +
geom_vline(aes(xintercept = 0), linetype = "dashed", size = 1) +
scale_fill_manual(breaks = levels(order_sums_water$order), values = fill_values_water) +
guides(colour = FALSE, size = FALSE, width = FALSE, fill = guide_legend(ncol = 1,
keyheight = 1.2, label.theme = element_text(size = 12, face = "italic",
angle = 0), (title = NULL))) +
scale_x_continuous(expand = c(0, 0), breaks = scales::pretty_breaks(n = 10)) +
scale_y_continuous(expand = c(0, 0)) +
theme_bw() +
theme(panel.grid.major = element_line(colour = NA, size = 0.2),
panel.grid.minor = element_line(colour = NA, size = 0.5),
axis.title = element_text(size = 16, face = "bold"),
axis.title.y = element_text(angle = 90, vjust = 1),
axis.text = element_text(size = 14),
legend.title = element_text(size = 14, face = "bold"),
legend.text = element_text(size = 12),
strip.text.x = element_text(size = 14, colour = "black", face = "bold"),
legend.background = element_rect(fill = "grey90", linetype = "solid")) +
labs(x = "Days", y = "Relative abundance [%]") +
# theme(legend.position = "bottom", legend.direction = "horizontal") +
facet_wrap(~ treatment + nucleic_acid, nrow = 2)
ggsave(water_areas, file = paste(plot_path, "Figure_2_water_communities.pdf", sep = ""),
device = "pdf", width = 26.0, height = 18, dpi = 300, unit = "cm")
# get data per OTU, setting threshold for samples and clusters
community_subset_biofilm <- droplevels(subset(mothur_ra_melt_mean, days > 40
& Abundance > 0.15 & habitat == "biofilm"))
community_subset_biofilm %>%
group_by(new_day, order, nucleic_acid, treatment) %>%
summarise(Abundance2 = sum(Abundance)) %>%
ungroup() %>%
# Replace missing values by 0
spread(key = order, value = Abundance2) %>%
replace(., is.na(.), 0) %>%
gather(key = order, value = Abundance2, -c(new_day, nucleic_acid, treatment)) ->
order_sums_biofilm
# recycle values from water plot
order_sums_biofilm$order <- factor(order_sums_biofilm$order,
levels = c(
# alphaproteos
"Caulobacterales", #
"Rhizobiales", #
"Rhodobacterales", #
"Rhodospirillales", #
"Sneathiellales", #
"Sphingomonadales", #
"Parvibaculales",
"Thalassobaculales",
# gammaproteos
"Aeromonadales", #
"Alteromonadales", #
"Betaproteobacteriales", #
"Gammaproteobacteria_Incertae_Sedis",
"Oceanospirillales", #
"Pseudomonadales", #
"Xanthomonadales", #
# Actinobacteria
"Corynebacteriales",
# Bacteroidia
"Bacillales", #
"Bacteroidia_unclassified",
"Chitinophagales", #
"Flavobacteriales", #
"Sphingobacteriales",
# Planctomycetacia
"Planctomycetales", #
"OM190_or", #
# Verrucomicrobia
"Verrucomicrobiales" #
))
fill_values_biofilm <- fill_values_water
# sort and rename factor levels
order_sums_biofilm$treatment <- factor(order_sums_biofilm$treatment,
levels = c("glyph", "control"))
levels(order_sums_biofilm$treatment) <- c("Treatment", "Control")
order_sums_biofilm$nucleic_acid <- factor(order_sums_biofilm$nucleic_acid,
levels = c("dna", "cdna"))
levels(order_sums_biofilm$nucleic_acid) <- c("16S rRNA gene", "16S rRNA")
# biofilm are plot for SI
biofilm_areas <- ggplot(order_sums_biofilm, aes(x = new_day, y = Abundance2, fill = order)) +
geom_area(stat = "identity") +
geom_vline(aes(xintercept = 0), linetype = "dashed", size = 1) +
scale_fill_manual(breaks = levels(order_sums_biofilm$order), values = fill_values_biofilm) +
guides(color = FALSE) +
guides(size = FALSE) +
guides(width = FALSE) +
guides(fill = guide_legend(label.theme = element_text(size = 12, face = "italic",
angle = 0), ncol = 1, keyheight = 1.2, (title = NULL))) +
scale_x_continuous(expand = c(0, 0), breaks = scales::pretty_breaks(n = 10)) +
scale_y_continuous(expand = c(0, 0)) +
theme_bw() +
theme(panel.grid.major = element_line(colour = NA, size = 0.2),
panel.grid.minor = element_line(colour = NA, size = 0.5),
axis.title = element_text(size = 16, face = "bold"),
axis.title.y = element_text(angle = 90, vjust = 1),
axis.text = element_text(size = 14),
legend.title = element_text(size = 14, face = "bold"),
legend.text = element_text(size = 12),
strip.text.x = element_text(size = 14, colour = "black", face = "bold"),
legend.background = element_rect(fill = "grey90", linetype = "solid")) +
labs(x = "Days", y = "Relative abundance [%]") +
facet_wrap(~ treatment + nucleic_acid, nrow = 2)
ggsave(biofilm_areas, file = paste(plot_path, "SI_4_biofilm_communities.pdf", sep = ""),
device = "pdf", width = 26.0, height = 18, dpi = 300, unit = "cm")
|
/16S-analysis_phyloseq/Figure_02_community_area_plots.r
|
no_license
|
RJ333/Glyphosate_gene_richness
|
R
| false
| false
| 7,756
|
r
|
#!/usr/bin/env Rscript
# Stacked-area plots of order-level relative abundances over time, faceted by
# treatment (glyphosate vs control) x nucleic acid (16S rRNA gene vs 16S rRNA):
# Figure 2 (water column) and SI 4 (biofilm).
# `mothur_ra_melt_mean` is provided by the .RData loaded below.
# NOTE(review): `plot_path` (used in both ggsave() calls) is not defined in
# this script -- presumably it is stored in the .RData or set by a wrapper;
# confirm before running standalone.
#library(scales)
library(ggplot2)
library(phyloseq)
library(tidyverse)
setwd("/data/projects/glyphosate/reads/mothur_processed/")
load("glyphosate_mothur_in_phyloseq.RData")
# get data per water OTU, setting threshold for samples and clusters
# (keep post-day-40 samples with > 0.15 % mean relative abundance)
community_subset_water <- droplevels(subset(mothur_ra_melt_mean, days > 40
	& Abundance > 0.15 & habitat == "water"))
# Sum abundance per order; the spread()/gather() round trip inserts explicit
# zeros for missing order x day combinations so geom_area stacks correctly.
# NOTE(review): spread()/gather() are superseded by pivot_wider()/pivot_longer().
community_subset_water %>%
	group_by(new_day, order, nucleic_acid, treatment) %>%
	summarise(Abundance2 = sum(Abundance)) %>%
	ungroup() %>%
	# Replace missing values by 0
	spread(key = order, value = Abundance2) %>%
	replace(., is.na(.), 0) %>%
	gather(key = order, value = Abundance2, -c(new_day, nucleic_acid, treatment)) ->
	order_sums_water
# Fix the stacking order of the areas: orders grouped by class/phylum
order_sums_water$order <- factor(order_sums_water$order,
	levels = c(
	# alphaproteos
	"Caulobacterales", #
	"Rhizobiales", #
	"Rhodobacterales", #
	"Rhodospirillales", #
	"Sneathiellales", #
	"Sphingomonadales", #
	"Parvibaculales",
	"Thalassobaculales",
	# gammaproteos
	"Aeromonadales", #
	"Alteromonadales", #
	"Betaproteobacteriales", #
	"Gammaproteobacteria_Incertae_Sedis",
	"Oceanospirillales", #
	"Pseudomonadales", #
	"Xanthomonadales", #
	# Actinobacteria
	"Corynebacteriales",
	# Bacteroidia
	"Bacillales", #
	"Bacteroidia_unclassified",
	"Chitinophagales", #
	"Flavobacteriales", #
	"Sphingobacteriales",
	# Planctomycetacia
	"Planctomycetales", #
	"OM190_or", #
	# Verrucomicrobia
	"Verrucomicrobiales" #
	))
# assign specific colour to make plot distuingishable
# NOTE(review): "Cytophagales", "Gaiellales" and "Opitutales" are given
# colours here but do not appear in the factor levels above -- harmless
# extras, probably kept in sync with a related script.
fill_values_water <- c("Aeromonadales" = "green",
	"Alteromonadales" = "#e6194B",
	"Bacillales" = "red",
	"Bacteroidia_unclassified" = "maroon2",
	"Betaproteobacteriales" = "#3cb44b",
	"Caulobacterales" = "#ffe119",
	"Chitinophagales" = "#4363d8",
	"Corynebacteriales" = "darkblue",
	"Cytophagales" = "blue",
	"Flavobacteriales" = "#f58231",
	"Gammaproteobacteria_Incertae_Sedis" = "black",
	"Gaiellales" = "black",
	"Oceanospirillales" = "maroon4",
	"OM190_or" = "grey80",
	"Opitutales" = "yellow",
	"Sneathiellales" = "#42d4f4",
	"Parvibaculales" = "#f032e6",
	"Planctomycetales" = "yellow",
	"Pseudomonadales" = "#fabebe",
	"Rhizobiales" = "#469990",
	"Rhodobacterales" = "#000000",
	"Rhodospirillales" = "#9A6324",
	"Sphingobacteriales" = "#fffac8",
	"Sphingomonadales" = "#800000",
	"Thalassobaculales" = "#a9a9a9",
	"Verrucomicrobiales" = "turquoise1",
	"Xanthomonadales" = "orange"
	)
# sort and rename factor levels
order_sums_water$treatment <- factor(order_sums_water$treatment,
	levels = c("glyph", "control"))
levels(order_sums_water$treatment) <- c("Treatment", "Control")
order_sums_water$nucleic_acid <- factor(order_sums_water$nucleic_acid,
	levels = c("dna", "cdna"))
levels(order_sums_water$nucleic_acid) <- c("16S rRNA gene", "16S rRNA")
# plot an array of 4 geom_areas
# (dashed vertical line marks day 0 = first glyphosate addition on this axis)
# NOTE(review): `(title = NULL)` is passed positionally to guide_legend();
# its first argument is `title`, so the legend title is still suppressed,
# but `title = NULL` without parentheses would be clearer.
water_areas <- ggplot(order_sums_water, aes(x = new_day, y = Abundance2, fill = order)) +
	geom_area(stat = "identity") +
	geom_vline(aes(xintercept = 0), linetype = "dashed", size = 1) +
	scale_fill_manual(breaks = levels(order_sums_water$order), values = fill_values_water) +
	guides(colour = FALSE, size = FALSE, width = FALSE, fill = guide_legend(ncol = 1,
		keyheight = 1.2, label.theme = element_text(size = 12, face = "italic",
		angle = 0), (title = NULL))) +
	scale_x_continuous(expand = c(0, 0), breaks = scales::pretty_breaks(n = 10)) +
	scale_y_continuous(expand = c(0, 0)) +
	theme_bw() +
	theme(panel.grid.major = element_line(colour = NA, size = 0.2),
		panel.grid.minor = element_line(colour = NA, size = 0.5),
		axis.title = element_text(size = 16, face = "bold"),
		axis.title.y = element_text(angle = 90, vjust = 1),
		axis.text = element_text(size = 14),
		legend.title = element_text(size = 14, face = "bold"),
		legend.text = element_text(size = 12),
		strip.text.x = element_text(size = 14, colour = "black", face = "bold"),
		legend.background = element_rect(fill = "grey90", linetype = "solid")) +
	labs(x = "Days", y = "Relative abundance [%]") +
	# theme(legend.position = "bottom", legend.direction = "horizontal") +
	facet_wrap(~ treatment + nucleic_acid, nrow = 2)
ggsave(water_areas, file = paste(plot_path, "Figure_2_water_communities.pdf", sep = ""),
	device = "pdf", width = 26.0, height = 18, dpi = 300, unit = "cm")
# get data per OTU, setting threshold for samples and clusters
# (same pipeline as above, now for the biofilm habitat)
community_subset_biofilm <- droplevels(subset(mothur_ra_melt_mean, days > 40
	& Abundance > 0.15 & habitat == "biofilm"))
community_subset_biofilm %>%
	group_by(new_day, order, nucleic_acid, treatment) %>%
	summarise(Abundance2 = sum(Abundance)) %>%
	ungroup() %>%
	# Replace missing values by 0
	spread(key = order, value = Abundance2) %>%
	replace(., is.na(.), 0) %>%
	gather(key = order, value = Abundance2, -c(new_day, nucleic_acid, treatment)) ->
	order_sums_biofilm
# recycle values from water plot
order_sums_biofilm$order <- factor(order_sums_biofilm$order,
	levels = c(
	# alphaproteos
	"Caulobacterales", #
	"Rhizobiales", #
	"Rhodobacterales", #
	"Rhodospirillales", #
	"Sneathiellales", #
	"Sphingomonadales", #
	"Parvibaculales",
	"Thalassobaculales",
	# gammaproteos
	"Aeromonadales", #
	"Alteromonadales", #
	"Betaproteobacteriales", #
	"Gammaproteobacteria_Incertae_Sedis",
	"Oceanospirillales", #
	"Pseudomonadales", #
	"Xanthomonadales", #
	# Actinobacteria
	"Corynebacteriales",
	# Bacteroidia
	"Bacillales", #
	"Bacteroidia_unclassified",
	"Chitinophagales", #
	"Flavobacteriales", #
	"Sphingobacteriales",
	# Planctomycetacia
	"Planctomycetales", #
	"OM190_or", #
	# Verrucomicrobia
	"Verrucomicrobiales" #
	))
fill_values_biofilm <- fill_values_water
# sort and rename factor levels
order_sums_biofilm$treatment <- factor(order_sums_biofilm$treatment,
	levels = c("glyph", "control"))
levels(order_sums_biofilm$treatment) <- c("Treatment", "Control")
order_sums_biofilm$nucleic_acid <- factor(order_sums_biofilm$nucleic_acid,
	levels = c("dna", "cdna"))
levels(order_sums_biofilm$nucleic_acid) <- c("16S rRNA gene", "16S rRNA")
# biofilm are plot for SI
biofilm_areas <- ggplot(order_sums_biofilm, aes(x = new_day, y = Abundance2, fill = order)) +
	geom_area(stat = "identity") +
	geom_vline(aes(xintercept = 0), linetype = "dashed", size = 1) +
	scale_fill_manual(breaks = levels(order_sums_biofilm$order), values = fill_values_biofilm) +
	guides(color = FALSE) +
	guides(size = FALSE) +
	guides(width = FALSE) +
	guides(fill = guide_legend(label.theme = element_text(size = 12, face = "italic",
		angle = 0), ncol = 1, keyheight = 1.2, (title = NULL))) +
	scale_x_continuous(expand = c(0, 0), breaks = scales::pretty_breaks(n = 10)) +
	scale_y_continuous(expand = c(0, 0)) +
	theme_bw() +
	theme(panel.grid.major = element_line(colour = NA, size = 0.2),
		panel.grid.minor = element_line(colour = NA, size = 0.5),
		axis.title = element_text(size = 16, face = "bold"),
		axis.title.y = element_text(angle = 90, vjust = 1),
		axis.text = element_text(size = 14),
		legend.title = element_text(size = 14, face = "bold"),
		legend.text = element_text(size = 12),
		strip.text.x = element_text(size = 14, colour = "black", face = "bold"),
		legend.background = element_rect(fill = "grey90", linetype = "solid")) +
	labs(x = "Days", y = "Relative abundance [%]") +
	facet_wrap(~ treatment + nucleic_acid, nrow = 2)
ggsave(biofilm_areas, file = paste(plot_path, "SI_4_biofilm_communities.pdf", sep = ""),
	device = "pdf", width = 26.0, height = 18, dpi = 300, unit = "cm")
|
#' Index a New Comment
#'
#' @param id Notebook ID
#' @param content The new comment content
#' @param comment.id The comment ID
#'
#' @export
solr.post.comment <- function(id, content, comment.id) {
    # `content` and `comment.id` are accepted for API symmetry but unused:
    # the whole notebook is re-indexed, which picks up the new comment.
    # (update_solr_id() is defined elsewhere in this package.)
    update_solr_id(id)
}
#' Modify Index for a Comment
#'
#' @param id Notebook ID
#' @param content New comment content
#' @param cid Comment ID
#'
#' @export
solr.modify.comment <- function(id, content, cid) {
    # `content` and `cid` are unused; re-index the whole notebook instead of
    # updating the single comment document.
    update_solr_id(id)
}
#' Delete Comment from Index
#'
#' @param id Notebook ID
#' @param cid Comment ID
#'
#' @export
solr.delete.comment <- function(id, cid) {
    # `cid` is unused; re-indexing the notebook drops the deleted comment.
    update_solr_id(id)
}
#' Delete Notebook from Index
#'
#' Delete this notebook and all its child docs
#'
#' @param id Notebook ID
#'
#' @export
solr.delete.doc <- function(id) {
    # Build a Solr delete-by-query payload matching the notebook document
    # itself (id:) and every child document tagged with notebook_id:.
    metadata <-
        paste0('<delete><query>id:', id,
               ' OR notebook_id:', id,
               '</query></delete>')
    .solr.post(data = metadata, isXML = TRUE)
}
|
/R/comment-doc.R
|
no_license
|
att/rcloud.solr
|
R
| false
| false
| 920
|
r
|
#' Index a New Comment
#'
#' @param id Notebook ID
#' @param content The new comment content
#' @param comment.id The comment ID
#'
#' @export
solr.post.comment <- function(id, content, comment.id) {
    # `content` and `comment.id` are accepted for API symmetry but unused:
    # the whole notebook is re-indexed, which picks up the new comment.
    # (update_solr_id() is defined elsewhere in this package.)
    update_solr_id(id)
}
#' Modify Index for a Comment
#'
#' @param id Notebook ID
#' @param content New comment content
#' @param cid Comment ID
#'
#' @export
solr.modify.comment <- function(id, content, cid) {
    # `content` and `cid` are unused; re-index the whole notebook instead of
    # updating the single comment document.
    update_solr_id(id)
}
#' Delete Comment from Index
#'
#' @param id Notebook ID
#' @param cid Comment ID
#'
#' @export
solr.delete.comment <- function(id, cid) {
    # `cid` is unused; re-indexing the notebook drops the deleted comment.
    update_solr_id(id)
}
#' Delete Notebook from Index
#'
#' Delete this notebook and all its child docs
#'
#' @param id Notebook ID
#'
#' @export
solr.delete.doc <- function(id) {
    # Build a Solr delete-by-query payload matching the notebook document
    # itself (id:) and every child document tagged with notebook_id:.
    metadata <-
        paste0('<delete><query>id:', id,
               ' OR notebook_id:', id,
               '</query></delete>')
    .solr.post(data = metadata, isXML = TRUE)
}
|
## Wavelet example for thesis
# Produces the example figures for the wavelet chapter: three 1024-point test
# signals (pure noise, and two sine-plus-noise "waves"), their DWT/MODWT
# decompositions, power spectra, scale-wise correlations, CWT scalograms and
# wavelet coherence plots.
#working directory
setwd("\\\\filestore.soton.ac.uk\\users\\ch19g17\\mydocuments\\Wavelet\\Example Wavelet Work")
##Load prerequisites
#source("//filestore.soton.ac.uk/users/ch19g17/mydocuments/Wavelet/Comparison/getWaveletFile.R")
# NOTE(review): library() would be preferable to require() -- require() only
# warns (returns FALSE) when a package is missing, so failures surface later.
# wmtsa: wavDWT/wavMODWT; biwavelet: wt/wtc; fields: image.plot (colour bar)
require(wmtsa)
require(biwavelet)
require(fields)
## Create Fine Scale Changes dataset
# The datasets were generated once (commented code below) and are re-read
# from CSV so the figures are reproducible across sessions.
#First time generating
#dfExample1<-data.frame(x=c(1:1024),y=rnorm(1024,0,1))
#write.csv(dfExample1,"Example1.csv",row.names=FALSE)
#Read from .csv from now on
dfExample1<-read.csv("Example1.csv")
##Create Wider Scale Changes dataset
#First time generating
#<-data.frame(x=c(1:1024),y=sin(c(1:1024)*pi/32)+rnorm(1024,0,1))
#write.csv(dfExample2,"Example2.csv",row.names=FALSE)
#Read from .csv from now on
dfExample2<-read.csv("Example2.csv")
##Create Wider Scale2 Changes dataset
#First time generating
#dfExample3<-data.frame(x=c(1:1024),y=sin(c(1:1024)*pi/32)+rnorm(1024,0,1))
#write.csv(dfExample3,"Example3.csv",row.names=FALSE)
#Read from .csv from now on
dfExample3<-read.csv("Example3.csv")
## Plot example signals
jpeg("ExampleSignals.jpeg",width=13,height=17,units="cm",quality=100,res=300)
par(mfcol=c(3,1))
plot(dfExample1$x,dfExample1$y,type="l",xlab="Kb",ylab="Signal",ylim=c(-5,5),xaxp=c(0,1024,8),main="Example 1: noise")
plot(dfExample2$x,dfExample2$y,type="l",xlab="Kb",ylab="Signal",ylim=c(-5,5),xaxp=c(0,1024,8),main="Example 2: wave")
plot(dfExample3$x,dfExample3$y,type="l",xlab="Kb",ylab="Signal",ylim=c(-5,5),xaxp=c(0,1024,8),main="Example 3: wave")
dev.off()
## Get DWT
# Discrete wavelet transform (Haar) of each example signal
Example1_DWT<-wavDWT(dfExample1$y,wavelet="haar")
Example2_DWT<-wavDWT(dfExample2$y,wavelet="haar")
Example3_DWT<-wavDWT(dfExample3$y,wavelet="haar")
## Get approximation coefficients
getApproxCoeff <- function(x) {
  # Haar DWT approximation (smooth) coefficients at every level.
  #
  # x: numeric signal whose length is a power of two.
  #
  # Returns a named list: s0 is the input itself, and each subsequent
  # s1, s2, ... halves the length by summing adjacent pairs scaled by
  # 1/sqrt(2), down to the single coefficient at the coarsest level.
  out <- list(s0 = x)
  current <- x
  lvl <- 0
  while (length(current) > 1) {
    lvl <- lvl + 1
    half <- length(current) / 2
    odd <- current[2 * seq_len(half) - 1]
    even <- current[2 * seq_len(half)]
    current <- (even + odd) / sqrt(2)
    out[[paste0("s", lvl)]] <- current
  }
  out
}
# DWT approximation coefficients for the three example signals
Example1_Approx<-getApproxCoeff(dfExample1$y)
Example2_Approx<-getApproxCoeff(dfExample2$y)
Example3_Approx<-getApproxCoeff(dfExample3$y)
## Plot the wavelet decomposition
## Draw a vector of coefficients as a piecewise-constant ("blocky") trace.
# y    : coefficient values; each occupies an equal-width segment of the x axis
# xlim : x range; segment width is xlim[2]/length(y)
# RHS  : if TRUE, add a right-hand value axis (used for the approximation
#        column of the decomposition figures)
# Axes/labels default to suppressed so panels can be stacked tightly.
plotBlocks<-function(y,xlim,ylab="",xaxt="n",yaxt="n",las=0,RHS=FALSE){
  blocks<-length(y)
  # segment boundaries: blocks+1 points spanning 0..xlim[2]
  x<-seq(0,xlim[2],xlim[2]/blocks)
  # empty canvas at the right scale; the trace is drawn manually below
  plot(rep(0,length(y)),y,type="n",xlim=xlim,xaxp=c(0,xlim[2],8),ylab=ylab,xlab="",yaxt=yaxt,xaxt=xaxt,las=las)
  if (RHS==TRUE){
    axis(side = 4,las=2)
  }
  for(i in 1:blocks){
    # horizontal segment at this coefficient's value ...
    lines(x=c(x[i],x[i+1]),y=c(y[i],y[i]))
    if(i < blocks){
      # ... joined to the next segment by a vertical step
      lines(x=c(x[i+1],x[i+1]),y=c(y[i],y[i+1]))
    }
  }
}
## Render the full Haar DWT of a 1024-point signal as a two-column JPEG:
## left column = original signal plus detail coefficients d(1)..d(10),
## right column = approximation coefficients a(0)..a(10), one row per level.
# DWT        : wavDWT() result (detail coefficients in DWT$data)
# Approx     : list from getApproxCoeff()
# fileName   : output JPEG path
# graphTitle : outer title for the figure
plotWaveletDecomposition<-function(DWT,Approx,fileName,graphTitle){
  jpeg(fileName,width=14,height=21,units="cm",quality=100,res=300)
  # 11 rows x 2 columns filled column-wise; wide outer margins carry labels
  par(mar=c(0.5,0,0,0.5),oma=c(4,9.5,4,4),xpd=NA)
  layout(matrix(c(1:22), 11, 2, byrow = FALSE))
  plotBlocks(Approx[[1]],xlim=c(1,1024),yaxt="s",las=2)
  mtext(paste0("Original\nSignal"),side = 2, line = 4.5,cex=0.75,las=1)
  title(main="Detail coefficients",line=1)
  mtext(graphTitle,outer=TRUE,line=2.5)
  for (i in 1:9){
    plotBlocks(DWT$data[[i]],xlim=c(1,1024),yaxt="s",las=2)
    mtext(paste0("Scale ",2^i),side = 2, line = 4.5,cex=0.75,las=1)
    mtext(paste0("d(",i,")"),side = 2, line = 3,cex=0.75)
  }
  #d10
  # the level-10 detail is a single value, so draw it as one flat line
  plot(0,0,type="n",xaxp=c(0,1024,8),xlim=c(1,1024),ylab="",xlab="Kb",xaxt="s",las=2)
  lines(c(0,1024),c(DWT$data[[10]],DWT$data[[10]]))
  mtext(paste0("Scale ",2^10),side = 2, line = 4.5,cex=0.75,las=1)
  mtext("d(10)",side = 2, line = 3,cex=0.75)
  for (i in 0:9){
    plotBlocks(Approx[[i+1]],xlim=c(1,1024),RHS=TRUE)
    if(i==0){title(main="Approximation coefficients",line=1)}
    mtext(paste0("a(",i,")"), side = 4, line = 3,cex =0.75)
  }
  # a(10) is likewise a single value: flat line with a right-hand axis
  plot(0,Approx[[11]],type="n",xaxp=c(0,1024,8),xlim=c(1,1024),ylab="",xlab="Kb",yaxt="n",las=2)
  lines(c(0,1024),c(Approx[[11]],Approx[[11]]))
  mtext("a(10)", side = 4, line = 3,cex =0.75)
  axis(side = 4,las=2)
  dev.off()
}
plotWaveletDecomposition(Example1_DWT,Example1_Approx,"Example1Decomposition.jpeg","DWT of Example 1")
plotWaveletDecomposition(Example2_DWT,Example2_Approx,"Example2Decomposition.jpeg","DWT of Example 2")
plotWaveletDecomposition(Example3_DWT,Example3_Approx,"Example3Decomposition.jpeg","DWT of Example 3")
## Get values for example reconstruction
# Printed to the console for transcription into the thesis text; the two
# arithmetic lines verify one inverse Haar step by hand, i.e. recovering the
# two a(9) values as (a(10) -/+ d(10)) / sqrt(2).
# NOTE(review): 0.0722 and -0.4326 look like hand-copied values of s10/d10
# for Example 2 -- confirm against the printed output if the CSVs change.
Example2_DWT$data$s10
Example2_DWT$data$d10
Example2_Approx$s9
(0.0722-(-0.4326))/sqrt(2)
(0.0722+(-0.4326))/sqrt(2)
getPowerSpectrum <- function(dataDWT) {
  # Per-level share of the total energy of a (MO)DWT.
  #
  # dataDWT: wavDWT()/wavMODWT()-style object -- a list with $data (list of
  #          coefficient vectors, detail levels first) and
  #          $dictionary$n.levels (number of detail levels).
  #
  # Returns an unnamed numeric vector of length n.levels summing to 1:
  # sum of squared coefficients per level, normalised by the total.
  n_levels <- dataDWT$dictionary$n.levels
  energy <- vapply(
    dataDWT$data[seq_len(n_levels)],
    function(coeffs) sum(coeffs^2, na.rm = TRUE),
    numeric(1)
  )
  energy <- unname(energy)
  energy / sum(energy, na.rm = TRUE)
}
## Power Spectrum
# Proportion of variance per DWT scale for the three examples, on one plot
Example1_PS<-getPowerSpectrum(Example1_DWT)
Example2_PS<-getPowerSpectrum(Example2_DWT)
Example3_PS<-getPowerSpectrum(Example3_DWT)
jpeg("PowerSpectrumExamples.jpeg",width=15,height=15,units="cm",quality=100,res=300)
plot(Example1_PS,type="l",xlim=c(1,10),xlab="Scale Kb",ylab="Proportion of Variance",xaxt="n",ylim=c(0,max(Example1_PS,Example2_PS,Example3_PS,na.rm=TRUE)),main="Proportion of Variance")
axis(1,1:10,labels=2^(1:10),las=2)
lines(Example2_PS,col="blue",lty=2)
lines(Example3_PS,col="red",lty=3)
legend("topright",legend=c("Example 1","Example 2","Example 3"),col=c("black","blue","red"),lty=c(1,2,3))
dev.off()
## Show what happens when rotate 15
# Circularly shift Example 2 by 15 samples to illustrate that the DWT is
# not shift-invariant (the power spectrum changes), motivating the MODWT.
dfExample2r<-dfExample2
dfExample2r$y<-c(dfExample2$y[-c(1:15)],dfExample2$y[1:15])
Example2r_DWT<-wavDWT(dfExample2r$y,wavelet="haar")
Example2r_Approx<-getApproxCoeff(dfExample2r$y)
plotWaveletDecomposition(Example2r_DWT,Example2r_Approx,"Example2r15Decomposition.jpeg","DWT of Example 2, rotated by 15")
Example2r_PS<-getPowerSpectrum(Example2r_DWT)
jpeg("PowerSpectrumExamplesr15.jpeg",width=15,height=15,units="cm",quality=100,res=300)
plot(Example2r_PS,type="l",xlim=c(1,10),xlab="Scale Kb",ylab="Proportion of Variance",xaxt="n",ylim=c(0,max(Example2r_PS,na.rm=TRUE)),main="Proportion of Variance",col="green",lty=2)
lines(Example2_PS,col="blue")
legend("topright",legend=c("Example 2","Example 2,\nrotated by 15",""),col=c("blue","green",NA),lty=c(1,2))
axis(1,1:10,labels=2^(1:10),las=2)
dev.off()
## Get MODWT
# Maximal-overlap (undecimated, shift-invariant) DWT of each signal
Example1_MODWT<-wavMODWT(dfExample1$y,wavelet="haar")
Example2_MODWT<-wavMODWT(dfExample2$y,wavelet="haar")
Example3_MODWT<-wavMODWT(dfExample3$y,wavelet="haar")
getApproxCoeffMO <- function(x, n.levels = floor(log2(length(x)))){
  # MODWT (undecimated) Haar approximation coefficients, level by level,
  # with circular (periodic) boundary handling.
  #
  # x        : numeric signal (the thesis examples use length 1024)
  # n.levels : number of decomposition levels; the default floor(log2(n))
  #            reproduces the original hard-coded 10 levels for n = 1024
  #
  # Returns a named list s0..s<n.levels>: s0 is the input and every s<j>
  # keeps the full length of x (no downsampling). Each averaging step
  # divides by 2 rather than sqrt(2), i.e. the coefficients carry an extra
  # 1/sqrt(2) rescaling relative to the DWT.
  approx<-list(s0=x)
  n<-length(x)
  level<-0
  for (i in seq_len(n.levels)){
    level<-level+1
    s<-rep(NA,n)
    for (t in 1:n){
      index<-t-2^(i-1)
      if (index<=0){
        # BUGFIX: wrap around using the actual series length. This was
        # hard-coded to 1024, which indexed out of range (yielding NA)
        # for any input of a different length.
        index <- n+index
      }
      s[t]<-(approx[[level]][t]+approx[[level]][index])/(2) #rescaled by multiplying by another 1/sqrt(2)
    }
    approx[[paste0("s",level)]]<-s
  }
  #check final level for rounding errors: the coarsest level should be a
  #constant vector, so snap numerically-equal neighbours to the same value.
  #(seq_along(...)[-1] also fixes the 2:length(...) trap for length-1 input)
  for (i in seq_along(approx[[level+1]])[-1]){
    if (isTRUE(all.equal(approx[[level+1]][i-1],approx[[level+1]][i]))){
      approx[[level+1]][i]<-approx[[level+1]][i-1]
    }
  }
  return(approx)
}
# MODWT approximation coefficients for the three example signals
Example1_MOApprox<-getApproxCoeffMO(dfExample1$y)
Example2_MOApprox<-getApproxCoeffMO(dfExample2$y)
Example3_MOApprox<-getApproxCoeffMO(dfExample3$y)
## Redefinition of plotBlocks() for the MODWT figures: adds an xlab argument
## and pads the y range by +/-0.05 so near-constant levels remain visible.
# NOTE(review): this silently overwrites the earlier plotBlocks(), so the DWT
# figures must be generated before this point in the script.
plotBlocks<-function(y,xlim,ylab="",xaxt="n",yaxt="n",las=0,RHS=FALSE,xlab=""){
  blocks<-length(y)
  x<-seq(0,xlim[2],xlim[2]/blocks)
  # the extra 0.05/-0.05 points only widen ylim; nothing is drawn (type="n")
  plot(rep(0,length(y)+2),c(y,0.05,-0.05),type="n",xlim=xlim,xaxp=c(0,xlim[2],8),ylab=ylab,xlab=xlab,yaxt=yaxt,xaxt=xaxt,las=las)
  if (RHS==TRUE){
    axis(side = 4,las=2)
  }
  for(i in 1:blocks){
    lines(x=c(x[i],x[i+1]),y=c(y[i],y[i]))
    if(i < blocks){
      lines(x=c(x[i+1],x[i+1]),y=c(y[i],y[i+1]))
    }
  }
}
## MODWT counterpart of plotWaveletDecomposition(): every level keeps the
## full 1024 coefficients, so all rows (including level 10) use plotBlocks().
# DWT        : wavMODWT() result; Approx: list from getApproxCoeffMO()
# fileName   : output JPEG path; graphTitle: outer figure title
plotWaveletMODecomposition<-function(DWT,Approx,fileName,graphTitle){
  jpeg(fileName,width=14,height=21,units="cm",quality=100,res=300)
  par(mar=c(0.5,0,0,0.5),oma=c(4,9.5,4,4),xpd=NA)
  layout(matrix(c(1:22), 11, 2, byrow = FALSE))
  plotBlocks(Approx[[1]],xlim=c(1,1024),yaxt="s",las=2)
  mtext(paste0("Original\nSignal"),side = 2, line = 4.5,cex=0.75,las=1)
  title(main="Detail coefficients",line=1)
  mtext(graphTitle,outer=TRUE,line=2.5)
  for (i in 1:10){
    # only the bottom row of each column gets an x axis and a "Kb" label
    plotBlocks(DWT$data[[i]],xlim=c(1,1024),yaxt="s",las=2,xaxt=ifelse(i==10,"s","n"),xlab=ifelse(i==10,"Kb",""))
    mtext(paste0("Scale ",2^i),side = 2, line = 4.5,cex=0.75,las=1)
    mtext(paste0("d(",i,")"),side = 2, line = 3,cex=0.75)
  }
  for (i in 0:10){
    plotBlocks(Approx[[i+1]],xlim=c(1,1024),RHS=TRUE,xaxt=ifelse(i==10,"s","n"),las=2,xlab=ifelse(i==10,"Kb",""))
    if(i==0){title(main="Approximation coefficients",line=1)}
    mtext(paste0("a(",i,")"), side = 4, line = 3,cex =0.75) #WAS line =0.5
  }
  dev.off()
}
plotWaveletMODecomposition(Example1_MODWT,Example1_MOApprox,"Example1MODecomposition.jpeg","MODWT of Example 1")
plotWaveletMODecomposition(Example2_MODWT,Example2_MOApprox,"Example2MODecomposition.jpeg","MODWT of Example 2")
plotWaveletMODecomposition(Example3_MODWT,Example3_MOApprox,"Example3MODecomposition.jpeg","MODWT of Example 3")
## Power spectrum of MODWT
# Same per-scale variance decomposition as before, now shift-invariant
Example1_MOPS<-getPowerSpectrum(Example1_MODWT)
Example2_MOPS<-getPowerSpectrum(Example2_MODWT)
Example3_MOPS<-getPowerSpectrum(Example3_MODWT)
jpeg("PowerSpectrumMOExamples.jpeg",width=15,height=15,units="cm",quality=100,res=300)
plot(Example1_MOPS,type="l",xlim=c(1,10),xlab="Scale Kb",ylab="Proportion of Variance",xaxt="n",ylim=c(0,max(Example1_MOPS,Example2_MOPS,Example3_MOPS,na.rm=TRUE)),main="Proportion of Variance with MODWT")
axis(1,1:10,labels=2^(1:10),las=2)
lines(Example2_MOPS,col="blue",lty=2)
lines(Example3_MOPS,col="red",lty=3)
legend("topright",legend=c("Example 1","Example 2","Example 3"),col=c("black","blue","red"),lty=c(1,2,3))
dev.off()
## Wavelet correlation
#function
getWaveletCorrelation10 <- function(x, y) {
  # Kendall rank correlation between the detail coefficients of two
  # (MO)DWT objects, computed scale by scale for levels 1..9 only --
  # the coarsest levels have too few coefficients for cor.test.
  #
  # x, y: DWT-style objects whose $data holds coefficient vectors per level.
  #
  # Returns list(est = tau per level, pval = p-value per level),
  # both unnamed numeric vectors of length 9.
  tests <- lapply(1:9, function(lvl) {
    cor.test(x$data[[lvl]], y$data[[lvl]], method = "kendall")
  })
  list(
    est  = vapply(tests, function(ct) unname(ct$estimate), numeric(1)),
    pval = vapply(tests, function(ct) ct$p.value, numeric(1))
  )
}
#get correlations
# Pairwise scale-wise Kendall correlations between the MODWT details
Example12_cor<-getWaveletCorrelation10(Example1_MODWT,Example2_MODWT)
Example13_cor<-getWaveletCorrelation10(Example1_MODWT,Example3_MODWT)
Example23_cor<-getWaveletCorrelation10(Example2_MODWT,Example3_MODWT)
jpeg("CorrelationExamples.jpeg",width=15,height=15,units="cm",quality=100,res=300)
# lines = tau per scale (scales 2..256); circles mark scales with p < 0.01
plot(Example12_cor$est[1:8],type="l",main="Correlations",ylim=c(-1,1),col="black",xaxt="n",xlab="Scale Kb",ylab="Kendall's tau")
axis(1,at=c(1:8),labels=c(2^(1:8)),las=2)
points(which(Example12_cor$pval[1:8]<0.01),Example12_cor$est[which(Example12_cor$pval[1:8]<0.01)],col="black")
lines(Example13_cor$est[1:8],col="blue",lty=2)
points(which(Example13_cor$pval[1:8]<0.01),Example13_cor$est[which(Example13_cor$pval[1:8]<0.01)],col="blue")
lines(Example23_cor$est[1:8],col="red",lty=3)
points(which(Example23_cor$pval[1:8]<0.01),Example23_cor$est[which(Example23_cor$pval[1:8]<0.01)],col="red")
legend("bottomleft",legend=c("Examples 1 and 2","Examples 1 and 3","Examples 2 and 3"),col=c("black","blue","red"),lty=c(1,2,3))
legend("bottomright",legend="p-value < 0.01",pch=1)
dev.off()
# ----------------------------------------------------------------------------------------
###CWT
## Draw only the colour bar (legend) for a biwavelet CWT panel, matching the
## bias-corrected power scale and palette of plot.biwavelet.
# x: a biwavelet object as returned by wt()
plotLegendCWT<-function(x){
  # use bias-corrected power, as plot(..., type = "power.corr.norm") does
  x$power <- x$power.corr
  yvals <- log2(x$period)
  # log2 power normalised by the signal variance, clamped below so the
  # colour scale is symmetric about zero
  zvals <- log2(abs(x$power/x$sigma2))
  zlim <- range(c(-1, 1) * max(zvals))
  zvals[zvals < zlim[1]] <- zlim[1]
  #locs <- pretty(range(zlim), n = 7)
  # fixed tick positions, labelled as powers of two (fractions below 1)
  locs<-c(-9,-6,-3,0,3,6,9)
  leg.lab<-2^locs
  leg.lab[leg.lab<1] <- paste0("1/",(1/leg.lab[leg.lab<1]))
  # jet-style palette used by plot.biwavelet
  fill.cols <- c("#00007F", "blue", "#007FFF", "cyan",
    "#7FFF7F", "yellow", "#FF7F00", "red", "#7F0000")
  col.pal <- colorRampPalette(fill.cols)
  fill.colors <- col.pal(64)
  # legend.only = TRUE: draw just the colour bar beside the existing panel
  image.plot(x$t, yvals, t(zvals),
    zlim = zlim,
    ylim = rev(range(yvals)),
    col = fill.colors,
    horizontal = FALSE,
    legend.only = TRUE,
    axis.args = list(at = locs,labels = leg.lab),
    xpd = NA)
}
# Continuous wavelet transform (Morlet, omega0 = 6) of each example signal
Example1_CWT<-wt(dfExample1,mother="morlet",param=6)
Example2_CWT<-wt(dfExample2,mother="morlet",param=6)
Example3_CWT<-wt(dfExample3,mother="morlet",param=6)
# One figure per example: scalogram on top, raw signal below, shared x axis
bmp("Example1CWT.bmp",res=300,height=17,width=23,units='cm')
par(mfrow=c(2,1), oma = c(0, 0, 0, 1), mar = c(0, 4, 4, 5) + 0.1)
plot(Example1_CWT, plot.cb = FALSE, plot.phase = FALSE,main="Example 1",xlab="",xaxt="n",lwd.sig=2,las=1)
plotLegendCWT(Example1_CWT)
par(mar = c(5, 4, 0, 5) + 0.1)
plot(dfExample1$x,dfExample1$y,type="l",ylab="Signal",xlab="Kb",xaxs="i",xaxp=c(0,1024,8),las=1)
dev.off()
bmp("Example2CWT.bmp",res=300,height=17,width=23,units='cm')
par(mfrow=c(2,1), oma = c(0, 0, 0, 1), mar = c(0, 4, 4, 5) + 0.1)
plot(Example2_CWT, plot.cb = FALSE, plot.phase = FALSE,main="Example 2",xlab="",xaxt="n",lwd.sig=2,las=1)
plotLegendCWT(Example2_CWT)
par(mar = c(5, 4, 0, 5) + 0.1)
plot(dfExample2$x,dfExample2$y,type="l",ylab="Signal",xlab="Kb",xaxs="i",xaxp=c(0,1024,8),las=1)
dev.off()
bmp("Example3CWT.bmp",res=300,height=17,width=23,units='cm')
par(mfrow=c(2,1), oma = c(0, 0, 0, 1), mar = c(0, 4, 4, 5) + 0.1)
plot(Example3_CWT, plot.cb = FALSE, plot.phase = FALSE,main="Example 3",xlab="",xaxt="n",lwd.sig=2,las=1)
plotLegendCWT(Example3_CWT)
par(mar = c(5, 4, 0, 5) + 0.1)
plot(dfExample3$x,dfExample3$y,type="l",ylab="Signal",xlab="Kb",xaxs="i",xaxp=c(0,1024,8),las=1)
dev.off()
# Combine example 1 and 2
# Side-by-side version of the first two figures (2x2 layout)
bmp("CWT_12combine.bmp",res=300,height=17,width=46,units='cm')
layout(matrix(c(1,2,3,4),2,2,byrow=TRUE))
par(oma = c(0, 0, 0, 1), mar = c(0, 4, 4, 5) + 0.1)
plot(Example1_CWT, plot.cb = FALSE, plot.phase = FALSE,main="Example 1",xlab="",xaxt="n",lwd.sig=2,las=1)
plotLegendCWT(Example1_CWT)
plot(Example2_CWT, plot.cb = FALSE, plot.phase = FALSE,main="Example 2",xlab="",xaxt="n",lwd.sig=2,las=1)
plotLegendCWT(Example2_CWT)
par(mar = c(5, 4, 0, 5) + 0.1)
plot(dfExample1$x,dfExample1$y,type="l",ylab="Signal",xlab="Kb",xaxs="i",xaxp=c(0,1024,8),las=1)
plot(dfExample2$x,dfExample2$y,type="l",ylab="Signal",xlab="Kb",xaxs="i",xaxp=c(0,1024,8),las=1)
dev.off()
## Wavelet Coherence
# Pairwise wavelet coherence; significance from 1000 Monte Carlo randomisations
# (nrands = 1000 makes these the slow steps of the script)
Example12_coh<-wtc(dfExample1,dfExample2,mother="morlet",param=6,nrands = 1000)
Example13_coh<-wtc(dfExample1,dfExample3,mother="morlet",param=6,nrands = 1000)
Example23_coh<-wtc(dfExample2,dfExample3,mother="morlet",param=6,nrands = 1000)
jpeg("Example12Coh.jpeg",width=15,height=15,units="cm",quality=100,res=300)
par(oma = c(0, 0, 0, 1), mar = c(5, 4, 5, 5) + 0.1)
plot(Example12_coh, lty.coi = 1, col.coi = "grey", lwd.coi = 2,
  lwd.sig = 2, ylab = "Scale", xlab = "KB",
  plot.cb = TRUE, main = "Wavelet Coherence: Examples 1 and 2")
dev.off()
jpeg("Example13Coh.jpeg",width=15,height=15,units="cm",quality=100,res=300)
par(oma = c(0, 0, 0, 1), mar = c(5, 4, 5, 5) + 0.1)
plot(Example13_coh, lty.coi = 1, col.coi = "grey", lwd.coi = 2,
  lwd.sig = 2, ylab = "Scale", xlab = "KB",
  plot.cb = TRUE, main = "Wavelet Coherence: Examples 1 and 3")
dev.off()
jpeg("Example23Coh.jpeg",width=15,height=15,units="cm",quality=100,res=300)
par(oma = c(0, 0, 0, 1), mar = c(5, 4, 5, 5) + 0.1)
plot(Example23_coh, lty.coi = 1, col.coi = "grey", lwd.coi = 2,
  lwd.sig = 2, ylab = "Scale", xlab = "KB",
  plot.cb = TRUE, main = "Wavelet Coherence: Examples 2 and 3")
dev.off()
## plot all on one graph
bmp("Coherence_combine.bmp",res=300,height=30,width=30,units='cm')
par(mfrow=c(2,2),oma = c(0, 0, 0, 1), mar = c(5, 4, 5, 5) + 0.1)
plot(Example12_coh, lty.coi = 1, col.coi = "grey", lwd.coi = 2,
  lwd.sig = 2, ylab = "Scale", xlab = "KB",
  plot.cb = TRUE, main = "Examples 1 and 2")
plot(Example13_coh, lty.coi = 1, col.coi = "grey", lwd.coi = 2,
  lwd.sig = 2, ylab = "Scale", xlab = "KB",
  plot.cb = TRUE, main = "Examples 1 and 3")
plot(Example23_coh, lty.coi = 1, col.coi = "grey", lwd.coi = 2,
  lwd.sig = 2, ylab = "Scale", xlab = "KB",
  plot.cb = TRUE, main = "Examples 2 and 3")
dev.off()
## plot Morlet 6
# Complex Morlet mother wavelet with omega0 = 6:
# psi(s) = pi^(-1/4) * exp(-i*6*s) * exp(-s^2/2); real and imaginary parts
f <- function(x) (pi^(-1/4))*exp(-(0+1i)*6*x)*exp(-(x^2)/2)
x <- seq(-5, 5, by=0.00001)
png("Morlet6.png",width=15,height=15,units="cm",res=300)
plot(x,Re(f(x)),type="l",ylim=c(-0.8,0.8),ylab=expression(psi(s)),xlab="s")
lines(x,Im(f(x)),lty=3,col="blue")
legend("topright",legend=c("Real","Imaginary"),lty=c(1,3),col=c("black","blue"))
dev.off()
|
/Chapter_5/Example/WaveletExample2.R
|
no_license
|
chorscroft/PhD-Thesis
|
R
| false
| false
| 16,284
|
r
|
## Wavelet example for thesis
#working directory
setwd("\\\\filestore.soton.ac.uk\\users\\ch19g17\\mydocuments\\Wavelet\\Example Wavelet Work")
##Load prerequisites
#source("//filestore.soton.ac.uk/users/ch19g17/mydocuments/Wavelet/Comparison/getWaveletFile.R")
require(wmtsa)
require(biwavelet)
require(fields)
## Create Fine Scale Changes dataset
#First time generating
#dfExample1<-data.frame(x=c(1:1024),y=rnorm(1024,0,1))
#write.csv(dfExample1,"Example1.csv",row.names=FALSE)
#Read from .csv from now on
dfExample1<-read.csv("Example1.csv")
##Create Wider Scale Changes dataset
#First time generating
#dfExample2<-data.frame(x=c(1:1024),y=sin(c(1:1024)*pi/32)+rnorm(1024,0,1))
#write.csv(dfExample2,"Example2.csv",row.names=FALSE)
#Read from .csv from now on
dfExample2<-read.csv("Example2.csv")
##Create Wider Scale2 Changes dataset
#First time generating
#dfExample3<-data.frame(x=c(1:1024),y=sin(c(1:1024)*pi/32)+rnorm(1024,0,1))
#write.csv(dfExample3,"Example3.csv",row.names=FALSE)
#Read from .csv from now on
dfExample3<-read.csv("Example3.csv")
## Plot example signals
jpeg("ExampleSignals.jpeg",width=13,height=17,units="cm",quality=100,res=300)
par(mfcol=c(3,1))
plot(dfExample1$x,dfExample1$y,type="l",xlab="Kb",ylab="Signal",ylim=c(-5,5),xaxp=c(0,1024,8),main="Example 1: noise")
plot(dfExample2$x,dfExample2$y,type="l",xlab="Kb",ylab="Signal",ylim=c(-5,5),xaxp=c(0,1024,8),main="Example 2: wave")
plot(dfExample3$x,dfExample3$y,type="l",xlab="Kb",ylab="Signal",ylim=c(-5,5),xaxp=c(0,1024,8),main="Example 3: wave")
dev.off()
## Get DWT
Example1_DWT<-wavDWT(dfExample1$y,wavelet="haar")
Example2_DWT<-wavDWT(dfExample2$y,wavelet="haar")
Example3_DWT<-wavDWT(dfExample3$y,wavelet="haar")
## Get approximation coefficients
## Compute Haar approximation (scaling) coefficients at every level.
##
## Starting from the raw signal, repeatedly averages adjacent pairs
## (scaled by 1/sqrt(2)) until a single coefficient remains.
##   x - numeric signal; assumed to have power-of-two length, otherwise
##       the halving loop never reaches exactly 1 (same as original).
## Returns a named list: s0 (the original signal), s1, ..., sJ where
## J = log2(length(x)) and length(s_k) = length(x)/2^k.
getApproxCoeff <- function(x){
  approx<-list(s0=x)
  n<-length(x)
  level<-0
  while (n> 1) {
    n<-n/2
    level<-level+1
    prev<-approx[[level]]
    # vectorised pairwise sum: s[i] = (prev[2i] + prev[2i-1]) / sqrt(2)
    odd<-prev[seq(1, 2*n, by=2)]
    even<-prev[seq(2, 2*n, by=2)]
    approx[[paste0("s",level)]]<-(odd+even)/sqrt(2)
  }
  return(approx)
}
Example1_Approx<-getApproxCoeff(dfExample1$y)
Example2_Approx<-getApproxCoeff(dfExample2$y)
Example3_Approx<-getApproxCoeff(dfExample3$y)
## Plot the wavelet decomposition
# Draw a step/"block" plot: each element of y becomes a horizontal
# segment of equal width partitioning [0, xlim[2]], with vertical risers
# joining consecutive blocks (Haar-coefficient style visualisation).
#   y    - one value per block
#   xlim - x-axis range; blocks evenly partition [0, xlim[2]]
#   RHS  - if TRUE, draw a y-axis on the right-hand side (side 4)
# ylab/xaxt/yaxt/las are forwarded to plot() for axis control.
plotBlocks<-function(y,xlim,ylab="",xaxt="n",yaxt="n",las=0,RHS=FALSE){
blocks<-length(y)
# blocks+1 evenly spaced block boundaries
x<-seq(0,xlim[2],xlim[2]/blocks)
# empty canvas (type="n"); the x-coordinates passed here are dummies,
# only the y values matter for setting the y range
plot(rep(0,length(y)),y,type="n",xlim=xlim,xaxp=c(0,xlim[2],8),ylab=ylab,xlab="",yaxt=yaxt,xaxt=xaxt,las=las)
if (RHS==TRUE){
axis(side = 4,las=2)
}
for(i in 1:blocks){
# horizontal segment for block i
lines(x=c(x[i],x[i+1]),y=c(y[i],y[i]))
if(i < blocks){
# vertical riser between block i and block i+1
lines(x=c(x[i+1],x[i+1]),y=c(y[i],y[i+1]))
}
}
}
# Render a full DWT decomposition to a JPEG file: an 11x2 panel layout
# with detail coefficients d(1)..d(10) in the left column (under the
# original signal) and approximation coefficients a(0)..a(10) on the
# right.  Assumes a length-1024 signal decomposed over 10 levels.
#   DWT        - wavDWT result; detail coefficients in DWT$data[[i]]
#   Approx     - list from getApproxCoeff(); Approx[[k+1]] is a(k)
#   fileName   - output JPEG path
#   graphTitle - outer title across both columns
plotWaveletDecomposition<-function(DWT,Approx,fileName,graphTitle){
jpeg(fileName,width=14,height=21,units="cm",quality=100,res=300)
# wide left/top outer margins leave room for the rotated scale labels
par(mar=c(0.5,0,0,0.5),oma=c(4,9.5,4,4),xpd=NA)
# 22 panels filled column-wise: 11 left (signal + d1..d10), 11 right
layout(matrix(c(1:22), 11, 2, byrow = FALSE))
plotBlocks(Approx[[1]],xlim=c(1,1024),yaxt="s",las=2)
mtext(paste0("Original\nSignal"),side = 2, line = 4.5,cex=0.75,las=1)
title(main="Detail coefficients",line=1)
mtext(graphTitle,outer=TRUE,line=2.5)
for (i in 1:9){
plotBlocks(DWT$data[[i]],xlim=c(1,1024),yaxt="s",las=2)
mtext(paste0("Scale ",2^i),side = 2, line = 4.5,cex=0.75,las=1)
mtext(paste0("d(",i,")"),side = 2, line = 3,cex=0.75)
}
#d10
# level 10 has a single coefficient, so draw it as one flat line
plot(0,0,type="n",xaxp=c(0,1024,8),xlim=c(1,1024),ylab="",xlab="Kb",xaxt="s",las=2)
lines(c(0,1024),c(DWT$data[[10]],DWT$data[[10]]))
mtext(paste0("Scale ",2^10),side = 2, line = 4.5,cex=0.75,las=1)
mtext("d(10)",side = 2, line = 3,cex=0.75)
for (i in 0:9){
plotBlocks(Approx[[i+1]],xlim=c(1,1024),RHS=TRUE)
if(i==0){title(main="Approximation coefficients",line=1)}
mtext(paste0("a(",i,")"), side = 4, line = 3,cex =0.75)
}
# a(10) is also a single value: flat line with a manual right axis
plot(0,Approx[[11]],type="n",xaxp=c(0,1024,8),xlim=c(1,1024),ylab="",xlab="Kb",yaxt="n",las=2)
lines(c(0,1024),c(Approx[[11]],Approx[[11]]))
mtext("a(10)", side = 4, line = 3,cex =0.75)
axis(side = 4,las=2)
dev.off()
}
plotWaveletDecomposition(Example1_DWT,Example1_Approx,"Example1Decomposition.jpeg","DWT of Example 1")
plotWaveletDecomposition(Example2_DWT,Example2_Approx,"Example2Decomposition.jpeg","DWT of Example 2")
plotWaveletDecomposition(Example3_DWT,Example3_Approx,"Example3Decomposition.jpeg","DWT of Example 3")
## Get values for example reconstruction
Example2_DWT$data$s10
Example2_DWT$data$d10
Example2_Approx$s9
(0.0722-(-0.4326))/sqrt(2)
(0.0722+(-0.4326))/sqrt(2)
## Proportion of total energy at each wavelet scale.
##
##   dataDWT - a wavDWT/wavMODWT result: detail coefficients for level i
##             in dataDWT$data[[i]], level count in
##             dataDWT$dictionary$n.levels.
## Returns a numeric vector of per-level energies (sum of squared
## coefficients, NAs removed) normalised to sum to 1.
getPowerSpectrum<-function(dataDWT){
  # vapply replaces the grow-from-NULL loop and guarantees one
  # numeric(1) result per level
  powerSpectrum<-vapply(
    seq_len(dataDWT$dictionary$n.levels),
    function(i) sum(dataDWT$data[[i]]^2,na.rm = TRUE),
    numeric(1)
  )
  powerSpectrum<-powerSpectrum/sum(powerSpectrum,na.rm=TRUE)
  return(powerSpectrum)
}
## Power Spectrum
Example1_PS<-getPowerSpectrum(Example1_DWT)
Example2_PS<-getPowerSpectrum(Example2_DWT)
Example3_PS<-getPowerSpectrum(Example3_DWT)
jpeg("PowerSpectrumExamples.jpeg",width=15,height=15,units="cm",quality=100,res=300)
plot(Example1_PS,type="l",xlim=c(1,10),xlab="Scale Kb",ylab="Proportion of Variance",xaxt="n",ylim=c(0,max(Example1_PS,Example2_PS,Example3_PS,na.rm=TRUE)),main="Proportion of Variance")
axis(1,1:10,labels=2^(1:10),las=2)
lines(Example2_PS,col="blue",lty=2)
lines(Example3_PS,col="red",lty=3)
legend("topright",legend=c("Example 1","Example 2","Example 3"),col=c("black","blue","red"),lty=c(1,2,3))
dev.off()
## Show what happens when rotate 15
dfExample2r<-dfExample2
dfExample2r$y<-c(dfExample2$y[-c(1:15)],dfExample2$y[1:15])
Example2r_DWT<-wavDWT(dfExample2r$y,wavelet="haar")
Example2r_Approx<-getApproxCoeff(dfExample2r$y)
plotWaveletDecomposition(Example2r_DWT,Example2r_Approx,"Example2r15Decomposition.jpeg","DWT of Example 2, rotated by 15")
Example2r_PS<-getPowerSpectrum(Example2r_DWT)
jpeg("PowerSpectrumExamplesr15.jpeg",width=15,height=15,units="cm",quality=100,res=300)
plot(Example2r_PS,type="l",xlim=c(1,10),xlab="Scale Kb",ylab="Proportion of Variance",xaxt="n",ylim=c(0,max(Example2r_PS,na.rm=TRUE)),main="Proportion of Variance",col="green",lty=2)
lines(Example2_PS,col="blue")
legend("topright",legend=c("Example 2","Example 2,\nrotated by 15",""),col=c("blue","green",NA),lty=c(1,2))
axis(1,1:10,labels=2^(1:10),las=2)
dev.off()
## Get MODWT
Example1_MODWT<-wavMODWT(dfExample1$y,wavelet="haar")
Example2_MODWT<-wavMODWT(dfExample2$y,wavelet="haar")
Example3_MODWT<-wavMODWT(dfExample3$y,wavelet="haar")
## Haar MODWT approximation (scaling) coefficients, levels 0..10.
##
## Undecimated analogue of getApproxCoeff: at level i each point is
## averaged with the point 2^(i-1) positions earlier, wrapping around
## circularly (periodic boundary).  The /2 divisor folds in an extra
## 1/sqrt(2) rescaling relative to the decimated transform.
##   x - numeric signal (any length; the original hard-coded 1024 for
##       the wraparound, which produced NAs for other lengths).
## Returns list(s0 = x, s1, ..., s10), each the same length as x.
getApproxCoeffMO <- function(x){
  approx<-list(s0=x)
  n<-length(x)
  level<-0
  for (i in 1:10){
    level<-level+1
    s<-rep(NA,n)
    for (t in 1:n){
      # circular lag of 2^(i-1), modulo the actual signal length
      # (bug fix: was `index <- 1024+index`, valid only for n == 1024)
      index <- ((t - 2^(i-1) - 1) %% n) + 1
      s[t]<-(approx[[level]][t]+approx[[level]][index])/(2) #rescaled by multiplying by another 1/sqrt(2)
    }
    approx[[paste0("s",level)]]<-s
  }
  #check final level for rounding errors: the last level should be
  #constant, so snap near-equal neighbours to the same value
  for (i in 2:length(approx[[level+1]])){
    if (isTRUE(all.equal(approx[[level+1]][i-1],approx[[level+1]][i]))){
      approx[[level+1]][i]<-approx[[level+1]][i-1]
    }
  }
  return(approx)
}
Example1_MOApprox<-getApproxCoeffMO(dfExample1$y)
Example2_MOApprox<-getApproxCoeffMO(dfExample2$y)
Example3_MOApprox<-getApproxCoeffMO(dfExample3$y)
# Redefinition of plotBlocks for the MODWT figures: same block plot as
# before, but adds an xlab argument and pads the y range by +/-0.05 so
# near-constant coefficient series still get a visible axis range.
#   y    - one value per block
#   xlim - x-axis range; blocks evenly partition [0, xlim[2]]
#   RHS  - if TRUE, draw a y-axis on the right-hand side (side 4)
# NOTE(review): this shadows the earlier plotBlocks definition; all
# plotting after this point uses this version.
plotBlocks<-function(y,xlim,ylab="",xaxt="n",yaxt="n",las=0,RHS=FALSE,xlab=""){
blocks<-length(y)
# blocks+1 evenly spaced block boundaries
x<-seq(0,xlim[2],xlim[2]/blocks)
# empty canvas; appending 0.05 and -0.05 to y forces a minimum y range
plot(rep(0,length(y)+2),c(y,0.05,-0.05),type="n",xlim=xlim,xaxp=c(0,xlim[2],8),ylab=ylab,xlab=xlab,yaxt=yaxt,xaxt=xaxt,las=las)
if (RHS==TRUE){
axis(side = 4,las=2)
}
for(i in 1:blocks){
# horizontal segment for block i
lines(x=c(x[i],x[i+1]),y=c(y[i],y[i]))
if(i < blocks){
# vertical riser between block i and block i+1
lines(x=c(x[i+1],x[i+1]),y=c(y[i],y[i+1]))
}
}
}
# Render a full MODWT decomposition to a JPEG: 11x2 panels, detail
# coefficients d(1)..d(10) on the left (under the original signal) and
# approximation coefficients a(0)..a(10) on the right.  Unlike the DWT
# version, every level keeps the full signal length, so plotBlocks is
# used throughout.  Assumes a 10-level decomposition of length 1024.
#   DWT        - wavMODWT result; detail coefficients in DWT$data[[i]]
#   Approx     - list from getApproxCoeffMO(); Approx[[k+1]] is a(k)
#   fileName   - output JPEG path
#   graphTitle - outer title across both columns
plotWaveletMODecomposition<-function(DWT,Approx,fileName,graphTitle){
jpeg(fileName,width=14,height=21,units="cm",quality=100,res=300)
# wide left/top outer margins leave room for the rotated scale labels
par(mar=c(0.5,0,0,0.5),oma=c(4,9.5,4,4),xpd=NA)
# 22 panels filled column-wise: 11 left, 11 right
layout(matrix(c(1:22), 11, 2, byrow = FALSE))
plotBlocks(Approx[[1]],xlim=c(1,1024),yaxt="s",las=2)
mtext(paste0("Original\nSignal"),side = 2, line = 4.5,cex=0.75,las=1)
title(main="Detail coefficients",line=1)
mtext(graphTitle,outer=TRUE,line=2.5)
for (i in 1:10){
# only the bottom row (i == 10) gets an x axis and "Kb" label
plotBlocks(DWT$data[[i]],xlim=c(1,1024),yaxt="s",las=2,xaxt=ifelse(i==10,"s","n"),xlab=ifelse(i==10,"Kb",""))
mtext(paste0("Scale ",2^i),side = 2, line = 4.5,cex=0.75,las=1)
mtext(paste0("d(",i,")"),side = 2, line = 3,cex=0.75)
}
for (i in 0:10){
plotBlocks(Approx[[i+1]],xlim=c(1,1024),RHS=TRUE,xaxt=ifelse(i==10,"s","n"),las=2,xlab=ifelse(i==10,"Kb",""))
if(i==0){title(main="Approximation coefficients",line=1)}
mtext(paste0("a(",i,")"), side = 4, line = 3,cex =0.75) #WAS line =0.5
}
dev.off()
}
plotWaveletMODecomposition(Example1_MODWT,Example1_MOApprox,"Example1MODecomposition.jpeg","MODWT of Example 1")
plotWaveletMODecomposition(Example2_MODWT,Example2_MOApprox,"Example2MODecomposition.jpeg","MODWT of Example 2")
plotWaveletMODecomposition(Example3_MODWT,Example3_MOApprox,"Example3MODecomposition.jpeg","MODWT of Example 3")
## Power spectrum of MODWT
Example1_MOPS<-getPowerSpectrum(Example1_MODWT)
Example2_MOPS<-getPowerSpectrum(Example2_MODWT)
Example3_MOPS<-getPowerSpectrum(Example3_MODWT)
jpeg("PowerSpectrumMOExamples.jpeg",width=15,height=15,units="cm",quality=100,res=300)
plot(Example1_MOPS,type="l",xlim=c(1,10),xlab="Scale Kb",ylab="Proportion of Variance",xaxt="n",ylim=c(0,max(Example1_MOPS,Example2_MOPS,Example3_MOPS,na.rm=TRUE)),main="Proportion of Variance with MODWT")
axis(1,1:10,labels=2^(1:10),las=2)
lines(Example2_MOPS,col="blue",lty=2)
lines(Example3_MOPS,col="red",lty=3)
legend("topright",legend=c("Example 1","Example 2","Example 3"),col=c("black","blue","red"),lty=c(1,2,3))
dev.off()
## Wavelet correlation
#function
## Kendall rank correlation between two wavelet decompositions,
## computed scale by scale.
##
##   x, y - DWT/MODWT objects; detail coefficients for level i live in
##          $data[[i]].
## Only levels 1..9 are tested because cor.test needs more than two
## observations per level (level 10 of a DWT has a single coefficient).
## Returns list(est = Kendall's tau per level, pval = p-value per level).
getWaveletCorrelation10<-function(x,y){ ##x and y are DWT objects
  nLevels<-9 ##need more than 2 observations
  # preallocate instead of growing from NULL
  est<-numeric(nLevels)
  pval<-numeric(nLevels)
  for (i in seq_len(nLevels)){
    rankCor<-cor.test(x$data[[i]],y$data[[i]],method="kendall")
    est[i]<-rankCor$estimate
    pval[i]<-rankCor$p.value
  }
  return(list(est=est,pval=pval))
}
#get correlations
Example12_cor<-getWaveletCorrelation10(Example1_MODWT,Example2_MODWT)
Example13_cor<-getWaveletCorrelation10(Example1_MODWT,Example3_MODWT)
Example23_cor<-getWaveletCorrelation10(Example2_MODWT,Example3_MODWT)
jpeg("CorrelationExamples.jpeg",width=15,height=15,units="cm",quality=100,res=300)
plot(Example12_cor$est[1:8],type="l",main="Correlations",ylim=c(-1,1),col="black",xaxt="n",xlab="Scale Kb",ylab="Kendall's tau")
axis(1,at=c(1:8),labels=c(2^(1:8)),las=2)
points(which(Example12_cor$pval[1:8]<0.01),Example12_cor$est[which(Example12_cor$pval[1:8]<0.01)],col="black")
lines(Example13_cor$est[1:8],col="blue",lty=2)
points(which(Example13_cor$pval[1:8]<0.01),Example13_cor$est[which(Example13_cor$pval[1:8]<0.01)],col="blue")
lines(Example23_cor$est[1:8],col="red",lty=3)
points(which(Example23_cor$pval[1:8]<0.01),Example23_cor$est[which(Example23_cor$pval[1:8]<0.01)],col="red")
legend("bottomleft",legend=c("Examples 1 and 2","Examples 1 and 3","Examples 2 and 3"),col=c("black","blue","red"),lty=c(1,2,3))
legend("bottomright",legend="p-value < 0.01",pch=1)
dev.off()
# ----------------------------------------------------------------------------------------
###CWT
# Add a colour-bar legend for a biwavelet CWT power plot using
# fields::image.plot with legend.only = TRUE.  Rebuilds the same
# log2 power-to-colour mapping that biwavelet's plot method uses
# (bias-corrected power, 64-step jet-like palette) so the legend
# matches an already-drawn panel.
#   x - a biwavelet wt() object; reads $power.corr, $period, $sigma2, $t
# NOTE: x is modified locally only (R copy-on-modify); the caller's
# object is untouched.
plotLegendCWT<-function(x){
x$power <- x$power.corr
yvals <- log2(x$period)
# power normalised by the series variance, on a log2 scale
zvals <- log2(abs(x$power/x$sigma2))
# symmetric limits around 0, clipped below
zlim <- range(c(-1, 1) * max(zvals))
zvals[zvals < zlim[1]] <- zlim[1]
#locs <- pretty(range(zlim), n = 7)
# fixed tick positions; labels shown as powers of two (1/512 ... 512)
locs<-c(-9,-6,-3,0,3,6,9)
leg.lab<-2^locs
leg.lab[leg.lab<1] <- paste0("1/",(1/leg.lab[leg.lab<1]))
# same jet-style ramp biwavelet uses for its power plots
fill.cols <- c("#00007F", "blue", "#007FFF", "cyan",
"#7FFF7F", "yellow", "#FF7F00", "red", "#7F0000")
col.pal <- colorRampPalette(fill.cols)
fill.colors <- col.pal(64)
# legend.only = TRUE draws just the colour bar next to the current plot
image.plot(x$t, yvals, t(zvals),
zlim = zlim,
ylim = rev(range(yvals)),
col = fill.colors,
horizontal = FALSE,
legend.only = TRUE,
axis.args = list(at = locs,labels = leg.lab),
xpd = NA)
}
Example1_CWT<-wt(dfExample1,mother="morlet",param=6)
Example2_CWT<-wt(dfExample2,mother="morlet",param=6)
Example3_CWT<-wt(dfExample3,mother="morlet",param=6)
bmp("Example1CWT.bmp",res=300,height=17,width=23,units='cm')
par(mfrow=c(2,1), oma = c(0, 0, 0, 1), mar = c(0, 4, 4, 5) + 0.1)
plot(Example1_CWT, plot.cb = FALSE, plot.phase = FALSE,main="Example 1",xlab="",xaxt="n",lwd.sig=2,las=1)
plotLegendCWT(Example1_CWT)
par(mar = c(5, 4, 0, 5) + 0.1)
plot(dfExample1$x,dfExample1$y,type="l",ylab="Signal",xlab="Kb",xaxs="i",xaxp=c(0,1024,8),las=1)
dev.off()
bmp("Example2CWT.bmp",res=300,height=17,width=23,units='cm')
par(mfrow=c(2,1), oma = c(0, 0, 0, 1), mar = c(0, 4, 4, 5) + 0.1)
plot(Example2_CWT, plot.cb = FALSE, plot.phase = FALSE,main="Example 2",xlab="",xaxt="n",lwd.sig=2,las=1)
plotLegendCWT(Example2_CWT)
par(mar = c(5, 4, 0, 5) + 0.1)
plot(dfExample2$x,dfExample2$y,type="l",ylab="Signal",xlab="Kb",xaxs="i",xaxp=c(0,1024,8),las=1)
dev.off()
bmp("Example3CWT.bmp",res=300,height=17,width=23,units='cm')
par(mfrow=c(2,1), oma = c(0, 0, 0, 1), mar = c(0, 4, 4, 5) + 0.1)
plot(Example3_CWT, plot.cb = FALSE, plot.phase = FALSE,main="Example 3",xlab="",xaxt="n",lwd.sig=2,las=1)
plotLegendCWT(Example3_CWT)
par(mar = c(5, 4, 0, 5) + 0.1)
plot(dfExample3$x,dfExample3$y,type="l",ylab="Signal",xlab="Kb",xaxs="i",xaxp=c(0,1024,8),las=1)
dev.off()
# Combine example 1 and 2
bmp("CWT_12combine.bmp",res=300,height=17,width=46,units='cm')
layout(matrix(c(1,2,3,4),2,2,byrow=TRUE))
par(oma = c(0, 0, 0, 1), mar = c(0, 4, 4, 5) + 0.1)
plot(Example1_CWT, plot.cb = FALSE, plot.phase = FALSE,main="Example 1",xlab="",xaxt="n",lwd.sig=2,las=1)
plotLegendCWT(Example1_CWT)
plot(Example2_CWT, plot.cb = FALSE, plot.phase = FALSE,main="Example 2",xlab="",xaxt="n",lwd.sig=2,las=1)
plotLegendCWT(Example2_CWT)
par(mar = c(5, 4, 0, 5) + 0.1)
plot(dfExample1$x,dfExample1$y,type="l",ylab="Signal",xlab="Kb",xaxs="i",xaxp=c(0,1024,8),las=1)
plot(dfExample2$x,dfExample2$y,type="l",ylab="Signal",xlab="Kb",xaxs="i",xaxp=c(0,1024,8),las=1)
dev.off()
## Wavelet Coherence
Example12_coh<-wtc(dfExample1,dfExample2,mother="morlet",param=6,nrands = 1000)
Example13_coh<-wtc(dfExample1,dfExample3,mother="morlet",param=6,nrands = 1000)
Example23_coh<-wtc(dfExample2,dfExample3,mother="morlet",param=6,nrands = 1000)
jpeg("Example12Coh.jpeg",width=15,height=15,units="cm",quality=100,res=300)
par(oma = c(0, 0, 0, 1), mar = c(5, 4, 5, 5) + 0.1)
plot(Example12_coh, lty.coi = 1, col.coi = "grey", lwd.coi = 2,
lwd.sig = 2, ylab = "Scale", xlab = "KB",
plot.cb = TRUE, main = "Wavelet Coherence: Examples 1 and 2")
dev.off()
jpeg("Example13Coh.jpeg",width=15,height=15,units="cm",quality=100,res=300)
par(oma = c(0, 0, 0, 1), mar = c(5, 4, 5, 5) + 0.1)
plot(Example13_coh, lty.coi = 1, col.coi = "grey", lwd.coi = 2,
lwd.sig = 2, ylab = "Scale", xlab = "KB",
plot.cb = TRUE, main = "Wavelet Coherence: Examples 1 and 3")
dev.off()
jpeg("Example23Coh.jpeg",width=15,height=15,units="cm",quality=100,res=300)
par(oma = c(0, 0, 0, 1), mar = c(5, 4, 5, 5) + 0.1)
plot(Example23_coh, lty.coi = 1, col.coi = "grey", lwd.coi = 2,
lwd.sig = 2, ylab = "Scale", xlab = "KB",
plot.cb = TRUE, main = "Wavelet Coherence: Examples 2 and 3")
dev.off()
## plot all on one graph
bmp("Coherence_combine.bmp",res=300,height=30,width=30,units='cm')
par(mfrow=c(2,2),oma = c(0, 0, 0, 1), mar = c(5, 4, 5, 5) + 0.1)
plot(Example12_coh, lty.coi = 1, col.coi = "grey", lwd.coi = 2,
lwd.sig = 2, ylab = "Scale", xlab = "KB",
plot.cb = TRUE, main = "Examples 1 and 2")
plot(Example13_coh, lty.coi = 1, col.coi = "grey", lwd.coi = 2,
lwd.sig = 2, ylab = "Scale", xlab = "KB",
plot.cb = TRUE, main = "Examples 1 and 3")
plot(Example23_coh, lty.coi = 1, col.coi = "grey", lwd.coi = 2,
lwd.sig = 2, ylab = "Scale", xlab = "KB",
plot.cb = TRUE, main = "Examples 2 and 3")
dev.off()
## plot Morlet 6
# Complex Morlet mother wavelet with parameter 6, as written here:
# psi(x) = pi^(-1/4) * exp(-6ix) * exp(-x^2/2)
f <- function(x) (pi^(-1/4))*exp(-(0+1i)*6*x)*exp(-(x^2)/2)
x <- seq(-5, 5, by=0.00001)
png("Morlet6.png",width=15,height=15,units="cm",res=300)
# real part as a solid black line, imaginary part dotted blue
plot(x,Re(f(x)),type="l",ylim=c(-0.8,0.8),ylab=expression(psi(s)),xlab="s")
lines(x,Im(f(x)),lty=3,col="blue")
legend("topright",legend=c("Real","Imaginary"),lty=c(1,3),col=c("black","blue"))
dev.off()
|
\name{micro_S}
\alias{micro_S}
\docType{data}
\title{
Stimulated dataset
}
\description{
This is the stimulated data part of the GSE39411 dataset, \url{https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE39411}. Data were normalized and are ready to use.
}
\usage{data(micro_S)}
\format{A data frame with 54613 probesets measured 6 times through 4 time points.
}
\details{
5 leukemic CLL B-lymphocyte of aggressive form were stimulated in vitro with an anti-IgM antibody, activating the B-cell receptor (BCR). We analyzed the gene expression at 4 time points (60, 90, 210 and 390 minutes). Each gene expression measurement is performed both in stimulated cells and in control unstimulated cells. This is the stimulated cells dataset.
Data were collected on HG-U133_Plus_2, Affymetrix Human Genome U133 Plus 2.0 Array.
}
\references{
Vallat, L., Kemper, C. A., Jung, N., Maumy-Bertrand, M., Bertrand, F., \dots, Bahram, S. (2013). Reverse-engineering the genetic circuitry of a cancer cell with predicted intervention in chronic lymphocytic leukemia. Proceedings of the National Academy of Sciences, 110(2), 459-464, \url{https://dx.doi.org/10.1073/pnas.1211130110}.}
\examples{
data(micro_S)
}
\keyword{datasets}
|
/man/micro_S.Rd
|
no_license
|
Bhanditz/CascadeData
|
R
| false
| false
| 1,220
|
rd
|
\name{micro_S}
\alias{micro_S}
\docType{data}
\title{
Stimulated dataset
}
\description{
This is the stimulated data part of the GSE39411 dataset, \url{https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE39411}. Data were normalized and are ready to use.
}
\usage{data(micro_S)}
\format{A data frame with 54613 probesets measured 6 times through 4 time points.
}
\details{
5 leukemic CLL B-lymphocyte of aggressive form were stimulated in vitro with an anti-IgM antibody, activating the B-cell receptor (BCR). We analyzed the gene expression at 4 time points (60, 90, 210 and 390 minutes). Each gene expression measurement is performed both in stimulated cells and in control unstimulated cells. This is the stimulated cells dataset.
Data were collected on HG-U133_Plus_2, Affymetrix Human Genome U133 Plus 2.0 Array.
}
\references{
Vallat, L., Kemper, C. A., Jung, N., Maumy-Bertrand, M., Bertrand, F., \dots, Bahram, S. (2013). Reverse-engineering the genetic circuitry of a cancer cell with predicted intervention in chronic lymphocytic leukemia. Proceedings of the National Academy of Sciences, 110(2), 459-464, \url{https://dx.doi.org/10.1073/pnas.1211130110}.}
\examples{
data(micro_S)
}
\keyword{datasets}
|
require(mxnet)
# A basic neural net training
# To run this, run python/mxnet/test_io.py to get data first
# Network configuration
batch.size <- 100
data <- mx.symbol.Variable("data")
fc1 <- mx.symbol.FullyConnected(data, name="fc1", num_hidden=128)
act1 <- mx.symbol.Activation(fc1, name="relu1", act_type="relu")
fc2 <- mx.symbol.FullyConnected(act1, name = "fc2", num_hidden = 64)
act2 <- mx.symbol.Activation(fc2, name="relu2", act_type="relu")
fc3 <- mx.symbol.FullyConnected(act2, name="fc3", num_hidden=10)
softmax <- mx.symbol.Softmax(fc3, name = "sm")
dtrain = mx.varg.io.MNISTIter(list(
image="data/train-images-idx3-ubyte",
label="data/train-labels-idx1-ubyte",
data.shape=c(784),
batch.size=batch.size,
shuffle=TRUE,
flat=TRUE,
silent=0,
seed=10))
# Classification accuracy: fraction of rows of `pred` whose arg-max
# column matches `label`.  Labels are zero-based; max.col is one-based,
# hence the +1 shift.
accuracy <- function(label, pred) {
  predicted_class <- max.col(as.array(pred))
  true_class <- as.array(label) + 1
  sum(true_class == predicted_class) / length(label)
}
mx.set.seed(0)
# Training parameters
ctx <- mx.cpu()
input.shape <- c(batch.size, 784)
symbol <- softmax
init <- mx.init.uniform(0.07)
opt <- mx.opt.sgd(learning.rate=0.05, momentum=0.9, rescale.grad=1.0/batch.size)
# Training procedure
texec <- mx.simple.bind(symbol, ctx=ctx, data=input.shape, grad.req=TRUE)
shapes <- lapply(texec$ref.arg.arrays, dim)
names(shapes) <- names(texec$arg.arrays)
arg.arrays <- mx.init.create(init, shapes, ctx)
mx.exec.update.arg.arrays(texec, arg.arrays, match.name=TRUE)
updater <- mx.opt.get.updater(opt, texec$ref.arg.arrays)
nround <- 10
tic <- proc.time()
for (iteration in 1 : nround) {
nbatch <- 0
train.acc <- 0
while (dtrain$iter.next()) {
batch <- dtrain$value()
label <- batch$label
names(batch) <- c("data", "sm_label")
# copy data arguments to executor
mx.exec.update.arg.arrays(texec, batch, match.name=TRUE)
# forward pass
mx.exec.forward(texec, is.train=TRUE)
# copy prediction out
out.pred <- mx.nd.copyto(texec$outputs[[1]], mx.cpu())
# backward pass
mx.exec.backward(texec)
arg.arrays <- updater(texec$arg.arrays, texec$ref.grad.arrays)
mx.exec.update.arg.arrays(texec, arg.arrays, skip.null=TRUE)
nbatch <- nbatch + 1
train.acc <- train.acc + accuracy(label, out.pred)
if (nbatch %% 100 == 0) {
print(paste("Train-acc=", train.acc / nbatch))
print(proc.time() - tic)
}
}
dtrain$reset()
print(paste("Train-acc=", train.acc / nbatch))
}
|
/R-package/demo/basic_nn.R
|
permissive
|
wpande/mxnet
|
R
| false
| false
| 2,396
|
r
|
require(mxnet)
# A basic neural net training
# To run this, run python/mxnet/test_io.py to get data first
# Network configuration
batch.size <- 100
data <- mx.symbol.Variable("data")
fc1 <- mx.symbol.FullyConnected(data, name="fc1", num_hidden=128)
act1 <- mx.symbol.Activation(fc1, name="relu1", act_type="relu")
fc2 <- mx.symbol.FullyConnected(act1, name = "fc2", num_hidden = 64)
act2 <- mx.symbol.Activation(fc2, name="relu2", act_type="relu")
fc3 <- mx.symbol.FullyConnected(act2, name="fc3", num_hidden=10)
softmax <- mx.symbol.Softmax(fc3, name = "sm")
dtrain = mx.varg.io.MNISTIter(list(
image="data/train-images-idx3-ubyte",
label="data/train-labels-idx1-ubyte",
data.shape=c(784),
batch.size=batch.size,
shuffle=TRUE,
flat=TRUE,
silent=0,
seed=10))
# Mean top-1 accuracy of predictions `pred` (one row of scores per
# sample) against zero-based labels `label` (max.col returns one-based
# column indices, so labels are shifted by +1 before comparing).
accuracy <- function(label, pred) {
  hits <- (as.array(label) + 1) == max.col(as.array(pred))
  sum(hits) / length(label)
}
mx.set.seed(0)
# Training parameters
ctx <- mx.cpu()
input.shape <- c(batch.size, 784)
symbol <- softmax
init <- mx.init.uniform(0.07)
opt <- mx.opt.sgd(learning.rate=0.05, momentum=0.9, rescale.grad=1.0/batch.size)
# Training procedure
texec <- mx.simple.bind(symbol, ctx=ctx, data=input.shape, grad.req=TRUE)
shapes <- lapply(texec$ref.arg.arrays, dim)
names(shapes) <- names(texec$arg.arrays)
arg.arrays <- mx.init.create(init, shapes, ctx)
mx.exec.update.arg.arrays(texec, arg.arrays, match.name=TRUE)
updater <- mx.opt.get.updater(opt, texec$ref.arg.arrays)
nround <- 10
tic <- proc.time()
for (iteration in 1 : nround) {
nbatch <- 0
train.acc <- 0
while (dtrain$iter.next()) {
batch <- dtrain$value()
label <- batch$label
names(batch) <- c("data", "sm_label")
# copy data arguments to executor
mx.exec.update.arg.arrays(texec, batch, match.name=TRUE)
# forward pass
mx.exec.forward(texec, is.train=TRUE)
# copy prediction out
out.pred <- mx.nd.copyto(texec$outputs[[1]], mx.cpu())
# backward pass
mx.exec.backward(texec)
arg.arrays <- updater(texec$arg.arrays, texec$ref.grad.arrays)
mx.exec.update.arg.arrays(texec, arg.arrays, skip.null=TRUE)
nbatch <- nbatch + 1
train.acc <- train.acc + accuracy(label, out.pred)
if (nbatch %% 100 == 0) {
print(paste("Train-acc=", train.acc / nbatch))
print(proc.time() - tic)
}
}
dtrain$reset()
print(paste("Train-acc=", train.acc / nbatch))
}
|
# Histogram of per-request latencies (times.txt, one value per line),
# written to distribution.pdf.
pdf("distribution.pdf", width=6, height=6)
data <- read.table("times.txt", header=F, stringsAsFactors=F)
# coerce the first column to numeric (values may have been read as text)
x = as.numeric(as.character(data$V1))
h<-hist(x, breaks=20, col="blue", xlab="Milliseconds per Request",
main="Frequency for each time bucket")
# fitted normal curve scaled to the histogram's count axis; computed
# but not drawn -- the overlay call below is commented out
xfit<-seq(0, 1020,length=80)
yfit<-dnorm(xfit,mean=mean(x),sd=sd(x))
yfit <- yfit*diff(h$mids[1:2])*length(x)
# lines(xfit, yfit, col="blue", lwd=2)s
dev.off()
|
/results/getStats.R
|
no_license
|
wangzhao1988/WebClient
|
R
| false
| false
| 420
|
r
|
# Histogram of per-request latencies (times.txt, one value per line),
# written to distribution.pdf.
pdf("distribution.pdf", width=6, height=6)
data <- read.table("times.txt", header=F, stringsAsFactors=F)
# coerce the first column to numeric (values may have been read as text)
x = as.numeric(as.character(data$V1))
h<-hist(x, breaks=20, col="blue", xlab="Milliseconds per Request",
main="Frequency for each time bucket")
# fitted normal curve scaled to the histogram's count axis; computed
# but not drawn -- the overlay call below is commented out
xfit<-seq(0, 1020,length=80)
yfit<-dnorm(xfit,mean=mean(x),sd=sd(x))
yfit <- yfit*diff(h$mids[1:2])*length(x)
# lines(xfit, yfit, col="blue", lwd=2)s
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{NorthSeaCod}
\alias{NorthSeaCod}
\title{Atlantic Cod Recruits}
\format{A data frame with 39 observations of one variable. \describe{
\item{log10.recruits}{} }}
\source{
\emph{inferred from} Beaugrand, G., K.M. Brander, J.A. Lindley, S.
Souissi, and P.C. Reid. 2003. Plankton effect on cod recruitment in the
North Sea. \emph{Nature} 426: 661-664.
}
\description{
Number (\eqn{\log_{10}}{log10} transformed) of Atlantic cod (\emph{Gadus
morhua}) that recruited (grew to catchable size) in the North Sea over a 39
years span.
}
\examples{
favstats(NorthSeaCod$log10.recruits)
}
\references{
\url{http://www.nature.com/nature/journal/v426/n6967/abs/nature02164.html}
}
\keyword{datasets}
|
/man/NorthSeaCod.Rd
|
no_license
|
mdlama/abd
|
R
| false
| true
| 798
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{NorthSeaCod}
\alias{NorthSeaCod}
\title{Atlantic Cod Recruits}
\format{A data frame with 39 observations of one variable. \describe{
\item{log10.recruits}{} }}
\source{
\emph{inferred from} Beaugrand, G., K.M. Brander, J.A. Lindley, S.
Souissi, and P.C. Reid. 2003. Plankton effect on cod recruitment in the
North Sea. \emph{Nature} 426: 661-664.
}
\description{
Number (\eqn{\log_{10}}{log10} transformed) of Atlantic cod (\emph{Gadus
morhua}) that recruited (grew to catchable size) in the North Sea over a 39
years span.
}
\examples{
favstats(NorthSeaCod$log10.recruits)
}
\references{
\url{http://www.nature.com/nature/journal/v426/n6967/abs/nature02164.html}
}
\keyword{datasets}
|
[
{
"title": "Running sparklyr – RStudio’s R Interface to Spark on Amazon EMR",
"href": "https://aws.amazon.com/cn/blogs/big-data/running-sparklyr-rstudios-r-interface-to-spark-on-amazon-emr/",
"des": ""
},
{
"title": "RStudio in the Cloud II: Syncing Code & Data with AWS",
"href": "http://strimas.com/r/rstudio-cloud-2/",
"des": ": a continuation of [a previous post](http://strimas.com/r/rstudio-cloud-1/) on setting up RStudio in the cloud on Amazon Web Services. This tutorial demonstrates transferring data and code to/from the cloud with GitHub and S3."
},
{
"title": "Running sparklyr – RStudio’s R Interface to Spark on Amazon EMR",
"href": "https://aws.amazon.com/cn/blogs/big-data/running-sparklyr-rstudios-r-interface-to-spark-on-amazon-emr/",
"des": "",
"img": "https://d2908q01vomqb2.cloudfront.net/b6692ea5df920cad691c20319a6fffd7a4a766b8/2016/10/17/sparklyr_2.gif"
},
{
"title": "Rmarkdown in a scientific workflow",
"href": "http://predictiveecology.org/2016/10/21/Rmarkdown-science-workflow.html",
"des": " - Using Rmarkdown with Rstudio and for all stages of my scientific projects has been a remarkable shift in how my work gets done! "
},
{
"title": "Combing R and Java",
"href": "https://datadidit.com/2016/10/15/combing-r-and-java/",
"des": ""
},
{
"title": "The new R Graph Gallery",
"href": "http://www.r-graph-gallery.com/",
"des": ""
},
{
"title": "Why I would rather use ReporteRs than RMarkdown",
"href": "http://www.mango-solutions.com/wp/2016/10/why-i-would-rather-use-reporters-than-rmarkdown/",
"des": ""
},
{
"title": "Visualizing ROC Curves in R using Plotly",
"href": "http://moderndata.plot.ly/visualizing-roc-curves-in-r-using-plotly/",
"des": ""
},
{
"title": "The Grammar of Graphics and Radar Charts",
"href": "http://www.r-chart.com/2016/10/the-grammar-of-graphics-and-radar-charts.html",
"des": "",
"img": "https://i0.wp.com/2.bp.blogspot.com/-MwCucP8iX-A/WAJJF7vSj2I/AAAAAAAAAzg/P9N4U4gEMag2ml5NvGfxCvn_sYDzbcBJACEw/s640/polar_finished.png"
},
{
"title": "The Worlds Economic Data, Shiny Apps and all you want to know about Propensity Score Matching!",
"href": "http://r-exercises.com/2016/10/21/the-worlds-economic-data-shiny-apps-and-all-you-want-to-know-about-propensity-score-matching/",
"des": ""
},
{
"title": "Creating Interactive Plots with R and Highcharts",
"href": "https://www.rstudio.com/rviews/2016/10/19/creating-interactive-plots-with-r-and-highcharts/",
"des": ""
},
{
"title": "Using the pipe operator in R with Plotly",
"href": "http://moderndata.plot.ly/using-the-pipe-operator-in-r-with-plotly/",
"des": ""
},
{
"title": "Deep learning in the cloud with MXNet",
"href": "http://rsnippets.blogspot.com/2016/10/deep-learning-in-cloud-with-mxnet.html",
"des": ""
},
{
"title": "Raccoon Ch. 1 – Introduction to Linear Models with R",
"href": "http://www.quantide.com/raccoon-ch-1-introduction-to-linear-models-with-r/",
"des": ""
},
{
"title": "Annotated Facets with ggplot2",
"href": "https://statbandit.wordpress.com/2016/10/20/annotated-facets-with-ggplot2/",
"des": ""
},
{
"title": "On the ifelse function",
"href": "https://privefl.github.io/blog/On-the-ifelse-function/",
"des": ""
},
{
"title": "Progress bar overhead comparisons",
"href": "http://peter.solymos.org/code/2016/10/15/progress-bar-overhead-comparisons.html",
"des": "",
"img": "https://i2.wp.com/peter.solymos.org/images/2016/10/15/pb-overhead.png"
},
{
"title": "Statistical Reading Rainbow",
"href": "https://mathewanalytics.com/2016/10/17/statistical-reading-rainbow/",
"des": ""
},
{
"title": "Is Unemployment Higher under Labour or the Conservatives?",
"href": "http://rforjournalists.com/2016/10/17/is-unemployment-higher-under-labour-or-the-conservatives/",
"des": " - This post has covered using rectangles as annotations to show the British unemployment rate under different political parties, plus how to use breaks in your axes scaling.",
"img": "https://i2.wp.com/rforjournalists.com/wp-content/uploads/2016/10/unemployment2.png"
},
{
"title": "The History of Strikes in Britain, Told Using Line Plots and Annotations",
"href": "http://rforjournalists.com/2016/10/17/is-unemployment-higher-under-labour-or-the-conservatives/",
"des": ""
},
{
"title": "Exploring the effects of healthcare investment on child mortality in R",
"href": "https://drsimonj.svbtle.com/exploring-a-causal-relation-between-healthcare-investment-and-child-mortality-in-r",
"des": "",
"img": "https://i0.wp.com/svbtleusercontent.com/n1yn7f9gjs8gua.png"
},
{
"title": "The 'deadly board game' puzzle: efficient simulation in R",
"href": "http://varianceexplained.org/r/board-game-simulation/",
"des": ""
},
{
"title": "Rcpp now used by 800 CRAN packages",
"href": "http://dirk.eddelbuettel.com/blog/2016/10/16/",
"des": "",
"img": "https://i1.wp.com/dirk.eddelbuettel.com/blog/code/rcpp/RcppGrowth_2016-10-16.png"
},
{
"title": "How to “get good at R”",
"href": "http://www.arilamstein.com/blog/2016/10/18/get-good-r/",
"des": ""
},
{
"title": "Election 2016: Tracking Emotions with R and Python",
"href": "http://blog.revolutionanalytics.com/2016/10/debate-emotions.html",
"des": " "
},
{
"title": "Don’t buy a brand new Porsche 911 or Audi Q7!!",
"href": "https://longhowlam.wordpress.com/2016/10/19/dont-buy-a-brand-new-porsche-911-or-audi-q7/",
"des": " - Many people know that nasty feeling when buying a brand new car. The minute that you have left the dealer, your car has lost a substantial amount of value. Unfortunately this depreciation is inevitable, however, the amount depends heavily on the car make and model."
},
{
"title": "Estimating the value of a vehicle with R",
"href": "http://blog.revolutionanalytics.com/2016/10/car-valuation.html",
"des": "",
"img": "https://revolution-computing.typepad.com/.a/6a010534b1db25970b01b8d228f412970c-pi"
},
{
"title": "How long do I have to survive without cake?",
"href": "http://www.mango-solutions.com/wp/2016/10/how-long-do-i-have-to-survive-without-cake/",
"des": " - A more fun example of survival analysis is to consider the time in between someone bringing cake to work."
},
{
"title": "Tourism forecasting competition data in the Tcomp R package",
"href": "http://ellisp.github.io/blog/2016/10/19/Tcomp",
"des": " - A new R package `Tcomp` makes data from the 2010 tourism forecasting competition available in a format designed to facilitate the fitting and testing of en masse automated forecasts, consistent with the M1 and M3 forecasting competition data in the `Mcomp` R package. "
},
{
"title": "Notes from the Kölner R meeting, 14 October 2016",
"href": "http://www.magesblog.com/2016/10/notes-from-kolner-r-meeting-14-october.html",
"des": ""
},
{
"title": "Call for rstudio::conf lightning talks",
"href": "https://blog.rstudio.org/2016/10/18/call-for-rstudioconf-lightning-talks/",
"des": ""
},
{
"title": "Warsaw R-Ladies",
"href": "http://r-addict.com/2016/10/21/Warsaw-RLadies-01.html",
"des": "",
"img": "https://i1.wp.com/r-addict.com/images/fulls/rladies1.JPG"
},
{
"title": "The Team Data Science Process",
"href": "http://blog.revolutionanalytics.com/2016/10/the-team-data-science-process.html",
"des": " - As more and more organizations are setting up teams of data scientists to make sense of the massive amounts of data they collect, the need grows for a standardized process for managing the work of those teams. ",
"img": "https://revolution-computing.typepad.com/.a/6a010534b1db25970b01bb0945bf4d970d-pi"
},
{
"title": "Paper published: mlr – Machine Learning in R",
"href": "https://www.r-bloggers.com/paper-published-mlr-machine-learning-in-r/",
"des": ""
},
{
"title": "6 new jobs for R users – from around the world (2016-10-19)",
"href": "https://www.r-bloggers.com/6-new-jobs-for-r-users-from-around-the-world-2016-10-19/",
"des": ""
},
{
    "title": "Hadley Wickham \"Data Science with R\" at Reed College",
"href": "https://www.youtube.com/watch?v=K-ss_ag2k9E&feature=youtu.be",
"des": ""
},
{
"title": "How to write a useful htmlwidgets in R: tips and walk-through a real example",
"href": "http://deanattali.com/blog/htmlwidgets-tips/",
"des": " - I’d like to share some tips and recommendations on building htmlwidgets, based on my own learning experience while creating timevis."
},
{
"title": "R Tools for Visual Studio 0.5",
"href": "http://blog.revolutionanalytics.com/2016/10/rtvs-05-now-available.html",
"des": " - the open-source Visual Studio add-in for R programmers."
},
{
"title": "DOM 0.3",
"href": "http://stattech.wordpress.fos.auckland.ac.nz/2016-13-dom-version-0-3/",
"des": " - This version represents a major refactoring of the package code, including its user-facing API."
},
{
"title": "anytime 0.0.4",
"href": "http://dirk.eddelbuettel.com/blog/2016/10/20/",
"des": " - Convert Any Input to Parsed Date or Datetime"
},
{
"title": "gettz 0.0.2",
"href": "http://dirk.eddelbuettel.com/blog/2016/10/17/",
"des": " - `gettz` provides a possible fallback in situations where Sys.timezone() fails to determine the system timezone."
},
{
"title": "August Package Picks by Joseph Rickert",
"href": "https://www.rstudio.com/rviews/2016/10/21/august-package-picks/",
"des": " - 141 new packages landed on CRAN in August. The following are my picks for the most interesting packages in four categories."
},
{
"title": "gpg",
"href": "https://cran.r-project.org/web/packages/gpg/index.html",
"des": " - Encryption and Digital Signatures in R using GPG."
}
]
|
/json/399.r
|
no_license
|
rweekly/rweekly.org
|
R
| false
| false
| 10,126
|
r
|
[
{
"title": "Running sparklyr – RStudio’s R Interface to Spark on Amazon EMR",
"href": "https://aws.amazon.com/cn/blogs/big-data/running-sparklyr-rstudios-r-interface-to-spark-on-amazon-emr/",
"des": ""
},
{
"title": "RStudio in the Cloud II: Syncing Code & Data with AWS",
"href": "http://strimas.com/r/rstudio-cloud-2/",
"des": ": a continuation of [a previous post](http://strimas.com/r/rstudio-cloud-1/) on setting up RStudio in the cloud on Amazon Web Services. This tutorial demonstrates transferring data and code to/from the cloud with GitHub and S3."
},
{
"title": "Running sparklyr – RStudio’s R Interface to Spark on Amazon EMR",
"href": "https://aws.amazon.com/cn/blogs/big-data/running-sparklyr-rstudios-r-interface-to-spark-on-amazon-emr/",
"des": "",
"img": "https://d2908q01vomqb2.cloudfront.net/b6692ea5df920cad691c20319a6fffd7a4a766b8/2016/10/17/sparklyr_2.gif"
},
{
"title": "Rmarkdown in a scientific workflow",
"href": "http://predictiveecology.org/2016/10/21/Rmarkdown-science-workflow.html",
"des": " - Using Rmarkdown with Rstudio and for all stages of my scientific projects has been a remarkable shift in how my work gets done! "
},
{
"title": "Combing R and Java",
"href": "https://datadidit.com/2016/10/15/combing-r-and-java/",
"des": ""
},
{
"title": "The new R Graph Gallery",
"href": "http://www.r-graph-gallery.com/",
"des": ""
},
{
"title": "Why I would rather use ReporteRs than RMarkdown",
"href": "http://www.mango-solutions.com/wp/2016/10/why-i-would-rather-use-reporters-than-rmarkdown/",
"des": ""
},
{
"title": "Visualizing ROC Curves in R using Plotly",
"href": "http://moderndata.plot.ly/visualizing-roc-curves-in-r-using-plotly/",
"des": ""
},
{
"title": "The Grammar of Graphics and Radar Charts",
"href": "http://www.r-chart.com/2016/10/the-grammar-of-graphics-and-radar-charts.html",
"des": "",
"img": "https://i0.wp.com/2.bp.blogspot.com/-MwCucP8iX-A/WAJJF7vSj2I/AAAAAAAAAzg/P9N4U4gEMag2ml5NvGfxCvn_sYDzbcBJACEw/s640/polar_finished.png"
},
{
"title": "The Worlds Economic Data, Shiny Apps and all you want to know about Propensity Score Matching!",
"href": "http://r-exercises.com/2016/10/21/the-worlds-economic-data-shiny-apps-and-all-you-want-to-know-about-propensity-score-matching/",
"des": ""
},
{
"title": "Creating Interactive Plots with R and Highcharts",
"href": "https://www.rstudio.com/rviews/2016/10/19/creating-interactive-plots-with-r-and-highcharts/",
"des": ""
},
{
"title": "Using the pipe operator in R with Plotly",
"href": "http://moderndata.plot.ly/using-the-pipe-operator-in-r-with-plotly/",
"des": ""
},
{
"title": "Deep learning in the cloud with MXNet",
"href": "http://rsnippets.blogspot.com/2016/10/deep-learning-in-cloud-with-mxnet.html",
"des": ""
},
{
"title": "Raccoon Ch. 1 – Introduction to Linear Models with R",
"href": "http://www.quantide.com/raccoon-ch-1-introduction-to-linear-models-with-r/",
"des": ""
},
{
"title": "Annotated Facets with ggplot2",
"href": "https://statbandit.wordpress.com/2016/10/20/annotated-facets-with-ggplot2/",
"des": ""
},
{
"title": "On the ifelse function",
"href": "https://privefl.github.io/blog/On-the-ifelse-function/",
"des": ""
},
{
"title": "Progress bar overhead comparisons",
"href": "http://peter.solymos.org/code/2016/10/15/progress-bar-overhead-comparisons.html",
"des": "",
"img": "https://i2.wp.com/peter.solymos.org/images/2016/10/15/pb-overhead.png"
},
{
"title": "Statistical Reading Rainbow",
"href": "https://mathewanalytics.com/2016/10/17/statistical-reading-rainbow/",
"des": ""
},
{
"title": "Is Unemployment Higher under Labour or the Conservatives?",
"href": "http://rforjournalists.com/2016/10/17/is-unemployment-higher-under-labour-or-the-conservatives/",
"des": " - This post has covered using rectangles as annotations to show the British unemployment rate under different political parties, plus how to use breaks in your axes scaling.",
"img": "https://i2.wp.com/rforjournalists.com/wp-content/uploads/2016/10/unemployment2.png"
},
{
"title": "The History of Strikes in Britain, Told Using Line Plots and Annotations",
"href": "http://rforjournalists.com/2016/10/17/is-unemployment-higher-under-labour-or-the-conservatives/",
"des": ""
},
{
"title": "Exploring the effects of healthcare investment on child mortality in R",
"href": "https://drsimonj.svbtle.com/exploring-a-causal-relation-between-healthcare-investment-and-child-mortality-in-r",
"des": "",
"img": "https://i0.wp.com/svbtleusercontent.com/n1yn7f9gjs8gua.png"
},
{
"title": "The 'deadly board game' puzzle: efficient simulation in R",
"href": "http://varianceexplained.org/r/board-game-simulation/",
"des": ""
},
{
"title": "Rcpp now used by 800 CRAN packages",
"href": "http://dirk.eddelbuettel.com/blog/2016/10/16/",
"des": "",
"img": "https://i1.wp.com/dirk.eddelbuettel.com/blog/code/rcpp/RcppGrowth_2016-10-16.png"
},
{
"title": "How to “get good at R”",
"href": "http://www.arilamstein.com/blog/2016/10/18/get-good-r/",
"des": ""
},
{
"title": "Election 2016: Tracking Emotions with R and Python",
"href": "http://blog.revolutionanalytics.com/2016/10/debate-emotions.html",
"des": " "
},
{
"title": "Don’t buy a brand new Porsche 911 or Audi Q7!!",
"href": "https://longhowlam.wordpress.com/2016/10/19/dont-buy-a-brand-new-porsche-911-or-audi-q7/",
"des": " - Many people know that nasty feeling when buying a brand new car. The minute that you have left the dealer, your car has lost a substantial amount of value. Unfortunately this depreciation is inevitable, however, the amount depends heavily on the car make and model."
},
{
"title": "Estimating the value of a vehicle with R",
"href": "http://blog.revolutionanalytics.com/2016/10/car-valuation.html",
"des": "",
"img": "https://revolution-computing.typepad.com/.a/6a010534b1db25970b01b8d228f412970c-pi"
},
{
"title": "How long do I have to survive without cake?",
"href": "http://www.mango-solutions.com/wp/2016/10/how-long-do-i-have-to-survive-without-cake/",
"des": " - A more fun example of survival analysis is to consider the time in between someone bringing cake to work."
},
{
"title": "Tourism forecasting competition data in the Tcomp R package",
"href": "http://ellisp.github.io/blog/2016/10/19/Tcomp",
"des": " - A new R package `Tcomp` makes data from the 2010 tourism forecasting competition available in a format designed to facilitate the fitting and testing of en masse automated forecasts, consistent with the M1 and M3 forecasting competition data in the `Mcomp` R package. "
},
{
"title": "Notes from the Kölner R meeting, 14 October 2016",
"href": "http://www.magesblog.com/2016/10/notes-from-kolner-r-meeting-14-october.html",
"des": ""
},
{
"title": "Call for rstudio::conf lightning talks",
"href": "https://blog.rstudio.org/2016/10/18/call-for-rstudioconf-lightning-talks/",
"des": ""
},
{
"title": "Warsaw R-Ladies",
"href": "http://r-addict.com/2016/10/21/Warsaw-RLadies-01.html",
"des": "",
"img": "https://i1.wp.com/r-addict.com/images/fulls/rladies1.JPG"
},
{
"title": "The Team Data Science Process",
"href": "http://blog.revolutionanalytics.com/2016/10/the-team-data-science-process.html",
"des": " - As more and more organizations are setting up teams of data scientists to make sense of the massive amounts of data they collect, the need grows for a standardized process for managing the work of those teams. ",
"img": "https://revolution-computing.typepad.com/.a/6a010534b1db25970b01bb0945bf4d970d-pi"
},
{
"title": "Paper published: mlr – Machine Learning in R",
"href": "https://www.r-bloggers.com/paper-published-mlr-machine-learning-in-r/",
"des": ""
},
{
"title": "6 new jobs for R users – from around the world (2016-10-19)",
"href": "https://www.r-bloggers.com/6-new-jobs-for-r-users-from-around-the-world-2016-10-19/",
"des": ""
},
{
    "title": "Hadley Wickham \"Data Science with R\" at Reed College",
"href": "https://www.youtube.com/watch?v=K-ss_ag2k9E&feature=youtu.be",
"des": ""
},
{
"title": "How to write a useful htmlwidgets in R: tips and walk-through a real example",
"href": "http://deanattali.com/blog/htmlwidgets-tips/",
"des": " - I’d like to share some tips and recommendations on building htmlwidgets, based on my own learning experience while creating timevis."
},
{
"title": "R Tools for Visual Studio 0.5",
"href": "http://blog.revolutionanalytics.com/2016/10/rtvs-05-now-available.html",
"des": " - the open-source Visual Studio add-in for R programmers."
},
{
"title": "DOM 0.3",
"href": "http://stattech.wordpress.fos.auckland.ac.nz/2016-13-dom-version-0-3/",
"des": " - This version represents a major refactoring of the package code, including its user-facing API."
},
{
"title": "anytime 0.0.4",
"href": "http://dirk.eddelbuettel.com/blog/2016/10/20/",
"des": " - Convert Any Input to Parsed Date or Datetime"
},
{
"title": "gettz 0.0.2",
"href": "http://dirk.eddelbuettel.com/blog/2016/10/17/",
"des": " - `gettz` provides a possible fallback in situations where Sys.timezone() fails to determine the system timezone."
},
{
"title": "August Package Picks by Joseph Rickert",
"href": "https://www.rstudio.com/rviews/2016/10/21/august-package-picks/",
"des": " - 141 new packages landed on CRAN in August. The following are my picks for the most interesting packages in four categories."
},
{
"title": "gpg",
"href": "https://cran.r-project.org/web/packages/gpg/index.html",
"des": " - Encryption and Digital Signatures in R using GPG."
}
]
|
#' Train a cell type classifier
#'
#' This function takes single-cell expression data in the form of a CDS object
#' and a cell type definition file (marker file) and trains a multinomial
#' classifier to assign cell types. The resulting \code{garnett_classifier}
#' object can be used to classify the cells in the same dataset, or future
#' datasets from similar tissues/samples.
#'
#' @param cds Input CDS object.
#' @param marker_file A character path to the marker file to define cell types.
#' See details and documentation for \code{\link{Parser}} by running
#' \code{?Parser}for more information.
#' @param db Bioconductor AnnotationDb-class package for converting gene IDs.
#' For example, for humans use org.Hs.eg.db. See available packages at
#' \href{http://bioconductor.org/packages/3.8/data/annotation/}{Bioconductor}.
#' If your organism does not have an AnnotationDb-class database available,
#' you can specify "none", however then Garnett will not check/convert gene
#' IDs, so your CDS and marker file must have the same gene ID type.
#' @param cds_gene_id_type The type of gene ID used in the CDS. Should be one
#' of the values in \code{columns(db)}. Default is "ENSEMBL". Ignored if
#' db = "none".
#' @param marker_file_gene_id_type The type of gene ID used in the marker file.
#' Should be one of the values in \code{columns(db)}. Default is "SYMBOL".
#' Ignored if db = "none".
#' @param min_observations An integer. The minimum number of representative
#' cells per cell type required to include the cell type in the predictive
#' model. Default is 8.
#' @param max_training_samples An integer. The maximum number of representative
#' cells per cell type to be included in the model training. Decreasing this
#' number increases speed, but may hurt performance of the model. Default is
#' 500.
#' @param num_unknown An integer. The number of unknown type cells to use as an
#' outgroup during classification. Default is 500.
#' @param propogate_markers Logical. Should markers from child nodes of a cell
#' type be used in finding representatives of the parent type? Should
#' generally be \code{TRUE}.
#' @param cores An integer. The number of cores to use for computation.
#' @param lambdas \code{NULL} or a numeric vector. Allows the user to pass
#' their own lambda values to \code{\link[glmnet]{cv.glmnet}}. If \code{NULL},
#' preset lambda values are used.
#' @param classifier_gene_id_type The type of gene ID that will be used in the
#' classifier. If possible for your organism, this should be "ENSEMBL", which
#' is the default. Ignored if db = "none".
#' @param return_initial_assign Logical indicating whether an initial
#' assignment data frame for the root level should be returned instead of a
#' classifier. This can be useful while choosing/debugging markers. Please
#' note that this means that a classifier will not be built, so you will not
#' be able to move on to the next steps of the workflow until you rerun the
#'   function with \code{return_initial_assign = FALSE}. Default is \code{FALSE}.
#'
#' @details This function has three major parts: 1) parsing the marker file 2)
#' choosing cell representatives and 3) training the classifier. Details on
#' each of these steps is below:
#'
#' Parsing the marker file: the first step of this function is to parse the
#' provided marker file. The marker file is a representation of the cell types
#' expected in the data and known characteristics about them. Information
#' about marker file syntax is available in the documentation for the
#' \code{\link{Parser}} function, and on the
#' \href{https://cole-trapnell-lab.github.io/garnett}{Garnett website}.
#'
#' Choosing cell representatives: after parsing the marker file, this function
#' identifies cells that fit the parameters specified in the file for each cell
#' type. Depending on how marker genes and other cell type definition
#' information are specified, expression data is normalized and expression
#' cutoffs are defined automatically. In addition to the cell types in the
#' marker file, an outgroup of diverse cells is also chosen.
#'
#' Training the classifier: lastly, this function trains a multinomial GLMnet
#' classifier on the chosen representative cells.
#'
#' Because cell types can be defined hierarchically (i.e. cell types can be
#' subtypes of other cell types), steps 2 and 3 above are performed iteratively
#' over all internal nodes in the tree representation of cell types.
#'
#' See the
#' \href{https://cole-trapnell-lab.github.io/garnett}{Garnett website} and the
#' accompanying paper for further details.
#'
#' @export
#'
#' @examples
#' library(org.Hs.eg.db)
#' data(test_cds)
#' set.seed(260)
#'
#' marker_file_path <- system.file("extdata", "pbmc_bad_markers.txt",
#' package = "garnett")
#'
#' test_classifier <- train_cell_classifier(cds = test_cds,
#' marker_file = marker_file_path,
#' db=org.Hs.eg.db,
#' min_observations = 10,
#' cds_gene_id_type = "SYMBOL",
#' num_unknown = 50,
#' marker_file_gene_id_type = "SYMBOL")
#'
train_cell_classifier <- function(cds,
                                  marker_file,
                                  db,
                                  cds_gene_id_type = "ENSEMBL",
                                  marker_file_gene_id_type = "SYMBOL",
                                  min_observations=8,
                                  max_training_samples=500,
                                  num_unknown = 500,
                                  propogate_markers = TRUE,
                                  cores=1,
                                  lambdas = NULL,
                                  classifier_gene_id_type = "ENSEMBL",
                                  return_initial_assign = FALSE) {
  ##### Check inputs #####
  # Size factors are required for the normalization step further below.
  assertthat::assert_that(is(cds, "CellDataSet"))
  assertthat::assert_that(assertthat::has_name(pData(cds), "Size_Factor"),
                          msg = paste("Must run estimateSizeFactors() on cds",
                                      "before calling train_cell_classifier"))
  assertthat::assert_that(sum(is.na(pData(cds)$Size_Factor)) == 0,
                          msg = paste("Must run estimateSizeFactors() on cds",
                                      "before calling train_cell_classifier"))
  assertthat::assert_that(is.character(marker_file))
  # NOTE(review): is.readable() returns a logical that is discarded here (it
  # is not wrapped in assert_that()), so an unreadable path is not actually
  # caught at this point - confirm whether that is intentional.
  assertthat::is.readable(marker_file)
  # db == "none" disables all gene ID checking/conversion: every ID type is
  # set to "custom" so the downstream conversion steps become no-ops.
  if (is(db, "character") && db == "none") {
    cds_gene_id_type <- 'custom'
    classifier_gene_id_type <- 'custom'
    marker_file_gene_id_type <- 'custom'
  } else {
    assertthat::assert_that(is(db, "OrgDb"),
                            msg = paste0("db must be an 'AnnotationDb' object ",
                                         "or 'none' see ",
                                         "http://bioconductor.org/packages/",
                                         "3.8/data/annotation/ for available"))
    assertthat::assert_that(is.character(cds_gene_id_type))
    assertthat::assert_that(is.character(marker_file_gene_id_type))
    # All three ID types must be valid keytypes of the annotation database.
    assertthat::assert_that(cds_gene_id_type %in% AnnotationDbi::keytypes(db),
                            msg = paste("cds_gene_id_type must be one of",
                                        "keytypes(db)"))
    assertthat::assert_that(classifier_gene_id_type %in% AnnotationDbi::keytypes(db),
                            msg = paste("classifier_gene_id_type must be one of",
                                        "keytypes(db)"))
    assertthat::assert_that(marker_file_gene_id_type %in%
                              AnnotationDbi::keytypes(db),
                            msg = paste("marker_file_gene_id_type must be one of",
                                        "keytypes(db)"))
  }
  # NOTE(review): these two is.count() results are also discarded (see the
  # is.readable() note above).
  assertthat::is.count(num_unknown)
  assertthat::is.count(cores)
  assertthat::assert_that(is.logical(propogate_markers))
  if (!is.null(lambdas)) {
    assertthat::assert_that(is.numeric(lambdas))
  }
  ##### Set internal parameters #####
  rel_gene_quantile <- .9 # exclusion criterion for genes expressed at greater
  # than rel_gene_quantile in all training cell subsets
  back_cutoff <- 0.25 # percent of 95th percentile of expression that marks the
  # cutoff between "expressed" and "not expressed"
  perc_cells <- 0.05 # percent of training cells a gene is expressed to be
  # included in glmnet training
  training_cutoff <- .75 # percentile of marker score required for training
  # assignment
  ##### Normalize and rename CDS #####
  # Coerce the expression matrix to sparse dgCMatrix if needed; the @x/@p
  # slot arithmetic below requires the dgCMatrix column-compressed layout.
  if (!is(exprs(cds), "dgCMatrix")) {
    sf <- pData(cds)$Size_Factor
    pd <- new("AnnotatedDataFrame", data = pData(cds))
    fd <- new("AnnotatedDataFrame", data = fData(cds))
    cds <- suppressWarnings(newCellDataSet(as(exprs(cds), "dgCMatrix"),
                                           phenoData = pd,
                                           featureData = fd))
    # newCellDataSet drops the size factors; restore them.
    pData(cds)$Size_Factor <- sf
  }
  # Per-cell count of detected genes (lgCMatrix = logical sparse pattern).
  pData(cds)$num_genes_expressed <- Matrix::colSums(as(exprs(cds),
                                                       "lgCMatrix"))
  cell_totals <- Matrix::colSums(exprs(cds))
  sf <- pData(cds)$Size_Factor
  pd <- new("AnnotatedDataFrame", data = pData(cds))
  fd <- new("AnnotatedDataFrame", data = fData(cds))
  # Divide every non-zero count by its cell's size factor without densifying
  # the sparse matrix (diff(temp@p) gives the non-zero count per column).
  temp <- exprs(cds)
  temp@x <- temp@x / rep.int(pData(cds)$Size_Factor, diff(temp@p))
  norm_cds <- suppressWarnings(newCellDataSet(temp,
                                              phenoData = pd, featureData = fd))
  orig_cds <- cds
  # Convert both the normalized and the raw CDS into the classifier's gene
  # ID space so the trained model is portable across datasets.
  if(cds_gene_id_type != classifier_gene_id_type) {
    norm_cds <- cds_to_other_id(norm_cds, db=db, cds_gene_id_type,
                                classifier_gene_id_type)
    orig_cds <- cds_to_other_id(cds, db=db, cds_gene_id_type,
                                classifier_gene_id_type)
  }
  pData(norm_cds)$Size_Factor <- sf
  ##### Parse Marker File #####
  # Read the whole file as one string; the appended newline keeps the lexer
  # happy on files lacking a trailing newline.
  file_str = paste0(readChar(marker_file, file.info(marker_file)$size),"\n")
  parse_list <- parse_input(file_str)
  # parse_list is an environment; drop the bookkeeping "name_order" entry so
  # only cell type definitions remain in it.
  orig_name_order <- unlist(parse_list[["name_order"]])
  rm("name_order", envir=parse_list)
  # Check and order subtypes
  # ranks maps each cell type name to its declared parent (length-0 if the
  # type is top-level).
  ranks <- lapply(orig_name_order, function(i) parse_list[[i]]@parenttype)
  names(ranks) <- orig_name_order
  # Any declared parent that is not itself a defined cell type is an error.
  if(length(unlist(unique(ranks[which(!ranks %in% names(ranks) & lengths(ranks) != 0L)])) != 0)) {
    stop(paste("Subtype", unlist(unique(ranks[which(!ranks %in% names(ranks) & lengths(ranks) != 0L)])), "is not defined in marker file."))
  }
  # A cell type may not be declared as its own parent.
  if(any(names(ranks) == ranks)) {
    bad <- ranks[names(ranks) == ranks]
    stop(paste0("'", bad,
                "' cannot be a subtype of itself. Please modify marker file."))
  }
  # Topologically order the cell types (parents before children): start with
  # the parentless types, then repeatedly append types whose parent is
  # already ordered.
  name_order <- names(ranks[lengths(ranks) == 0L])
  ranks <- ranks[!names(ranks) %in% name_order]
  while(length(ranks) != 0) {
    name_order <- c(name_order, names(ranks)[ranks %in% name_order])
    ranks <- ranks[!names(ranks) %in% name_order]
  }
  if(is.null(parse_list)) stop("Parse failed!")
  message(paste("There are", length(parse_list), "cell type definitions"))
  # Check gene names and keywords
  gene_table <- make_name_map(parse_list,
                              as.character(row.names(fData(norm_cds))),
                              classifier_gene_id_type,
                              marker_file_gene_id_type,
                              db)
  ##### Make garnett_classifier #####
  classifier <- new_garnett_classifier()
  classifier@gene_id_type <- classifier_gene_id_type
  if(is(db, "character") && db == "none") classifier@gene_id_type <- "custom"
  # Add every cell type and its classification rule to the classifier tree,
  # in topological order so parents exist before their children.
  for(i in name_order) {
    # check meta data exists
    if (nrow(parse_list[[i]]@meta) != 0) {
      if (!all(parse_list[[i]]@meta$name %in% colnames(pData(norm_cds)))) {
        bad_meta <- parse_list[[i]]@meta$name[!parse_list[[i]]@meta$name %in%
                                                colnames(pData(norm_cds))]
        stop(paste0("Cell type '", parse_list[[i]]@name,
                    "' has a meta data specification '", bad_meta ,
                    "' that's not in the pData table."))
      }
    }
    logic_list <- assemble_logic(parse_list[[i]], gene_table)
    classifier <- add_cell_rule(parse_list[[i]], classifier, logic_list)
  }
  # Geometric mean of total counts per cell, scaled by the median number of
  # genes expressed; stored on the classifier for later use.
  classifier@cell_totals <- exp(mean(log(cell_totals)))/
    stats::median(pData(norm_cds)$num_genes_expressed)
  ##### Create transformed marker table #####
  # Optionally fold each child's "expressed" markers into its parent. Since
  # parse_list is an environment, propogate_func's updates persist here.
  if(propogate_markers) {
    root <- propogate_func(curr_node = "root", parse_list, classifier)
  }
  tf_idf <- tfidf(norm_cds) #slow
  ### Aggregate markers ###
  # One marker-score column per cell type; cells hit by a type's negative
  # markers are zeroed out for that type.
  marker_scores <- data.frame(cell = row.names(tf_idf))
  for (i in name_order) {
    agg <- aggregate_positive_markers(parse_list[[i]], tf_idf,
                                      gene_table, back_cutoff)
    bad_cells <- get_negative_markers(parse_list[[i]], tf_idf,
                                      gene_table, back_cutoff)
    if(is.null(agg)) {
      warning (paste("Cell type", i, "has no genes that are expressed",
                     "and will be skipped"))
    } else {
      agg[names(agg) %in% bad_cells] <- 0
      marker_scores <- cbind(marker_scores, as.matrix(agg))
      colnames(marker_scores)[ncol(marker_scores)] <- parse_list[[i]]@name
    }
  }
  ##### Train Classifier #####
  # Walk every node of the classification tree; for each internal node
  # (one with children), train a multinomial model distinguishing its
  # children from each other and from an "Unknown" outgroup.
  for (v in igraph::V(classifier@classification_tree)){
    child_cell_types <- igraph::V(classifier@classification_tree)[
      suppressWarnings(outnei(v))]$name
    if(length(child_cell_types) > 0) {
      ### Get CDS subset for training ###
      if(igraph::V(classifier@classification_tree) [ v ]$name == "root") {
        cds_sub <- norm_cds
        orig_sub <- orig_cds
      } else {
        # loosely classify to subset
        # For a non-root node, run the parent's model first and keep only
        # the cells it assigned to this node.
        new_assign <-
          make_predictions(norm_cds,
                           classifier,
                           igraph::V(classifier@classification_tree)[
                             suppressWarnings(innei(v))]$name,
                           rank_prob_ratio = 1.1,
                           s = "lambda.min")
        if(!igraph::V(classifier@classification_tree)[v]$name %in%
           names(new_assign)) {
          message(paste0("No cells classified as ",
                         igraph::V(classifier@classification_tree) [ v ]$name,
                         ". No subclassification"))
          next
        }
        good_cells <-
          as.matrix(new_assign[
            igraph::V(classifier@classification_tree)[v]$name][[1]])
        good_cells <- names(good_cells[good_cells[,1] != 0,])
        if(length(good_cells) == 0) {
          message(paste0("No cells classified as ",
                         igraph::V(classifier@classification_tree) [ v ]$name,
                         ". No subclassification"))
          next
        }
        cds_sub <- norm_cds[,good_cells]
        orig_sub <- orig_cds[,good_cells]
      }
      ### Get training sample ###
      training_sample <- get_training_sample(cds = cds_sub,
                                             orig_cds = orig_sub,
                                             classifier,
                                             tf_idf,
                                             gene_table,
                                             v,
                                             parse_list,
                                             name_order,
                                             max_training_samples,
                                             num_unknown,
                                             back_cutoff,
                                             training_cutoff,
                                             marker_scores,
                                             return_initial_assign)
      # Debugging aid: short-circuit and hand back the raw assignment data
      # frame instead of building a classifier.
      if(return_initial_assign) {
        return(training_sample)
      }
      if (length(training_sample) > 0 & sum(training_sample != "Unknown") > 0) {
        # exclude useless genes
        # Drop genes whose mean tf-idf exceeds the rel_gene_quantile
        # quantile in every cell type - they carry no discriminative signal.
        sub <- norm_cds[,names(training_sample[training_sample != "Unknown"])]
        tf <- tfidf(sub)
        temp <- training_sample[training_sample != "Unknown"]
        y <- split.data.frame(as.matrix(tf), temp)
        rm <- lapply(y, colMeans)
        rm[["Unknown"]] <- NULL
        rm <- do.call(cbind, rm)
        rm <- as.data.frame(rm)
        rm$num_3q <- rowSums(rm > apply(rm, 2, stats::quantile,
                                        p = rel_gene_quantile, na.rm=TRUE))
        exclude <- row.names(rm[rm$num_3q == max(rm$num_3q),])
        cds_sub <- cds_sub[setdiff(row.names(fData(cds_sub)), exclude),]
        classifier <- train_glmnet(cds_sub,
                                   classifier,
                                   v,
                                   training_sample,
                                   min_observations = min_observations,
                                   lambdas = lambdas,
                                   cores = cores,
                                   gene_table = gene_table,
                                   perc_cells = perc_cells)
      } else {
        # Too few training cells: fatal at the root, otherwise just skip
        # subclassification beneath this node.
        if(igraph::V(classifier@classification_tree)[v]$name == "root") {
          stop(paste("Not enough training samples for any cell types at root",
                     "of cell type hierarchy!"))
        }
        message(paste0("Not enough training samples for children of ",
                       igraph::V(classifier@classification_tree)[v]$name,
                       ". They will not be subclassified."))
      }
    }
  }
  return(classifier)
}
#' Parse marker-file text into cell type definitions.
#'
#' Tokenizes and parses the marker-file string using the package's rly
#' Lexer/Parser definitions (defined elsewhere in the package). Per its use
#' in \code{train_cell_classifier}, the result is an environment of cell
#' type definitions keyed by name, plus a "name_order" entry.
#'
#' @param file_str Full marker-file contents as a single string.
#' @param debug Logical; passed through to \code{rly::lex}/\code{rly::yacc}
#'   to enable parser debugging output. Default \code{FALSE}.
#'   (Fixed: was the reassignable shorthand \code{F}.)
#' @return The parser's result (see above).
parse_input <- function(file_str,
                        debug = FALSE) {
  # Parse input_file
  lexer <- rly::lex(Lexer, debug = debug)
  parser <- rly::yacc(Parser, debug = debug)
  parse_list <- parser$parse(file_str, lexer)
  parse_list
}
#' Map marker-file gene names onto CDS row names.
#'
#' Builds a lookup table relating every gene mentioned in the marker file to
#' its identifier in the CDS. Resulting columns:
#'   orig_fgenes - gene name exactly as written in the marker file
#'   fgenes      - name after conversion into the CDS gene ID type
#'   cds         - the row name actually present in the CDS (may carry an
#'                 ENSEMBL ".version" suffix)
#'   parent      - cell type the gene rule belongs to
#'   in_cds      - whether the gene was found among possible_genes
#'
#' @param parse_list Parsed marker-file definitions.
#' @param possible_genes Character vector of CDS row names.
#' @param cds_gene_id_type,marker_file_gene_id_type Gene ID types (keytypes
#'   of \code{db}), or "custom" when no conversion is wanted.
#' @param db AnnotationDb object used for ID conversion.
#' @return The gene lookup data frame described above.
make_name_map <- function(parse_list,
                          possible_genes,
                          cds_gene_id_type,
                          marker_file_gene_id_type,
                          db) {
  gene_start <- collect_gene_names(parse_list)
  gene_table <- data.frame(fgenes = gene_start[,1], parent = gene_start[,2])
  gene_table$parent <- as.character(gene_table$parent)
  gene_table$fgenes <- as.character(gene_table$fgenes)
  gene_table$orig_fgenes <- gene_table$fgenes
  if(cds_gene_id_type != marker_file_gene_id_type) {
    # Translate marker-file IDs into the CDS ID space; NAs mark failures.
    gene_table$fgenes <- convert_gene_ids(gene_table$orig_fgenes,
                                          db,
                                          marker_file_gene_id_type,
                                          cds_gene_id_type)
    bad_convert <- sum(is.na(gene_table$fgenes))
    # Bug fix: the warning previously indexed the nonexistent column
    # 'orig_genes' (partial matching returns NULL), so the failing gene
    # names were never actually listed.
    if (bad_convert > 0) warning(paste(bad_convert,
                                       "genes could not be converted from",
                                       marker_file_gene_id_type,
                                       "to", cds_gene_id_type, "These genes are",
                                       "listed below:", paste0(gene_table$orig_fgenes[
                                         is.na(gene_table$fgenes)],
                                         collapse="\n")))
  } else {
    gene_table$cds <- gene_table$fgenes
  }
  # '||' (scalar short-circuit) rather than elementwise '|' in this if().
  if(cds_gene_id_type == "ENSEMBL" || marker_file_gene_id_type == "ENSEMBL") {
    # ENSEMBL row names in a CDS often carry a ".version" suffix; match on
    # the un-versioned ID while keeping the CDS's own spelling in $cds.
    gene_table$cds <- NULL
    possibles <- data.frame(cds = possible_genes,
                            ensembl = as.character(
                              stringr::str_split_fixed(possible_genes,
                                                       "\\.",
                                                       2)[,1]))
    gene_table <- merge(gene_table, possibles, all.x = TRUE,
                        by.x="fgenes", by.y="ensembl")
    gene_table$fgenes <- gene_table$cds
  } else {
    gene_table$cds <- gene_table$fgenes
  }
  # Use the full column name; the original relied on partial matching ($f).
  gene_table$in_cds <- gene_table$fgenes %in% possible_genes
  gene_table$in_cds[is.na(gene_table$in_cds)] <- FALSE
  bad_genes <- gene_table$orig_fgenes[!gene_table$in_cds]
  if (length(bad_genes) > 0) warning(strwrap("The following genes from
                                             the cell type definition file are
                                             not present in the cell dataset.
                                             Please check these genes for
                                             errors. Cell type determination
                                             will continue, ignoring these
                                             genes."), "\n",
                                     paste0(bad_genes, collapse="\n"))
  gene_table$fgenes <- as.character(gene_table$fgenes)
  gene_table$cds <- as.character(gene_table$cds)
  gene_table
}
add_cell_rule <- function(cell_type,
                          classifier,
                          logic_list) {
  # Register one cell type in the classifier's classification tree together
  # with a classify_func closure built from the rule strings in logic_list.
  #
  # cell_type  - parsed cell type definition from the marker file
  # classifier - garnett_classifier being assembled
  # logic_list - list of R expression strings from assemble_logic()
  # Returns the updated classifier.
  # Set parenttype to root if no parent
  if (length(cell_type@parenttype) == 0) {
    cell_type@parenttype <- "root"
  }
  # subtype of
  if (length(cell_type@parenttype) > 1) stop("only 1 parenttype allowed")
  parent_type <- as.character(cell_type@parenttype)
  # references
  # Accumulate literature references, keyed by cell type name.
  if (length(cell_type@references) > 0) {
    if (length(classifier@references) == 0) {
      classifier@references <- list()
    }
    classifier@references <- c(classifier@references,
                               list(cell_type@references))
    names(classifier@references)[length(classifier@references)] <-
      cell_type@name
  }
  # A type with no usable rules still gets a node (preserving the tree
  # shape) but its classify_func never selects any cells.
  if (length(logic_list) == 0) {
    warning (paste("Cell type", cell_type@name,
                   "has no valid rules and will be skipped"))
    classifier <- add_cell_type(classifier, cell_type@name,
                                classify_func = function(x) {rep(FALSE, ncol(x))},
                                parent_type)
    return(classifier)
  }
  # All rule clauses must hold simultaneously, hence the ' & ' join. The
  # closure captures `logic` and evaluates it against the expression matrix
  # x at classification time; eval(parse()) is deliberate here because the
  # rules arrive as strings from the marker-file parser.
  logic <- paste(unlist(logic_list), collapse = ' & ')
  tryCatch(
    if(nchar(logic) == 0) {
      classifier <- add_cell_type(classifier, cell_type@name,
                                  classify_func = function(x) {FALSE},
                                  parent_type)
    } else {
      classifier <- add_cell_type(classifier, cell_type@name,
                                  classify_func = function(x) {
                                    eval(parse(text = logic))
                                  },
                                  parent_type)
    },
    error = function(e) {
      msg <- paste("Cell type rule generation failed on the",
                   "cell definition for ", cell_type@name, ".\nError: ",e)
      stop(msg)
    }
  )
  return(classifier)
}
#' Assemble classification rule strings for one cell type.
#'
#' Builds a list of R expression strings (later joined with ' & ' and
#' eval'd by the closure created in \code{add_cell_rule}) that decide
#' whether each cell matches this cell type's definition.
#'
#' @param cell_type Parsed cell type definition from the marker file.
#' @param gene_table Gene lookup table from \code{make_name_map}.
#' @return A (possibly nested) list of expression strings; genes absent
#'   from the CDS contribute NULL entries that are dropped when the caller
#'   unlists the result.
assemble_logic <- function(cell_type,
                           gene_table) {
  bad_genes <- gene_table[!gene_table$in_cds,]$orig_fgenes
  # Expression-level rules: one "(x[gene,] > lower) & (x[gene,] < upper)"
  # clause per gene rule. Rules on genes missing from the CDS return NULL
  # (implicitly) and vanish on unlist(). (Removed dead locals `logic`,
  # `logic_list = list()` and `log_piece` from the original.)
  logic_list <- lapply(cell_type@gene_rules, function(rule) {
    if (!rule@gene_name %in% bad_genes) {
      paste0("(x['",
             gene_table$fgenes[match(rule@gene_name, gene_table$orig_fgenes)],
             "',] > ", rule@lower,
             ") & (x['",
             gene_table$fgenes[match(rule@gene_name, gene_table$orig_fgenes)],
             "',] < ",
             rule@upper,
             ")")
    }
  })
  # expressed/not expressed keywords are resolved through the precomputed
  # 'assigns' vector. '||' (not '|') since both operands are scalars. The
  # nested list() wrapping is intentional - callers flatten via unlist().
  if (length(cell_type@expressed) > 0 || length(cell_type@not_expressed) > 0) {
    logic_list <- list(logic_list, paste0("assigns == '", cell_type@name, "'"))
  }
  if(length(logic_list) == 0) warning(paste("Cell type", cell_type@name,
                                            "has no valid expression rules."))
  # meta data
  # One "%in%" clause per pData column mentioned in the meta table.
  if (nrow(cell_type@meta) > 0) {
    mlogic <- plyr::dlply(cell_type@meta, plyr::.(name), function(x) {
      if(nrow(x) == 1){
        out <- paste0(x["name"], " %in% c('", x[,"spec"][1],"')")
      } else {
        out <- paste0(x[,"name"][1], " %in% c('", paste(x[,"spec"],
                                                        collapse = "', '"),
                      "')")
      }
      out
    })
    logic_list <- c(logic_list, unname(mlogic))
  }
  logic_list <- logic_list[!is.na(logic_list)]
  logic_list
}
propogate_func <- function(curr_node,
                           parse_list,
                           classifier) {
  # Recursively collect the "expressed" marker genes of all descendants of
  # curr_node and fold them into each internal node's own marker set.
  # Because parse_list is an environment (see the rm(envir = parse_list)
  # call in train_cell_classifier), the slot assignment below persists for
  # the caller despite R's usual copy semantics.
  # Returns the union of expressed markers for the subtree rooted here.
  children <- igraph::V(classifier@classification_tree)[
    suppressWarnings(outnei(curr_node))]$name
  if(length(children) == 0) {
    # Leaf node: contributes only its own markers, nothing to update.
    return(parse_list[[curr_node]]@expressed)
  } else {
    child_genes <- c()
    if (curr_node != "root") {
      child_genes <- parse_list[[curr_node]]@expressed
    }
    for(child in children) {
      child_genes <- union(child_genes,
                           propogate_func(child, parse_list, classifier))
    }
    # "root" has no definition of its own, so only real cell types get the
    # accumulated marker set written back.
    if(curr_node != "root") {
      parse_list[[curr_node]]@expressed <- child_genes
    }
    return(child_genes)
  }
}
tfidf <- function(input_cds) {
  # Term-frequency / inverse-document-frequency transform of the expression
  # matrix, treating cells as "documents" and genes as "terms".
  # Returns a cells x genes sparse matrix of tf-idf scores.
  counts <- exprs(input_cds)
  # Drop genes with no observed counts: they carry no signal and would put
  # a zero denominator into the IDF term.
  counts <- counts[Matrix::rowSums(counts) != 0,]
  # Term frequency: divide each non-zero entry by its cell's (column's)
  # total counts, operating directly on the dgCMatrix slots so the matrix
  # is never densified (diff(counts@p) = non-zeros per column).
  freqs <- counts
  freqs@x <- counts@x / rep.int(Matrix::colSums(counts), diff(counts@p))
  # Smoothed inverse document frequency per gene; the per-row recycling of
  # this vector scales each gene's row of `freqs`.
  idf <- log(1 + ncol(counts) / Matrix::rowSums(counts > 0))
  Matrix::t(freqs * idf)
}
# Fit a multinomial glmnet model distinguishing the child cell types of
# curr_node and store it on that node of classifier@classification_tree.
#
# cds              CDS restricted to candidate training cells.
# classifier       garnett_classifier whose tree node receives the model.
# curr_node        vertex (index or name) in the classification tree.
# training_sample  named factor: cell name -> assigned cell type ("Unknown"
#                  allowed); names must be columns of cds.
# min_observations minimum training cells per type; rarer types are dropped.
# cores            >1 enables doParallel for cv.glmnet.
# lambdas          lambda path, or NULL to use the preset path below.
# gene_table       marker table; declared markers for this node get a near-zero
#                  penalty so they are effectively always kept in the model.
# perc_cells       fraction of a type's cells a gene must be expressed in to
#                  enter the model.
#
# Returns the classifier (with the fitted model attached on success).
train_glmnet <- function(cds,
                         classifier,
                         curr_node,
                         training_sample,
                         min_observations,
                         cores,
                         lambdas,
                         gene_table,
                         perc_cells) {
  # Geometric mean of the positive entries; used to balance class weights.
  gm_mean = function(x, na.rm=TRUE){
    exp(sum(log(x[x > 0]), na.rm=na.rm) / length(x))
  }
  # calculate weights: rarer cell types get proportionally larger weights
  obs_counts = table(training_sample)
  obs_weights = gm_mean(obs_counts) / obs_counts
  # check enough example cells per cell type
  excluded_cell_types = names(which(obs_counts < min_observations))
  print(obs_counts)
  if (length(excluded_cell_types) > 0) {
    message(paste("The following cell types do not have enough training",
                  "cells and will be dropped: ",
                  paste(excluded_cell_types, collapse = " ")))
  }
  # Nothing left but "Unknown" (and dropped types): nothing to train on.
  if(length(setdiff(names(obs_counts),
                    c(excluded_cell_types, "Unknown"))) == 0) {
    warning("No cell types with sufficient examples")
    return(classifier)
  }
  count <- 0
  done <- FALSE
  cvfit = ""
  if(is.null(lambdas)) lambdas <- unique(c(100000, 50000,
                                           seq(10000, 100, by=-200),
                                           seq(100,10, by=-20),
                                           seq(10, 1, by=-2),
                                           seq(1, .1, by=-.2), .1, .05, .01,
                                           .005, .001, .0005, .0001))
  # Retry loop (at most 5 attempts). Between iterations 'cvfit' carries the
  # retry mode: "repeat" shrinks the lambda path, "low_cell" drops the rarest
  # remaining cell type. Scalar conditions use && (was &).
  while(done == FALSE && count < 5){
    if(cvfit == "low_cell") {
      excluded_cell_types <- c(excluded_cell_types,
                               names(which.min(table(training_sample))))
    } else if (cvfit == "repeat") lambdas <- lambdas[1:(length(lambdas) - 3)]
    training_sample = training_sample[!training_sample %in% excluded_cell_types]
    training_sample <- droplevels(training_sample)
    cds_sub = cds[,names(training_sample)]
    count <- count + 1
    y = training_sample
    # only include genes in the model that are expressed in perc_cells
    # (default 5%) of the training cells of at least one cell type
    candidate_model_genes = c()
    for (cell_type in levels(y)){
      genes_in_cell_type = names(which(Matrix::rowSums(
        exprs(cds_sub[,y == cell_type]) > 0) >
          perc_cells * sum(y == cell_type)))
      candidate_model_genes = append(candidate_model_genes, genes_in_cell_type)
    }
    candidate_model_genes = unique(candidate_model_genes)
    cds_sub = cds_sub[candidate_model_genes,]
    x = Matrix::t(exprs(cds_sub))
    if (length(which(table(y) < 8)) > 0) {
      message(paste("The following cell types have few training examples.",
                    "Be careful with interpretation"))
      print(names(which(table(y) < 8)))
    }
    # Near-zero penalty for the markers declared for this node in the marker
    # file, so glmnet essentially never regularizes them away.
    pens <- rep(1, ncol(x))
    sub <-
      gene_table[gene_table$parent ==
                   igraph::V(classifier@classification_tree)[curr_node]$name,]
    pens[colnames(x) %in% sub$fgenes] <- 0.00001
    # train model
    cvfit <- tryCatch({
      if (cores > 1){
        doParallel::registerDoParallel(cores=cores)
        cvfit <- suppressWarnings(
          glmnet::cv.glmnet(x, y, lambda = lambdas,
                            weights=obs_weights[y],
                            alpha=.3,
                            family = "multinomial",
                            type.multinomial = "grouped",
                            type.measure="class",
                            type.logistic = "modified.Newton",
                            lambda.min.ratio=0.001,
                            standardize=FALSE,
                            parallel=TRUE,
                            thresh=1e-6,
                            nfolds=3,
                            nlambda=20,
                            penalty.factor = pens))
      }else{
        cvfit <- suppressWarnings(
          glmnet::cv.glmnet(x, y, lambda = lambdas,
                            weights=obs_weights[y],
                            alpha=.3,
                            family = "multinomial",
                            type.multinomial = "grouped",
                            type.logistic = "modified.Newton",
                            type.measure="class",
                            lambda.min.ratio=0.001,
                            standardize=FALSE,
                            parallel=FALSE,
                            thresh=1e-6,
                            nfolds=3,
                            nlambda=50,
                            penalty.factor = pens))
      }
      message("Model training finished.")
      cvfit
    }, error = function(e) {
      print (e)
      # Each branch returns, selecting the retry mode (or giving up with the
      # error object). (An unreachable trailing statement was removed here.)
      if(count < 5 && grepl("90000", as.character(e))) {
        message(paste0("GLMNET failed with unknown error code, trying again"))
        return("repeat")
      } else if(count < 5 && length(unique(training_sample)) > 2) {
        message(paste0("GLMNET failed, excluding low count cell type: ",
                       names(which.min(table(training_sample))),
                       " and trying again"))
        return("low_cell")
      } else {
        message(paste0("GLMNET failed"))
        return(e)
      }
    })
    if(is.character(cvfit)) {
      # "repeat" / "low_cell": loop again with the adjusted inputs.
      if(cvfit == "repeat") {
        done <- FALSE
      } else if(cvfit == "low_cell") {
        done <- FALSE
      }
    } else if (inherits(cvfit, "error")) {
      # Unrecoverable failure: give up; classifier is returned without a model.
      done <- TRUE
    } else {
      # Success: attach the fitted cv.glmnet object to the tree node.
      igraph::V(classifier@classification_tree)[curr_node]$model <- list(cvfit)
      done <- TRUE
    }
  }
  return(classifier)
}
|
/R/train_cell_classifier.R
|
permissive
|
PUMC-FWYY-Lab/garnett
|
R
| false
| false
| 30,628
|
r
|
#' Train a cell type classifier
#'
#' This function takes single-cell expression data in the form of a CDS object
#' and a cell type definition file (marker file) and trains a multinomial
#' classifier to assign cell types. The resulting \code{garnett_classifier}
#' object can be used to classify the cells in the same dataset, or future
#' datasets from similar tissues/samples.
#'
#' @param cds Input CDS object.
#' @param marker_file A character path to the marker file to define cell types.
#' See details and documentation for \code{\link{Parser}} by running
#'   \code{?Parser} for more information.
#' @param db Bioconductor AnnotationDb-class package for converting gene IDs.
#' For example, for humans use org.Hs.eg.db. See available packages at
#' \href{http://bioconductor.org/packages/3.8/data/annotation/}{Bioconductor}.
#' If your organism does not have an AnnotationDb-class database available,
#' you can specify "none", however then Garnett will not check/convert gene
#' IDs, so your CDS and marker file must have the same gene ID type.
#' @param cds_gene_id_type The type of gene ID used in the CDS. Should be one
#' of the values in \code{columns(db)}. Default is "ENSEMBL". Ignored if
#' db = "none".
#' @param marker_file_gene_id_type The type of gene ID used in the marker file.
#' Should be one of the values in \code{columns(db)}. Default is "SYMBOL".
#' Ignored if db = "none".
#' @param min_observations An integer. The minimum number of representative
#' cells per cell type required to include the cell type in the predictive
#' model. Default is 8.
#' @param max_training_samples An integer. The maximum number of representative
#' cells per cell type to be included in the model training. Decreasing this
#' number increases speed, but may hurt performance of the model. Default is
#' 500.
#' @param num_unknown An integer. The number of unknown type cells to use as an
#' outgroup during classification. Default is 500.
#' @param propogate_markers Logical. Should markers from child nodes of a cell
#' type be used in finding representatives of the parent type? Should
#' generally be \code{TRUE}.
#' @param cores An integer. The number of cores to use for computation.
#' @param lambdas \code{NULL} or a numeric vector. Allows the user to pass
#' their own lambda values to \code{\link[glmnet]{cv.glmnet}}. If \code{NULL},
#' preset lambda values are used.
#' @param classifier_gene_id_type The type of gene ID that will be used in the
#' classifier. If possible for your organism, this should be "ENSEMBL", which
#' is the default. Ignored if db = "none".
#' @param return_initial_assign Logical indicating whether an initial
#' assignment data frame for the root level should be returned instead of a
#' classifier. This can be useful while choosing/debugging markers. Please
#' note that this means that a classifier will not be built, so you will not
#' be able to move on to the next steps of the workflow until you rerun the
#'   function with \code{return_initial_assign = FALSE}. Default is \code{FALSE}.
#'
#' @details This function has three major parts: 1) parsing the marker file 2)
#' choosing cell representatives and 3) training the classifier. Details on
#' each of these steps is below:
#'
#' Parsing the marker file: the first step of this function is to parse the
#' provided marker file. The marker file is a representation of the cell types
#' expected in the data and known characteristics about them. Information
#' about marker file syntax is available in the documentation for the
#' \code{\link{Parser}} function, and on the
#' \href{https://cole-trapnell-lab.github.io/garnett}{Garnett website}.
#'
#' Choosing cell representatives: after parsing the marker file, this function
#' identifies cells that fit the parameters specified in the file for each cell
#' type. Depending on how marker genes and other cell type definition
#' information are specified, expression data is normalized and expression
#' cutoffs are defined automatically. In addition to the cell types in the
#' marker file, an outgroup of diverse cells is also chosen.
#'
#' Training the classifier: lastly, this function trains a multinomial GLMnet
#' classifier on the chosen representative cells.
#'
#' Because cell types can be defined hierarchically (i.e. cell types can be
#' subtypes of other cell types), steps 2 and 3 above are performed iteratively
#' over all internal nodes in the tree representation of cell types.
#'
#' See the
#' \href{https://cole-trapnell-lab.github.io/garnett}{Garnett website} and the
#' accompanying paper for further details.
#'
#' @export
#'
#' @examples
#' library(org.Hs.eg.db)
#' data(test_cds)
#' set.seed(260)
#'
#' marker_file_path <- system.file("extdata", "pbmc_bad_markers.txt",
#' package = "garnett")
#'
#' test_classifier <- train_cell_classifier(cds = test_cds,
#' marker_file = marker_file_path,
#' db=org.Hs.eg.db,
#' min_observations = 10,
#' cds_gene_id_type = "SYMBOL",
#' num_unknown = 50,
#' marker_file_gene_id_type = "SYMBOL")
#'
train_cell_classifier <- function(cds,
                                  marker_file,
                                  db,
                                  cds_gene_id_type = "ENSEMBL",
                                  marker_file_gene_id_type = "SYMBOL",
                                  min_observations=8,
                                  max_training_samples=500,
                                  num_unknown = 500,
                                  propogate_markers = TRUE,
                                  cores=1,
                                  lambdas = NULL,
                                  classifier_gene_id_type = "ENSEMBL",
                                  return_initial_assign = FALSE) {
  ##### Check inputs #####
  assertthat::assert_that(is(cds, "CellDataSet"))
  assertthat::assert_that(assertthat::has_name(pData(cds), "Size_Factor"),
                          msg = paste("Must run estimateSizeFactors() on cds",
                                      "before calling train_cell_classifier"))
  assertthat::assert_that(sum(is.na(pData(cds)$Size_Factor)) == 0,
                          msg = paste("Must run estimateSizeFactors() on cds",
                                      "before calling train_cell_classifier"))
  assertthat::assert_that(is.character(marker_file))
  assertthat::is.readable(marker_file)
  # db = "none" disables all gene ID checking/conversion.
  if (is(db, "character") && db == "none") {
    cds_gene_id_type <- 'custom'
    classifier_gene_id_type <- 'custom'
    marker_file_gene_id_type <- 'custom'
  } else {
    assertthat::assert_that(is(db, "OrgDb"),
                            msg = paste0("db must be an 'AnnotationDb' object ",
                                         "or 'none' see ",
                                         "http://bioconductor.org/packages/",
                                         "3.8/data/annotation/ for available"))
    assertthat::assert_that(is.character(cds_gene_id_type))
    assertthat::assert_that(is.character(marker_file_gene_id_type))
    assertthat::assert_that(cds_gene_id_type %in% AnnotationDbi::keytypes(db),
                            msg = paste("cds_gene_id_type must be one of",
                                        "keytypes(db)"))
    assertthat::assert_that(classifier_gene_id_type %in% AnnotationDbi::keytypes(db),
                            msg = paste("classifier_gene_id_type must be one of",
                                        "keytypes(db)"))
    assertthat::assert_that(marker_file_gene_id_type %in%
                              AnnotationDbi::keytypes(db),
                            msg = paste("marker_file_gene_id_type must be one of",
                                        "keytypes(db)"))
  }
  assertthat::is.count(num_unknown)
  assertthat::is.count(cores)
  assertthat::assert_that(is.logical(propogate_markers))
  if (!is.null(lambdas)) {
    assertthat::assert_that(is.numeric(lambdas))
  }
  ##### Set internal parameters #####
  rel_gene_quantile <- .9 # exclusion criterion for genes expressed at greater
  # than rel_gene_quantile in all training cell subsets
  back_cutoff <- 0.25 # percent of 95th percentile of expression that marks the
  # cutoff between "expressed" and "not expressed"
  perc_cells <- 0.05 # percent of training cells a gene is expressed to be
  # included in glmnet training
  training_cutoff <- .75 # percentile of marker score required for training
  # assignment
  ##### Normalize and rename CDS #####
  # Coerce the expression matrix to sparse dgCMatrix if necessary, preserving
  # the size factors (newCellDataSet does not carry them over).
  if (!is(exprs(cds), "dgCMatrix")) {
    sf <- pData(cds)$Size_Factor
    pd <- new("AnnotatedDataFrame", data = pData(cds))
    fd <- new("AnnotatedDataFrame", data = fData(cds))
    cds <- suppressWarnings(newCellDataSet(as(exprs(cds), "dgCMatrix"),
                                           phenoData = pd,
                                           featureData = fd))
    pData(cds)$Size_Factor <- sf
  }
  pData(cds)$num_genes_expressed <- Matrix::colSums(as(exprs(cds),
                                                       "lgCMatrix"))
  cell_totals <- Matrix::colSums(exprs(cds))
  sf <- pData(cds)$Size_Factor
  pd <- new("AnnotatedDataFrame", data = pData(cds))
  fd <- new("AnnotatedDataFrame", data = fData(cds))
  # Size-factor-normalize the sparse matrix in place: @x holds nonzeros column
  # by column and diff(@p) is the per-column nonzero count.
  temp <- exprs(cds)
  temp@x <- temp@x / rep.int(pData(cds)$Size_Factor, diff(temp@p))
  norm_cds <- suppressWarnings(newCellDataSet(temp,
                                              phenoData = pd, featureData = fd))
  orig_cds <- cds
  # Convert both the normalized and raw CDS to the classifier's gene ID type.
  if(cds_gene_id_type != classifier_gene_id_type) {
    norm_cds <- cds_to_other_id(norm_cds, db=db, cds_gene_id_type,
                                classifier_gene_id_type)
    orig_cds <- cds_to_other_id(cds, db=db, cds_gene_id_type,
                                classifier_gene_id_type)
  }
  pData(norm_cds)$Size_Factor <- sf
  ##### Parse Marker File #####
  file_str = paste0(readChar(marker_file, file.info(marker_file)$size),"\n")
  # parse_list is an environment mapping cell type name -> cell type object.
  parse_list <- parse_input(file_str)
  orig_name_order <- unlist(parse_list[["name_order"]])
  rm("name_order", envir=parse_list)
  # Check and order subtypes
  # ranks: cell type name -> its declared parent (length 0 when top-level).
  ranks <- lapply(orig_name_order, function(i) parse_list[[i]]@parenttype)
  names(ranks) <- orig_name_order
  # NOTE(review): the `!= 0` below sits inside length(), so this effectively
  # tests length(...) > 0 only by accident (if() sees a nonzero length).
  # Behavior is correct in practice; the parenthesis placement is misleading.
  if(length(unlist(unique(ranks[which(!ranks %in% names(ranks) & lengths(ranks) != 0L)])) != 0)) {
    stop(paste("Subtype", unlist(unique(ranks[which(!ranks %in% names(ranks) & lengths(ranks) != 0L)])), "is not defined in marker file."))
  }
  if(any(names(ranks) == ranks)) {
    bad <- ranks[names(ranks) == ranks]
    stop(paste0("'", bad,
                "' cannot be a subtype of itself. Please modify marker file."))
  }
  # Topological ordering: top-level types first, then repeatedly append types
  # whose parent is already ordered.
  name_order <- names(ranks[lengths(ranks) == 0L])
  ranks <- ranks[!names(ranks) %in% name_order]
  while(length(ranks) != 0) {
    name_order <- c(name_order, names(ranks)[ranks %in% name_order])
    ranks <- ranks[!names(ranks) %in% name_order]
  }
  if(is.null(parse_list)) stop("Parse failed!")
  message(paste("There are", length(parse_list), "cell type definitions"))
  # Check gene names and keywords
  gene_table <- make_name_map(parse_list,
                              as.character(row.names(fData(norm_cds))),
                              classifier_gene_id_type,
                              marker_file_gene_id_type,
                              db)
  ##### Make garnett_classifier #####
  classifier <- new_garnett_classifier()
  classifier@gene_id_type <- classifier_gene_id_type
  if(is(db, "character") && db == "none") classifier@gene_id_type <- "custom"
  # Add one node per cell type (parents first, thanks to name_order).
  for(i in name_order) {
    # check meta data exists
    if (nrow(parse_list[[i]]@meta) != 0) {
      if (!all(parse_list[[i]]@meta$name %in% colnames(pData(norm_cds)))) {
        bad_meta <- parse_list[[i]]@meta$name[!parse_list[[i]]@meta$name %in%
                                                colnames(pData(norm_cds))]
        stop(paste0("Cell type '", parse_list[[i]]@name,
                    "' has a meta data specification '", bad_meta ,
                    "' that's not in the pData table."))
      }
    }
    logic_list <- assemble_logic(parse_list[[i]], gene_table)
    classifier <- add_cell_rule(parse_list[[i]], classifier, logic_list)
  }
  # Geometric-mean total counts over median genes expressed, stored for later
  # use by the classifier.
  classifier@cell_totals <- exp(mean(log(cell_totals)))/
    stats::median(pData(norm_cds)$num_genes_expressed)
  ##### Create transformed marker table #####
  # Propagation mutates parse_list in place (it is an environment); the return
  # value for "root" is not used further.
  if(propogate_markers) {
    root <- propogate_func(curr_node = "root", parse_list, classifier)
  }
  tf_idf <- tfidf(norm_cds) #slow
  ### Aggregate markers ###
  # marker_scores: one column per cell type with a per-cell marker score;
  # cells hit by negative markers are zeroed out.
  marker_scores <- data.frame(cell = row.names(tf_idf))
  for (i in name_order) {
    agg <- aggregate_positive_markers(parse_list[[i]], tf_idf,
                                      gene_table, back_cutoff)
    bad_cells <- get_negative_markers(parse_list[[i]], tf_idf,
                                      gene_table, back_cutoff)
    if(is.null(agg)) {
      warning (paste("Cell type", i, "has no genes that are expressed",
                     "and will be skipped"))
    } else {
      agg[names(agg) %in% bad_cells] <- 0
      marker_scores <- cbind(marker_scores, as.matrix(agg))
      colnames(marker_scores)[ncol(marker_scores)] <- parse_list[[i]]@name
    }
  }
  ##### Train Classifier #####
  # One model per internal node of the tree, trained on cells loosely
  # classified into that node by its parent's model.
  for (v in igraph::V(classifier@classification_tree)){
    child_cell_types <- igraph::V(classifier@classification_tree)[
      suppressWarnings(outnei(v))]$name
    if(length(child_cell_types) > 0) {
      ### Get CDS subset for training ###
      if(igraph::V(classifier@classification_tree) [ v ]$name == "root") {
        cds_sub <- norm_cds
        orig_sub <- orig_cds
      } else {
        # loosely classify to subset
        new_assign <-
          make_predictions(norm_cds,
                           classifier,
                           igraph::V(classifier@classification_tree)[
                             suppressWarnings(innei(v))]$name,
                           rank_prob_ratio = 1.1,
                           s = "lambda.min")
        if(!igraph::V(classifier@classification_tree)[v]$name %in%
           names(new_assign)) {
          message(paste0("No cells classified as ",
                         igraph::V(classifier@classification_tree) [ v ]$name,
                         ". No subclassification"))
          next
        }
        # Keep only cells with a nonzero assignment score for this node.
        good_cells <-
          as.matrix(new_assign[
            igraph::V(classifier@classification_tree)[v]$name][[1]])
        good_cells <- names(good_cells[good_cells[,1] != 0,])
        if(length(good_cells) == 0) {
          message(paste0("No cells classified as ",
                         igraph::V(classifier@classification_tree) [ v ]$name,
                         ". No subclassification"))
          next
        }
        cds_sub <- norm_cds[,good_cells]
        orig_sub <- orig_cds[,good_cells]
      }
      ### Get training sample ###
      training_sample <- get_training_sample(cds = cds_sub,
                                             orig_cds = orig_sub,
                                             classifier,
                                             tf_idf,
                                             gene_table,
                                             v,
                                             parse_list,
                                             name_order,
                                             max_training_samples,
                                             num_unknown,
                                             back_cutoff,
                                             training_cutoff,
                                             marker_scores,
                                             return_initial_assign)
      # Debugging escape hatch: hand back the raw assignments, no training.
      if(return_initial_assign) {
        return(training_sample)
      }
      if (length(training_sample) > 0 & sum(training_sample != "Unknown") > 0) {
        # exclude useless genes
        # (genes above the rel_gene_quantile quantile in every cell type carry
        # no discriminating signal; note 'rm' here is a local data frame that
        # shadows base::rm within this scope)
        sub <- norm_cds[,names(training_sample[training_sample != "Unknown"])]
        tf <- tfidf(sub)
        temp <- training_sample[training_sample != "Unknown"]
        y <- split.data.frame(as.matrix(tf), temp)
        rm <- lapply(y, colMeans)
        rm[["Unknown"]] <- NULL
        rm <- do.call(cbind, rm)
        rm <- as.data.frame(rm)
        rm$num_3q <- rowSums(rm > apply(rm, 2, stats::quantile,
                                        p = rel_gene_quantile, na.rm=TRUE))
        exclude <- row.names(rm[rm$num_3q == max(rm$num_3q),])
        cds_sub <- cds_sub[setdiff(row.names(fData(cds_sub)), exclude),]
        classifier <- train_glmnet(cds_sub,
                                   classifier,
                                   v,
                                   training_sample,
                                   min_observations = min_observations,
                                   lambdas = lambdas,
                                   cores = cores,
                                   gene_table = gene_table,
                                   perc_cells = perc_cells)
      } else {
        if(igraph::V(classifier@classification_tree)[v]$name == "root") {
          stop(paste("Not enough training samples for any cell types at root",
                     "of cell type hierarchy!"))
        }
        message(paste0("Not enough training samples for children of ",
                       igraph::V(classifier@classification_tree)[v]$name,
                       ". They will not be subclassified."))
      }
    }
  }
  return(classifier)
}
# Run the rly lexer/parser over the raw marker file text and return the
# resulting parse environment (cell type name -> cell type object, plus a
# "name_order" entry).
parse_input <- function(file_str,
                        debug = F) {
  # Parse input_file
  marker_lexer <- rly::lex(Lexer, debug = debug)
  marker_parser <- rly::yacc(Parser, debug = debug)
  marker_parser$parse(file_str, marker_lexer)
}
# Build a lookup table mapping every gene named in the marker file to the gene
# ID used by the CDS.
#
# parse_list               parsed marker file (cell type definitions).
# possible_genes           row names of the CDS feature data.
# cds_gene_id_type         gene ID type of the CDS (or "custom").
# marker_file_gene_id_type gene ID type used in the marker file.
# db                       AnnotationDb object used for ID conversion.
#
# Returns a data frame with columns:
#   orig_fgenes - gene name exactly as written in the marker file
#   fgenes      - name converted to the CDS ID type (NA if unmappable)
#   cds         - matching CDS row name (may carry an ENSEMBL version suffix)
#   parent      - cell type the marker belongs to
#   in_cds      - TRUE when the gene is present in the CDS
make_name_map <- function(parse_list,
                          possible_genes,
                          cds_gene_id_type,
                          marker_file_gene_id_type,
                          db) {
  gene_start <- collect_gene_names(parse_list)
  gene_table <- data.frame(fgenes = gene_start[,1], parent = gene_start[,2])
  gene_table$parent <- as.character(gene_table$parent)
  gene_table$fgenes <- as.character(gene_table$fgenes)
  gene_table$orig_fgenes <- gene_table$fgenes
  if(cds_gene_id_type != marker_file_gene_id_type) {
    gene_table$fgenes <- convert_gene_ids(gene_table$orig_fgenes,
                                          db,
                                          marker_file_gene_id_type,
                                          cds_gene_id_type)
    bad_convert <- sum(is.na(gene_table$fgenes))
    # BUG FIX: this previously read gene_table$orig_genes - a column that does
    # not exist (and is not a partial match for orig_fgenes), so the warning
    # never listed the failed genes.
    if (bad_convert > 0) warning(paste(bad_convert,
                                       "genes could not be converted from",
                                       marker_file_gene_id_type,
                                       "to", cds_gene_id_type, "These genes are",
                                       "listed below:", paste0(gene_table$orig_fgenes[
                                         is.na(gene_table$fgenes)],
                                         collapse="\n")))
  } else {
    gene_table$cds <- gene_table$fgenes
  }
  if(cds_gene_id_type == "ENSEMBL" | marker_file_gene_id_type == "ENSEMBL") {
    # ENSEMBL row names in the CDS may carry version suffixes ("ENSG....12");
    # match on the unversioned ID but keep the versioned CDS row name.
    gene_table$cds <- NULL
    possibles <- data.frame(cds = possible_genes,
                            ensembl = as.character(
                              stringr::str_split_fixed(possible_genes,
                                                       "\\.",
                                                       2)[,1]))
    gene_table <- merge(gene_table, possibles, all.x = TRUE,
                        by.x="fgenes", by.y="ensembl")
    gene_table$fgenes <- gene_table$cds
  } else {
    gene_table$cds <- gene_table$fgenes
  }
  # BUG FIX: previously written as gene_table$f, which only worked through $
  # partial matching against "fgenes"; reference the column explicitly.
  gene_table$in_cds <- gene_table$fgenes %in% possible_genes
  gene_table$in_cds[is.na(gene_table$in_cds)] <- FALSE
  bad_genes <- gene_table$orig_fgenes[!gene_table$in_cds]
  if (length(bad_genes) > 0) warning(strwrap("The following genes from
                                             the cell type definition file are
                                             not present in the cell dataset.
                                             Please check these genes for
                                             errors. Cell type determination
                                             will continue, ignoring these
                                             genes."), "\n",
                                     paste0(bad_genes, collapse="\n"))
  gene_table$fgenes <- as.character(gene_table$fgenes)
  gene_table$cds <- as.character(gene_table$cds)
  gene_table
}
# Register one cell type on the classifier: record its references, then attach
# a classify_func built from logic_list (character fragments that are ANDed
# together and eval'd against the expression matrix x). Cell types with no
# valid rules are added with an all-FALSE classifier and a warning.
# Returns the updated classifier.
add_cell_rule <- function(cell_type,
                          classifier,
                          logic_list) {
  # Set parenttype to root if no parent
  if (length(cell_type@parenttype) == 0) {
    cell_type@parenttype <- "root"
  }
  # subtype of
  if (length(cell_type@parenttype) > 1) stop("only 1 parenttype allowed")
  parent_type <- as.character(cell_type@parenttype)
  # references
  if (length(cell_type@references) > 0) {
    if (length(classifier@references) == 0) {
      classifier@references <- list()
    }
    classifier@references <- c(classifier@references,
                               list(cell_type@references))
    names(classifier@references)[length(classifier@references)] <-
      cell_type@name
  }
  # No usable rules: keep the node in the tree but never match any cell.
  if (length(logic_list) == 0) {
    warning (paste("Cell type", cell_type@name,
                   "has no valid rules and will be skipped"))
    classifier <- add_cell_type(classifier, cell_type@name,
                                classify_func = function(x) {rep(FALSE, ncol(x))},
                                parent_type)
    return(classifier)
  }
  # Flatten (possibly nested) rule fragments into one AND expression.
  logic <- paste(unlist(logic_list), collapse = ' & ')
  tryCatch(
    if(nchar(logic) == 0) {
      classifier <- add_cell_type(classifier, cell_type@name,
                                  classify_func = function(x) {FALSE},
                                  parent_type)
    } else {
      # The closure captures 'logic' and evaluates it lazily against x when
      # the classifier runs (eval(parse(...)) is the rule engine by design).
      classifier <- add_cell_type(classifier, cell_type@name,
                                  classify_func = function(x) {
                                    eval(parse(text = logic))
                                  },
                                  parent_type)
    },
    error = function(e) {
      msg <- paste("Cell type rule generation failed on the",
                   "cell definition for ", cell_type@name, ".\nError: ",e)
      stop(msg)
    }
  )
  return(classifier)
}
# Assemble the character rule fragments for one cell type. Pieces come from
# three sources: explicit gene_rules (expression range tests on x), an
# "assigns ==" test when expressed/not_expressed markers exist, and pData
# meta-data membership tests. Returns a (possibly nested) list of strings;
# add_cell_rule later unlist()s and ANDs them.
assemble_logic <- function(cell_type,
                           gene_table) {
  logic = ""
  logic_list = list()
  # Genes from the marker file that are absent from the CDS are skipped.
  bad_genes <- gene_table[!gene_table$in_cds,]$orig_fgenes
  # expressed/not expressed
  # NOTE(review): rules on bad genes yield invisible NULL elements here
  # (the if has no else); log_piece is never used.
  logic_list <- lapply(cell_type@gene_rules, function(rule) {
    log_piece <- ""
    if (!rule@gene_name %in% bad_genes) {
      paste0("(x['",
             gene_table$fgenes[match(rule@gene_name, gene_table$orig_fgenes)],
             "',] > ", rule@lower,
             ") & (x['",
             gene_table$fgenes[match(rule@gene_name, gene_table$orig_fgenes)],
             "',] < ",
             rule@upper,
             ")")
    }
  })
  # This nests the previous list rather than appending; downstream unlist()
  # flattens it, so the nesting is harmless.
  if (length(cell_type@expressed) > 0 | length(cell_type@not_expressed) > 0) {
    logic_list <- list(logic_list, paste0("assigns == '", cell_type@name, "'"))
  }
  if(length(logic_list) == 0) warning(paste("Cell type", cell_type@name,
                                            "has no valid expression rules."))
  # meta data
  # One "%in%" membership test per pData column named in the meta table.
  if (nrow(cell_type@meta) > 0) {
    mlogic <- plyr::dlply(cell_type@meta, plyr::.(name), function(x) {
      if(nrow(x) == 1){
        out <- paste0(x["name"], " %in% c('", x[,"spec"][1],"')")
      } else {
        out <- paste0(x[,"name"][1], " %in% c('", paste(x[,"spec"],
                                                        collapse = "', '"),
                      "')")
      }
      out
    })
    logic_list <- c(logic_list, unname(mlogic))
  }
  logic_list <- logic_list[!is.na(logic_list)]
  logic_list
}
# Recursively pool the 'expressed' markers of curr_node with those of every
# descendant in the classification tree. Non-root nodes have the pooled set
# written back into parse_list (an environment, so the update persists for
# the caller). Returns the pooled gene vector for curr_node.
propogate_func <- function(curr_node,
                           parse_list,
                           classifier) {
  # Direct children of curr_node in the cell-type tree.
  kids <- igraph::V(classifier@classification_tree)[
    suppressWarnings(outnei(curr_node))]$name
  # Leaf: contributes only its own marker genes.
  if (length(kids) == 0) {
    return(parse_list[[curr_node]]@expressed)
  }
  # Internal node: seed with its own markers ("root" has no entry), then fold
  # in each subtree's pooled set.
  pooled <- c()
  if (curr_node != "root") {
    pooled <- parse_list[[curr_node]]@expressed
  }
  for (kid in kids) {
    pooled <- union(pooled, propogate_func(kid, parse_list, classifier))
  }
  if (curr_node != "root") {
    parse_list[[curr_node]]@expressed <- pooled
  }
  return(pooled)
}
# TF-IDF transform of a CDS expression matrix: term frequency = each cell's
# counts over that cell's total; IDF = log(1 + n_cells / n_cells_expressing).
# Genes with zero total counts are dropped. Returns a cells x genes matrix.
tfidf <- function(input_cds) {
  ncounts <- exprs(input_cds)
  ncounts <- ncounts[Matrix::rowSums(ncounts) != 0,]
  nfreqs <- ncounts
  # In a column-compressed sparse matrix, @x stores nonzeros column by column
  # and diff(@p) gives the per-column nonzero count, so this divides each
  # entry by its own cell's total.
  nfreqs@x <- ncounts@x / rep.int(Matrix::colSums(ncounts), diff(ncounts@p))
  tf_idf_counts <- nfreqs * log(1 + ncol(ncounts) / Matrix::rowSums(ncounts > 0))
  Matrix::t(tf_idf_counts)
}
# Fit a multinomial glmnet model for the children of curr_node and attach it
# to that node of classifier@classification_tree. Cell types with fewer than
# min_observations training cells are dropped; glmnet failures are retried up
# to 5 times, either trimming the lambda path ("repeat") or dropping the
# rarest remaining cell type ("low_cell"). Returns the classifier.
train_glmnet <- function(cds,
                         classifier,
                         curr_node,
                         training_sample,
                         min_observations,
                         cores,
                         lambdas,
                         gene_table,
                         perc_cells) {
  # Geometric mean of the positive entries (used to balance class weights).
  gm_mean = function(x, na.rm=TRUE){
    exp(sum(log(x[x > 0]), na.rm=na.rm) / length(x))
  }
  # calculate weights
  obs_counts = table(training_sample)
  obs_weights = gm_mean(obs_counts) / obs_counts
  # check enough example cells per cell type
  excluded_cell_types = names(which(obs_counts < min_observations))
  print(obs_counts)
  if (length(excluded_cell_types) > 0) {
    message(paste("The following cell types do not have enough training",
                  "cells and will be dropped: ",
                  paste(excluded_cell_types, collapse = " ")))
  }
  # Only "Unknown" (and dropped types) remain: nothing to train on.
  if(length(setdiff(names(obs_counts),
                    c(excluded_cell_types, "Unknown"))) == 0) {
    warning("No cell types with sufficient examples")
    return(classifier)
  }
  count <- 0
  done <- FALSE
  cvfit = ""
  if(is.null(lambdas)) lambdas <- unique(c(100000, 50000,
                                           seq(10000, 100, by=-200),
                                           seq(100,10, by=-20),
                                           seq(10, 1, by=-2),
                                           seq(1, .1, by=-.2), .1, .05, .01,
                                           .005, .001, .0005, .0001))
  # Retry loop; 'cvfit' carries the retry mode as a string between iterations.
  while(done == FALSE & count < 5){
    if(cvfit == "low_cell") {
      excluded_cell_types <- c(excluded_cell_types,
                               names(which.min(table(training_sample))))
    } else if (cvfit == "repeat") lambdas <- lambdas[1:(length(lambdas) - 3)]
    training_sample = training_sample[!training_sample %in% excluded_cell_types]
    training_sample <- droplevels(training_sample)
    cds_sub = cds[,names(training_sample)]
    count <- count + 1
    y = training_sample
    # only include genes in model that are expressed in 5% of training cells
    candidate_model_genes = c()
    for (cell_type in levels(y)){
      genes_in_cell_type = names(which(Matrix::rowSums(
        exprs(cds_sub[,y == cell_type]) > 0) >
          perc_cells * sum(y == cell_type)))
      candidate_model_genes = append(candidate_model_genes, genes_in_cell_type)
    }
    candidate_model_genes = unique(candidate_model_genes)
    cds_sub = cds_sub[candidate_model_genes,]
    x = Matrix::t(exprs(cds_sub))
    if (length(which(table(y ) < 8)) > 0) {
      message(paste("The following cell types have few training examples.",
                    "Be careful with interpretation"))
      print(names(which(table(y ) < 8)))
    }
    # Near-zero penalty for markers declared for this node, so glmnet
    # essentially never regularizes them away.
    pens <- rep(1, ncol(x))
    sub <-
      gene_table[gene_table$parent ==
                   igraph::V(classifier@classification_tree)[curr_node]$name,]
    pens[colnames(x) %in% sub$fgenes] <- 0.00001
    # train model
    cvfit <- tryCatch({
      if (cores > 1){
        doParallel::registerDoParallel(cores=cores)
        cvfit <- suppressWarnings(
          glmnet::cv.glmnet(x, y, lambda = lambdas,
                            weights=obs_weights[y],
                            alpha=.3,
                            family = "multinomial",
                            type.multinomial = "grouped",
                            type.measure="class",
                            type.logistic = "modified.Newton",
                            lambda.min.ratio=0.001,
                            standardize=FALSE,
                            parallel=TRUE,
                            thresh=1e-6,
                            nfolds=3,
                            nlambda=20,
                            penalty.factor = pens))
      }else{
        cvfit <- suppressWarnings(
          glmnet::cv.glmnet(x, y, lambda = lambdas,
                            weights=obs_weights[y],
                            alpha=.3,
                            family = "multinomial",
                            type.multinomial = "grouped",
                            type.logistic = "modified.Newton",
                            type.measure="class",
                            lambda.min.ratio=0.001,
                            standardize=FALSE,
                            parallel=FALSE,
                            thresh=1e-6,
                            nfolds=3,
                            nlambda=50,
                            penalty.factor = pens))
      }
      message("Model training finished.")
      cvfit
    }, error = function(e) {
      print (e)
      # Every branch returns; the trailing 'classifier' below is unreachable.
      if(count < 5 & grepl("90000", as.character(e))) {
        message(paste0("GLMNET failed with unknown error code, trying again"))
        return("repeat")
      } else if(count < 5 & length(unique(training_sample)) > 2) {
        message(paste0("GLMNET failed, excluding low count cell type: ",
                       names(which.min(table(training_sample))),
                       " and trying again"))
        return("low_cell")
      } else {
        message(paste0("GLMNET failed"))
        return(e)
      }
      classifier
    })
    if(is.character(cvfit)) {
      # "repeat" / "low_cell": loop again with adjusted inputs.
      if(cvfit == "repeat") {
        done <- FALSE
      } else if(cvfit == "low_cell") {
        done <- FALSE
      }
    } else if (inherits(cvfit, "error")) {
      # Unrecoverable: give up; classifier returned without a model here.
      done <- TRUE
    } else {
      # Success: store the cv.glmnet fit on the tree node.
      igraph::V(classifier@classification_tree)[curr_node]$model <- list(cvfit)
      done <- TRUE
    }
  }
  return(classifier)
}
|
library("pracma")
library("numbers")
library("MASS")
library("xtable")
# Build the "square modulo triangle": row j holds the squares (1:j)^2 (f1) and
# their residues modulo j (f2), flattened row-by-row into vectors of length
# n*(n+1)/2.
n=300
f1=rep(0, n*(n+1)/2)
f2=rep(0, n*(n+1)/2)
l=0  # running offset of the current row within the flattened vectors
for (j in 1:n) {
  s=(1:j)^2
  f1[(l+1):(l+j)]=s
  f2[(l+1):(l+j)]= (s %% j)
  l=l+j
}
#basic plots
par(mfrow=c(1,1))
par(pty="m", mar=c(2,2,1,1), mgp=c(1,.35,0))
# Scatter of the linearized residues; the green curve y=(-1+sqrt(1+8x))/2 is
# the inverse of the triangular numbers, i.e. the row boundary.
plot(x=c(0:(length(f2)-1)), f2, pch=16, cex=.25, col=c("dodgerblue4","darkred"), xlab="n",ylab= expression(paste(f,"[n]")), main=expression(paste(f, " linearized square modulo triangle")))
curve((-1+sqrt(1+8*x))/2, from = 0, to=length(f2), n=2*length(f2), col="darkgreen", add=T)
abline(v=seq(0,n*(n+1)/2,1000), h=seq(0,n,10), col="gray", lty=3)
#secondary parabola starts
# Start points of the secondary parabolic arcs visible in the scatter, built
# from the squares and their midpoints.
r1=(1:round(sqrt(n/2),0))^2
r2=sapply(1:(length(r1)-1), function(x) (r1[x]+r1[x+1])/2)
r1=sort(c(r1,r2))
y1=2*r1
x1=y1*r1
points(x1,y1, col="darkgreen")
#secondary parabola curve fits
# Hand-tuned arc fits: the 'cee' offsets appear chosen by eye per arc.
# ceil() comes from the pracma package.
a=ceil((1:length(x1)+2/3)^2)
cee=c(1,25,50,100,100,200,200,400,480, 600, 700, 1000, 1100, 1400, 1550, 2100, 2000, 2700, 2800, 3250, 3250,4200, 3900)
endpoints=(a^2+cee)/1.89
for (j in 1:length(y1)) {
  curve(a[j]-sqrt(1.89*x-cee[j]), from = x1[j], to=endpoints[j], col="lightcoral", add=T, n=2*length(f2))
}
#square modulus triangle
# Re-fold the flattened vectors into lower-triangular m x m matrices.
m=300
sqmod=matrix(data=NA, nrow = m, ncol = m)
sqrs=matrix(data=NA, nrow = m, ncol = m)
l=0
for (j in 1:m) {
  sqmod[j,(1:j)]=f2[(l+1):(l+j)]
  sqrs[j,(1:j)]=f1[(l+1):(l+j)]
  l=l+j
}
#as image
image(t(sqmod), asp=1, axes=F, ylim=c(1,0), col=colors()[c(1:136,233:502)][1:max(sqmod, na.rm = T)]) #as is
image(t(asinh(sqmod)), asp=1, axes=F, ylim=c(1,0), col=colors()[c(1:136,233:502)][1:max(sqmod, na.rm = T)]) #with asinh transform
#print
print(xtable(sqmod, digits = 0), include.rownames = F, include.colnames = F) #xtable
print(xtable(sqrs, digits = 0), include.rownames = F, include.colnames = F) #xtable
prmatrix(sqmod, na.print = "", collab = rep("",m),rowlab = rep("",m))
#plots of particular rows of matrix
par(mfrow=c(3,3))
par(pty="m", mar=c(2,2,1,1), mgp=c(1,.35,0))
# NOTE(review): n is reassigned here (300 -> 293) and stays 293 for the rest
# of the script; later floor(sqrt(n)) computations use this value.
n=293
plot(c(na.omit(sqmod[n,])), type="h", col=c("dodgerblue4","darkred"), xlab="n",ylab=paste("T[",n,", k]"), main=paste("row=", n))
# column descent by 1 start sequence
d1=sapply(0:(m-1), function(x) floor(x^2/2)+x)
# column descent by 2 start sequence
d2=sapply(1:(m), function(x) floor(x*(x+2)/3)-1) #also sapply(1:(m), function(x) floor(x*(x-1)/3)+x-1)
# column descent by 3 start sequence
d3=sapply(0:(m-1), function(x) floor((x*(x+6)/4))) # also d3=sapply(1:(m), function(x) (2*x*(x+6)-3*(1-(-1)^x))/8)
#descent matrix
# First 12 terms of the descent-by-k start sequences; columns 4-9 look filled
# in by inspection rather than formula.
dmat=matrix(nrow = 12, ncol=9)
dmat[,1]=d1[1:12]
dmat[2:12,2]=d2[1:11]
dmat[3:12,3]=d3[1:10]
dmat[6:12,4]=c(4,9,12,13,16,21,28)
dmat[8:12,5]=c(9,11,15,16,19)
dmat[9:12,6]=c(9,10,13,18)
dmat[10:12,7]=c(9,9,11)
dmat[11:12,8]=c(9,8)
dmat[12,9]=c(9)
#column descent sequence run length
# drl[j]: run length of the descent sequence in column j; kbox[j]: the count
# of completed thresholds (k increments whenever j == l^2 - k).
drl=rep(0,m)
# BUG FIX: kbox was never initialized, so `kbox[j]=k` below errored with
# "object 'kbox' not found" on the first iteration.
kbox=rep(0,m)
k=0
l=1
for (j in 1:length(drl)) {
  if(j==l^2-k){
    k=k+1
    l=l+1
  }
  drl[j]=j^2-j-k+1
  kbox[j]=k
}
#single line formula for above
# Closed form matching the loop above (k at index x equals round(sqrt(x))).
drl=sapply(1:m, function(x) x^2-x-round(sqrt(x))+1)
#number of starting square terms per column
n.sqtrm= sapply(1:m, function(x) round(sqrt(x)))
#number of terms before the final run of k^2 square terms in a column
before.sq=sapply(1:m, function(x) x^2-x+1)
#modulo frequency
# Frequency of each residue value over the whole triangle. Note: n is 293 at
# this point (reassigned earlier in the script).
fr=table(f2)
par(pty="m", mar=c(2,2,1,1), mgp=c(1,.35,0), mfrow=c(1,1))
plot(x=names(fr), y=c(fr), type = "h", col="dodgerblue4", xlab="n", ylab="fr")
#fit for square terms
# fr is indexed by position (residue value + 1), hence the +1 offsets below.
te=fr[(1:floor(sqrt(n)))^2+1]
ye=as.numeric(names(te))
reg1=lm(te ~ ye)
abline(reg1, col="darkred", lty=3, lwd=2)
#minima near square terms
wi=10  # half-width of the search window around each square position
sqs=sapply(4:floor(sqrt(n)), function(x) x^2+1)
te=sapply(sqs, function(x) min(fr[(x-wi):(x+wi)]))
ye=sapply(1:length(sqs), function(x) sqs[x]-wi-1+match(te[x], fr[(sqs[x]-wi):(sqs[x]+wi)]))
points(x=ye, y=te, col="red")
#maxima other than square terms
# Zero out the square positions first, then look for window maxima.
wi=15
sqs=sapply(0:floor(sqrt(n)), function(x) x^2+1)
ye=sapply(1:n, function(x) ifelse(x %in% sqs, 0, fr[x]))
te=sapply(tail(sqs, -4), function(x) max(ye[(x-wi):(x+wi)]))
ye=sapply(1:length(tail(sqs, -4)), function(x) tail(sqs, -4)[x]-wi-1+match(te[x], fr[(tail(sqs, -4)[x]-wi):(tail(sqs, -4)[x]+wi)]))
points(x=ye, y=te, col="blueviolet")
te=mean(fr[sapply(0:floor(sqrt(n)), function(x) x^2+1)]) #mean freq of square terms
ye=mean(fr[-sapply(0:floor(sqrt(n)), function(x) x^2+1)]) #mean freq of non-square terms
#location of primes
# primes() comes from the pracma package.
te=fr[primes(300)+2]
ye=primes(300)+1
points(ye, te, col="red")
#attempt at minima
# Positions where a new running minimum of the frequency table occurs.
te=unique(unlist(sapply(1:n, function(x) which(fr[1:x]==min(fr[1:x])))))
ye=c(fr[te])
points(te-1, ye, col="red", pch=16)
|
/scripts/square_modulus_triangle.R
|
no_license
|
somasushma/R-code
|
R
| false
| false
| 4,686
|
r
|
library("pracma")
library("numbers")
library("MASS")
library("xtable")
n=300
f1=rep(0, n*(n+1)/2)
f2=rep(0, n*(n+1)/2)
l=0
for (j in 1:n) {
s=(1:j)^2
f1[(l+1):(l+j)]=s
f2[(l+1):(l+j)]= (s %% j)
l=l+j
}
#basic plots
par(mfrow=c(1,1))
par(pty="m", mar=c(2,2,1,1), mgp=c(1,.35,0))
plot(x=c(0:(length(f2)-1)), f2, pch=16, cex=.25, col=c("dodgerblue4","darkred"), xlab="n",ylab= expression(paste(f,"[n]")), main=expression(paste(f, " linearized square modulo triangle")))
curve((-1+sqrt(1+8*x))/2, from = 0, to=length(f2), n=2*length(f2), col="darkgreen", add=T)
abline(v=seq(0,n*(n+1)/2,1000), h=seq(0,n,10), col="gray", lty=3)
#secondary parabola starts
r1=(1:round(sqrt(n/2),0))^2
r2=sapply(1:(length(r1)-1), function(x) (r1[x]+r1[x+1])/2)
r1=sort(c(r1,r2))
y1=2*r1
x1=y1*r1
points(x1,y1, col="darkgreen")
#secondary parabola curve fits
a=ceil((1:length(x1)+2/3)^2)
cee=c(1,25,50,100,100,200,200,400,480, 600, 700, 1000, 1100, 1400, 1550, 2100, 2000, 2700, 2800, 3250, 3250,4200, 3900)
endpoints=(a^2+cee)/1.89
for (j in 1:length(y1)) {
curve(a[j]-sqrt(1.89*x-cee[j]), from = x1[j], to=endpoints[j], col="lightcoral", add=T, n=2*length(f2))
}
# Square-modulo triangle as lower-triangular m x m matrices:
# row j of sqmod/sqrs holds the j entries of f2/f1 belonging to that row.
m <- 300
sqmod <- matrix(NA, nrow = m, ncol = m)
sqrs <- matrix(NA, nrow = m, ncol = m)
l <- 0
for (row in seq_len(m)) {
  cols <- seq_len(row)
  sqmod[row, cols] <- f2[l + cols]
  sqrs[row, cols] <- f1[l + cols]
  l <- l + row
}
#as image
# Render the triangle as a heat map (row 1 at the top via ylim=c(1,0)); the
# palette indexes colors() skipping entries 137:232.
image(t(sqmod), asp=1, axes=F, ylim=c(1,0), col=colors()[c(1:136,233:502)][1:max(sqmod, na.rm = T)]) #as is
image(t(asinh(sqmod)), asp=1, axes=F, ylim=c(1,0), col=colors()[c(1:136,233:502)][1:max(sqmod, na.rm = T)]) #with asinh transform
#print
# Dump both triangles as LaTeX (xtable) and as plain text (prmatrix).
print(xtable(sqmod, digits = 0), include.rownames = F, include.colnames = F) #xtable
print(xtable(sqrs, digits = 0), include.rownames = F, include.colnames = F) #xtable
prmatrix(sqmod, na.print = "", collab = rep("",m),rowlab = rep("",m))
#plots of particular rows of matrix
par(mfrow=c(3,3))
par(pty="m", mar=c(2,2,1,1), mgp=c(1,.35,0))
# NOTE(review): this reassigns n from 300 to 293; all later code that reads
# n (frequency fits, window searches below) picks up the new value.
n=293
plot(c(na.omit(sqmod[n,])), type="h", col=c("dodgerblue4","darkred"), xlab="n",ylab=paste("T[",n,", k]"), main=paste("row=", n))
# column descent by 1 start sequence
d1=sapply(0:(m-1), function(x) floor(x^2/2)+x)
# column descent by 2 start sequence
d2=sapply(1:(m), function(x) floor(x*(x+2)/3)-1) #also sapply(1:(m), function(x) floor(x*(x-1)/3)+x-1)
# column descent by 3 start sequence
d3=sapply(0:(m-1), function(x) floor((x*(x+6)/4))) # also d3=sapply(1:(m), function(x) (2*x*(x+6)-3*(1-(-1)^x))/8)
#descent matrix
# First 12 terms of each descent-start sequence, one column per descent step;
# columns 4-9 appear to have been read off the triangle by hand.
dmat=matrix(nrow = 12, ncol=9)
dmat[,1]=d1[1:12]
dmat[2:12,2]=d2[1:11]
dmat[3:12,3]=d3[1:10]
dmat[6:12,4]=c(4,9,12,13,16,21,28)
dmat[8:12,5]=c(9,11,15,16,19)
dmat[9:12,6]=c(9,10,13,18)
dmat[10:12,7]=c(9,9,11)
dmat[11:12,8]=c(9,8)
dmat[12,9]=c(9)
#column descent sequence run length
# drl[j] = j^2 - j - k + 1, where k increments each time j reaches l^2 - k;
# kbox records the k value in effect at each j.
drl <- rep(0, m)
kbox <- rep(0, m)  # fix: kbox was assigned in the loop below without ever
                   # being initialized ("object 'kbox' not found" at runtime)
k <- 0
l <- 1
for (j in seq_along(drl)) {
  if (j == l^2 - k) {
    k <- k + 1
    l <- l + 1
  }
  drl[j] <- j^2 - j - k + 1
  kbox[j] <- k
}
#single line formula for above
drl <- sapply(seq_len(m), function(x) x^2 - x - round(sqrt(x)) + 1)
#number of starting square terms per column
n.sqtrm= sapply(1:m, function(x) round(sqrt(x)))
#number of terms before the final run of k^2 square terms in a column
before.sq=sapply(1:m, function(x) x^2-x+1)
#modulo frequency
# fr[k] = how often residue (k-1) occurs in the whole linearized triangle.
fr=table(f2)
par(pty="m", mar=c(2,2,1,1), mgp=c(1,.35,0), mfrow=c(1,1))
# NOTE(review): names(fr) is character; this relies on plot() coercing the
# x values — as.numeric(names(fr)) would be explicit. Confirm it renders.
plot(x=names(fr), y=c(fr), type = "h", col="dodgerblue4", xlab="n", ylab="fr")
#fit for square terms
# Linear fit of the frequencies at residues that are perfect squares
# (fr is 1-indexed over residues starting at 0, hence the +1).
te=fr[(1:floor(sqrt(n)))^2+1]
ye=as.numeric(names(te))
reg1=lm(te ~ ye)
abline(reg1, col="darkred", lty=3, lwd=2)
#minima near square terms
# Window minima of fr within +/-wi of each square residue (squares >= 4^2).
wi=10
sqs=sapply(4:floor(sqrt(n)), function(x) x^2+1)
te=sapply(sqs, function(x) min(fr[(x-wi):(x+wi)]))
# Recover the x-position of each minimum inside its window via match().
ye=sapply(1:length(sqs), function(x) sqs[x]-wi-1+match(te[x], fr[(sqs[x]-wi):(sqs[x]+wi)]))
points(x=ye, y=te, col="red")
#maxima other than square terms
# Scan a +/-wi window around each square-term index of the modulo-frequency
# table fr for local maxima, after zeroing the square terms themselves so
# they cannot win the max.
wi=15
sqs=sapply(0:floor(sqrt(n)), function(x) x^2+1)
# ye: copy of fr with square-term positions blanked to 0
ye=sapply(1:n, function(x) ifelse(x %in% sqs, 0, fr[x]))
# te: window maxima around each square index (first 4 squares skipped)
te=sapply(tail(sqs, -4), function(x) max(ye[(x-wi):(x+wi)]))
# ye (reused): x-position of each window maximum, recovered via match()
ye=sapply(1:length(tail(sqs, -4)), function(x) tail(sqs, -4)[x]-wi-1+match(te[x], fr[(tail(sqs, -4)[x]-wi):(tail(sqs, -4)[x]+wi)]))
points(x=ye, y=te, col="blueviolet")
te=mean(fr[sapply(0:floor(sqrt(n)), function(x) x^2+1)]) #mean freq of square terms
ye=mean(fr[-sapply(0:floor(sqrt(n)), function(x) x^2+1)]) #mean freq of non-square terms
#location of primes
# Mark frequencies at prime positions; primes() is supplied by the
# pracma/numbers packages loaded at the top of the script.
te=fr[primes(300)+2]
ye=primes(300)+1
points(ye, te, col="red")
#attempt at minima
# Running minima: indices that achieve the minimum of some prefix fr[1:x]
te=unique(unlist(sapply(1:n, function(x) which(fr[1:x]==min(fr[1:x])))))
ye=c(fr[te])
points(te-1, ye, col="red", pch=16)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/operators.R
\name{mx}
\alias{mx}
\alias{\%mx\%}
\title{Numerical and Symbolic Matrix Product}
\usage{
mx(x, y)
x \%mx\% y
}
\arguments{
\item{x}{\code{numeric} or \code{character} matrix.}
\item{y}{\code{numeric} or \code{character} matrix.}
}
\value{
\code{matrix}.
}
\description{
Multiplies two \code{numeric} or \code{character} matrices, if they are conformable. If one argument is a vector, it will be promoted to either a row or column matrix to make the two arguments conformable. If both are vectors of the same length, it will return the inner product (as a \code{matrix}).
}
\section{Functions}{
\itemize{
\item \code{x \%mx\% y}: binary operator.
}}
\examples{
### numeric inner product
x <- 1:4
mx(x, x)
### symbolic inner product
x <- letters[1:4]
mx(x, x)
### numeric matrix product
x <- letters[1:4]
y <- diag(4)
mx(x, y)
### symbolic matrix product
x <- array(1:12, dim = c(3,4))
y <- letters[1:4]
mx(x, y)
### binary operator
x <- array(1:12, dim = c(3,4))
y <- letters[1:4]
x \%mx\% y
}
\references{
Guidotti E (2022). "calculus: High-Dimensional Numerical and Symbolic Calculus in R." Journal of Statistical Software, 104(5), 1-37. \doi{10.18637/jss.v104.i05}
}
\seealso{
Other matrix algebra:
\code{\link{mxdet}()},
\code{\link{mxinv}()},
\code{\link{mxtr}()}
}
\concept{matrix algebra}
|
/man/mx.Rd
|
no_license
|
cran/calculus
|
R
| false
| true
| 1,397
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/operators.R
\name{mx}
\alias{mx}
\alias{\%mx\%}
\title{Numerical and Symbolic Matrix Product}
\usage{
mx(x, y)
x \%mx\% y
}
\arguments{
\item{x}{\code{numeric} or \code{character} matrix.}
\item{y}{\code{numeric} or \code{character} matrix.}
}
\value{
\code{matrix}.
}
\description{
Multiplies two \code{numeric} or \code{character} matrices, if they are conformable. If one argument is a vector, it will be promoted to either a row or column matrix to make the two arguments conformable. If both are vectors of the same length, it will return the inner product (as a \code{matrix}).
}
\section{Functions}{
\itemize{
\item \code{x \%mx\% y}: binary operator.
}}
\examples{
### numeric inner product
x <- 1:4
mx(x, x)
### symbolic inner product
x <- letters[1:4]
mx(x, x)
### numeric matrix product
x <- letters[1:4]
y <- diag(4)
mx(x, y)
### symbolic matrix product
x <- array(1:12, dim = c(3,4))
y <- letters[1:4]
mx(x, y)
### binary operator
x <- array(1:12, dim = c(3,4))
y <- letters[1:4]
x \%mx\% y
}
\references{
Guidotti E (2022). "calculus: High-Dimensional Numerical and Symbolic Calculus in R." Journal of Statistical Software, 104(5), 1-37. \doi{10.18637/jss.v104.i05}
}
\seealso{
Other matrix algebra:
\code{\link{mxdet}()},
\code{\link{mxinv}()},
\code{\link{mxtr}()}
}
\concept{matrix algebra}
|
# THIS NEEDS WORK - tighten up the value-checking. Integrate Campbell's 7999 value => should set 'D' flag
library(RMySQL)
options(scipen=999) #This disables scientific notation for values
#Process the ME limno dat at the end of each day when buoy data in available from AOS. This should run between
#1930-midnight on the relevant day so the correct directory can be found. This also does range-checking.
#Limno data is recorded once per minute
### Get current date. When this script runs, UTC is one day ahead so we can just use our current jday.
date <- Sys.Date()
year <- as.character(format(date,"%Y"))
jday <- as.character(format(date,"%j")) #It's 3-digit char because it will be part of the path
month <- as.numeric(format(date,"%m"))
datestr <- as.character(format(date,"%Y%m%d"))
#message(date())
##For testing and single-day processing:
#year <- "2017"
#jday <- "114"
message("Processing Mendota watertemp hires for: jday=",jday," year=",year)
### Load from the range-checking file
#rangeFile <- "../range_checks_new.csv"
rangeFile <- "/triton/BuoyData/range_checks_new.csv"
df.R <- read.csv(rangeFile,header=T,stringsAsFactors=FALSE) #Read header to index the proper row
#Create two matrices to hold min/max range values; rows are thermistor number (depth) and cols are month
# The watertemp ranges live in alternating rows of the CSV starting at data
# row 138: max at 138, 140, 142, ... and min at 139, 141, 143, ...
# NOTE(review): 138/139 are hard-coded offsets into the shared range file —
# confirm they still point at the Mendota watertemp section if the CSV changes.
minRange <- matrix(nrow=23,ncol=12)
maxRange <- matrix(nrow=23,ncol=12)
for (row in 1:23) {
for (mo in 1:12) {
maxRange[row,mo] <- df.R[2*(row-1)+138,mo+1]
minRange[row,mo] <- df.R[2*(row-1)+139,mo+1]
}#mo
}#row
### Functions
# Check for blank data
# Check whether a raw logger value counts as blank/missing: NULL or
# zero-length, NA/NaN, an empty or placeholder string, or the Campbell
# logger error code 7999. Returns a single TRUE/FALSE.
# Fix: NULL/length-0 must be tested before is.na() — is.na(NULL) returns
# logical(0), which makes `||` error, so the original ordering crashed on
# NULL input instead of returning TRUE.
is.blank <- function(var) {
  if (is.null(var) || length(var) == 0) return(TRUE)
  if (is.na(var) || is.nan(var)) return(TRUE)
  (var == "NAN") || (var == "") || (var == " ") || (var == 7999)
}
# #check val
# check.val <- function(var) {
#}
### Set up the database connection and sql query parameters
conn <- dbConnect(MySQL(),dbname="dbmaker", client.flag=CLIENT_MULTI_RESULTS)
#table: sensor_mendota_lake_watertemp_hi_res (the old comment named the met
#table; the INSERTs below target the watertemp table)
nfields <- 11
#Assign field names. These must match the database field names
fields <- c("sampledate","year4", "month", "daynum", "sample_time", "sampletime","data_freq","depth","wtemp","flag_wtemp","hr")
# Assign formatting to each field. Strings (%s) get extra single quotes
fmt <- c("'%s'","%.0f","%.0f","%.0f","'%s'","'%s'","%.0f","%.2f","%.2f","'%s'","%.0f")
### Deal with the input file
#limnofile <- list.files(pattern="MendotaBuoy_limnodata_*.csv")
datapath <- paste0("/opt/data/mendota/",year,"/",jday,"/")
setwd(datapath)
# list.files(pattern=) takes a regular expression, not a shell glob. The old
# pattern "*limno*.dat" only matched by accident of TRE's lenient parsing
# (leading "*" tolerated, unescaped "." matching any character). Use an
# explicit regex anchored on the .dat extension instead.
limnofile <- list.files(pattern="limno.*\\.dat$")
message("Running update_ME_limno_hires_byday.R")
message("limno file: ",limnofile)
#Limno data goes into df.B
# Fail fast with a clear message when no file matched: previously a
# zero-length match fell through and crashed later at the row loop with the
# cryptic "argument is of length zero".
if (length(limnofile) == 0) {
stop("No limno .dat file found in ", datapath, call. = FALSE)
}
if (length(limnofile) > 1) {
warning("Multiple limno files found; using the first: ", limnofile[1], call. = FALSE)
limnofile <- limnofile[1]
}
if (file.exists(limnofile)) {
df.B <- read.csv(limnofile,header=F,stringsAsFactors=FALSE)
nrowsB <- nrow(df.B)
}
# Depth (m) for each of the 23 thermistors, indexed by thermistor number.
depthMap <- c(0,0.5,1,1.5,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20)
# Walk every data row (rows 1-4 are skipped — presumably Campbell logger
# header lines; confirm against the .dat format) and insert one record per
# thermistor into the hi-res watertemp table.
for (rowB in 5:nrowsB) {
# for (rowB in 5:10) {
# Timestamp arrives in UTC; convert to local time (America/Chicago).
utc_limno <- df.B[rowB,1]
lt <- as.POSIXct(utc_limno,tz="UTC")
attributes(lt)$tzone <- "America/Chicago"
# substr start of 0 is treated as 1 by R; yields "YYYY-MM-DD".
sampledate <- substr(as.character(lt),0,10)
year4 <- as.numeric(format(lt,"%Y"))
month <- as.numeric(format(lt,"%m"))
daynum <- as.numeric(format(lt,"%j"))
hr <- 100*as.numeric(format(lt,"%H"))
sample_time <- as.character(format(lt,"%H%M"))
sampletime <- as.character(format(lt,"%H:%M:%S"))
data_freq <- 1
for (therm in 1:23) {
#Temp (index to first temp is 5)
wtemp <- as.numeric(df.B[rowB,therm+4])
# Flag 'C' = blank/invalid reading (stored as NULL); flag 'H' = outside the
# per-month min/max range for this thermistor.
if (is.blank(wtemp)||wtemp<0) {
wtemp <- NA #Make the value NA, not zero
flag_wtemp <- 'C'
} else if ( (wtemp<minRange[therm,month]) || (wtemp>maxRange[therm,month]) ) {
flag_wtemp <- 'H'
} else {
flag_wtemp <- NA
}
depth <- depthMap[therm]
# Build the INSERT statement field-by-field from the local variables named
# in `fields`. NOTE(review): values are pulled via eval(parse(text=...)) —
# it works, but mget(fields) would be safer; also dbGetQuery() is used for
# an INSERT where DBI's dbExecute() is the intended call. Confirm before
# refactoring.
sql <- "INSERT IGNORE INTO sensor_mendota_lake_watertemp_hi_res ("
for (i in 1:nfields) { sql <- paste0(sql,fields[i],",") }
sql <- substr(sql,1,nchar(sql)-1) #remove last comma
sql <- paste0(sql,") VALUES (")
for (i in 1:nfields) {
field_value <- sprintf(fmt[i],eval(parse(text=fields[i])))
if ( is.na(field_value) || (field_value == "'NA'") || (field_value == "NA") ) { field_value <- 'NULL'}
#message(i," ",field_value)
sql <- paste0(sql,field_value,",") #valued fields
}
sql <- substr(sql,1,nchar(sql)-1) #remove last comma
sql <- paste0(sql,");")
#print(sql)
result <- dbGetQuery(conn,sql)
}#for therm
}#for rowB
dbDisconnect(conn)
|
/ME/update_ME_limno_hires_byday.R
|
no_license
|
xujunjiejack/LTER-Buoys
|
R
| false
| false
| 4,485
|
r
|
# THIS NEEDS WORK - tighten up the value-checking. Integrate Campbell's 7999 value => should set 'D' flag
library(RMySQL)
options(scipen=999) #This disables scientific notation for values
#Process the ME limno dat at the end of each day when buoy data in available from AOS. This should run between
#1930-midnight on the relevant day so the correct directory can be found. This also does range-checking.
#Limno data is recorded once per minute
### Get current date. When this script runs, UTC is one day ahead so we can just use our current jday.
date <- Sys.Date()
year <- as.character(format(date,"%Y"))
jday <- as.character(format(date,"%j")) #It's 3-digit char because it will be part of the path
month <- as.numeric(format(date,"%m"))
datestr <- as.character(format(date,"%Y%m%d"))
#message(date())
##For testing and single-day processing:
#year <- "2017"
#jday <- "114"
message("Processing Mendota watertemp hires for: jday=",jday," year=",year)
### Load from the range-checking file
#rangeFile <- "../range_checks_new.csv"
rangeFile <- "/triton/BuoyData/range_checks_new.csv"
df.R <- read.csv(rangeFile,header=T,stringsAsFactors=FALSE) #Read header to index the proper row
#Create two matrices to hold min/max range values; rows are thermistor number (depth) and cols are month
# The watertemp ranges live in alternating rows of the CSV starting at data
# row 138: max at 138, 140, 142, ... and min at 139, 141, 143, ...
# NOTE(review): 138/139 are hard-coded offsets into the shared range file —
# confirm they still point at the Mendota watertemp section if the CSV changes.
minRange <- matrix(nrow=23,ncol=12)
maxRange <- matrix(nrow=23,ncol=12)
for (row in 1:23) {
for (mo in 1:12) {
maxRange[row,mo] <- df.R[2*(row-1)+138,mo+1]
minRange[row,mo] <- df.R[2*(row-1)+139,mo+1]
}#mo
}#row
### Functions
# Check for blank data
# Check whether a raw logger value counts as blank/missing: NULL or
# zero-length, NA/NaN, an empty or placeholder string, or the Campbell
# logger error code 7999. Returns a single TRUE/FALSE.
# Fix: NULL/length-0 must be tested before is.na() — is.na(NULL) returns
# logical(0), which makes `||` error, so the original ordering crashed on
# NULL input instead of returning TRUE.
is.blank <- function(var) {
  if (is.null(var) || length(var) == 0) return(TRUE)
  if (is.na(var) || is.nan(var)) return(TRUE)
  (var == "NAN") || (var == "") || (var == " ") || (var == 7999)
}
# #check val
# check.val <- function(var) {
#}
### Set up the database connection and sql query parameters
conn <- dbConnect(MySQL(),dbname="dbmaker", client.flag=CLIENT_MULTI_RESULTS)
#table: sensor_mendota_lake_watertemp_hi_res (the old comment named the met
#table; the INSERTs below target the watertemp table)
nfields <- 11
#Assign field names. These must match the database field names
fields <- c("sampledate","year4", "month", "daynum", "sample_time", "sampletime","data_freq","depth","wtemp","flag_wtemp","hr")
# Assign formatting to each field. Strings (%s) get extra single quotes
fmt <- c("'%s'","%.0f","%.0f","%.0f","'%s'","'%s'","%.0f","%.2f","%.2f","'%s'","%.0f")
### Deal with the input file
#limnofile <- list.files(pattern="MendotaBuoy_limnodata_*.csv")
datapath <- paste0("/opt/data/mendota/",year,"/",jday,"/")
setwd(datapath)
# list.files(pattern=) takes a regular expression, not a shell glob. The old
# pattern "*limno*.dat" only matched by accident of TRE's lenient parsing
# (leading "*" tolerated, unescaped "." matching any character). Use an
# explicit regex anchored on the .dat extension instead.
limnofile <- list.files(pattern="limno.*\\.dat$")
message("Running update_ME_limno_hires_byday.R")
message("limno file: ",limnofile)
#Limno data goes into df.B
# Fail fast with a clear message when no file matched: previously a
# zero-length match fell through and crashed later at the row loop with the
# cryptic "argument is of length zero".
if (length(limnofile) == 0) {
stop("No limno .dat file found in ", datapath, call. = FALSE)
}
if (length(limnofile) > 1) {
warning("Multiple limno files found; using the first: ", limnofile[1], call. = FALSE)
limnofile <- limnofile[1]
}
if (file.exists(limnofile)) {
df.B <- read.csv(limnofile,header=F,stringsAsFactors=FALSE)
nrowsB <- nrow(df.B)
}
# Depth (m) for each of the 23 thermistors, indexed by thermistor number.
depthMap <- c(0,0.5,1,1.5,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20)
# Walk every data row (rows 1-4 are skipped — presumably Campbell logger
# header lines; confirm against the .dat format) and insert one record per
# thermistor into the hi-res watertemp table.
for (rowB in 5:nrowsB) {
# for (rowB in 5:10) {
# Timestamp arrives in UTC; convert to local time (America/Chicago).
utc_limno <- df.B[rowB,1]
lt <- as.POSIXct(utc_limno,tz="UTC")
attributes(lt)$tzone <- "America/Chicago"
# substr start of 0 is treated as 1 by R; yields "YYYY-MM-DD".
sampledate <- substr(as.character(lt),0,10)
year4 <- as.numeric(format(lt,"%Y"))
month <- as.numeric(format(lt,"%m"))
daynum <- as.numeric(format(lt,"%j"))
hr <- 100*as.numeric(format(lt,"%H"))
sample_time <- as.character(format(lt,"%H%M"))
sampletime <- as.character(format(lt,"%H:%M:%S"))
data_freq <- 1
for (therm in 1:23) {
#Temp (index to first temp is 5)
wtemp <- as.numeric(df.B[rowB,therm+4])
# Flag 'C' = blank/invalid reading (stored as NULL); flag 'H' = outside the
# per-month min/max range for this thermistor.
if (is.blank(wtemp)||wtemp<0) {
wtemp <- NA #Make the value NA, not zero
flag_wtemp <- 'C'
} else if ( (wtemp<minRange[therm,month]) || (wtemp>maxRange[therm,month]) ) {
flag_wtemp <- 'H'
} else {
flag_wtemp <- NA
}
depth <- depthMap[therm]
# Build the INSERT statement field-by-field from the local variables named
# in `fields`. NOTE(review): values are pulled via eval(parse(text=...)) —
# it works, but mget(fields) would be safer; also dbGetQuery() is used for
# an INSERT where DBI's dbExecute() is the intended call. Confirm before
# refactoring.
sql <- "INSERT IGNORE INTO sensor_mendota_lake_watertemp_hi_res ("
for (i in 1:nfields) { sql <- paste0(sql,fields[i],",") }
sql <- substr(sql,1,nchar(sql)-1) #remove last comma
sql <- paste0(sql,") VALUES (")
for (i in 1:nfields) {
field_value <- sprintf(fmt[i],eval(parse(text=fields[i])))
if ( is.na(field_value) || (field_value == "'NA'") || (field_value == "NA") ) { field_value <- 'NULL'}
#message(i," ",field_value)
sql <- paste0(sql,field_value,",") #valued fields
}
sql <- substr(sql,1,nchar(sql)-1) #remove last comma
sql <- paste0(sql,");")
#print(sql)
result <- dbGetQuery(conn,sql)
}#for therm
}#for rowB
dbDisconnect(conn)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PdPDB.R
\name{PdPDB}
\alias{PdPDB}
\title{Pattern Discovery in PDB Structures of Metalloproteins}
\usage{
PdPDB(path, metal, n, perc, interactive, dropsReplicate)
}
\arguments{
\item{path}{A string containing the path to the PDB directory.}
\item{metal}{A string containing the PDB chemical symbol of the target prosthetic centre; e.g. SF4 for [4Fe-4S] cluster, ZN for zinc. The PDB chemical symbol is case sensitive for macOS.}
\item{n}{A numerical value giving the number of residues in the following/preceding n positions from the ligated amino acid or nucleotide; if n=1 PdPDB searches for x(L)x motif-like chains, if n=2 for xx(L)xx. (L)igand.}
\item{perc}{A numerical value about the minimum percent of letters in a column otherwise residues are dropped.}
\item{interactive}{A numerical value. 0 interactive, 1 automated (will not cut dendrogram), 2 user decided cut. In mode 1 and 2 ExPASy amino acid frequencies are used as reference.}
\item{dropsReplicate}{A numerical value. 0 keeps replicated patterns, 1 drops replicated patterns entry by entry, 2 keeps only unique patterns.}
}
\value{
PdPDB generates a list of ".csv" and ".svg" files that will be stored in the same folder of the analyzed pdb/cif files (see "path"), its output is as follows:
\item{frequency.csv}{PDB-like patterns (i.e. with PDB chem Ids). "-" and "+" are used for residues out of the n inspecting window or from different monomers, respectively. Patterns come along with their frequency.}
\item{alignment.csv}{Ligand-aligned patterns with dashes, plus signs and gaps ("*"). See 'frequency.csv'.}
\item{following_X_enrichment.csv}{n files. Each file contains enrichment score, z-score and statistics at up to n following positions. X is the +position from ligated residue.}
\item{ligands_enrichment.csv}{Enrichment scores and statistics for ligands.}
\item{notLigands_enrichment.csv}{Enrichment statistics for the whole specimen but ligands.}
\item{preceeding_X_enrichment.csv}{As for "following" but this is meant for residues preceeding ligands. See "following_X_enrichment.csv."}
\item{root_enrichment.csv}{Overall enrichment score.}
\item{logo_Y.csv}{Y files. Each file contains the logo and consensus sequence for a cluster. Y is the cluster number.}
\item{dendrogram.svg}{The dendrogram along with the user-decided cutoff and clusters.}
\item{following_X_proportions.svg}{Plot of the enrichment score per each amino acid in following positions.}
\item{ligands_proportions.svg}{Plot of the enrichment score per each amino acid in ligated position.}
\item{notLigands_proportions.svg}{Plot of the enrichment score per each amino acid in non ligated position.}
\item{preceeding_X_proportions.svg}{Plot of the enrichment score per each amino acid in preceeding positions.}
\item{root_proportions.svg}{Plot of the root enrichment score.}
\item{logo_Y.svg}{Plot of the logo and consensus sequence of the Yth cluster. The complete aligned cluster is given as homonym '.csv' file. Sequences come along with percentages. If the dendrogram is not cut the root logo is given.}
\item{following_X_standardized.svg}{Plot of the z-score per each amino acid in following positions.}
\item{ligands_standardized.svg}{Plot of the z-score per each amino acid in ligated position.}
\item{notLigands_standardized.svg}{Plot of the z-score per each amino acid in non ligated position.}
\item{preceeding_X_standardized.svg}{Plot of the z-score per each amino acid in preceeding positions.}
\item{root_standardized.svg}{Plot of the root z-score.}
\item{patterns.csv}{PDB-like extracted patterns along with the PDB ID and metal IDs. Useful for debugging. Needed for restore.}
\item{PdPDB.log}{PdPDB log file. Useful for debugging. Needed for restore.}
}
\description{
Looks for amino acid and/or nucleotide patterns coordinated to a given prosthetic centre. It also accounts for small molecule ligands. Patterns are aligned, clustered and translated to logo-like sequences to infer coordination motifs.
}
\note{
Files have to be in the local file system and contain the ".pdb" or ".cif" extension. Output files use brackets to highlight ligands and/or 'L' in heading line.
}
\examples{
################ Defining path to PDBs
path_to_PDB="inst/extdata/PDB" # this is where pdb/cif files are stored
################ Research Parameters
metal="SF4" # searches for [4fe-4s] coordinating patterns
n=1 # searches for x(L)x patterns, (L) coordinates to SF4
perc=20 # drops residues with less than the 20\% of frequency
interactive= 0 # interactive. User decided references and dendrogram cut
dropsReplicate=0 # do not remove replicated patterns
################ Launch PdPDB
PdPDB(path_to_PDB,metal,n, perc, interactive, dropsReplicate)
}
\author{
Luca Belmonte, Sheref S. Mansy
}
\references{
Belmonte L, Mansy SS Patterns of Ligands Coordinated to Metallocofactors Extracted from the Protein Data Bank, Journal of Chemical Information and Modeling (accepted)
}
\keyword{PDB,}
\keyword{alignment,}
\keyword{coordinating}
\keyword{ligand}
\keyword{metal,}
\keyword{metalloproteins,}
\keyword{motifs}
\keyword{patterns,}
|
/man/PdPDB.Rd
|
no_license
|
cran/PdPDB
|
R
| false
| true
| 5,168
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PdPDB.R
\name{PdPDB}
\alias{PdPDB}
\title{Pattern Discovery in PDB Structures of Metalloproteins}
\usage{
PdPDB(path, metal, n, perc, interactive, dropsReplicate)
}
\arguments{
\item{path}{A string containing the path to the PDB directory.}
\item{metal}{A string containing the PDB chemical symbol of the target prosthetic centre; e.g. SF4 for [4Fe-4S] cluster, ZN for zinc. The PDB chemical symbol is case sensitive for macOS.}
\item{n}{A numerical value giving the number of residues in the following/preceding n positions from the ligated amino acid or nucleotide; if n=1 PdPDB searches for x(L)x motif-like chains, if n=2 for xx(L)xx. (L)igand.}
\item{perc}{A numerical value about the minimum percent of letters in a column otherwise residues are dropped.}
\item{interactive}{A numerical value. 0 interactive, 1 automated (will not cut dendrogram), 2 user decided cut. In mode 1 and 2 ExPASy amino acid frequencies are used as reference.}
\item{dropsReplicate}{A numerical value. 0 keeps replicated patterns, 1 drops replicated patterns entry by entry, 2 keeps only unique patterns.}
}
\value{
PdPDB generates a list of ".csv" and ".svg" files that will be stored in the same folder of the analyzed pdb/cif files (see "path"), its output is as follows:
\item{frequency.csv}{PDB-like patterns (i.e. with PDB chem Ids). "-" and "+" are used for residues out of the n inspecting window or from different monomers, respectively. Patterns come along with their frequency.}
\item{alignment.csv}{Ligand-aligned patterns with dashes, plus signs and gaps ("*"). See 'frequency.csv'.}
\item{following_X_enrichment.csv}{n files. Each file contains enrichment score, z-score and statistics at up to n following positions. X is the +position from ligated residue.}
\item{ligands_enrichment.csv}{Enrichment scores and statistics for ligands.}
\item{notLigands_enrichment.csv}{Enrichment statistics for the whole specimen but ligands.}
\item{preceeding_X_enrichment.csv}{As for "following" but this is meant for residues preceeding ligands. See "following_X_enrichment.csv."}
\item{root_enrichment.csv}{Overall enrichment score.}
\item{logo_Y.csv}{Y files. Each file contains the logo and consensus sequence for a cluster. Y is the cluster number.}
\item{dendrogram.svg}{The dendrogram along with the user-decided cutoff and clusters.}
\item{following_X_proportions.svg}{Plot of the enrichment score per each amino acid in following positions.}
\item{ligands_proportions.svg}{Plot of the enrichment score per each amino acid in ligated position.}
\item{notLigands_proportions.svg}{Plot of the enrichment score per each amino acid in non ligated position.}
\item{preceeding_X_proportions.svg}{Plot of the enrichment score per each amino acid in preceeding positions.}
\item{root_proportions.svg}{Plot of the root enrichment score.}
\item{logo_Y.svg}{Plot of the logo and consensus sequence of the Yth cluster. The complete aligned cluster is given as homonym '.csv' file. Sequences come along with percentages. If the dendrogram is not cut the root logo is given.}
\item{following_X_standardized.svg}{Plot of the z-score per each amino acid in following positions.}
\item{ligands_standardized.svg}{Plot of the z-score per each amino acid in ligated position.}
\item{notLigands_standardized.svg}{Plot of the z-score per each amino acid in non ligated position.}
\item{preceeding_X_standardized.svg}{Plot of the z-score per each amino acid in preceeding positions.}
\item{root_standardized.svg}{Plot of the root z-score.}
\item{patterns.csv}{PDB-like extracted patterns along with the PDB ID and metal IDs. Useful for debugging. Needed for restore.}
\item{PdPDB.log}{PdPDB log file. Useful for debugging. Needed for restore.}
}
\description{
Looks for amino acid and/or nucleotide patterns coordinated to a given prosthetic centre. It also accounts for small molecule ligands. Patterns are aligned, clustered and translated to logo-like sequences to infer coordination motifs.
}
\note{
Files have to be in the local file system and contain the ".pdb" or ".cif" extension. Output files use brackets to highlight ligands and/or 'L' in heading line.
}
\examples{
################ Defining path to PDBs
path_to_PDB="inst/extdata/PDB" # this is where pdb/cif files are stored
################ Research Parameters
metal="SF4" # searches for [4fe-4s] coordinating patterns
n=1 # searches for x(L)x patterns, (L) coordinates to SF4
perc=20 # drops residues with less than the 20\% of frequency
interactive= 0 # interactive. User decided references and dendrogram cut
dropsReplicate=0 # do not remove replicated patterns
################ Launch PdPDB
PdPDB(path_to_PDB,metal,n, perc, interactive, dropsReplicate)
}
\author{
Luca Belmonte, Sheref S. Mansy
}
\references{
Belmonte L, Mansy SS Patterns of Ligands Coordinated to Metallocofactors Extracted from the Protein Data Bank, Journal of Chemical Information and Modeling (accepted)
}
\keyword{PDB,}
\keyword{alignment,}
\keyword{coordinating}
\keyword{ligand}
\keyword{metal,}
\keyword{metalloproteins,}
\keyword{motifs}
\keyword{patterns,}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{ordinal.gamma}
\alias{ordinal.gamma}
\title{Compute the ordinal gamma association statistic}
\usage{
ordinal.gamma(mat)
}
\arguments{
\item{mat}{a cross tabulation matrix}
}
\description{
Compute the ordinal gamma association statistic
}
\examples{
# Example data from Agresti (1990, p. 21)
jobsat <- matrix(c(20,22,13,7,24,38,28,18,80,104,81,54,82,125,113,92), nrow=4, ncol=4)
ordinal.gamma(jobsat)
}
\references{
Agresti, A. (1990). Categorical data analysis. New York: Wiley.
}
|
/man/ordinal.gamma.Rd
|
no_license
|
mhunter1/rpf
|
R
| false
| false
| 541
|
rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{ordinal.gamma}
\alias{ordinal.gamma}
\title{Compute the ordinal gamma association statistic}
\usage{
ordinal.gamma(mat)
}
\arguments{
\item{mat}{a cross tabulation matrix}
}
\description{
Compute the ordinal gamma association statistic
}
\examples{
# Example data from Agresti (1990, p. 21)
jobsat <- matrix(c(20,22,13,7,24,38,28,18,80,104,81,54,82,125,113,92), nrow=4, ncol=4)
ordinal.gamma(jobsat)
}
\references{
Agresti, A. (1990). Categorical data analysis. New York: Wiley.
}
|
# Create a line graph of friend_count vs. age
# so that each year_joined.bucket is a line
# tracking the median user friend_count across
# age. This means you should have four different
# lines on your plot.
# You should subset the data to exclude the users
# whose year_joined.bucket is NA.
# If you need a hint, see the Instructor Notes.
# This assignment is not graded and
# will be marked as correct when you submit.
# ENTER YOUR CODE BELOW THIS LINE
# ===================================================
library("ggplot2")
pf <- read.delim('data/pseudo_facebook.tsv')
pf$year_joined <- floor(2014 - pf$tenure / 365)
pf$year_joined.bucket <- cut(pf$year_joined, breaks = c(2004, 2009, 2011, 2012, 2014))
plot <- ggplot(data = subset(pf, !is.na(year_joined.bucket)), aes(x = age, y = friend_count)) +
geom_line(aes(color = year_joined.bucket), stat = 'summary', fun.y = median)
print(plot)
|
/Data Analysis with R/L5 Explore Many Variables/PlottingitallTogether.R
|
no_license
|
DataUnicorn/Udacity
|
R
| false
| false
| 903
|
r
|
# Create a line graph of friend_count vs. age
# so that each year_joined.bucket is a line
# tracking the median user friend_count across
# age. This means you should have four different
# lines on your plot.
# You should subset the data to exclude the users
# whose year_joined.bucket is NA.
# If you need a hint, see the Instructor Notes.
# This assignment is not graded and
# will be marked as correct when you submit.
# ENTER YOUR CODE BELOW THIS LINE
# ===================================================
library("ggplot2")
pf <- read.delim('data/pseudo_facebook.tsv')
pf$year_joined <- floor(2014 - pf$tenure / 365)
pf$year_joined.bucket <- cut(pf$year_joined, breaks = c(2004, 2009, 2011, 2012, 2014))
plot <- ggplot(data = subset(pf, !is.na(year_joined.bucket)), aes(x = age, y = friend_count)) +
geom_line(aes(color = year_joined.bucket), stat = 'summary', fun.y = median)
print(plot)
|
## Topic modeling, take 2
#
# The plan:
# 1. Compress cleaned noexcludes into a single file in MALLET-ready format
# 2. Set up MALLET to analyze that file, but don't run yet
# a. to import the instances from that single file, use system() to run MALLET outside of R
# (as in Ben Warwick's 'R2MALLET.r); use my token regex, not his original one
# b. to train the model, use library(mallet) and David Mimno's approach
# (as in 'topic modeling with mallet package.R'), with his optimized (hyper)parameters
# 3. If we don't know number of topics,
# a. choose a subset of the data to train on
# b. use foreach() to try a sequence from 10-200, interval 10
# c. maximize log.likelihood/token, which MALLET outputs somewhere by default. (Find it!)
# 4. Run MALLET with the parameters set up in Step 2, with the topics as chosen in 1 or 3.
##
## 0. Establish the working environment.
# Source dataprep.R only when it has not already been sourced.
# Fix: exists() takes the *name* of an object as a character string. The
# original `exists(sourceloc)` evaluated the variable itself, so it errored
# with "object 'sourceloc' not found" in exactly the case it was meant to
# handle.
if (!exists("sourceloc")) {
  source(file="~/Box Sync/research/dissertations/data, code, and figures/Dissertation-Research/dataprep.R")
}
# # Assume we're typically going to need more Java heap space, set maximum allocation
# # Never mind, this is set by MALLET in $malletloc/bin/mallet,
# # on line 10: "MEMORY=" etc. Leaving it here in case both need to be set.
# Pick a Java heap ceiling per machine; which_computer is presumably set by
# dataprep.R sourced above — confirm.
if (which_computer == "laptop") {
heap_param <- paste("-Xmx","3g",sep="")
} else if (which_computer == "work") {
heap_param <- paste("-Xmx","15g",sep="")
}
options(java.parameters=heap_param)
# What's our dataset?
# dataset_name <- "realconsorts"
dataset_name <- "noexcludes2001_2015"
## Step 1. Compress the files, if they haven't been compressed already
## NB: double-check which commands are commented out before running; this could take a while.
## If the output file already exists, this call will just exit with an error.
file <- path.expand(file.path(sourceloc, 'Shell scripts and commands/ben_clean_and_consolidate.sh'))
# NOTE(review): the result of file.exists() is discarded — it only prints
# when run interactively; wrap in stopifnot() to make it an actual guard.
file.exists(file)
# Run the consolidation shell script on the chosen dataset.
system(paste0('"', file,'" ', dataset_name))
ben.mallet.import <- function(dataset_name="noexcludes",
                              remove_stopwords=TRUE,   # was T; TRUE/FALSE are not reassignable
                              extra_stopwords=FALSE,   # was F
                              # seed=NULL,
                              token_regex='"\\p{L}[-\\p{L}\\p{Po}]+\\p{L}"') {
    # Import a directory of cleaned full-text files into a MALLET instance list.
    #
    # Args:
    #   dataset_name: subset of data to import; determines the input directory
    #       (clean_<dataset_name>_only under fulltextloc) and the output file
    #       (<dataset_name>_instances.mallet under tmloc).
    #   remove_stopwords: if TRUE, pass --remove-stopwords to MALLET.
    #   extra_stopwords: if TRUE, also pass the curated stoplist (optionally
    #       created by ben.mallet.tm(), below) via --extra-stopwords.
    #   token_regex: passed to MALLET --token-regex. Uses \p{Po} ("punctuation
    #       that is not a dash, bracket, quote or connector") plus hyphens so
    #       words break at em-dashes; tokens are at least 3 characters long.
    #
    # Side effects: runs MALLET via system(); writes the instance list and a
    # doc-id index to tmloc; prompts interactively before importing. Depends
    # on globals from dataprep.R: malletloc, fulltextloc, tmloc, sourceloc.
    require(mallet)
    # 2a.1. The command that runs MALLET (malletloc is set in `dataprep.R`).
    mallet_cmd <- file.path(malletloc, "bin", "mallet")
    # 2a.2. Input directory of cleaned files, produced by the Step 1 shell script.
    importdir <- file.path(fulltextloc, paste0("clean_", dataset_name, "_only"))
    # 2a.3. Where to save the instance list (needed again in Step 2b).
    instance_list <- file.path(tmloc, paste0(dataset_name, "_instances.mallet"))
    # 2a.4/2a.5. Assemble the stopword options.
    if (remove_stopwords) {
        stop_options <- "--remove-stopwords"
    } else {
        stop_options <- ""
    }
    if (extra_stopwords) {
        stoplist_file <- file.path(malletloc, "stoplists", "top-and-bottom-plus.txt")
        stop_options <- paste(stop_options, "--extra-stopwords", stoplist_file)
    } else {
        stop_options <- paste(stop_options, "")
    }
    # 2a.6. Skip the import if the instance list already exists: `ls` exits 0
    # when the file is present, so system(scope) is falsy in that case.
    # NB: This assumes the files are already in their own directory.
    scope <- paste0("cd ", "~/'", substr(sourceloc, 3, nchar(sourceloc)), "'",
                    "; cd 'Shell scripts and commands' ; ls ", instance_list)
    if (system(scope)) {
        import_cmd <- paste(mallet_cmd,
                            "import-dir --input", importdir,
                            "--output", instance_list,
                            stop_options,
                            "--keep-sequence TRUE",
                            "--token-regex", token_regex
        )
        # 2a.7. Confirm interactively, then trigger the import.
        go <- readline(paste("About to import instance list with the following command: \n",
                             import_cmd, "\n",
                             "Is that what you meant to do? (Y/N)\n"))
        if(tolower(go) != "y") {
            stop("Never mind, then.")
        }
        message("Beginning import now...")
        if(! system(import_cmd)) {
            print("Done.") # exit status 0 means success
        }
        # Save an index of document ids: the second underscore-delimited field
        # of each .txt filename (extension stripped), one per line.
        message("Saving index of filenames used for this instance list...")
        id_cmd <- "ls *.txt | awk -F _ '{ print $2 }' | awk -F . '{ print $1 }'"
        outputfile <- file.path(tmloc, paste0(dataset_name, "_doc_ids.txt"))
        id_cmd <- paste("cd", importdir, ";", id_cmd, ">", outputfile)
        if(! system(id_cmd)) {
            print("Done.") # If successful, report back.
        }
    } else { # ls succeeded, so the instance list is already on disk
        print("Oh, good, the instance file exists. Moving on...")
    }
}
if (autorun) {
    require(dfrtopics)
    # Train a 60-topic model with dfrtopics on the instance list produced by
    # ben.mallet.import, then persist the full results under tmloc.
    instance_file <- file.path(tmloc, paste0(dataset_name, "_instances.mallet"))
    m <- train_model(instances = instance_file,
                     n_topics = 60,
                     threads = 10L)
    summary(m)
    write_mallet_model(m, file.path(tmloc, paste0(dataset_name, "modeling_results")))
}
# Step 2b. Use Mimno's library(mallet) to actually train the model on those instances.
ben.mallet.tm <- function(K=50,                  # how many topics?
                          dataset_name="noexcludes2001_2015",  # which subset of data to include?
                          imported_file=file.path(tmloc, paste0(dataset_name, "_instances.mallet")), # created by ben.mallet.import
                          curate_vocab=FALSE,    # create new stoplist from top/bottom words?
                          top.cutoff.pct=10,     # remove words in this % of documents or more (only if curate_vocab=TRUE)
                          num.top.words=7,       # how many words to label topics with
                          runlong=FALSE,         # do extra iterations?
                          diagnostics=TRUE       # NOTE(review): currently unused in this body
                          )
{
    # Train an LDA topic model on a saved MALLET instance list, following
    # Mimno's library(mallet) workflow. Invisibly returns a list with the
    # doc/topic and topic/word matrices (file-backed big.matrix objects),
    # two labelings of the topics, and any curated-out top words.
    # Depends on globals from dataprep.R: tmloc, malletloc.
    require(mallet)
    # 2b.1. Create a topic trainer object (a Java object whose state
    # accumulates as we call its methods).
    topic.model <- MalletLDA(num.topics=K)
    # 2b.2. Load documents from the instance list built by ben.mallet.import.
    topic.model$loadDocuments(imported_file)
    # 2b.3. Vocabulary and word-frequency statistics, as big.matrix objects
    # to save memory; useful for curating the stopword list.
    library(bigmemory)
    vocabulary <- as.big.matrix(topic.model$getVocabulary(), type="character")
    word.freqs <- as.big.matrix(mallet.word.freqs(topic.model), type="integer")
    doc.freq.index <- morder(word.freqs, "doc.freq", decreasing=TRUE)
    word.freqs.sorted <- mpermute(word.freqs, order=doc.freq.index, cols="doc.freq")
    head(word.freqs.sorted, 30) # inspect the words occurring in the most documents
    #### 2b.4. Optional: curate the vocabulary (Mimno 2012, pp. 4-5: remove
    # the top 5-10% of words by document frequency and those in < 5 docs).
    # BUGFIX: top.words is now defined even when curate_vocab=FALSE, so the
    # return list at the bottom no longer errors on an undefined object.
    top.words <- NULL
    if (curate_vocab) {
        # 2b.4.a. Words occurring in more than top.cutoff.pct of the documents.
        cutoff <- length(doc.freq.index) * (top.cutoff.pct/100)
        top.words.index <- mwhich(word.freqs.sorted, "doc.freq", list(cutoff), list("gt"))
        top.words <- word.freqs.sorted[top.words.index, ]
        nrow(top.words) / length(vocabulary)  # informational; value discarded
        # 2b.4.b. Words occurring in fewer than 5 (count, not %) documents.
        bottom.words.index <- mwhich(word.freqs.sorted, "doc.freq", list(5), list("lt"))
        bottom.words <- word.freqs.sorted[bottom.words.index, ]
        # 2b.4.c. Write the combined stoplist for reuse by ben.mallet.import.
        tandb.stoplist <- word.freqs.sorted[c(top.words.index, bottom.words.index), "words"]
        tandb.stoplist <- sort(as.character(tandb.stoplist))
        write(tandb.stoplist, file=file.path(malletloc, "stoplists", "top-and-bottom.txt"))
        # 2b.4.d. A few extra hand-picked words that need pruning.
        extra.stoplist <- c(tandb.stoplist, "dissertation", "chapter", "UMI")
        extra.stoplist <- sort(as.character(extra.stoplist))
        write(extra.stoplist, file=file.path(malletloc, "stoplists", "top-and-bottom-plus.txt"))
    }
    # BUGFIX: removed a stray bare `r` expression here, which would raise
    # "object 'r' not found" at runtime.
    # 2b.5. Optimize hyperparameters every 20 iterations, after 50 burn-in.
    topic.model$setAlphaOptimization(20, 50)
    # 2b.6. Train the model (hyperparameter optimization is on by default).
    if(runlong) {
        topic.model$train(500) # Even this is much smaller than Ben Marwick's default 1000!
    } else {
        topic.model$train(200)
    }
    # 2b.7. A few iterations picking the single best topic per token, rather
    # than sampling from the posterior distribution.
    topic.model$maximize(10)
    # 2b.8. Smoothed, normalized probabilities (nothing has exactly 0
    # probability): documents x topics and topics x words, file-backed.
    doc.topics <- as.big.matrix(mallet.doc.topics(topic.model, smoothed=TRUE, normalized=TRUE),
                    backingfile=file.path(malletloc, paste0(dataset_name, "K", K, "_doc_topics")))
    topic.words <- as.big.matrix(mallet.topic.words(topic.model, smoothed=TRUE, normalized=TRUE),
                    backingfile=file.path(malletloc, paste0(dataset_name, "K", K, "_topic_words")))
    # 2b.9. Label topics with their most frequent words.
    topic.labels <- mallet.top.words(topic.model, topic.words, num.top.words)
    # 2b.10. Alternative labels by most *distinctive* (TF*ITF) words.
    # NOTE(review): top.words.tfitf resolves `vocabulary` in the global
    # environment, not this function's local copy — confirm a global
    # `vocabulary` exists when this runs.
    topic.labels.tfitf <- top.words.tfitf(topic.model, topic.words, num.top.words)
    # Save the trained model. BUGFIX: compress=TRUE was previously passed to
    # file.path() (becoming part of the path) instead of to save().
    save(topic.model, file=file.path(malletloc, paste0(dataset_name, "K", K, ".gz")), compress=TRUE)
    # BUGFIX: previously `return <- list(...)` assigned to a variable named
    # `return`; invisible() preserves the original (invisible) return.
    invisible(list("doc.topics" = doc.topics,          # doc/topic big.matrix, filebacked
                   "topic.words" = topic.words,        # topic/word big.matrix, filebacked
                   "topic.labels" = topic.labels,      # most frequent words in each topic
                   "topic.labels.tfitf" = topic.labels.tfitf, # most distinctive words per topic
                   "top.words" = top.words))           # words cut as too frequent (NULL if none)
}
## **Helper function: top.words.tfitf**
# I'd like to get the top words in each topic ranked not by term frequency alone
# but by uniqueness to the topic -- i.e. term frequency * inverse topic
# frequency (as modeled on TF*IDF). These will then be used to determine topic
# subject matter.
top.words.tfitf <- function (topic.model, topic.words, num.top.words = 10,
                             vocabulary = get("vocabulary", envir = globalenv()))
{
    # Rank each topic's top words by TF*ITF (term frequency * inverse topic
    # frequency, by analogy with TF*IDF) — i.e. by uniqueness to the topic
    # rather than raw frequency — for use as topic labels.
    #
    # Args:
    #   topic.model: unused here; kept for signature parity with
    #       mallet.top.words().
    #   topic.words: topics x words matrix of word weights (for a normalized
    #       topic each row sums to 1, so TF is just the cell value).
    #   num.top.words: how many words to report per topic.
    #   vocabulary: word labels indexed like the columns of topic.words.
    #       BUGFIX: previously a free variable resolved only in the global
    #       environment (the local `vocabulary` inside ben.mallet.tm is not
    #       visible here); now a parameter defaulting to that global for
    #       backward compatibility.
    #
    # Returns: a list of length K (number of topics); element k is a noquote
    # comma-separated string of topic k's top words.
    # 1. Term frequency = the weight at each cell of topic.words.
    tf <- topic.words
    # 2. Inverse topic frequency per term: log(#topics / total weight of the
    # term across topics).
    K <- nrow(topic.words)
    itf <- apply(topic.words, 2, sum)
    itf <- log(K / itf)
    # 3. TF * ITF. R would recycle the itf vector down columns (snaking),
    # which is not what we want; transpose so recycling runs along terms,
    # then undo the transpose.
    tf.itf <- t(t(tf) * itf)
    # 4. For each topic, indices of the num.top.words highest TF*ITF terms;
    # column indices coincide with vocabulary indices.
    top.indices <- lapply(seq_len(K), FUN=function(x) head(order(tf.itf[x,], decreasing=TRUE), num.top.words))
    lapply(seq_len(K), FUN=function(x) noquote(paste0(vocabulary[top.indices[[x]]], collapse=", ")))
}
## Step 4. Run MALLET with the parameters set up in Step 2, with the topics as chosen in 1 or 3.
if (autorun) {
    Sys.time()   # timestamp before the run
    # ben.mallet.import(dataset_name="realconsorts")
    ben.mallet.tm(dataset_name = "noexcludes2001_2015")
    Sys.time()   # timestamp after the run
}
|
/topic modeling 3.R
|
no_license
|
benmiller314/Dissertation-Research
|
R
| false
| false
| 15,184
|
r
|
## Topic modeling, take 2
#
# The plan:
# 1. Compress cleaned noexcludes into a single file in MALLET-ready format
# 2. Set up MALLET to analyze that file, but don't run yet
# a. to import the instances from that single file, use system() to run MALLET outside of R
# (as in Ben Marwick's 'R2MALLET.r'); use my token regex, not his original one
# b. to train the model, use library(mallet) and David Mimno's approach
# (as in 'topic modeling with mallet package.R'), with his optimized (hyper)parameters
# 3. If we don't know number of topics,
# a. choose a subset of the data to train on
# b. use foreach() to try a sequence from 10-200, interval 10
# c. maximize log.likelihood/token, which MALLET outputs somewhere by default. (Find it!)
# 4. Run MALLET with the parameters set up in Step 2, with the topics as chosen in 1 or 3.
##
## 0. Establish the working environment.
# BUGFIX: exists() takes the *name* of an object as a string; the bare symbol
# `sourceloc` would itself error with "object 'sourceloc' not found" when unset.
if (!exists("sourceloc")) {
    source(file="~/Box Sync/research/dissertations/data, code, and figures/Dissertation-Research/dataprep.R")
}
# Java heap for rJava-based work in this session. (MALLET's own command-line
# memory is set separately in $malletloc/bin/mallet, line 10: "MEMORY=";
# kept here in case both need to be set.)
if (which_computer == "laptop") {
    heap_param <- paste("-Xmx","3g",sep="")
} else if (which_computer == "work") {
    heap_param <- paste("-Xmx","15g",sep="")
} else {
    # BUGFIX: previously heap_param was undefined on unrecognized machines,
    # so options() below would error; fall back to the conservative size.
    heap_param <- "-Xmx3g"
}
options(java.parameters=heap_param)
# What's our dataset?
# dataset_name <- "realconsorts"
dataset_name <- "noexcludes2001_2015"
## Step 1. Compress the files, if they haven't been compressed already
## NB: double-check which commands are commented out before running; this could take a while.
## If the output file already exists, this call will just exit with an error.
## Step 1. Consolidate the cleaned full-text files for `dataset_name`.
file <- path.expand(file.path(sourceloc, 'Shell scripts and commands/ben_clean_and_consolidate.sh'))
# BUGFIX: the bare file.exists(file) result was computed and silently
# discarded; surface a warning so a failing system() call is explicable.
if (!file.exists(file)) {
    warning("Consolidation script not found: ", file, call. = FALSE)
}
system(paste0('"', file,'" ', dataset_name))
## Step 2. Set up MALLET to analyze that file, but don't run yet
## Step 2a. Run MALLET externally to read in the cumulative file as a list of instances.
#
# (This use of system() inspired by https://gist.github.com/benmarwick/4537873,
# via https://gist.github.com/drlabratory/6198388). Instructions for MALLET import
# are online at http://mallet.cs.umass.edu/import.php.
ben.mallet.import <- function(dataset_name="noexcludes",
                              remove_stopwords=TRUE,   # was T; TRUE/FALSE are not reassignable
                              extra_stopwords=FALSE,   # was F
                              # seed=NULL,
                              token_regex='"\\p{L}[-\\p{L}\\p{Po}]+\\p{L}"') {
    # Import a directory of cleaned full-text files into a MALLET instance list.
    #
    # Args:
    #   dataset_name: subset of data to import; determines the input directory
    #       (clean_<dataset_name>_only under fulltextloc) and the output file
    #       (<dataset_name>_instances.mallet under tmloc).
    #   remove_stopwords: if TRUE, pass --remove-stopwords to MALLET.
    #   extra_stopwords: if TRUE, also pass the curated stoplist (optionally
    #       created by ben.mallet.tm(), below) via --extra-stopwords.
    #   token_regex: passed to MALLET --token-regex. Uses \p{Po} ("punctuation
    #       that is not a dash, bracket, quote or connector") plus hyphens so
    #       words break at em-dashes; tokens are at least 3 characters long.
    #
    # Side effects: runs MALLET via system(); writes the instance list and a
    # doc-id index to tmloc; prompts interactively before importing. Depends
    # on globals from dataprep.R: malletloc, fulltextloc, tmloc, sourceloc.
    require(mallet)
    # 2a.1. The command that runs MALLET (malletloc is set in `dataprep.R`).
    mallet_cmd <- file.path(malletloc, "bin", "mallet")
    # 2a.2. Input directory of cleaned files, produced by the Step 1 shell script.
    importdir <- file.path(fulltextloc, paste0("clean_", dataset_name, "_only"))
    # 2a.3. Where to save the instance list (needed again in Step 2b).
    instance_list <- file.path(tmloc, paste0(dataset_name, "_instances.mallet"))
    # 2a.4/2a.5. Assemble the stopword options.
    if (remove_stopwords) {
        stop_options <- "--remove-stopwords"
    } else {
        stop_options <- ""
    }
    if (extra_stopwords) {
        stoplist_file <- file.path(malletloc, "stoplists", "top-and-bottom-plus.txt")
        stop_options <- paste(stop_options, "--extra-stopwords", stoplist_file)
    } else {
        stop_options <- paste(stop_options, "")
    }
    # 2a.6. Skip the import if the instance list already exists: `ls` exits 0
    # when the file is present, so system(scope) is falsy in that case.
    # NB: This assumes the files are already in their own directory.
    scope <- paste0("cd ", "~/'", substr(sourceloc, 3, nchar(sourceloc)), "'",
                    "; cd 'Shell scripts and commands' ; ls ", instance_list)
    if (system(scope)) {
        import_cmd <- paste(mallet_cmd,
                            "import-dir --input", importdir,
                            "--output", instance_list,
                            stop_options,
                            "--keep-sequence TRUE",
                            "--token-regex", token_regex
        )
        # 2a.7. Confirm interactively, then trigger the import.
        go <- readline(paste("About to import instance list with the following command: \n",
                             import_cmd, "\n",
                             "Is that what you meant to do? (Y/N)\n"))
        if(tolower(go) != "y") {
            stop("Never mind, then.")
        }
        message("Beginning import now...")
        if(! system(import_cmd)) {
            print("Done.") # exit status 0 means success
        }
        # Save an index of document ids: the second underscore-delimited field
        # of each .txt filename (extension stripped), one per line.
        message("Saving index of filenames used for this instance list...")
        id_cmd <- "ls *.txt | awk -F _ '{ print $2 }' | awk -F . '{ print $1 }'"
        outputfile <- file.path(tmloc, paste0(dataset_name, "_doc_ids.txt"))
        id_cmd <- paste("cd", importdir, ";", id_cmd, ">", outputfile)
        if(! system(id_cmd)) {
            print("Done.") # If successful, report back.
        }
    } else { # ls succeeded, so the instance list is already on disk
        print("Oh, good, the instance file exists. Moving on...")
    }
}
if (autorun) {
    require(dfrtopics)
    # Train a 60-topic model with dfrtopics on the instance list produced by
    # ben.mallet.import, then persist the full results under tmloc.
    instance_file <- file.path(tmloc, paste0(dataset_name, "_instances.mallet"))
    m <- train_model(instances = instance_file,
                     n_topics = 60,
                     threads = 10L)
    summary(m)
    write_mallet_model(m, file.path(tmloc, paste0(dataset_name, "modeling_results")))
}
# Step 2b. Use Mimno's library(mallet) to actually train the model on those instances.
ben.mallet.tm <- function(K=50,                  # how many topics?
                          dataset_name="noexcludes2001_2015",  # which subset of data to include?
                          imported_file=file.path(tmloc, paste0(dataset_name, "_instances.mallet")), # created by ben.mallet.import
                          curate_vocab=FALSE,    # create new stoplist from top/bottom words?
                          top.cutoff.pct=10,     # remove words in this % of documents or more (only if curate_vocab=TRUE)
                          num.top.words=7,       # how many words to label topics with
                          runlong=FALSE,         # do extra iterations?
                          diagnostics=TRUE       # NOTE(review): currently unused in this body
                          )
{
    # Train an LDA topic model on a saved MALLET instance list, following
    # Mimno's library(mallet) workflow. Invisibly returns a list with the
    # doc/topic and topic/word matrices (file-backed big.matrix objects),
    # two labelings of the topics, and any curated-out top words.
    # Depends on globals from dataprep.R: tmloc, malletloc.
    require(mallet)
    # 2b.1. Create a topic trainer object (a Java object whose state
    # accumulates as we call its methods).
    topic.model <- MalletLDA(num.topics=K)
    # 2b.2. Load documents from the instance list built by ben.mallet.import.
    topic.model$loadDocuments(imported_file)
    # 2b.3. Vocabulary and word-frequency statistics, as big.matrix objects
    # to save memory; useful for curating the stopword list.
    library(bigmemory)
    vocabulary <- as.big.matrix(topic.model$getVocabulary(), type="character")
    word.freqs <- as.big.matrix(mallet.word.freqs(topic.model), type="integer")
    doc.freq.index <- morder(word.freqs, "doc.freq", decreasing=TRUE)
    word.freqs.sorted <- mpermute(word.freqs, order=doc.freq.index, cols="doc.freq")
    head(word.freqs.sorted, 30) # inspect the words occurring in the most documents
    #### 2b.4. Optional: curate the vocabulary (Mimno 2012, pp. 4-5: remove
    # the top 5-10% of words by document frequency and those in < 5 docs).
    # BUGFIX: top.words is now defined even when curate_vocab=FALSE, so the
    # return list at the bottom no longer errors on an undefined object.
    top.words <- NULL
    if (curate_vocab) {
        # 2b.4.a. Words occurring in more than top.cutoff.pct of the documents.
        cutoff <- length(doc.freq.index) * (top.cutoff.pct/100)
        top.words.index <- mwhich(word.freqs.sorted, "doc.freq", list(cutoff), list("gt"))
        top.words <- word.freqs.sorted[top.words.index, ]
        nrow(top.words) / length(vocabulary)  # informational; value discarded
        # 2b.4.b. Words occurring in fewer than 5 (count, not %) documents.
        bottom.words.index <- mwhich(word.freqs.sorted, "doc.freq", list(5), list("lt"))
        bottom.words <- word.freqs.sorted[bottom.words.index, ]
        # 2b.4.c. Write the combined stoplist for reuse by ben.mallet.import.
        tandb.stoplist <- word.freqs.sorted[c(top.words.index, bottom.words.index), "words"]
        tandb.stoplist <- sort(as.character(tandb.stoplist))
        write(tandb.stoplist, file=file.path(malletloc, "stoplists", "top-and-bottom.txt"))
        # 2b.4.d. A few extra hand-picked words that need pruning.
        extra.stoplist <- c(tandb.stoplist, "dissertation", "chapter", "UMI")
        extra.stoplist <- sort(as.character(extra.stoplist))
        write(extra.stoplist, file=file.path(malletloc, "stoplists", "top-and-bottom-plus.txt"))
    }
    # BUGFIX: removed a stray bare `r` expression here, which would raise
    # "object 'r' not found" at runtime.
    # 2b.5. Optimize hyperparameters every 20 iterations, after 50 burn-in.
    topic.model$setAlphaOptimization(20, 50)
    # 2b.6. Train the model (hyperparameter optimization is on by default).
    if(runlong) {
        topic.model$train(500) # Even this is much smaller than Ben Marwick's default 1000!
    } else {
        topic.model$train(200)
    }
    # 2b.7. A few iterations picking the single best topic per token, rather
    # than sampling from the posterior distribution.
    topic.model$maximize(10)
    # 2b.8. Smoothed, normalized probabilities (nothing has exactly 0
    # probability): documents x topics and topics x words, file-backed.
    doc.topics <- as.big.matrix(mallet.doc.topics(topic.model, smoothed=TRUE, normalized=TRUE),
                    backingfile=file.path(malletloc, paste0(dataset_name, "K", K, "_doc_topics")))
    topic.words <- as.big.matrix(mallet.topic.words(topic.model, smoothed=TRUE, normalized=TRUE),
                    backingfile=file.path(malletloc, paste0(dataset_name, "K", K, "_topic_words")))
    # 2b.9. Label topics with their most frequent words.
    topic.labels <- mallet.top.words(topic.model, topic.words, num.top.words)
    # 2b.10. Alternative labels by most *distinctive* (TF*ITF) words.
    # NOTE(review): top.words.tfitf resolves `vocabulary` in the global
    # environment, not this function's local copy — confirm a global
    # `vocabulary` exists when this runs.
    topic.labels.tfitf <- top.words.tfitf(topic.model, topic.words, num.top.words)
    # Save the trained model. BUGFIX: compress=TRUE was previously passed to
    # file.path() (becoming part of the path) instead of to save().
    save(topic.model, file=file.path(malletloc, paste0(dataset_name, "K", K, ".gz")), compress=TRUE)
    # BUGFIX: previously `return <- list(...)` assigned to a variable named
    # `return`; invisible() preserves the original (invisible) return.
    invisible(list("doc.topics" = doc.topics,          # doc/topic big.matrix, filebacked
                   "topic.words" = topic.words,        # topic/word big.matrix, filebacked
                   "topic.labels" = topic.labels,      # most frequent words in each topic
                   "topic.labels.tfitf" = topic.labels.tfitf, # most distinctive words per topic
                   "top.words" = top.words))           # words cut as too frequent (NULL if none)
}
## **Helper function: top.words.tfitf**
# I'd like to get the top words in each topic ranked not by term frequency alone
# but by uniqueness to the topic -- i.e. term frequency * inverse topic
# frequency (as modeled on TF*IDF). These will then be used to determine topic
# subject matter.
top.words.tfitf <- function (topic.model, topic.words, num.top.words = 10,
                             vocabulary = get("vocabulary", envir = globalenv()))
{
    # Rank each topic's top words by TF*ITF (term frequency * inverse topic
    # frequency, by analogy with TF*IDF) — i.e. by uniqueness to the topic
    # rather than raw frequency — for use as topic labels.
    #
    # Args:
    #   topic.model: unused here; kept for signature parity with
    #       mallet.top.words().
    #   topic.words: topics x words matrix of word weights (for a normalized
    #       topic each row sums to 1, so TF is just the cell value).
    #   num.top.words: how many words to report per topic.
    #   vocabulary: word labels indexed like the columns of topic.words.
    #       BUGFIX: previously a free variable resolved only in the global
    #       environment (the local `vocabulary` inside ben.mallet.tm is not
    #       visible here); now a parameter defaulting to that global for
    #       backward compatibility.
    #
    # Returns: a list of length K (number of topics); element k is a noquote
    # comma-separated string of topic k's top words.
    # 1. Term frequency = the weight at each cell of topic.words.
    tf <- topic.words
    # 2. Inverse topic frequency per term: log(#topics / total weight of the
    # term across topics).
    K <- nrow(topic.words)
    itf <- apply(topic.words, 2, sum)
    itf <- log(K / itf)
    # 3. TF * ITF. R would recycle the itf vector down columns (snaking),
    # which is not what we want; transpose so recycling runs along terms,
    # then undo the transpose.
    tf.itf <- t(t(tf) * itf)
    # 4. For each topic, indices of the num.top.words highest TF*ITF terms;
    # column indices coincide with vocabulary indices.
    top.indices <- lapply(seq_len(K), FUN=function(x) head(order(tf.itf[x,], decreasing=TRUE), num.top.words))
    lapply(seq_len(K), FUN=function(x) noquote(paste0(vocabulary[top.indices[[x]]], collapse=", ")))
}
## Step 4. Run MALLET with the parameters set up in Step 2, with the topics as chosen in 1 or 3.
if (autorun) {
    Sys.time()   # timestamp before the run
    # ben.mallet.import(dataset_name="realconsorts")
    ben.mallet.tm(dataset_name = "noexcludes2001_2015")
    Sys.time()   # timestamp after the run
}
|
# Shiny chemoinformatics app: session setup.
# Enlarge the Java heap *before* rJava/rcdk load, or the setting has no effect.
options(java.parameters = "-Xmx8g" )
library(shiny)
library(DT)
library(png)
library(rJava)
library(rcdk)
library(fingerprint)
library(enrichR)
library(webchem)
# NOTE(review): plyr is attached before tidyverse, presumably so that dplyr's
# functions mask plyr's rather than the reverse — confirm before reordering.
library(plyr)
library(tidyverse)
library(plotly)
library(shinyBS)
library(shinythemes)
library(visNetwork)
library(igraph)
library(shinyjs)
library(shinycssloaders)
# Swap the UI from the splash panel to the main app once startup completes.
loading <- function() {
  shinyjs::hide("loading_page")
  shinyjs::show("main_content")
}
is.smiles <- function(x, verbose = TRUE) { ## corrected version from webchem
  # Test whether a single string parses as a valid SMILES via rcdk.
  #
  # Args:
  #   x: a single character string (length-1; longer input errors).
  #   verbose: unused; retained for compatibility with webchem's signature.
  # Returns TRUE if rcdk::parse.smiles() yields a molecule, FALSE otherwise.
  if (!requireNamespace("rcdk", quietly = TRUE)) {
    stop("rcdk needed for this function to work. Please install it.",
         call. = FALSE)
  }
  # x <- 'Clc(c(Cl)c(Cl)c1C(=O)O)c(Cl)c1Cl'
  if (length(x) > 1) {
    stop('Cannot handle multiple input strings.')
  }
  out <- try(rcdk::parse.smiles(x), silent = TRUE)
  # BUGFIX: use the scalar, short-circuiting `||` in a scalar if() condition
  # rather than the vectorized `|`.
  if (inherits(out[[1]], "try-error") || is.null(out[[1]])) {
    return(FALSE)
  } else {
    return(TRUE)
  }
}
parseInputFingerprint <- function(input, fp.type) {
  # Parse a SMILES string with rcdk and compute its fingerprint(s).
  #
  # Args:
  #   input: a single SMILES string.
  #   fp.type: fingerprint type passed to rcdk::get.fingerprint
  #       (e.g. "extended", "circular", "maccs").
  # Returns a list of fingerprints (one per parsed molecule). For invalid
  # SMILES it prints a message and returns its text — callers should treat
  # any non-fingerprint result as failure.
  if (is.smiles(input)) {   # removed redundant `== TRUE` comparison
    input.mol <- parse.smiles(as.character(input))
    # Results discarded; these presumably configure the molecule objects in
    # place before fingerprinting — confirm against rcdk docs.
    lapply(input.mol, do.typing)
    lapply(input.mol, do.aromaticity)
    lapply(input.mol, do.isotopes)
    fp.inp <- lapply(input.mol, get.fingerprint, type = fp.type)
  } else {
    print('Please input a valid SMILES string.')
  }
}
# Similarity of one fingerprint against a list of fingerprints, calling the
# fingerprint package's compiled "fpdistance" routine directly.
distance.minified <- function(fp1,fp.list){ #this function is a stripped down fingerprint::distance that runs about 2-3x faster; big gains for the app, but not as feature rich
  # Expand fp1 into a dense 0/1 vector; f2 is a zero template reused below.
  n <- length(fp1)
  f1 <- numeric(n)
  f2 <- numeric(n)
  f1[fp1@bits] <- 1
  # Returns a numeric vector, one similarity per fingerprint in fp.list.
  sapply(fp.list, function(x){
    # NB: this subset-assignment creates a *local* copy of the zero template
    # inside the anonymous function on each call, so set bits do not
    # accumulate across fingerprints.
    f2[x@bits] <- 1
    sim <- 0.0
    # Call into the fingerprint package's C code. The integer 1 presumably
    # selects the metric (Tanimoto?) — confirm against fingerprint's C
    # source; slot 5 of the returned argument list holds the result.
    ret <- .C("fpdistance", as.double(f1), as.double(f2),
              as.integer(n), as.integer(1),
              as.double(sim),
              PACKAGE="fingerprint")
    return (ret[[5]])
  })
}
# Look up the SMILES string for a drug by its common name in the global
# `db.names` table; returns a one-column data frame.
convertDrugToSmiles <- function(input) {
  db.names %>%
    filter(common_name == input) %>%
    dplyr::select(smiles)
}
getTargetList <- function(selectdrugs) {
    # Return target annotations for the selected drugs from the global `db`,
    # one row per drug/gene pair, sorted by descending n_quantitative.
    # (Previously the pipeline was assigned to a throwaway variable as the
    # last expression; it is now returned directly.)
    #
    # Args:
    #   selectdrugs: vector of internal_id values to look up.
    db %>%
        filter(internal_id %in% selectdrugs) %>%
        as.data.frame() %>%
        dplyr::select(common_name, hugo_gene, mean_pchembl, n_quantitative,
                      n_qualitative, known_selectivity_index, confidence, internal_id) %>%
        arrange(-n_quantitative)
}
similarityFunction <- function(input, fp.type) {
    # Compute Tanimoto similarity between the input SMILES and the matching
    # precomputed fingerprint library (globals fp.extended / fp.circular /
    # fp.maccs). Returns up to the 50 most similar compounds as a data frame
    # with columns `match` (internal id) and `similarity`.
    # (Removed no-op self-assignments `input <- input` / `fp.type <- fp.type`.)
    # NOTE(review): `sim` is undefined for any other fp.type — the function
    # then errors; confirm the UI restricts fp.type to these three values.
    fp.inp <- parseInputFingerprint(input, fp.type)
    if(fp.type=="extended"){ sim <- distance.minified(fp.inp[[1]], fp.extended) }
    if(fp.type=="circular"){ sim <- distance.minified(fp.inp[[1]], fp.circular) }
    if(fp.type=="maccs"){ sim <- distance.minified(fp.inp[[1]], fp.maccs) }
    # Hard cutoff of 50 avoids overloading the app; a large n of compounds
    # can make the visualizations sluggish.
    as.data.frame(sim) %>%
        rownames_to_column("match") %>%
        set_names(c("match", "similarity")) %>%
        top_n(50, similarity)
}
getSimMols <- function(sims, sim.thres) {
    # Filter similarity results to those at or above sim.thres and join in
    # compound names from the global `db`. Returns a data frame with columns
    # internal_id, common_name, `Tanimoto Similarity`.
    sims2 <- sims %>% dplyr::filter(similarity >= sim.thres) %>% arrange(-similarity)
    sims2$internal_id <- as.character(sims2$match)
    sims2$`Tanimoto Similarity` <- signif(sims2$similarity, 3)  # 3 sig figs for display
    # (Result now returned directly instead of via a throwaway assignment.)
    left_join(sims2, db) %>%
        dplyr::select(internal_id, common_name, `Tanimoto Similarity`) %>%
        distinct() %>%
        as.data.frame()
}
# Render a 2-D depiction of the molecule described by a SMILES string.
getMolImage <- function(input) {
  mol <- parse.smiles(input)[[1]]
  view.image.2d(mol)
}
#This should no longer be required as app now works entirely with internal_id under the hood to avoid switching
# getInternalId <- function(input) { ##this is specifically for getting internal ids for use in network to external links
# ids <- filter(db.names, internal_id==input)
# unique(ids$internal_id)
# }
getExternalDrugLinks <- function(internal.id) {
  # Comma-collapse every external link recorded for the given internal id(s).
  matched <- dplyr::filter(db.links, internal_id %in% internal.id)
  # invisible() mirrors the original's assignment-as-last-expression return.
  invisible(paste(as.character(matched$link), collapse = ","))
}
getExternalGeneLinks <- function(gene) {
  # External link(s) recorded for the given HUGO gene symbol(s).
  matched <- dplyr::filter(db.gene.links, hugo_gene %in% gene)
  # invisible() mirrors the original's assignment-as-last-expression return.
  invisible(as.character(matched$link))
}
# Build a visNetwork edge table linking the query compound ("input") to each
# selected similar drug. Edge width scales with squared Tanimoto similarity;
# `title` carries the external links shown on hover.
getNetwork <- function(drugsfound, selectdrugs) {
targets <- drugsfound %>%
distinct() %>% filter(internal_id %in% selectdrugs)
targets$from <- "input"
targets$to <- as.character(targets$common_name)
targets$width <- ((targets$`Tanimoto Similarity`)^2) * 10
targets$color <- "tomato"
# NOTE(review): assumes `targets` ends up with exactly one row per element
# of `selectdrugs`, in the same order, so `links` aligns row-wise with the
# data frame -- confirm upstream guarantees this.
links <- sapply(selectdrugs, function(x){
links <- getExternalDrugLinks(x)
})
targets$title <- links
# Last assignment is the (invisible) return value.
targets <- dplyr::select(targets, from, to, width, color, title)
}
# Drug -> gene edge table for the target network. `edge.size` toggles
# confidence-scaled edge widths (rescaled into 1..10) vs a fixed width of 5.
getTargetNetwork <- function(selectdrugs, edge.size) {
targets <- getTargetList(selectdrugs)
targets$from <- targets$common_name
targets$to <- as.character(targets$hugo_gene)
if(edge.size==TRUE){
targets$width <- scales::rescale(targets$confidence, to = c(1,10))
}
if(edge.size==FALSE){
targets$width <- 5
}
targets$color <- "tomato"
# Drops rows whose endpoints are the literal string "NA"; rows with genuine
# NA endpoints are also removed because filter() discards NA conditions.
targets <- dplyr::select(targets, from, to, width, color, internal_id) %>%
filter(from !="NA" & to != "NA")
}
# Enrichr gene-set libraries queried by getGeneOntologyfromTargets().
dbs <- c("GO_Molecular_Function_2017", "GO_Cellular_Component_2017", "GO_Biological_Process_2017",
    "KEGG_2016")
getGeneOntologyfromTargets <- function(selectdrugs) {
  # Run Enrichr GO/KEGG enrichment over the targets of the selected drugs.
  # Returns the enrichr() result list, or prints "no targets" when the
  # selected drugs have no annotated targets.
  targets <- as.data.frame(getTargetList(selectdrugs))
  target.list <- targets$hugo_gene
  if (length(target.list) > 0) {
    enrichr(target.list, dbs)
  } else {
    print("no targets")
  }
}
getMolsFromGenes <- function(genes) {
  # Molecules annotated against the supplied gene(s). With more than one
  # gene, keeps only molecules whose per-molecule row count equals the
  # number of genes requested (an intersection across genes).
  if (length(genes) > 1) {
    mols <- db %>%
      filter(hugo_gene %in% genes) %>%
      group_by(internal_id) %>%
      mutate(count = n()) %>%
      filter(count == length(genes)) %>%
      ungroup() %>%
      distinct() %>%
      dplyr::select(-count)
  } else {
    mols <- db %>% filter(hugo_gene == genes)
  }
  mols %>%
    select(internal_id, common_name, hugo_gene, mean_pchembl,
           n_quantitative, n_qualitative, known_selectivity_index, confidence)
}
getMolsFromGeneNetworks.edges <- function(inp.gene, genenetmols, edge.size, gene.filter.metric) {
  # Drug -> gene edge table for the gene-query network, restricted to the
  # top 15 molecules by `gene.filter.metric`. `inp.gene` is accepted but
  # unused; kept for interface compatibility with callers.
  top.mols <- genenetmols %>% top_n(15, !!sym(gene.filter.metric))
  edges <- db %>%
    filter(internal_id %in% top.mols$internal_id) %>%
    distinct()
  edges$from <- as.character(edges$internal_id)
  edges$to <- as.character(edges$hugo_gene)
  if (edge.size == TRUE) {
    edges$width <- edges$confidence / 10
  }
  if (edge.size == FALSE) {
    edges$width <- 5
  }
  edges$color <- "tomato"
  as.data.frame(edges %>% dplyr::select(from, to, width, color))
}
# Node table (id, label, color, title) for the gene-query network:
# blue drug nodes plus green gene nodes, with external links as hover
# titles. `inp.gene` is accepted but unused here.
getMolsFromGeneNetworks.nodes <- function(inp.gene, genenetmols, gene.filter.metric) {
mols <- genenetmols %>% top_n(15, !!sym(gene.filter.metric))
net <- filter(db, internal_id %in% mols$internal_id) %>%
distinct() # %>%
# group_by(common_name) %>%
# top_n(20, confidence) %>%
# ungroup()
# NOTE(review): ids pair unique internal_ids with unique common_names,
# assuming a 1:1 mapping between the two; duplicated common names would
# misalign id and label lengths -- confirm against db.
id <- c(unique(as.character(net$internal_id)),
unique(as.character(net$hugo_gene)))
label <- c(unique(as.character(net$common_name)),
unique(as.character(net$hugo_gene)))
color <- c(rep("blue", length(unique(as.character(net$common_name)))),
rep("green", length(unique(as.character(net$hugo_gene)))))
druglinks <- sapply(unique(as.character(net$internal_id)), function(x){
druglinks <- getExternalDrugLinks(x)
})
genelinks <- sapply(unique(as.character(net$hugo_gene)), function(x){
getExternalGeneLinks(x)
})
title <- c(druglinks, genelinks)
# cbind() of character vectors then as.data.frame(): columns become factors
# on R < 4.0 -- TODO confirm the target R version tolerates this.
net <- as.data.frame(cbind(id, label, color, title))
}
getSmiles <- function(input.name) {
  # Resolve a chemical name to a SMILES string via the Chemical Identifier
  # Resolver (webchem::cir_query); the name is URL-encoded first.
  encoded <- URLencode(input.name)
  as.vector(cir_query(encoded, representation = "smiles", first = TRUE))
}
# Correlate the CTRP drug-response profile of the closest CTRP analog of
# `input` against every other CTRP drug (Spearman), returning a data frame
# with rho, raw and BH-adjusted p-values joined to drug names/similarities.
plotSimCTRPDrugs <- function(input, fp.type) {
fp.inp <- parseInputFingerprint(input, fp.type = fp.type)
if(fp.type == "circular"){fp.ctrp <- fp.ctrp.circular}
if(fp.type == "extended"){fp.ctrp <- fp.ctrp.extended}
if(fp.type == "maccs"){fp.ctrp <- fp.ctrp.maccs}
# Full fingerprint::distance here (not distance.minified); CTRP set is
# smaller, so the slower call is tolerated.
sims <- lapply(fp.inp, function(i) {
sim <- sapply(fp.ctrp, function(j) {
distance(i, j)
})
bar <- as.data.frame(sim)
bar$match <- rownames(bar)
bar
})
sims <- ldply(sims)
sims2 <- sims %>% arrange(-sim)
sims2$cpd_smiles <- as.character(sims2$match)
sims2$`Tanimoto Similarity` <- signif(sims2$sim, 3)
drugs <- left_join(sims2, ctrp.structures) %>% dplyr::select(makenames, cpd_name, `Tanimoto Similarity`) %>% distinct()
# NOTE(review): top_n(1, ...) returns ALL ties; multiple rows here would
# break the single-column extraction below -- confirm ties cannot occur or
# guard with slice(1).
top_drug <- top_n(drugs, 1, `Tanimoto Similarity`)
drug.resp.single <- drug.resp[[top_drug$makenames]]
# p.val = -1 is a sentinel for "too few complete pairs"; filtered out below.
cors<-sapply(colnames(drug.resp), function(x){
test <- data.frame(drug.resp.single, drug.resp[[x]])
if(nrow(test[complete.cases(test),])>1){
cor<-cor.test(drug.resp.single, drug.resp[[x]], method = "spearman", use = "complete.obs")
res <- c("p.val" = cor$p.value, cor$estimate)
}else{
res <- c("p.val" = -1, "rho" = 0)
}
})
cors <- cors %>%
t() %>%
as.data.frame() %>%
rownames_to_column("makenames") %>%
inner_join(drugs) %>%
filter(p.val != -1)
cors$Correlation <- cors$rho
# Adjust p-values only across the correlations that were actually computed.
cors$`BH adj p.val` <- p.adjust(cors$p.val, method = "BH")
cors
}
# Sanger/GDSC twin of plotSimCTRPDrugs: correlate the response profile of
# the closest Sanger analog of `input` against every other Sanger drug
# (Spearman), returning rho plus raw and BH-adjusted p-values.
plotSimSangDrugs <- function(input, fp.type) {
fp.inp <- parseInputFingerprint(input, fp.type = fp.type)
if(fp.type == "circular"){fp.sang <- fp.sang.circular}
if(fp.type == "extended"){fp.sang <- fp.sang.extended}
if(fp.type == "maccs"){fp.sang <- fp.sang.maccs}
sims <- lapply(fp.inp, function(i) {
sim <- sapply(fp.sang, function(j) {
distance(i, j)
})
bar <- as.data.frame(sim)
bar$match <- rownames(bar)
bar
})
sims <- ldply(sims)
sims2 <- sims %>% arrange(-sim)
sims2$smiles <- as.character(sims2$match)
sims2$`Tanimoto Similarity` <- signif(sims2$sim, 3)
drugs <- left_join(sims2, sang.structures) %>% dplyr::select(makenames, sanger_names, `Tanimoto Similarity`) %>% distinct()
# NOTE(review): top_n(1, ...) returns ALL ties; multiple rows would break
# the single-column extraction below -- confirm or guard with slice(1).
top_drug <- top_n(drugs, 1, `Tanimoto Similarity`)
drug.resp.single <- drug.resp.sang[[top_drug$makenames]]
# p.val = -1 is a sentinel for "too few complete pairs"; filtered out below.
cors<-sapply(colnames(drug.resp.sang), function(x){
test <- data.frame(drug.resp.single, drug.resp.sang[[x]])
if(nrow(test[complete.cases(test),])>1){
cor<-cor.test(drug.resp.single, drug.resp.sang[[x]], method = "spearman", use = "complete.obs")
res <- c("p.val" = cor$p.value, cor$estimate)
}else{
res <- c("p.val" = -1, "rho" = 0)
}
})
cors <- cors %>%
t() %>%
as.data.frame() %>%
rownames_to_column("makenames") %>%
inner_join(drugs) %>%
filter(p.val != -1)
cors$Correlation <- cors$rho
cors$`BH adj p.val` <- p.adjust(cors$p.val, method = "BH")
cors
}
|
/global.R
|
permissive
|
xschildw/polypharmacology-db
|
R
| false
| false
| 10,759
|
r
|
options(java.parameters = "-Xmx8g" )
library(shiny)
library(DT)
library(png)
library(rJava)
library(rcdk)
library(fingerprint)
library(enrichR)
library(webchem)
library(plyr)
library(tidyverse)
library(plotly)
library(shinyBS)
library(shinythemes)
library(visNetwork)
library(igraph)
library(shinyjs)
library(shinycssloaders)
# Swap the splash screen for the main UI once app startup completes.
loading <- function() {
shinyjs::hide("loading_page")
shinyjs::show("main_content")
}
is.smiles <- function(x, verbose = TRUE) { ##corrected version from webchem
  # TRUE if `x` parses as a valid SMILES string via rcdk, FALSE otherwise.
  # `verbose` is accepted for backward compatibility but is unused.
  if (!requireNamespace("rcdk", quietly = TRUE)) {
    stop("rcdk needed for this function to work. Please install it.",
         call. = FALSE)
  }
  if (length(x) > 1) {
    stop('Cannot handle multiple input strings.')
  }
  out <- try(rcdk::parse.smiles(x), silent = TRUE)
  # Guard the try-error case on `out` itself before subsetting: `out[[1]]`
  # of a try-error does not inherit "try-error", so the original check could
  # miss a parse failure. Also use scalar `||` (short-circuits, and `|` on
  # an `if` condition is an elementwise-operator misuse).
  if (inherits(out, "try-error") || is.null(out[[1]])) {
    return(FALSE)
  }
  TRUE
}
parseInputFingerprint <- function(input, fp.type) {
  # Parse a SMILES string and compute fingerprints of the requested type;
  # returns a named list with one fingerprint per parsed molecule.
  # Raises an error for invalid SMILES. (The original printed a message and
  # returned print()'s value -- a character string -- which downstream
  # fingerprint code cannot use; failing fast gives callers a clear signal.)
  if (!is.smiles(input)) {
    stop('Please input a valid SMILES string.', call. = FALSE)
  }
  input.mol <- parse.smiles(as.character(input))
  # Perceive atom types, aromaticity and isotopes before fingerprinting.
  lapply(input.mol, do.typing)
  lapply(input.mol, do.aromaticity)
  lapply(input.mol, do.isotopes)
  lapply(input.mol, get.fingerprint, type = fp.type)
}
distance.minified <- function(fp1,fp.list){ #this function is a stripped down fingerprint::distance that runs about 2-3x faster; big gains for the app, but not as feature rich
n <- length(fp1)
f1 <- numeric(n)
f2 <- numeric(n)
f1[fp1@bits] <- 1
sapply(fp.list, function(x){
f2[x@bits] <- 1
sim <- 0.0
ret <- .C("fpdistance", as.double(f1), as.double(f2),
as.integer(n), as.integer(1),
as.double(sim),
PACKAGE="fingerprint")
return (ret[[5]])
})
}
convertDrugToSmiles <- function(input) {
filt <- filter(db.names, common_name == input) %>% dplyr::select(smiles)
filt
}
getTargetList <- function(selectdrugs) {
targets <- db %>%
filter(internal_id %in% selectdrugs) %>%
as.data.frame() %>%
dplyr::select(common_name, hugo_gene, mean_pchembl, n_quantitative, n_qualitative, known_selectivity_index, confidence, internal_id) %>%
arrange(-n_quantitative)
}
similarityFunction <- function(input, fp.type) {
input <- input
fp.type <- fp.type
fp.inp <- parseInputFingerprint(input, fp.type)
if(fp.type=="extended"){ sim <- distance.minified(fp.inp[[1]], fp.extended) }
if(fp.type=="circular"){ sim <- distance.minified(fp.inp[[1]], fp.circular) }
if(fp.type=="maccs"){ sim <- distance.minified(fp.inp[[1]], fp.maccs) }
# if(fp.type=="kr"){ sim <- distance.minified(fp.inp[[1]], fp.kr) }
# if(fp.type=="pubchem"){ sim <- distance.minified(fp.inp[[1]], fp.pubchem)
bar <- as.data.frame(sim) %>%
rownames_to_column("match") %>%
set_names(c("match", "similarity")) %>%
top_n(50, similarity) ##hard cutoff to avoid overloading the app - large n of compounds can cause sluggish response wrt visualizations
}
getSimMols <- function(sims, sim.thres) {
sims2 <- sims %>% dplyr::filter(similarity >= sim.thres) %>% arrange(-similarity)
sims2$internal_id <- as.character(sims2$match)
sims2$`Tanimoto Similarity` <- signif(sims2$similarity, 3)
targets <- left_join(sims2, db) %>%
dplyr::select(internal_id, common_name, `Tanimoto Similarity`) %>%
distinct() %>%
as.data.frame()
}
getMolImage <- function(input) {
smi <- parse.smiles(input)
view.image.2d(smi[[1]])
}
#This should no longer be required as app now works entirely with internal_id under the hood to avoid switching
# getInternalId <- function(input) { ##this is specifically for getting internal ids for use in network to external links
# ids <- filter(db.names, internal_id==input)
# unique(ids$internal_id)
# }
getExternalDrugLinks <- function(internal.id) {
links <- filter(db.links, internal_id %in% internal.id)
links <- as.character(links$link)
links <- paste(links, collapse = ",")
}
getExternalGeneLinks <- function(gene) {
links <- filter(db.gene.links, hugo_gene %in% gene)
links <- as.character(links$link)
}
getNetwork <- function(drugsfound, selectdrugs) {
targets <- drugsfound %>%
distinct() %>% filter(internal_id %in% selectdrugs)
targets$from <- "input"
targets$to <- as.character(targets$common_name)
targets$width <- ((targets$`Tanimoto Similarity`)^2) * 10
targets$color <- "tomato"
links <- sapply(selectdrugs, function(x){
links <- getExternalDrugLinks(x)
})
targets$title <- links
targets <- dplyr::select(targets, from, to, width, color, title)
}
getTargetNetwork <- function(selectdrugs, edge.size) {
targets <- getTargetList(selectdrugs)
targets$from <- targets$common_name
targets$to <- as.character(targets$hugo_gene)
if(edge.size==TRUE){
targets$width <- scales::rescale(targets$confidence, to = c(1,10))
}
if(edge.size==FALSE){
targets$width <- 5
}
targets$color <- "tomato"
targets <- dplyr::select(targets, from, to, width, color, internal_id) %>%
filter(from !="NA" & to != "NA")
}
dbs <- c("GO_Molecular_Function_2017", "GO_Cellular_Component_2017", "GO_Biological_Process_2017",
"KEGG_2016")
getGeneOntologyfromTargets <- function(selectdrugs) {
selectdrugs <- selectdrugs
targets <- getTargetList(selectdrugs) %>% as.data.frame()
target.list <- targets$hugo_gene
if (length(target.list) > 0) {
enriched <- enrichr(target.list, dbs)
} else {
print("no targets")
}
}
getMolsFromGenes <- function(genes) {
if(length(genes)>1){
mols <- db %>%
filter(hugo_gene %in% genes) %>%
group_by(internal_id) %>%
mutate(count = n()) %>%
filter(count == length(genes)) %>%
ungroup() %>%
distinct() %>%
dplyr::select(-count)
}else{
mols <- filter(db, hugo_gene == genes)
}
mols %>%
select(internal_id, common_name, hugo_gene, mean_pchembl, n_quantitative, n_qualitative, known_selectivity_index, confidence)
}
getMolsFromGeneNetworks.edges <- function(inp.gene, genenetmols, edge.size, gene.filter.metric) {
mols <- genenetmols %>% top_n(15, !!sym(gene.filter.metric))
net <- filter(db, internal_id %in% mols$internal_id) %>% distinct()
net$from <- as.character(net$internal_id)
net$to <- as.character(net$hugo_gene)
if(edge.size==TRUE){
net$width <- (net$confidence)/10
}
if(edge.size==FALSE){
net$width <- 5
}
net$color <- "tomato"
net <- net %>% dplyr::select(from, to, width, color)
as.data.frame(net)
}
getMolsFromGeneNetworks.nodes <- function(inp.gene, genenetmols, gene.filter.metric) {
mols <- genenetmols %>% top_n(15, !!sym(gene.filter.metric))
net <- filter(db, internal_id %in% mols$internal_id) %>%
distinct() # %>%
# group_by(common_name) %>%
# top_n(20, confidence) %>%
# ungroup()
id <- c(unique(as.character(net$internal_id)),
unique(as.character(net$hugo_gene)))
label <- c(unique(as.character(net$common_name)),
unique(as.character(net$hugo_gene)))
color <- c(rep("blue", length(unique(as.character(net$common_name)))),
rep("green", length(unique(as.character(net$hugo_gene)))))
druglinks <- sapply(unique(as.character(net$internal_id)), function(x){
druglinks <- getExternalDrugLinks(x)
})
genelinks <- sapply(unique(as.character(net$hugo_gene)), function(x){
getExternalGeneLinks(x)
})
title <- c(druglinks, genelinks)
net <- as.data.frame(cbind(id, label, color, title))
}
getSmiles <- function(input.name) {
input.name <- input.name
input.name <- URLencode(input.name)
query <- as.vector(cir_query(input.name, representation = "smiles", first = TRUE))
query
}
plotSimCTRPDrugs <- function(input, fp.type) {
fp.inp <- parseInputFingerprint(input, fp.type = fp.type)
if(fp.type == "circular"){fp.ctrp <- fp.ctrp.circular}
if(fp.type == "extended"){fp.ctrp <- fp.ctrp.extended}
if(fp.type == "maccs"){fp.ctrp <- fp.ctrp.maccs}
sims <- lapply(fp.inp, function(i) {
sim <- sapply(fp.ctrp, function(j) {
distance(i, j)
})
bar <- as.data.frame(sim)
bar$match <- rownames(bar)
bar
})
sims <- ldply(sims)
sims2 <- sims %>% arrange(-sim)
sims2$cpd_smiles <- as.character(sims2$match)
sims2$`Tanimoto Similarity` <- signif(sims2$sim, 3)
drugs <- left_join(sims2, ctrp.structures) %>% dplyr::select(makenames, cpd_name, `Tanimoto Similarity`) %>% distinct()
top_drug <- top_n(drugs, 1, `Tanimoto Similarity`)
drug.resp.single <- drug.resp[[top_drug$makenames]]
cors<-sapply(colnames(drug.resp), function(x){
test <- data.frame(drug.resp.single, drug.resp[[x]])
if(nrow(test[complete.cases(test),])>1){
cor<-cor.test(drug.resp.single, drug.resp[[x]], method = "spearman", use = "complete.obs")
res <- c("p.val" = cor$p.value, cor$estimate)
}else{
res <- c("p.val" = -1, "rho" = 0)
}
})
cors <- cors %>%
t() %>%
as.data.frame() %>%
rownames_to_column("makenames") %>%
inner_join(drugs) %>%
filter(p.val != -1)
cors$Correlation <- cors$rho
cors$`BH adj p.val` <- p.adjust(cors$p.val, method = "BH")
cors
}
plotSimSangDrugs <- function(input, fp.type) {
fp.inp <- parseInputFingerprint(input, fp.type = fp.type)
if(fp.type == "circular"){fp.sang <- fp.sang.circular}
if(fp.type == "extended"){fp.sang <- fp.sang.extended}
if(fp.type == "maccs"){fp.sang <- fp.sang.maccs}
sims <- lapply(fp.inp, function(i) {
sim <- sapply(fp.sang, function(j) {
distance(i, j)
})
bar <- as.data.frame(sim)
bar$match <- rownames(bar)
bar
})
sims <- ldply(sims)
sims2 <- sims %>% arrange(-sim)
sims2$smiles <- as.character(sims2$match)
sims2$`Tanimoto Similarity` <- signif(sims2$sim, 3)
drugs <- left_join(sims2, sang.structures) %>% dplyr::select(makenames, sanger_names, `Tanimoto Similarity`) %>% distinct()
top_drug <- top_n(drugs, 1, `Tanimoto Similarity`)
drug.resp.single <- drug.resp.sang[[top_drug$makenames]]
cors<-sapply(colnames(drug.resp.sang), function(x){
test <- data.frame(drug.resp.single, drug.resp.sang[[x]])
if(nrow(test[complete.cases(test),])>1){
cor<-cor.test(drug.resp.single, drug.resp.sang[[x]], method = "spearman", use = "complete.obs")
res <- c("p.val" = cor$p.value, cor$estimate)
}else{
res <- c("p.val" = -1, "rho" = 0)
}
})
cors <- cors %>%
t() %>%
as.data.frame() %>%
rownames_to_column("makenames") %>%
inner_join(drugs) %>%
filter(p.val != -1)
cors$Correlation <- cors$rho
cors$`BH adj p.val` <- p.adjust(cors$p.val, method = "BH")
cors
}
|
source("/data2/3to5/I35/scripts/analysisfunctions.R")
library(ncdf4)
library(maps)
library(mapdata)
library(maptools)
library(fields)
library(sp)
library(raster)
library(rasterVis)
library(ggplot2)
library(modi)
weighted.var2 <- function(x, w, na.rm = FALSE) {
  # Unbiased weighted variance for reliability weights:
  #   V1 / (V1^2 - V2) * sum(w * (x - mu)^2),  V1 = sum(w), V2 = sum(w^2).
  if (na.rm) {
    keep <- !is.na(x)
    x <- x[keep]
    w <- w[keep]
  }
  v1 <- sum(w)
  v2 <- sum(w^2)
  mu <- sum(x * w) / v1
  (v1 / (v1^2 - v2)) * sum(w * (x - mu)^2, na.rm = na.rm)
}
weighted.var3 <- function(x, w, na.rm = FALSE) {
  # One-pass algebraic form of the unbiased weighted variance:
  #   (sum(w x^2) V1 - sum(w x)^2) / (V1^2 - V2).
  if (na.rm) {
    keep <- !is.na(x)
    x <- x[keep]
    w <- w[keep]
  }
  v1 <- sum(w)
  (sum(w * x^2) * v1 - sum(w * x)^2) / (v1^2 - sum(w^2))
}
# ---- Configuration: variable, season and weighting scheme ----
setwd("/home/woot0002/DS_ind/")
var = varin = "pr"
type="ann"
weightingused = "new mexico"
stateapplied = "new mexico"
# Load Sanderson ensemble weights plus best/posterior BMA weight tables.
# File names differ between the full-domain weighting and a regional one.
if(weightingused=="full"){
load(file=paste("Sanderson_EnsembleWeights_",var,"_",type,".Rdata",sep=""))
BMAweights_GCM = read.table(paste("best_BMA_combo_",var,".txt",sep=""))
BMAweights_LOCA = read.table(paste("best_BMA_combo_LOCA_",var,".txt",sep=""))
load(paste("/home/woot0002/DS_ind/BMAposterior_meansandvars_",var,"_WU",weightingused,".Rdata",sep=""))
BMAweightsGCM = read.table(paste("posterior_BMA_combo_",var,".txt",sep=""))
BMAweightsLOCA = read.table(paste("posterior_BMA_combo_LOCA_",var,".txt",sep=""))
} else {
load(file=paste("Sanderson_EnsembleWeights_",var,"_",type,"_",weightingused,".Rdata",sep=""))
BMAweights_GCM = read.table(paste("best_BMA_combo_",var,"_",weightingused,".txt",sep=""))
BMAweights_LOCA = read.table(paste("best_BMA_combo_LOCA_",var,"_",weightingused,".txt",sep=""))
load(paste("/home/woot0002/DS_ind/BMAposterior_meansandvars_",var,"_WU",weightingused,".Rdata",sep=""))
BMAweightsGCM = read.table(paste("posterior_BMA_combo_",var,"_",weightingused,".txt",sep=""))
BMAweightsLOCA = read.table(paste("posterior_BMA_combo_LOCA_",var,"_",weightingused,".txt",sep=""))
}
# NOTE(review): GCMhdat / LOCAhdat are used here but are rebuilt later in
# this script -- presumably the load() calls above restore versions of them;
# confirm the object names stored inside the .Rdata files.
GCMhdat$BMA = t(BMAweights_GCM)[,1]
LOCAhdat$BMA = t(BMAweights_LOCA)[,1]
#####
# get domain mask
# Read the state mask used to subset the grids (cells inside the state == 1).
# NOTE(review): `regionmask` is only defined when stateapplied != "full";
# the gather loops below branch on the same condition before using it.
if(stateapplied!="full"){
test = nc_open(paste("/home/woot0002/DS_ind/",stateapplied,"_mask.nc",sep=""))
regionmask = ncvar_get(test,"mask")
lon = ncvar_get(test,"lon")
lat = ncvar_get(test,"lat")
nc_close(test)
}
####
GCMweights= GCMhdat
LOCAweights = LOCAhdat
# Locate pre-computed climatology NetCDF files.
# Sys.glob() replaces system("ls ...", intern = TRUE): same alphabetically
# sorted result, but with no shell dependency, and an empty match yields
# character(0) instead of a shell error message.
# precip files
GCMfiles_pr = Sys.glob("/home/woot0002/GCMs/regrid/pr_*histclimo*.nc")
LOCAfiles_pr = Sys.glob("/home/woot0002/LOCA/regrid/pr_*histclimo*.nc")
GCMprojfiles_pr = Sys.glob("/home/woot0002/GCMs/regrid/pr_*projclimo*.nc")
LOCAprojfiles_pr = Sys.glob("/home/woot0002/LOCA/regrid/pr_*projclimo*.nc")
LIVNEHfile_pr = Sys.glob("/home/woot0002/monthlyclimo/pr_day*livneh*.nc")
# tasmax files
GCMfiles_tmax = Sys.glob("/home/woot0002/GCMs/regrid/tasmax_*histclimo*.nc")
LOCAfiles_tmax = Sys.glob("/home/woot0002/LOCA/regrid/tasmax_*histclimo*.nc")
GCMprojfiles_tmax = Sys.glob("/home/woot0002/GCMs/regrid/tasmax_*projclimo*.nc")
LOCAprojfiles_tmax = Sys.glob("/home/woot0002/LOCA/regrid/tasmax_*projclimo*.nc")
LIVNEHfile_tmax = Sys.glob("/home/woot0002/monthlyclimo/tasmax_day*livneh*.nc")
# subset files down
# Keep only files for the GCMs named in GCMlist, in GCMlist order, so that
# weights, metadata tables and the data lists built below stay aligned.
load("/home/woot0002/DS_ind/manuscript1/GCMlist.Rdata")
GCM_hfiles_pr = GCM_pfiles_pr = LOCA_hfiles_pr = LOCA_pfiles_pr = c()
GCM_hfiles_tmax = GCM_pfiles_tmax = LOCA_hfiles_tmax = LOCA_pfiles_tmax = c()
# NOTE(review): each grep() is assumed to match exactly one file. Zero
# matches errors with "replacement has length zero"; multiple matches would
# silently misalign the lists -- confirm the file naming is unique per GCM.
for(i in 1:length(GCMlist)){
#pr
GCM_hfiles_pr[i] = GCMfiles_pr[grep(paste(GCMlist[i],"_",sep=""),GCMfiles_pr)]
GCM_pfiles_pr[i] = GCMprojfiles_pr[grep(paste(GCMlist[i],"_",sep=""),GCMprojfiles_pr)]
LOCA_hfiles_pr[i] = LOCAfiles_pr[grep(paste(GCMlist[i],"_",sep=""),LOCAfiles_pr)]
LOCA_pfiles_pr[i] = LOCAprojfiles_pr[grep(paste(GCMlist[i],"_",sep=""),LOCAprojfiles_pr)]
#tmax
GCM_hfiles_tmax[i] = GCMfiles_tmax[grep(paste(GCMlist[i],"_",sep=""),GCMfiles_tmax)]
GCM_pfiles_tmax[i] = GCMprojfiles_tmax[grep(paste(GCMlist[i],"_",sep=""),GCMprojfiles_tmax)]
LOCA_hfiles_tmax[i] = LOCAfiles_tmax[grep(paste(GCMlist[i],"_",sep=""),LOCAfiles_tmax)]
LOCA_pfiles_tmax[i] = LOCAprojfiles_tmax[grep(paste(GCMlist[i],"_",sep=""),LOCAprojfiles_tmax)]
}
###
# create full filelist + metadata table - historical
# Metadata is parsed out of the file names: path component 6 is the file
# name, whose underscore-separated fields 2,3,4,6 give GCM, experiment,
# downscaling method and training data.
#GCMs
filelist1 = do.call("rbind",strsplit(GCM_hfiles_pr,"/",fixed=TRUE))
filelist2 = do.call("rbind",strsplit(filelist1[,6],"_",fixed=TRUE))
filelist2 = as.data.frame(filelist2)
filelist2$training = "NA"
GCMhdat = filelist2[,c(2,3,4,6)]
names(GCMhdat) = c("GCM","exp","DS","training")
#LOCA
filelist1 = do.call("rbind",strsplit(LOCA_hfiles_pr,"/",fixed=TRUE))
filelist2 = do.call("rbind",strsplit(filelist1[,6],"_",fixed=TRUE))
filelist2 = as.data.frame(filelist2)
filelist2$training = "Livneh"
LOCAhdat = filelist2[,c(2,3,4,6)]
names(LOCAhdat) = names(GCMhdat)
#All metadata
# Append one observation row (Livneh) so metadata aligns with the data
# lists below, where the Livneh file is appended last.
GCM = rep(NA,1)
exp = rep(NA,1)
DS = rep(NA,1)
training = "LIVNEH"
obsdat = data.frame(GCM,exp,DS,training)
GCMhdat = rbind(GCMhdat,obsdat)
LOCAhdat= rbind(LOCAhdat,obsdat)
# all files
GCMgroup_pr = c(GCM_hfiles_pr,LIVNEHfile_pr)
LOCAgroup_pr = c(LOCA_hfiles_pr,LIVNEHfile_pr)
GCMgroup_tmax = c(GCM_hfiles_tmax,LIVNEHfile_tmax)
LOCAgroup_tmax = c(LOCA_hfiles_tmax,LIVNEHfile_tmax)
###
# create full filelist + metadata table - projected
# Same file-name parsing as the historical table; no observation row is
# appended for the projected period.
#GCMs
filelist1 = do.call("rbind",strsplit(GCM_pfiles_pr,"/",fixed=TRUE))
filelist2 = do.call("rbind",strsplit(filelist1[,6],"_",fixed=TRUE))
filelist2 = as.data.frame(filelist2)
filelist2$training = "NA"
GCMpdat = filelist2[,c(2,3,4,6)]
names(GCMpdat) = c("GCM","exp","DS","training")
#LOCA
filelist1 = do.call("rbind",strsplit(LOCA_pfiles_pr,"/",fixed=TRUE))
filelist2 = do.call("rbind",strsplit(filelist1[,6],"_",fixed=TRUE))
filelist2 = as.data.frame(filelist2)
filelist2$training = "Livneh"
LOCApdat = filelist2[,c(2,3,4,6)]
names(LOCApdat) = names(GCMpdat)
# all files
GCMpgroup_pr = GCM_pfiles_pr
LOCApgroup_pr = LOCA_pfiles_pr
GCMpgroup_tmax = GCM_pfiles_tmax
LOCApgroup_tmax = LOCA_pfiles_tmax
######
# Gather data
ncvarname = "prclimo"
### GCM hist + Livneh - pr
# For each file: read the monthly precip climatology, collapse to an annual
# total (sum over the 3rd dimension), then mask -- either by the domain
# mask, or (full domain) by the data's own NA pattern.
GCMhvardatalist_pr = list()
for(i in 1:length(GCMgroup_pr)){
  nctest = nc_open(GCMgroup_pr[i])
  idx = which(names(nctest$var)==ncvarname)
  tmp = ncvar_get(nctest,nctest$var[[idx]]$name)
  GCMhvardatalist_pr[[i]] = apply(tmp,c(1,2),sum,na.rm=TRUE)
  if(stateapplied=="full"){
    GCMhvardatalist_pr[[i]] = ifelse(is.na(tmp[,,1])==FALSE,GCMhvardatalist_pr[[i]],NA)
  } else {
    GCMhvardatalist_pr[[i]] = ifelse(regionmask==1,GCMhvardatalist_pr[[i]],NA)
  }
  # Read the grid once. The original `if(i==1) lat = ...; lon = ...` only
  # guarded the lat read -- the semicolon put the lon read outside the if,
  # so it ran on every iteration. Braces fix that; all files share the same
  # regridded coordinates.
  if(i==1){
    lat = ncvar_get(nctest,"lat")
    lon = ncvar_get(nctest,"lon")
  }
  nc_close(nctest)
}
sapply(GCMhvardatalist_pr,mean,na.rm=TRUE)  # quick sanity check: domain means
### GCM projected change - pr
GCMpvardatalist_pr = list()
for(i in 1:length(GCMpgroup_pr)){
  nctest = nc_open(GCMpgroup_pr[i])
  idx = which(names(nctest$var)==ncvarname)
  tmp = ncvar_get(nctest,nctest$var[[idx]]$name)
  GCMpvardatalist_pr[[i]] = apply(tmp,c(1,2),sum,na.rm=TRUE)
  if(stateapplied=="full"){
    GCMpvardatalist_pr[[i]] = ifelse(is.na(tmp[,,1])==FALSE,GCMpvardatalist_pr[[i]],NA)
  } else {
    GCMpvardatalist_pr[[i]] = ifelse(regionmask==1,GCMpvardatalist_pr[[i]],NA)
  }
  if(i==1){
    lat = ncvar_get(nctest,"lat")
    lon = ncvar_get(nctest,"lon")
  }
  nc_close(nctest)
}
sapply(GCMpvardatalist_pr,mean,na.rm=TRUE)  # sanity check
### LOCA historical + Livneh - pr
# Same read/annual-total/mask pattern as the GCM loops above. The grid read
# is braced: the original `if(i==1) lat = ...; lon = ...` re-read lon on
# every iteration because the semicolon ended the if statement.
LOCAhvardatalist_pr = list()
for(i in 1:length(LOCAgroup_pr)){
  nctest = nc_open(LOCAgroup_pr[i])
  idx = which(names(nctest$var)==ncvarname)
  tmp = ncvar_get(nctest,nctest$var[[idx]]$name)
  LOCAhvardatalist_pr[[i]] = apply(tmp,c(1,2),sum,na.rm=TRUE)
  if(stateapplied=="full"){
    LOCAhvardatalist_pr[[i]] = ifelse(is.na(tmp[,,1])==FALSE,LOCAhvardatalist_pr[[i]],NA)
  } else{
    LOCAhvardatalist_pr[[i]] = ifelse(regionmask==1,LOCAhvardatalist_pr[[i]],NA)
  }
  if(i==1){
    lat = ncvar_get(nctest,"lat")
    lon = ncvar_get(nctest,"lon")
  }
  nc_close(nctest)
}
sapply(LOCAhvardatalist_pr,mean,na.rm=TRUE)  # sanity check
### LOCA projected change - pr
LOCApvardatalist_pr = list()
for(i in 1:length(LOCApgroup_pr)){
  nctest = nc_open(LOCApgroup_pr[i])
  idx = which(names(nctest$var)==ncvarname)
  tmp = ncvar_get(nctest,nctest$var[[idx]]$name)
  LOCApvardatalist_pr[[i]] = apply(tmp,c(1,2),sum,na.rm=TRUE)
  if(stateapplied=="full"){
    LOCApvardatalist_pr[[i]] = ifelse(is.na(tmp[,,1])==FALSE,LOCApvardatalist_pr[[i]],NA)
  } else{
    LOCApvardatalist_pr[[i]] = ifelse(regionmask==1,LOCApvardatalist_pr[[i]],NA)
  }
  if(i==1){
    lat = ncvar_get(nctest,"lat")
    lon = ncvar_get(nctest,"lon")
  }
  nc_close(nctest)
}
sapply(LOCApvardatalist_pr,mean,na.rm=TRUE)  # sanity check
######
# Gather Data 2
ncvarname = "tmaxclimo"
### GCM hist + Livneh - tmax
# Same pattern as the precip loops, but the monthly climatology is reduced
# with mean() (annual-mean tmax) rather than sum(). The grid read is braced:
# the original `if(i==1) lat = ...; lon = ...` re-read lon every iteration.
GCMhvardatalist_tmax = list()
for(i in 1:length(GCMgroup_tmax)){
  nctest = nc_open(GCMgroup_tmax[i])
  idx = which(names(nctest$var)==ncvarname)
  tmp = ncvar_get(nctest,nctest$var[[idx]]$name)
  GCMhvardatalist_tmax[[i]] = apply(tmp,c(1,2),mean,na.rm=TRUE)
  if(stateapplied=="full"){
    GCMhvardatalist_tmax[[i]] = ifelse(is.na(tmp[,,1])==FALSE,GCMhvardatalist_tmax[[i]],NA)
  } else{
    GCMhvardatalist_tmax[[i]] = ifelse(regionmask==1,GCMhvardatalist_tmax[[i]],NA)
  }
  if(i==1){
    lat = ncvar_get(nctest,"lat")
    lon = ncvar_get(nctest,"lon")
  }
  nc_close(nctest)
}
sapply(GCMhvardatalist_tmax,mean,na.rm=TRUE)  # sanity check
### GCM projected change - tmax
GCMpvardatalist_tmax = list()
for(i in 1:length(GCMpgroup_tmax)){
  nctest = nc_open(GCMpgroup_tmax[i])
  idx = which(names(nctest$var)==ncvarname)
  tmp = ncvar_get(nctest,nctest$var[[idx]]$name)
  GCMpvardatalist_tmax[[i]] = apply(tmp,c(1,2),mean,na.rm=TRUE)
  if(stateapplied=="full"){
    GCMpvardatalist_tmax[[i]] = ifelse(is.na(tmp[,,1])==FALSE,GCMpvardatalist_tmax[[i]],NA)
  } else{
    GCMpvardatalist_tmax[[i]] = ifelse(regionmask==1,GCMpvardatalist_tmax[[i]],NA)
  }
  if(i==1){
    lat = ncvar_get(nctest,"lat")
    lon = ncvar_get(nctest,"lon")
  }
  nc_close(nctest)
}
sapply(GCMpvardatalist_tmax,mean,na.rm=TRUE)  # sanity check
### LOCA historical + Livneh - tmax
LOCAhvardatalist_tmax = list()
for(i in 1:length(LOCAgroup_tmax)){
nctest = nc_open(LOCAgroup_tmax[i])
idx = which(names(nctest$var)==ncvarname)
tmp = ncvar_get(nctest,nctest$var[[idx]]$name)
LOCAhvardatalist_tmax[[i]] = apply(tmp,c(1,2),mean,na.rm=TRUE)
if(stateapplied=="full"){
LOCAhvardatalist_tmax[[i]] = ifelse(is.na(tmp[,,1])==FALSE,LOCAhvardatalist_tmax[[i]],NA)
} else{
LOCAhvardatalist_tmax[[i]] = ifelse(regionmask==1,LOCAhvardatalist_tmax[[i]],NA)
}
if(i==1) lat = ncvar_get(nctest,"lat"); lon=ncvar_get(nctest,"lon")
nc_close(nctest)
}
sapply(LOCAhvardatalist_tmax,mean,na.rm=TRUE)
### LOCA projected change - tmax
LOCApvardatalist_tmax = list()
for(i in 1:length(LOCApgroup_tmax)){
nctest = nc_open(LOCApgroup_tmax[i])
idx = which(names(nctest$var)==ncvarname)
tmp = ncvar_get(nctest,nctest$var[[idx]]$name)
LOCApvardatalist_tmax[[i]] = apply(tmp,c(1,2),mean,na.rm=TRUE)
if(stateapplied=="full"){
LOCApvardatalist_tmax[[i]] = ifelse(is.na(tmp[,,1])==FALSE,LOCApvardatalist_tmax[[i]],NA)
} else{
LOCApvardatalist_tmax[[i]] = ifelse(regionmask==1,LOCApvardatalist_tmax[[i]],NA)
}
if(i==1) lat = ncvar_get(nctest,"lat"); lon=ncvar_get(nctest,"lon")
nc_close(nctest)
}
sapply(LOCApvardatalist_tmax,mean,na.rm=TRUE)
#######
# projected changes - _pr
# Stack the per-model climatologies into lon x lat x 26 arrays (26 = ensemble
# size; hard-coded to match GCMlist — TODO confirm against GCMlist.Rdata).
# FIX: the original used dim=c(length(lon), ncol=length(lat), 26); `ncol=` is
# not an argument of c() and only attached a name to the element — harmless at
# runtime but misleading, so it is removed.
GCMchange_pr = LOCAchange_pr = GCMproj_pr = LOCAproj_pr = GCMhist_pr = LOCAhist_pr = array(NA, dim = c(length(lon), length(lat), 26))
# The observational (Livneh) field is stored as the 27th list element.
OBS_pr = LOCAhvardatalist_pr[[27]]
if (var == "pr") {
  # Zero annual precip in the obs indicates masked/placeholder cells from the
  # na.rm sum upstream; treat those as missing rather than true zeros.
  if (min(OBS_pr, na.rm = TRUE) == 0) {
    OBS_pr = ifelse(OBS_pr == 0, NA, OBS_pr)
  }
}
for (i in 1:26) {
  GCMchange_pr[,,i] = GCMpvardatalist_pr[[i]] - GCMhvardatalist_pr[[i]]
  LOCAchange_pr[,,i] = LOCApvardatalist_pr[[i]] - LOCAhvardatalist_pr[[i]]
  GCMproj_pr[,,i] = GCMpvardatalist_pr[[i]]
  LOCAproj_pr[,,i] = LOCApvardatalist_pr[[i]]
  GCMhist_pr[,,i] = GCMhvardatalist_pr[[i]]
  LOCAhist_pr[,,i] = LOCAhvardatalist_pr[[i]]
}
#######
# projected changes - _tmax
GCMchange_tmax = LOCAchange_tmax = GCMproj_tmax = LOCAproj_tmax = GCMhist_tmax = LOCAhist_tmax = array(NA, dim = c(length(lon), length(lat), 26))
OBS_tmax = LOCAhvardatalist_tmax[[27]]
for (i in 1:26) {
  GCMchange_tmax[,,i] = GCMpvardatalist_tmax[[i]] - GCMhvardatalist_tmax[[i]]
  LOCAchange_tmax[,,i] = LOCApvardatalist_tmax[[i]] - LOCAhvardatalist_tmax[[i]]
  GCMproj_tmax[,,i] = GCMpvardatalist_tmax[[i]]
  LOCAproj_tmax[,,i] = LOCApvardatalist_tmax[[i]]
  GCMhist_tmax[,,i] = GCMhvardatalist_tmax[[i]]
  LOCAhist_tmax[,,i] = LOCAhvardatalist_tmax[[i]]
}
######
# prep weights
# Combine the component weights into normalized per-model weights (each set
# sums to 1 across the ensemble):
#   Wh = product of Wuh and Wqh (historical-period components)
#   Wc = product of Wuc and Wqc (historical + change components)
#   Ws = Wqh alone (skill-only)
# NOTE(review): Wu*/Wq* presumably are Sanderson-style uniqueness/quality
# weights loaded from the EnsembleWeights .Rdata — confirm against the script
# that produced that file.
GCMweights$Wh = (GCMweights$Wuh*GCMweights$Wqh)/sum(GCMweights$Wuh*GCMweights$Wqh)
GCMweights$Wc = (GCMweights$Wuc*GCMweights$Wqc)/sum(GCMweights$Wuc*GCMweights$Wqc)
GCMweights$Ws = GCMweights$Wqh/sum(GCMweights$Wqh)
LOCAweights$Wh = (LOCAweights$Wuh*LOCAweights$Wqh)/sum(LOCAweights$Wuh*LOCAweights$Wqh)
LOCAweights$Wc = (LOCAweights$Wuc*LOCAweights$Wqc)/sum(LOCAweights$Wuc*LOCAweights$Wqc)
LOCAweights$Ws = LOCAweights$Wqh/sum(LOCAweights$Wqh)
######
# Calculate historical means (weighted and unweighted) - pr
# Each weighted ensemble mean is built as a running sum of weight[i] * field[i]
# over the 26 members; the weights sum to 1, so no final division is needed.
GCMunweightedmean_hist_pr = apply(GCMhist_pr,c(1,2),mean,na.rm=TRUE)
LOCAunweightedmean_hist_pr = apply(LOCAhist_pr,c(1,2),mean,na.rm=TRUE)
# Placeholder initialization only — these arrays are fully overwritten on the
# first loop iteration (i == 1) below.
GCMskillmean_hist_pr = GCMSIhmean_hist_pr = GCMSIcmean_hist_pr = GCMBMAmean_hist_pr = GCMunweightedmean_hist_pr
LOCAskillmean_hist_pr = LOCASIhmean_hist_pr = LOCASIcmean_hist_pr = LOCABMAmean_hist_pr = LOCAunweightedmean_hist_pr
for(i in 1:26){
## skill mean
tmpG = GCMhist_pr[,,i]*GCMweights$Ws[i]
tmpL = LOCAhist_pr[,,i]*LOCAweights$Ws[i]
if(i==1){
GCMskillmean_hist_pr = tmpG
LOCAskillmean_hist_pr = tmpL
} else {
GCMskillmean_hist_pr = GCMskillmean_hist_pr+tmpG
LOCAskillmean_hist_pr = LOCAskillmean_hist_pr+tmpL
}
## skill+ind hist only
tmpG = GCMhist_pr[,,i]*GCMweights$Wh[i]
tmpL = LOCAhist_pr[,,i]*LOCAweights$Wh[i]
if(i==1){
GCMSIhmean_hist_pr = tmpG
LOCASIhmean_hist_pr = tmpL
} else {
GCMSIhmean_hist_pr = GCMSIhmean_hist_pr+tmpG
LOCASIhmean_hist_pr = LOCASIhmean_hist_pr+tmpL
}
## skill+ind hist and change
tmpG = GCMhist_pr[,,i]*GCMweights$Wc[i]
tmpL = LOCAhist_pr[,,i]*LOCAweights$Wc[i]
if(i==1){
GCMSIcmean_hist_pr = tmpG
LOCASIcmean_hist_pr = tmpL
} else {
GCMSIcmean_hist_pr = GCMSIcmean_hist_pr+tmpG
LOCASIcmean_hist_pr = LOCASIcmean_hist_pr+tmpL
}
## BMA hist and change
tmpG = GCMhist_pr[,,i]*GCMweights$BMA[i]
tmpL = LOCAhist_pr[,,i]*LOCAweights$BMA[i]
if(i==1){
GCMBMAmean_hist_pr = tmpG
LOCABMAmean_hist_pr = tmpL
} else {
GCMBMAmean_hist_pr = GCMBMAmean_hist_pr+tmpG
LOCABMAmean_hist_pr = LOCABMAmean_hist_pr+tmpL
}
}
# Bias of each ensemble-mean field relative to the Livneh observations.
LOCAunweightedmean_bias_pr = LOCAunweightedmean_hist_pr-OBS_pr
LOCAskillmean_bias_pr = LOCAskillmean_hist_pr-OBS_pr
LOCASIhmean_bias_pr = LOCASIhmean_hist_pr-OBS_pr
LOCASIcmean_bias_pr = LOCASIcmean_hist_pr-OBS_pr
LOCABMAmean_bias_pr = LOCABMAmean_hist_pr-OBS_pr
GCMunweightedmean_bias_pr = GCMunweightedmean_hist_pr-OBS_pr
GCMskillmean_bias_pr = GCMskillmean_hist_pr-OBS_pr
GCMSIhmean_bias_pr = GCMSIhmean_hist_pr-OBS_pr
GCMSIcmean_bias_pr = GCMSIcmean_hist_pr-OBS_pr
GCMBMAmean_bias_pr = GCMBMAmean_hist_pr-OBS_pr
######
# Calculate historical means (weighted and unweighted) - tmax
# Identical structure to the pr section above, applied to the tmax arrays.
GCMunweightedmean_hist_tmax = apply(GCMhist_tmax,c(1,2),mean,na.rm=TRUE)
LOCAunweightedmean_hist_tmax = apply(LOCAhist_tmax,c(1,2),mean,na.rm=TRUE)
# Placeholders, overwritten at i == 1 as above.
GCMskillmean_hist_tmax = GCMSIhmean_hist_tmax = GCMSIcmean_hist_tmax = GCMBMAmean_hist_tmax = GCMunweightedmean_hist_tmax
LOCAskillmean_hist_tmax = LOCASIhmean_hist_tmax = LOCASIcmean_hist_tmax = LOCABMAmean_hist_tmax = LOCAunweightedmean_hist_tmax
for(i in 1:26){
## skill mean
tmpG = GCMhist_tmax[,,i]*GCMweights$Ws[i]
tmpL = LOCAhist_tmax[,,i]*LOCAweights$Ws[i]
if(i==1){
GCMskillmean_hist_tmax = tmpG
LOCAskillmean_hist_tmax = tmpL
} else {
GCMskillmean_hist_tmax = GCMskillmean_hist_tmax+tmpG
LOCAskillmean_hist_tmax = LOCAskillmean_hist_tmax+tmpL
}
## skill+ind hist only
tmpG = GCMhist_tmax[,,i]*GCMweights$Wh[i]
tmpL = LOCAhist_tmax[,,i]*LOCAweights$Wh[i]
if(i==1){
GCMSIhmean_hist_tmax = tmpG
LOCASIhmean_hist_tmax = tmpL
} else {
GCMSIhmean_hist_tmax = GCMSIhmean_hist_tmax+tmpG
LOCASIhmean_hist_tmax = LOCASIhmean_hist_tmax+tmpL
}
## skill+ind hist and change
tmpG = GCMhist_tmax[,,i]*GCMweights$Wc[i]
tmpL = LOCAhist_tmax[,,i]*LOCAweights$Wc[i]
if(i==1){
GCMSIcmean_hist_tmax = tmpG
LOCASIcmean_hist_tmax = tmpL
} else {
GCMSIcmean_hist_tmax = GCMSIcmean_hist_tmax+tmpG
LOCASIcmean_hist_tmax = LOCASIcmean_hist_tmax+tmpL
}
## BMA hist and change
tmpG = GCMhist_tmax[,,i]*GCMweights$BMA[i]
tmpL = LOCAhist_tmax[,,i]*LOCAweights$BMA[i]
if(i==1){
GCMBMAmean_hist_tmax = tmpG
LOCABMAmean_hist_tmax = tmpL
} else {
GCMBMAmean_hist_tmax = GCMBMAmean_hist_tmax+tmpG
LOCABMAmean_hist_tmax = LOCABMAmean_hist_tmax+tmpL
}
}
# Bias of each tmax ensemble-mean field relative to observations.
LOCAunweightedmean_bias_tmax = LOCAunweightedmean_hist_tmax-OBS_tmax
LOCAskillmean_bias_tmax = LOCAskillmean_hist_tmax-OBS_tmax
LOCASIhmean_bias_tmax = LOCASIhmean_hist_tmax-OBS_tmax
LOCASIcmean_bias_tmax = LOCASIcmean_hist_tmax-OBS_tmax
LOCABMAmean_bias_tmax = LOCABMAmean_hist_tmax-OBS_tmax
GCMunweightedmean_bias_tmax = GCMunweightedmean_hist_tmax-OBS_tmax
GCMskillmean_bias_tmax = GCMskillmean_hist_tmax-OBS_tmax
GCMSIhmean_bias_tmax = GCMSIhmean_hist_tmax-OBS_tmax
GCMSIcmean_bias_tmax = GCMSIcmean_hist_tmax-OBS_tmax
GCMBMAmean_bias_tmax = GCMBMAmean_hist_tmax-OBS_tmax
######
# Calculate change means (weighted and unweighted) - pr only
# Same accumulation pattern as the historical-mean section, but applied to the
# projected-change arrays (proj - hist).
GCMunweightedmean_change_pr = apply(GCMchange_pr,c(1,2),mean,na.rm=TRUE)
LOCAunweightedmean_change_pr = apply(LOCAchange_pr,c(1,2),mean,na.rm=TRUE)
# Placeholder initialization — overwritten on the first iteration (i == 1).
GCMskillmean_change_pr = GCMSIhmean_change_pr = GCMSIcmean_change_pr = GCMBMAmean_change_pr = GCMunweightedmean_change_pr
LOCAskillmean_change_pr = LOCASIhmean_change_pr = LOCASIcmean_change_pr = LOCABMAmean_change_pr = LOCAunweightedmean_change_pr
for(i in 1:26){
## skill mean
tmpG = GCMchange_pr[,,i]*GCMweights$Ws[i]
tmpL = LOCAchange_pr[,,i]*LOCAweights$Ws[i]
if(i==1){
GCMskillmean_change_pr = tmpG
LOCAskillmean_change_pr = tmpL
} else {
GCMskillmean_change_pr = GCMskillmean_change_pr+tmpG
LOCAskillmean_change_pr = LOCAskillmean_change_pr+tmpL
}
## skill+ind hist only
tmpG = GCMchange_pr[,,i]*GCMweights$Wh[i]
tmpL = LOCAchange_pr[,,i]*LOCAweights$Wh[i]
if(i==1){
GCMSIhmean_change_pr = tmpG
LOCASIhmean_change_pr = tmpL
} else {
GCMSIhmean_change_pr = GCMSIhmean_change_pr+tmpG
LOCASIhmean_change_pr = LOCASIhmean_change_pr+tmpL
}
## skill+ind hist and change
tmpG = GCMchange_pr[,,i]*GCMweights$Wc[i]
tmpL = LOCAchange_pr[,,i]*LOCAweights$Wc[i]
if(i==1){
GCMSIcmean_change_pr = tmpG
LOCASIcmean_change_pr = tmpL
} else {
GCMSIcmean_change_pr = GCMSIcmean_change_pr+tmpG
LOCASIcmean_change_pr = LOCASIcmean_change_pr+tmpL
}
## BMA hist and change
tmpG = GCMchange_pr[,,i]*GCMweights$BMA[i]
tmpL = LOCAchange_pr[,,i]*LOCAweights$BMA[i]
if(i==1){
GCMBMAmean_change_pr = tmpG
LOCABMAmean_change_pr = tmpL
} else {
GCMBMAmean_change_pr = GCMBMAmean_change_pr+tmpG
LOCABMAmean_change_pr = LOCABMAmean_change_pr+tmpL
}
}
######
# Calculate change means (weighted and unweighted) - tmax only
# Identical structure, applied to the tmax change arrays.
GCMunweightedmean_change_tmax = apply(GCMchange_tmax,c(1,2),mean,na.rm=TRUE)
LOCAunweightedmean_change_tmax = apply(LOCAchange_tmax,c(1,2),mean,na.rm=TRUE)
# Placeholders, overwritten at i == 1.
GCMskillmean_change_tmax = GCMSIhmean_change_tmax = GCMSIcmean_change_tmax = GCMBMAmean_change_tmax = GCMunweightedmean_change_tmax
LOCAskillmean_change_tmax = LOCASIhmean_change_tmax = LOCASIcmean_change_tmax = LOCABMAmean_change_tmax = LOCAunweightedmean_change_tmax
for(i in 1:26){
## skill mean
tmpG = GCMchange_tmax[,,i]*GCMweights$Ws[i]
tmpL = LOCAchange_tmax[,,i]*LOCAweights$Ws[i]
if(i==1){
GCMskillmean_change_tmax = tmpG
LOCAskillmean_change_tmax = tmpL
} else {
GCMskillmean_change_tmax = GCMskillmean_change_tmax+tmpG
LOCAskillmean_change_tmax = LOCAskillmean_change_tmax+tmpL
}
## skill+ind hist only
tmpG = GCMchange_tmax[,,i]*GCMweights$Wh[i]
tmpL = LOCAchange_tmax[,,i]*LOCAweights$Wh[i]
if(i==1){
GCMSIhmean_change_tmax = tmpG
LOCASIhmean_change_tmax = tmpL
} else {
GCMSIhmean_change_tmax = GCMSIhmean_change_tmax+tmpG
LOCASIhmean_change_tmax = LOCASIhmean_change_tmax+tmpL
}
## skill+ind hist and change
tmpG = GCMchange_tmax[,,i]*GCMweights$Wc[i]
tmpL = LOCAchange_tmax[,,i]*LOCAweights$Wc[i]
if(i==1){
GCMSIcmean_change_tmax = tmpG
LOCASIcmean_change_tmax = tmpL
} else {
GCMSIcmean_change_tmax = GCMSIcmean_change_tmax+tmpG
LOCASIcmean_change_tmax = LOCASIcmean_change_tmax+tmpL
}
## BMA hist and change
tmpG = GCMchange_tmax[,,i]*GCMweights$BMA[i]
tmpL = LOCAchange_tmax[,,i]*LOCAweights$BMA[i]
if(i==1){
GCMBMAmean_change_tmax = tmpG
LOCABMAmean_change_tmax = tmpL
} else {
GCMBMAmean_change_tmax = GCMBMAmean_change_tmax+tmpG
LOCABMAmean_change_tmax = LOCABMAmean_change_tmax+tmpL
}
}
######
# Calculate change variance (weighted and unweighted) - pr only
# Per-cell across-model variance of projected change, unweighted and under each
# weighting scheme. The arrays are initialized from the mean arrays purely to
# inherit dimensions and the NA mask; every in-domain cell is overwritten.
# NOTE(review): weighted.var is not defined in this file — presumably
# modi::weighted.var (library(modi) is loaded) or analysisfunctions.R; confirm.
GCMunweightedvar_change_pr = GCMskillvar_change_pr = GCMSIhvar_change_pr = GCMSIcvar_change_pr = GCMBMAvar_change_pr = GCMunweightedmean_change_pr
LOCAunweightedvar_change_pr = LOCAskillvar_change_pr = LOCASIhvar_change_pr = LOCASIcvar_change_pr = LOCABMAvar_change_pr = LOCAunweightedmean_change_pr
for (R in seq_along(lon)) {
  for (C in seq_along(lat)) {
    # Skip fully-missing cells (outside the mask). Simplified from the
    # original `all(is.na(...)==TRUE)==FALSE`.
    # NOTE(review): the LOCA values are also gated on the GCM mask here — the
    # two masks are presumably identical after regridding; confirm.
    if (!all(is.na(GCMchange_pr[R,C,]))) {
      GCMunweightedvar_change_pr[R,C] = var(GCMchange_pr[R,C,],na.rm=TRUE)
      LOCAunweightedvar_change_pr[R,C] = var(LOCAchange_pr[R,C,],na.rm=TRUE)
      GCMskillvar_change_pr[R,C] = weighted.var(x=GCMchange_pr[R,C,],w=GCMweights$Ws,na.rm=TRUE)
      LOCAskillvar_change_pr[R,C] = weighted.var(x=LOCAchange_pr[R,C,],w=LOCAweights$Ws,na.rm=TRUE)
      GCMSIhvar_change_pr[R,C] = weighted.var(x=GCMchange_pr[R,C,],w=GCMweights$Wh,na.rm=TRUE)
      LOCASIhvar_change_pr[R,C] = weighted.var(x=LOCAchange_pr[R,C,],w=LOCAweights$Wh,na.rm=TRUE)
      GCMSIcvar_change_pr[R,C] = weighted.var(x=GCMchange_pr[R,C,],w=GCMweights$Wc,na.rm=TRUE)
      LOCASIcvar_change_pr[R,C] = weighted.var(x=LOCAchange_pr[R,C,],w=LOCAweights$Wc,na.rm=TRUE)
      GCMBMAvar_change_pr[R,C] = weighted.var(x=GCMchange_pr[R,C,],w=GCMweights$BMA,na.rm=TRUE)
      LOCABMAvar_change_pr[R,C] = weighted.var(x=LOCAchange_pr[R,C,],w=LOCAweights$BMA,na.rm=TRUE)
      message("Finished calcs for R: ",R," and C: ",C)
    }
  }
}
######
# Calculate change variance (weighted and unweighted) - tmax only
# Identical structure, applied to the tmax change arrays.
GCMunweightedvar_change_tmax = GCMskillvar_change_tmax = GCMSIhvar_change_tmax = GCMSIcvar_change_tmax = GCMBMAvar_change_tmax = GCMunweightedmean_change_tmax
LOCAunweightedvar_change_tmax = LOCAskillvar_change_tmax = LOCASIhvar_change_tmax = LOCASIcvar_change_tmax = LOCABMAvar_change_tmax = LOCAunweightedmean_change_tmax
for (R in seq_along(lon)) {
  for (C in seq_along(lat)) {
    if (!all(is.na(GCMchange_tmax[R,C,]))) {
      GCMunweightedvar_change_tmax[R,C] = var(GCMchange_tmax[R,C,],na.rm=TRUE)
      LOCAunweightedvar_change_tmax[R,C] = var(LOCAchange_tmax[R,C,],na.rm=TRUE)
      GCMskillvar_change_tmax[R,C] = weighted.var(x=GCMchange_tmax[R,C,],w=GCMweights$Ws,na.rm=TRUE)
      LOCAskillvar_change_tmax[R,C] = weighted.var(x=LOCAchange_tmax[R,C,],w=LOCAweights$Ws,na.rm=TRUE)
      GCMSIhvar_change_tmax[R,C] = weighted.var(x=GCMchange_tmax[R,C,],w=GCMweights$Wh,na.rm=TRUE)
      LOCASIhvar_change_tmax[R,C] = weighted.var(x=LOCAchange_tmax[R,C,],w=LOCAweights$Wh,na.rm=TRUE)
      GCMSIcvar_change_tmax[R,C] = weighted.var(x=GCMchange_tmax[R,C,],w=GCMweights$Wc,na.rm=TRUE)
      LOCASIcvar_change_tmax[R,C] = weighted.var(x=LOCAchange_tmax[R,C,],w=LOCAweights$Wc,na.rm=TRUE)
      GCMBMAvar_change_tmax[R,C] = weighted.var(x=GCMchange_tmax[R,C,],w=GCMweights$BMA,na.rm=TRUE)
      LOCABMAvar_change_tmax[R,C] = weighted.var(x=LOCAchange_tmax[R,C,],w=LOCAweights$BMA,na.rm=TRUE)
      message("Finished calcs for R: ",R," and C: ",C)
    }
  }
}
######
# gather means - pr only
# Collapse each field to a single domain-average number per weighting scheme
# and assemble a summary data frame (one row per scheme x downscaling method).
meansdat_pr = NULL
histmeans_GCM_pr = c(mean(GCMunweightedmean_hist_pr,na.rm=TRUE),mean(GCMskillmean_hist_pr,na.rm=TRUE),mean(GCMSIhmean_hist_pr,na.rm=TRUE),mean(GCMSIcmean_hist_pr,na.rm=TRUE),mean(GCMBMAmean_hist_pr,na.rm=TRUE))
histmeans_LOCA_pr = c(mean(LOCAunweightedmean_hist_pr,na.rm=TRUE),mean(LOCAskillmean_hist_pr,na.rm=TRUE),mean(LOCASIhmean_hist_pr,na.rm=TRUE),mean(LOCASIcmean_hist_pr,na.rm=TRUE),mean(LOCABMAmean_hist_pr,na.rm=TRUE))
changemeans_GCM_pr = c(mean(GCMunweightedmean_change_pr,na.rm=TRUE),mean(GCMskillmean_change_pr,na.rm=TRUE),mean(GCMSIhmean_change_pr,na.rm=TRUE),mean(GCMSIcmean_change_pr,na.rm=TRUE),mean(GCMBMAmean_change_pr,na.rm=TRUE))
changemeans_LOCA_pr = c(mean(LOCAunweightedmean_change_pr,na.rm=TRUE),mean(LOCAskillmean_change_pr,na.rm=TRUE),mean(LOCASIhmean_change_pr,na.rm=TRUE),mean(LOCASIcmean_change_pr,na.rm=TRUE),mean(LOCABMAmean_change_pr,na.rm=TRUE))
changevars_GCM_pr = c(mean(GCMunweightedvar_change_pr,na.rm=TRUE),mean(GCMskillvar_change_pr,na.rm=TRUE),mean(GCMSIhvar_change_pr,na.rm=TRUE),mean(GCMSIcvar_change_pr,na.rm=TRUE),mean(GCMBMAvar_change_pr,na.rm=TRUE))
changevars_LOCA_pr = c(mean(LOCAunweightedvar_change_pr,na.rm=TRUE),mean(LOCAskillvar_change_pr,na.rm=TRUE),mean(LOCASIhvar_change_pr,na.rm=TRUE),mean(LOCASIcvar_change_pr,na.rm=TRUE),mean(LOCABMAvar_change_pr,na.rm=TRUE))
obs = mean(OBS_pr,na.rm=TRUE)
region = stateapplied
# RMSE of each ensemble-mean field vs. observations, over the domain.
RMSE_GCM_pr = c(sqrt(mean((GCMunweightedmean_bias_pr)^2,na.rm=TRUE)),
sqrt(mean((GCMskillmean_bias_pr)^2,na.rm=TRUE)),
sqrt(mean((GCMSIhmean_bias_pr)^2,na.rm=TRUE)),
sqrt(mean((GCMSIcmean_bias_pr)^2,na.rm=TRUE)),
sqrt(mean((GCMBMAmean_bias_pr)^2,na.rm=TRUE)))
RMSE_LOCA_pr = c(sqrt(mean((LOCAunweightedmean_bias_pr)^2,na.rm=TRUE)),
sqrt(mean((LOCAskillmean_bias_pr)^2,na.rm=TRUE)),
sqrt(mean((LOCASIhmean_bias_pr)^2,na.rm=TRUE)),
sqrt(mean((LOCASIcmean_bias_pr)^2,na.rm=TRUE)),
sqrt(mean((LOCABMAmean_bias_pr)^2,na.rm=TRUE)))
# Row order must match the group vector below: unweighted, skill, SI-h, SI-c, BMA.
histmeans = c(histmeans_GCM_pr,histmeans_LOCA_pr)
changemeans = c(changemeans_GCM_pr,changemeans_LOCA_pr)
changevars = c(changevars_GCM_pr,changevars_LOCA_pr)
rmse = c(RMSE_GCM_pr,RMSE_LOCA_pr)
group = rep(c("unweighted","skill","SI-h","SI-c","BMA"),2)
DS = rep(c("CMIP5","LOCA"),each=5)
meansframe_pr = data.frame(group,region,DS,histmeans,obs,changemeans,changevars,rmse)
# rbind onto NULL keeps the single-region case simple (meansdat_pr starts NULL).
meansdat_pr = rbind(meansdat_pr,meansframe_pr)
meansdat_pr$bias = meansdat_pr$histmeans-meansdat_pr$obs
# Persist the pr summary table to the working directory (set by setwd above).
save(list="meansdat_pr",file=paste("WeightedMeansVars_pr_",var,"_WU",weightingused,"_SA",stateapplied,"_wBMA.Rdata",sep=""))
######
# gather means - tmax only
# Identical structure, for the tmax summary table.
# NOTE(review): unlike the pr table, no save() is issued for meansdat_tmax in
# this visible section — confirm whether that is intentional.
meansdat_tmax = NULL
histmeans_GCM_tmax = c(mean(GCMunweightedmean_hist_tmax,na.rm=TRUE),mean(GCMskillmean_hist_tmax,na.rm=TRUE),mean(GCMSIhmean_hist_tmax,na.rm=TRUE),mean(GCMSIcmean_hist_tmax,na.rm=TRUE),mean(GCMBMAmean_hist_tmax,na.rm=TRUE))
histmeans_LOCA_tmax = c(mean(LOCAunweightedmean_hist_tmax,na.rm=TRUE),mean(LOCAskillmean_hist_tmax,na.rm=TRUE),mean(LOCASIhmean_hist_tmax,na.rm=TRUE),mean(LOCASIcmean_hist_tmax,na.rm=TRUE),mean(LOCABMAmean_hist_tmax,na.rm=TRUE))
changemeans_GCM_tmax = c(mean(GCMunweightedmean_change_tmax,na.rm=TRUE),mean(GCMskillmean_change_tmax,na.rm=TRUE),mean(GCMSIhmean_change_tmax,na.rm=TRUE),mean(GCMSIcmean_change_tmax,na.rm=TRUE),mean(GCMBMAmean_change_tmax,na.rm=TRUE))
changemeans_LOCA_tmax = c(mean(LOCAunweightedmean_change_tmax,na.rm=TRUE),mean(LOCAskillmean_change_tmax,na.rm=TRUE),mean(LOCASIhmean_change_tmax,na.rm=TRUE),mean(LOCASIcmean_change_tmax,na.rm=TRUE),mean(LOCABMAmean_change_tmax,na.rm=TRUE))
changevars_GCM_tmax = c(mean(GCMunweightedvar_change_tmax,na.rm=TRUE),mean(GCMskillvar_change_tmax,na.rm=TRUE),mean(GCMSIhvar_change_tmax,na.rm=TRUE),mean(GCMSIcvar_change_tmax,na.rm=TRUE),mean(GCMBMAvar_change_tmax,na.rm=TRUE))
changevars_LOCA_tmax = c(mean(LOCAunweightedvar_change_tmax,na.rm=TRUE),mean(LOCAskillvar_change_tmax,na.rm=TRUE),mean(LOCASIhvar_change_tmax,na.rm=TRUE),mean(LOCASIcvar_change_tmax,na.rm=TRUE),mean(LOCABMAvar_change_tmax,na.rm=TRUE))
obs = mean(OBS_tmax,na.rm=TRUE)
region = stateapplied
RMSE_GCM_tmax = c(sqrt(mean((GCMunweightedmean_bias_tmax)^2,na.rm=TRUE)),
sqrt(mean((GCMskillmean_bias_tmax)^2,na.rm=TRUE)),
sqrt(mean((GCMSIhmean_bias_tmax)^2,na.rm=TRUE)),
sqrt(mean((GCMSIcmean_bias_tmax)^2,na.rm=TRUE)),
sqrt(mean((GCMBMAmean_bias_tmax)^2,na.rm=TRUE)))
RMSE_LOCA_tmax = c(sqrt(mean((LOCAunweightedmean_bias_tmax)^2,na.rm=TRUE)),
sqrt(mean((LOCAskillmean_bias_tmax)^2,na.rm=TRUE)),
sqrt(mean((LOCASIhmean_bias_tmax)^2,na.rm=TRUE)),
sqrt(mean((LOCASIcmean_bias_tmax)^2,na.rm=TRUE)),
sqrt(mean((LOCABMAmean_bias_tmax)^2,na.rm=TRUE)))
histmeans = c(histmeans_GCM_tmax,histmeans_LOCA_tmax)
changemeans = c(changemeans_GCM_tmax,changemeans_LOCA_tmax)
changevars = c(changevars_GCM_tmax,changevars_LOCA_tmax)
rmse = c(RMSE_GCM_tmax,RMSE_LOCA_tmax)
group = rep(c("unweighted","skill","SI-h","SI-c","BMA"),2)
DS = rep(c("CMIP5","LOCA"),each=5)
meansframe_tmax = data.frame(group,region,DS,histmeans,obs,changemeans,changevars,rmse)
meansdat_tmax = rbind(meansdat_tmax,meansframe_tmax)
meansdat_tmax$bias = meansdat_tmax$histmeans-meansdat_tmax$obs
|
/Sanderson/CalcChecks.R
|
no_license
|
amwootte/analysisscripts
|
R
| false
| false
| 29,817
|
r
|
source("/data2/3to5/I35/scripts/analysisfunctions.R")
library(ncdf4)
library(maps)
library(mapdata)
library(maptools)
library(fields)
library(sp)
library(raster)
library(rasterVis)
library(ggplot2)
library(modi)
# Unbiased weighted variance of x under weights w.
# Computed as (S1 / (S1^2 - S2)) * sum(w * (x - mu)^2), where S1 = sum(w),
# S2 = sum(w^2) and mu is the weighted mean. With unit weights this reduces
# to stats::var(). na.rm = TRUE drops entries where x is NA (w subset to match).
weighted.var2 <- function(x, w, na.rm = FALSE) {
  if (na.rm) {
    keep <- !is.na(x)
    w <- w[keep]
    x <- x[keep]
  }
  total_w <- sum(w)
  total_w2 <- sum(w^2)
  wtd_mean <- sum(x * w) / total_w
  (total_w / (total_w^2 - total_w2)) * sum(w * (x - wtd_mean)^2, na.rm = na.rm)
}
# Unbiased weighted variance via the raw-moment identity:
#   (S1 * sum(w*x^2) - sum(w*x)^2) / (S1^2 - S2), with S1 = sum(w), S2 = sum(w^2).
# Algebraically the same estimator as weighted.var2, evaluated without forming
# the weighted mean explicitly. na.rm = TRUE drops entries where x is NA.
weighted.var3 <- function(x, w, na.rm = FALSE) {
  if (na.rm) {
    keep <- !is.na(x)
    w <- w[keep]
    x <- x[keep]
  }
  w_total <- sum(w)
  (sum(w * x^2) * w_total - sum(w * x)^2) / (w_total^2 - sum(w^2))
}
# Working directory for all relative input/output paths below.
setwd("/home/woot0002/DS_ind/")
# Analysis configuration: variable, aggregation period, and which region the
# weights were trained on / applied to.
var = varin = "pr"
type="ann"
weightingused = "new mexico"
stateapplied = "new mexico"
# Load Sanderson-style ensemble weights and BMA weights; filenames carry the
# region suffix unless the full-domain weighting is requested.
# NOTE(review): GCMhdat / LOCAhdat used below presumably come from the loaded
# EnsembleWeights .Rdata image — confirm its contents.
if(weightingused=="full"){
load(file=paste("Sanderson_EnsembleWeights_",var,"_",type,".Rdata",sep=""))
BMAweights_GCM = read.table(paste("best_BMA_combo_",var,".txt",sep=""))
BMAweights_LOCA = read.table(paste("best_BMA_combo_LOCA_",var,".txt",sep=""))
load(paste("/home/woot0002/DS_ind/BMAposterior_meansandvars_",var,"_WU",weightingused,".Rdata",sep=""))
BMAweightsGCM = read.table(paste("posterior_BMA_combo_",var,".txt",sep=""))
BMAweightsLOCA = read.table(paste("posterior_BMA_combo_LOCA_",var,".txt",sep=""))
} else {
load(file=paste("Sanderson_EnsembleWeights_",var,"_",type,"_",weightingused,".Rdata",sep=""))
BMAweights_GCM = read.table(paste("best_BMA_combo_",var,"_",weightingused,".txt",sep=""))
BMAweights_LOCA = read.table(paste("best_BMA_combo_LOCA_",var,"_",weightingused,".txt",sep=""))
load(paste("/home/woot0002/DS_ind/BMAposterior_meansandvars_",var,"_WU",weightingused,".Rdata",sep=""))
BMAweightsGCM = read.table(paste("posterior_BMA_combo_",var,"_",weightingused,".txt",sep=""))
BMAweightsLOCA = read.table(paste("posterior_BMA_combo_LOCA_",var,"_",weightingused,".txt",sep=""))
}
# Attach the (best-combo) BMA weights as a column; read.table gives a 1-row
# frame, so transpose and take the first column to get a plain vector.
GCMhdat$BMA = t(BMAweights_GCM)[,1]
LOCAhdat$BMA = t(BMAweights_LOCA)[,1]
#####
# get domain mask
# For a regional run, read the 0/1 mask plus its grid; these lon/lat values are
# later overwritten by the grids read from the climatology files.
if(stateapplied!="full"){
test = nc_open(paste("/home/woot0002/DS_ind/",stateapplied,"_mask.nc",sep=""))
regionmask = ncvar_get(test,"mask")
lon = ncvar_get(test,"lon")
lat = ncvar_get(test,"lat")
nc_close(test)
}
####
# Snapshot the weight tables (including the BMA column) before GCMhdat/LOCAhdat
# are rebuilt from filenames further down.
GCMweights= GCMhdat
LOCAweights = LOCAhdat
# precip files
# Shell out to `ls` to collect historical/projected climatology files for the
# raw GCMs, the LOCA downscaled runs, and the Livneh observations.
GCMfiles_pr = system("ls /home/woot0002/GCMs/regrid/pr_*histclimo*.nc",intern=TRUE)
LOCAfiles_pr = system("ls /home/woot0002/LOCA/regrid/pr_*histclimo*.nc",intern=TRUE)
GCMprojfiles_pr = system("ls /home/woot0002/GCMs/regrid/pr_*projclimo*.nc",intern=TRUE)
LOCAprojfiles_pr = system("ls /home/woot0002/LOCA/regrid/pr_*projclimo*.nc",intern=TRUE)
LIVNEHfile_pr = system("ls /home/woot0002/monthlyclimo/pr_day*livneh*.nc",intern=TRUE)
# tasmax files
GCMfiles_tmax = system("ls /home/woot0002/GCMs/regrid/tasmax_*histclimo*.nc",intern=TRUE)
LOCAfiles_tmax = system("ls /home/woot0002/LOCA/regrid/tasmax_*histclimo*.nc",intern=TRUE)
GCMprojfiles_tmax = system("ls /home/woot0002/GCMs/regrid/tasmax_*projclimo*.nc",intern=TRUE)
LOCAprojfiles_tmax = system("ls /home/woot0002/LOCA/regrid/tasmax_*projclimo*.nc",intern=TRUE)
LIVNEHfile_tmax = system("ls /home/woot0002/monthlyclimo/tasmax_day*livneh*.nc",intern=TRUE)
# subset files down
# GCMlist holds the model names used in the manuscript; keep only files whose
# name contains "<model>_".
load("/home/woot0002/DS_ind/manuscript1/GCMlist.Rdata")
GCM_hfiles_pr = GCM_pfiles_pr = LOCA_hfiles_pr = LOCA_pfiles_pr = c()
GCM_hfiles_tmax = GCM_pfiles_tmax = LOCA_hfiles_tmax = LOCA_pfiles_tmax = c()
# NOTE(review): each grep is assumed to match exactly one file; a zero- or
# multi-match model name would corrupt the indexed assignment — confirm the
# directory contents are one file per model.
for(i in 1:length(GCMlist)){
#pr
GCM_hfiles_pr[i] = GCMfiles_pr[grep(paste(GCMlist[i],"_",sep=""),GCMfiles_pr)]
GCM_pfiles_pr[i] = GCMprojfiles_pr[grep(paste(GCMlist[i],"_",sep=""),GCMprojfiles_pr)]
LOCA_hfiles_pr[i] = LOCAfiles_pr[grep(paste(GCMlist[i],"_",sep=""),LOCAfiles_pr)]
LOCA_pfiles_pr[i] = LOCAprojfiles_pr[grep(paste(GCMlist[i],"_",sep=""),LOCAprojfiles_pr)]
#tmax
GCM_hfiles_tmax[i] = GCMfiles_tmax[grep(paste(GCMlist[i],"_",sep=""),GCMfiles_tmax)]
GCM_pfiles_tmax[i] = GCMprojfiles_tmax[grep(paste(GCMlist[i],"_",sep=""),GCMprojfiles_tmax)]
LOCA_hfiles_tmax[i] = LOCAfiles_tmax[grep(paste(GCMlist[i],"_",sep=""),LOCAfiles_tmax)]
LOCA_pfiles_tmax[i] = LOCAprojfiles_tmax[grep(paste(GCMlist[i],"_",sep=""),LOCAprojfiles_tmax)]
}
###
# create full filelist + metadata table - historical
# Parse model/experiment/DS-method fields out of the underscore-delimited
# filenames (path component 6 — depends on the directory depth above).
#GCMs
filelist1 = do.call("rbind",strsplit(GCM_hfiles_pr,"/",fixed=TRUE))
filelist2 = do.call("rbind",strsplit(filelist1[,6],"_",fixed=TRUE))
filelist2 = as.data.frame(filelist2)
# literal string "NA" (raw GCMs have no training dataset), not a missing value
filelist2$training = "NA"
# WARNING(review): this REBUILDS GCMhdat and drops the BMA column attached
# earlier; GCMweights (copied before this point) retains it — confirm intended.
GCMhdat = filelist2[,c(2,3,4,6)]
names(GCMhdat) = c("GCM","exp","DS","training")
#LOCA
filelist1 = do.call("rbind",strsplit(LOCA_hfiles_pr,"/",fixed=TRUE))
filelist2 = do.call("rbind",strsplit(filelist1[,6],"_",fixed=TRUE))
filelist2 = as.data.frame(filelist2)
filelist2$training = "Livneh"
LOCAhdat = filelist2[,c(2,3,4,6)]
names(LOCAhdat) = names(GCMhdat)
#All metadata
# Append one row for the Livneh observations (27th entry in the file groups).
GCM = rep(NA,1)
exp = rep(NA,1)
DS = rep(NA,1)
training = "LIVNEH"
obsdat = data.frame(GCM,exp,DS,training)
GCMhdat = rbind(GCMhdat,obsdat)
LOCAhdat= rbind(LOCAhdat,obsdat)
# all files
# Historical file groups: 26 model files plus the obs file appended last.
GCMgroup_pr = c(GCM_hfiles_pr,LIVNEHfile_pr)
LOCAgroup_pr = c(LOCA_hfiles_pr,LIVNEHfile_pr)
GCMgroup_tmax = c(GCM_hfiles_tmax,LIVNEHfile_tmax)
LOCAgroup_tmax = c(LOCA_hfiles_tmax,LIVNEHfile_tmax)
###
# create full filelist + metadata table - projected
#GCMs
filelist1 = do.call("rbind",strsplit(GCM_pfiles_pr,"/",fixed=TRUE))
filelist2 = do.call("rbind",strsplit(filelist1[,6],"_",fixed=TRUE))
filelist2 = as.data.frame(filelist2)
filelist2$training = "NA"
GCMpdat = filelist2[,c(2,3,4,6)]
names(GCMpdat) = c("GCM","exp","DS","training")
#LOCA
filelist1 = do.call("rbind",strsplit(LOCA_pfiles_pr,"/",fixed=TRUE))
filelist2 = do.call("rbind",strsplit(filelist1[,6],"_",fixed=TRUE))
filelist2 = as.data.frame(filelist2)
filelist2$training = "Livneh"
LOCApdat = filelist2[,c(2,3,4,6)]
names(LOCApdat) = names(GCMpdat)
# all files
# Projected groups have no obs entry (there are no future observations).
GCMpgroup_pr = GCM_pfiles_pr
LOCApgroup_pr = LOCA_pfiles_pr
GCMpgroup_tmax = GCM_pfiles_tmax
LOCApgroup_tmax = LOCA_pfiles_tmax
######
# Gather data
# NetCDF variable name for the precip climatology in each file.
ncvarname = "prclimo"
### GCM hist + Livneh - pr
# For each historical GCM file (plus the Livneh obs file appended last): read
# the monthly climo, total it to annual precip per grid cell, and mask.
GCMhvardatalist_pr = list()
for (i in seq_along(GCMgroup_pr)) {
  nctest = nc_open(GCMgroup_pr[i])
  idx = which(names(nctest$var) == ncvarname)
  tmp = ncvar_get(nctest, nctest$var[[idx]]$name)
  # annual total precip per cell (sum over the 3rd, monthly, dimension)
  GCMhvardatalist_pr[[i]] = apply(tmp, c(1, 2), sum, na.rm = TRUE)
  if (stateapplied == "full") {
    # keep only cells that were non-missing in the raw field
    GCMhvardatalist_pr[[i]] = ifelse(!is.na(tmp[,,1]), GCMhvardatalist_pr[[i]], NA)
  } else {
    # keep only cells inside the state/region mask
    GCMhvardatalist_pr[[i]] = ifelse(regionmask == 1, GCMhvardatalist_pr[[i]], NA)
  }
  # BUG FIX: braces added — in the original the `lon=...` after the semicolon
  # ran on every iteration, not only when i==1. All files share the common
  # regridded grid, so values are unchanged.
  if (i == 1) {
    lat = ncvar_get(nctest, "lat")
    lon = ncvar_get(nctest, "lon")
  }
  nc_close(nctest)
}
# quick sanity check: domain-mean per member
sapply(GCMhvardatalist_pr, mean, na.rm = TRUE)
### GCM projected change - pr
GCMpvardatalist_pr = list()
for (i in seq_along(GCMpgroup_pr)) {
  nctest = nc_open(GCMpgroup_pr[i])
  idx = which(names(nctest$var) == ncvarname)
  tmp = ncvar_get(nctest, nctest$var[[idx]]$name)
  GCMpvardatalist_pr[[i]] = apply(tmp, c(1, 2), sum, na.rm = TRUE)
  if (stateapplied == "full") {
    GCMpvardatalist_pr[[i]] = ifelse(!is.na(tmp[,,1]), GCMpvardatalist_pr[[i]], NA)
  } else {
    GCMpvardatalist_pr[[i]] = ifelse(regionmask == 1, GCMpvardatalist_pr[[i]], NA)
  }
  if (i == 1) {
    lat = ncvar_get(nctest, "lat")
    lon = ncvar_get(nctest, "lon")
  }
  nc_close(nctest)
}
sapply(GCMpvardatalist_pr, mean, na.rm = TRUE)
### LOCA historical + Livneh - pr
# Same pattern as the GCM pr loops: annual-total precip per cell, then mask.
LOCAhvardatalist_pr = list()
for (i in seq_along(LOCAgroup_pr)) {
  nctest = nc_open(LOCAgroup_pr[i])
  idx = which(names(nctest$var) == ncvarname)
  tmp = ncvar_get(nctest, nctest$var[[idx]]$name)
  LOCAhvardatalist_pr[[i]] = apply(tmp, c(1, 2), sum, na.rm = TRUE)
  if (stateapplied == "full") {
    LOCAhvardatalist_pr[[i]] = ifelse(!is.na(tmp[,,1]), LOCAhvardatalist_pr[[i]], NA)
  } else {
    LOCAhvardatalist_pr[[i]] = ifelse(regionmask == 1, LOCAhvardatalist_pr[[i]], NA)
  }
  # BUG FIX: braces added so lon is read once (see note on the GCM loops).
  if (i == 1) {
    lat = ncvar_get(nctest, "lat")
    lon = ncvar_get(nctest, "lon")
  }
  nc_close(nctest)
}
sapply(LOCAhvardatalist_pr, mean, na.rm = TRUE)
### LOCA projected change - pr
LOCApvardatalist_pr = list()
for (i in seq_along(LOCApgroup_pr)) {
  nctest = nc_open(LOCApgroup_pr[i])
  idx = which(names(nctest$var) == ncvarname)
  tmp = ncvar_get(nctest, nctest$var[[idx]]$name)
  LOCApvardatalist_pr[[i]] = apply(tmp, c(1, 2), sum, na.rm = TRUE)
  if (stateapplied == "full") {
    LOCApvardatalist_pr[[i]] = ifelse(!is.na(tmp[,,1]), LOCApvardatalist_pr[[i]], NA)
  } else {
    LOCApvardatalist_pr[[i]] = ifelse(regionmask == 1, LOCApvardatalist_pr[[i]], NA)
  }
  if (i == 1) {
    lat = ncvar_get(nctest, "lat")
    lon = ncvar_get(nctest, "lon")
  }
  nc_close(nctest)
}
sapply(LOCApvardatalist_pr, mean, na.rm = TRUE)
######
# Gather Data 2
ncvarname = "tmaxclimo"
### GCM hist + Livneh - tmax
GCMhvardatalist_tmax = list()
for(i in 1:length(GCMgroup_tmax)){
nctest = nc_open(GCMgroup_tmax[i])
idx = which(names(nctest$var)==ncvarname)
tmp = ncvar_get(nctest,nctest$var[[idx]]$name)
GCMhvardatalist_tmax[[i]] = apply(tmp,c(1,2),mean,na.rm=TRUE)
if(stateapplied=="full"){
GCMhvardatalist_tmax[[i]] = ifelse(is.na(tmp[,,1])==FALSE,GCMhvardatalist_tmax[[i]],NA)
} else{
GCMhvardatalist_tmax[[i]] = ifelse(regionmask==1,GCMhvardatalist_tmax[[i]],NA)
}
#vardatalist[[i]] = ncvar_get(nctest,ncvarname)
if(i==1) lat = ncvar_get(nctest,"lat"); lon=ncvar_get(nctest,"lon");
nc_close(nctest)
}
sapply(GCMhvardatalist_tmax,mean,na.rm=TRUE)
### GCM projected change - tmax
# Read the projected-period GCM tmax climatology from each file, collapse the
# time dimension with a mean, and mask to the analysis region.
GCMpvardatalist_tmax = list()
for (i in seq_along(GCMpgroup_tmax)) {
  nctest = nc_open(GCMpgroup_tmax[i])
  idx = which(names(nctest$var) == ncvarname)
  tmp = ncvar_get(nctest, nctest$var[[idx]]$name)
  GCMpvardatalist_tmax[[i]] = apply(tmp, c(1, 2), mean, na.rm = TRUE)
  if (stateapplied == "full") {
    GCMpvardatalist_tmax[[i]] = ifelse(!is.na(tmp[, , 1]), GCMpvardatalist_tmax[[i]], NA)
  } else {
    GCMpvardatalist_tmax[[i]] = ifelse(regionmask == 1, GCMpvardatalist_tmax[[i]], NA)
  }
  # Bug fix: brace the i==1 guard -- the original semicolon form left the lon
  # read outside the if, so it ran on every iteration (assumes all files
  # share one grid).
  if (i == 1) {
    lat = ncvar_get(nctest, "lat")
    lon = ncvar_get(nctest, "lon")
  }
  nc_close(nctest)
}
# Sanity check: domain-mean of each projected GCM tmax field.
sapply(GCMpvardatalist_tmax, mean, na.rm = TRUE)
### LOCA historical + Livneh - tmax
# Read the historical LOCA tmax climatology from each file, collapse the time
# dimension with a mean, and mask to the analysis region.
LOCAhvardatalist_tmax = list()
for (i in seq_along(LOCAgroup_tmax)) {
  nctest = nc_open(LOCAgroup_tmax[i])
  idx = which(names(nctest$var) == ncvarname)
  tmp = ncvar_get(nctest, nctest$var[[idx]]$name)
  LOCAhvardatalist_tmax[[i]] = apply(tmp, c(1, 2), mean, na.rm = TRUE)
  if (stateapplied == "full") {
    LOCAhvardatalist_tmax[[i]] = ifelse(!is.na(tmp[, , 1]), LOCAhvardatalist_tmax[[i]], NA)
  } else {
    LOCAhvardatalist_tmax[[i]] = ifelse(regionmask == 1, LOCAhvardatalist_tmax[[i]], NA)
  }
  # Bug fix: brace the i==1 guard -- the original semicolon form left the lon
  # read outside the if, so it ran on every iteration (assumes all files
  # share one grid).
  if (i == 1) {
    lat = ncvar_get(nctest, "lat")
    lon = ncvar_get(nctest, "lon")
  }
  nc_close(nctest)
}
# Sanity check: domain-mean of each historical LOCA tmax field.
sapply(LOCAhvardatalist_tmax, mean, na.rm = TRUE)
### LOCA projected change - tmax
# Read the projected-period LOCA tmax climatology from each file, collapse the
# time dimension with a mean, and mask to the analysis region.
LOCApvardatalist_tmax = list()
for (i in seq_along(LOCApgroup_tmax)) {
  nctest = nc_open(LOCApgroup_tmax[i])
  idx = which(names(nctest$var) == ncvarname)
  tmp = ncvar_get(nctest, nctest$var[[idx]]$name)
  LOCApvardatalist_tmax[[i]] = apply(tmp, c(1, 2), mean, na.rm = TRUE)
  if (stateapplied == "full") {
    LOCApvardatalist_tmax[[i]] = ifelse(!is.na(tmp[, , 1]), LOCApvardatalist_tmax[[i]], NA)
  } else {
    LOCApvardatalist_tmax[[i]] = ifelse(regionmask == 1, LOCApvardatalist_tmax[[i]], NA)
  }
  # Bug fix: brace the i==1 guard -- the original semicolon form left the lon
  # read outside the if, so it ran on every iteration (assumes all files
  # share one grid).
  if (i == 1) {
    lat = ncvar_get(nctest, "lat")
    lon = ncvar_get(nctest, "lon")
  }
  nc_close(nctest)
}
# Sanity check: domain-mean of each projected LOCA tmax field.
sapply(LOCApvardatalist_tmax, mean, na.rm = TRUE)
#######
# projected changes - _pr
# Stack the 26 model fields into lon x lat x 26 arrays of historical values,
# projected values, and projected-minus-historical changes.
# Fix: the dim vector contained a stray `ncol=` name (dim = c(x, ncol = y, z));
# the name was silently ignored by array() but was misleading.
GCMchange_pr = LOCAchange_pr = GCMproj_pr = LOCAproj_pr = GCMhist_pr = LOCAhist_pr = array(NA, dim = c(length(lon), length(lat), 26))
# Entry 27 of the historical LOCA list is the observational field.
OBS_pr = LOCAhvardatalist_pr[[27]]
if (var == "pr") {
  # Mask exact-zero precipitation cells as missing. The zeros here come
  # straight from the summed file values, so the equality test is intentional
  # for this masking purpose.
  if (min(OBS_pr, na.rm = TRUE) == 0) {
    OBS_pr = ifelse(OBS_pr == 0, NA, OBS_pr)
  }
}
for (i in 1:26) {
  GCMchange_pr[, , i] = GCMpvardatalist_pr[[i]] - GCMhvardatalist_pr[[i]]
  LOCAchange_pr[, , i] = LOCApvardatalist_pr[[i]] - LOCAhvardatalist_pr[[i]]
  GCMproj_pr[, , i] = GCMpvardatalist_pr[[i]]
  LOCAproj_pr[, , i] = LOCApvardatalist_pr[[i]]
  GCMhist_pr[, , i] = GCMhvardatalist_pr[[i]]
  LOCAhist_pr[, , i] = LOCAhvardatalist_pr[[i]]
}
#######
# projected changes - _tmax
# Stack the 26 model fields into lon x lat x 26 arrays of historical values,
# projected values, and projected-minus-historical changes.
# Fix: removed the stray `ncol=` name inside the dim vector (silently ignored
# by array(), but misleading).
GCMchange_tmax = LOCAchange_tmax = GCMproj_tmax = LOCAproj_tmax = GCMhist_tmax = LOCAhist_tmax = array(NA, dim = c(length(lon), length(lat), 26))
# Entry 27 of the historical LOCA list is the observational field.
OBS_tmax = LOCAhvardatalist_tmax[[27]]
for (i in 1:26) {
  GCMchange_tmax[, , i] = GCMpvardatalist_tmax[[i]] - GCMhvardatalist_tmax[[i]]
  LOCAchange_tmax[, , i] = LOCApvardatalist_tmax[[i]] - LOCAhvardatalist_tmax[[i]]
  GCMproj_tmax[, , i] = GCMpvardatalist_tmax[[i]]
  LOCAproj_tmax[, , i] = LOCApvardatalist_tmax[[i]]
  GCMhist_tmax[, , i] = GCMhvardatalist_tmax[[i]]
  LOCAhist_tmax[, , i] = LOCAhvardatalist_tmax[[i]]
}
######
# prep weights
# Normalize each weighting scheme so the 26 model weights sum to one:
#   Wh = skill x independence (historical), Wc = skill x independence (change),
#   Ws = skill only.
.normalize_wts <- function(w) w / sum(w)
GCMweights$Wh <- .normalize_wts(GCMweights$Wuh * GCMweights$Wqh)
GCMweights$Wc <- .normalize_wts(GCMweights$Wuc * GCMweights$Wqc)
GCMweights$Ws <- .normalize_wts(GCMweights$Wqh)
LOCAweights$Wh <- .normalize_wts(LOCAweights$Wuh * LOCAweights$Wqh)
LOCAweights$Wc <- .normalize_wts(LOCAweights$Wuc * LOCAweights$Wqc)
LOCAweights$Ws <- .normalize_wts(LOCAweights$Wqh)
######
# Calculate historical means (weighted and unweighted) - pr
# Build lon x lat maps of the multi-model-mean historical precipitation under
# five weighting schemes: unweighted, skill-only (Ws), skill+independence from
# historical fields (Wh), skill+independence from change fields (Wc), and BMA.
GCMunweightedmean_hist_pr = apply(GCMhist_pr,c(1,2),mean,na.rm=TRUE)
LOCAunweightedmean_hist_pr = apply(LOCAhist_pr,c(1,2),mean,na.rm=TRUE)
# Placeholder initializations; every accumulator is fully overwritten at i==1.
GCMskillmean_hist_pr = GCMSIhmean_hist_pr = GCMSIcmean_hist_pr = GCMBMAmean_hist_pr = GCMunweightedmean_hist_pr
LOCAskillmean_hist_pr = LOCASIhmean_hist_pr = LOCASIcmean_hist_pr = LOCABMAmean_hist_pr = LOCAunweightedmean_hist_pr
# Accumulate weighted sums over the 26 models. Ws/Wh/Wc are normalized above,
# so the running sums are weighted means (BMA weights assumed normalized --
# verify upstream).
for(i in 1:26){
## skill mean
tmpG = GCMhist_pr[,,i]*GCMweights$Ws[i]
tmpL = LOCAhist_pr[,,i]*LOCAweights$Ws[i]
if(i==1){
GCMskillmean_hist_pr = tmpG
LOCAskillmean_hist_pr = tmpL
} else {
GCMskillmean_hist_pr = GCMskillmean_hist_pr+tmpG
LOCAskillmean_hist_pr = LOCAskillmean_hist_pr+tmpL
}
## skill+ind hist only
tmpG = GCMhist_pr[,,i]*GCMweights$Wh[i]
tmpL = LOCAhist_pr[,,i]*LOCAweights$Wh[i]
if(i==1){
GCMSIhmean_hist_pr = tmpG
LOCASIhmean_hist_pr = tmpL
} else {
GCMSIhmean_hist_pr = GCMSIhmean_hist_pr+tmpG
LOCASIhmean_hist_pr = LOCASIhmean_hist_pr+tmpL
}
## skill+ind hist and change
tmpG = GCMhist_pr[,,i]*GCMweights$Wc[i]
tmpL = LOCAhist_pr[,,i]*LOCAweights$Wc[i]
if(i==1){
GCMSIcmean_hist_pr = tmpG
LOCASIcmean_hist_pr = tmpL
} else {
GCMSIcmean_hist_pr = GCMSIcmean_hist_pr+tmpG
LOCASIcmean_hist_pr = LOCASIcmean_hist_pr+tmpL
}
## BMA hist and change
tmpG = GCMhist_pr[,,i]*GCMweights$BMA[i]
tmpL = LOCAhist_pr[,,i]*LOCAweights$BMA[i]
if(i==1){
GCMBMAmean_hist_pr = tmpG
LOCABMAmean_hist_pr = tmpL
} else {
GCMBMAmean_hist_pr = GCMBMAmean_hist_pr+tmpG
LOCABMAmean_hist_pr = LOCABMAmean_hist_pr+tmpL
}
}
# Bias maps: each weighted historical mean minus the observed field OBS_pr.
LOCAunweightedmean_bias_pr = LOCAunweightedmean_hist_pr-OBS_pr
LOCAskillmean_bias_pr = LOCAskillmean_hist_pr-OBS_pr
LOCASIhmean_bias_pr = LOCASIhmean_hist_pr-OBS_pr
LOCASIcmean_bias_pr = LOCASIcmean_hist_pr-OBS_pr
LOCABMAmean_bias_pr = LOCABMAmean_hist_pr-OBS_pr
GCMunweightedmean_bias_pr = GCMunweightedmean_hist_pr-OBS_pr
GCMskillmean_bias_pr = GCMskillmean_hist_pr-OBS_pr
GCMSIhmean_bias_pr = GCMSIhmean_hist_pr-OBS_pr
GCMSIcmean_bias_pr = GCMSIcmean_hist_pr-OBS_pr
GCMBMAmean_bias_pr = GCMBMAmean_hist_pr-OBS_pr
######
# Calculate historical means (weighted and unweighted) - tmax
# Same structure as the pr section above, applied to the tmax fields.
GCMunweightedmean_hist_tmax = apply(GCMhist_tmax,c(1,2),mean,na.rm=TRUE)
LOCAunweightedmean_hist_tmax = apply(LOCAhist_tmax,c(1,2),mean,na.rm=TRUE)
# Placeholder initializations; every accumulator is fully overwritten at i==1.
GCMskillmean_hist_tmax = GCMSIhmean_hist_tmax = GCMSIcmean_hist_tmax = GCMBMAmean_hist_tmax = GCMunweightedmean_hist_tmax
LOCAskillmean_hist_tmax = LOCASIhmean_hist_tmax = LOCASIcmean_hist_tmax = LOCABMAmean_hist_tmax = LOCAunweightedmean_hist_tmax
# Accumulate weighted sums over the 26 models.
for(i in 1:26){
## skill mean
tmpG = GCMhist_tmax[,,i]*GCMweights$Ws[i]
tmpL = LOCAhist_tmax[,,i]*LOCAweights$Ws[i]
if(i==1){
GCMskillmean_hist_tmax = tmpG
LOCAskillmean_hist_tmax = tmpL
} else {
GCMskillmean_hist_tmax = GCMskillmean_hist_tmax+tmpG
LOCAskillmean_hist_tmax = LOCAskillmean_hist_tmax+tmpL
}
## skill+ind hist only
tmpG = GCMhist_tmax[,,i]*GCMweights$Wh[i]
tmpL = LOCAhist_tmax[,,i]*LOCAweights$Wh[i]
if(i==1){
GCMSIhmean_hist_tmax = tmpG
LOCASIhmean_hist_tmax = tmpL
} else {
GCMSIhmean_hist_tmax = GCMSIhmean_hist_tmax+tmpG
LOCASIhmean_hist_tmax = LOCASIhmean_hist_tmax+tmpL
}
## skill+ind hist and change
tmpG = GCMhist_tmax[,,i]*GCMweights$Wc[i]
tmpL = LOCAhist_tmax[,,i]*LOCAweights$Wc[i]
if(i==1){
GCMSIcmean_hist_tmax = tmpG
LOCASIcmean_hist_tmax = tmpL
} else {
GCMSIcmean_hist_tmax = GCMSIcmean_hist_tmax+tmpG
LOCASIcmean_hist_tmax = LOCASIcmean_hist_tmax+tmpL
}
## BMA hist and change
tmpG = GCMhist_tmax[,,i]*GCMweights$BMA[i]
tmpL = LOCAhist_tmax[,,i]*LOCAweights$BMA[i]
if(i==1){
GCMBMAmean_hist_tmax = tmpG
LOCABMAmean_hist_tmax = tmpL
} else {
GCMBMAmean_hist_tmax = GCMBMAmean_hist_tmax+tmpG
LOCABMAmean_hist_tmax = LOCABMAmean_hist_tmax+tmpL
}
}
# Bias maps: each weighted historical mean minus the observed field OBS_tmax.
LOCAunweightedmean_bias_tmax = LOCAunweightedmean_hist_tmax-OBS_tmax
LOCAskillmean_bias_tmax = LOCAskillmean_hist_tmax-OBS_tmax
LOCASIhmean_bias_tmax = LOCASIhmean_hist_tmax-OBS_tmax
LOCASIcmean_bias_tmax = LOCASIcmean_hist_tmax-OBS_tmax
LOCABMAmean_bias_tmax = LOCABMAmean_hist_tmax-OBS_tmax
GCMunweightedmean_bias_tmax = GCMunweightedmean_hist_tmax-OBS_tmax
GCMskillmean_bias_tmax = GCMskillmean_hist_tmax-OBS_tmax
GCMSIhmean_bias_tmax = GCMSIhmean_hist_tmax-OBS_tmax
GCMSIcmean_bias_tmax = GCMSIcmean_hist_tmax-OBS_tmax
GCMBMAmean_bias_tmax = GCMBMAmean_hist_tmax-OBS_tmax
######
# Calculate change means (weighted and unweighted) - pr only
# Same weighting schemes as the historical-mean section, applied to the
# projected-minus-historical change fields.
GCMunweightedmean_change_pr = apply(GCMchange_pr,c(1,2),mean,na.rm=TRUE)
LOCAunweightedmean_change_pr = apply(LOCAchange_pr,c(1,2),mean,na.rm=TRUE)
# Placeholder initializations; every accumulator is fully overwritten at i==1.
GCMskillmean_change_pr = GCMSIhmean_change_pr = GCMSIcmean_change_pr = GCMBMAmean_change_pr = GCMunweightedmean_change_pr
LOCAskillmean_change_pr = LOCASIhmean_change_pr = LOCASIcmean_change_pr = LOCABMAmean_change_pr = LOCAunweightedmean_change_pr
# Accumulate weighted sums over the 26 models.
for(i in 1:26){
## skill mean
tmpG = GCMchange_pr[,,i]*GCMweights$Ws[i]
tmpL = LOCAchange_pr[,,i]*LOCAweights$Ws[i]
if(i==1){
GCMskillmean_change_pr = tmpG
LOCAskillmean_change_pr = tmpL
} else {
GCMskillmean_change_pr = GCMskillmean_change_pr+tmpG
LOCAskillmean_change_pr = LOCAskillmean_change_pr+tmpL
}
## skill+ind hist only
tmpG = GCMchange_pr[,,i]*GCMweights$Wh[i]
tmpL = LOCAchange_pr[,,i]*LOCAweights$Wh[i]
if(i==1){
GCMSIhmean_change_pr = tmpG
LOCASIhmean_change_pr = tmpL
} else {
GCMSIhmean_change_pr = GCMSIhmean_change_pr+tmpG
LOCASIhmean_change_pr = LOCASIhmean_change_pr+tmpL
}
## skill+ind hist and change
tmpG = GCMchange_pr[,,i]*GCMweights$Wc[i]
tmpL = LOCAchange_pr[,,i]*LOCAweights$Wc[i]
if(i==1){
GCMSIcmean_change_pr = tmpG
LOCASIcmean_change_pr = tmpL
} else {
GCMSIcmean_change_pr = GCMSIcmean_change_pr+tmpG
LOCASIcmean_change_pr = LOCASIcmean_change_pr+tmpL
}
## BMA hist and change
tmpG = GCMchange_pr[,,i]*GCMweights$BMA[i]
tmpL = LOCAchange_pr[,,i]*LOCAweights$BMA[i]
if(i==1){
GCMBMAmean_change_pr = tmpG
LOCABMAmean_change_pr = tmpL
} else {
GCMBMAmean_change_pr = GCMBMAmean_change_pr+tmpG
LOCABMAmean_change_pr = LOCABMAmean_change_pr+tmpL
}
}
######
# Calculate change means (weighted and unweighted) - tmax only
# Same structure as the pr change-mean section, applied to the tmax changes.
GCMunweightedmean_change_tmax = apply(GCMchange_tmax,c(1,2),mean,na.rm=TRUE)
LOCAunweightedmean_change_tmax = apply(LOCAchange_tmax,c(1,2),mean,na.rm=TRUE)
# Placeholder initializations; every accumulator is fully overwritten at i==1.
GCMskillmean_change_tmax = GCMSIhmean_change_tmax = GCMSIcmean_change_tmax = GCMBMAmean_change_tmax = GCMunweightedmean_change_tmax
LOCAskillmean_change_tmax = LOCASIhmean_change_tmax = LOCASIcmean_change_tmax = LOCABMAmean_change_tmax = LOCAunweightedmean_change_tmax
# Accumulate weighted sums over the 26 models.
for(i in 1:26){
## skill mean
tmpG = GCMchange_tmax[,,i]*GCMweights$Ws[i]
tmpL = LOCAchange_tmax[,,i]*LOCAweights$Ws[i]
if(i==1){
GCMskillmean_change_tmax = tmpG
LOCAskillmean_change_tmax = tmpL
} else {
GCMskillmean_change_tmax = GCMskillmean_change_tmax+tmpG
LOCAskillmean_change_tmax = LOCAskillmean_change_tmax+tmpL
}
## skill+ind hist only
tmpG = GCMchange_tmax[,,i]*GCMweights$Wh[i]
tmpL = LOCAchange_tmax[,,i]*LOCAweights$Wh[i]
if(i==1){
GCMSIhmean_change_tmax = tmpG
LOCASIhmean_change_tmax = tmpL
} else {
GCMSIhmean_change_tmax = GCMSIhmean_change_tmax+tmpG
LOCASIhmean_change_tmax = LOCASIhmean_change_tmax+tmpL
}
## skill+ind hist and change
tmpG = GCMchange_tmax[,,i]*GCMweights$Wc[i]
tmpL = LOCAchange_tmax[,,i]*LOCAweights$Wc[i]
if(i==1){
GCMSIcmean_change_tmax = tmpG
LOCASIcmean_change_tmax = tmpL
} else {
GCMSIcmean_change_tmax = GCMSIcmean_change_tmax+tmpG
LOCASIcmean_change_tmax = LOCASIcmean_change_tmax+tmpL
}
## BMA hist and change
tmpG = GCMchange_tmax[,,i]*GCMweights$BMA[i]
tmpL = LOCAchange_tmax[,,i]*LOCAweights$BMA[i]
if(i==1){
GCMBMAmean_change_tmax = tmpG
LOCABMAmean_change_tmax = tmpL
} else {
GCMBMAmean_change_tmax = GCMBMAmean_change_tmax+tmpG
LOCABMAmean_change_tmax = LOCABMAmean_change_tmax+tmpL
}
}
######
# Calculate change variance (weighted and unweighted) - pr only
# Placeholder initializations: cells skipped below (all-NA across the 26
# models) keep whatever the mean-change template holds for them.
GCMunweightedvar_change_pr = GCMskillvar_change_pr = GCMSIhvar_change_pr = GCMSIcvar_change_pr = GCMBMAvar_change_pr = GCMunweightedmean_change_pr
LOCAunweightedvar_change_pr = LOCAskillvar_change_pr = LOCASIhvar_change_pr = LOCASIcvar_change_pr = LOCABMAvar_change_pr = LOCAunweightedmean_change_pr
# Cell-by-cell variance of the 26 model changes under each weighting scheme.
# NOTE(review): weighted.var is not a base R function -- presumably defined
# earlier in this script or in an attached package; confirm before running
# this section standalone. The LOCA branches reuse the GCM-based data guard.
for(R in 1:length(lon)){
for(C in 1:length(lat)){
if(all(is.na(GCMchange_pr[R,C,])==TRUE)==FALSE){
GCMunweightedvar_change_pr[R,C] = var(GCMchange_pr[R,C,],na.rm=TRUE)
LOCAunweightedvar_change_pr[R,C] = var(LOCAchange_pr[R,C,],na.rm=TRUE)
GCMskillvar_change_pr[R,C] = weighted.var(x=GCMchange_pr[R,C,],w=GCMweights$Ws,na.rm=TRUE)
LOCAskillvar_change_pr[R,C] = weighted.var(x=LOCAchange_pr[R,C,],w=LOCAweights$Ws,na.rm=TRUE)
GCMSIhvar_change_pr[R,C] = weighted.var(x=GCMchange_pr[R,C,],w=GCMweights$Wh,na.rm=TRUE)
LOCASIhvar_change_pr[R,C] = weighted.var(x=LOCAchange_pr[R,C,],w=LOCAweights$Wh,na.rm=TRUE)
GCMSIcvar_change_pr[R,C] = weighted.var(x=GCMchange_pr[R,C,],w=GCMweights$Wc,na.rm=TRUE)
LOCASIcvar_change_pr[R,C] = weighted.var(x=LOCAchange_pr[R,C,],w=LOCAweights$Wc,na.rm=TRUE)
GCMBMAvar_change_pr[R,C] = weighted.var(x=GCMchange_pr[R,C,],w=GCMweights$BMA,na.rm=TRUE)
LOCABMAvar_change_pr[R,C] = weighted.var(x=LOCAchange_pr[R,C,],w=LOCAweights$BMA,na.rm=TRUE)
# NOTE(review): fires once per data cell -- very verbose on large grids.
message("Finished calcs for R: ",R," and C: ",C)
}
}
}
######
# Calculate change variance (weighted and unweighted) - tmax only
# Placeholder initializations: cells skipped below (all-NA across the 26
# models) keep whatever the mean-change template holds for them.
GCMunweightedvar_change_tmax = GCMskillvar_change_tmax = GCMSIhvar_change_tmax = GCMSIcvar_change_tmax = GCMBMAvar_change_tmax = GCMunweightedmean_change_tmax
LOCAunweightedvar_change_tmax = LOCAskillvar_change_tmax = LOCASIhvar_change_tmax = LOCASIcvar_change_tmax = LOCABMAvar_change_tmax = LOCAunweightedmean_change_tmax
# Cell-by-cell variance of the 26 model changes under each weighting scheme.
# NOTE(review): weighted.var is not a base R function -- presumably defined
# earlier in this script or in an attached package; confirm before running
# this section standalone. The LOCA branches reuse the GCM-based data guard.
for(R in 1:length(lon)){
for(C in 1:length(lat)){
if(all(is.na(GCMchange_tmax[R,C,])==TRUE)==FALSE){
GCMunweightedvar_change_tmax[R,C] = var(GCMchange_tmax[R,C,],na.rm=TRUE)
LOCAunweightedvar_change_tmax[R,C] = var(LOCAchange_tmax[R,C,],na.rm=TRUE)
GCMskillvar_change_tmax[R,C] = weighted.var(x=GCMchange_tmax[R,C,],w=GCMweights$Ws,na.rm=TRUE)
LOCAskillvar_change_tmax[R,C] = weighted.var(x=LOCAchange_tmax[R,C,],w=LOCAweights$Ws,na.rm=TRUE)
GCMSIhvar_change_tmax[R,C] = weighted.var(x=GCMchange_tmax[R,C,],w=GCMweights$Wh,na.rm=TRUE)
LOCASIhvar_change_tmax[R,C] = weighted.var(x=LOCAchange_tmax[R,C,],w=LOCAweights$Wh,na.rm=TRUE)
GCMSIcvar_change_tmax[R,C] = weighted.var(x=GCMchange_tmax[R,C,],w=GCMweights$Wc,na.rm=TRUE)
LOCASIcvar_change_tmax[R,C] = weighted.var(x=LOCAchange_tmax[R,C,],w=LOCAweights$Wc,na.rm=TRUE)
GCMBMAvar_change_tmax[R,C] = weighted.var(x=GCMchange_tmax[R,C,],w=GCMweights$BMA,na.rm=TRUE)
LOCABMAvar_change_tmax[R,C] = weighted.var(x=LOCAchange_tmax[R,C,],w=LOCAweights$BMA,na.rm=TRUE)
# NOTE(review): fires once per data cell -- very verbose on large grids.
message("Finished calcs for R: ",R," and C: ",C)
}
}
}
######
# gather means - pr only
# Collapse the gridded results to domain-average scalars for each of the five
# weighting schemes (unweighted, skill, SI-h, SI-c, BMA) and each ensemble
# (CMIP5 GCMs vs LOCA), then assemble a summary table and save it.
histmeans_GCM_pr = c(mean(GCMunweightedmean_hist_pr,na.rm=TRUE),mean(GCMskillmean_hist_pr,na.rm=TRUE),mean(GCMSIhmean_hist_pr,na.rm=TRUE),mean(GCMSIcmean_hist_pr,na.rm=TRUE),mean(GCMBMAmean_hist_pr,na.rm=TRUE))
histmeans_LOCA_pr = c(mean(LOCAunweightedmean_hist_pr,na.rm=TRUE),mean(LOCAskillmean_hist_pr,na.rm=TRUE),mean(LOCASIhmean_hist_pr,na.rm=TRUE),mean(LOCASIcmean_hist_pr,na.rm=TRUE),mean(LOCABMAmean_hist_pr,na.rm=TRUE))
changemeans_GCM_pr = c(mean(GCMunweightedmean_change_pr,na.rm=TRUE),mean(GCMskillmean_change_pr,na.rm=TRUE),mean(GCMSIhmean_change_pr,na.rm=TRUE),mean(GCMSIcmean_change_pr,na.rm=TRUE),mean(GCMBMAmean_change_pr,na.rm=TRUE))
changemeans_LOCA_pr = c(mean(LOCAunweightedmean_change_pr,na.rm=TRUE),mean(LOCAskillmean_change_pr,na.rm=TRUE),mean(LOCASIhmean_change_pr,na.rm=TRUE),mean(LOCASIcmean_change_pr,na.rm=TRUE),mean(LOCABMAmean_change_pr,na.rm=TRUE))
changevars_GCM_pr = c(mean(GCMunweightedvar_change_pr,na.rm=TRUE),mean(GCMskillvar_change_pr,na.rm=TRUE),mean(GCMSIhvar_change_pr,na.rm=TRUE),mean(GCMSIcvar_change_pr,na.rm=TRUE),mean(GCMBMAvar_change_pr,na.rm=TRUE))
changevars_LOCA_pr = c(mean(LOCAunweightedvar_change_pr,na.rm=TRUE),mean(LOCAskillvar_change_pr,na.rm=TRUE),mean(LOCASIhvar_change_pr,na.rm=TRUE),mean(LOCASIcvar_change_pr,na.rm=TRUE),mean(LOCABMAvar_change_pr,na.rm=TRUE))
obs = mean(OBS_pr,na.rm=TRUE)
region = stateapplied
# Spatial RMSE of each weighted historical mean against observations.
RMSE_GCM_pr = c(sqrt(mean((GCMunweightedmean_bias_pr)^2,na.rm=TRUE)),
sqrt(mean((GCMskillmean_bias_pr)^2,na.rm=TRUE)),
sqrt(mean((GCMSIhmean_bias_pr)^2,na.rm=TRUE)),
sqrt(mean((GCMSIcmean_bias_pr)^2,na.rm=TRUE)),
sqrt(mean((GCMBMAmean_bias_pr)^2,na.rm=TRUE)))
RMSE_LOCA_pr = c(sqrt(mean((LOCAunweightedmean_bias_pr)^2,na.rm=TRUE)),
sqrt(mean((LOCAskillmean_bias_pr)^2,na.rm=TRUE)),
sqrt(mean((LOCASIhmean_bias_pr)^2,na.rm=TRUE)),
sqrt(mean((LOCASIcmean_bias_pr)^2,na.rm=TRUE)),
sqrt(mean((LOCABMAmean_bias_pr)^2,na.rm=TRUE)))
histmeans = c(histmeans_GCM_pr,histmeans_LOCA_pr)
changemeans = c(changemeans_GCM_pr,changemeans_LOCA_pr)
changevars = c(changevars_GCM_pr,changevars_LOCA_pr)
rmse = c(RMSE_GCM_pr,RMSE_LOCA_pr)
group = rep(c("unweighted","skill","SI-h","SI-c","BMA"),2)
DS = rep(c("CMIP5","LOCA"),each=5)
meansframe_pr = data.frame(group,region,DS,histmeans,obs,changemeans,changevars,rmse)
# Single table; the original rbind-onto-NULL accumulator was vestigial.
meansdat_pr = meansframe_pr
meansdat_pr$bias = meansdat_pr$histmeans-meansdat_pr$obs
# Idiom fix: paste0() replaces paste(..., sep="").
save(list="meansdat_pr",file=paste0("WeightedMeansVars_pr_",var,"_WU",weightingused,"_SA",stateapplied,"_wBMA.Rdata"))
######
# gather means - tmax only
# Collapse the gridded results to domain-average scalars for each of the five
# weighting schemes (unweighted, skill, SI-h, SI-c, BMA) and each ensemble
# (CMIP5 GCMs vs LOCA), then assemble a summary table.
# NOTE(review): unlike the pr section, no save() call follows -- confirm
# whether persisting meansdat_tmax was intended.
histmeans_GCM_tmax = c(mean(GCMunweightedmean_hist_tmax,na.rm=TRUE),mean(GCMskillmean_hist_tmax,na.rm=TRUE),mean(GCMSIhmean_hist_tmax,na.rm=TRUE),mean(GCMSIcmean_hist_tmax,na.rm=TRUE),mean(GCMBMAmean_hist_tmax,na.rm=TRUE))
histmeans_LOCA_tmax = c(mean(LOCAunweightedmean_hist_tmax,na.rm=TRUE),mean(LOCAskillmean_hist_tmax,na.rm=TRUE),mean(LOCASIhmean_hist_tmax,na.rm=TRUE),mean(LOCASIcmean_hist_tmax,na.rm=TRUE),mean(LOCABMAmean_hist_tmax,na.rm=TRUE))
changemeans_GCM_tmax = c(mean(GCMunweightedmean_change_tmax,na.rm=TRUE),mean(GCMskillmean_change_tmax,na.rm=TRUE),mean(GCMSIhmean_change_tmax,na.rm=TRUE),mean(GCMSIcmean_change_tmax,na.rm=TRUE),mean(GCMBMAmean_change_tmax,na.rm=TRUE))
changemeans_LOCA_tmax = c(mean(LOCAunweightedmean_change_tmax,na.rm=TRUE),mean(LOCAskillmean_change_tmax,na.rm=TRUE),mean(LOCASIhmean_change_tmax,na.rm=TRUE),mean(LOCASIcmean_change_tmax,na.rm=TRUE),mean(LOCABMAmean_change_tmax,na.rm=TRUE))
changevars_GCM_tmax = c(mean(GCMunweightedvar_change_tmax,na.rm=TRUE),mean(GCMskillvar_change_tmax,na.rm=TRUE),mean(GCMSIhvar_change_tmax,na.rm=TRUE),mean(GCMSIcvar_change_tmax,na.rm=TRUE),mean(GCMBMAvar_change_tmax,na.rm=TRUE))
changevars_LOCA_tmax = c(mean(LOCAunweightedvar_change_tmax,na.rm=TRUE),mean(LOCAskillvar_change_tmax,na.rm=TRUE),mean(LOCASIhvar_change_tmax,na.rm=TRUE),mean(LOCASIcvar_change_tmax,na.rm=TRUE),mean(LOCABMAvar_change_tmax,na.rm=TRUE))
obs = mean(OBS_tmax,na.rm=TRUE)
region = stateapplied
# Spatial RMSE of each weighted historical mean against observations.
RMSE_GCM_tmax = c(sqrt(mean((GCMunweightedmean_bias_tmax)^2,na.rm=TRUE)),
sqrt(mean((GCMskillmean_bias_tmax)^2,na.rm=TRUE)),
sqrt(mean((GCMSIhmean_bias_tmax)^2,na.rm=TRUE)),
sqrt(mean((GCMSIcmean_bias_tmax)^2,na.rm=TRUE)),
sqrt(mean((GCMBMAmean_bias_tmax)^2,na.rm=TRUE)))
RMSE_LOCA_tmax = c(sqrt(mean((LOCAunweightedmean_bias_tmax)^2,na.rm=TRUE)),
sqrt(mean((LOCAskillmean_bias_tmax)^2,na.rm=TRUE)),
sqrt(mean((LOCASIhmean_bias_tmax)^2,na.rm=TRUE)),
sqrt(mean((LOCASIcmean_bias_tmax)^2,na.rm=TRUE)),
sqrt(mean((LOCABMAmean_bias_tmax)^2,na.rm=TRUE)))
histmeans = c(histmeans_GCM_tmax,histmeans_LOCA_tmax)
changemeans = c(changemeans_GCM_tmax,changemeans_LOCA_tmax)
changevars = c(changevars_GCM_tmax,changevars_LOCA_tmax)
rmse = c(RMSE_GCM_tmax,RMSE_LOCA_tmax)
group = rep(c("unweighted","skill","SI-h","SI-c","BMA"),2)
DS = rep(c("CMIP5","LOCA"),each=5)
meansframe_tmax = data.frame(group,region,DS,histmeans,obs,changemeans,changevars,rmse)
# Single table; the original rbind-onto-NULL accumulator was vestigial.
meansdat_tmax = meansframe_tmax
meansdat_tmax$bias = meansdat_tmax$histmeans-meansdat_tmax$obs
|
# Integration test: download task 1 from OpenML and validate its structure.
context("getOMLTask")
test_that("getOMLTask", {
  known.measures = listOMLEvaluationMeasures(session.hash)$name
  oml.task = getOMLTask(1L, session.hash)
  # The task and its embedded data set carry the expected S3 classes.
  expect_is(oml.task, "OMLTask")
  expect_is(oml.task$input$data.set, "OMLDataSet")
  expect_true(is.data.frame(oml.task$input$data.set$data))
  # At most one target feature, and never NA.
  targets = oml.task$input$data.set$target.features
  expect_true(is.character(targets) && length(targets) %in% 0:1 && !is.na(targets))
  # Every evaluation measure is known, possibly after space -> underscore.
  eval.measures = oml.task$input$evaluation.measures
  expect_true(all(eval.measures %in% known.measures | str_replace_all(eval.measures, " ", "_") %in% known.measures))
  expect_is(oml.task$output$predictions, "list")
  # A nonexistent task id raises a server-side error.
  expect_error(getOMLTask(1231109283L, session.hash), "Unknown task")
})
|
/tests/testthat/test_base_getOMLTask.R
|
no_license
|
parthasen/r-6
|
R
| false
| false
| 636
|
r
|
# Integration test (duplicate dataset entry): fetch OpenML task 1 and check it.
context("getOMLTask")
test_that("getOMLTask", {
  measure.names = listOMLEvaluationMeasures(session.hash)$name
  fetched = getOMLTask(1L, session.hash)
  # Structural checks on the task object and its data set.
  expect_is(fetched, "OMLTask")
  expect_is(fetched$input$data.set, "OMLDataSet")
  expect_true(is.data.frame(fetched$input$data.set$data))
  # Zero or one non-NA character target feature.
  target.feats = fetched$input$data.set$target.features
  expect_true(is.character(target.feats) && length(target.feats) %in% 0:1 && !is.na(target.feats))
  # Measures must match the server list, allowing space -> underscore renames.
  task.measures = fetched$input$evaluation.measures
  expect_true(all(task.measures %in% measure.names | str_replace_all(task.measures, " ", "_") %in% measure.names))
  expect_is(fetched$output$predictions, "list")
  # Unknown task ids must error.
  expect_error(getOMLTask(1231109283L, session.hash), "Unknown task")
})
|
# Read Tastyworks trade-confirmation PDFs and persist the parsed transactions.
library(rebus)
library(dplyr)
library(tastyworks)
# Location of the confirmations
path <- file.path("~", "Documents", "Options Trading", "Tastyworks", "Confirmations")
# Confirmation file name example: "2017-08-30-5WT38480-confirmation.pdf"
filename_pattern <- START %R% YMD %R% "-" %R%
  repeated(ALNUM, 8) %R% "-confirmation" %R%
  DOT %R% "pdf" %R% END
# Get a list of confirmation files
files <- list.files(path = path.expand(path),
                    pattern = filename_pattern,
                    full.names = TRUE)
# Bug fix: the original `files %>% tastyworks::read_confirmations(files)`
# piped `files` in as the first argument AND passed it again positionally,
# supplying the file list twice. Pass it exactly once.
transactions <- tastyworks::read_confirmations(files)
# Save transactions to a file
saveRDS(transactions, file = "transactions.rds")
|
/transactions.R
|
no_license
|
tourko/profitzone
|
R
| false
| false
| 678
|
r
|
# Read Tastyworks trade-confirmation PDFs and persist the parsed transactions.
library(rebus)
library(dplyr)
library(tastyworks)
# Location of the confirmations
path <- file.path("~", "Documents", "Options Trading", "Tastyworks", "Confirmations")
# Confirmation file name example: "2017-08-30-5WT38480-confirmation.pdf"
filename_pattern <- START %R% YMD %R% "-" %R%
  repeated(ALNUM, 8) %R% "-confirmation" %R%
  DOT %R% "pdf" %R% END
# Get a list of confirmation files
files <- list.files(path = path.expand(path),
                    pattern = filename_pattern,
                    full.names = TRUE)
# Bug fix: the original `files %>% tastyworks::read_confirmations(files)`
# piped `files` in as the first argument AND passed it again positionally,
# supplying the file list twice. Pass it exactly once.
transactions <- tastyworks::read_confirmations(files)
# Save transactions to a file
saveRDS(transactions, file = "transactions.rds")
|
#' @name dtMotifMatch
#' @title Compute the augmented matching subsequence on SNP and reference allele
#' s.
#' @description Calculate the best matching augmented subsequences on both SNP
#' and reference alleles for motifs. Obtain extra unmatching position on the
#' best matching augmented subsequence of the reference and SNP alleles.
#' @param motif.lib A list of named position weight matrices.
#' @param snp.tbl A data.frame with the following information:
#' \tabular{cc}{
#' snpid \tab SNP id.\cr
#' ref_seq \tab Reference allele nucleobase sequence.\cr
#' snp_seq \tab SNP allele nucleobase sequence.\cr
#' ref_seq_rev \tab Reference allele nucleobase sequence on the reverse
#' strand.\cr
#' snp_seq_rev \tab SNP allele nucleobase sequence on the reverse strand.\cr}
#' @param motif.scores A data.frame with the following information:
#' \tabular{cc}{
#' motif \tab Name of the motif.\cr
#' motif_len \tab Length of the motif.\cr
#' ref_start, ref_end, ref_strand \tab Location of the best matching subsequence
#' on the reference allele.\cr
#' snp_start, snp_end, snp_strand \tab Location of the best matching subsequence
#' on the SNP allele.\cr
#' log_lik_ref \tab Log-likelihood score for the reference allele.\cr
#' log_lik_snp \tab Log-likelihood score for the SNP allele.\cr
#' log_lik_ratio \tab The log-likelihood ratio.\cr
#' log_enhance_odds \tab Difference in log-likelihood ratio between SNP allele
#' and reference allele based on the best matching subsequence on the reference
#' allele.\cr
#' log_reduce_odds \tab Difference in log-likelihood ratio between reference
#' allele and SNP allele based on the best matching subsequence on the SNP
#' allele.\cr
#' }
#' @param snpids A subset of snpids to compute the subsequences. Default: NULL,
#' when all snps are computed.
#' @param motifs A subset of motifs to compute the subsequences. Default: NULL,
#' when all motifs are computed.
#' @param ncores The number of cores used for parallel computing. Default: 2
#' @return A data.frame containing all columns from the function,
#' \code{\link{MatchSubsequence}}. In addition, the following columns are added:
#' \tabular{ll}{
#' snp_ref_start, snp_ref_end, snp_ref_length \tab Location and Length of the
#' best matching augmented subsequence on both the reference and SNP allele.\cr
#' ref_aug_match_seq_forward \tab Best matching augmented subsequence or its
#' corresponding sequence to the forward strand on the reference allele.\cr
#' snp_aug_match_seq_forward \tab Best matching augmented subsequence or its
#' corresponding sequence to the forward strand on the SNP allele.\cr
#' ref_aug_match_seq_reverse \tab Best matching augmented subsequence or its
#' corresponding sequence to the reverse strand on the reference allele.\cr
#' snp_aug_match_seq_reverse \tab Best matching augmented subsequence or its
#' corresponding sequence to the reverse strand on the SNP allele.\cr
#' ref_location \tab SNP location of the best matching augmented subsequence on
#' the reference allele. Starting from zero. \cr
#' snp_location \tab SNP location of the best matching augmented subsequence on
#' the SNP allele. Starting from zero. \cr
#' ref_extra_pwm_left \tab Left extra unmatching position on the best matching
#' augmented subsequence of the reference allele. \cr
#' ref_extra_pwm_right \tab Right extra unmatching position on the best matching
#' augmented subsequence of the reference allele. \cr
#' snp_extra_pwm_left \tab Left extra unmatching position on the best matching
#' augmented subsequence of the SNP allele. \cr
#' snp_extra_pwm_right \tab Right extra unmatching position on the best matching
#' augmented subsequence of the SNP allele. \cr
#' }
#' @author Sunyoung Shin\email{sunyoung.shin@@utdallas.edu}
#' @examples
#' data(example)
#' dtMotifMatch(motif_scores$snp.tbl, motif_scores$motif.scores,
#' motif.lib = motif_library)
#' @import data.table
#' @export
dtMotifMatch <-
  function(snp.tbl,
           motif.scores,
           snpids = NULL,
           motifs = NULL,
           motif.lib,
           ncores = 2) {
    # Validate the optional snpid / motif filters: each must be NULL or a
    # character vector (checkSNPids / checkMotifs return TRUE on failure),
    # and every requested id must already exist in motif.scores.
    if (checkSNPids(snpids))
    {
      stop("snpids must be a vector of class character or NULL.")
    } else if (checkMotifs(motifs)) {
      stop("motifs must be a vector of class character or NULL.")
    }
    if (length(setdiff(snpids, motif.scores$snpid)) != 0)
    {
      stop("snpids are not found in motif.scores.")
    } else if (length(setdiff(motifs, motif.scores$motif)) != 0) {
      stop("motifs are not found in motif.scores.")
    }
    #warning for ncores, motif.lib etc.
    snp.tbl <- as.data.table(snp.tbl)
    # Cap the worker count at the number of (snpid, motif) pairs; if that
    # product is 0 (i.e. no filter supplied), fall back to the requested ncores.
    ncores.v1 <- min(ncores, length(snpids) * length(motifs))
    ncores.v2 <- ifelse(ncores.v1 == 0, ncores, ncores.v1)
    sequence.half.window.size <- (nchar(snp.tbl[1, ref_seq]) - 1) / 2
    # Best-matching motif subsequences on both alleles (delegated helper).
    motif.match <-
      MatchSubsequence(
        snp.tbl = snp.tbl,
        motif.scores = motif.scores,
        snpids = snpids,
        motifs = motifs,
        ncores = ncores.v2,
        motif.lib = motif.lib
      )
    motif.match.dt <- as.data.table(motif.match)
    ##Augmentation of SNP and reference sequences###
    # Pre-declare the data.table NSE column names as NULL locals to silence
    # "no visible binding for global variable" notes from R CMD check.
    len_seq <-
      snpid <-
      snp_ref_start <-
      snp_ref_end <-
      snp_ref_length <-
      ref_start <-
      snp_start <-
      ref_end <-
      snp_end <-
      ref_seq <-
      snp_seq <-
      ref_strand <-
      ref_location <-
      snp_strand <-
      snp_location <-
      ref_extra_pwm_left <-
      ref_extra_pwm_right <-
      snp_extra_pwm_left <-
      snp_extra_pwm_right <-
      ref_aug_match_seq_forward <-
      ref_aug_match_seq_reverse <-
      snp_aug_match_seq_forward <- snp_aug_match_seq_reverse <- NULL
    motif.match.dt[, len_seq := nchar(ref_seq)]
    # Augmented window = union of the ref- and snp-allele best-match intervals.
    motif.match.dt[, snp_ref_start := apply(cbind(ref_start, snp_start), 1, min)]
    motif.match.dt[, snp_ref_end := apply(cbind(ref_end, snp_end), 1, max)]
    motif.match.dt[, snp_ref_length := snp_ref_end - snp_ref_start + 1]
    # Forward-strand augmented subsequences and their reverse complements.
    motif.match.dt[, ref_aug_match_seq_forward := substr(ref_seq, snp_ref_start, snp_ref_end)]
    motif.match.dt[, ref_aug_match_seq_reverse := apply(as.matrix(ref_aug_match_seq_forward), 1, .find_reverse)]
    motif.match.dt[, snp_aug_match_seq_forward := substr(snp_seq, snp_ref_start, snp_ref_end)]
    motif.match.dt[, snp_aug_match_seq_reverse := apply(as.matrix(snp_aug_match_seq_forward), 1, .find_reverse)]
    ##The starting position of the motif in the augmented sequences
    # The SNP sits at the sequence midpoint (len_seq - 1)/2 + 1; translate that
    # to a zero-based offset within the augmented window, per strand.
    motif.match.dt[ref_strand == "+", ref_location := (len_seq - 1) / 2 + 1 - snp_ref_start]
    motif.match.dt[ref_strand == "-", ref_location := snp_ref_end - (len_seq - 1) / 2 - 1]
    motif.match.dt[snp_strand == "+", snp_location := (len_seq - 1) / 2 +
                     1 - snp_ref_start]
    motif.match.dt[snp_strand == "-", snp_location := snp_ref_end - (len_seq -
                                                                       1) / 2 - 1]
    motif.match.dt[, len_seq := NULL]
    ##PWM Location Adjustment Value for reference and SNP
    # Unmatched positions flanking the PWM inside the augmented window,
    # left/right swapped on the reverse strand.
    motif.match.dt[ref_strand == "+", ref_extra_pwm_left := ref_start - snp_ref_start]
    motif.match.dt[ref_strand == "-", ref_extra_pwm_left := snp_ref_end -
                     ref_end]
    motif.match.dt[ref_strand == "+", ref_extra_pwm_right := snp_ref_end -
                     ref_end]
    motif.match.dt[ref_strand == "-", ref_extra_pwm_right := ref_start - snp_ref_start]
    motif.match.dt[snp_strand == "+", snp_extra_pwm_left := snp_start - snp_ref_start]
    motif.match.dt[snp_strand == "-", snp_extra_pwm_left := snp_ref_end -
                     snp_end]
    motif.match.dt[snp_strand == "+", snp_extra_pwm_right := snp_ref_end -
                     snp_end]
    motif.match.dt[snp_strand == "-", snp_extra_pwm_right := snp_start - snp_ref_start]
    # Key by snpid for fast downstream lookups.
    setkey(motif.match.dt, snpid)
    return(motif.match.dt)
  }
#' @name plotMotifMatch
#' @title Plot sequence logos of the position weight matrix of the motif and
#' sequences of its corresponding best matching augmented subsequence on the
#' reference and SNP allele.
#' @description Plot the best matching augmented subsequences on the reference
#' and SNP alleles. Plot sequence logos of the position weight matrix of the
#' motif to the corresponding positions of the best matching subsequences on the
#' references and SNP alleles.
#' @param motif.match a single row of dtMotifMatch output in data.frame format
#' @param motif.lib A list of position weight matrices
#' @param cex.main The size of the main title.
#' @param ... Other parameters passed to plotMotifLogo.
#' @return Sequence logo stacks: Reference subsequences, sequence logo of
#' reference allele matching position weight matrix, SNP subsequences, sequence
#' logo of SNP allele matching position weight matrix
#' @author Sunyoung Shin\email{sunyoung.shin@@utdallas.edu}
#' @examples
#' data(example)
#' plotMotifMatch(motif_match, motif.lib = motif_library)
#' @import grid
#' @importFrom motifStack plotMotifLogo pcm2pfm
#' @importFrom grDevices dev.off pdf
#' @importFrom stats quantile var
#' @importFrom utils data read.table write.table
#' @export
plotMotifMatch <-
function(motif.match, motif.lib, cex.main = 2, ...) {
if (!is(motif.match$snpid, "character") |
length(motif.match$snpid) != 1) {
stop("snpid must be a character")
}
if (!is(motif.match$motif, "character") |
length(motif.match$motif) != 1) {
stop("motif must be a character")
}
if (sum(!motif.match$motif %in% names(motif.lib)) > 0) {
stop("The motif is not included in 'motif.lib'.")
}
if (nrow(motif.match) > 1) {
stop(paste("Pick a single row of dtMotifMatch output."))
}
motif.pwm <- t(get(motif.match$motif, motif.lib))
##Convert ACGT to 1234
codes <- seq(4)
names(codes) <- c("A", "C", "G", "T")
ref_aug_match_seq_forward_code <-
codes[strsplit(motif.match$ref_aug_match_seq_forward, "")[[1]]]
ref_aug_match_seq_reverse_code <-
codes[strsplit(motif.match$ref_aug_match_seq_reverse, "")[[1]]]
snp_aug_match_seq_forward_code <-
codes[strsplit(motif.match$snp_aug_match_seq_forward, "")[[1]]]
snp_aug_match_seq_reverse_code <-
codes[strsplit(motif.match$snp_aug_match_seq_reverse, "")[[1]]]
##Convert 1234 to (1000)(0100)(0010)(0001)
codes.vec <- diag(4)
rownames(codes.vec) <- c("A", "C", "G", "T")
ref_aug_match_pwm_forward <-
mapply(function(i)
codes.vec[, i],
as.list(ref_aug_match_seq_forward_code))
ref_aug_match_pwm_reverse <-
mapply(function(i)
codes.vec[, i],
as.list(ref_aug_match_seq_reverse_code))
snp_aug_match_pwm_forward <-
mapply(function(i)
codes.vec[, i],
as.list(snp_aug_match_seq_forward_code))
snp_aug_match_pwm_reverse <-
mapply(function(i)
codes.vec[, i],
as.list(snp_aug_match_seq_reverse_code))
##(3,2) to Augmented PWM: ___PWM__
ref_aug_pwm <-
cbind(
matrix(0, 4, motif.match$ref_extra_pwm_left),
motif.pwm,
matrix(0, 4, motif.match$ref_extra_pwm_right)
)
rownames(ref_aug_pwm) <- c("A", "C", "G", "T")
snp_aug_pwm <-
cbind(
matrix(0, 4, motif.match$snp_extra_pwm_left),
motif.pwm,
matrix(0, 4, motif.match$snp_extra_pwm_right)
)
rownames(snp_aug_pwm) <- c("A", "C", "G", "T")
snp_loc <- motif.match$ref_location
revert.columns <- function(mat) {
mat[, rev(seq(ncol(mat)))]
}
ref_aug_match_pwm <- ref_aug_match_pwm_forward
snp_aug_match_pwm <- snp_aug_match_pwm_forward
if (motif.match$ref_strand == "-") {
ref_aug_pwm <- revert.columns(ref_aug_pwm)
snp_loc <- ncol(ref_aug_match_pwm_forward) - 1 - snp_loc
ref_aug_match_pwm <- ref_aug_match_pwm_reverse
}
if (motif.match$snp_strand == "-") {
snp_aug_pwm <- revert.columns(snp_aug_pwm)
snp_aug_match_pwm <- snp_aug_match_pwm_reverse
}
pushViewport(viewport(
y = unit(.5, "npc") - unit(2, "lines"),
height = unit(1, "npc") - unit(3, "lines")
))
pushViewport(viewport(y = .875, height = .25))
plotMotifLogo(
pcm2pfm(ref_aug_pwm),
"Best match to the reference genome",
yaxis = FALSE,
xaxis = FALSE,
xlab = "",
ylab = "PWM",
newpage = FALSE,
margins = c(1.5, 3, 2, 2)
)
if (motif.match$ref_strand == '+') {
grid.lines(
x = c(
convertUnit(unit(3, "lines"), "npc", valueOnly = TRUE),
1 - convertUnit(unit(2, "lines"), "npc", valueOnly = TRUE)
),
y = unit(1, "lines"),
gp = gpar(col = "blue", lwd = 1.5, xpd = NA),
arrow = arrow(
length = unit(0.1, "inches"),
angle = 15,
ends = "last"
)
)
grid.text(
"3'",
x = unit(1, "npc") - unit(1, "lines"),
gp = gpar(col = "blue", cex = 1),
y = unit(.5, "lines")
)
grid.text(
"5'",
x = unit(2, "lines"),
gp = gpar(col = "blue", cex = 1),
y = unit(.5, "lines")
)
} else {
grid.lines(
x = c(
convertUnit(unit(3, "lines"), "npc", valueOnly = TRUE),
1 - convertUnit(unit(2, "lines"), "npc", valueOnly = TRUE)
),
y = unit(1, "lines"),
gp = gpar(col = "blue", lwd = 1.5, xpd = NA),
arrow = arrow(
length = unit(0.1, "inches"),
angle = 15,
ends = "first"
)
)
grid.text(
"5'",
x = unit(1, "npc") - unit(1, "lines"),
gp = gpar(col = "blue", cex = 1),
y = unit(.5, "lines")
)
grid.text(
"3'",
x = unit(2, "lines"),
gp = gpar(col = "blue", cex = 1),
y = unit(.5, "lines")
)
}
popViewport()
pushViewport(viewport(y = .625, height = .25))
#par(mar = c(4, 3, 1.5, 2))
plotMotifLogo(
pcm2pfm(ref_aug_match_pwm),
font = "mono,Courier",
yaxis = FALSE,
xlab = "",
ylab = paste("(", motif.match$ref_strand, ")", sep = ""),
newpage = FALSE,
margins = c(2, 3, 1.5, 2)
)
pushViewport(plotViewport(margins = c(2, 3, 1.5, 2)))
grid.rect(
x = (snp_loc + .5) / motif.match$snp_ref_length,
width = 1 / motif.match$snp_ref_length,
gp = gpar(
col = "blue",
lty = 3,
lwd = 2,
fill = NA
)
)
popViewport()
if (motif.match$ref_strand == "+") {
grid.text(
"3'",
x = unit(1, "npc") - unit(1, "lines"),
gp = gpar(col = "blue", cex = 1),
y = unit(2.5, "lines")
)
grid.text(
"5'",
x = unit(2, "lines"),
gp = gpar(col = "blue", cex = 1),
y = unit(2.5, "lines")
)
} else {
grid.text(
"5'",
x = unit(1, "npc") - unit(1, "lines"),
gp = gpar(col = "blue", cex = 1),
y = unit(2.5, "lines")
)
grid.text(
"3'",
x = unit(2, "lines"),
gp = gpar(col = "blue", cex = 1),
y = unit(2.5, "lines")
)
}
popViewport()
pushViewport(viewport(y = .375, height = .25))
#par(mar=c(1.5, 3, 4, 2))
plotMotifLogo(
pcm2pfm(snp_aug_match_pwm),
"Best match to the SNP genome",
font = "mono,Courier",
yaxis = FALSE,
xlab = "",
ylab = paste("(", motif.match$snp_strand, ")", sep = ""),
newpage = FALSE,
margins = c(1.5, 3, 2, 2)
)
pushViewport(plotViewport(margins = c(1.5, 3, 2, 2)))
grid.rect(
x = (snp_loc + .5) / motif.match$snp_ref_length,
width = 1 / motif.match$snp_ref_length,
gp = gpar(
col = "blue",
lty = 3,
lwd = 2,
fill = NA
)
)
popViewport()
if (motif.match$snp_strand == "+") {
grid.text(
"3'",
x = unit(1, "npc") - unit(1, "lines"),
gp = gpar(col = "blue", cex = 1),
y = unit(.5, "lines")
)
grid.text(
"5'",
x = unit(2, "lines"),
gp = gpar(col = "blue", cex = 1),
y = unit(.5, "lines")
)
} else {
grid.text(
"5'",
x = unit(1, "npc") - unit(1, "lines"),
gp = gpar(col = "blue", cex = 1),
y = unit(.5, "lines")
)
grid.text(
"3'",
x = unit(2, "lines"),
gp = gpar(col = "blue", cex = 1),
y = unit(.5, "lines")
)
}
popViewport()
pushViewport(viewport(y = .125, height = .25))
#par(mar=c(4, 3, 1.5, 2))
plotMotifLogo(
pcm2pfm(snp_aug_pwm),
yaxis = FALSE,
xaxis = FALSE,
xlab = "",
ylab = "PWM",
newpage = FALSE,
margins = c(2, 3, 1.5, 2)
)
if (motif.match$snp_strand == '+') {
grid.lines(
x = c(
convertUnit(unit(3, "lines"), "npc", valueOnly = TRUE),
1 - convertUnit(unit(1, "lines"), "npc", valueOnly = TRUE)
),
y = unit(1.5, "lines"),
gp = gpar(col = "blue", lwd = 1.5, xpd = NA),
arrow = arrow(
length = unit(0.1, "inches"),
angle = 15,
ends = "last"
)
)
grid.text(
"3'",
x = unit(1, "npc") - unit(1, "lines"),
gp = gpar(col = "blue", cex = 1),
y = unit(1, "lines")
)
grid.text(
"5'",
x = unit(2, "lines"),
gp = gpar(col = "blue", cex = 1),
y = unit(1, "lines")
)
} else {
grid.lines(
x = c(
convertUnit(unit(3, "lines"), "npc", valueOnly = TRUE),
1 - convertUnit(unit(1, "lines"), "npc", valueOnly = TRUE)
),
y = unit(1.5, "lines"),
gp = gpar(col = "blue", lwd = 1.5, xpd = NA),
arrow = arrow(
length = unit(0.1, "inches"),
angle = 15,
ends = "first"
)
)
grid.text(
"5'",
x = unit(1, "npc") - unit(1, "lines"),
gp = gpar(col = "blue", cex = 1),
y = unit(1, "lines")
)
grid.text(
"3'",
x = unit(2, "lines"),
gp = gpar(col = "blue", cex = 1),
y = unit(1, "lines")
)
}
popViewport()
popViewport()
grid.text(
label = paste(motif.match$motif, " Motif Scan for ", motif.match$snpid, sep =
""),
y = unit(1, "npc") - unit(1.5, "lines"),
gp = gpar(cex.main = cex.main, fontface = "bold")
)
}
# Complement a nucleobase string, base by base, keeping the original
# orientation (note: despite the name, the string is NOT reversed in either
# this or the original implementation). Unknown characters become "NA" in the
# output; a zero-length input returns NULL invisibly.
.find_reverse <- function(sequence) {
  if (length(sequence) > 0) {
    complement <- c(A = "T", C = "G", G = "C", T = "A")
    bases <- strsplit(sequence, split = "")[[1]]
    paste(complement[bases], collapse = "")
  }
}
|
/R/graphic.R
|
no_license
|
chandlerzuo/atSNP
|
R
| false
| false
| 18,713
|
r
|
#' @name dtMotifMatch
#' @title Compute the augmented matching subsequence on SNP and reference
#' alleles.
#' @description Calculate the best matching augmented subsequences on both SNP
#' and reference alleles for motifs. Obtain extra unmatching position on the
#' best matching augmented subsequence of the reference and SNP alleles.
#' @param motif.lib A list of named position weight matrices.
#' @param snp.tbl A data.frame with the following information:
#' \tabular{cc}{
#' snpid \tab SNP id.\cr
#' ref_seq \tab Reference allele nucleobase sequence.\cr
#' snp_seq \tab SNP allele nucleobase sequence.\cr
#' ref_seq_rev \tab Reference allele nucleobase sequence on the reverse
#' strand.\cr
#' snp_seq_rev \tab SNP allele nucleobase sequence on the reverse strand.\cr}
#' @param motif.scores A data.frame with the following information:
#' \tabular{cc}{
#' motif \tab Name of the motif.\cr
#' motif_len \tab Length of the motif.\cr
#' ref_start, ref_end, ref_strand \tab Location of the best matching subsequence
#' on the reference allele.\cr
#' snp_start, snp_end, snp_strand \tab Location of the best matching subsequence
#' on the SNP allele.\cr
#' log_lik_ref \tab Log-likelihood score for the reference allele.\cr
#' log_lik_snp \tab Log-likelihood score for the SNP allele.\cr
#' log_lik_ratio \tab The log-likelihood ratio.\cr
#' log_enhance_odds \tab Difference in log-likelihood ratio between SNP allele
#' and reference allele based on the best matching subsequence on the reference
#' allele.\cr
#' log_reduce_odds \tab Difference in log-likelihood ratio between reference
#' allele and SNP allele based on the best matching subsequence on the SNP
#' allele.\cr
#' }
#' @param snpids A subset of snpids to compute the subsequences. Default: NULL,
#' when all snps are computed.
#' @param motifs A subset of motifs to compute the subsequences. Default: NULL,
#' when all motifs are computed.
#' @param ncores The number of cores used for parallel computing. Default: 10
#' @return A data.frame containing all columns from the function,
#' \code{\link{MatchSubsequence}}. In addition, the following columns are added:
#' \tabular{ll}{
#' snp_ref_start, snp_ref_end, snp_ref_length \tab Location and Length of the
#' best matching augmented subsequence on both the reference and SNP allele.\cr
#' ref_aug_match_seq_forward \tab Best matching augmented subsequence or its
#' corresponding sequence to the forward strand on the reference allele.\cr
#' snp_aug_match_seq_forward \tab Best matching augmented subsequence or its
#' corresponding sequence to the forward strand on the SNP allele.\cr
#' ref_aug_match_seq_reverse \tab Best matching augmented subsequence or its
#' corresponding sequence to the reverse strand on the reference allele.\cr
#' snp_aug_match_seq_reverse \tab Best matching augmented subsequence or its
#' corresponding sequence to the reverse strand on the SNP allele.\cr
#' ref_location \tab SNP location of the best matching augmented subsequence on
#' the reference allele. Starting from zero. \cr
#' snp_location \tab SNP location of the best matching augmented subsequence on
#' the SNP allele. Starting from zero. \cr
#' ref_extra_pwm_left \tab Left extra unmatching position on the best matching
#' augmented subsequence of the reference allele. \cr
#' ref_extra_pwm_right \tab Right extra unmatching position on the best matching
#' augmented subsequence of the reference allele. \cr
#' snp_extra_pwm_left \tab Left extra unmatching position on the best matching
#' augmented subsequence of the SNP allele. \cr
#' snp_extra_pwm_right \tab Right extra unmatching position on the best matching
#' augmented subsequence of the SNP allele. \cr
#' }
#' @author Sunyoung Shin\email{sunyoung.shin@@utdallas.edu}
#' @examples
#' data(example)
#' dtMotifMatch(motif_scores$snp.tbl, motif_scores$motif.scores,
#' motif.lib = motif_library)
#' @import data.table
#' @export
dtMotifMatch <-
  function(snp.tbl,
           motif.scores,
           snpids = NULL,
           motifs = NULL,
           motif.lib,
           ncores = 2) {
    # Validate argument classes first (checkSNPids/checkMotifs are package
    # helpers; each returns TRUE when its argument is malformed).
    if (checkSNPids(snpids))
    {
      stop("snpids must be a vector of class character or NULL.")
    } else if (checkMotifs(motifs)) {
      stop("motifs must be a vector of class character or NULL.")
    }
    # Any requested subset must actually occur in motif.scores.
    if (length(setdiff(snpids, motif.scores$snpid)) != 0)
    {
      stop("snpids are not found in motif.scores.")
    } else if (length(setdiff(motifs, motif.scores$motif)) != 0) {
      stop("motifs are not found in motif.scores.")
    }
    #warning for ncores, motif.lib etc.
    snp.tbl <- as.data.table(snp.tbl)
    # Cap the worker count at the number of (snpid, motif) pairs; when either
    # subset is NULL the product is 0, so fall back to the requested ncores.
    ncores.v1 <- min(ncores, length(snpids) * length(motifs))
    ncores.v2 <- ifelse(ncores.v1 == 0, ncores, ncores.v1)
    # NOTE(review): computed but never used below -- candidate for removal.
    sequence.half.window.size <- (nchar(snp.tbl[1, ref_seq]) - 1) / 2
    # Best matching subsequence per (snpid, motif) pair (package helper).
    motif.match <-
      MatchSubsequence(
        snp.tbl = snp.tbl,
        motif.scores = motif.scores,
        snpids = snpids,
        motifs = motifs,
        ncores = ncores.v2,
        motif.lib = motif.lib
      )
    motif.match.dt <- as.data.table(motif.match)
    ##Augmentation of SNP and reference sequences###
    # Dummy NULL assignments silence R CMD check NOTEs about data.table's
    # non-standard evaluation of bare column names below.
    len_seq <- snpid <- snp_ref_start <- snp_ref_end <- snp_ref_length <-
      ref_start <- snp_start <- ref_end <- snp_end <- ref_seq <- snp_seq <-
      ref_strand <- ref_location <- snp_strand <- snp_location <-
      ref_extra_pwm_left <- ref_extra_pwm_right <- snp_extra_pwm_left <-
      snp_extra_pwm_right <- ref_aug_match_seq_forward <-
      ref_aug_match_seq_reverse <- snp_aug_match_seq_forward <-
      snp_aug_match_seq_reverse <- NULL
    motif.match.dt[, len_seq := nchar(ref_seq)]
    # Augmented window = union of the reference and SNP match intervals.
    motif.match.dt[, snp_ref_start := apply(cbind(ref_start, snp_start), 1, min)]
    motif.match.dt[, snp_ref_end := apply(cbind(ref_end, snp_end), 1, max)]
    motif.match.dt[, snp_ref_length := snp_ref_end - snp_ref_start + 1]
    # Forward-strand augmented subsequences plus their per-base complements
    # (.find_reverse complements each base without reversing the order).
    motif.match.dt[, ref_aug_match_seq_forward := substr(ref_seq, snp_ref_start, snp_ref_end)]
    motif.match.dt[, ref_aug_match_seq_reverse := apply(as.matrix(ref_aug_match_seq_forward), 1, .find_reverse)]
    motif.match.dt[, snp_aug_match_seq_forward := substr(snp_seq, snp_ref_start, snp_ref_end)]
    motif.match.dt[, snp_aug_match_seq_reverse := apply(as.matrix(snp_aug_match_seq_forward), 1, .find_reverse)]
    ##The starting position of the motif in the augmented sequences
    # The SNP sits at the center of the sequence, 0-based offset
    # (len_seq - 1) / 2; translate it into augmented-window coordinates,
    # flipping for matches on the "-" strand.
    motif.match.dt[ref_strand == "+", ref_location := (len_seq - 1) / 2 + 1 - snp_ref_start]
    motif.match.dt[ref_strand == "-", ref_location := snp_ref_end - (len_seq - 1) / 2 - 1]
    motif.match.dt[snp_strand == "+", snp_location := (len_seq - 1) / 2 +
                     1 - snp_ref_start]
    motif.match.dt[snp_strand == "-", snp_location := snp_ref_end - (len_seq -
                                                                       1) / 2 - 1]
    motif.match.dt[, len_seq := NULL]
    ##PWM Location Adjustment Value for reference and SNP
    # Number of augmented-window columns hanging off each side of the motif
    # match; left/right swap for "-" strand matches.
    motif.match.dt[ref_strand == "+", ref_extra_pwm_left := ref_start - snp_ref_start]
    motif.match.dt[ref_strand == "-", ref_extra_pwm_left := snp_ref_end -
                     ref_end]
    motif.match.dt[ref_strand == "+", ref_extra_pwm_right := snp_ref_end -
                     ref_end]
    motif.match.dt[ref_strand == "-", ref_extra_pwm_right := ref_start - snp_ref_start]
    motif.match.dt[snp_strand == "+", snp_extra_pwm_left := snp_start - snp_ref_start]
    motif.match.dt[snp_strand == "-", snp_extra_pwm_left := snp_ref_end -
                     snp_end]
    motif.match.dt[snp_strand == "+", snp_extra_pwm_right := snp_ref_end -
                     snp_end]
    motif.match.dt[snp_strand == "-", snp_extra_pwm_right := snp_start - snp_ref_start]
    setkey(motif.match.dt, snpid)
    return(motif.match.dt)
  }
#' @name plotMotifMatch
#' @title Plot sequence logos of the position weight matrix of the motif and
#' sequences of its corresponding best matching augmented subsequence on the
#' reference and SNP allele.
#' @description Plot the best matching augmented subsequences on the reference
#' and SNP alleles. Plot sequence logos of the position weight matrix of the
#' motif to the corresponding positions of the best matching subsequences on the
#' references and SNP alleles.
#' @param motif.match A single row of dtMotifMatch output in data.frame format.
#' @param motif.lib A list of position weight matrices
#' @param cex.main The size of the main title.
#' @param ... Other parameters passed to plotMotifLogo.
#' @return Sequence logo stacks: Reference subsequences, sequence logo of
#' reference allele matching position weight matrix, SNP subsequences, sequence
#' logo of SNP allele matching position weight matrix
#' @author Sunyoung Shin\email{sunyoung.shin@@utdallas.edu}
#' @examples
#' data(example)
#' plotMotifMatch(motif_match, motif.lib = motif_library)
#' @import grid
#' @importFrom motifStack plotMotifLogo pcm2pfm
#' @importFrom grDevices dev.off pdf
#' @importFrom stats quantile var
#' @importFrom utils data read.table write.table
#' @export
# Plot a four-panel sequence-logo stack for one (snpid, motif) match:
#   panel 1: motif PWM aligned to the reference match, with strand arrow;
#   panel 2: reference augmented subsequence, SNP position boxed;
#   panel 3: SNP augmented subsequence, SNP position boxed;
#   panel 4: motif PWM aligned to the SNP match, with strand arrow.
# NOTE(review): `...` is documented as passed to plotMotifLogo but is
# currently unused -- confirm intent before forwarding it.
plotMotifMatch <-
  function(motif.match, motif.lib, cex.main = 2, ...) {
    # Scalar conditions: use short-circuit `||` (the original elementwise `|`
    # happened to work on length-1 operands but is not the idiomatic form).
    if (!is(motif.match$snpid, "character") ||
        length(motif.match$snpid) != 1) {
      stop("snpid must be a character")
    }
    if (!is(motif.match$motif, "character") ||
        length(motif.match$motif) != 1) {
      stop("motif must be a character")
    }
    if (sum(!motif.match$motif %in% names(motif.lib)) > 0) {
      stop("The motif is not included in 'motif.lib'.")
    }
    if (nrow(motif.match) > 1) {
      stop("Pick a single row of dtMotifMatch output.")
    }
    # Library stores positions in rows; transpose so rows are A/C/G/T.
    motif.pwm <- t(get(motif.match$motif, motif.lib))
    ##Convert ACGT to 1234
    codes <- seq(4)
    names(codes) <- c("A", "C", "G", "T")
    ref_aug_match_seq_forward_code <-
      codes[strsplit(motif.match$ref_aug_match_seq_forward, "")[[1]]]
    ref_aug_match_seq_reverse_code <-
      codes[strsplit(motif.match$ref_aug_match_seq_reverse, "")[[1]]]
    snp_aug_match_seq_forward_code <-
      codes[strsplit(motif.match$snp_aug_match_seq_forward, "")[[1]]]
    snp_aug_match_seq_reverse_code <-
      codes[strsplit(motif.match$snp_aug_match_seq_reverse, "")[[1]]]
    ##Convert 1234 to (1000)(0100)(0010)(0001)
    # One-hot encode each sequence as a 4 x L matrix so it can be rendered
    # through the same logo plotter as the PWMs.
    codes.vec <- diag(4)
    rownames(codes.vec) <- c("A", "C", "G", "T")
    ref_aug_match_pwm_forward <-
      mapply(function(i)
        codes.vec[, i],
        as.list(ref_aug_match_seq_forward_code))
    ref_aug_match_pwm_reverse <-
      mapply(function(i)
        codes.vec[, i],
        as.list(ref_aug_match_seq_reverse_code))
    snp_aug_match_pwm_forward <-
      mapply(function(i)
        codes.vec[, i],
        as.list(snp_aug_match_seq_forward_code))
    snp_aug_match_pwm_reverse <-
      mapply(function(i)
        codes.vec[, i],
        as.list(snp_aug_match_seq_reverse_code))
    ##(3,2) to Augmented PWM: ___PWM__
    # Pad the PWM with all-zero columns (rendered blank) so it lines up with
    # the augmented subsequence window.
    ref_aug_pwm <-
      cbind(
        matrix(0, 4, motif.match$ref_extra_pwm_left),
        motif.pwm,
        matrix(0, 4, motif.match$ref_extra_pwm_right)
      )
    rownames(ref_aug_pwm) <- c("A", "C", "G", "T")
    snp_aug_pwm <-
      cbind(
        matrix(0, 4, motif.match$snp_extra_pwm_left),
        motif.pwm,
        matrix(0, 4, motif.match$snp_extra_pwm_right)
      )
    rownames(snp_aug_pwm) <- c("A", "C", "G", "T")
    # 0-based SNP offset within the augmented window (forward coordinates).
    snp_loc <- motif.match$ref_location
    revert.columns <- function(mat) {
      mat[, rev(seq(ncol(mat)))]
    }
    ref_aug_match_pwm <- ref_aug_match_pwm_forward
    snp_aug_match_pwm <- snp_aug_match_pwm_forward
    if (motif.match$ref_strand == "-") {
      ref_aug_pwm <- revert.columns(ref_aug_pwm)
      snp_loc <- ncol(ref_aug_match_pwm_forward) - 1 - snp_loc
      ref_aug_match_pwm <- ref_aug_match_pwm_reverse
    }
    # NOTE(review): the SNP-panel highlight below reuses `snp_loc`, which is
    # derived from ref_location/ref_strand rather than snp_location/snp_strand;
    # confirm this is intended when the two strands differ.
    if (motif.match$snp_strand == "-") {
      snp_aug_pwm <- revert.columns(snp_aug_pwm)
      snp_aug_match_pwm <- snp_aug_match_pwm_reverse
    }
    # Outer viewport leaves room at the top for the main title; the four
    # inner viewports stack the panels at y = .875/.625/.375/.125.
    pushViewport(viewport(
      y = unit(.5, "npc") - unit(2, "lines"),
      height = unit(1, "npc") - unit(3, "lines")
    ))
    pushViewport(viewport(y = .875, height = .25))
    plotMotifLogo(
      pcm2pfm(ref_aug_pwm),
      "Best match to the reference genome",
      yaxis = FALSE,
      xaxis = FALSE,
      xlab = "",
      ylab = "PWM",
      newpage = FALSE,
      margins = c(1.5, 3, 2, 2)
    )
    # Strand arrow under the reference PWM: 5'->3' left-to-right on "+",
    # right-to-left on "-".
    if (motif.match$ref_strand == '+') {
      grid.lines(
        x = c(
          convertUnit(unit(3, "lines"), "npc", valueOnly = TRUE),
          1 - convertUnit(unit(2, "lines"), "npc", valueOnly = TRUE)
        ),
        y = unit(1, "lines"),
        gp = gpar(col = "blue", lwd = 1.5, xpd = NA),
        arrow = arrow(
          length = unit(0.1, "inches"),
          angle = 15,
          ends = "last"
        )
      )
      grid.text(
        "3'",
        x = unit(1, "npc") - unit(1, "lines"),
        gp = gpar(col = "blue", cex = 1),
        y = unit(.5, "lines")
      )
      grid.text(
        "5'",
        x = unit(2, "lines"),
        gp = gpar(col = "blue", cex = 1),
        y = unit(.5, "lines")
      )
    } else {
      grid.lines(
        x = c(
          convertUnit(unit(3, "lines"), "npc", valueOnly = TRUE),
          1 - convertUnit(unit(2, "lines"), "npc", valueOnly = TRUE)
        ),
        y = unit(1, "lines"),
        gp = gpar(col = "blue", lwd = 1.5, xpd = NA),
        arrow = arrow(
          length = unit(0.1, "inches"),
          angle = 15,
          ends = "first"
        )
      )
      grid.text(
        "5'",
        x = unit(1, "npc") - unit(1, "lines"),
        gp = gpar(col = "blue", cex = 1),
        y = unit(.5, "lines")
      )
      grid.text(
        "3'",
        x = unit(2, "lines"),
        gp = gpar(col = "blue", cex = 1),
        y = unit(.5, "lines")
      )
    }
    popViewport()
    pushViewport(viewport(y = .625, height = .25))
    #par(mar = c(4, 3, 1.5, 2))
    plotMotifLogo(
      pcm2pfm(ref_aug_match_pwm),
      font = "mono,Courier",
      yaxis = FALSE,
      xlab = "",
      ylab = paste("(", motif.match$ref_strand, ")", sep = ""),
      newpage = FALSE,
      margins = c(2, 3, 1.5, 2)
    )
    # Dotted box around the SNP column of the reference subsequence.
    pushViewport(plotViewport(margins = c(2, 3, 1.5, 2)))
    grid.rect(
      x = (snp_loc + .5) / motif.match$snp_ref_length,
      width = 1 / motif.match$snp_ref_length,
      gp = gpar(
        col = "blue",
        lty = 3,
        lwd = 2,
        fill = NA
      )
    )
    popViewport()
    if (motif.match$ref_strand == "+") {
      grid.text(
        "3'",
        x = unit(1, "npc") - unit(1, "lines"),
        gp = gpar(col = "blue", cex = 1),
        y = unit(2.5, "lines")
      )
      grid.text(
        "5'",
        x = unit(2, "lines"),
        gp = gpar(col = "blue", cex = 1),
        y = unit(2.5, "lines")
      )
    } else {
      grid.text(
        "5'",
        x = unit(1, "npc") - unit(1, "lines"),
        gp = gpar(col = "blue", cex = 1),
        y = unit(2.5, "lines")
      )
      grid.text(
        "3'",
        x = unit(2, "lines"),
        gp = gpar(col = "blue", cex = 1),
        y = unit(2.5, "lines")
      )
    }
    popViewport()
    pushViewport(viewport(y = .375, height = .25))
    #par(mar=c(1.5, 3, 4, 2))
    plotMotifLogo(
      pcm2pfm(snp_aug_match_pwm),
      "Best match to the SNP genome",
      font = "mono,Courier",
      yaxis = FALSE,
      xlab = "",
      ylab = paste("(", motif.match$snp_strand, ")", sep = ""),
      newpage = FALSE,
      margins = c(1.5, 3, 2, 2)
    )
    # Dotted box around the SNP column of the SNP subsequence.
    pushViewport(plotViewport(margins = c(1.5, 3, 2, 2)))
    grid.rect(
      x = (snp_loc + .5) / motif.match$snp_ref_length,
      width = 1 / motif.match$snp_ref_length,
      gp = gpar(
        col = "blue",
        lty = 3,
        lwd = 2,
        fill = NA
      )
    )
    popViewport()
    if (motif.match$snp_strand == "+") {
      grid.text(
        "3'",
        x = unit(1, "npc") - unit(1, "lines"),
        gp = gpar(col = "blue", cex = 1),
        y = unit(.5, "lines")
      )
      grid.text(
        "5'",
        x = unit(2, "lines"),
        gp = gpar(col = "blue", cex = 1),
        y = unit(.5, "lines")
      )
    } else {
      grid.text(
        "5'",
        x = unit(1, "npc") - unit(1, "lines"),
        gp = gpar(col = "blue", cex = 1),
        y = unit(.5, "lines")
      )
      grid.text(
        "3'",
        x = unit(2, "lines"),
        gp = gpar(col = "blue", cex = 1),
        y = unit(.5, "lines")
      )
    }
    popViewport()
    pushViewport(viewport(y = .125, height = .25))
    #par(mar=c(4, 3, 1.5, 2))
    plotMotifLogo(
      pcm2pfm(snp_aug_pwm),
      yaxis = FALSE,
      xaxis = FALSE,
      xlab = "",
      ylab = "PWM",
      newpage = FALSE,
      margins = c(2, 3, 1.5, 2)
    )
    # Strand arrow under the SNP PWM, mirroring the reference panel.
    if (motif.match$snp_strand == '+') {
      grid.lines(
        x = c(
          convertUnit(unit(3, "lines"), "npc", valueOnly = TRUE),
          1 - convertUnit(unit(1, "lines"), "npc", valueOnly = TRUE)
        ),
        y = unit(1.5, "lines"),
        gp = gpar(col = "blue", lwd = 1.5, xpd = NA),
        arrow = arrow(
          length = unit(0.1, "inches"),
          angle = 15,
          ends = "last"
        )
      )
      grid.text(
        "3'",
        x = unit(1, "npc") - unit(1, "lines"),
        gp = gpar(col = "blue", cex = 1),
        y = unit(1, "lines")
      )
      grid.text(
        "5'",
        x = unit(2, "lines"),
        gp = gpar(col = "blue", cex = 1),
        y = unit(1, "lines")
      )
    } else {
      grid.lines(
        x = c(
          convertUnit(unit(3, "lines"), "npc", valueOnly = TRUE),
          1 - convertUnit(unit(1, "lines"), "npc", valueOnly = TRUE)
        ),
        y = unit(1.5, "lines"),
        gp = gpar(col = "blue", lwd = 1.5, xpd = NA),
        arrow = arrow(
          length = unit(0.1, "inches"),
          angle = 15,
          ends = "first"
        )
      )
      grid.text(
        "5'",
        x = unit(1, "npc") - unit(1, "lines"),
        gp = gpar(col = "blue", cex = 1),
        y = unit(1, "lines")
      )
      grid.text(
        "3'",
        x = unit(2, "lines"),
        gp = gpar(col = "blue", cex = 1),
        y = unit(1, "lines")
      )
    }
    popViewport()
    popViewport()
    # BUG FIX: gpar() has no `cex.main` parameter, so the requested title size
    # was silently ignored; `cex` is the correct gpar name.
    grid.text(
      label = paste(motif.match$motif, " Motif Scan for ", motif.match$snpid, sep =
                      ""),
      y = unit(1, "npc") - unit(1.5, "lines"),
      gp = gpar(cex = cex.main, fontface = "bold")
    )
  }
# Complement a nucleobase string, base by base, keeping the original
# orientation (note: despite the name, the string is NOT reversed in either
# this or the original implementation). Unknown characters become "NA" in the
# output; a zero-length input returns NULL invisibly.
.find_reverse <- function(sequence) {
  if (length(sequence) > 0) {
    complement <- c(A = "T", C = "G", G = "C", T = "A")
    bases <- strsplit(sequence, split = "")[[1]]
    paste(complement[bases], collapse = "")
  }
}
|
# One-off interactive setup: install and attach the tidyverse, open vignette
# indexes for ggplot2 and dplyr in the browser, and list installed packages.
install.packages("tidyverse")  # NOTE(review): interactive use only; avoid install.packages() in reusable scripts
library(tidyverse)
browseVignettes("ggplot2")  # opens the vignette index in a browser
browseVignettes("dplyr")
installed.packages()  # returns a matrix describing every installed package
|
/R/Week23.R
|
no_license
|
ashokjha/dataanalytics
|
R
| false
| false
| 129
|
r
|
# One-off interactive setup: install and attach the tidyverse, open vignette
# indexes for ggplot2 and dplyr in the browser, and list installed packages.
install.packages("tidyverse")  # NOTE(review): interactive use only; avoid install.packages() in reusable scripts
library(tidyverse)
browseVignettes("ggplot2")  # opens the vignette index in a browser
browseVignettes("dplyr")
installed.packages()  # returns a matrix describing every installed package
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.