blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
57294a45c8c951dd360e83dae1c784a1695b70d1
|
4e5f8ad9b0a094d509ee8ba34c761aefa18a006e
|
/grattan195.R
|
7839b14ac8824aac182dea55f29a7f5ba5d7ac88
|
[] |
no_license
|
HughParsonage/r-dev-vs-release
|
727b555a3440d6886f50daf2ad4c75d7da5a518c
|
87fd595b04fd914a053dd6b0bf934f5da27990ab
|
refs/heads/master
| 2021-06-02T22:25:43.271211
| 2020-11-29T04:25:36
| 2020-11-29T04:25:36
| 124,319,979
| 0
| 0
| null | 2020-11-29T04:25:37
| 2018-03-08T01:44:13
|
R
|
UTF-8
|
R
| false
| false
| 451
|
r
|
grattan195.R
|
# CI/build script: installs the grattan package (CRAN release + GitHub dev)
# together with the restricted 'taxstats' data, so the main vignette can build.
# Enable building of the main vignette and use of the taxstats sample data.
Sys.setenv('R_GRATTAN_BUILD_MAIN_VIGNETTE' = 'true')
Sys.setenv('R_GRATTAN_ALLOW_TAXSTATS' = 'true')
# remotes is needed below for install_github().
install.packages("remotes", repos = "https://cran.rstudio.com")
install.packages("grattan", repos = "https://cran.rstudio.com")
# taxstats lives in a drat repo, source-only.
install.packages("taxstats", repos = "https://hughparsonage.github.io/tax-drat/", type = "source")
grattan::install_taxstats()
# Finally replace the CRAN grattan with the development version from GitHub;
# upgrade = "never" keeps the dependency versions installed above.
remotes::install_github("hughparsonage/grattan", dependencies = NA, upgrade = "never")
|
d52838bb057324191d2b58523ee2d6c9e1bf83d0
|
8375d88db0ec7f04d04dd79385754ec831d03ee8
|
/R/bcos.R
|
91cb12ff70f91b4a279bd507f50fcf90c1401b3b
|
[] |
no_license
|
cran/MIICD
|
4346c8879f4049253ad6dce6061ff78f53ba86b3
|
4d9c804c12352edf4d18028091ad09b9bde259a4
|
refs/heads/master
| 2021-01-15T15:47:54.639481
| 2017-05-27T15:20:01
| 2017-05-27T15:20:01
| 19,073,304
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 521
|
r
|
bcos.R
|
#' bcos : breast cosmesis data
#' @name bcos
#' @description A data frame with 94 observations on the following 3 variables : \itemize{
#' \item left
#' \item right
#' \item treatment
#' }
#' The data comes from the \code{Interval} library by Michael P. Fay.
#' @author Michael P. Fay
#' @examples head(bcos)
#' @docType data
#' @keywords dataset
#' @references Finkelstein, D.M., and Wolfe, R.A. (1985). A semiparametric model for regression analysis of interval-censored failure time data. Biometrics 41: 731-740.
# NULL is the conventional roxygen2 placeholder object for documenting a
# dataset: the block above is attached to the 'bcos' data, not to a function.
NULL
|
23728045e6d7999f54b430751a2cd6b0bae33c0b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/knockoff/examples/knockoff.filter.Rd.R
|
9811ea86f04a5291fb34c31fec66f8d8911ac4e2
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 633
|
r
|
knockoff.filter.Rd.R
|
# Auto-extracted example script for knockoff::knockoff.filter (variable
# selection with FDR control). Simulates a sparse linear model, then runs the
# filter with default and with custom knockoff/statistic functions.
library(knockoff)
### Name: knockoff.filter
### Title: The Knockoff Filter
### Aliases: knockoff.filter
### ** Examples
# p features, n observations, k truly non-null coefficients.
p=200; n=100; k=15
mu = rep(0,p); Sigma = diag(p)
X = matrix(rnorm(n*p),n)
# Pick k random coordinates to carry signal of amplitude 3.5.
nonzero = sample(p, k)
beta = 3.5 * (1:p %in% nonzero)
y = X %*% beta + rnorm(n)
# Basic usage with default arguments
result = knockoff.filter(X, y)
print(result$selected)
# Advanced usage with custom arguments
knockoffs = function(X) create.gaussian(X, mu, Sigma)
k_stat = function(X, Xk, y) stat.glmnet_coefdiff(X, Xk, y, nfolds=5)
result = knockoff.filter(X, y, knockoffs=knockoffs, statistic=k_stat)
print(result$selected)
|
e07151ae1ee861ac7fb7d047e785f6fd71d33201
|
9ac4435b45098a4a0dc4c90fe118c40394767ba0
|
/analysis/AlexandrovEtAl/script/subscript/perform_pmsignature.R
|
45e482598c62a1fc0858ff686d4f9dc8b30e6dc7
|
[] |
no_license
|
friend1ws/pmsignature_paper
|
e0c2e66df310c1d7629da41dd300029fab2a7bb0
|
ef93ee9b21570b0f3dd7de38ab004f6dd406ec9e
|
refs/heads/master
| 2020-04-16T01:15:42.086275
| 2015-10-27T02:48:30
| 2015-10-27T02:48:30
| 30,580,825
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 523
|
r
|
perform_pmsignature.R
|
#! /home/yshira/local/bin/R
# Batch script: fit a pmsignature mutation-signature model and bootstrap it.
# Command-line arguments (positions 5-9 of commandArgs()):
#   [5] input mutation-position file, [6] output .RData path,
#   [7] number of signatures K, [8] transcription-direction flag (logical),
#   [9] number of random EM initializations.
library(pmsignature);
inputFile <- commandArgs()[5];
outputFile <- commandArgs()[6];
sigNum <- as.numeric(commandArgs()[7]);
trDirFlag <- as.logical(commandArgs()[8]);
trialNum <- as.numeric(commandArgs()[9]);
G <- readMPFile(inputFile, numBases = 5, trDir = trDirFlag);
BG_prob <- readBGFile(G);
Param <- getPMSignature(G, K = sigNum , BG = BG_prob, numInit = trialNum);
Boot <- bootPMSignature(G, Param0 = Param, bootNum = 100, BG = BG_prob);
# BUGFIX: the original called save(list(Param, Boot), file=outputFile), which
# errors at runtime -- save() expects unquoted object names (or list= a
# character vector of names), not a list of objects. Pass the objects directly.
save(Param, Boot, file = outputFile);
|
0ebc8243aa713444fe7ee2a1f082bb18be9a5f81
|
3b3f29bb712b8b0c73460e0dfe859bc1e3a63790
|
/man/hhmmss_to_hms.Rd
|
e5b4f29535b77c3aac432b0dbe5b9c12b02d0a25
|
[] |
no_license
|
r-transit/tidytransit
|
37765dedc4450dd1ff1b069f45491b69402e83da
|
878e64a4a4b7f10b42f0e9d2c156fd1314fddaad
|
refs/heads/master
| 2023-08-08T10:17:46.583304
| 2023-07-21T08:29:46
| 2023-07-21T08:29:46
| 142,806,717
| 123
| 24
| null | 2023-07-21T08:29:47
| 2018-07-30T00:43:24
|
R
|
UTF-8
|
R
| false
| true
| 384
|
rd
|
hhmmss_to_hms.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/time.R
\name{hhmmss_to_hms}
\alias{hhmmss_to_hms}
\title{Convert a vector of time strings ("HH:MM:SS") to hms;
empty strings are converted to NA}
\usage{
hhmmss_to_hms(time_strings)
}
\arguments{
\item{time_strings}{char vector ("HH:MM:SS")}
}
\description{
convert a vector of time strings
empty strings are converted to NA
}
|
28d65f2864846509535500761cfd2b9f07c4bb25
|
aedb3cc6a5ff76468c2f244593381a851e36ef20
|
/man/guttman.Rd
|
16e1cbcec988829738eb90920eb0a4114c8863ad
|
[] |
no_license
|
JackStat/Lambda4
|
d340f354217363a4ae4a4d15e5ca4527a9cc9120
|
2da10dcf4ba68673bbc8de4a5417a4e28676274c
|
refs/heads/master
| 2021-01-17T15:13:44.056771
| 2016-07-03T16:38:07
| 2016-07-03T16:38:07
| 9,309,287
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,177
|
rd
|
guttman.Rd
|
\name{guttman}
\alias{guttman}
\title{Guttman's 6 Lambda Coefficients}
\usage{
guttman(x, missing = "complete", standardize = FALSE)
}
\arguments{
\item{x}{Can be either a data matrix or a covariance
matrix}
\item{missing}{How to handle missing values.}
\item{standardize}{When TRUE Results are standardized by
using the correlation matrix instead of the covariance
matrix for computation.}
}
\value{
\item{Lambda1}{Guttman's Lambda1 estimate of
reliability.} \item{Lambda2}{Guttman's Lambda2 estimate
of reliability.} \item{Lambda3}{Guttman's Lambda3
estimate of reliability. Also known as Cronbach's alpha
or coefficient alpha.} \item{Lambda4}{Guttman's maximal
Lambda4 estimate of reliability.}
\item{Lambda5}{Guttman's Lambda5 estimate of
reliability.} \item{Lambda6}{Guttman's Lambda6 estimate
of reliability.}
}
\description{
Calculates all 6 of Guttman's lambda coefficients.
}
\note{
The estimate for Lambda4 is maximized.
}
\examples{
guttman(Rosenberg)
}
\author{
Tyler Hunt \email{tyler@psychoanalytix.com}
}
\references{
Guttman L (1945). "A Basis for Analyzing Test-Retest
Reliability." Psychometrika, 10, 255-282.
}
|
66f012965a8ea5e378db3cad2f66c61576f80c94
|
e26f5924c22e7536d83d51ac3219a9ce4af16483
|
/workout03/binomial/man/bin_cumulative.Rd
|
0a8b18f4723b7ca75838a55bbc87bba32c0f3f08
|
[] |
no_license
|
stat133-sp19/hw-stat133-xihuanzhang
|
1cd3e35a8a7be73f2174296c3c04dc07dbfb84d4
|
a2bf17ccc8b8136ddde7858927dcef6d5d639269
|
refs/heads/master
| 2020-04-28T12:03:19.110912
| 2019-05-04T06:52:13
| 2019-05-04T06:52:13
| 175,263,547
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 519
|
rd
|
bin_cumulative.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/function.R
\name{bin_cumulative}
\alias{bin_cumulative}
\title{binomial cumulative function}
\usage{
bin_cumulative(trials, prob)
}
\arguments{
\item{trials}{number of trials}
\item{prob}{probability of success in each trial}
}
\value{
A data frame with number of success, probability and cumulative probability
}
\description{
calculates the probability and cumulative probability of different success times
}
\examples{
bin_cumulative(5,0.5)
}
|
ed20149630126c2551244273a1a0162ac51a28ec
|
ab10f7fde360490c080c9a1184f8c868ace0cac8
|
/man/add_counts.Rd
|
d03872116858e27544a8c2c273b6ffcf984e1fbf
|
[
"CC-BY-4.0"
] |
permissive
|
cathblatter/cblttr
|
ca6b0160f25cd89d370aecc728f96e8cbd1e5365
|
41aaf0829084a545b9d7e0dcfa3df6c044c95412
|
refs/heads/main
| 2022-11-27T10:58:31.135308
| 2022-11-27T06:52:25
| 2022-11-27T06:52:25
| 161,007,732
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 602
|
rd
|
add_counts.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_counts.R
\name{add_counts}
\alias{add_counts}
\title{Add counts of multiple columns at once}
\usage{
add_counts(df, ...)
}
\arguments{
\item{df}{the dataframe to work on}
\item{...}{one or multiple column names to summarise by n() on}
}
\value{
a dataframe
}
\description{
Add counts of multiple columns at once
}
\examples{
mtcars |> add_counts(cyl, vs, am)
}
\references{
Code originally written in R4DS-Channel by
https://github.com/HannesOberreiter without testing for edge cases,
modified for personal use by me
}
|
5fa638727158179bed69f787bc167295d76c48ee
|
af6cab822fab2115eaa2415ce0a0d3d7f607bf9d
|
/R/mixash.R
|
b78c532f78886561a4117e6ca56662f5dd4d8d9b
|
[] |
no_license
|
mengyin/mixash
|
9935d140cea102346ebd979798cc2259af85ae76
|
62bbf38e9b52c5c1ca39fc0c128da8988104fbd8
|
refs/heads/master
| 2016-09-12T22:49:43.137749
| 2016-05-20T15:18:16
| 2016-05-20T15:18:16
| 59,304,585
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,771
|
r
|
mixash.R
|
library(SQUAREM)
library(gaussquad)
library(ashr)
#source('mix.R')
#source('added.R')
#' @title Function to compute the local false sign rate
#'
#' @param NegativeProb A vector of posterior probability that beta is negative.
#' @param ZeroProb A vector of posterior probability that beta is zero.
#' @return The local false sign rate.
# Local false sign rate: when the posterior mass on "negative" exceeds half of
# the non-zero mass, the likely sign is negative, so the error probability is
# P(positive) = 1 - NegativeProb; otherwise it is P(negative or zero).
compute_lfsr <- function(NegativeProb, ZeroProb) {
  negative_is_likely_sign <- NegativeProb > 0.5 * (1 - ZeroProb)
  ifelse(negative_is_likely_sign,
         1 - NegativeProb,
         NegativeProb + ZeroProb)
}
# If x is a n-column vector, turn it into n by 1 matrix
# If x is a matrix, keep it
# Coerce a plain vector to an n-by-1 matrix; anything that already carries
# dimensions (e.g. a matrix) is passed through unchanged.
tomatrix <- function(x) {
  if (is.vector(x)) as.matrix(x) else x
}
#estimate mixture proportions of beta's prior by EM algorithm
#prior gives the parameter of a Dirichlet prior on pi
#(prior is used to encourage results towards smallest value of sigma when
#likelihood is flat)
#nullcheck indicates whether to check whether the loglike exceeds the null
#(may not want to use if prior is used)
#VB provides an approach to estimate the approximate posterior distribution
#of mixture proportions of sigmaa by variational Bayes method
#(use Dirichlet prior and approximate Dirichlet posterior)
# Estimate the mixture proportions of beta's prior by EM (delegates the
# optimization to mixEM, a SQUAREM-accelerated fixed-point iteration).
# Args:
#   betahat, sebetahat : observed effects and their standard errors
#   pilik              : per-observation likelihood-component weights
#   g                  : initial mixture prior (pi, component params)
#   prior              : Dirichlet prior counts on the mixture proportions
#   null.comp          : index of the "null" component (default 1)
#   nullcheck          : if TRUE, fall back to the all-null solution when it
#                        has higher log-likelihood than the EM fit
#   df                 : degrees of freedom for the t likelihood (may be NULL)
#   control            : overrides for the SQUAREM control list
# Returns: list(loglik, null.loglik, matrix_lik, converged, g).
EMest_mean = function(betahat,sebetahat,pilik,g,prior,null.comp=1,nullcheck=FALSE, df, control=list()){
control.default=list(K = 1, method=3, square=TRUE, step.min0=1, step.max0=1, mstep=4, kr=1, objfn.inc=1,tol=1.e-07, maxiter=5000, trace=FALSE)
# Reject unknown control names early rather than silently ignoring them.
namc=names(control)
if (!all(namc %in% names(control.default)))
stop("unknown names in control: ", namc[!(namc %in% names(control.default))])
controlinput=modifyList(control.default, control)
pi.init = g$pi
k = ncomp(g)
n = length(betahat)
l = dim(tomatrix(sebetahat))[2]
# Each prior component appears once per likelihood column; 'group' maps the
# k*l raw likelihood columns back onto the k prior components.
group=rep(1:k,l)
controlinput$tol = min(0.1/n,1.e-7) # set convergence criteria to be more stringent for larger samples
if(controlinput$trace==TRUE){tic()}
# Likelihood matrix: rows = observations, columns = (component, lik-part)
# pairs; collapse the lik-parts so columns = prior components.
matrix_lik_raw = t(compdens_conv_mixlik(g,betahat,sebetahat,df,pilik))
matrix_lik = t(rowsum(t(matrix_lik_raw),group))
EMfit = mixEM(matrix_lik,prior,pi.init, control=controlinput)
pi = EMfit$pihat
loglik = EMfit$B # actually return log lower bound not log-likelihood!
converged = EMfit$converged
niter = EMfit$niter
loglik.final = EMfit$B[length(EMfit$B)]
null.loglik = sum(log(matrix_lik[,null.comp]))
if(nullcheck==TRUE){
if(null.loglik > loglik.final){ #check whether exceeded "null" likelihood where everything is null
pi=rep(0,k)
pi[null.comp]=1
m = t(pi * t(matrix_lik))
m.rowsum = rowSums(m)
# NOTE(review): 'loglik' is recomputed here but the return value below uses
# 'loglik.final', so this recomputation appears to be dead -- confirm intent.
loglik = sum(log(m.rowsum))
}
}
g$pi=pi
if(controlinput$trace==TRUE){toc()}
return(list(loglik=loglik.final,null.loglik=null.loglik,
matrix_lik=matrix_lik,converged=converged,g=g))
}
# Approximate non-standard mixture t-likelihood by normal-mixture
# component i: ~scale[i]*T(df[i]), w.p. pi[i]
# Flatten a q-component scaled-t mixture into one normal mixture.
# 'params' packs (pi, scale, df) as 3 consecutive length-q segments; appsigma
# and appweight (q x r matrices) hold the per-component normal approximation.
# Returns c(weights, sds) with both halves ordered by increasing sd.
approxlik_gq <- function(params, q, appsigma, appweight) {
  mix_wt <- params[seq_len(q)]
  mix_scale <- params[q + seq_len(q)]
  # The df segment params[(2q+1):(3q)] travels in 'params' but is not needed
  # here: the t-approximation was already baked into appsigma/appweight.
  wts <- numeric(0)
  sds <- numeric(0)
  for (j in seq_len(q)) {
    wts <- c(wts, mix_wt[j] * appweight[j, ])
    sds <- c(sds, mix_scale[j] * appsigma[j, ])
  }
  ord <- order(sds)
  c(wts[ord], sds[ord])
}
# Approximate t-distribution (of df) by r-components normal mixture
# Approximate a Student-t distribution with 'df' degrees of freedom by an
# r-component scale mixture of normals, using generalized Gauss-Laguerre
# quadrature (gaussquad::glaguerre.quadrature.rules). Returns the component
# standard deviations and their (normalized) weights.
approxt <- function(df, r) {
  shape <- df / 2 - 1
  rule <- glaguerre.quadrature.rules(r, shape, normalized = TRUE)[[r]]
  list(sigma = sqrt(df / (2 * rule$x)),
       weight = rule$w / sum(rule$w))
}
# Approximate mixture t likelihood (with l components)
# by mixture normal (with q components)
# pi, alpha, beta are n by l matrices
# component i: ~(scale[n,i])*T(df[n,i]), w.p. pi[n,i]
# Approximate a mixture-of-t likelihood (q components) by a mixture of
# normals. Each t component (scale[i] * T(df[i]), weight pi[i]) is replaced
# by an ll-component normal mixture via approxt(); approxlik_gq() then merges
# everything per observation.
# Args: pi, scale, df -- per-observation component weights, scales and
#   degrees of freedom (rows = observations when matrices).
# Returns: list(pilik, selik) -- weights and sds of the flattened normal
#   mixture, one row per observation.
mixlik_sd=function(pi,scale,df){
q=dim(tomatrix(pi))[2]
# Budget ~20 normal components in total, at least 5 per t component.
ll=max(5,floor(20/q))
params=cbind(pi,scale,df)
appweight=matrix(rep(0,ll*q),nrow=q)
appsigma=matrix(rep(0,ll*q),nrow=q)
for (i in 1:q){
# NOTE(review): df[i] takes the i-th element of df -- this assumes the df of
# component i is shared across observations (df a length-q vector); confirm
# against callers if df can be an n-by-q matrix.
app = approxt(df=df[i],r=ll) # l components for approximating each t-distribution
appweight[i,]=app$weight
appsigma[i,]=app$sigma
}
# One call of approxlik_gq per observation (row of params); each returns the
# concatenation c(weights, sds), split back apart below.
results = t(apply(params,1,approxlik_gq,q=q,appsigma=appsigma,appweight=appweight))
pilik=results[,1:(dim(results)[2]/2)]
selik=results[,(dim(results)[2]/2+1):dim(results)[2]]
return(list(pilik=pilik,selik=selik))
}
#compute posterior shape (alpha1) and rate (beta1)
# Posterior shape (alpha) and rate (beta) for an inverse-gamma mixture prior
# on the variances, given v observations' worth of data. Rows of the returned
# matrices index observations; columns index mixture components.
# NOTE: 'betahat' is accepted for interface compatibility but not used here.
post.igmix <- function(m, betahat, sebetahat, v) {
  n_obs <- length(sebetahat)
  # Shape update is the same for every observation: alpha + v/2.
  shape_post <- outer(rep(1, n_obs), m$alpha + v / 2)
  # Rate update adds half the summed squared scale per observation.
  rate_post <- outer(m$beta, v / 2 * sebetahat^2, FUN = "+")
  # Observations with missing standard errors keep the prior rate.
  missing_se <- is.na(sebetahat)
  rate_post[, missing_se] <- m$beta
  list(alpha = shape_post, beta = t(rate_post))
}
# try to select a default range for the sigmaa values
# that should be used, based on the values of betahat and sebetahat
# mult is the multiplier by which the sds differ across the grid
# Pick a default geometric grid of sd values for the mixture components,
# based on the observed effects (betahat) and their standard errors.
# 'mult' is the ratio between adjacent grid points; mult == 0 requests the
# minimal two-point grid c(0, sigmaamax/2).
autoselect.mixsd <- function(betahat, sebetahat, mult) {
  # Lower end: well below the measurement precision.
  sd_lo <- min(sebetahat) / 10
  # Upper end: rough bound such that sd^2 + se^2 can reach betahat^2.
  sd_hi <- if (all(betahat^2 < sebetahat^2)) {
    # No effect exceeds its noise level; 8x the floor is an arbitrary fallback.
    8 * sd_lo
  } else {
    2 * sqrt(max(betahat^2 - sebetahat^2))
  }
  if (mult == 0) {
    return(c(0, sd_hi / 2))
  }
  n_steps <- ceiling(log2(sd_hi / sd_lo) / log2(mult))
  mult^((-n_steps):0) * sd_hi
}
#' @title Main Adaptive SHrinkage function
#'
#' @description Takes vectors of estimates (betahat) and their standard errors (sebetahat), and applies
#' shrinkage to them, using Empirical Bayes methods, to compute shrunk estimates for beta.
#'
#' @details See readme for more details
#'
#' @param betahat, a p vector of estimates
#' @param sebetahat, a p vector of corresponding standard errors
#' @param method: specifies how ash is to be run. Can be "shrinkage" (if main aim is shrinkage) or "fdr" (if main aim is to assess fdr or fsr)
#' This is simply a convenient way to specify certain combinations of parameters: "shrinkage" sets pointmass=FALSE and prior="uniform";
#' "fdr" sets pointmass=TRUE and prior="nullbiased".
#' @param mixcompdist: distribution of components in mixture ("normal", "uniform" or "halfuniform")
#'
#' @param lambda1: multiplicative "inflation factor" for standard errors (like Genomic Control)
#' @param lambda2: additive "inflation factor" for standard errors (like Genomic Control)
#' @param nullcheck: whether to check that any fitted model exceeds the "null" likelihood
#' in which all weight is on the first component
#' @param df: appropriate degrees of freedom for (t) distribution of betahat/sebetahat
#' @param randomstart: bool, indicating whether to initialize EM randomly. If FALSE, then initializes to prior mean (for EM algorithm) or prior (for VBEM)
#' @param pointmass: bool, indicating whether to use a point mass at zero as one of components for a mixture distribution
#' @param onlylogLR: bool, indicating whether to use this function to get logLR. Skip posterior prob, posterior mean, lfdr...
#' @param singlecomp: bool, indicating whether to use a single inverse-gamma distribution as the prior distribution of the variances
#' @param SGD: bool, indicating whether to use the stochastic gradient descent method to fit the prior distribution of the variances
#' @param unimodal: unimodal constraint for the prior distribution of the variances ("variance") or the precisions ("precision")
#' @param prior: string, or numeric vector indicating Dirichlet prior on mixture proportions (defaults to "uniform", or 1,1...,1; also can be "nullbiased" 1,1/k-1,...,1/k-1 to put more weight on first component)
#' @param mixsd: vector of sds for underlying mixture components
#' @param gridmult: the multiplier by which the default grid values for mixsd differ by one another. (Smaller values produce finer grids)
#' @param minimal_output: if TRUE, just outputs the fitted g and the lfsr (useful for very big data sets where memory is an issue)
#' @param g: the prior distribution for beta (usually estimated from the data; this is used primarily in simulated data to do computations with the "true" g)
#' @param control A list of control parameters for the SQUAREM algorithm, default value is set to be control.default=list(K = 1, method=3, square=TRUE, step.min0=1, step.max0=1, mstep=4, kr=1, objfn.inc=1,tol=1.e-07, maxiter=5000, trace=FALSE). User may supply changes to this list of parameter, say, control=list(maxiter=10000,trace=TRUE)
#'
#'
#' @return a list with elements fitted.g is fitted mixture
#' logLR : logP(D|mle(pi)) - logP(D|null)
#'
#' @export
#'
#' @examples
#' beta = c(rep(0,100),rnorm(100))
#' sebetahat = abs(rnorm(200,0,1))
#' betahat = rnorm(200,beta,sebetahat)
#' beta.ash = ash(betahat, sebetahat)
#' summary(beta.ash)
#' plot(betahat,beta.ash$PosteriorMean,xlim=c(-4,4),ylim=c(-4,4))
# Adaptive shrinkage for a mixture likelihood.
# Fits a unimodal mixture prior g to effects 'betahat' whose likelihood is a
# per-observation mixture (weights 'pilik', scales 'sebetahat', df 'df'),
# then returns posterior summaries (mean, sd, lfsr, lfdr, qvalue, ...).
# The interface and behavior match the original except for one bug fix,
# marked BUGFIX below.
mixash = function(betahat,sebetahat,df,pilik,
                  method = c("shrink","fdr"),
                  mixcompdist = c("normal","uniform","halfuniform"),
                  lambda1=1,lambda2=0,nullcheck=FALSE,randomstart=FALSE,
                  pointmass = TRUE,
                  onlylogLR = FALSE,
                  singlecomp = FALSE,
                  SGD = TRUE,
                  prior=c("uniform","nullbiased"),
                  mixsd=NULL,gridmult=sqrt(2),
                  minimaloutput=FALSE,
                  g=NULL,
                  control=list()){
  # 'method' is a convenience switch that sets a combination of 'prior' and
  # 'pointmass'; user-supplied values win, with a warning.
  # ---- validate / normalize pilik --------------------------------------
  if(length(pilik)==1){
    pilik=rep(1,length(betahat))
  }else if(dim(tomatrix(pilik))[1]!=length(betahat)){
    stop("Error: pilik must be 1, or in same shape as sebetahat.")
  }
  # (df recycling logic retained from the original as commented-out code)
  # if(length(df)==1){
  #   df=sebetahat/sebetahat*df
  # }else if(dim(tomatrix(sebetahat))[2]>1 & length(df)==dim(tomatrix(sebetahat))[2]){
  #   df=matrix(rep(df,each=dim(sebetahat)[1]),ncol=length(df))
  # }else if(dim(tomatrix(sebetahat))[1]>1 & length(df)==dim(tomatrix(sebetahat))[1]){
  #   df=matrix(rep(df,dim(sebetahat)[2]),nrow=length(df))
  # }else{
  #   stop("Error: df must have length 1, or same length as betahat, or same as dim(sebetahat)[2].")
  # }
  # ---- resolve method shortcuts ----------------------------------------
  if(!missing(method)){
    method = match.arg(method)
    if(method=="shrink"){
      if(missing(prior)){
        prior = "uniform"
      } else {
        warning("Specification of prior overrides default for method shrink")
      }
      if(missing(pointmass)){
        pointmass=TRUE
      } else {
        warning("Specification of pointmass overrides default for method shrink")
      }
    }
    if(method=="fdr"){
      if(missing(prior)){
        prior = "nullbiased"
      } else {
        warning("Specification of prior overrides default for method fdr")
      }
      if(missing(pointmass)){
        pointmass=TRUE
      } else {
        warning("Specification of pointmass overrides default for method fdr")
      }
    }
  }
  if(onlylogLR){
    pointmass = TRUE  # logLR against the null requires the point mass at 0
  }
  mixcompdist = match.arg(mixcompdist)
  if(!is.numeric(prior)){
    prior = match.arg(prior)
  }
  # ---- normalize sebetahat / pilik shapes ------------------------------
  if(length(sebetahat)==1){
    sebetahat = rep(sebetahat,length(betahat))
  }
  if(length(pilik)==dim(tomatrix(sebetahat))[2]){
    # BUGFIX: the original read t(rep(pilik,length(betahat)),ncol=length(betahat)),
    # which errors at runtime because t() takes no 'ncol' argument. Replicate
    # the shared component weights into an n-by-l matrix (one row per obs).
    pilik = t(matrix(rep(pilik,length(betahat)),ncol=length(betahat)))
  }
  if(dim(tomatrix(sebetahat))[1] != length(betahat)){
    stop("Error: sebetahat must have length 1, or same length as betahat")
  }
  # Observations usable for fitting: no NA in betahat, sebetahat, or pilik.
  completeobs = (!is.na(betahat) & !is.na(apply(tomatrix(sebetahat),1,sum)) &
                   !is.na(apply(tomatrix(pilik),1,sum)))
  n=sum(completeobs)
  if(n==0){
    if(onlylogLR){
      return(list(pi=NULL, logLR = 0))
    }
    else{
      stop("Error: all input values are missing")
    }
  }
  pilik = tomatrix(pilik)
  sebetahat = tomatrix(sebetahat)
  # ---- choose working likelihood representation ------------------------
  if(mixcompdist=="normal"){
    # Approximate the t-mixture likelihood by a normal mixture (mixlik_sd);
    # moddf=NULL signals "normal likelihood" to the downstream routines.
    appnorm = mixlik_sd(pilik[completeobs,],sebetahat[completeobs,],df)
    pilik = matrix(rep(NA,length(betahat)*dim(appnorm$pilik)[2]),ncol=dim(appnorm$pilik)[2])
    pilik[completeobs,] = appnorm$pilik
    selik = matrix(rep(NA,length(betahat)*dim(pilik)[2]),ncol=dim(pilik)[2])
    selik[completeobs,] = appnorm$selik
    moddf = NULL
  }else if(mixcompdist=="uniform" | mixcompdist=="halfuniform"){
    pilik = pilik
    selik = sebetahat
    moddf = df
  }
  selik = tomatrix(selik)
  l = dim(pilik)[2]
  # ---- SQUAREM control parameters --------------------------------------
  control.default=list(K = 1, method=3, square=TRUE, step.min0=1, step.max0=1, mstep=4, kr=1, objfn.inc=1,tol=1.e-07, maxiter=5000, trace=FALSE)
  if(n>50000){control.default$trace=TRUE}
  namc=names(control)
  if (!all(namc %in% names(control.default)))
    stop("unknown names in control: ", namc[!(namc %in% names(control.default))])
  controlinput=modifyList(control.default, control)
  # ---- set up (or accept) the prior g ----------------------------------
  if(!is.null(g)){
    controlinput$maxiter = 1 # if g is specified, don't iterate the EM
    prior = rep(1,ncomp(g)) # prior unused when g fixed, but mixEM requires it
    null.comp=1 # likewise unused, but required
  } else {
    if(is.null(mixsd)){
      mixsd = autoselect.mixsd(betahat[completeobs],apply(tomatrix(pilik[completeobs,]*selik[completeobs,]),1,sum),gridmult)
    }
    if(pointmass){
      mixsd = c(0,mixsd)
    }
    null.comp = which.min(mixsd) # which component is the "null"
    k = length(mixsd)
    if(!is.numeric(prior)){
      if(prior=="nullbiased"){ # set up prior to favour "null"
        prior = rep(1,k)
        prior[null.comp] = 10 # prior 10-1 in favour of null
      }else if(prior=="uniform"){
        prior = rep(1,k)
      }
    }
    if(length(prior)!=k | !is.numeric(prior)){
      stop("invalid prior specification")
    }
    if(randomstart){
      pi = rgamma(k,1,1)
    } else {
      if(k<n){
        # Default initialization strongly favours the null: weight 1/n on
        # every non-null component, remainder on the null. Data can quickly
        # drive away from the null but only slowly toward it.
        pi=rep(1,k)/n
        pi[null.comp] = (n-k+1)/n
      } else {
        pi=rep(1,k)/k
      }
    }
    pi=pi/sum(pi)
    if(!is.element(mixcompdist,c("normal","uniform","halfuniform"))) stop("Error: invalid type of mixcompdist")
    if(mixcompdist=="normal") g=normalmix(pi,rep(0,k),mixsd)
    if(mixcompdist=="uniform") g=unimix(pi,-mixsd,mixsd)
    if(mixcompdist=="halfuniform"){
      # Mirror the grid: negative-side and positive-side half-uniforms.
      g = unimix(c(pi,pi)/2,c(-mixsd,rep(0,k)),c(rep(0,k),mixsd))
      prior = rep(prior, 2)
      pi = rep(pi, 2)
    }
  }
  # ---- fit mixture proportions by EM -----------------------------------
  pi.fit=EMest_mean(betahat[completeobs],lambda1*selik[completeobs,]+lambda2,pilik[completeobs,],g,prior,null.comp=null.comp,
                    nullcheck=nullcheck,df=moddf[completeobs],control=controlinput)
  # ---- assemble output --------------------------------------------------
  if(onlylogLR){
    logLR = tail(pi.fit$loglik,1) - pi.fit$null.loglik
    return(list(fitted.g=pi.fit$g, logLR = logLR))
  } else if(minimaloutput){
    n=length(betahat)
    ZeroProb = rep(0,length=n)
    NegativeProb = rep(0,length=n)
    #print("normal likelihood")
    # NOTE(review): this branch passes sebetahat[...] where the full-output
    # branch below passes selik[...] -- confirm this is intentional.
    ZeroProb[completeobs] = colSums(comppostprob_mixlik(pi.fit$g,betahat[completeobs],sebetahat[completeobs,],moddf,pilik[completeobs,])[comp_sd(pi.fit$g)==0,,drop=FALSE])
    NegativeProb[completeobs] = cdf_post_mixlik(pi.fit$g, 0, betahat[completeobs],sebetahat[completeobs,],moddf,pilik[completeobs,]) - ZeroProb[completeobs]
    # For missing observations, use the prior instead of the posterior.
    ZeroProb[!completeobs] = sum(mixprop(pi.fit$g)[comp_sd(pi.fit$g)==0])
    NegativeProb[!completeobs] = mixcdf(pi.fit$g,0)
    lfsr = compute_lfsr(NegativeProb,ZeroProb)
    result = list(fitted.g=pi.fit$g,lfsr=lfsr,fit=pi.fit)
    return(result)
  } else{
    # post = posterior_dist(pi.fit$g,betahat,sebetahat)
    n=length(betahat)
    ZeroProb = rep(0,length=n)
    NegativeProb = rep(0,length=n)
    PosteriorMean = rep(0,length=n)
    PosteriorSD=rep(0,length=n)
    pilikco = tomatrix(pilik[completeobs,])
    selikco = tomatrix(selik[completeobs,])
    ZeroProb[completeobs] = colSums(comppostprob_mixlik(pi.fit$g, betahat[completeobs], selikco, moddf, pilikco)[comp_sd(pi.fit$g)==0,,drop=FALSE])
    NegativeProb[completeobs] = cdf_post_mixlik(pi.fit$g, 0, betahat[completeobs],selikco,moddf, pilikco) - ZeroProb[completeobs]
    PosteriorMean[completeobs] = postmean_mixlik(pi.fit$g,betahat[completeobs],selikco,moddf, pilikco)
    PosteriorSD[completeobs] =postsd_mixlik(pi.fit$g,betahat[completeobs],selikco,moddf, pilikco)
    #FOR MISSING OBSERVATIONS, USE THE PRIOR INSTEAD OF THE POSTERIOR
    ZeroProb[!completeobs] = sum(mixprop(pi.fit$g)[comp_sd(pi.fit$g)==0])
    NegativeProb[!completeobs] = mixcdf(pi.fit$g,0)
    PosteriorMean[!completeobs] = calc_mixmean(pi.fit$g)
    PosteriorSD[!completeobs] =calc_mixsd(pi.fit$g)
    PositiveProb = 1- NegativeProb-ZeroProb
    lfsr = compute_lfsr(NegativeProb,ZeroProb)
    #lfsra = compute_lfsra(PositiveProb,NegativeProb,ZeroProb)
    lfdr = ZeroProb
    qvalue = qval.from.lfdr(lfdr)
    result = list(fitted.g=pi.fit$g,logLR =tail(pi.fit$loglik,1) - pi.fit$null.loglik,
                  PosteriorMean = PosteriorMean,PosteriorSD=PosteriorSD,
                  PositiveProb =PositiveProb,NegativeProb=NegativeProb, ZeroProb=ZeroProb,lfsr = lfsr,
                  #lfsra=lfsra,
                  lfdr=lfdr,qvalue=qvalue,
                  fit=pi.fit,lambda1=lambda1,lambda2=lambda2,call=match.call(),data=list(betahat = betahat, sebetahat=sebetahat))
    class(result)= "mixash"
    return(result)
  }
}
|
dc5dd849362e048a8b17f1036fdaaf4647551867
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/uavRst/examples/calc_ext.Rd.R
|
c9c18a52daacdb101f06e9b6aea3299abf602c8b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,682
|
r
|
calc_ext.Rd.R
|
# Auto-extracted (genthat) example script for uavRst::calc_ext. The example
# itself is wrapped in \dontrun (the ##D lines) because it downloads data and
# needs external GI software (SAGA, OTB); only library() executes directly.
library(uavRst)
### Name: calc_ext
### Title: Convenient function to preprocess synthetic raster bands from a
### given RGB image and/or DTM/DSM data.
### Aliases: calc_ext
### ** Examples
## Not run:
##D
##D ##- required packages
##D require(uavRst)
##D require(link2GI)
##D
##D # create and check the links to the GI software
##D giLinks<-uavRst::linkAll()
##D if (giLinks$saga$exist & giLinks$otb$exist){
##D #'
##D ##- create and set folders
##D ##- please mind that the pathes are exported as global variables
##D paths<-link2GI::initProj(projRootDir = tempdir(),
##D projFolders = c("data/","data/ref/","output/","run/","las/"),
##D global = TRUE,
##D path_prefix = "path_")
##D
##D ##- clean runtime folder
##D unlink(paste0(path_run,"*"), force = TRUE)
##D
##D ##- get the tutorial data
##D utils::download.file("https://github.com/gisma/gismaData/raw/master/uavRst/data/tutorial_data.zip",
##D paste0(path_run,"tutorial_data.zip"))
##D unzip(zipfile = paste0(path_run,"tutorial_data.zip"), exdir = R.utils::getAbsolutePath(path_run))
##D
##D ##- calculate some synthetic channels from the RGB image and the canopy height model
##D ##- then extract the from the corresponding training geometries the data values aka trainingdata
##D trainDF <- calc_ext(calculateBands = TRUE,
##D extractTrain = TRUE,
##D suffixTrainGeom = "",
##D patternIdx = "index",
##D patternImgFiles = "rgb" ,
##D patterndemFiles = "chm",
##D prefixRun = "tutorial",
##D prefixTrainImg = "",
##D rgbi = TRUE,
##D indices = c("TGI","CI"),
##D channels = c("red"),
##D rgbTrans = FALSE,
##D hara = FALSE,
##D haraType = c("simple"),
##D stat = FALSE,
##D edge = FALSE,
##D morpho = FALSE,
##D pardem = TRUE,
##D demType = c("slope", "MTPI"),
##D kernel = 3,
##D currentDataFolder = path_run,
##D currentIdxFolder = path_run,
##D giLinks = giLinks)
##D
##D ##- show the result
##D head(trainDF)
##D # use ffs_train as next step for rf classification issues
##D }
##D ##+
## End(Not run)
|
c986bb05139939fbdc58c7fa5795546985472ba4
|
e00befe0f92d42dd1f97e9304973f4b22da03af5
|
/BCS_PopularKernels/BCS_PopularKernels.R
|
443b6a7baf99784be06cdf6b66b609319c7a4064
|
[] |
no_license
|
QuantLet/BCS
|
a706ffdc3cf8777b5443b2c66ff601c3bc517ee0
|
4a5d9fc2c058e5e02534ccb37898d9e9cf2edd9e
|
refs/heads/master
| 2023-04-03T23:59:31.647499
| 2023-03-27T22:14:39
| 2023-03-27T22:14:39
| 51,316,067
| 4
| 10
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,484
|
r
|
BCS_PopularKernels.R
|
# kernel.wfunc: evaluate a kernel weighting function at points x, centered at
# xzero (bandwidth implicitly 1: weights are 0 outside |x - xzero| > 1).
#
# Args:
#   x      numeric vector of evaluation points
#   xzero  scalar center of the kernel
#   kernel one of "Uniform", "Triangular", "Epanechnikov", "Quartic"
# Returns a numeric vector of weights, same length as x. An unrecognized
# kernel name yields NA inside the support (matching the original behavior).
#
# Improvements over the original: fully vectorized (no per-element loop, no
# growing of 'y' from a scalar NA), and well-defined for empty input, where
# the original's 1:n loop ran with i in c(1, 0) and errored.
kernel.wfunc = function(x, xzero, kernel) {
  dist <- x - xzero                       # signed distance from the center
  inside <- abs(dist) <= 1                # support of all four kernels
  core <- switch(kernel,
    Uniform      = rep(0.5, length(dist)),
    Triangular   = 1 - abs(dist),
    Epanechnikov = 0.75 * (1 - dist^2),
    Quartic      = 15/16 * (1 - dist^2)^2,
    rep(NA_real_, length(dist))           # unknown kernel name
  )
  ifelse(inside, core, 0)                 # zero weight outside the support
}
# Plot the four kernel weighting functions in a 2x2 panel layout.
kernel_names = c("Uniform", "Triangular", "Epanechnikov", "Quartic")
x = seq(-2, 2, by = 0.1) # sequence for points to evaluate
# NOTE(review): 'par' captures the previous graphics settings but is never
# restored afterwards -- consider on.exit(par(par)) if this becomes a function.
par = par(mfrow = c(2, 2), cex.main = 1.5, lwd = 2, cex.axis = 1.5, lty = "solid")
for (i in 1:length(kernel_names)) {
# loop creating the four plots
y = kernel.wfunc(x, xzero = 0, kernel = kernel_names[i])
plot(x, y, type = "l", xlim = c(-2, 2), ylim = c(0, 1), col = "red", xlab = "", ylab = "", main = paste(kernel_names[i],
"kernel"))
}
|
03d7366ea897186323ccbe298773ee1ff2e23026
|
aa44abd28951f5f21ea3e4924bb7244cef3cd1c8
|
/man/renderVegalite.Rd
|
41d0fb1ecbd4d81f9d7f1afe89640e669ae28a42
|
[] |
no_license
|
hrbrmstr/vegalite
|
969a53d7d34d67d445d9ddab49ecf5336345f69f
|
385369840c6b396ba49a698e3c291aab8b5e404b
|
refs/heads/master
| 2021-01-10T10:08:09.882157
| 2018-07-30T17:44:42
| 2018-07-30T17:44:42
| 52,391,615
| 175
| 21
| null | 2018-07-30T17:44:43
| 2016-02-23T21:05:34
|
JavaScript
|
UTF-8
|
R
| false
| true
| 507
|
rd
|
renderVegalite.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shiny.r
\name{renderVegalite}
\alias{renderVegalite}
\alias{renderVegaliteSpec}
\title{Widget render function for use in Shiny}
\usage{
renderVegalite(expr, env = parent.frame(), quoted = FALSE)
renderVegaliteSpec(expr, env = parent.frame(), quoted = FALSE)
}
\arguments{
\item{expr}{expr to render}
\item{env}{evaluation environment}
\item{quoted}{quote expression?}
}
\description{
Widget render function for use in Shiny
}
|
2b5d1f00b9eea061501d868aab1c81bb9e48db97
|
d14bcd4679f0ffa43df5267a82544f098095f1d1
|
/R/groupi.Dest.Degrad.oneplot.R
|
cbd4a92dd67c114d2937840344a43a8cb3f4608a
|
[] |
no_license
|
anhnguyendepocen/SMRD
|
9e52aa72a5abe5274f9a8546475639d11f058c0d
|
c54fa017afca7f20255291c6363194673bc2435a
|
refs/heads/master
| 2022-12-15T12:29:11.165234
| 2020-09-10T13:23:59
| 2020-09-10T13:23:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,668
|
r
|
groupi.Dest.Degrad.oneplot.R
|
#' Title
#'
#' @param data.ddd
#' @param distribution
#' @param transformation.response
#' @param transformation.time
#' @param ylim
#' @param xlim
#' @param my.title
#' @param ylab
#' @param xlab
#' @param cex
#' @param cex.labs
#' @param cex.points
#' @param add
#' @param grids
#' @param title.option
#' @param pch.point
#' @param response.on.yaxis
#' @param subset
#' @param do.legend
#' @param fail.level
#' @param group.var
#' @param plot.lines
#' @param lty
#' @param ...
#'
#' @return NULL
#' @export
#'
#' @examples
#' \dontrun{
#'
#' InsulationBrkdwn.ddd <- frame.to.ddd(insulationbrkdwn,
#' response.column = 3,
#' time.column = 1,
#' x.columns = 2,
#' data.title = "Voltage Breakdown Data",
#' response.units = "Volts",
#' time.units = "Weeks")
#'
#' print(InsulationBrkdwn.ddd)
#'
#' plot(InsulationBrkdwn.ddd,
#' transformation.Response = "log",
#' transformation.time = "linear")
#'
#' tmp <- groupi.Dest.Degrad.indivplots(InsulationBrkdwn.ddd,
#' transformation.Response = "log",
#' transformation.time = "linear",
#' distribution = "normal")
#'
#' groupi.Dest.Degrad.oneplot(InsulationBrkdwn.ddd,
#' transformation.Response = "log",
#' transformation.time = "linear",
#' distribution="normal")
#'
#' groupm.Dest.Degrad(InsulationBrkdwn.ddd,
#' distribution = "normal",
#' transformation.Response = "log10",
#' transformation.x = "invtemp",
#' transformation.time = "linear")
#'
#' groupm.Dest.Degrad(InsulationBrkdwn.ddd,
#' distribution = "normal",
#' transformation.Response = "log",
#' transformation.x = "arrhenius",
#' transformation.time="linear")
#'
#' # Do individual analyses at each level of temperature
#'
#' InsulationBrkdwn.groupi.Dest.Degrad <-groupi.Dest.Degrad(InsulationBrkdwn.ddd,
#' distribution = "normal",
#' transformation.Response = "log",
#' transformation.time = "sqrt")
#'
#' plot(InsulationBrkdwn.groupi.Dest.Degrad,
#' transformation.x = "Arrhenius")
#'
#' InsulationBrkdwn.groupm.Dest.Degrad <-groupm.Dest.Degrad(InsulationBrkdwn.ddd,
#' distribution = "normal",
#' transformation.Response = "log",
#' transformation.x = "arrhenius",
#' transformation.time = "sqrt")
#'
#' InsulationBrkdwn.groupm.Dest.Degrad<-groupm.Dest.Degrad(InsulationBrkdwn.ddd,
#' distribution = "normal",
#' transformation.Response = "log",
#' transformation.x = "arrhenius",
#' transformation.time = "sqrt",
#' new.data = c("150,260"))
#'
#' }
groupi.Dest.Degrad.oneplot <-
    function (data.ddd,
              distribution,
              transformation.response,
              transformation.time,
              ylim = c(NA, NA),
              xlim = c(NA, NA),
              my.title = NULL,
              ylab = NULL,
              xlab = NULL,
              cex = 1.05,
              cex.labs = 1.05,
              cex.points = 1,
              add = FALSE,
              grids = FALSE,
              title.option = GetSMRDDefault("SMRD.TitleOption"),
              pch.point = NULL,
              response.on.yaxis = TRUE,
              subset = TRUE,
              do.legend = "On plot",
              fail.level = NULL,
              group.var = 1:ncol(xmat(data.ddd)),
              plot.lines = TRUE,
              lty = NULL,
              lwd = 2,
              bty = `if`(grids, "o","L"),...)
{
    # Plot destructive-degradation data on one figure with a separate
    # (two-stage) regression line fitted within each group.
    # Returns (invisibly) the two-stage fit with class "groupi.Dest.Degrad.out"
    # and the inputs attached as attributes.
    # NOTE: all T/F literals have been replaced by TRUE/FALSE -- the single-letter
    # aliases are ordinary variables in R and can be reassigned by user code.
    #
    # Local replacement function so that `do.list(x) <- value` sets the
    # "do.list" attribute.
    `do.list<-` <- function (data.ld, value) {
        attr(data.ld, "do.list") <- value
        return(data.ld)
    }
    # TRUE when regular expression `pat` matches somewhere in `str`.
    CheckString <- function (pat, str) {
        return(regexpr(pat, str) > 0)
    }
    # Restore the graphical parameters this function perturbs.
    on.exit(par(xpd = FALSE, bty = "o", mar = c(5, 4, 4, 2) + 0.1))
    # Draw the transformed data points; the legend is handled below, so it is
    # suppressed here. The wide right margin leaves room for an on-plot legend.
    tran.data.ddd <- plot.Dest.Degrad.data(x = data.ddd,
                                           transformation.response = transformation.response,
                                           transformation.time = transformation.time,
                                           ylim = ylim,
                                           xlim = xlim,
                                           my.title = "",
                                           ylab = ylab,
                                           xlab = xlab,
                                           cex = cex,
                                           cex.labs = cex.labs,
                                           cex.points = cex.points,
                                           grids = grids,
                                           title.option = title.option,
                                           pch.point = pch.point,
                                           response.on.yaxis = response.on.yaxis,
                                           subset = subset,
                                           group.var = group.var,
                                           do.legend = "Suppress",
                                           mar = c(4.5, 5.25, 3.5, 12.1),
                                           bty = bty)
    do.list <- do.list(tran.data.ddd)
    # Default line types: all solid, or distinct types skipping lty = 2 (dashed
    # is reserved elsewhere).
    if (is.null(lty)) {
        lty <- if (GetSMRDDefault("SMRD.solid.lines")) {
            rep(1, length(do.list))
        } else {
            (1:(length(do.list) + 1))[-2]
        }
    }
    # Default plotting characters skip a few symbols; recycle a single value.
    if (is.null(pch.point)) pch.point <- (1:(length(do.list) + 4))[-c(2, 6, 17, 19)]
    if (length(pch.point) == 1) pch.point <- rep(pch.point, length(do.list))
    # Stage-one fit: a separate regression within each group.
    two.stage.out <- two.stage.dest.degrad(tran.data.ddd,
                                           distribution = distribution,
                                           double.count.zeros = FALSE)
    ok.values <- attr(two.stage.out, "ok.values")
    slope <- attr(two.stage.out, "slope")
    intercept <- attr(two.stage.out, "intercept")
    the.slope.computed.list <- slope.computed.list(two.stage.out)
    the.done.list <- done.list(two.stage.out)
    # Map each fitted group back to its position (and hence color) in do.list.
    plot.index <- match(the.slope.computed.list, do.list)
    for (i in 1:length(the.slope.computed.list)) {
        if (map.SMRDDebugLevel() >= 4) {
            cat(the.slope.computed.list[i],
                "Intercept = ",
                intercept[i],
                "slope = ",
                slope[i],
                "in groupi.Dest.Degrad.oneplot\n")
        }
        # Fitted line for group i, colored/styled consistently with its points.
        abline(intercept[i],
               slope[i],
               col = plot.index[i],
               lty = lty[plot.index[i]],
               lwd = lwd)
        # Optional horizontal failure-level reference line (transformed scale).
        if (!is.null(fail.level)) abline(h = f.relationship(fail.level, transformation.response),
                                         lwd = 3,
                                         lty = lty[i])
    }
    model.string <- paste("Resp:",
                          transformation.response,
                          ",Time:",
                          transformation.time,
                          ", Dist:",
                          distribution, sep = "")
    if (is.null(my.title)) {
        my.title <- paste(get.data.title(tran.data.ddd),
                          "\nDestructive Degradation",
                          " Individual Regression Analyses\n",
                          model.string,
                          sep = "")
    }
    if (CheckString("full", title.option)) mtext(text = my.title,
                                                 side = 3,
                                                 cex = 1.2,
                                                 line = 0.5)
    # Legend position: the original code branched on mean(slope) > 0 but both
    # branches were identical (and the test would error if slope contained NA),
    # so the branch is collapsed to plain assignments.
    legend.x <- x.loc(1.05)
    legend.y <- y.loc(0.99)
    if (length(do.list) > 1) {
        if (do.legend == "On plot") {
            par(xpd = TRUE)  # allow drawing in the right margin
            legend(legend.x,
                   legend.y,
                   parse(text = switch.units(do.list, NULL)),
                   cex = cex,
                   bty = "n",
                   col = 1:length(do.list),
                   pch = pch.point,
                   lty = lty,
                   y.intersp = 1,
                   seg.len = 1.5,
                   lwd = 1.5,
                   adj = c(-0.1))
        }
        # Alternatively, draw the legend on a fresh page or into its own file.
        if (do.legend == "New page" || do.legend == "New file") {
            if (do.legend == "New file") postscript(file = "Save_legend.ps",
                                                    horizontal = TRUE)
            plot(c(0, 0),
                 c(1, 1),
                 xlab = "",
                 ylab = "",
                 type = "n",
                 xaxt = "n",
                 yaxt = "n")
            legend(x.loc(0.003),
                   y.loc(0.997),
                   do.list,
                   cex = 1.1,
                   bty = "n",
                   col = 1:length(do.list),
                   pch = pch.point,
                   lty = lty,
                   y.intersp = 0.675)
            if (do.legend == "New file") dev.off()
        }
    }
    # Attach the inputs so downstream methods can reconstruct the analysis.
    attr(two.stage.out, "data.ddd") <- data.ddd
    attr(two.stage.out, "distribution") <- distribution
    # NOTE(review): attribute name "transformation Response" (with a space)
    # is inconsistent with "transformation.time" but is preserved because
    # downstream code may read it by this exact name -- verify before renaming.
    attr(two.stage.out, "transformation Response") <- transformation.response
    attr(two.stage.out, "transformation.time") <- transformation.time
    oldClass(two.stage.out) <- "groupi.Dest.Degrad.out"
    invisible(two.stage.out)
}
|
39f5f3734bdfc4a573ec33f180a74844c4e980ca
|
0fbfa221d90a5485fa721b3d4dd2070753d923b1
|
/modele_sird.R
|
42725345521c4bf7c5665cdefd25e658848e2857
|
[] |
no_license
|
remithiebaut/3BIM
|
4b9e5554e23e7d01f94b6f04a41995bade305e0a
|
b7900fb29fdfa8e27773cf039f8775f14986a561
|
refs/heads/master
| 2020-04-21T07:59:16.437556
| 2019-06-12T17:26:03
| 2019-06-12T17:26:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,658
|
r
|
modele_sird.R
|
# SIRD epidemic models: basic model, then Malthusian demography, then
# logistic (carrying-capacity) demography.
library(phaseR)
library(deSolve)
rm(list=ls()) # clear the workspace (note: discouraged in scripts; wipes all objects)
par(mfrow=c(2,1)) # stack two figures per page
# Right-hand side of the basic SIRD model for deSolve::lsoda.
# t          : time (unused; required by the lsoda interface)
# x          : named state vector c(S, I, R, D)
# parameters : named vector with beta (infectivity), gamma (disease death
#              rate) and mu (recovery rate)
# Returns a one-element list holding the derivatives c(dS, dI, dR, dD).
Eq1 <- function(t, x, parameters) {
  with(as.list(c(parameters, x)), {
    infection <- beta * S * I
    death     <- gamma * I
    recovery  <- mu * I
    list(c(-infection,                    # dS: susceptibles become infected
           infection - death - recovery, # dI
           recovery,                     # dR: recovered
           death))                       # dD: cumulative deaths
  })
}
beta=0.00225 # infectivity
gamma=0.01 # disease-induced death rate
mu=0.04 # recovery rate
N=764 # initial population
I=1 # initial number of infected
parameters=c(beta=beta,gamma=gamma,N=N,mu=mu)
ini=c(S=N-I,I=I,R=0,D=0)
time=seq(0,40,0.1)
sol=lsoda(func=Eq1,y=ini,times=time,parms = parameters) # solve the ODE system
# Solver output: column 1 = time, then S, I, R, D.
plot(x=time,y=sol[,2],xlab="x",ylab="Population",type="l",ylim=c(0,700),col="green") # S
points(x = time,sol[,3],col="red",type="l")   # I
points(x = time,sol[,4],col="blue",type="l")  # R
points(x = time,sol[,5],col="black",type="l") # D
# Next model: add natural births and deaths (Malthusian demography)
# Right-hand side of the SIRD model with Malthusian demography:
# natural births (alpha) add to S; natural deaths (lambda) remove from all
# living compartments and accumulate in D.
# Arguments match the deSolve interface; state is c(S, I, R, D) and
# parameters contains beta, gamma, mu, lambda, alpha.
Eq2 <- function(t, x, parameters) {
  with(as.list(c(parameters, x)), {
    living <- S + I + R
    dS <- - beta*S*I + alpha*living - lambda*S
    dI <- beta*S*I - gamma*I - mu*I - lambda*I
    dR <- mu*I - lambda*R
    dD <- gamma*I + lambda*living
    list(c(dS, dI, dR, dD))
  })
}
beta=0.00225 # infectivity
gamma=0.01 # disease-induced death rate
mu=0.04 # recovery rate
N=764 # initial population
I=1 # initial number of infected
lambda=0.001 # natural death rate
alpha=0.002 # natural birth rate
parameters=c(beta=beta,gamma=gamma,N=N,mu=mu,lambda=lambda,alpha=alpha)
ini=c(S=N-I,I=I,R=0,D=0)
time=seq(0,3000,0.1)
sol=lsoda(func=Eq2,y=ini,times=time,parms = parameters) # solve the ODE system
# Solver output: column 1 = time, then S, I, R, D.
plot(x=time,y=sol[,2],xlab="x",ylab="Population",type="l",ylim=c(0,700),col="green",main="Malthus") # S
points(x = time,sol[,3],col="red",type="l")   # I
points(x = time,sol[,4],col="blue",type="l")  # R
points(x = time,sol[,5],col="black",type="l") # D
# Next: switch to a logistic (carrying-capacity) demographic model
# Right-hand side of the SIRD model with logistic demography: a crowding
# term theta * (living population) / k drains each living compartment in
# proportion to its size; those deaths accumulate in D.
# Parameters: beta, gamma, mu, lambda, alpha, theta, k (carrying capacity).
Eq3 <- function(t, x, parameters) {
  with(as.list(c(parameters, x)), {
    living <- S + I + R
    press  <- theta * living / k  # per-capita crowding mortality
    dS <- - beta*S*I + alpha*living - lambda*S - press*S
    dI <- beta*S*I - gamma*I - mu*I - lambda*I - press*I
    dR <- mu*I - lambda*R - press*R
    dD <- gamma*I + lambda*living + press*living
    list(c(dS, dI, dR, dD))
  })
}
beta=0.00225 # infectivity
gamma=0.01 # disease-induced death rate
mu=0.04 # recovery rate
N=764 # initial population
I=0.0 # initial number of infected (zero: demography-only run, no epidemic)
lambda=0.001 # natural death rate
alpha=0.002 # natural birth rate
theta=alpha-lambda # demographic pressure (net growth rate)
k=300 # carrying capacity
parameters=c(beta=beta,gamma=gamma,N=N,mu=mu,lambda=lambda,alpha=alpha,theta=theta,k=k)
ini=c(S=N-I,I=I,R=0,D=0)
time=seq(0,3500,1)
sol=lsoda(func=Eq3,y=ini,times=time,parms = parameters) # solve the ODE system
# Solver output: column 1 = time, then S, I, R, D.
plot(x=time,y=sol[,2],xlab="x",ylab="Population",type="l",ylim=c(0,700),col="green",main = "Logistique") # S
points(x = time,sol[,3],col="red",type="l")   # I
points(x = time,sol[,4],col="blue",type="l")  # R
points(x = time,sol[,5],col="black",type="l") # D
# TESTS: compare cumulative deaths relative to the living population at the
# end of the simulation, with vs without the disease.
beta=0.00225 # infectivity
gamma=0.1 # disease-induced death rate
mu=0.4 # recovery rate
N=764 # initial population
I=1 # initial number of infected
lambda=0.001 # natural death rate
alpha=0.002 # natural birth rate
theta=alpha-lambda # demographic pressure (net growth rate)
k=300 # carrying capacity
parameters=c(beta=beta,gamma=gamma,N=N,mu=mu,lambda=lambda,alpha=alpha,theta=theta,k=k)
ini=c(S=N-I,I=I,R=0,D=0)
inisansmaladie=c(S=N,I=0,R=0,D=0) # disease-free initial condition
maxtime=5000
time=seq(0,maxtime,0.1)
sol=lsoda(func=Eq3,y=ini,times=time,parms = parameters)             # with disease
solm=lsoda(func=Eq3,y=inisansmaladie,times=time,parms = parameters) # without disease
# BUG FIX: with a step of 0.1 the solver output has 10*maxtime + 1 rows, so
# indexing by `maxtime` (as the original code did) picked the state at
# t = (maxtime - 1)/10, not at the end of the run. Use the last row instead.
last <- nrow(sol)
# Ratio of cumulative deaths D to the living population S + I + R at t = maxtime.
print(sol[last,5]/(sol[last,4]+sol[last,3]+sol[last,2]))
print(solm[last,5]/(solm[last,4]+solm[last,3]+solm[last,2]))
|
70aa4f2fcf74c87e92e306778fb3ce2c60748adb
|
278dff1534b0ca8662efa3b520e0c52c40160af2
|
/White_Sea/temperatures_Kandalaksha/temp_2013_2016.R
|
aea042a29a222368ae7e5b02c80947fc7bcba759
|
[] |
no_license
|
sophianazarova/PhD_thesis
|
ba96384983e8630819292811428ec316656c3ebb
|
5f77ef0b52d7690200124de218672354cff3b964
|
refs/heads/master
| 2020-05-17T01:53:44.975595
| 2016-10-14T08:42:56
| 2016-10-14T08:42:56
| 7,933,216
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 449
|
r
|
temp_2013_2016.R
|
# Monthly mean temperatures, Kandalaksha, 2013-2016: read the raw series,
# parse timestamps, and aggregate to monthly means.
temp <- read.csv2("2013_2016.csv")
str(temp)
str(temp$Time)
# Parse the timestamp column ("dd.mm.YYYY HH:MM", Moscow time) into POSIXlt.
temp$Time <- as.POSIXlt(x = temp$Time, format = "%d.%m.%Y %H:%M", tz = "MSK")
str(temp$Time)
# build a year-month column (e.g. "2013.07") to group by
temp$ym <- format(temp$Time,"%Y.%m")
str(temp$ym)
# compute monthly mean temperatures
Temp_month_mean <- aggregate( Temp ~ ym, data = temp, FUN = mean, na.rm=T)
write.csv2(Temp_month_mean, "Temp_Kandalaksha_mean_month_2013_2016.csv")
|
cbebf9c06280f5c12f1bf6035e25e463d3c05691
|
459c860719bb94380fc8bf2ddc95790a7957deb8
|
/Project/feature_extraction/sentiment/sentiment.R
|
70950a127fc60db81232eedc7b4333df04678723
|
[] |
no_license
|
mit-6867/mit_6867
|
8df3b852fe725258f7d71bcf923ace787e0ed5c1
|
65b320ef714cad963b1c4ef416a2b6051b5314d1
|
refs/heads/master
| 2020-04-05T22:53:51.230608
| 2016-10-12T19:41:01
| 2016-10-12T19:41:01
| 42,942,455
| 6
| 2
| null | 2015-11-12T04:01:43
| 2015-09-22T15:24:27
|
TeX
|
UTF-8
|
R
| false
| false
| 1,326
|
r
|
sentiment.R
|
library('tm')
library('SnowballC')
library('slam')
library("tm.lexicon.GeneralInquirer")
# Normalize a tm corpus: strip punctuation, convert to lower case, and
# remove digits. Stop-word removal, whitespace stripping and stemming are
# available but intentionally left disabled, as in the original pipeline.
normalize <- function(corpus) {
  cleaned <- tm_map(corpus, content_transformer(removePunctuation))
  cleaned <- tm_map(cleaned, content_transformer(tolower))
  cleaned <- tm_map(cleaned, content_transformer(removeNumbers))
  # Optional steps, disabled:
  # cleaned <- tm_map(cleaned, content_transformer(removeWords), stopwords("english"))
  # cleaned <- tm_map(cleaned, content_transformer(removeWords), c('may','will'))
  # cleaned <- tm_map(cleaned, content_transformer(stripWhitespace))
  # cleaned <- tm_map(cleaned, content_transformer(stemDocument))
  return(cleaned)
}
# Build a per-document sentiment table (positive/negative General Inquirer
# term counts plus document length) and write it to sentiment.txt.
path <- 'C:/Users/Jeremy/Desktop/Data/R/articles'
# NOTE: `c` is used as a variable name here; it shadows base::c for data
# lookups (function calls c(...) still resolve to the base function).
c <- Corpus(DirSource(path,encoding='UTF-8'))
#c <- Corpus(VectorSource(text))
c <- normalize(c)
dtm <- DocumentTermMatrix(c)
# tf-idf weighted dtm (computed but not used below)
dtm_weighted <- DocumentTermMatrix(c, control = list(weighting = weightTfIdf))
# Per-document counts of General Inquirer positive / negative terms.
pos <- as.numeric(tm_term_score(dtm,terms_in_General_Inquirer_categories("Positiv")))
neg <- as.numeric(tm_term_score(dtm,terms_in_General_Inquirer_categories("Negativ")))
# Document length = row sums of the document-term matrix.
len <- rollup(dtm, 2, na.rm=TRUE, FUN = sum)
len <- as.vector(as.matrix(len))
index <- seq(1,length(len))
data <- cbind(index,pos,neg,len)
head(data)
dim(data)
write.table(data,'sentiment.txt',row.names = F,sep=',')
|
67bc927a500d1f679d820934c532fcd660ffa562
|
f821206c7d62134707f119019676c2a3f67522fb
|
/plotCNVs.R
|
e41a9ae9985ba6dae8a04328ee641df82b0a7ff5
|
[] |
no_license
|
DTHenare/CSL_Choice
|
a407b55f27085b62f4422879affb72b0c97f6a56
|
5251628f8e6135be81d80e8c515184144ef17618
|
refs/heads/master
| 2021-10-20T10:52:27.169811
| 2021-10-19T00:55:58
| 2021-10-19T00:55:58
| 175,589,021
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,161
|
r
|
plotCNVs.R
|
library(tidyr)
library(dplyr)
library(ggplot2)
# Preprocess two CNV datasets (voluntary / cued) into long-format per-subject
# condition means, saved as plotData_TaskChoiceXrepswitch.RData in each folder.
paths = c('CNV_StimOnsetWide/','CNV_HannaStimOnsetWide/')
groupInfos = c('SophiaGroupAssignment_provisional.csv','HannaGroupAssignment_provisional.csv')
for (ind in 1:length(paths)) {
  dPath = paths[ind]
  baseline = 800  # samples before stimulus onset; subtracted below so 0 = onset
  groupdata = read.csv(file=groupInfos[ind])
  rejCrit = 700   # minimum epoch count for a subject to be kept
  fPrefix = 'CNV'
  # NOTE(review): on the first iteration allData does not exist yet, so this
  # rm() emits a warning (not an error) -- verify that is intended.
  rm(allData)
  gc()
  # Organise group data
  groupdata = groupdata %>% mutate(Group = ifelse(Group == 1, "Colour", "Shape"))
  groupdata$Reject = as.factor(groupdata$Reject)
  #####
  # Create aggregate of all participant data (needs dPath and fPrefix)
  eFilePattern = paste(fPrefix,"*_epochs.csv", sep="")
  fFilePattern = paste(fPrefix,"*_Fz.csv", sep="")
  pFilePattern = paste(fPrefix,"*_Pz.csv", sep="")
  eFileList = list.files(dPath, pattern=glob2rx(eFilePattern))
  fFileList = list.files(dPath, pattern=glob2rx(fFilePattern))
  pFileList = list.files(dPath, pattern=glob2rx(pFilePattern))
  # Create variables using the first subject's files
  epochInfo = read.csv(file = paste(dPath,eFileList[1], sep=""))
  epochInfo$Subject = 1
  epochInfo$Group = groupdata$Group[1]
  epochInfo$Reject = groupdata$Reject[1]
  FzData = read.csv(file = paste(dPath,fFileList[1], sep=""), header = FALSE)
  PzData = read.csv(file = paste(dPath,pFileList[1], sep=""), header = FALSE)
  # Append the remaining subjects' files to the variables above
  for (subj in 2:length(eFileList)) {
    curEpochInfo = read.csv(file = paste(dPath,eFileList[subj], sep=""))
    curEpochInfo$Subject = subj
    curEpochInfo$Group = groupdata$Group[subj]
    curEpochInfo$Reject = groupdata$Reject[subj]
    curFzData = read.csv(file = paste(dPath,fFileList[subj], sep=""), header = FALSE)
    curPzData = read.csv(file = paste(dPath,pFileList[subj], sep=""), header = FALSE)
    epochInfo = rbind(epochInfo, curEpochInfo)
    FzData = rbind(FzData, curFzData)
    PzData = rbind(PzData, curPzData)
  }
  # Tidy the variables: drop unused columns, convert to factors
  epochInfo$Subject = as.factor(epochInfo$Subject)
  epochInfo$Group = as.factor(epochInfo$Group)
  epochInfo$VarName8 = NULL
  epochInfo$VarName9 = NULL
  epochInfo$VarName10 = NULL
  epochInfo$VarName11 = NULL
  epochInfo$LatStim=NULL
  epochInfo$MidStim=NULL
  epochInfo$TrialType=NULL
  epochInfo$Timepoint=NULL
  epochInfo$Hemifield=NULL
  # Free intermediate objects
  rm(curEpochInfo,curFzData,curPzData, fPrefix, eFileList, eFilePattern, fFileList, fFilePattern, pFileList, pFileList, subj, groupdata)
  gc()
  #####
  # Combine all the data together into one long table: stack Fz and Pz rows,
  # duplicating the epoch info so each channel keeps its metadata.
  gathercols = colnames(FzData)
  FzData$Chan = "Fz"
  PzData$Chan = "Pz"
  scalpData = rbind(FzData,PzData)
  epochInfo = rbind(epochInfo,epochInfo)
  allData <- cbind(epochInfo, scalpData)
  rm(epochInfo,FzData,PzData,scalpData)
  gc()
  # Wide -> long: one row per (epoch, channel, sample)
  allData <- gather(allData, "sample", "voltage", gathercols, factor_key = TRUE)
  # Tidy variable names and derive condition factors from the Event string
  allData$sample <- as.integer(substring(allData$sample,2))  # drop "V" prefix
  allData <- allData %>% mutate(RepSwitch = substring(allData$Event,15))
  allData <- allData %>% mutate(RepSwitch = ifelse((RepSwitch == "p" | RepSwitch == "ep"),"Repetition","Switch"))
  allData <- allData %>% mutate(TaskChoice = substring(Event,1,5)) %>% mutate(TaskChoice = ifelse(TaskChoice=="Searc","Search","Learn"))
  allData$Chan <- factor(allData$Chan, levels=c('Fz', 'Pz'))
  # Free what is no longer needed
  rm(gathercols)
  allData$StimTrig = NULL
  allData$Event = NULL
  gc()
  #####
  # Remove bad participants: keep subjects with more than rejCrit epochs
  Keepers <- allData %>% filter(Chan =="Fz", sample == 1) %>% group_by(Subject) %>% summarise(n = n()) %>% filter(n>rejCrit) %>% select("Subject")
  Keepers <- as.character(Keepers$Subject)
  plotWidth = 24
  plotHeight = 9
  # Per-subject condition means, with sample re-referenced to stimulus onset
  plotData <- allData %>%
    filter(Subject %in% Keepers) %>%
    mutate(sample = sample-baseline) %>%
    group_by(TaskChoice,RepSwitch, sample, Chan, Subject) %>%
    summarise(mean = mean(voltage))
  save(plotData, file = paste(dPath,"plotData_TaskChoiceXrepswitch.RData",sep=""))
}
# Load both preprocessed datasets, label them, prefix subject IDs so they stay
# unique after merging, and combine into one table for plotting/statistics.
plotWidth = 24
plotHeight = 9
dPath = 'F:/CSL_Choice/CNV_StimOnsetWide/'
load('CNV_StimOnsetWide/plotData_TaskChoiceXrepswitch.RData')
choicePlot <- plotData
choicePlot$Exp = "Voluntary"
choicePlot= choicePlot %>% mutate(Subject = paste("vol",as.character(Subject),sep =""))
load('CNV_HannaStimOnsetWide/plotData_TaskChoiceXrepswitch.RData')
plotData$Exp = "Cued"
plotData= plotData %>% mutate(Subject = paste("cue",as.character(Subject),sep =""))
plotData <- rbind(choicePlot,plotData)
save(plotData, file = "CNV/plotData_readyforplotsandstats.RData")
# Short horizontal segment marking the -800..-500 ms baseline on each plot
x = c(-800,-500)
y=c(0,0)
lineData = data.frame(x=x,y=y)
# Grand-average ERP per channel and experiment.
plotData %>%
  group_by(sample, Chan, Exp) %>%
  summarise(mean = mean(mean)) %>%
  ggplot() +
  # Highlight the -300..0 ms analysis window
  geom_rect(xmin = -300, xmax=-0, ymin = -Inf, ymax = Inf, size = 0, fill = "lemonchiffon") +
  #geom_rect(xmin = -700, xmax=-500, ymin = -Inf, ymax = Inf, size = 0, fill = "gray85") +
  geom_line(aes(sample, mean, colour = Exp),size=1) +
  scale_x_continuous(name ="Latency (ms)", expand = c(0, 0)) +
  scale_y_reverse(name =expression(paste("Amplitude (",mu,"v)")), expand = c(0, 0)) +
  facet_grid(Chan~.,scales = "free_y") +
  geom_vline(xintercept = 0,linetype = "dashed" )+
  geom_vline(xintercept = -500)+
  geom_hline(yintercept = 0,linetype = "dashed") +
  geom_line(data = lineData,aes(x=x,y=y)) +
  theme_minimal() +
  # BUG FIX: theme() stood on its own line without a preceding '+', so its
  # result was discarded and the spacing/text settings were never applied.
  # Compare the correctly chained versions of the other plots in this file.
  theme(panel.spacing.y = unit(2, "lines"),text= element_text(size=60))
ggsave(paste(dPath,"CNVERPs_Average.png",sep=""),width = 10, height = 6.49)
# ERP by experiment and task choice (voluntary vs cued; Search vs Learn).
plotData %>%
  group_by(sample, Chan, Exp, TaskChoice) %>%
  summarise(mean = mean(mean)) %>%
  ggplot() +
  # Highlight the -300..0 ms analysis window
  geom_rect(xmin = -300, xmax=-0, ymin = -Inf, ymax = Inf, size = 0, fill = "lemonchiffon") +
  #geom_rect(xmin = -700, xmax=-500, ymin = -Inf, ymax = Inf, size = 0, fill = "gray85") +
  geom_line(aes(x = sample,y=mean, colour = Exp, linetype = TaskChoice), size = 1.5) +
  scale_x_continuous(name ="Latency (ms)", expand = c(0, 0)) +
  scale_y_reverse(name =expression(paste("Amplitude (",mu,"v)")), expand = c(0, 0)) +
  facet_grid(Chan~.) +
  geom_vline(xintercept = 0,linetype = "dashed" )+
  geom_vline(xintercept = -500)+
  geom_hline(yintercept = 0,linetype = "dashed") +
  geom_line(data = lineData,aes(x=x,y=y)) +
  theme_minimal() +
  theme(panel.spacing.y = unit(2, "lines"),text= element_text(size=60))
ggsave(paste(dPath,"CNVERPs_TaskChoice.png",sep=""),width = plotWidth, height = plotHeight*2)
# ERP by experiment and trial type (repetition vs switch).
plotData %>%
  group_by(sample, Chan, Exp, RepSwitch) %>%
  summarise(mean = mean(mean)) %>%
  ggplot() +
  # Highlight the -300..0 ms analysis window
  geom_rect(xmin = -300, xmax=-0, ymin = -Inf, ymax = Inf, size = 0, fill = "lemonchiffon") +
  #geom_rect(xmin = -700, xmax=-500, ymin = -Inf, ymax = Inf, size = 0, fill = "gray85") +
  geom_line(aes(x = sample,y=mean, colour = Exp, linetype = RepSwitch), size = 1.5) +
  scale_x_continuous(name ="Latency (ms)", expand = c(0, 0)) +
  scale_y_reverse(name =expression(paste("Amplitude (",mu,"v)")), expand = c(0, 0)) +
  facet_grid(Chan~.) +
  geom_vline(xintercept = 0,linetype = "dashed" )+
  geom_vline(xintercept = -500)+
  geom_hline(yintercept = 0,linetype = "dashed") +
  geom_line(data = lineData,aes(x=x,y=y)) +
  theme_minimal() +
  theme(panel.spacing.y = unit(2, "lines"),text= element_text(size=60))
ggsave(paste(dPath,"CNVERPs_RepSwitch.png",sep=""),width = plotWidth, height = plotHeight*2)
# Full interaction plot: task choice x repetition/switch, faceted by
# channel and experiment.
plotData %>%
  group_by(sample, Chan, Exp, RepSwitch, TaskChoice) %>%
  summarise(mean = mean(mean)) %>%
  ggplot() +
  # Highlight the -300..0 ms analysis window
  geom_rect(xmin = -300, xmax=-0, ymin = -Inf, ymax = Inf, size = 0, fill = "lemonchiffon") +
  #geom_rect(xmin = -700, xmax=-500, ymin = -Inf, ymax = Inf, size = 0, fill = "gray85") +
  geom_line(aes( x = sample,y=mean,colour = TaskChoice, linetype = RepSwitch), size = 1) +
  scale_x_continuous(name ="Latency (ms)", expand = c(0, 0)) +
  scale_y_reverse(name =expression(paste("Amplitude (",mu,"v)")), expand = c(0, 0)) +
  facet_grid(Chan~Exp) +
  geom_vline(xintercept = 0,linetype = "dashed" )+
  geom_vline(xintercept = -500)+
  geom_hline(yintercept = 0,linetype = "dashed") +
  geom_line(data = lineData,aes(x=x,y=y)) +
  theme_minimal() +
  # BUG FIX: theme() stood on its own line without a preceding '+', so its
  # result was discarded and the spacing/text settings were never applied.
  # Compare the correctly chained versions of the other plots in this file.
  theme(panel.spacing.y = unit(2, "lines"),text= element_text(size=60))
ggsave(paste(dPath,"CNVERPs_TaskChoiceXrepswitch.png",sep=""),width = 10, height = 6.49)
# Statistics on the -300..0 ms window: mixed ANOVAs (within: RepSwitch,
# TaskChoice; between: Exp) at each channel, then follow-up t-tests on the
# time-course means comparing Voluntary vs Cued.
frontalData <- plotData %>% filter(Chan == "Fz", sample > -300, sample<0)
posteriorData <- plotData %>% filter(Chan == "Pz", sample > -300, sample<0)
afex::aov_ez(
  data = frontalData,
  dv = "mean",
  id = "Subject",
  within = c("RepSwitch", "TaskChoice"),
  between = "Exp"
)
afex::aov_ez(
  data = posteriorData,
  dv = "mean",
  id = "Subject",
  within = c("RepSwitch", "TaskChoice"),
  between = "Exp"
)
# Collapse to one mean per sample and experiment; `paired = 0` is coerced to
# FALSE, i.e. unpaired Welch t-tests.
frontalData <- frontalData %>% group_by(sample, Exp) %>% summarise(mean = mean(mean))
t.test(frontalData$mean[frontalData$Exp == "Voluntary"],frontalData$mean[frontalData$Exp == "Cued"],paired = 0)
posteriorData <- posteriorData %>% group_by(sample, Exp) %>% summarise(mean = mean(mean))
t.test(posteriorData$mean[posteriorData$Exp == "Voluntary"],posteriorData$mean[posteriorData$Exp == "Cued"],paired = 0)
|
77543bc894f49ead9d279d4078c73ebd61458772
|
ea3c74bc2a5690656332a015f4765bc79a90062f
|
/hemocyte_proteome/obsolete code/venn_diagram_research.R
|
894b5bb7a9622ae10052582ab7bd714dadfdcef5
|
[] |
no_license
|
drozdovapb/code_chunks
|
f2b591d3d44606220a5b2e8ff0b7723c07ea9619
|
2fcd9073073929eed75668d130c59e91a1d9172c
|
refs/heads/master
| 2023-07-11T14:08:19.925268
| 2023-06-26T10:43:25
| 2023-06-26T10:43:25
| 28,041,858
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,032
|
r
|
venn_diagram_research.R
|
### Venn diagram part
## Exploration of Venn/Euler diagram packages for the proteome data.
## NOTE(review): the sets one/two/three, four/five/six, allSDS/allnoSDS,
## coreSDS/corenoSDS and the sample51..sample63/found objects are defined
## elsewhere (presumably a companion script) -- this file does not create them.
##https://stackoverflow.com/questions/8713994/venn-diagram-proportional-and-color-shading-with-semi-transparency
##https://www.datanovia.com/en/blog/venn-diagram-with-r-or-rstudio-a-million-ways/
#if (!require(devtools)) install.packages("devtools")
#devtools::install_github("yanlinlin82/ggvenn")
#library(ggvenn)
#ggvenn(
#  listNames[1:3],
## fill_color = c("#0073C2FF", "#EFC000FF", "#868686FF", "#CD534CFF"),
#  stroke_size = 0.5, set_name_size = 4)
#if (!require(devtools)) install.packages("devtools")
#devtools::install_github("gaospecial/ggVennDiagram")
#library(ggVennDiagram)
#ggVennDiagram(listNames) ## only 2-4 dimension! They're really good
#ggVennDiagram(listNames[1:3], label_alpha=0)
#ggVennDiagram(listNames[4:6], label_alpha=0)
#if (!requireNamespace("BiocManager", quietly = TRUE))
#    install.packages("BiocManager")
#BiocManager::install("biomaRt")
#install.packages('BioVenn')
# Option 1: BioVenn -- area-proportional three-set diagrams
library(BioVenn)
BioVenn::draw.venn(one, two, three, title = "SDS", subtitle = "",
                   xtitle = "", ytitle = "", ztitle = "")
BioVenn::draw.venn(four, five, six, title = "no SDS", subtitle = "",
                   xtitle = "", ytitle = "", ztitle = "")
BioVenn::draw.venn(allSDS, allnoSDS, list(), title = "SDS vs no SDS",
                   subtitle = "",
                   xtitle = "", ytitle = "", ztitle = "")
BioVenn::draw.venn(coreSDS, corenoSDS, list(), title = "SDS vs no SDS (core)",
                   subtitle = "",
                   xtitle = "", ytitle = "", ztitle = "")
## Option 2: venneuler -- builds diagrams from an element/set membership table
#install.packages('venneuler')
library(venneuler)
df <- data.frame(elements=c(one, two, three),
                 sets=c(rep("51", length(one)), rep("52", length(two)), rep("53", length(three))))
vdf <- venneuler(df)
plot(vdf)
df <- data.frame(elements=c(four, five, six),
                 sets=c(rep("61", length(four)), rep("62", length(five)), rep("63", length(six))))
vdf <- venneuler(df)
plot(vdf)
df <- data.frame(elements=c(allSDS, allnoSDS),
                 sets=c(rep("allSDS", length(allSDS)), rep("allnoSDS", length(allnoSDS))))
vdf <- venneuler(df)
plot(vdf)
df <- data.frame(elements=c(coreSDS, corenoSDS),
                 sets=c(rep("coreSDS", length(coreSDS)), rep("corenoSDS", length(corenoSDS))))
vdf <- venneuler(df)
plot(vdf)
# Option 3: nVennR -- writes an SVG diagram to file
#install.packages('nVennR')
library(nVennR)
myV2 <- plotVenn(
  list(one, two, three),
  outFile = "test.svg")
# Option 4: VennDiagram -- writes directly to an image file
#install.packages("VennDiagram")
library(VennDiagram)
venn.diagram(x = list(one, two, three),
             fill = c("lightblue", "green", "blue"),
             alpha = c(0.5, 0.5, 0.5), category = rep("", 3),
             filename = "VennDiagram.svg", imagetype = "svg", height = 10, width = 10)
## why the heck isn't it proportional?!
# Option 5: eulerr -- venn() (fixed shapes) and euler() (area-proportional)
#install.packages('eulerr')
library(eulerr)
plot(venn(list(one = unique(one), two = unique(two), three = unique(three))))
plot(euler(list(one = unique(one), two = unique(two), three = unique(three))))
plot(euler(list(four = unique(four), five = unique(five), six = unique(six))))
plot(euler(list(allSDS = allSDS, allnoSDS = allnoSDS)))
plot(euler(list(coreSDS = coreSDS, corenoSDS = corenoSDS)))
plot(euler(list(one = unique(one), two = unique(two), three = unique(three),
                four = unique(four), five = unique(five), six = unique(six))))
# Annotate the detected proteins with their DIAMOND hits (column 14 of the
# tab-separated DIAMOND output), then outer-join all six sample tables by
# accession into one wide table.
diamond <- read.delim("EveGHHK01_and_contam.diamond.tsv", header = F, stringsAsFactors = F)
found.proteins <- data.frame(Accession = found, Annotation = NA)
found.proteins$Accession <- as.character(found.proteins$Accession)
found.proteins$Annotation <- sapply(found.proteins$Accession, function(x) diamond[diamond$V1 == x, "V14"])
samples5152 <- merge(sample51, sample52, by = "Main Accession", all = TRUE)
samples6162 <- merge(sample61, sample62, by = "Main Accession", all = TRUE)
samples5 <- merge(samples5152, sample53, by = "Main Accession", all = TRUE)
samples6 <- merge(samples6162, sample63, by = "Main Accession", all = TRUE)
allsamples <- merge(samples5, samples6, by = "Main Accession", all = TRUE)
|
55ac03ee80b4856eb022b5e92837e489306df5ed
|
f581a1cec7e634730dc759dafe2c59caa6fc064d
|
/R/clo.R
|
1d7f9448d2cce190d417ffc740553915f307e1a9
|
[] |
no_license
|
ebmtnprof/rties
|
c02919b4ce78a5ac1342d2a52e8baaec427f2390
|
fae56523593318ded0d7d38c8533f39515711dfe
|
refs/heads/master
| 2022-09-17T08:06:59.139907
| 2022-08-23T00:41:03
| 2022-08-23T00:41:03
| 127,973,424
| 10
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29,141
|
r
|
clo.R
|
## This file includes all the functions needed for a coupled oscillator analysis
#### The first two functions were written by Dr. Steven Boker and are available on his website, http://people.virginia.edu/~smb3u/. The method they are implementing is described in the following two publications:
# Boker SM, Nesselroade JR. A method for modeling the intrinsic dynamics of intraindividual variability: Recovering parameters of simulated oscillators in multi-wave panel data. Multivariate Behavioral Research. 2002;37:127–60.
# Boker SM, Deboeck PR, Edler C, Keel PK. Generalized local linear approximation of derivatives from time series. In: Chow S, Ferrer E, Hsieh F, editors. Statistical methods for modeling human dynamics. New York: Routledge; 2010. p. 161–78.
#---------------------------------------------------------
# gllaWMatrix -- Calculates a GLLA linear transformation matrix to
# create approximate derivatives
#
# Input: embed -- Embedding dimension
# tau -- Time delay
# deltaT -- Interobservation interval
# order -- Highest order of derivatives (2, 3, or more)
gllaWMatrix <- function(embed, tau, deltaT, order=2) {
  # Centered, scaled time offsets for the columns of a time-delay embedding.
  offsets <- (seq_len(embed) - mean(seq_len(embed))) * tau * deltaT
  # Design matrix: intercept plus one Taylor-series column per derivative
  # order (offset^k / k!).
  basis <- vapply(seq_len(order), function(k) offsets^k / factorial(k),
                  numeric(embed))
  design <- cbind(rep(1, embed), basis)
  # Least-squares projection weights: multiplying an embedded data matrix by
  # this matrix yields smoothed values and derivative estimates (GLLA).
  design %*% solve(t(design) %*% design)
}
#---------------------------------------------------------
# gllaEmbed -- Creates a time-delay embedding of a variable
# given a vector and an optional grouping variable
# Requires equal interval occasion data ordered by occasion.
# If multiple individuals, use the ID vector as "groupby"
#
# Input: x -- vector to embed
# embed -- Embedding dimension (2 creates an N by 2 embedded matrix)
# tau -- rows by which to shift x to create each time delay column
# groupby -- grouping vector
# label -- variable label for the columns
# idColumn -- if TRUE, return ID values in column 1
# if FALSE, return the embedding columns only.
#
# Returns: An embedded matrix where column 1 has the ID values, and the
# remaining columns are time delay embedded according to the arguments.
gllaEmbed <- function(x, embed, tau, groupby=NA, label="x", idColumn=F) {
  # Shortest series that still yields at least one embedded row.
  minLen <- (tau + 1 + ((embed - 2) * tau))
  # Default: treat all observations as one group when no usable grouping
  # vector is supplied (non-vector, or all NA).
  if (!is.vector(groupby) | length(groupby[!is.na(groupby[])])<1) {
    groupby <- rep(1,length(x))
  }
  # Drop observations whose group membership is missing.
  x <- x[!is.na(groupby[])]
  groupby <- groupby[!is.na(groupby[])]
  # Invalid arguments or too-short input: return NA rather than erroring.
  if (embed < 2 | is.na(embed) | tau < 1 | is.na(tau) |
      !is.vector(x) | length(x) < minLen)
    return(NA)
  if (length(groupby) != length(x))
    return(NA)
  # Over-allocate the result; unused rows are trimmed before returning.
  embeddedMatrix <- matrix(NA, length(x) + (embed*tau), embed+1)
  colNames <- c("ID", paste(label, "0", sep=""))
  # Column names record the lag of each embedding column, e.g. x0, x2, x4
  # for tau = 2.
  for (j in 2:embed) {
    colNames <- c(colNames, paste(label, (j-1)*tau, sep=""))
  }
  dimnames(embeddedMatrix) <- list(NULL, colNames)
  tRow <- 1
  for (i in unique(groupby)) {
    tx <- x[groupby==i]
    # Skip groups too short to produce even one embedded row.
    if (length(tx) < minLen)
      next
    tLen <- length(tx) - minLen
    embeddedMatrix[tRow:(tRow+tLen), 1] <- i
    # Column j holds the group's series shifted by (j-1)*tau observations.
    for (j in 1:embed) {
      k <- 1 + ((j-1)*tau)
      embeddedMatrix[tRow:(tRow+tLen), j+1] <- tx[k:(k+tLen)]
    }
    tRow <- tRow + tLen + 1
  }
  # Trim to the rows actually filled; optionally keep the leading ID column.
  if (idColumn==TRUE) {
    return(embeddedMatrix[1:(tRow-1),])
  }
  return(embeddedMatrix[1:(tRow-1), 2:(embed+1)])
}
############## estDerivs
#' Estimates first and second derivatives of an oberved state variable
#'
#' This function makes use of 2 functions written by Steven Boker, "gllaWMatrix" and "gllaEmbed" which are available on his website, http://people.virginia.edu/~smb3u/. It fits a coupled oscillator model for each dyad at different combinations of the input parameters (tau, embeds) and returns the input values and period of oscillation that maximize the R^2 for each dyad. It also estimates first and second derivatives of the observed state variable for each person at the input values that maximize the R^2 for that dyad and returns a dataframe that contains them.
#'
#' @param prepData A dataframe that was produced with the "dataPrep" function.
#' @param taus A vector containing the values of tau to use. Tau indicates the number of time points to lag in the lagged data matrix (see Boker, S.M., Deboeck, P.R., Edler, C., & Keel, P.K. (2010). Generalized local linear approximation of derivatives from time series. In S.M. Chow & E. Ferrer (Eds.), Statistical Methods for Modeling Human Dynamics: An Interdisciplinary Dialogue (pp. 161-178). New York, NY: Taylor & Francis Group). The first derivative is estimated as the mean of the two adjacent slopes across that number of lags, e.g., if tau = 2 then the estimate of the first derivative at time = t is based on the mean of the slopes left and right of time t across 2 observations each. The second derivative is the difference in the two slopes with respect to time. Tau = 1 is sensitive to noise and increasing its value acts as smoothing.
#' @param embeds A vector containing the values of embeds to use. Embeds indicates the number of columns in the lagged data matrix. The minimum = 3 for 2nd order derivatives and higher values increase smoothing.
#' @param delta A value indicating the inter-observation interval. For example, if delta = 2, then every second observation is used in the estimation process.
#' @param idConvention The value that was added to the dist1 ID number to get the dist2 ID number
#' @examples
#' data <- rties_ExampleDataShort
#' newData <- dataPrep(basedata=data, dyadId="couple", personId="person",
#' obs_name="dial", dist_name="female", time_name="time")
#' taus <-c(2,3)
#' embeds <- c(3,4)
#' delta <- 1
#' derivs <- estDerivs(prepData=newData, taus=taus, embeds=embeds, delta=delta, idConvention=500)
#' head(derivs$fitTable)
#' summary(derivs$fitTable[ ,4]) # summary of R-square
#' summary(derivs$fitTable[ ,5]) # summary of period of oscillation
#'
#' @return The function returns a list including: 1) "data" which is a dataframe containing first and second derivative estimates of an observed state variable, and 2) "fitTable" which shows the maximal R^2 achieved for each dyad for a coupled oscillator model, along with the associated tau, embed and estimated period of oscillation.
#' @export
estDerivs <- function(prepData, taus, embeds, delta, idConvention)
{
  basedata <- prepData
  basedata <- basedata[stats::complete.cases(basedata), ]
  # All tau x embed combinations to evaluate for each dyad.
  params <- expand.grid(taus=taus, embeds=embeds)
  dyadId <- unique(factor(basedata$dyad))
  derivData <- list()
  fitTable <- list()
  for(d in 1:length(dyadId)){
    r <- list()
    freq0 <- list()
    freq1 <- list()
    dataiFull <- basedata[basedata$dyad == dyadId[d],]
    # One row per time point: the dist0 partner's rows carry the other
    # partner's series in the p_ columns.
    datai <- dataiFull[ which(dataiFull$dist0 == 1), ]
    for(i in 1:nrow(params)){
      # Estimate derivatives for this tau/embed combination
      obsEmbed <- gllaEmbed(datai$obs_deTrend, tau=params[i,1], embed=params[i,2])
      p_obsEmbed <- gllaEmbed(datai$p_obs_deTrend, tau=params[i,1], embed=params[i,2])
      obsLLA <- gllaWMatrix(tau=params[i,1], embed=params[i,2], deltaT=delta, order=2)
      obsDeriv <- obsEmbed[,1:dim(obsEmbed)[2]] %*% obsLLA
      p_obsDeriv <- p_obsEmbed[,1:dim(p_obsEmbed)[2]] %*% obsLLA
      idLength <- dim(obsDeriv)[1]
      dist0 <- rep(unique(datai$dist0), idLength)
      dist1 <- rep(unique(datai$dist1), idLength)
      deriv0 <- cbind(obsDeriv, dist0)
      deriv1 <- cbind(p_obsDeriv, dist1)
      # Stack both partners' rows so a single lm fits actor and partner terms.
      deriv0full <- cbind(deriv0, deriv1)
      dimnames(deriv0full) <- list(NULL, c("obs_deTrend","d1","d2","dist0","p_obs_deTrend","p_d1","p_d2","dist1"))
      deriv1full <- cbind(deriv1, deriv0)
      dimnames(deriv1full) <- list(NULL, c("obs_deTrend","d1","d2","dist0","p_obs_deTrend","p_d1","p_d2","dist1"))
      derivi <- rbind(deriv1full, deriv0full)
      derivi <- as.data.frame(derivi)
      # Fit the coupled oscillator; record adjusted R^2 and the two frequency
      # parameters for this tau/embed combination.
      out <- stats::lm(d2 ~ dist0:obs_deTrend + dist0:d1 + dist0:p_obs_deTrend + dist0:p_d1 + dist1:obs_deTrend + dist1:d1 + dist1:p_obs_deTrend + dist1:p_d1 -1, data=derivi)
      r[i] <- summary(out)$adj.r.squared
      freq0[i] <- out$coefficients[1]
      freq1[i] <- out$coefficients[5]
    }
    ## Select the tau/embed combination with the highest adjusted R^2.
    # BUG FIX: which.max() always yields a single index; the original
    # which(r == maxR) could return several tied combinations, turning the
    # scalar frequency checks below into vector comparisons.
    maxR <- max(unlist(r))
    paramRow <- which.max(unlist(r))
    select <- params[paramRow,]
    tau <- select[1,1]
    embed <- select[1,2]
    temp0 <- unlist(freq0)
    freq0 <- temp0[paramRow]
    n0 <- abs(as.numeric(freq0))
    temp1 <- unlist(freq1)
    freq1 <- temp1[paramRow]
    n1 <- abs(as.numeric(freq1))
    ## Period (1 cycle, peak to peak) in observation units at the best fit;
    ## frequency parameters must be negative for oscillation to be implied.
    if (freq0 >= 0 || freq1 >= 0) {print("error: frequency parameter is not negative")}
    period0 <- (2*pi) / (sqrt(n0))
    period1 <- (2*pi) / (sqrt(n1))
    # Re-estimate derivatives at the selected tau and embed for this dyad.
    obsEmbed <- gllaEmbed(datai$obs_deTrend, tau=tau, embed=embed)
    p_obsEmbed <- gllaEmbed(datai$p_obs_deTrend, tau=tau, embed=embed)
    obsLLA <- gllaWMatrix(tau=tau, embed=embed, deltaT=delta, order=2)
    obsDeriv <- obsEmbed[,1:dim(obsEmbed)[2]] %*% obsLLA
    p_obsDeriv <- p_obsEmbed[,1:dim(p_obsEmbed)[2]] %*% obsLLA
    idLength <- dim(obsDeriv)[1]
    dist0 <- rep(unique(datai$dist0), idLength)
    dist1 <- rep(unique(datai$dist1), idLength)
    time <- seq_len(idLength)
    # BUG FIX: the original called rep(unique(datai$dyad, idLength)), passing
    # idLength to unique() (as its 'incomparables' argument) instead of to
    # rep(); cbind() recycling masked the length-1 result.
    dyad <- rep(unique(datai$dyad), idLength)
    deriv0 <- cbind(dyad, dist0, time, obsDeriv)
    deriv1 <- cbind(dyad, dist1, time, p_obsDeriv)
    deriv0full <- cbind(deriv0, deriv1)
    # By convention the dist1 partner's person id is dyad id + idConvention.
    id0 <- rep(unique(datai$dyad + idConvention), idLength)
    deriv0full <- cbind(deriv0full, id0)
    dimnames(deriv0full) <- list(NULL, c("dyad","dist0", "time","obs_deTrend","d1","d2","p_dyad","dist1","p_time","p_obs_deTrend","p_d1","p_d2", "id"))
    deriv1full <- cbind(deriv1, deriv0)
    id1 <- rep(unique(datai$dyad), idLength)
    deriv1full <- cbind(deriv1full, id1)
    dimnames(deriv1full) <- list(NULL, c("dyad","dist0", "time","obs_deTrend","d1","d2","p_dyad","dist1","p_time","p_obs_deTrend","p_d1","p_d2", "id"))
    deriv <- rbind(deriv1full, deriv0full)
    deriv <- as.data.frame(deriv)
    derivData[[d]] <- deriv
    fitTable[[d]] <- c("dyad"= unique(deriv$dyad), "tau"=tau, "embed"= embed,
                       "Rsqr"= maxR, "Period0"=period0, "Period1"=period1)
  }
  ## Output: stacked derivative data plus the per-dyad fit table.
  derivD <- as.data.frame(do.call(rbind, derivData))
  fitTable <- as.data.frame(do.call(rbind, fitTable))
  fitTable <- round(fitTable, 2)
  derivOut <- list("data"=derivD, "fitTable"= fitTable)
}
################ cloCoupleOde
#' Provides the equation for a coupled oscillator model for the differential equation solver (ode) to plot
#' @param t A parameter used by the ode solver and passed by functions calling cloCoupledOde
#' @param state Another parameter used by the ode solver and passed by functions calling cloCoupledOde
#' @param parameters Another parameter used by the ode solver and passed by functions calling cloCoupledOde
#'
#' @return A list with the rates of change for each state variable.
cloCoupledOde <- function(t, state, parameters)
{
  # Name-based lookup of states and parameters; [[ ]] requires exact matches.
  vals <- as.list(c(state, parameters))
  # y1/y3 are the two partners' positions, y2/y4 their velocities. Each
  # partner's acceleration depends on own position (frequency), own velocity
  # (damping), and the partner's position and velocity (coupling).
  dPos0 <- vals[["y2"]]
  dVel0 <- vals[["y1"]] * vals[["obs_0"]] + vals[["y2"]] * vals[["d1_0"]] +
    vals[["y3"]] * vals[["p_obs_0"]] + vals[["y4"]] * vals[["p_d1_0"]]
  dPos1 <- vals[["y4"]]
  dVel1 <- vals[["y3"]] * vals[["obs_1"]] + vals[["y4"]] * vals[["d1_1"]] +
    vals[["y1"]] * vals[["p_obs_1"]] + vals[["y2"]] * vals[["p_d1_1"]]
  # deSolve expects a list whose first element is the derivative vector.
  list(c(dPos0, dVel0, dPos1, dVel1))
}
############### cloUncoupledOde
#' Provides the equation for an un-coupled oscillator model for the differential equation solver (ode) to plot
#' @param t A parameter used by the ode solver and passed by functions calling cloUncoupledOde
#' @param state Another parameter used by the ode solver and passed by functions calling cloUncoupledOde
#' @param parameters Another parameter used by the ode solver and passed by functions calling cloUncoupledOde
#'
#' @return A list with the rates of change for each state variable.
cloUncoupledOde <- function(t, state, parameters)
{
  # Name-based lookup of states and parameters; [[ ]] requires exact matches.
  vals <- as.list(c(state, parameters))
  # Each partner is an independent oscillator: acceleration depends only on
  # own position (frequency term) and own velocity (damping term).
  dPos0 <- vals[["y2"]]
  dVel0 <- vals[["y1"]] * vals[["obs_0"]] + vals[["y2"]] * vals[["d1_0"]]
  dPos1 <- vals[["y4"]]
  dVel1 <- vals[["y3"]] * vals[["obs_1"]] + vals[["y4"]] * vals[["d1_1"]]
  # deSolve expects a list whose first element is the derivative vector.
  list(c(dPos0, dVel0, dPos1, dVel1))
}
################### indivClo
#' Estimates either an uncoupled or coupled oscillator model for each dyad.
#'
#' Both models predict the second derivatives of the observed state variables (with linear trends removed). For the uncoupled oscillator, the predictors are each person's own observed state variables (again with linear trends removed), as well as each person's own first derivatives of the observed state variables (again with linear trends removed. For the coupled oscillator, the predictors are each person's own and partner's observed state variables (again with linear trends removed), as well as each person's own and partner's first derivatives of the observed state variables (again with linear trends removed).
#'
#' @param derivData A dataframe that was produced with the "estDerivs" function.
#' @param whichModel Whether the model to be estimated is the "uncoupled" or "coupled" oscillator.
#' @examples
#' data <- rties_ExampleDataShort
#' newData <- dataPrep(basedata=data, dyadId="couple", personId="person",
#' obs_name="dial", dist_name="female", time_name="time")
#' taus <-c(2,3)
#' embeds <- c(3,4)
#' delta <- 1
#' derivs <- estDerivs(prepData=newData, taus=taus, embeds=embeds, delta=delta, idConvention=500)
#' clo <- indivClo(derivData=derivs$data, whichModel="coupled")
#' summary(clo$R2)
#' head(clo$params)
#' @return The function returns a list including: 1) the adjusted R^2 for the model for each dyad (called "R2"), and 2) the parameter estimates for the model for each dyad (called "params", for use in either predicting, or being predicted by, the system variable).
#' @export
indivClo <- function(derivData, whichModel)
{
  # Fits the requested oscillator model separately to each dyad and collects
  # adjusted R^2 and coefficient estimates.
  # CLEANUP: the original assigned obs_0 <- param[1], etc., and built a
  # paramClo vector from a still-empty list; that dead code is removed.
  basedata <- derivData
  if(whichModel != "uncoupled" & whichModel != "coupled") {
    stop("the model type must be either uncoupled or coupled")
  } else if (whichModel == "uncoupled"){
    # Own position and velocity predict own acceleration, per partner.
    model <- stats::formula(d2 ~ dist0:obs_deTrend + dist0:d1 + dist1:obs_deTrend + dist1:d1 -1)
    paramNames <- c("obs_0","d1_0","obs_1","d1_1","dyad")
  } else {
    # Adds the partner's position and velocity as coupling terms.
    model <- stats::formula(d2 ~ dist0:obs_deTrend + dist0:d1 + dist0:p_obs_deTrend + dist0:p_d1 + dist1:obs_deTrend + dist1:d1 + dist1:p_obs_deTrend + dist1:p_d1 -1)
    paramNames <- c("obs_0","d1_0","p_obs_0","p_d1_0","obs_1","d1_1","p_obs_1","p_d1_1","dyad")
  }
  # Dyad ids are captured before dropping incomplete rows, as in the original.
  newDiD <- unique(factor(basedata$dyad))
  basedata <- basedata[stats::complete.cases(basedata), ]
  R2 <- vector()
  param <- list()
  for (i in 1:length(newDiD)){
    datai <- basedata[basedata$dyad == newDiD[i], ]
    m <- stats::lm(model, na.action=stats::na.exclude, data=datai)
    R2[[i]] <- summary(m)$adj.r.squared
    param[[i]] <- round(as.numeric(m$coefficients), 5)
    # Append the dyad id so params can be merged with other dyad-level data.
    numParam <- length(m$coefficients)
    param[[i]][numParam + 1] <- unique(datai$dyad)
  }
  params <- as.data.frame(do.call(rbind, param))
  colnames(params) <- paramNames
  # Returned invisibly, matching the original's assignment-as-last-expression.
  invisible(list(R2 = R2, params = params))
}
################### indivCloCompare
#' Compares model fit for the uncoupled and coupled oscillator for each dyad's state trajectories using an R-square comparison.
#'
#' Fits an uncoupled and coupled oscillator model to each dyad's observed state variables and returns the adjusted R-squares, along with the difference between them (coupled - uncoupled, so positive values indicate better fit for the more complex model).
#'
#' @param derivData A dataframe that was produced with the "estDerivs" function.
#' @examples
#' data <- rties_ExampleDataShort
#' newData <- dataPrep(basedata=data, dyadId="couple", personId="person",
#' obs_name="dial", dist_name="female", time_name="time")
#' taus <-c(2,3)
#' embeds <- c(3,4)
#' delta <- 1
#' derivs <- estDerivs(prepData=newData, taus=taus, embeds=embeds, delta=delta, idConvention=500)
#' compare <- indivCloCompare(derivData=derivs$data)
#' summary(compare$R2couple)
#'
#' @return The function returns a named list including: 1) the adjusted R^2 for the uncoupled model for each dyad (called "R2uncouple"), 2) the adjusted R^2 for the coupled model for each dyad (called "R2couple"), and 3) the difference between the R-squares for each dyad (coupled - uncoupled, called "R2dif").
#' @export
indivCloCompare <- function(derivData)
{
  basedata <- derivData
  dyads <- unique(factor(basedata$dyad))
  # Model formulas: per-partner oscillator terms without (uncoupled) and with
  # (coupled) the partner's state and first derivative as predictors.
  uncoupledForm <- stats::formula(
    d2 ~ dist0:obs_deTrend + dist0:d1 + dist1:obs_deTrend + dist1:d1 - 1)
  coupledForm <- stats::formula(
    d2 ~ dist0:obs_deTrend + dist0:d1 + dist0:p_obs_deTrend + dist0:p_d1 +
      dist1:obs_deTrend + dist1:d1 + dist1:p_obs_deTrend + dist1:p_d1 - 1)
  R2uncouple <- vector()
  R2couple <- vector()
  R2dif <- vector()
  # Fit both models dyad by dyad and record the adjusted R^2 difference
  # (positive = the coupled model fits better).
  for (i in seq_along(dyads)) {
    datai <- basedata[basedata$dyad == dyads[i], ]
    fitU <- stats::lm(uncoupledForm, na.action = na.exclude, data = datai)
    fitC <- stats::lm(coupledForm, na.action = na.exclude, data = datai)
    R2uncouple[[i]] <- summary(fitU)$adj.r.squared
    R2couple[[i]] <- summary(fitC)$adj.r.squared
    R2dif[[i]] <- R2couple[[i]] - R2uncouple[[i]]
  }
  # Returned invisibly, matching the original's assignment-as-last-expression.
  invisible(list(R2uncouple = R2uncouple, R2couple = R2couple, R2dif = R2dif))
}
################ indivCloPlots
#' Produces plots of either an uncoupled or coupled oscillator model-predicted trajectories overlaid on raw data for each dyad.
#'
#' The observed and CLO-model predicted state variables (with linear trends removed) are plotted for each dyad individually.
#'
#' @param derivData A dataframe that was produced with the "estDerivs" function.
#' @param whichModel Whether the model to be estimated is the "uncoupled" or "coupled" oscillator.
#' @param idConvention The number that was added to the dist0 partner to get the ID number for the dist1 partner.
#' @param dist0name An optional name for the level-0 of the distinguishing variable (e.g., "Women"). Default is dist0.
#' @param dist1name An optional name for the level-1 of the distinguishing variable (e.g., "Men"). Default is dist1.
#' @param plot_obs_name An optional name for the observed state variables being plotted (e.g., "Emotional Experience"). Default is observed.
#' @param minMax An optional vector with desired minimum and maximum quantiles to be used for setting the y-axis range on the plots, e.g., minMax <- c(.1, .9) would set the y-axis limits to the 10th and 90th percentiles of the observed state variables. Default is to use the minimum and maximum observed values of the state variables.
#' @param printPlots If true (the default) plots are displayed on the screen.
#' @examples
#' # See vignettes for examples.
#'
#' @return A list plots of the predicted values against the observed values for each dyad.
#' @import ggplot2
#' @export
indivCloPlots <- function(derivData, whichModel, idConvention, dist0name=NULL, dist1name=NULL, plot_obs_name=NULL, minMax=NULL, printPlots=T)
{
  basedata <- derivData
  param <- list()
  # Default plot labels.
  if(is.null(dist0name)){dist0name <- "dist0"}
  if(is.null(dist1name)){dist1name <- "dist1"}
  if(is.null(plot_obs_name)){plot_obs_name <- "observed"}
  # y-axis limits: full observed range by default, or user-supplied quantiles.
  if(is.null(minMax)){
    min <- min(basedata$obs_deTrend, na.rm=T)
    max <- max(basedata$obs_deTrend, na.rm=T)
  } else {
    min <- stats::quantile(basedata$obs_deTrend, minMax[1], na.rm=T)
    max <- stats::quantile(basedata$obs_deTrend, minMax[2], na.rm=T)
  }
  # Choose the regression model and the matching ODE for predicted curves.
  # CLEANUP: the dead placeholder assignments that read from the still-empty
  # `param` list in the original have been removed; they had no effect.
  if(whichModel != "uncoupled" & whichModel != "coupled") {
    stop("the model type must be either uncoupled or coupled")
  } else if (whichModel == "uncoupled"){
    model <- stats::formula(d2 ~ dist0:obs_deTrend + dist0:d1 + dist1:obs_deTrend + dist1:d1 -1)
    paramNames <- c("obs_0","d1_0","obs_1","d1_1","dyad")
    odeFunction <- cloUncoupledOde
  } else if (whichModel == "coupled"){
    model <- stats::formula(d2 ~ dist0:obs_deTrend + dist0:d1 + dist0:p_obs_deTrend + dist0:p_d1 + dist1:obs_deTrend + dist1:d1 + dist1:p_obs_deTrend + dist1:p_d1 -1)
    paramNames <- c("obs_0","d1_0","p_obs_0","p_d1_0","obs_1","d1_1","p_obs_1","p_d1_1","dyad")
    odeFunction <- cloCoupledOde
  }
  newDiD <- unique(factor(basedata$dyad))
  basedata <- basedata[stats::complete.cases(basedata), ]
  plots <- list()
  for (i in 1:length(newDiD)){
    statedatai <- basedata[basedata$dyad == newDiD[i] & basedata$dist0 == 0,]
    maxtime <- max(statedatai$time)
    plotTimes <- seq(1, maxtime, by=1)
    # Silence R CMD check notes about subset()'s non-standard evaluation.
    time <- obs_deTrend <- p_obs_deTrend <- NULL
    # Initial ODE conditions: mean observed level across the first five time
    # points for each partner, with zero initial velocity.
    # BUG FIX: the original used time == c(1:5) (wrapped in
    # suppressWarnings), which recycles the comparison vector and selects
    # the wrong rows; %in% selects all of time points 1-5 as intended.
    start <- subset(statedatai, time %in% 1:5, select=c(obs_deTrend, p_obs_deTrend))
    y1 <- mean(start$obs_deTrend, na.rm=T)
    y2 <- 0
    y3 <- mean(start$p_obs_deTrend, na.rm=T)
    y4 <- 0
    statei <- c("y1"=y1, "y2"=y2, "y3"=y3, "y4"=y4)
    # Fit the oscillator model for this dyad and solve the implied ODE.
    datai <- basedata[basedata$dyad == newDiD[i], ]
    m <- stats::lm(model, na.action=na.exclude, data=datai)
    param[[i]] <- round(as.numeric(m$coefficients), 5)
    numParam <- length(m$coefficients)
    param[[i]][numParam + 1] <- unique(datai$dyad)
    names(param[[i]]) <- paramNames
    temp <- as.data.frame(deSolve::ode(y=statei, times=plotTimes, func= odeFunction, parms= param[[i]]))
    # Drop the velocity columns; keep predicted positions for both partners.
    vars1 <- c("y2", "y4")
    temp2 <- temp[ ,!(names(temp) %in% vars1)]
    names(temp2) <- c("time","d0.pred","d1.pred")
    temp2$dyad <- statedatai$dyad
    # Long format: one row per partner per time point, then merge back with
    # the observed data so both can be drawn together.
    temp3 <- stats::reshape(temp2, direction='long', varying=c("d0.pred","d1.pred"), timevar="role", times=c("d0","d1"), v.names=c("pred"), idvar="time")
    temp3$id <- ifelse(temp3$role == "d0", temp3$dyad, temp3$dyad + idConvention)
    temp4 <- suppressMessages(plyr::join(datai, temp3))
    temp4$roleNew <- factor(temp4$role, levels=c("d0","d1"), labels=c(dist0name, dist1name))
    plotData <- temp4[stats::complete.cases(temp4), ]
    plotTitle <- as.character(unique(datai$dyad))
    # Observed (dotted) vs model-predicted (solid) trajectories per partner.
    plots[[i]] <- ggplot(plotData, aes_string(x="time")) +
      geom_line(aes_string(y= "obs_deTrend", color="roleNew"), linetype="dotted", size= .8, na.rm=T) +
      geom_line(aes_string(y="pred", color="roleNew"), size= .8, na.rm=T) +
      scale_color_manual(name="Role", values=c("red","blue")) +
      ylab(plot_obs_name) +
      ylim(min, max) +
      annotate("text", x=-Inf, y=-Inf, hjust=0, vjust=0, label="Dots = Observed; Lines = Predicted", size=3) +
      labs(title= "Dyad ID:", subtitle= plotTitle) +
      theme(plot.title=element_text(size=11)) +
      theme(plot.subtitle=element_text(size=10))
  }
  if(printPlots==T){print(plots)}
  return(plots)
}
###################### cloResids
#' Produces histograms of the residuals from the oscillator model for each dyad.
#'
#' @param derivData A dataframe that was produced with the "estDerivs" function.
#' @param whichModel Whether the model to be estimated is the uncoupled-oscillator ("uncoupled") or the coupled-oscillator ("coupled").
#' @param printPlots If true (the default) plots are displayed on the screen.
#' @examples
#' # See vignettes for examples.
#'
#' @return A list with the histograms of the residuals for each dyad.
#' @import ggplot2
#' @export
cloResids <- function(derivData, whichModel, printPlots=T)
{
  basedata <- derivData
  # Select the regression formula for the requested oscillator model.
  if (whichModel != "uncoupled" & whichModel != "coupled") {
    stop("the model type must be either uncoupled or coupled")
  }
  if (whichModel == "uncoupled") {
    model <- stats::formula(d2 ~ dist0:obs_deTrend + dist0:d1 + dist1:obs_deTrend + dist1:d1 -1)
  } else {
    model <- stats::formula(d2 ~ dist0:obs_deTrend + dist0:d1 + dist0:p_obs_deTrend + dist0:p_d1 + dist1:obs_deTrend + dist1:d1 + dist1:p_obs_deTrend + dist1:p_d1 -1)
  }
  dyads <- unique(factor(basedata$dyad))
  plots <- vector("list", length(dyads))
  resid <- vector("list", length(dyads))
  # Fit the model per dyad and plot a histogram of its residuals.
  for (i in seq_along(dyads)) {
    datai <- basedata[basedata$dyad == dyads[i], ]
    fit <- stats::lm(model, na.action = na.exclude, data = datai)
    resid[[i]] <- fit$residuals
    plotResid <- data.frame(Residuals = resid[[i]])
    plots[[i]] <- ggplot(plotResid, aes_string(x = "Residuals")) +
      geom_histogram(color = "black", fill = "grey") +
      labs(title = "Dyad ID:", subtitle = as.character(unique(datai$dyad))) +
      theme(plot.title = element_text(size = 11)) +
      theme(plot.subtitle = element_text(size = 10))
  }
  if (printPlots == T) { print(plots) }
  return(plots)
}
#################### cloPlotTraj
#' Plots the bivariate state variable's clo model-predicted temporal trajectories for each latent profile of clo parameters.
#'
#' @param prepData A dataframe that was produced with the "dataPrep" function.
#' @param paramEst A dataframe created by indivClo containing the clo parameter estimates for each dyad.
#' @param n_profiles The number of latent profiles.
#' @param dist0name An optional name for the level-0 of the distinguishing variable (e.g., "Women"). Default is dist0.
#' @param dist1name An optional name for the level-1 of the distinguishing variable (e.g., "Men"). Default is dist1
#' @param plot_obs_name An optional name for the observed state variable to appear on plots (e.g., "Emotional Experience").
#' @param minMax An optional vector with desired minimum and maximum quantiles to be used for setting the y-axis range on the plots, e.g., minMax <- c(.1, .9) would set the y-axis limits to the 10th and 90th percentiles of the observed state variables. If not provided, the default is to use the minimum and maximum observed values of the state variables.
#' @param time_length An optional value specifying how many time points to plot across. Default is the 75th percentile for the observed time variable.
#' @param printPlots If true (the default) plots are displayed on the screen.
#' @examples
#' # See vignettes for examples.
#'
#' @return The function returns the plots as a list.
#' @import ggplot2
#' @export
cloPlotTraj <- function(prepData, paramEst, n_profiles, dist0name=NULL, dist1name=NULL, plot_obs_name = NULL, minMax=NULL, time_length=NULL, printPlots=T)
{
  # Default plot labels.
  if(is.null(dist0name)){dist0name <- "dist0"}
  if(is.null(dist1name)){dist1name <- "dist1"}
  if(is.null(plot_obs_name)){plot_obs_name <- "observed"}
  # y-axis limits: full observed range by default, or user-supplied quantiles.
  if(is.null(minMax)){
    min <- min(prepData$obs_deTrend, na.rm=T)
    max <- max(prepData$obs_deTrend, na.rm=T)
  } else {
    min <- stats::quantile(prepData$obs_deTrend, minMax[1], na.rm=T)
    max <- stats::quantile(prepData$obs_deTrend, minMax[2], na.rm=T)
  }
  # Default plotting horizon: 75th percentile of the observed time variable.
  if(is.null(time_length)){time_length <- as.numeric(stats::quantile(prepData$time, prob=.75))}
  prepData <- prepData[stats::complete.cases(prepData), ]
  paramEst <- paramEst[stats::complete.cases(paramEst), ]
  # Latent profile analysis (Gaussian mixture) on the coupled-oscillator
  # parameter estimates; column i of lpa$parameters$mean holds profile i's
  # mean parameter vector.
  vars1 <- c("obs_0","d1_0","p_obs_0","p_d1_0","obs_1","d1_1","p_obs_1","p_d1_1")
  temp1 <- paramEst[vars1]
  lpa <- mclust::Mclust(temp1, G=n_profiles)
  profileParams <- as.data.frame(lpa$parameters$mean)
  plots <- list()
  for(i in 1:n_profiles){
    # Initial ODE conditions: median time-1 observation per partner, zero
    # initial velocity.
    # NOTE(review): dist0 == 1 rows feed start0 (y1) while dist0 == 0 rows
    # feed start1 (y3) -- confirm this matches the intended role-to-state
    # mapping used elsewhere in the package.
    statedata0 <- prepData[prepData$dist0 == 1 & prepData$time ==1,]
    start0 <- stats::median(statedata0$obs_deTrend, na.rm=T)
    statedata1 <- prepData[prepData$dist0 == 0 & prepData$time ==1,]
    start1 <- stats::median(statedata1$obs_deTrend, na.rm=T)
    plotTimes <- seq(1, time_length, by=1)
    state <- c("y1"=start0, "y2"=0, "y3"=start1, "y4"=0)
    # Solve the coupled oscillator ODE using profile i's mean parameters.
    temp1 <- profileParams[ ,i]
    names <- rownames(profileParams)
    names(temp1) <- names
    paramsi <- temp1
    temp2 <- as.data.frame(deSolve::ode(y=state, times=plotTimes, func=cloCoupledOde, parms= paramsi))
    # Drop the velocity columns; keep predicted positions for both partners.
    vars2 <- c("y2", "y4")
    temp3 <- temp2[ ,!(names(temp2) %in% vars2)]
    names(temp3) <- c("time","d0pred","d1pred")
    # Long format: one row per partner per time point, for plotting.
    temp4 <- stats::reshape(temp3, direction='long', varying=c("d0pred","d1pred"), timevar="role", times=c("d0","d1"), v.names=c("pred"), idvar="time")
    temp4$roleNew <- factor(temp4$role, levels=c("d0","d1"), labels=c(dist0name, dist1name))
    plotData <- temp4[stats::complete.cases(temp4), ]
    profileName <- paste("Profile", i , sep="_")
    plotsi <- ggplot(plotData, aes_string(x="time")) +
      geom_line(aes_string(y="pred", color="roleNew"), linetype="solid", size=1, na.rm=T) +
      scale_color_manual(name="Role", values=c("black","gray47")) +
      ylab(plot_obs_name) +
      ylim(min, max) +
      labs(title=profileName, subtitle= "Predicted Trajectory") +
      theme(plot.title=element_text(size=11))
    plots[[i]] <- plotsi
  }
  if(printPlots==T){print(plots)}
  return(plots)
}
|
667230a7cea5b31c53d951d3ea1dab15d4de323b
|
e641d77c2724c240c76f1872a6e2e88e0ca6c01e
|
/6 - 3nt Addition Energy.R
|
36a0e322a0a84f614e9e2425df95c6ae0c43e28d
|
[] |
no_license
|
cdevlugt/iav_par
|
9a3bff7a328cc3c323fe807cb9104672a364418c
|
01e4c11fc24513334cbb47689a6a311f75650fe4
|
refs/heads/master
| 2020-03-28T11:26:16.202803
| 2019-03-26T14:34:11
| 2019-03-26T14:34:11
| 148,213,040
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 65,373
|
r
|
6 - 3nt Addition Energy.R
|
# Load plotting dependencies up front. library() stops with an error if a
# package is missing, unlike require(), which only returns FALSE and lets
# the script limp on to a confusing failure later.
library(ggplot2)
library(cowplot)
# Byte-compile functions as they are defined (JIT level 3) for a modest
# speedup of the energy-table lookups below.
library(compiler)
enableJIT(3)
helixEnergy <- function(x53=NA, y35=NA, offset=0, calc='average'){
#calculates the free enegry at 37 degrees for a given helix.
# Turner energy rules (2004) are used for calculations.
# handles nearest neighbours, terminal mismatches, dangling ends, and single nucleotide mismatches
#
#Args:
# x53 sequence in 5-3
# y35 sequence in 3-5
# offset positive numbers right shift x53, negative numbers right shift y35
# allows for dangling ends and starting at a position other than 1
#
#Return:
# df$totalEnergy vector containing free energies
#columns are 5'-3' in turner energy rules
#nearest neghbours Turner 2004 https://rna.urmc.rochester.edu/NNDB/turner04/index.html
energyTable <- data.frame(AA=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.93),
AC=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-2.24,NA),
AG=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-2.08,NA,-0.55),
AU=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-1.1,NA,-1.36,NA),
CA=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-2.11,NA,NA,NA,NA),
CC=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-3.26,NA,NA,NA,NA,NA),
CG=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,-2.36,NA,-1.41,NA,NA,NA,NA),
CU=c(NA,NA,NA,NA,NA,NA,NA,NA,-2.08,NA,-2.11,NA,NA,NA,NA,NA),
GA=c(NA,NA,NA,NA,NA,NA,NA,-2.35,NA,NA,NA,NA,NA,NA,NA,-1.27),
GC=c(NA,NA,NA,NA,NA,NA,-3.42,NA,NA,NA,NA,NA,NA,NA,-2.51,NA),
GG=c(NA,NA,NA,NA,NA,-3.26,NA,-1.53,NA,NA,NA,NA,NA,-2.11,NA,-0.5),
GU=c(NA,NA,NA,NA,-2.24,NA,-2.51,NA,NA,NA,NA,NA,-1.36,NA,1.29,NA),
UA=c(NA,NA,NA,-1.33,NA,NA,NA,NA,NA,NA,NA,-1,NA,NA,NA,NA),
UC=c(NA,NA,-2.35,NA,NA,NA,NA,NA,NA,NA,-1.53,NA,NA,NA,NA,NA),
UG=c(NA,-2.11,NA,-1,NA,NA,NA,NA,NA,-1.41,NA,0.3,NA,NA,NA,NA),
UU=c(-0.93,NA,-1.27,NA,NA,NA,NA,NA,-0.55,NA,-0.5,NA,NA,NA,NA,NA))
rownames(energyTable) <- colnames(energyTable)
deltaET <- data.frame(AA=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.03),
AC=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.06,NA),
AG=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.06,NA,0.32),
AU=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.08,NA,0.24,NA),
CA=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.07,NA,NA,NA,NA),
CC=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.07,NA,NA,NA,NA,NA),
CG=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,0.09,NA,0.24,NA,NA,NA,NA),
CU=c(NA,NA,NA,NA,NA,NA,NA,NA,0.06,NA,0.25,NA,NA,NA,NA,NA),
GA=c(NA,NA,NA,NA,NA,NA,NA,0.06,NA,NA,NA,NA,NA,NA,NA,0.28),
GC=c(NA,NA,NA,NA,NA,NA,0.08,NA,NA,NA,NA,NA,NA,NA,0.25,NA),
GG=c(NA,NA,NA,NA,NA,0.07,NA,0.27,NA,NA,NA,NA,NA,0.25,NA,0.96),
GU=c(NA,NA,NA,NA,0.06,NA,0.25,NA,NA,NA,NA,NA,0.24,NA,0.56,NA),
UA=c(NA,NA,NA,0.09,NA,NA,NA,NA,NA,NA,NA,0.3,NA,NA,NA,NA),
UC=c(NA,NA,0.06,NA,NA,NA,NA,NA,NA,NA,0.27,NA,NA,NA,NA,NA),
UG=c(NA,0.07,NA,0.3,NA,NA,NA,NA,NA,0.24,NA,0.48,NA,NA,NA,NA),
UU=c(0.03,NA,0.28,NA,NA,NA,NA,NA,0.32,NA,0.96,NA,NA,NA,NA,NA))
rownames(deltaET) <- colnames(deltaET)
endMM <- data.frame(A=c(T,T,T,F), C=c(T,T,F,T), G=c(T,F,T,F), U=c(F,T,F,T))
rownames(endMM) <- colnames(endMM)
#terminal mismatches Turner 2004 https://rna.urmc.rochester.edu/NNDB/turner04/index.html
mmNrg <- data.frame(AA=c(NA,NA,NA,-1.0,NA,NA,NA,-0.7,NA,NA,NA,-1.1,-0.8,-1.0,-0.8,-1.0),
AC=c(NA,NA,-1.1,NA,NA,NA,-1.1,NA,NA,NA,-1.6,NA,-0.6,-0.7,-0.6,-0.7),
AG=c(NA,-1.5,NA,-1.0,NA,-1.0,NA,-0.7,NA,-1.4,NA,-0.5,-0.8,-1.0,-0.8,-1.0),
AU=c(-0.8,NA,-0.3,NA,-0.6,NA,-0.6,NA,-0.8,NA,-0.6,NA,-0.6,-0.8,-0.6,-0.8),
CA=c(NA,NA,NA,-0.8,NA,NA,NA,-0.6,-1.5,-1.5,-1.4,-1.5,NA,NA,NA,-0.6),
CC=c(NA,NA,-1.5,NA,NA,NA,-0.7,NA,-1.0,-1.1,-1.0,-0.8,NA,NA,-1.0,NA),
CG=c(NA,-1.5,NA,-0.8,NA,-1.1,NA,-0.6,-1.4,-1.5,-1.6,-1.5,NA,-1.4,NA,-0.6),
CU=c(-1.0,NA,-1.0,NA,-0.7,NA,-0.7,NA,-1.0,-1.4,-1.0,-1.2,-0.7,NA,-0.8,NA),
GA=c(NA,NA,NA,-1.1,-1.1,-1.5,-1.3,-1.5,NA,NA,NA,-1.2,-0.3,-1.0,-0.8,-1.0),
GC=c(NA,NA,-1.3,NA,-1.1,-0.7,-1.1,-0.5,NA,NA,-1.4,NA,-0.6,-0.7,-0.6,-0.7),
GG=c(NA,-1.4,NA,-1.1,-1.6,-1.5,-1.4,-1.5,NA,-1.6,NA,-0.8,-0.6,-1.0,-0.8,-1.0),
GU=c(-0.8,NA,-0.8,NA,-1.1,-1.0,-1.1,-0.7,-0.8,NA,-0.8,NA,-0.6,-0.8,-0.6,-0.6),
UA=c(-1.0,-0.8,-1.1,-0.8,NA,NA,NA,-0.5,-1.0,-0.8,-1.1,-0.8,NA,NA,NA,-0.5),
UC=c(-0.7,-0.6,-0.7,-0.5,NA,NA,-0.5,NA,-0.7,-0.6,-0.7,-0.5,NA,NA,-0.7,NA),
UG=c(-1.1,-0.8,-1.2,-0.8,NA,-0.8,NA,NA,-0.5,-0.8,-0.8,-0.8,NA,-1.2,NA,-0.5),
UU=c(-0.7,-0.6,-0.7,-0.5,-0.7,NA,NA,NA,-0.7,-0.6,-0.7,-0.5,-0.8,NA,-0.6,NA),
stringsAsFactors=F)
rownames(mmNrg) <- colnames(mmNrg)
#dangling ends Turner 2004 https://rna.urmc.rochester.edu/NNDB/turner04/index.html
dangleStart <- data.frame(A=c(NA,NA,NA,NA,NA,NA,NA,-0.7,NA,NA,NA,-0.1,NA,NA,NA,-0.7,NA,NA,NA,-0.1),
C=c(NA,NA,NA,NA,NA,NA,-1.1,NA,NA,NA,-0.4,NA,NA,NA,-1.3,NA,NA,NA,-0.6,NA),
G=c(NA,NA,NA,NA,NA,-1.7,NA,-0.7,NA,-0.8,NA,-0.1,NA,-1.7,NA,-0.7,NA,-1.2,NA,-0.1),
U=c(NA,NA,NA,NA,-0.8,NA,-0.8,NA,-0.5,NA,-0.5,NA,-0.8,NA,-0.8,NA,-0.6,NA,-0.6,NA),
AA=c(NA,NA,NA,-0.3,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
AC=c(NA,NA,-0.5,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
AG=c(NA,-0.2,NA,-0.3,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
AU=c(-0.3,NA,-0.3,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CA=c(NA,NA,NA,-0.3,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CC=c(NA,NA,-0.3,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CG=c(NA,-0.3,NA,-0.3,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CU=c(-0.1,NA,-0.1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GA=c(NA,NA,NA,-0.4,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GC=c(NA,NA,-0.2,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GG=c(NA,0,NA,-0.4,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GU=c(-0.2,NA,-0.2,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
UA=c(NA,NA,NA,-0.2,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
UC=c(NA,NA,-0.1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
UG=c(NA,0,NA,-0.2,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
UU=c(-0.2,NA,-0.2,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
stringsAsFactors=F)
rownames(dangleStart) <- colnames(dangleStart)
dangleEnds <- data.frame(A=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.3,-0.1,-0.2,-0.2),
C=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.2,-0.3,0,0,NA,NA,NA,NA),
G=c(NA,NA,NA,NA,NA,NA,NA,NA,-0.5,-0.3,-0.2,-0.1,NA,NA,NA,NA,-0.3,-0.1,-0.2,-0.2),
U=c(NA,NA,NA,NA,-0.3,-0.3,-0.4,-0.2,NA,NA,NA,NA,-0.3,-0.3,-0.4,-0.2,NA,NA,NA,NA),
AA=c(NA,NA,NA,-0.8,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
AC=c(NA,NA,NA,-0.5,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
AG=c(NA,NA,NA,-0.8,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
AU=c(NA,NA,NA,-0.6,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CA=c(NA,NA,-1.7,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CC=c(NA,NA,-0.8,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CG=c(NA,NA,-1.7,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CU=c(NA,NA,-1.2,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GA=c(NA,-1.1,NA,-0.8,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GC=c(NA,-0.4,NA,-0.5,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GG=c(NA,-1.3,NA,-0.8,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GU=c(NA,-0.6,NA,-0.6,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
UA=c(-0.7,NA,-0.7,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
UC=c(-0.1,NA,-0.1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
UG=c(-0.7,NA,-0.7,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
UU=c(-0.1,NA,-0.1,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
stringsAsFactors=F)
rownames(dangleEnds) <- colnames(dangleEnds)
#single nucleotide mismatches Davis 2010 DOI: 10.1021/bi100146z
mismatchNrg <- data.frame(UAC=c(NA,-0.64,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.77,NA,NA,NA,NA,NA,NA,NA,NA,NA),
AGG=c(-0.64,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.01,NA,NA,NA),
CAU=c(NA,NA,NA,-0.64,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.77,NA,NA,NA,NA,NA,NA,NA),
GGA=c(NA,NA,-0.64,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.01,NA,NA),
CAC=c(NA,NA,NA,NA,NA,0.21,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GGG=c(NA,NA,NA,NA,0.21,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
UAG=c(NA,NA,NA,NA,NA,NA,NA,1.26,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
AGC=c(NA,NA,NA,NA,NA,NA,1.26,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GAU=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,1.26,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CGA=c(NA,NA,NA,NA,NA,NA,NA,NA,1.26,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
UAU=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.92,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
AGA=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.92,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
UUG=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-2.82,NA,NA,0.33,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GUC=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-2.82,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GUU=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-2.82,NA,0.33,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CUG=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-2.82,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
AUC=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.33,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CUA=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.33,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GCU=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.17,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CAA=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.17,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
UCG=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.17,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
AAC=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.17,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CAG=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.32,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GCC=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.32,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GAC=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.32,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CCG=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.32,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
AAG=c(1.77,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.51,NA,NA,NA,NA,NA,NA,NA,NA),
UCC=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.51,NA,NA,NA,NA,NA,NA,NA,2.24,NA),
GAA=c(NA,NA,1.77,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.51,NA,NA,NA,NA,NA,NA),
CCU=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.51,NA,NA,NA,NA,NA,NA,2.24),
UCU=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,2.26,NA,NA,NA,NA),
AUA=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,2.26,NA,NA,NA,NA,NA),
UGC=c(NA,-0.01,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CGU=c(NA,NA,NA,-0.01,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
ACG=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,2.24,NA,NA,NA,NA,NA,NA,NA,NA),
GCA=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,2.24,NA,NA,NA,NA,NA,NA),
stringsAsFactors=F)
rownames(mismatchNrg) <- colnames(mismatchNrg)
#5' shift penalties DOI: 10.1021/bi100146z
mismatch5Shift <- data.frame(UAC=c(NA,0.425,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.08,NA,NA,NA,NA,NA,NA,NA,NA,NA),
AGG=c(0.24,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.065,NA,NA,NA),
CAU=c(NA,NA,NA,0.24,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.52,NA,NA,NA,NA,NA,NA,NA),
GGA=c(NA,NA,0.425,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.14,NA,NA),
CAC=c(NA,NA,NA,NA,NA,-0.2,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GGG=c(NA,NA,NA,NA,-0.53,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
UAG=c(NA,NA,NA,NA,NA,NA,NA,-1.025,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
AGC=c(NA,NA,NA,NA,NA,NA,-0.18,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GAU=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.18,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CGA=c(NA,NA,NA,NA,NA,NA,NA,NA,-1.025,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
UAU=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.265,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
AGA=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.715,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
UUG=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.63,NA,NA,-0.065,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GUC=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,2.165,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GUU=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,2.165,NA,-0.49,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CUG=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.63,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
AUC=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.49,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CUA=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.065,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GCU=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.265,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CAA=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.165,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
UCG=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.165,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
AAC=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.265,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CAG=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.025,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GCC=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.185,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GAC=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.185,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CCG=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.025,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
AAG=c(-0.52,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.43,NA,NA,NA,NA,NA,NA,NA,NA),
UCC=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0,NA,NA,NA,NA,NA,NA,NA,0,NA),
GAA=c(NA,NA,-0.08,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0,NA,NA,NA,NA,NA,NA),
CCU=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.43,NA,NA,NA,NA,NA,NA,-0.88),
UCU=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.08,NA,NA,NA,NA),
AUA=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.52,NA,NA,NA,NA,NA),
UGC=c(NA,0.14,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CGU=c(NA,NA,NA,-0.065,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
ACG=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.88,NA,NA,NA,NA,NA,NA,NA,NA),
GCA=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0,NA,NA,NA,NA,NA,NA),
stringsAsFactors=F)
rownames(mismatch5Shift) <- colnames(mismatch5Shift)
#3' shift penalties DOI: 10.1021/bi100146z
mismatch3Shift <- data.frame(UAC=c(NA,0.24,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.52,NA,NA,NA,NA,NA,NA,NA,NA,NA),
AGG=c(0.425,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.14,NA,NA,NA),
CAU=c(NA,NA,NA,0.425,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.08,NA,NA,NA,NA,NA,NA,NA),
GGA=c(NA,NA,0.24,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.065,NA,NA),
CAC=c(NA,NA,NA,NA,NA,-0.53,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GGG=c(NA,NA,NA,NA,-0.2,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
UAG=c(NA,NA,NA,NA,NA,NA,NA,-0.18,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
AGC=c(NA,NA,NA,NA,NA,NA,-1.025,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GAU=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,-1.025,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CGA=c(NA,NA,NA,NA,NA,NA,NA,NA,-0.18,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
UAU=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.715,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
AGA=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.265,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
UUG=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,2.165,NA,NA,-0.49,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GUC=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.63,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GUU=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,1.63,NA,-0.065,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CUG=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,2.165,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
AUC=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.065,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CUA=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.49,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GCU=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.165,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CAA=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.265,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
UCG=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.265,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
AAC=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.165,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CAG=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.185,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GCC=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.025,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
GAC=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0.025,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CCG=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.185,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
AAG=c(-0.08,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0,NA,NA,NA,NA,NA,NA,NA,NA),
UCC=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.43,NA,NA,NA,NA,NA,NA,NA,-0.88,NA),
GAA=c(NA,NA,-0.52,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.43,NA,NA,NA,NA,NA,NA),
CCU=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0,NA,NA,NA,NA,NA,NA,0),
UCU=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.52,NA,NA,NA,NA),
AUA=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.08,NA,NA,NA,NA,NA),
UGC=c(NA,-0.065,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
CGU=c(NA,NA,NA,0.14,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA),
ACG=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,0,NA,NA,NA,NA,NA,NA,NA,NA),
GCA=c(NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,NA,-0.88,NA,NA,NA,NA,NA,NA),
stringsAsFactors=F)
rownames(mismatch3Shift) <- colnames(mismatch3Shift)
#end penalties Turner 2004 https://rna.urmc.rochester.edu/NNDB/turner04/index.html
endPenalties <- data.frame(AU=.45, CG=0, GC=0, GU=.45, UA=.45, UG=.45, stringsAsFactors = F)
deltaEP <- data.frame(AU=.04, CG=0, GC=0, GU=.04, UA=.04, UG=.04, stringsAsFactors = F)
if(calc=='min'){
energyTable <- energyTable - deltaET
endPenalties <- endPenalties - deltaEP
} else if(calc=='max') {
energyTable <- energyTable + deltaET
endPenalties <- endPenalties + deltaEP
}
#make data frame
if(is.na(x53) && is.na(y35)){ #sample data
df <- data.frame(seq=c("G", 'GC', 'GCA', 'GCG', 'GCAA', 'GCGA', 'GCAAA', 'GCGAA', 'GCAAAA', 'GCGAAA','GCAAAAG', 'GCGAAAG', 'GCAAAAGC', 'GCGAAAGC', "G", 'GC', 'GCA', 'GCG', 'GCAA', 'GCGA', 'GCAAA', 'GCGAA', 'GCAAAA', 'GCGAAA','GCAAAAG', 'GCGAAAG', 'GCAAAAGC', 'GCGAAAGC'), template=c('UCGUUUUCGUCC','UCGUUUUCGUCC','UCGUUUUCGUCC','UCGUUUUCGUCC','UCGUUUUCGUCC','UCGUUUUCGUCC','UCGUUUUCGUCC','UCGUUUUCGUCC','UCGUUUUCGUCC','UCGUUUUCGUCC','UCGUUUUCGUCC','UCGUUUUCGUCC','UCGUUUUCGUCC','UCGUUUUCGUCC', 'UCGCUUUCGUCC', 'UCGCUUUCGUCC', 'UCGCUUUCGUCC', 'UCGCUUUCGUCC', 'UCGCUUUCGUCC', 'UCGCUUUCGUCC', 'UCGCUUUCGUCC', 'UCGCUUUCGUCC', 'UCGCUUUCGUCC', 'UCGCUUUCGUCC', 'UCGCUUUCGUCC', 'UCGCUUUCGUCC', 'UCGCUUUCGUCC', 'UCGCUUUCGUCC'), stringsAsFactors = F)
} else {
if(length(x53)!=length(y35)){
warning("different numbers of sequences in each group")
}
df <- data.frame(seq=x53, template=y35, stringsAsFactors = F) #passed data
}
#offset handling
if(length(offset)==1){ #works with a vector or an int
if(offset>0){
adj <- " "
if(offset>1){
for(i in 2:offset){
adj <- paste(adj," ", sep="")
}
}
df$seq <- paste(adj, df$seq, sep="")
} else if(offset<0){
offset <- offset*-1
adj <- " "
if(offset>1){
for(i in 2:offset){
adj <- paste(adj," ", sep="")
}
}
#offset testing
# nums <- 1:10
# adj <- nums[1]
# if(offset>1){
# for(i in 2:offset){
# adj <- paste(adj,nums[i], sep="")
# }
# }
df$template <- paste(adj, df$template, sep="")
}
offset <- offset + 1 #fixes offset to new start position
df$startPos <- offset
} else if(length(offset)==nrow(df)){
df$startPos <- offset
df$offset <- ""
for(i in 1:max(abs(df$startPos))){
df$offset[abs(df$startPos)>=i] <- paste(df$offset[abs(df$startPos)>=i], " ", sep="")
}
df$seq[df$startPos>0] <- paste(df$offset[df$startPos>0], df$seq[df$startPos>0], sep="")
df$template[df$startPos<0] <- paste(df$offset[df$startPos<0], df$template[df$startPos<0], sep="")
df$startPos[df$startPos<0] <- df$startPos[df$startPos<0] *-1
df <- df[,-ncol(df)]
df$startPos <- df$startPos + 1
} else {
stop("Offset length must be a single int or a vector with as many elements as x53 and y35")
}
#nchar stop point for each row
pnums <- data.frame(nchar(df[,1]),nchar(df[,2]))
pnums[,3] <- pnums[,1]
pnums[,3][pnums[,2]<pnums[,1]] <- pnums[,2][pnums[,2]<pnums[,1]]
df$endPos <- as.vector(pnums[,3])
rm(pnums)
#start and end of helix
df$start <- paste(substr(df$seq, df$startPos, df$startPos),substr(df$template, df$startPos, df$startPos), sep="-")
df$end <- paste(substr(df$seq, df$endPos, df$endPos),substr(df$template,df$endPos, df$endPos), sep="-")
#dangling ends
df$dangle53 <- paste(substr(df$seq, df$startPos-1, df$startPos),substr(df$template, df$startPos-1, df$startPos), sep="-")
df$dangle53 <- gsub(" ","",df$dangle53)
df$dangle53[nchar(df$dangle53)!=4] <- ""
df$dangle35 <- ""
df$dangle35[nchar(df$seq)>nchar(df$template)] <- paste(substr(df$seq[nchar(df$seq)>nchar(df$template)], nchar(df$template[nchar(df$seq)>nchar(df$template)]), nchar(df$template[nchar(df$seq)>nchar(df$template)])+1),substr(df$template[nchar(df$seq)>nchar(df$template)], nchar(df$template[nchar(df$seq)>nchar(df$template)]), nchar(df$template[nchar(df$seq)>nchar(df$template)])), sep="-")
df$dangle35[nchar(df$seq)<nchar(df$template)] <- paste(substr(df$seq[nchar(df$seq)<nchar(df$template)], nchar(df$seq[nchar(df$seq)<nchar(df$template)]), nchar(df$seq[nchar(df$seq)<nchar(df$template)])),substr(df$template[nchar(df$seq)<nchar(df$template)], nchar(df$seq[nchar(df$seq)<nchar(df$template)]), nchar(df$seq[nchar(df$seq)<nchar(df$template)])+1), sep="-")
df$dangle35 <- gsub(" ","",df$dangle35)
df$dangle35[nchar(df$dangle35)!=4] <- ""
#finding mismatches at either end
df$startMM <- ""
df$endMM <- ""
repeat{
df$sMM <- F
df$eMM <- F
for(i in 1:ncol(endMM)){
for(j in 1:nrow(endMM)){
end <- paste(colnames(endMM)[i], rownames(endMM)[j], sep="-")
df$sMM[df$start==end] <- endMM[j,i]
df$eMM[df$end==end] <- endMM[j,i]
}
rm(end)
}
if(all(df$sMM==F) && all(df$eMM==F)){
break
}
#if mismatch, remove dangling ends, get mismatch, set new start/end position, change start and end sequence
df$dangle53[df$sMM] <- ""
df$startMM[df$sMM] <- paste(substr(df$seq[df$sMM], df$startPos[df$sMM], df$startPos[df$sMM]+1),substr(df$template[df$sMM], df$startPos[df$sMM], df$startPos[df$sMM]+1), sep="-")
df$startPos[df$sMM] <- df$startPos[df$sMM] + 1
df$dangle35[df$eMM] <- ""
df$endMM[df$eMM] <- paste(substr(df$seq[df$eMM], df$endPos[df$eMM]-1, df$endPos[df$eMM]),substr(df$template[df$eMM], df$endPos[df$eMM]-1, df$endPos[df$eMM]), sep="-")
df$endPos[df$eMM] <- df$endPos[df$eMM] - 1
df$start <- paste(substr(df$seq, df$startPos, df$startPos),substr(df$template, df$startPos, df$startPos), sep="-")
df$end <- paste(substr(df$seq, df$endPos, df$endPos),substr(df$template,df$endPos, df$endPos), sep="-")
} #check for more mismatches
df <- df[,-c(ncol(df)-1, ncol(df))]
#nearest neighbour pairs
for(i in 1:(max(df$endPos - df$startPos))){
df[,ncol(df)+1] <- ""
df[,ncol(df)][df$endPos>=i+df$startPos] <- paste(substr(df$seq[df$endPos>=i+df$startPos], df$startPos[df$endPos>=i+df$startPos]+i-1, df$startPos[df$endPos>=i+df$startPos]+i), substr(df$template[df$endPos>=i+df$startPos], df$startPos[df$endPos>=i+df$startPos]+i-1, df$startPos[df$endPos>=i+df$startPos]+i), sep="-")
df[,ncol(df)][df$endPos<i+df$startPos] <- ""
colnames(df)[ncol(df)] <- paste("pair",i,sep="_")
pairs <- i
}
#convert to energy
for(i in 1:ncol(energyTable)){
for(j in 1:nrow(energyTable)){
df[,(ncol(df)-pairs+1):ncol(df)][df[,(ncol(df)-pairs+1):ncol(df)]==paste(colnames(energyTable)[i], rownames(energyTable)[j], sep="-")] <- energyTable[j,i]
}
}
if(pairs>1){
df[rowSums(is.na(df[,(ncol(df)-pairs+1):ncol(df)]))==1,][is.na(df[rowSums(is.na(df[,(ncol(df)-pairs+1):ncol(df)]))==1,])] <- 0
}
df[,c(grep("pair_", colnames(df)))][df[,c(grep("pair_", colnames(df)))]==""] <- 0
####Start NA handling
nas <- max(rowSums(is.na(df)))
if(nas>0){ #slower if nas, but faster if no mismatch
#find all the nas, label as T in new columns
df[,(ncol(df)+1):(ncol(df)+pairs)] <- is.na(df[,(ncol(df)-pairs+1):ncol(df)])
df[,(ncol(df)-pairs+1):ncol(df)][df[,(ncol(df)-pairs+1):ncol(df)]==F] <- "F"
df[,(ncol(df)-pairs+1):ncol(df)][df[,(ncol(df)-pairs+1):ncol(df)]==T] <- "T"
colnames(df)[(ncol(df)-pairs+1):ncol(df)] <- paste("NAnum_",1:pairs,sep="")
#collapse rows to a string
pasteArgs <- c(df[,c(grep("NAnum_", colnames(df)))], sep="")
df[,ncol(df)+1] <- do.call(paste, pasteArgs)
rm(pasteArgs)
df <- df[-c(grep("NAnum_", colnames(df)))]
colnames(df)[ncol(df)] <- "ntmm"
for(i in 1:ceiling(nas/2)){ #gets mismatch location by regexpr and collapsed rows string
df[,ncol(df)+1] <- regexpr("T", df$ntmm)
colnames(df)[ncol(df)] <- paste("mm",i,sep="_")
df$ntmm[df[,ncol(df)]>0] <- substr(df$ntmm[df[,ncol(df)]>0], df[,ncol(df)][df[,ncol(df)]>0]+2, nchar(df$ntmm[df[,ncol(df)]>0]))
df[,ncol(df)][df[,ncol(df)]>0] <- df[,ncol(df)][df[,ncol(df)]>0] + df$startPos[df[,ncol(df)]>0]
}
#fix up the pair energies
df[,c(grep("mm_", colnames(df)))][df[,c(grep("mm_", colnames(df)))]<0] <- 0
df[,c(grep("pair_", colnames(df)))][is.na(df[,c(grep("pair_", colnames(df)))])] <- 0
df <- df[,-c(grep("ntmm", colnames(df)))]
#get mm substrs
for(i in 1:ceiling(nas/2)){
df[,ncol(df)+1] <- ""
#mm_ gives na location; -1 +1 gives 3 nt needed for snmm
df[,ncol(df)][df[,grep("mm_",colnames(df))[i]]>0] <- paste(substr(df$seq[df[,grep("mm_",colnames(df))[i]]>0], df[,grep("mm_",colnames(df))[i]][df[,grep("mm_",colnames(df))[i]]>0]-1, df[,grep("mm_",colnames(df))[i]][df[,grep("mm_",colnames(df))[i]]>0]+1), substr(df$template[df[,grep("mm_",colnames(df))[i]]>0], df[,grep("mm_",colnames(df))[i]][df[,grep("mm_",colnames(df))[i]]>0]-1, df[,grep("mm_",colnames(df))[i]][df[,grep("mm_",colnames(df))[i]]>0]+1), sep="-")
colnames(df)[ncol(df)] <- paste('snMis', i, sep="_")
df[,ncol(df)+1] <- 0
df[,ncol(df)][df[,grep("mm_",colnames(df))[i]]>0] <- 4 - (df[,grep("mm_",colnames(df))[i]][df[,grep("mm_",colnames(df))[i]]>0] - df$startPos[df[,grep("mm_",colnames(df))[i]]>0])
df[,ncol(df)][ df[,ncol(df)]<0] <- 0
colnames(df)[ncol(df)] <- paste('snMisls', i, sep="_")
df[,ncol(df)+1] <- 0
df[,ncol(df)][df[,grep("mm_",colnames(df))[i]]>0] <- 4 - (df$endPos[df[,grep("mm_",colnames(df))[i]]>0] - df[,grep("mm_",colnames(df))[i]][df[,grep("mm_",colnames(df))[i]]>0])
df[,ncol(df)][ df[,ncol(df)]<0] <- 0
colnames(df)[ncol(df)] <- paste('snMisrs', i, sep="_")
}
#for in nas/2
#get match
#check 5' and adj
#check 3' and adj
#get the energy of the mismatch
for(k in 1:ceiling(nas/2)){
for(i in 1:ncol(mismatchNrg)){
for(j in 1:nrow(mismatchNrg)){
df[,grep("snMis_", colnames(df))[k]][df[,grep("snMis_", colnames(df))[k]]==paste(colnames(mismatchNrg)[i],rownames(mismatchNrg)[j], sep="-")] <- mismatchNrg[j,i] + df[,grep("snMisls_", colnames(df))[k]][df[,grep("snMis_", colnames(df))[k]]==paste(colnames(mismatchNrg)[i],rownames(mismatchNrg)[j], sep="-")] * mismatch5Shift[j,i] + df[,grep("snMisrs_", colnames(df))[k]][df[,grep("snMis_", colnames(df))[k]]==paste(colnames(mismatchNrg)[i],rownames(mismatchNrg)[j], sep="-")] * mismatch3Shift[j,i]
}
}
}
df <- df[,-c(grep("mm_|snMisls|snMisrs",colnames(df)))] #get rid of all added columns
}#end NA handling
####start and end handling
df$start <- gsub("-","", df$start)
df$end <- gsub("-","", df$end)
for(i in 1:ncol(endPenalties)){
df$start[df$start==colnames(endPenalties)[i]] <- endPenalties[1,i]
df$end[df$end==colnames(endPenalties)[i]] <- endPenalties[1,i]
}
####dangling ends
for(i in 1:ncol(dangleStart)){
for(j in 1:nrow(dangleStart)){
df$dangle53[df$dangle53==paste(colnames(dangleStart)[i],rownames(dangleStart)[j],sep='-')] <- dangleStart[j,i]
df$dangle35[df$dangle35==paste(colnames(dangleEnds)[i],rownames(dangleEnds)[j],sep='-')] <- dangleEnds[j,i]
}
}
####terminal mismatches
for(i in 1:ncol(mmNrg)){
for(j in 1:nrow(mmNrg)){
df$startMM[df$startMM==paste(colnames(mmNrg)[i], rownames(mmNrg)[j], sep="-")] <- mmNrg[j,i]
df$endMM[df$endMM==paste(colnames(mmNrg)[i], rownames(mmNrg)[j], sep="-")] <- mmNrg[j,i]
}
}
#setting zeros, converting data to numeric, taking sum of data
df[,5:ncol(df)][df[,5:ncol(df)]==""] <- 0
df[,5:ncol(df)] <- as.numeric(as.character(unlist(df[,5:ncol(df)])))
df$totalEnergy <- rowSums(df[,5:ncol(df)])
#df.helnrg <<- df
return(df$totalEnergy)
}
strBraceRemoval <- function(x) {
  # Expands square-bracket character classes in strings into all concrete
  # strings, e.g. "12[34]" -> c("123", "124"). One class is expanded per
  # pass (leftmost first), recursing until no "[" remains.
  #
  # Args:
  #   x: a character vector (or anything coercible via as.vector); each
  #      element may contain zero or more "[...]"-delimited classes whose
  #      contents are single characters.
  #
  # Returns:
  #   A character vector containing every string obtainable by replacing
  #   each "[...]" class with one of its member characters, in
  #   left-to-right expansion order.
  x <- as.vector(x)
  expanded <- vector("list", length(x))  # preallocate; no per-element growth
  for (i in seq_along(x)) {
    s <- x[i]
    if (grepl("\\[", s)) {
      # Locate the first bracket pair and split its contents into members.
      open <- regexpr("\\[", s, perl = TRUE)
      close <- regexpr("\\]", s, perl = TRUE)
      members <- strsplit(substr(s, open + 1, close - 1), split = "")[[1]]
      # paste0 vectorizes over members: one output string per member char.
      expanded[[i]] <- paste0(
        substr(s, 1, open - 1),
        members,
        substr(s, close + 1, nchar(s))
      )
    } else {
      expanded[[i]] <- s
    }
  }
  # as.character() maps the empty-input NULL to character(0) (the original
  # 1:length(x) loop errored on empty input).
  str <- as.character(unlist(expanded, use.names = FALSE))
  # Recurse while any element still carries an unexpanded class.
  if (any(grepl("\\[", str))) {
    strBraceRemoval(str)
  } else {
    str
  }
}
subunitDecompress <- function(df, sub = 1, drops = 1:8, drop = TRUE, name = "Subunit") {
  # Decompresses read-count data: each row is replicated as many times as
  # the count stored in column `sub`, and that column is then overwritten
  # with the subunit's (original column) name.
  #
  # Args:
  #   df:    data frame containing a read-count column.
  #   sub:   index of the column holding the counts to expand on.
  #   drops: indices of columns to remove when `drop` is TRUE (the `sub`
  #          column itself is always retained).
  #   drop:  should the columns listed in `drops` be removed first?
  #   name:  new name given to the expanded count column.
  #
  # Returns:
  #   The decompressed data frame: one row per read, with the count column
  #   replaced by the subunit label and renamed to `name`.
  if (drop) {
    drops <- drops[drops != sub]  # never drop the subunit column itself
    if (length(drops) > 0) {
      df <- df[, -drops]
      # Re-locate `sub` after the removal shifted column positions.
      # Dropping first is one extra operation versus replicating first,
      # but keeps less data in RAM during the replication step.
      sub <- sub - sum(drops < sub)
    }
  }
  # Replicate each row by its read count, then relabel the count column.
  df <- df[rep(row.names(df), df[, sub]), ]
  df[, sub] <- colnames(df)[sub]
  colnames(df)[sub] <- name
  df
}
subunitSeperate <- function(df, subs = 1:8, decompress = FALSE, name = "Subunit") {
  # Splits rows by subunit: for each subunit read-count column, the rows
  # with a non-zero count are cloned into their own set of rows, so every
  # output row carries counts for exactly one subunit.
  #
  # Args:
  #   df:         data frame with one read-count column per subunit.
  #   subs:       column indices of the subunit read counts.
  #   decompress: if TRUE, also expand rows by read count via
  #               subunitDecompress(); if FALSE, just zero out the other
  #               subunits' count columns.
  #   name:       column name passed through to subunitDecompress().
  #
  # Returns:
  #   Data frame with rows cloned per subunit, row-bound in `subs` order.
  #
  # Preallocate and bind once at the end: rbind() inside the loop would be
  # accidentally quadratic in the number of subunits.
  pieces <- vector("list", length(subs))
  for (i in seq_along(subs)) {
    dfTemp <- df[df[, subs[i]] > 0, ]
    if (decompress) {
      dfTemp <- subunitDecompress(dfTemp, sub = subs[i], drops = subs,
                                  drop = TRUE, name = name)
    } else {
      dfTemp[, subs[-i]] <- 0  # keep only this subunit's counts
    }
    pieces[[i]] <- dfTemp
  }
  do.call(rbind, pieces)
}
interLength <- function(df, rounds, trim, seqAdded=NA, seqRound, numRound=NA, final=F){
#takes a dataframe of RNA reads that have been trimmed for prime and realign, gives intermediates between min rounds + 1 and max rounds - 1
#
#Args:
# df: dataframe containing trimmed sequence, rounds, and additions
# rounds: column index containing rounds of trimming information
# trim: column index containing the trimmed sequence
# seqAdded: column index containing the sequence added (optional)
# seqRound: column index containing the seq added each round; may be a vector
# numRound: column index containing the number of nucleotides added each round; may be a vector
# optional argument will also recalculate other numeric markers
# final: include the fully realigned intermediate
#
#return:
# df: dataframe containing the original with the intermediate trimming products added
# (rows flagged with Intermediate == TRUE are the synthetic intermediates)
#
# NOTE(review): also assigns the result to global `dfintlenfun` via <<- at the
# end — looks like a debugging side effect; confirm before removing.
if(final==F){
loopVar <- (max(df[,rounds])-1) #max rounds -1 is the upper limit for intermediates
} else {
loopVar <- (max(df[,rounds])) #final=TRUE also emits the fully realigned product
}
df$Intermediate <- FALSE
for(i in 1:loopVar){
if(i==1){ #creates temporary df, or trims existing temporary df
dfTemp <- df[df[,rounds]>1,]
dfTemp$Intermediate <- TRUE
} else if(final && loopVar==i){
#last pass when final=TRUE: pick up the single-round reads as well
dfTemp <- df[df[,rounds]==1,]
dfTemp$Intermediate <- TRUE
} else {
#subsequent passes re-trim the previous pass's intermediates (dfTemp carries over)
dfTemp <- dfTemp[dfTemp[,rounds]>1,]
}
dfTemp[,rounds] <- dfTemp[,rounds]-1 #drops rounds by one so as to show number of rounds trimming to the new length
dfTemp[,trim] <- paste(dfTemp[,trim],dfTemp[,seqRound[1]],sep="") #adds the removed sequence
#NOTE(review): loop bound is loopVar but the body indexes seqRound; presumably
#loopVar <= length(seqRound) is guaranteed by the caller — confirm.
for(j in 1:loopVar){ #left shifts the sequence added
if(j!=length(seqRound)){
dfTemp[,seqRound[j]] <- dfTemp[,seqRound[j+1]]
dfTemp[,seqRound[j+1]] <- ""
} else {
dfTemp[,seqRound[j]] <- ""
}
}
if(!is.na(seqAdded)){ #changes what was added to reflect intermediate
#NOTE(review): relies on per-round columns being named "Seq_Trim_*" — confirm
#this matches the upstream column-naming convention.
paste_args <- c(dfTemp[,c(grep("Seq_Trim_", colnames(dfTemp)))], sep="")
dfTemp[,seqAdded] <- do.call(paste,paste_args)
}
df <- rbind(df, dfTemp) #adds intermediates
}
if(!is.na(numRound[1])){ #checks to see if numbers should be addressed by this program
for(i in 1:length(numRound)){ #fixes numbers: per-round counts follow the shifted sequences
df[,numRound[i]] <- nchar(df[,seqRound[i]])
}
#adds/changes number variables derived from the (possibly shifted) sequences
df$Trim_Len <- nchar(df[,trim])
df$Len_Adjust <- nchar(df[,seqAdded])
df$Velocity <- df$Len_Adjust/df[,rounds]
df$Velocity[is.na(df$Velocity)] <- 0 #0/0 rounds would give NaN; treat as no velocity
}
dfintlenfun <<- df
return(df)
}
#setwd("") #if not default dir
# Load the per-strain matched-read tables produced upstream.
hk <- read.csv("HK_All_Match.csv", stringsAsFactors = F)
pr8 <- read.csv("PR8_All_Match.csv", stringsAsFactors = F)
wsn <- read.csv("WSN_All_Match.csv", stringsAsFactors = F)
bri <- read.csv("BRI_All_Match.csv", stringsAsFactors = F)
ntCap <- 3 # maximum nucleotide-addition length analysed below
####Generate conversion table
# All (addition, template) pairs with their helix energies.
# templates: the two 12-nt template variants considered here.
templates <- c("UCGUUUUCGUCC", "UCGCUUUCGUCC")
additionVect <- c("[ACGU]", '[ACGU]', 'G','C','[AG]','A','A','A','G','C','A','G','G') #string objects containing all possibilities
# strBraceRemoval() (defined elsewhere) expands the bracket classes into every
# concrete sequence; additions accumulates all prefixes of length 3..13.
additions <- strBraceRemoval(paste(additionVect[1:3],collapse = ""))
if(length(additionVect)>=4){
  for(i in 4:length(additionVect)){
    additions <- c(additions, strBraceRemoval(paste(additionVect[1:i],collapse = ""))) #increase vector length
  }
}
rm(additionVect)
# Build the cross of additions x templates: whichever vector is shorter is
# iterated, pairing each of its elements with all of the other vector.
# First iteration is unrolled to create nrgConverter; the loop below appends.
i=1
if(length(additions) <= length(templates)){
  dfArgs <- list(Additions=additions[i], Templates=templates, stringsAsFactors=F)
} else {
  dfArgs <- list(Additions=additions, Templates=templates[i], stringsAsFactors=F)
}
nrgConverter <- do.call(data.frame, dfArgs)
rm(dfArgs)
if(min(length(additions), length(templates))>1){
  for(i in 2:min(length(additions), length(templates))){
    if(length(additions) <= length(templates)){
      dfArgs <- list(Additions=additions[i], Templates=templates, stringsAsFactors=F)
    } else {
      dfArgs <- list(Additions=additions, Templates=templates[i], stringsAsFactors=F)
    }
    nrgConverter <- rbind(nrgConverter, do.call(data.frame, dfArgs))
    rm(dfArgs)
  }
}
rm(i, templates, additions)
# helixEnergy() (defined elsewhere) scores each addition against its template.
nrgConverter$TotalEnergy <- helixEnergy(x53=nrgConverter[,1], y35=nrgConverter[,2], offset=-1, calc='max')
####End conversion table generation
dfPreProcess <- function(df, subsa=c(1:5), subsb=c(6:8), converter=NA, strain="Influenza"){
  #Pre-processes one strain's matched-read table for the energy analysis:
  #adds trimming intermediates (interLength), separates subunits onto their own
  #rows, assigns a template per subunit group, reconstructs the added sequence,
  #and (when a converter is given) annotates per-length helix energies.
  #
  #args
  # df: dataframe of matched reads for one strain
  # subsa: columns of the subunits paired with template "UCGUUUUCGUCC"
  # subsb: columns of the subunits paired with template "UCGCUUUCGUCC"
  # converter: an nrgConverter made via helixEnergy for all (addition, template)
  #   pairs; if NA the table is returned before energy annotation
  # strain: name of the strain; stored in a column for the global analysis
  #
  #return
  # df: processed data.frame (decompressed per read when converter is supplied)
  #
  # NOTE(review): columns are addressed by grep() on names throughout -- fragile
  # if upstream column naming changes.
  subs <- c(subsa, subsb)
  df <- df[,c(subs, grep("round", colnames(df)), grep("Trim_Sequence",colnames(df)), grep("Seq_Trim_R", colnames(df)), grep("NT_coupure", colnames(df)), grep("Sequence",colnames(df))[1])]
  df <- interLength(df, (length(subs)+1),grep("Trim_Sequence",colnames(df)), seqRound=grep("Seq_Trim_R", colnames(df)), final = T)
  df$Strain <- strain
  df <- df[,c(subs, ncol(df), grep("Trim_Sequence",colnames(df)), grep("Seq_Trim_R", colnames(df))[1], grep("NT_coupure", colnames(df)),(ncol(df)-1))]
  df$NT_coupure <- substr(df$NT_coupure, 1, 1) # keep only the first cut nucleotide
  df <- subunitSeperate(df, subs)
  # Template assignment: rows with reads in the subsa group get the first
  # template, rows in the subsb group the second.
  df$Template <- ""
  df$Template[rowSums(df[,c(subsa)])>0] <- "UCGUUUUCGUCC"
  df$Template[rowSums(df[,c(subsb)])>0] <- "UCGCUUUCGUCC"
  # Addition = last two trimmed nucleotides + the first-round realignment seq;
  # rows with no realignment get the template-specific canonical 5' end instead.
  df$Addition <- ""
  df$Addition[df$Seq_Trim_R1!=""] <- paste(substr(df$Trim_Sequence[df$Seq_Trim_R1!=""], nchar(df$Trim_Sequence[df$Seq_Trim_R1!=""])-1, nchar(df$Trim_Sequence[df$Seq_Trim_R1!=""])), df$Seq_Trim_R1[df$Seq_Trim_R1!=""], sep="")
  df$Addition[df$Addition=="" & df$Template=="UCGUUUUCGUCC"] <- paste(substr(df$Trim_Sequence[df$Addition=="" & df$Template=="UCGUUUUCGUCC"], nchar(df$Trim_Sequence[df$Addition=="" & df$Template=="UCGUUUUCGUCC"])-1, nchar(df$Trim_Sequence[df$Addition=="" & df$Template=="UCGUUUUCGUCC"])), "GCAAAAGCAGG", sep="")
  df$Addition[df$Addition=="" & df$Template=="UCGCUUUCGUCC"] <- paste(substr(df$Trim_Sequence[df$Addition=="" & df$Template=="UCGCUUUCGUCC"], nchar(df$Trim_Sequence[df$Addition=="" & df$Template=="UCGCUUUCGUCC"])-1, nchar(df$Trim_Sequence[df$Addition=="" & df$Template=="UCGCUUUCGUCC"])), "GCGAAAGCAGG", sep="")
  df$Addition <- gsub("T","U",df$Addition) # DNA -> RNA alphabet
  # G_comp: non-intermediate rows whose cut nucleotide is G
  df$G_comp <- F
  df$G_comp[df$NT_coupure=="G" & df$Intermediate==F] <- T
  df$Realign <- nchar(df$Seq_Trim_R1)   # realignment length in nt
  df$Length <- nchar(df$Trim_Sequence)  # trimmed read length in nt
  df <- df[,-c(grep("NT_coupure", colnames(df)), grep("Seq_Trim_R", colnames(df)), grep("Trim_Sequence",colnames(df)))]
  if(all(is.na(converter))){
    return(df)
  }
  # Energy annotation: nt_addition_j holds the converter energy for the first
  # (j+2)-nt prefix of Addition matched against the row's template.
  df$TotalEnergy <- 0
  for(j in 1:(max(nchar(converter$Additions))-2)){
    df[,ncol(df)+1] <- 0
    for(i in 1:nrow(converter[nchar(converter$Additions)==(j+2),])){
      df[,ncol(df)][substr(df$Addition,1,nchar(converter[nchar(converter$Additions)==(j+2),][i,1]))==converter[nchar(converter$Additions)==(j+2),][i,1] & df$Template==converter[nchar(converter$Additions)==(j+2),][i,2]] <- converter[nchar(converter$Additions)==(j+2),][i,3]
      df$TotalEnergy[df$Addition==converter[nchar(converter$Additions)==(j+2),][i,1] & df$Template==converter[nchar(converter$Additions)==(j+2),][i,2]] <- converter[nchar(converter$Additions)==(j+2),][i,3]
    }
    colnames(df)[ncol(df)] <- paste("nt_addition_",j,sep="")
  }
  df <- subunitSeperate(df, subs=subs, decompress = T) # expand to one row per read
  #df[,9:20] <- -1*log2((-1*df[9:20]))
  #df[,9:20][df[,9:20]==Inf] <- 0
  return(df)
}
#5x5 graph looks good with 1.6 for legend
#may want to show realignment thresholds with dashed lines and the difference between this line and those that go on to the next length
nrgGraph <- function(df=NA, ntCap=NA, name=""){
  #Makes a jittered-point + errorbar plot of helix energy values for a
  #data.frame pre-processed by dfPreProcess() in this script.
  #Side effect: writes a "<name> <ntCap> nt.csv" of group means/sds.
  #
  #args
  # df: a data.frame from dfPreProcess; if NA (default) a dummy plot is built
  #     purely to harvest its legend for cowplot
  # ntCap: upper limit for nt addition to be graphed; NA = largest
  #        nt_addition_* column present
  # name: prefix for the stats csv written as a side effect
  #
  #returns
  # get_legend(...): spoofed legend for cowplot (df=NA case)
  # NULL: df had no rows; NULL keeps downstream plot_grid() calls from erroring
  # nrgGraph: the assembled ggplot object
  if(length(df)==1){ #catches df=NA; returns a spoofed legend
    dfpoint <- data.frame(Series=c("No_Realignment", 'Trimmed','Realigns'), y=-1, x=c(1,2,1), weight=1)
    nrgGraph <- ggplot() +
      geom_point(data=dfpoint, aes(x=x, y=y, group=Series, alpha=weight, colour=Series),position = 'jitter', show.legend = T) +
      scale_colour_manual(name='legend', breaks=c('No_Realignment', 'Trimmed', 'Realigns'),
                          values=c(No_Realignment=hcl(h=seq(15,375, length=(3+1))[2], c=100, l=65), Trimmed='#D98000', Realigns=hcl(h=seq(15,375, length=(3+1))[1], c=100, l=65)),
                          labels=c(No_Realignment='Transcribed', Trimmed='Will Realign', Realigns='Realigns')) +
      scale_alpha(guide=F) +
      theme(legend.title = element_blank(), legend.text = element_text(size=7))
    return(get_legend(nrgGraph)) #end spoofed legend
  } else if(nrow(df)==0) {
    return(NULL)
  }
  if(is.na(ntCap)){
    ntCap <- length(grep("nt_addition_", colnames(df))) #sets ntCap to maximum addition
  }
  # Classify each read: never realigns vs realigns at some point.
  df$Series <- ""
  df$Series[df$Realign==0] <- "No_Realignment"
  df$Series[df$Series==""] <- "Trimmed"
  nrgGraph <- ggplot() #shell for graph
  i <- ntCap # only the ntCap-sized addition is plotted
  print(paste("nt_addition_", i, sep=""))
  # Mean/sd of the energy per group: Realign==0 -> transcribed,
  # Realign>i -> will realign past this length, Realign==i -> realigns here.
  stats <- data.frame(Series=c('No_Realignment', 'Trimmed', 'Realigns'),
                      mean=c(mean(df[,grep("nt_addition_", colnames(df))[i]][df$Realign==0]), mean(df[,grep("nt_addition_", colnames(df))[i]][df$Realign>i]), mean(df[,grep("nt_addition_", colnames(df))[i]][df$Realign==i])),
                      sd=c(sd(df[,grep("nt_addition_", colnames(df))[i]][df$Realign==0]), sd(df[,grep("nt_addition_", colnames(df))[i]][df$Realign>i]), sd(df[,grep("nt_addition_", colnames(df))[i]][df$Realign==i])),
                      x=c(3*(i-1) + 1, x=3*(i-1) + 2, x=3*(i-1) + 3))
  # NOTE(review): NaN/NA appear here when a group is empty.
  write.csv(stats, paste(name, " ", i, ' nt', '.csv', sep='')) # side effect: per-plot stats csv
  nrgs <- unique(df[,grep("nt_addition_", colnames(df))[i]])
  nrgs <- nrgs[order(nrgs, decreasing = T)]
  # One point per (group, distinct energy); `number` holds the read count and drives alpha.
  dfPoints <- rbind(
    data.frame(Series='No_Realignment', y=nrgs, number=0, x=3*(i-1) + 1, stringsAsFactors = F),
    data.frame(Series='Trimmed', y=nrgs, number=0, x=3*(i-1) + 2, stringsAsFactors = F),
    data.frame(Series='Realigns', y=nrgs, number=0, x=3*(i-1) + 3, stringsAsFactors = F)
  )
  for(j in 1:length(nrgs)){
    dfPoints[(1+(j-1)),3] <- nrow(df[df$Realign==0 & df[,grep("nt_addition_", colnames(df))[i]]==nrgs[j],])
    dfPoints[(length(nrgs)+1+(j-1)),3] <- nrow(df[df$Realign>i & df[,grep("nt_addition_", colnames(df))[i]]==nrgs[j],])
    dfPoints[(2*length(nrgs)+1+(j-1)),3] <- nrow(df[df$Realign==i & df[,grep("nt_addition_", colnames(df))[i]]==nrgs[j],])
  }
  dfPoints <- dfPoints[dfPoints$number>0,]
  # If a group is empty, insert an off-plot dummy row so its factor level
  # (and colour mapping) survives.
  if(nrow(dfPoints[dfPoints$Series=='No_Realignment',])==0){
    dfPoints[nrow(dfPoints)+1,] <- data.frame(Series='No_Realignment', y=10, number=0, x=-10, stringsAsFactors = F)
  }
  if(nrow(dfPoints[dfPoints$Series=='Trimmed',])==0){
    dfPoints[nrow(dfPoints)+1,] <- data.frame(Series='Trimmed', y=10, number=0, x=-10, stringsAsFactors = F)
  }
  if(nrow(dfPoints[dfPoints$Series=='Realigns',])==0){
    dfPoints[nrow(dfPoints)+1,] <- data.frame(Series='Realigns', y=10, number=0, x=-10, stringsAsFactors = F)
  }
  dfPoints$Series <- factor(dfPoints$Series, levels =c('No_Realignment', 'Trimmed', 'Realigns'))
  dfPoints <- dfPoints[order(dfPoints$Series),]
  # Normalise counts within each group so alpha is relative to that group's max.
  dfPoints$number[dfPoints$Series=='No_Realignment'] <- dfPoints$number[dfPoints$Series=='No_Realignment'] /max(dfPoints$number[dfPoints$Series=='No_Realignment'])
  dfPoints$number[dfPoints$Series=='Trimmed'] <- dfPoints$number[dfPoints$Series=='Trimmed'] /max(dfPoints$number[dfPoints$Series=='Trimmed'])
  dfPoints$number[dfPoints$Series=='Realigns'] <- dfPoints$number[dfPoints$Series=='Realigns'] /max(dfPoints$number[dfPoints$Series=='Realigns'])
  nrgGraph <-nrgGraph +
    geom_point(data=dfPoints, aes(y=y, x=x, group=Series, colour=Series, alpha=number), show.legend=F, position = position_jitter(width=.2)) +
    geom_errorbar(data=stats, aes(ymin=mean-2*sd, ymax=mean+2*sd, x=x)) +
    stat_summary(fun.y='mean', data=stats, colour="#000000", geom="errorbar", aes(group=Series,x=x, y=mean,ymax=..y.., ymin=..y..), width=.75, linetype="dashed", show.legend=F)
  rm(nrgs, stats, dfPoints)
  #make the graph look nice
  #FIX: the original chain ended after scale_colour_manual() with no "+", so
  #`theme(legend.title = element_blank()) + theme_bw()` was evaluated and
  #discarded. theme_bw() is applied first here so the legend.title tweak is not
  #overridden by the complete theme.
  nrgGraph <- nrgGraph +
    labs(y=bquote(Delta~G[37]^{o}*' (kcal/mol)'), x=paste(ntCap, "Nucleotide Sequence Addition", sep=' ')) + #delta G degree 37 and addition
    scale_x_continuous(breaks=c(((ntCap-1)*3+1):(3*ntCap)), labels=c('Transcribed', 'Will Realign', 'Realigns'), limits=c((ntCap-1)*3+.5, 3*ntCap+.5)) +
    scale_y_continuous(limits = c(-8, -5), breaks=seq(-5,-8, length.out = 7), labels= seq(-5,-8, length.out = 7)) +
    scale_colour_manual(name='legend', breaks=c('No_Realignment', 'Trimmed', 'Realigns'),
                        values=c(No_Realignment=hcl(h=seq(15,375, length=(3+1))[2], c=100, l=65), Trimmed='#D98000', Realigns=hcl(h=seq(15,375, length=(3+1))[1], c=100, l=65)),
                        labels=c(No_Realignment='Transcribed', Trimmed='Will Realign', Realigns='Realigns')) +
    theme_bw() +
    theme(legend.title = element_blank())
  return(nrgGraph)
}
getNrgGraphs <- function(df, ntCap=NA, target='Subunit', seqLen=NA){
  #Generates and saves composite energy graphs (built from nrgGraph) for a df
  #from dfPreProcess(), batching up to 4 panels per row with cowplot.
  #Side effects only: writes "<name>_by_<target>.png" plus, per target value,
  #"<name>_<value>_by_Length_.png" (and the stats csvs written by nrgGraph).
  #
  #Args
  # df: dataframe from dfPreProcess
  # ntCap: upper limit for nt addition to be graphed; NA = all nt_addition_* columns
  # target: column to create graphs around (ex., Strain, Subunit)
  # seqLen: vector of sequence lengths to examine; if NA analyzes all lengths
  colnames(df)[grep(target, colnames(df))] <- 'target'
  subs <- unique(df$target)
  if(target=="Strain"){
    name <- 'Global'
  } else {
    name <- df$Strain[1]
  }
  if(length(seqLen)==1 && is.na(seqLen)){
    seqLen <- c(min(df$Length):max(df$Length))
  }
  if(!is.na(ntCap)){
    df <- df[,-c(grep("nt_addition_", colnames(df))[-c(1:ntCap)])] # drop additions above the cap
  } else {
    ntCap <- length(grep("nt_addition_", colnames(df)))
  }
  #LoN <- nrgGraph() #Legend of Energy: the Graph Legend
  print(target)
  # Nested helper: for one target value, builds one panel per sequence length
  # (4 per row) and saves the combined "..._by_Length_" png.
  # NOTE(review): the graph1..graph4 branches are near-duplicates, and LoN is
  # assigned but never used.
  lenGraph <- function(df, var, ntCap, sub, target, name){
    LoN <- nrgGraph() #Legend of Energy: the Graph Legend
    for(j in 1:ceiling(length(var)/4)){
      print(paste("Length =",var[4*(j-1)+1]))
      titleText <- gsub("\\.","",paste(sub," ", target, " - ", var[4*(j-1)+1], " Nucleotides", sep=""))
      title <- ggdraw() + draw_label(titleText, fontface='bold')
      graph1 <- plot_grid(title, nrgGraph(df[df$Length==var[4*(j-1)+1],], ntCap, name=paste(name, titleText)),ncol=1, nrow=2, rel_heights = c(0.5,5))
      if(length(var)<(4*(j-1)+2)){
        graph2 <- NULL
      } else {
        print(paste('length =',var[4*(j-1)+2]))
        titleText <- gsub("\\.","",paste(sub," ", target, " - ", var[4*(j-1)+2], " Nucleotides", sep=""))
        title <- ggdraw() + draw_label(titleText, fontface='bold')
        graph2 <- plot_grid(title, nrgGraph(df[df$Length==var[4*(j-1)+2],], ntCap, name=paste(name, titleText)),ncol=1, nrow=2, rel_heights = c(0.5,5))
      }
      if(length(var)<(4*(j-1)+3)){
        graph3 <- NULL
      } else {
        print(paste('length =',var[4*(j-1)+3]))
        titleText <- gsub("\\.","",paste(sub," ", target, " - ", var[4*(j-1)+3], " Nucleotides", sep=""))
        title <- ggdraw() + draw_label(titleText, fontface='bold')
        graph3 <- plot_grid(title, nrgGraph(df[df$Length==var[4*(j-1)+3],], ntCap, name=paste(name, titleText)),ncol=1, nrow=2, rel_heights = c(0.5,5))
      }
      if(length(var)<(4*(j-1)+4)){
        graph4 <- NULL
      } else {
        print(paste('length =',var[4*(j-1)+4]))
        titleText <- gsub("\\.","",paste(sub," ", target, " - ", var[4*(j-1)+4], " Nucleotides", sep=""))
        title <- ggdraw() + draw_label(titleText, fontface='bold')
        graph4 <- plot_grid(title, nrgGraph(df[df$Length==var[4*(j-1)+4],], ntCap, name=paste(name, titleText)),ncol=1, nrow=2, rel_heights = c(0.5,5))
      }
      # Stack each completed row of 4 under the rows accumulated so far.
      if(j==1){
        graph <- plot_grid(graph1, graph2, graph3, graph4, ncol=4, nrow=1, rel_widths = c(5,5,5,5), labels = c(LETTERS[(4*(j-1)+1):(4*(j-1)+4)], NULL))
      } else {
        graph <- plot_grid(graph, plot_grid(graph1, graph2, graph3, graph4, ncol=4, nrow=1, rel_widths = c(5,5,5,5), labels = c(LETTERS[(4*(j-1)+1):(4*(j-1)+4)], NULL)), rel_widths=c(1,1), rel_heights=c(j-1, 1), ncol=1, nrow=2)
      }
      rm(graph1, graph2, graph3, graph4)
      gc()
      if(j==ceiling(length(var)/4)){
        print('plotting')
        save_plot(paste(gsub("\\.","",paste(name,"_", sub, "_by_Length_",sep="")),'.png',sep=""), graph, base_height = 5.5*j, base_width = 20, dpi=600)
      }
    }
  }
  # Outer batching: one panel per target value (4 per row); each value also
  # gets its own by-length figure via lenGraph().
  for(i in 1:ceiling(length(subs)/4)){
    print(subs[4*(i-1)+1])
    lenGraph(df[df$target==subs[4*(i-1)+1],], seqLen, ntCap, sub=subs[4*(i-1)+1], name=name, target=target)
    titleText <- gsub("\\.","",paste(subs[4*(i-1)+1],target, sep=" "))
    title <- ggdraw() + draw_label(titleText, fontface='bold')
    graph1 <- plot_grid(title, nrgGraph(df[df$target==subs[4*(i-1)+1],], ntCap, name=paste(name, titleText)),ncol=1, nrow=2, rel_heights = c(0.5,5))
    if(length(subs)<(4*(i-1)+2)){
      graph2 <- NULL
    } else {
      print(subs[4*(i-1)+2])
      lenGraph(df[df$target==subs[4*(i-1)+2],], seqLen, ntCap, sub=subs[4*(i-1)+2], name=name, target=target)
      titleText <- gsub("\\.","",paste(subs[4*(i-1)+2],target, sep=" "))
      title <- ggdraw() + draw_label(titleText, fontface='bold')
      graph2 <- plot_grid(title, nrgGraph(df[df$target==subs[4*(i-1)+2],], ntCap, name=paste(name, titleText)),ncol=1, nrow=2, rel_heights = c(0.5,5))
    }
    if(length(subs)<(4*(i-1)+3)){
      graph3 <- NULL
    } else {
      print(subs[4*(i-1)+3])
      lenGraph(df[df$target==subs[4*(i-1)+3],], seqLen, ntCap, sub=subs[4*(i-1)+3], name=name, target=target)
      titleText <- gsub("\\.","",paste(subs[4*(i-1)+3],target, sep=" "))
      title <- ggdraw() + draw_label(titleText, fontface='bold')
      graph3 <- plot_grid(title, nrgGraph(df[df$target==subs[4*(i-1)+3],], ntCap, name=paste(name, titleText)),ncol=1, nrow=2, rel_heights = c(0.5,5))
    }
    if(length(subs)<(4*(i-1)+4)){
      graph4 <- NULL
    } else {
      print(subs[4*(i-1)+4])
      lenGraph(df[df$target==subs[4*(i-1)+4],], seqLen, ntCap, sub=subs[4*(i-1)+4], name=name, target=target)
      titleText <- gsub("\\.","",paste(subs[4*(i-1)+4],target, sep=" "))
      title <- ggdraw() + draw_label(titleText, fontface='bold')
      graph4 <- plot_grid(title, nrgGraph(df[df$target==subs[4*(i-1)+4],], ntCap, name=paste(name, titleText)),ncol=1, nrow=2, rel_heights = c(0.5,5))
    }
    if(i==1){
      graph <- plot_grid(graph1, graph2, graph3, graph4, ncol=4, nrow=1, rel_widths = c(5,5,5,5), labels = c(LETTERS[(4*(i-1)+1):(4*(i-1)+4)], NULL))
    } else {
      graph <- plot_grid(graph, plot_grid(graph1, graph2, graph3, graph4, ncol=4, nrow=1, rel_widths = c(5,5,5,5), labels = c(LETTERS[(4*(i-1)+1):(4*(i-1)+4)], NULL)), rel_widths=c(1,1), rel_heights=c(i-1, 1), ncol=1, nrow=2)
    }
    rm(graph1, graph2, graph3, graph4)
    gc()
    if(i==ceiling(length(subs)/4)){
      print('plotting')
      save_plot(paste(gsub("\\.","",paste(name, "_by_", target, sep="")),'.png',sep=""), graph, base_height = 5.5*i, base_width = 20, dpi=900)
      rm(graph)
    }
  }
}
# Per-strain pre-processing: each strain's outputs (csvs/pngs from downstream
# calls) go into its own sub-directory via setwd().
# NOTE(review): setwd() in scripts is fragile -- an error mid-block leaves the
# working directory changed.
setwd("./Helical Stability/3nt")
setwd("./Puerto Rico")
pr8 <- dfPreProcess(pr8, subsa=c(1:5), subsb=c(6:8), converter = nrgConverter, strain="Puerto Rico")
gc()
setwd('..')
setwd("./Hong Kong")
# NOTE(review): Hong Kong uses a 6/2 subunit split (subsa=1:6, subsb=7:8)
# unlike the 5/3 split of the other strains -- confirm intentional.
hk <- dfPreProcess(hk, subsa=c(1:6), subsb=c(7:8), converter = nrgConverter, strain="Hong Kong")
gc()
setwd('..')
setwd("./WSN")
wsn <- dfPreProcess(wsn, subsa=c(1:5), subsb=c(6:8), converter = nrgConverter, strain="WSN")
#getNrgGraphs(wsn, 3, target = 'Subunit', seqLen = 9:16)
gc()
setwd('..')
setwd("./Brisbane")
bri <- dfPreProcess(bri, subsa=c(1:5), subsb=c(6:8), converter = nrgConverter, strain="Brisbane")
gc()
setwd("..")
# Combine all strains for the global analysis and free the per-strain copies.
global <- rbind(pr8, hk, wsn, bri)
rm(pr8, hk, wsn, bri)
gc()
#turn off intermediates and final transcription
#global <- global[global$Intermediate==F,] #this option does not change the data meaningfully
getNrgGraphs(global, 3, target = 'Strain', seqLen = 9:16)
#get statistics: per-strain means/sds of the 3 nt addition energy and pairwise
#Welch t-tests between the three outcome groups
global <- global[global$Realign==0 | global$Realign>=3,] #keep reads that never realign or realign by >= 3 nt
gc()
strains <- c("Puerto Rico", 'Hong Kong', 'WSN', 'Brisbane')
series <- c('Transcribed', 'Will Realign', 'Realigns')
global$series <- 'Transcribed'
global$series[global$Realign == 3] <- 'Realigns'
global$series[global$Realign > 3] <- 'Will Realign'
#preallocate one slot per strain x series combination (4 strains * 3 series = 12)
means <- vector(length = 12, mode = 'numeric') #means
sds <- vector(length = 12, mode = 'numeric') #standard deviation
p1 <- vector(length = 12, mode = 'numeric') #p versus transcription
p2 <- vector(length = 12, mode = 'numeric') #p versus will realign
p3 <- vector(length = 12, mode = 'numeric') #p versus realigns
for(i in 1:4){
  for(j in 1:3){
    #when j equals the reference series the t-test compares a group with itself (p = 1 by construction)
    means[(i-1)*3+j] <- mean(global$nt_addition_3[global$Strain==strains[i] & global$series==series[j]])
    sds[(i-1)*3+j] <- sd(global$nt_addition_3[global$Strain==strains[i] & global$series==series[j]])
    p1[(i-1)*3+j] <- as.numeric(t.test(global$nt_addition_3[global$Strain==strains[i] & global$series==series[1]], global$nt_addition_3[global$Strain==strains[i] & global$series==series[j]])$p.value)
    p2[(i-1)*3+j] <- as.numeric(t.test(global$nt_addition_3[global$Strain==strains[i] & global$series==series[2]], global$nt_addition_3[global$Strain==strains[i] & global$series==series[j]])$p.value)
    p3[(i-1)*3+j] <- as.numeric(t.test(global$nt_addition_3[global$Strain==strains[i] & global$series==series[3]], global$nt_addition_3[global$Strain==strains[i] & global$series==series[j]])$p.value)
  }
}
statsTable <- data.frame(Strain = c(rep(strains[1], 3), rep(strains[2], 3), rep(strains[3], 3), rep(strains[4], 3)),
                         Series = rep(series, 4),
                         Mean = means,
                         Standard_Deviation = sds,
                         Lower_CI = 0,
                         Upper_CI = 0,
                         pvalue_vs_Transcribed = p1,
                         pvalue_vs_Will_Realign = p2,
                         pvalue_vs_Realigns = p3,
                         stringsAsFactors = F)
#FIX: the original swapped the bounds (Upper_CI was mean - 2*sd, Lower_CI was mean + 2*sd)
statsTable$Upper_CI <- statsTable$Mean+2*statsTable$Standard_Deviation
statsTable$Lower_CI <- statsTable$Mean-2*statsTable$Standard_Deviation
colnames(statsTable) <- gsub('_', ' ', colnames(statsTable))
write.csv(statsTable, 'Energy Stats.csv', row.names = F)
rm(global)
gc()
|
3ec987357cd2dce294ff7eeebaefe3cbee12bb21
|
a93e797750ee719069703464a84b7881f74d9220
|
/R/helper_functions.R
|
21a156b551814206e38ef95c94f454bceafefc14
|
[
"Unlicense"
] |
permissive
|
nevrome/sysdedupe
|
aa86a4e3fa93bd3240c1507a9ecf22488b5d5fe7
|
09e2bd84e3112fe7487947597750111161391513
|
refs/heads/master
| 2020-05-25T23:18:48.851256
| 2019-05-22T23:15:28
| 2019-05-22T23:15:28
| 188,032,322
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 264
|
r
|
helper_functions.R
|
# Adapted from https://stackoverflow.com/questions/42734547/generating-random-strings
# Generates n random identifiers of the form: 5 uppercase letters,
# a zero-padded 4-digit number, then 1 uppercase letter (10 chars total).
random_string_generator <- function(n) {
  letter_columns <- replicate(5, sample(LETTERS, n, TRUE), FALSE)
  prefix <- do.call(paste0, letter_columns)
  digit_part <- sprintf("%04d", sample(9999, n, TRUE))
  suffix <- sample(LETTERS, n, TRUE)
  paste0(prefix, digit_part, suffix)
}
|
6627237c9cfc84c2967320e7bf934566ec0586d9
|
482d9b669cdb1a41b525214ee58c1de7c3e2b8b0
|
/R/launch_ggedit.R
|
da1ab2527b4e583b7216cf16134aa64a2391d8b9
|
[] |
no_license
|
ktaranov/ggedit
|
0459b1188e6d335a225326d0785f2b5dc6baaf56
|
1f90aee52aa102e41939323d150d774527a4abe3
|
refs/heads/master
| 2021-01-19T08:40:18.945096
| 2017-04-06T20:03:18
| 2017-04-06T20:03:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 548
|
r
|
launch_ggedit.R
|
#' Launch the ggedit Shiny application
#'
#' Stashes the plot and the output spec in the package environment
#' (`.ggeditEnv`) where the Shiny app reads them, runs the bundled app, and
#' clears the stored plot again when the app exits.
#'
#' @param p plot object handed to the app (stored as `.p` in `.ggeditEnv`)
#' @param output output spec handed to the app (stored as `.output`)
#' @param rstudio currently unused in this function body
#' @param ... passed through to \code{shiny::runApp}
#' @import shiny
launch_ggedit <- function(p,output,rstudio, ...)
{
  assign(x = '.p',envir = .ggeditEnv,value = p)
  assign(x = '.output',envir = .ggeditEnv,value = output)
  # Cleanup runs whether the app exits normally or errors; note `.output` is
  # deliberately left in place (the commented line below would clear it).
  on.exit({
    #rm(list = ls(envir = .GlobalEnv)[ls(envir = .GlobalEnv)%in%c('p.new','p.in','p.theme','pList.new','p.Elems','themeUpdate')],envir = .GlobalEnv)
    assign(x = '.p',envir = .ggeditEnv,value = NULL)
    #assign(x = '.output',envir = .ggeditEnv,value = NULL)
  },add = T
  )
  # Run the app shipped inside the installed ggedit package.
  shiny::runApp(appDir = system.file("application", package = 'ggedit'),...)
}
|
b1ec3dabc0314cb5971aa99710234afb7049cf0c
|
3555804fa67ea7e5f7d770e54fff9f44994e8b32
|
/ui.R
|
2692de4d444c5620ab6318b1903d404ba7f5be52
|
[] |
no_license
|
jacksmd12/devdataproduct
|
c4eb07ee2820ae13e227d823ebd042c09f57d1d1
|
f1cc79875722f463af2e7cee8d1866d6463af918
|
refs/heads/master
| 2021-01-10T10:28:48.226906
| 2015-11-20T19:44:25
| 2015-11-20T19:44:25
| 46,581,851
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,197
|
r
|
ui.R
|
# Shiny app (exploratory app for data sets)
# UI definition only: lets the user pick x/y columns of mtcars and a plot
# title. The server (not in this chunk) is expected to render output$graph and
# output$fit from inputs "ind", "dep" and "title".
library(shiny)
library(ggplot2) # presumably used by the server for plotting -- not needed by the UI itself
data(mtcars)
ui <- fluidPage(
        titlePanel("Explore mtcars dataset"),
        sidebarLayout(
                sidebarPanel(
                        # Variable pickers populated from mtcars column names.
                        selectInput(inputId = "ind", label = "Independent Variable", choices = names(mtcars), selected = "mpg"),
                        selectInput(inputId = "dep", label = "Dependent Variable", choices = names(mtcars), selected = "hp"),
                        textInput(inputId = "title", label = "Title for Graph", value = "MPG vs. HP (default)")
                ),
                mainPanel(
                        tabsetPanel(
                                tabPanel("Graph and linear fit", plotOutput("graph"), tableOutput(outputId = "fit")),
                                tabPanel("About", "This app allows the user to explore dataset (i.e. mtcars) using simple
                                         drop down menus to define the x (independent) and y (dependent) variables. Once
                                         the desired data set has been selected the coeffecients for the linear model
                                         outputs at the bottom of the graph.In addition, the user has the option of
                                         providing a title for the graph generated.")
                        )
                )
        )
)
|
7f9b5e3e3b3a22547aeb5cdc3bfb6b401037f756
|
d91ca58ef6c050991ece3a1af3caa18e4fb3c5e3
|
/Performance.R
|
070a2bdb596bf6308178c79feba8eb500d72e450
|
[] |
no_license
|
Gmabatah93/PEOPLE_Analytics_Performance
|
5a0507b0965ff1d31ab428787994385612160031
|
752ea9d185452d8f1cc9938eeeae7c9ad312b405
|
refs/heads/main
| 2023-04-02T14:48:59.669984
| 2021-03-27T17:33:06
| 2021-03-27T17:33:06
| 352,129,502
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,856
|
r
|
Performance.R
|
library(readr)
library(dplyr)
library(ggplot2)
library(infer)
library(broom)
# PERFORMANCE: ----
# Exploratory analysis of performance ratings by gender / job level.
# NOTE(review): "hr_perfromance" (sic) is misspelled throughout; kept because
# later sections reference it.
## Data
performance <- read_csv("Data/performance_data.csv")
hr_data <- read_csv("Data/hr_data.csv")
# - join: one row per employee with their performance rating attached
hr_perfromance <- hr_data %>%
  left_join(performance, by = "employee_id")
## Exploratory Data Analysis
# - summary
hr_perfromance %>% skimr::skim()
hr_perfromance %>% glimpse()
hr_perfromance %>% count(department)
hr_perfromance %>% count(job_level)
hr_perfromance %>% count(gender)
### Visual: Average Ratings by Gender
# NOTE(review): the 2.83 reference line is hard-coded -- presumably the overall
# mean rating; confirm against the data.
hr_perfromance %>%
  group_by(gender) %>%
  summarise(avg_ratings = mean(rating)) %>%
  ggplot(aes(gender, avg_ratings)) +
  geom_col() + geom_hline(yintercept = 2.83, color = "red") +
  ggtitle("Avg Rating ~ Gender")
### Visual: Proportion of High Performers by Gender (rating >= 4 = high)
hr_perfromance %>%
  mutate(high_performer = factor(ifelse(rating >= 4, 1, 0))) %>%
  ggplot(aes(gender, fill = high_performer)) +
  geom_bar(position = "fill") +
  ggtitle("Are males ranked favorably as opposed to women ?")
# - stat test: Significant (chi-square of high-performer status vs gender)
hr_perfromance %>%
  mutate(high_performer = factor(ifelse(rating >= 4, 1, 0))) %>%
  chisq_test(high_performer ~ gender)
### Visual: Job Distribution By Gender
hr_perfromance %>%
  ggplot(aes(gender, fill = job_level)) +
  geom_bar(position = "fill") +
  ggtitle("Job Distribution by Gender Significant ?")
# - stat test: Significant
hr_perfromance %>% chisq_test(job_level ~ gender)
### Visual: High Performance by Gender factored by Job Level
hr_perfromance %>%
  mutate(high_performer = factor(ifelse(rating >= 4, 1, 0))) %>%
  ggplot(aes(gender, fill = high_performer)) +
  geom_bar(position = "fill") +
  facet_wrap(~ job_level) +
  ggtitle("Are males ranked favorably as opposed to women ~ Job Level ?")
# - stat test: logistic regression controlling for job level (odds ratios)
hr_perfromance %>%
  mutate(high_performer = factor(ifelse(rating >= 4, 1, 0))) %>%
  glm(high_performer ~ gender + job_level, family = "binomial", data = .) %>%
  tidy(exponentiate = TRUE)
# -- Male: Significant
# -- Hourly: Significant
# -- Manager: Significant
# -- Salaried: Significant
# PATH Analysis: ----
# Sales revenue vs overall performance, controlling for education level.
## Data
pm <- read_csv("Data/PerfMgmtRewardSystemsExample.csv")
## Exploratory Data Analysis
# - summary
pm %>% skimr::skim()
pm %>% glimpse()
# - Feature Engineering: overall performance as the mean of the component scores
# NOTE(review): Perf_Qual appears twice in the average and divides by 4 --
# presumably a fourth distinct metric was intended; confirm.
pm <- pm %>%
  mutate(Perf_Overall = ( Perf_Qual + Perf_Prod + Perf_Effort + Perf_Qual ) / 4)
## Visual: Correlation Matrix (numeric columns only; Sex excluded)
pm %>% select(SalesRevenue, everything(), -Sex, EmpID) %>% cor() %>%
  ggcorrplot::ggcorrplot(method = "circle", type = "lower")
## Visual: Sales Revenue By Sex
pm %>%
  ggplot(aes(Sex, SalesRevenue)) +
  geom_boxplot() +
  ggtitle("Sales Revenue ~ Sex")
# - stat test: Not Significant
pm %>% t_test(SalesRevenue ~ Sex)
## Visual: Sales Revenue By Performance factored by Education Level
pm %>%
  ggplot(aes(Perf_Overall, SalesRevenue)) +
  geom_point() +
  geom_smooth(method = "lm", se = FALSE,
              aes(color = factor(EducationLevel))) +
  ggtitle("Sale Revenue ~ Overall Peformance")
# - stat tests: Significant (simple and education-adjusted linear models)
lm(SalesRevenue ~ Perf_Overall, data = pm) %>% tidy()
lm(SalesRevenue ~ Perf_Overall + factor(EducationLevel), data = pm) %>% tidy()
# MEDIATION: Indirect Effect ----
# lavaan SEM: does intention mediate the attitude -> behavior relationship?
library(lavaan)
## Data
pb <- read_csv("Data/PlannedBehavior.csv")
## Exploratory Data Analysis
# - summary
pb %>% skimr::skim()
pb %>% glimpse()
## Path Analysis
# Model spec (lavaan syntax): direct path c', mediator paths a and b, and the
# derived indirect effect ab := a*b.
spec_mod <- "
# Path c' (Direct Effect)
behavior ~ c*attitude
# Path a
intention ~ a*attitude
# Path b
behavior ~ b*intention
# Indirect Effect (a * b)
ab := a*b
"
mod <- sem(model = spec_mod, data = pb)
mod %>% summary(fit.measures = TRUE, rsquare = TRUE)
## Path Analysis: Resampling (Percentile Bootstrap)
# Bootstrapped (100 draws) percentile CIs for the same model.
set.seed(2019)
mod_2 <- sem(spec_mod, data = pb,
             se = "bootstrap", bootstrap = 100)
parameterEstimates(mod_2, ci = TRUE, level = 0.95, boot.ci.type = "perc")
|
51222b4a545bbaf1feea402a42160bf113722ff1
|
d49fa877b3c3db1c4ab8cf2c2dd56d11b8c593a9
|
/src/helpers/SignificancyDataCollection.R
|
4b7b10cbb65ca8d33bc4c37955c96d5f3ecd651a
|
[] |
no_license
|
aysegul-kt/MelonomaMetastasisPrediction
|
510dfecc0c80b7cc5b4605144d9fe20f0daf5e05
|
524c201771f6b26da5a3b5a5219766a6475f5b2a
|
refs/heads/master
| 2023-06-05T05:45:51.263538
| 2021-06-22T19:54:25
| 2021-06-22T19:54:25
| 225,965,472
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,782
|
r
|
SignificancyDataCollection.R
|
###############################################
# NAME        : Base Analysis Script
# AUTHOR      : AYSEGUL KUTLAY
# DESCRIPTION :
# LAST UPDATED: 06.06.2019
# VERSION     : 02
################################################
TrimMiRNA <- function(trimInputdata)
{
  # Adds a refName column holding a canonical miRNA name for each row: the
  # miRNA identifier lower-cased and truncated one character at a time until at
  # most two '-' separators remain (strips arm/variant suffixes).
  #
  # Args:
  #   trimInputdata: data frame with a `miRNA` character column
  #
  # Returns:
  #   trimInputdata with a `refName` column appended
  #
  # FIX: the original first statement overwrote the argument with the global
  # `joind_map` (leftover debug code), so whatever was passed in was ignored.
  trimInputdata$refName <- rep("", nrow(trimInputdata))
  tempMirnaList <- unique(trimInputdata$miRNA)
  for (i in seq_along(tempMirnaList))
  {
    mirnaname <- str_to_lower(tempMirnaList[i])
    # Trim trailing characters until at most two dashes remain.
    while (str_count(mirnaname, '-') > 2) {
      mirnaname <- str_sub(mirnaname, 0, str_length(mirnaname) - 1)
    }
    # Assign the trimmed name to every row carrying this miRNA id.
    trimInputdata$refName[trimInputdata$miRNA %in% tempMirnaList[i]] <- mirnaname
    cat(paste(i, ": ", tempMirnaList[i], " to ", mirnaname, "\n")) # progress log
  }
  trimInputdata
}
getRegulationDetailsResults <- function(mydata)
{
  # Annotates combined miRNA/mRNA test results with regulation direction,
  # significance flags, a probe-type label, and a normalised reference name.
  #
  # Args:
  #   mydata: data frame with columns probName, P_Val, P_Val_Less, P_Val_Greater
  #
  # Adds:
  #   direction - "H" if P_Val_Less > P_Val_Greater else "L"
  #   Diff001   - 1 if P_Val <= 0.001 else 0
  #   Diff005   - 1 if P_Val <= 0.005 else 0
  #   dtype     - "miRNA" for rows before 1608, "mRNA" after
  #   refName   - trimmed miRNA name, or mRNA id up to the first "."
  #
  # NOTE(review): the miRNA/mRNA split at row index 1608 is hard-coded to this
  # dataset's layout -- confirm before reuse on other inputs.
  mydata$direction <- rep(NA, nrow(mydata))
  mydata$Diff001 <- rep(c(1), nrow(mydata))
  mydata$Diff005 <- rep(c(1), nrow(mydata))
  mydata$dtype <- rep(NA, nrow(mydata))
  mydata$refName <- rep("", nrow(mydata))
  for (i in 1:nrow(mydata))
  {
    if (mydata$P_Val_Less[i] > mydata$P_Val_Greater[i])
    {
      mydata$direction[i] <- "H"
    } else
    {
      mydata$direction[i] <- "L"
    }
    if (mydata$P_Val[i] > 0.001) {
      mydata$Diff001[i] <- 0
    }
    if (mydata$P_Val[i] > 0.005) {
      mydata$Diff005[i] <- 0
    }
    if (i < 1608) {
      mydata$dtype[i] <- "miRNA"
      # Strip up to three trailing suffix characters so at most two dashes
      # remain (cf. TrimMiRNA, which loops until that invariant holds).
      mirnaname <- str_to_lower(mydata$probName[i])
      if (str_count(mirnaname, '-') == 3) {
        mirnaname <- str_sub(mirnaname, 0, str_length(mirnaname) - 2)
        if (str_count(mirnaname, '-') == 3) {
          mirnaname <- str_sub(mirnaname, 0, str_length(mirnaname) - 1)
          if (str_count(mirnaname, '-') == 3) {
            mirnaname <- str_sub(mirnaname, 0, str_length(mirnaname) - 1)
          }
        }
      }
      mydata$refName[i] <- mirnaname
    }
    else {
      mydata$dtype[i] <- "mRNA"
      # mRNA reference name: probe id up to (not including) the first ".".
      phrase <- mydata$probName[i]
      phrase <- str_sub(phrase, 0, str_locate(phrase, "\\.")[1] - 1)
      mydata$refName[i] <- phrase
    }
  }
  mydata
}
getGeneMethylationDetailedResults <- function(mydata){
  # Annotates methylation test results with regulation direction and
  # significance flags. Vectorized (was a per-row loop); also safe on a
  # zero-row input, which errored before (1:nrow ran for i in c(1, 0)).
  #
  # Args:
  #   mydata: data frame with columns ProbName, P_Val, P_Val_Less, P_Val_Greater
  #
  # Returns mydata with columns added:
  #   direction   - "H" if P_Val_Less > P_Val_Greater else "L"
  #   Diff001     - 1 if P_Val <= 0.0001 else 0
  #   Diff005     - 1 if P_Val <= 0.0005 else 0
  #   dtype       - constant "Methylation"
  #   hgnc_symbol - copy of ProbName
  #
  # NOTE(review): thresholds here are 0.0001/0.0005 while the column names
  # (Diff001/Diff005) mirror getRegulationDetailsResults, which uses
  # 0.001/0.005 -- confirm the stricter cutoffs are intentional.
  mydata$direction <- ifelse(mydata$P_Val_Less > mydata$P_Val_Greater, "H", "L")
  mydata$Diff001 <- ifelse(mydata$P_Val > 0.0001, 0, 1)
  mydata$Diff005 <- ifelse(mydata$P_Val > 0.0005, 0, 1)
  mydata$dtype <- rep("Methylation", nrow(mydata))
  mydata$hgnc_symbol <- mydata$ProbName
  mydata
}
getPattern <- function(miRNAPatern, mRNAPatern, methylationPatern, grp_code)
{
  # Select miRNA / mRNA / methylation markers matching a differential
  # expression pattern, then join the three layers through the
  # miRNA -> gene -> methylation map and stamp the rows with grp_code.
  #
  # Relies on globals built elsewhere in this script:
  #   ttest.results_detailed, GeneMethylationDetailedResults, joind_map
  #
  # Args:
  #   miRNAPatern / mRNAPatern: "H" or "L" selects significant markers with
  #       that direction; the string "NA" selects non-significant ones.
  #   methylationPatern: "H"/"L" for significant methylation probes;
  #       "Nan" for the (always empty) non-significant branch.
  #   grp_code: label written into the grp_code column of every result row.
  #
  # NOTE(review): the trailing else-branches filtering Diff001 %in% "nan"
  # can never match (Diff001 holds 0/1), so they always return empty sets;
  # for the miRNA/mRNA selectors those branches are also unreachable
  # because `== "NA"` is the exact negation of the first test.
  # NOTE(review): `x.all=TRUE` is not a merge() argument (`all.x` was
  # presumably intended); it is silently absorbed by `...`.  Since
  # `all = TRUE` is also given, the merge is a full outer join regardless.

  # Significant (or non-significant) miRNA markers with the requested direction.
  subset.miRNA.all <-
    ttest.results_detailed[ttest.results_detailed$dtype %in% "miRNA",]
  if (miRNAPatern != "NA") {
    subset.miRNA.pattern <-
      subset.miRNA.all[subset.miRNA.all$Diff001 %in% "1", ]
    subset.miRNA.pattern <-
      subset.miRNA.pattern[subset.miRNA.pattern$direction %in% miRNAPatern, ]
  } else if (miRNAPatern == "NA" ){
    subset.miRNA.pattern <-
      subset.miRNA.all[subset.miRNA.all$Diff001 %in% "0", ]
  }else {
    subset.miRNA.pattern <-
      subset.miRNA.all[subset.miRNA.all$Diff001 %in% "nan", ]
  }
  # Same selection for mRNA markers.
  subset.mRNA.all <-
    ttest.results_detailed[ttest.results_detailed$dtype %in% "mRNA",]
  if (mRNAPatern != "NA" ) {
    subset.mRNA.pattern <-
      subset.mRNA.all[subset.mRNA.all$Diff001 %in% "1", ]
    subset.mRNA.pattern <-
      subset.mRNA.pattern[subset.mRNA.pattern$direction %in% mRNAPatern, ]
  } else if (mRNAPatern == "NA" ){
    subset.mRNA.pattern <- subset.mRNA.all[subset.mRNA.all$Diff001 %in% "0", ]
  }else {
    subset.mRNA.pattern <- subset.mRNA.all[subset.mRNA.all$Diff001 %in% "nan", ]
  }
  # Methylation probes with the requested direction.
  if (methylationPatern != "Nan") {
    subset.methylation.pattern <- GeneMethylationDetailedResults[GeneMethylationDetailedResults$Diff001 %in% "1", ]
    subset.methylation.pattern <- subset.methylation.pattern[subset.methylation.pattern$direction %in% methylationPatern, ]
  } else{
    subset.methylation.pattern <- GeneMethylationDetailedResults[GeneMethylationDetailedResults$Diff001 %in% c("nan"), ]
  }
  miRNA.Set <- subset.miRNA.pattern
  # Map rows whose miRNA appears in the selected set; their gene ids
  # restrict the mRNA side.
  subset.joinedmap <-joind_map[joind_map$refName %in% subset.miRNA.pattern$refName,]
  myList <- unique(subset.joinedmap$ensembl_gene_id)
  mRNA.Set <-subset.mRNA.pattern[subset.mRNA.pattern$refName %in% myList, ]
  # Prefix the stat columns by layer so the merged table is unambiguous.
  # NOTE(review): these renames assume the 12-column layout of
  # ttest.results_detailed in exactly this order — confirm if it changes.
  colnames(miRNA.Set) <-
    c(
      "miRNA_probName",
      "miRNA_Ftest",
      "miRNA_Ttest",
      "miRNA_Diff01",
      "miRNA_P_Val",
      "miRNA_P_Val_Less",
      "miRNA_P_Val_Greater" ,
      "miRNA_direction",
      "miRNA_Diff001",
      "miRNA_Diff005",
      "miRNA_dtype",
      "refName"
    )
  colnames(mRNA.Set) <-
    c(
      "mRNA_probName",
      "mRNA_Ftest",
      "mRNA_Ttest",
      "mRNA_Diff01",
      "mRNA_P_Val",
      "mRNA_P_Val_Less",
      "mRNA_P_Val_Greater" ,
      "mRNA_direction",
      "mRNA_Diff001",
      "mRNA_Diff005",
      "mRNA_dtype",
      "ensembl_gene_id"
    )
  # Restrict methylation probes to target genes present in the map subset.
  myList <- unique(subset.joinedmap$`Target Gene`)
  methylation.Set <- subset.methylation.pattern[subset.methylation.pattern$`Target Gene` %in% myList, ]
  colnames(methylation.Set) <-
    c(
      "methy_probName",
      "methy_Ftest",
      "methy_Ttest",
      "methy_Diff01",
      "methy_P_Val",
      "methy_P_Val_Less",
      "methy_P_Val_Greater" ,
      "methy_direction",
      "methy_Diff001",
      "methy_Diff005",
      "methy_dtype",
      "hgnc_symbol"
    )
  # Join the three layers: map + miRNA on refName, + mRNA on gene id,
  # + methylation on gene symbol (full outer join, see NOTE above).
  result <- merge(subset.joinedmap, miRNA.Set, by = "refName")
  result <- merge(result, mRNA.Set, by = "ensembl_gene_id")
  result <- merge(x=result, y=methylation.Set, by = "hgnc_symbol",all= TRUE, x.all=TRUE)
  # Drop redundant/merge-artifact columns by position (depends on the
  # exact column order produced by the merges above), de-duplicate, and
  # tag every row with the pattern-group code.
  result <- result[, - c(1, 5, 8, 9, 10, 11, 12, 13, 14, 18, 19, 29, 30,40,41)]
  result <- result[!duplicated(result), ]
  result$grp_code <- rep(c(grp_code), nrow(result))
  result
}
getReleatedMarkers <- function(markerList,grp_code)
{
  # For a given list of marker probe names, pull the matching miRNA, mRNA
  # and methylation t-test rows, find map rows that connect them, and
  # return one merged table tagged with grp_code.
  #
  # Relies on globals built elsewhere in this script:
  #   ttest.results_detailed, GeneMethylationDetailedResults, joind_map
  #
  # Args:
  #   markerList: character vector of probe names (any of the three layers).
  #   grp_code:   label written into the grp_code column of every row.
  #
  # NOTE(review): `x.all=TRUE` is not a merge() argument (`all.x` was
  # presumably intended); with `all = TRUE` also given these merges are
  # full outer joins.
  subset.miRNA.all <-
    ttest.results_detailed[ttest.results_detailed$dtype %in% "miRNA",]
  miRNA.Set <-
    subset.miRNA.all[subset.miRNA.all$probName %in% markerList, ]
  subset.mRNA.all <-
    ttest.results_detailed[ttest.results_detailed$dtype %in% "mRNA",]
  mRNA.Set <-
    subset.mRNA.all[subset.mRNA.all$probName %in% markerList, ]
  methylation.Set <- GeneMethylationDetailedResults[GeneMethylationDetailedResults$ProbName %in% markerList, ]
  # Map rows reachable from any of the selected markers.
  # NOTE(review): the second rbind term matches ensembl_gene_id against
  # miRNA refNames — possibly mRNA.Set$refName was intended; confirm.
  subset.joinedmap <-rbind(joind_map[joind_map$refName %in% miRNA.Set$refName,],
                           joind_map[joind_map$ensembl_gene_id %in% miRNA.Set$refName,],
                           joind_map[joind_map$hgnc_symbol %in% methylation.Set$ProbName,])
  # Prefix the stat columns by layer (assumes the 12-column layout of
  # ttest.results_detailed in exactly this order).
  colnames(miRNA.Set) <-
    c(
      "miRNA_probName",
      "miRNA_Ftest",
      "miRNA_Ttest",
      "miRNA_Diff01",
      "miRNA_P_Val",
      "miRNA_P_Val_Less",
      "miRNA_P_Val_Greater" ,
      "miRNA_direction",
      "miRNA_Diff001",
      "miRNA_Diff005",
      "miRNA_dtype",
      "refName"
    )
  colnames(mRNA.Set) <-
    c(
      "mRNA_probName",
      "mRNA_Ftest",
      "mRNA_Ttest",
      "mRNA_Diff01",
      "mRNA_P_Val",
      "mRNA_P_Val_Less",
      "mRNA_P_Val_Greater" ,
      "mRNA_direction",
      "mRNA_Diff001",
      "mRNA_Diff005",
      "mRNA_dtype",
      "ensembl_gene_id"
    )
  colnames(methylation.Set) <-
    c(
      "methy_probName",
      "methy_Ftest",
      "methy_Ttest",
      "methy_Diff01",
      "methy_P_Val",
      "methy_P_Val_Less",
      "methy_P_Val_Greater" ,
      "methy_direction",
      "methy_Diff001",
      "methy_Diff005",
      "methy_dtype",
      "hgnc_symbol"
    )
  # Join map + miRNA on refName, + mRNA on gene id, + methylation on symbol.
  result <- merge(subset.joinedmap,miRNA.Set , by = "refName")
  result <- merge(result, mRNA.Set, by = "ensembl_gene_id",all= TRUE, x.all=TRUE)
  result <- merge(x=result, y=methylation.Set, by = "hgnc_symbol",all= TRUE, x.all=TRUE)
  # Drop redundant columns by position (depends on exact merge output
  # order), de-duplicate, then tag every row with the group code.
  result <- result[, - c(1, 5, 8, 9, 10, 11, 12, 13, 14, 18, 19, 29, 30,40,41)]
  result <- result[!duplicated(result), ]
  result$grp_code <- rep(c(grp_code), nrow(result))
  result
}
getCrossTableForSignificancyResults <- function()
{
  # Build the miRNA-vs-target-mRNA significance cross table.
  #
  # Rows 1..1607 of the global significance table are copied straight in
  # as probe rows (per the comment in the sibling function, rows below
  # 1608 are miRNA entries — confirm).  The remaining rows are processed
  # in chunks by runmRNA(), which pairs each mRNA with the miRNAs mapped
  # to it.  Intermediate chunks are checkpointed to CSV via logToFile().
  #
  # Relies on globals: global.workingDir, global.mrna_mirna_mappingFile,
  # ttest.results, plus the helpers runmRNA() and logToFile().
  #
  # Returns: one data frame with probe stats, target stats and a
  # relation_type placeholder column (filled later by
  # flagCrossTableOfSignificancyResults()).
  path <-
    paste(global.workingDir,
          "/data/raw/",
          global.mrna_mirna_mappingFile,
          sep = "")
  mrna_mirna_mapping <- read_csv(path)
  significancyTestResults <- ttest.results
  # Empty template defining the cross-table schema.
  result.part0 <-
    data.frame(
      ProbName = character(0),
      Ftest = character(0),
      Ttest = character(0),
      Diff = numeric(0),
      P_Val = numeric(0),
      P_Val_Less = numeric(0),
      P_Val_Greater = numeric(0),
      targetName = character(0),
      targetFtest = character(0),
      targetTtest = character(0),
      targetDiff = numeric(0),
      targetP_Val = numeric(0),
      targetP_Val_Less = numeric(0),
      targetP_Val_Greater = numeric(0),
      relation_type = character(0)
    )
  sample <- result.part0[1, ]
  # Copy the first 1607 rows (miRNA probes) without any target pairing.
  # NOTE(review): rbind-in-loop grows quadratically; fine for 1607 rows.
  for (i in 1:1607)
  {
    cat("\014")  # clear console before printing progress
    cat(paste("..", i, ".."))
    sample$ProbName <- significancyTestResults$probName[i]
    sample$Ftest <- significancyTestResults$Ftest[i]
    sample$Ttest <- significancyTestResults$Ttest[i]
    sample$Diff <- significancyTestResults$Diff[i]
    sample$P_Val <- significancyTestResults$P_Val[i]
    sample$P_Val_Less <- significancyTestResults$P_Val_Less[i]
    sample$P_Val_Greater <- significancyTestResults$P_Val_Greater[i]
    result.part0 <- rbind(result.part0, sample)
  }
  # Process mRNA rows in chunks; each chunk after the first two is logged
  # to disk as a checkpoint.
  result.part1 <-
    runmRNA(1608, 2000, significancyTestResults, mrna_mirna_mapping)
  result.part2 <-
    runmRNA(2001, 5000, significancyTestResults, mrna_mirna_mapping)
  logToFile("SignificancyDataCol",
            "result.part2.csv",
            result.part2,
            "L")
  result.part3 <-
    runmRNA(5001, 10000, significancyTestResults, mrna_mirna_mapping)
  logToFile("SignificancyDataCol",
            "result.part3.csv",
            result.part3,
            "L")
  result.part4 <-
    runmRNA(10001, 15000, significancyTestResults, mrna_mirna_mapping)
  logToFile("SignificancyDataCol",
            "result.part4.csv",
            result.part4,
            "L")
  result.part5 <-
    runmRNA(15001, 20000, significancyTestResults, mrna_mirna_mapping)
  logToFile("SignificancyDataCol",
            "result.part5.csv",
            result.part5,
            "L")
  result.part6 <-
    runmRNA(20001, 25000, significancyTestResults, mrna_mirna_mapping)
  logToFile("SignificancyDataCol",
            "result.part6.csv",
            result.part6,
            "L")
  result.part7 <-
    runmRNA(25001, 30000, significancyTestResults, mrna_mirna_mapping)
  logToFile("SignificancyDataCol",
            "result.part7.csv",
            result.part7,
            "L")
  result.part8 <-
    runmRNA(30001, 35000, significancyTestResults, mrna_mirna_mapping)
  logToFile("SignificancyDataCol",
            "result.part8.csv",
            result.part8,
            "L")
  result.part9 <-
    runmRNA(35001, 40000, significancyTestResults, mrna_mirna_mapping)
  logToFile("SignificancyDataCol",
            "result.part9.csv",
            result.part9,
            "L")
  result.part10 <-
    runmRNA(
      40001,
      nrow(significancyTestResults),
      significancyTestResults,
      mrna_mirna_mapping
    )
  # NOTE(review): parts 3-9 are immediately re-read from their checkpoint
  # CSVs, overwriting the in-memory results just computed — redundant
  # unless this block is being resumed after a crash; parts 1, 2 and 10
  # are used from memory only.
  result.part3 <-
    read_csv("output/logs/SignificancyDataCol_result.part3.csv",
             col_types = cols(X1 = col_skip()))
  result.part4 <-
    read_csv("output/logs/SignificancyDataCol_result.part4.csv",
             col_types = cols(X1 = col_skip()))
  result.part5 <-
    read_csv("output/logs/SignificancyDataCol_result.part5.csv",
             col_types = cols(X1 = col_skip()))
  result.part6 <-
    read_csv("output/logs/SignificancyDataCol_result.part6.csv",
             col_types = cols(X1 = col_skip()))
  result.part7 <-
    read_csv("output/logs/SignificancyDataCol_result.part7.csv",
             col_types = cols(X1 = col_skip()))
  result.part8 <-
    read_csv("output/logs/SignificancyDataCol_result.part8.csv",
             col_types = cols(X1 = col_skip()))
  result.part9 <-
    read_csv("output/logs/SignificancyDataCol_result.part9.csv",
             col_types = cols(X1 = col_skip()))
  # Stitch all chunks back into one table.
  r <-
    rbind(
      result.part0,
      result.part1,
      result.part2,
      result.part3,
      result.part4,
      result.part5,
      result.part6,
      result.part7,
      result.part8,
      result.part9,
      result.part10
    )
  r
}
runmRNA <-
  function(startAt,
           StopAt,
           significancyTestResults,
           mrna_mirna_mapping)
  {
    # Pair each mRNA row in [startAt, StopAt] of the significance table
    # with the significance rows of the miRNAs mapped to that mRNA.
    #
    # For every mRNA probe one "target" row is emitted; for every mapped
    # miRNA whose (normalized) name exactly matches a probe in the table,
    # an additional row carrying both the miRNA ("Prob*") and the mRNA
    # ("target*") statistics is emitted.
    #
    # Args:
    #   startAt, StopAt: inclusive row range to process.
    #   significancyTestResults: per-probe t-test results (probName,
    #       Ftest, Ttest, Diff, P_Val, P_Val_Less, P_Val_Greater, ...).
    #   mrna_mirna_mapping: map with columns mRNA and miRNA.
    # Returns: data frame in the cross-table schema (see caller).
    #
    # NOTE(review): rbind inside the loops grows the result quadratically;
    # consider accumulating rows in a list and binding once.
    result.part <-
      data.frame(
        ProbName = character(0),
        Ftest = character(0),
        Ttest = character(0),
        Diff = numeric(0),
        P_Val = numeric(0),
        P_Val_Less = numeric(0),
        P_Val_Greater = numeric(0),
        targetName = character(0),
        targetFtest = character(0),
        targetTtest = character(0),
        targetDiff = numeric(0),
        targetP_Val = numeric(0),
        targetP_Val_Less = numeric(0),
        targetP_Val_Greater = numeric(0),
        relation_type = character(0)
      )
    sample <- result.part[1, ]
    # Local alias; NOTE(review): shadows base::subset within this function.
    subset <- significancyTestResults
    for (i in startAt:StopAt)
    {
      cat("\014")  # clear console before printing progress
      cat(paste("..", i, ".."))
      # Strip the ".NN" version suffix from the mRNA probe id before
      # searching the map.
      phrase <- significancyTestResults$probName[i]
      phrase <- str_sub(phrase, 0, str_locate(phrase, "\\.")[1] - 1)
      # Emit the mRNA row itself (target side only).
      sample$targetName <- significancyTestResults$probName[i]
      sample$targetFtest <- significancyTestResults$Ftest[i]
      sample$targetTtest <- significancyTestResults$Ttest[i]
      sample$targetDiff <- significancyTestResults$Diff[i]
      sample$targetP_Val <- significancyTestResults$P_Val[i]
      sample$targetP_Val_Less <- significancyTestResults$P_Val_Less[i]
      sample$targetP_Val_Greater <-
        significancyTestResults$P_Val_Greater[i]
      result.part <- rbind(result.part, sample)
      # miRNAs that the map lists as targeting this mRNA.
      search.results <-
        na.omit(mrna_mirna_mapping [grep(phrase, mrna_mirna_mapping$mRNA), ])
      if (nrow(search.results) > 0)
      {
        for (y in 1:nrow(search.results))
        {
          cat("\014")
          cat(paste("..", i, "..", nrow(search.results) , "/", y))
          # Normalize the miRNA name: lower-case, then trim trailing
          # arm/variant suffixes until at most three '-' remain.
          mirnaname <- str_to_lower(search.results$miRNA[y])
          if (str_count(mirnaname, '-') == 3)
          {
            mirnaname <- str_sub(mirnaname, 0, str_length(mirnaname) - 2)
            if (str_count(mirnaname, '-') == 3)
            {
              mirnaname <- str_sub(mirnaname, 0, str_length(mirnaname) - 1)
              if (str_count(mirnaname, '-') == 3)
              {
                mirnaname <- str_sub(mirnaname, 0, str_length(mirnaname) - 1)
              }
            }
          }
          # Candidate probes containing the normalized miRNA name.
          mrna.search.results <-
            subset [grep((mirnaname), subset$probName, fixed = TRUE), ]
          # NOTE(review): the next line computes a length and discards it
          # (no-op left over from debugging).
          length(mrna.search.results[, 1])
          if (nrow(mrna.search.results) > 0)
          {
            for (k in 1:nrow(mrna.search.results))
            {
              # Keep only exact name matches among the grep candidates.
              if ((mrna.search.results$probName[k]) == mirnaname)
              {
                sampleRow <- sample
                sampleRow$ProbName <- mrna.search.results$probName[k]
                sampleRow$Ftest <- mrna.search.results$Ftest[k]
                sampleRow$Ttest <- mrna.search.results$Ttest[k]
                sampleRow$Diff <- mrna.search.results$Diff[k]
                sampleRow$P_Val <- mrna.search.results$P_Val[k]
                sampleRow$P_Val_Less <-
                  mrna.search.results$P_Val_Less[k]
                sampleRow$P_Val_Greater <-
                  mrna.search.results$P_Val_Greater[k]
                result.part <- rbind(result.part, sampleRow)
              }
            }
          }
        }
      }
    }
    result.part
  }
# write.csv(file="~/Documents/Melonoma_TCGA-SKCM/2019_June/ttest.results_mirna_mrna.csv",r)
# result <- read.csv(file="~/Documents/Melonoma_TCGA-SKCM/2019_June/ttest.results_mirna_mrna.csv")
flagCrossTableOfSignificancyResults <- function(result) {
  # Classify each probe/target pair in the significance cross table.
  #
  # Columns added/filled:
  #   prob_p:   "L" when the probe is significant (Diff == 1) and
  #             P_Val_Less > P_Val_Greater, "H" when significant the other
  #             way, NA when not significant or ProbName is NA.
  #   target_p: same rule on the target* columns.
  #   relation_type: factor with levels 1..9 encoding the (prob_p,
  #             target_p) combination: 1=H/H, 2=H/L, 3=NA/L, 4=NA/H,
  #             5=L/H, 6=L/L, 7=L/NA, 8=H/NA, 9=NA/NA.
  #
  # Args:
  #   result: cross table as produced by
  #           getCrossTableForSignificancyResults(), with columns
  #           ProbName, Diff, P_Val_Less, P_Val_Greater, targetName,
  #           targetDiff, targetP_Val_Less, targetP_Val_Greater.
  # Returns: `result` with prob_p, target_p and relation_type filled in.
  #
  # Vectorized replacement of the original per-row loop (same mapping,
  # no console progress output; NA Diff flags are treated as not
  # significant instead of raising an error).
  # NOTE(review): the H/L convention here is inverted relative to
  # getGeneMethylationDetailedResults — confirm which one is intended.
  n <- nrow(result)
  prob_p <- rep(NA_character_, n)
  target_p <- rep(NA_character_, n)

  # Direction is defined only for significant rows with a non-NA name.
  probe_sig <- !is.na(result$ProbName) &
    !is.na(result$Diff) & result$Diff == 1
  prob_p[probe_sig] <-
    ifelse(result$P_Val_Less[probe_sig] > result$P_Val_Greater[probe_sig],
           "L", "H")

  target_sig <- !is.na(result$targetName) &
    !is.na(result$targetDiff) & result$targetDiff == 1
  target_p[target_sig] <-
    ifelse(result$targetP_Val_Less[target_sig] >
             result$targetP_Val_Greater[target_sig],
           "L", "H")

  # Encode the 3x3 grid of (probe, target) directions as types 1..9.
  # The nine masks are mutually exclusive and exhaustive.
  code <- rep(NA_integer_, n)
  code[prob_p %in% "H" & target_p %in% "H"] <- 1L  # miRNA H / mRNA H
  code[prob_p %in% "H" & target_p %in% "L"] <- 2L  # miRNA H / mRNA L
  code[is.na(prob_p)   & target_p %in% "L"] <- 3L  # miRNA NA / mRNA L
  code[is.na(prob_p)   & target_p %in% "H"] <- 4L  # miRNA NA / mRNA H
  code[prob_p %in% "L" & target_p %in% "H"] <- 5L  # miRNA L / mRNA H
  code[prob_p %in% "L" & target_p %in% "L"] <- 6L  # miRNA L / mRNA L
  code[prob_p %in% "L" & is.na(target_p)]   <- 7L  # miRNA L / mRNA NA
  code[prob_p %in% "H" & is.na(target_p)]   <- 8L  # miRNA H / mRNA NA
  code[is.na(prob_p)   & is.na(target_p)]   <- 9L  # miRNA NA / mRNA NA

  result$prob_p <- prob_p
  result$target_p <- target_p
  result$relation_type <- factor(code, levels = 1:9)
  result
}
# for(i in 1608:nrow(significancyTestResults)) #nrol(significancyTestResults))
# {
# cat("\014")
# cat(paste("..", i, ".."))
#
# # create mrna Part
# phrase<-significancyTestResults$probName[i]
#
# phrase <- str_sub(phrase, 0,str_locate(phrase,"\\.")[1]-1)
#
# sample$targetName<-significancyTestResults$probName[i]
# sample$targetFtest<-significancyTestResults$Ftest[i]
# sample$targetTtest<-significancyTestResults$Ttest[i]
# sample$targetDiff<-significancyTestResults$Diff[i]
# sample$targetP_Val<-significancyTestResults$P_Val[i]
# sample$targetP_Val_Less<-significancyTestResults$P_Val_Less[i]
# sample$targetP_Val_Greater<-significancyTestResults$P_Val_Greater[i]
# result<-rbind(result,sample)
# #search target mRNA of given mirna
# search.results<- na.omit( mrna_mirna_mapping [grep( phrase,mrna_mirna_mapping$mRNA ),])
#
# if ( nrow(search.results) >0)
# {
# for( y in 1:nrow(search.results) )
# {
#
# cat("\014")
# cat(paste("..",i,"..", nrow(search.results) ,"/",y))
# # for each mrna search if there are any significacy result
# mirnaname<-str_to_lower(search.results$miRNA[y])
# if ( str_count(mirnaname,'-') ==3)
# {
# mirnaname <- str_sub(mirnaname, 0,str_length(mirnaname)-2)
# if ( str_count(mirnaname,'-') ==3)
# {
# mirnaname <- str_sub(mirnaname, 0,str_length(mirnaname)-1)
# if ( str_count(mirnaname,'-') ==3)
# {
# mirnaname <- str_sub(mirnaname, 0,str_length(mirnaname)-1)
#
# }
# }
#
# }
# mrna.search.results<- subset [grep((mirnaname),subset$probName, fixed = TRUE ),]
#
# length(mrna.search.results[,1])
# # bind searched results
# if (nrow(mrna.search.results)>0)
# {
# for(k in 1:nrow(mrna.search.results))
# {
# if ( (mrna.search.results$probName[k]) == mirnaname)
# {
# sampleRow<-sample
# sampleRow$ProbName <- mrna.search.results$probName[k]
# sampleRow$Ftest <- mrna.search.results$Ftest[k]
# sampleRow$Ttest <- mrna.search.results$Ttest[k]
# sampleRow$Diff <- mrna.search.results$Diff[k]
# sampleRow$P_Val <- mrna.search.results$P_Val[k]
# sampleRow$P_Val_Less <- mrna.search.results$P_Val_Less[k]
# sampleRow$P_Val_Greater <- mrna.search.results$P_Val_Greater[k]
# result<-rbind(result,sampleRow)
#
# }
# }
#
#
# }
#
# }
# }
#
#
# }
# subset<-significancyTestResults[significancyTestResults$Diff==1, ]
|
5d2f2b18c90b434d84b95e79ac4cdcd7d828de0f
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/quanteda/examples/head.dfm.Rd.R
|
18a91eb9378ede7e7723865f53913505fea2921f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 294
|
r
|
head.dfm.Rd.R
|
library(quanteda)
### Name: head.dfm
### Title: Return the first or last part of a dfm
### Aliases: head.dfm tail.dfm
### Keywords: dfm

### ** Examples

# First 3 documents, nf = 5 features, of the bundled example dfm.
head(data_dfm_lbgexample, 3, nf = 5)
# Negative n: presumably drops the last 4 documents (as base head() does).
head(data_dfm_lbgexample, -4)
tail(data_dfm_lbgexample)
tail(data_dfm_lbgexample, n = 3, nf = 4)
|
0b942f035a93160c6beafac15f18ec833ad58653
|
affd25a59adb603e84ed2d8da6ca982c1ef26fbe
|
/Beispiel03.r
|
8cc44dbe49aaae8dcfb849738adbaf147949ebe4
|
[] |
no_license
|
NightRoadIx/ICTmetR
|
20da7d9d064b71c37bbd551d659d8c69d3d704c8
|
b6792bcdf495ae26ac070bddc7d2549e386641f3
|
refs/heads/master
| 2021-07-07T23:38:56.377212
| 2020-07-14T05:16:00
| 2020-07-14T05:16:00
| 130,419,610
| 0
| 1
| null | 2020-06-23T04:01:57
| 2018-04-20T22:33:44
|
R
|
UTF-8
|
R
| false
| false
| 3,593
|
r
|
Beispiel03.r
|
# Install the package for the rscopus library, for data from the SCOPUS database
# install.packages("rscopus")
##########################################################################################
# Install the package for the rplos library, for data from the PLOS ONE database
install.packages("rplos")
# Other packages that may be installed
install.packages("readr") # For reading data
install.packages("plyr") # For data wrangling
install.packages("dplyr") # For data wrangling
install.packages("tidyr") # For tidying data
install.packages("stringr") # For string manipulation (very important)
install.packages("tm") # For text mining
install.packages("XML") # For handling XML text
# Now load the libraries
library('ggplot2')
# Use the PLOS library
library(rplos)
library(readr)
library(plyr) # Load this library before dplyr to avoid errors
library(dplyr)
library(tidyr)
library(stringr)
library(tm)
library(XML)
# Get the fields that can be searched through the rplos library
head(plosfields)
# or via
plosfields
# Run the search, requesting all the fields to keep
res <- searchplos('optical phantom hydrogel', 'id,author, title, alternate_title, title_display, publication_date,cross_published_journal_key,journal', limit = 50)
# help for the function can be requested via '?'(searchplos)
# ask what class the result is
class(res)
# or its structure
str(res)
# Inspect the metadata
head(res$meta) # = res[[1]]
# Number of items found
res$meta$numFound # = res[[1]][[1]]
# Inspect the data
head(res$data)
# Search several terms and keep them in a vector of metadata queries
q <- c('optical','phantom','hydrogel')
# Apply the searchplos function to each term
lapply(q, function(x) searchplos(x, limit=10))
# More specific search options exist:
# By abstract (or within the abstract)
res1 <- plosabstract(q = 'optical phantom', 'id,author, title, alternate_title, title_display, publication_date,cross_published_journal_key,journal', limit = 5)
# By title
res2 <- plostitle(q = 'optical phantom', 'id,author, title, alternate_title, title_display, publication_date,cross_published_journal_key,journal', limit = 5)
# Search by terms and visualize the results
# PLOSWORD lets you search words and show a histogram of the number of items found
res3 <- plosword(list('optical phantom', 'phototherapy', 'photodynamic therapy', 'hydrogel'), vis = 'TRUE')
# A table is returned
res3$table
# Or the plot
res3$plot
# Plot how articles containing a given term have evolved over time
# (at most 2 terms can be searched)
plot_throughtime(terms = "optical phantom", limit = 200) + geom_line(size=2, color='black')
# Run a search of, for example, how many articles there are per PLOS journal
# PLOS One, PLOS Genetics, etc...
facetplos(q='*:*', facet.field='journal')
# To include a query with counts (original note was unsure — see rplos docs)
facetplos(q='*:*', facet.field='journal', facet.query='optical,phantom')
# More info
# Get the current working directory
getwd()
# put the information into a table
dat <- res$data
# Write the information to a file
write.csv(dat, "dat.csv", row.names = FALSE)
# The modifier fq = 'cross_published_journal_key:PLoSONE' can be used inside
# searchplos to restrict the search to one of the 7 PLOS journals
|
0f486c0bcdfa7b3a3dc2fc4ca06bb2df4b8eabb8
|
d3d9eb5f90468b11c62081fb65dbf353d20792d6
|
/man/obter_ipca.Rd
|
65f21547f071b4fab1e238d6d7e7a5fc53c70db4
|
[] |
no_license
|
Figuera/dadosbc
|
1e4f83f3a7936c21f7624d65d10ea9e0c5f90f6c
|
046226d1728801791f8e6a1f3d18033d26e9a3c5
|
refs/heads/master
| 2020-03-21T21:17:26.790575
| 2018-07-10T14:26:09
| 2018-07-10T14:26:09
| 139,056,226
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,137
|
rd
|
obter_ipca.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/obter_ipca.R
\name{obter_ipca}
\alias{obter_ipca}
\title{Realiza consulta de série de tempo do IPCA}
\usage{
obter_ipca(inicio = "01/01/1995", fim = Sys.Date(), tipo = c("indice",
"var"), frequencia = c("mensal", "anual"), base = 1993)
}
\arguments{
\item{inicio}{data de inicío da consulta}
\item{fim}{data final da consulta}
\item{tipo}{tipo de variavel ("indice", "var")}
\item{frequencia}{frequencia da variavel ("mensal", "anual")}
\item{base}{ano base do número indice. Somente útil quando tipo = "indice". Padrão = 1993.}
}
\value{
Série de inflação medida pelo IPCA.
}
\description{
Função consulta a série de IPCA no webservice do BC.
}
\examples{
# Obter série de ipca, em número índice, a partir de 01/01/1995
ipca <- obter_ipca()
# Obter série de ipca, em variação percentual, a partir de 01/01/1995
ipca <- obter_ipca(tipo = "var")
# Obter série anual de ipca entre 2010 e 2017
ipca <- obter_ipca(inicio = 2010, fim = 2017, frequencia = "anual")
}
\seealso{
\code{\link{obter_bcws}}
\code{\link{getValoresSeriesXML}}
}
|
6c9f949c72a02c40194722518f4a3dc3f60c2132
|
19dceb6996411251385c29d7c00ade2aae67a4ce
|
/4gini_scoring.R
|
dca745bd4669bdef598ae21f28f8e4d31410d2cc
|
[] |
no_license
|
m0rr15/NHL_metrics.R
|
2c6e7449418a5857a01a405aed2d5fca5e553c82
|
d3a80e2c58d43d47c5f446e39a7235c1ad52eb54
|
refs/heads/master
| 2020-04-15T11:15:38.595664
| 2019-03-19T08:07:47
| 2019-03-19T08:07:47
| 164,622,596
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,135
|
r
|
4gini_scoring.R
|
# gini_scoring.R
# Morris Trachsler, 2016
#
# "Just how important is secondary scoring in ice hockey?" I wrote this script
# to adress this question. I first collect and clean ten years of NHL data
# ("Read data", "Data Treatment" sections). I then propose a new statistic to
# measure any team's "star - dependency", the Gini coefficient of scoring. This
# metric measures the degree of inequality in a given distribution and fits our
# purpose well ("Gini coefficient" section). The subsequent analysis,
# based on our data, clearly indicates a strong POSITIVE relationship between
# the secondary scoring of a team and this team's success.
#
library("RCurl", lib.loc="~/R/win-library/3.1")
library("rjson", lib.loc="~/R/win-library/3.1")
library("ggplot2", lib.loc="~/R/win-library/3.1")
library("lmtest", lib.loc="~/R/win-library/3.1")
library("rstudio", lib.loc="~/R/win-library/3.1")
library("graphics", lib.loc="~/R/win-library/3.1")
library("stats", lib.loc="~/R/win-library/3.1")
library("plyr", lib.loc="~/R/win-library/3.1")
library("psych", lib.loc="~/R/win-library/3.1")
library("MASS", lib.loc="C:/Program Files/R/R-3.1.1/library")
library("reshape2", lib.loc="~/R/win-library/3.1")
library("ineq", lib.loc="~/R/win-library/3.1")
library("readr", lib.loc="~/R/win-library/3.1")
library(stringr)
library(chron)
library(abind)
# library("biglm", lib.loc="~/R/win-library/3.1")
# library("bitops", lib.loc="~/R/win-library/3.1")
# library("nhlscrapr", lib.loc="~/R/win-library/3.1")
# library("dplyr", lib.loc="~/R/win-library/3.1")
# Set working directory
setwd("C:/Users/morris/Desktop/RR/2ndscoring")
# READ DATA --------------------------------------------------------------------

# Team names/abbreviations lookup table.
nhlTm <- read_delim("nhlTm.txt", "\t", escape_double = FALSE, col_names = FALSE)

# Individual player stats for the NHL seasons 0506 until 1415.
statspl05061415 <- read_delim(
  "statspl05061415.csv", ";", escape_double = FALSE,
  col_types = cols(Goalie.GAR = col_double(), drawn.GAR = col_double()))

# Individual goalie stats for the NHL seasons 0506 until 1415.
# NOTE(review): left disabled, but statsgo05061415 is referenced later in
# the Data Treatment section — re-enable this read or drop those lines.
# statsgo05061415 <- read_delim("statsgo05061415.csv", ";", escape_double=F)

# Read one season's standings file (regular season or playoffs) and stamp
# it with its numeric season code, e.g. start_year = 2005 reads
# "./standings/nhl200506standings.txt" and sets season = 20052006.
read_season_standings <- function(start_year, playoffs = FALSE) {
  suffix <- if (playoffs) "standingspo.txt" else "standings.txt"
  path <- sprintf("./standings/nhl%d%02d%s",
                  start_year, (start_year + 1) %% 100, suffix)
  standings <- read_csv(path)
  standings$season <- start_year * 10000 + start_year + 1
  standings
}

start_years <- 2005:2014

# League standings at end of regular season for each year from 0506 until 1415.
nhlstandings <- do.call(rbind, lapply(start_years, read_season_standings))

# Playoff "standings" for each season from 0506 until 1415.
nhlstandingspo <- do.call(
  rbind, lapply(start_years, read_season_standings, playoffs = TRUE))

# Voilà, we are going to work on these data frames:
# "nhlstandings":     League standings for regular seasons 0506-1415
# "nhlstandingspo":   Playoff "standings" for seasons 0506-1415
# "nhlTm":            df with names and suffixes of NHL teams, later removed
# "statspl05061415":  df with player stats for seasons 0506-1415
# DATA TREATMENT ---------------------------------------------------------------

# G_GP, A_GP, PTS_GP: a player's goals, assists and points per game.
statspl05061415$G_GP <- statspl05061415$G / statspl05061415$GP
statspl05061415$A_GP <- statspl05061415$A / statspl05061415$GP
statspl05061415$PTS_GP <- statspl05061415$PTS / statspl05061415$GP

# Attach team abbreviations to the standings tables, dropping the
# league-average summary rows first.
colnames(nhlTm) <- c("Full Name", "Tm")
colnames(nhlstandings)[2] <- "Full Name"
colnames(nhlstandingspo)[2] <- "Full Name"
nhlstandings <- nhlstandings[!(nhlstandings$`Full Name` == "League Average"), ]
nhlstandingspo <- nhlstandingspo[
  !(nhlstandingspo$`Full Name` == "League Average"), ]
nhlstandings <- merge(nhlstandings, nhlTm, by = "Full Name")
nhlstandingspo <- merge(nhlstandingspo, nhlTm, by = "Full Name")
rm(nhlTm)

# RDS: how many playoff rounds did the team reach?  Best-of-7 series:
# 0-3 wins -> out in round 1, 4-7 -> round 2, 8-11 -> round 3,
# 12-15 -> round 4, 16 wins -> Stanley Cup champion (5).
nhlstandingspo$RDS <- cut(nhlstandingspo$W,
                          breaks = c(-Inf, 3, 7, 11, 15, Inf),
                          labels = FALSE)

# Map relocated franchises onto their current abbreviation so teams can be
# tracked across seasons (Phoenix -> Arizona, Atlanta -> Winnipeg).
# `%in%` is NA-safe, matching the original which(x == ...) behavior.
fix_franchise <- function(team) {
  team[team %in% "PHX"] <- "ARI"
  team[team %in% "ATL"] <- "WPG"
  team
}
nhlstandings$Tm2 <- fix_franchise(nhlstandings$Tm)
nhlstandingspo$Tm2 <- fix_franchise(nhlstandingspo$Tm)
# NOTE(review): statsgo05061415 is only read in a commented-out line in
# the READ DATA section and will be undefined here unless re-enabled.
statsgo05061415$Tm2 <- fix_franchise(statsgo05061415$tm.fin)
statspl05061415$Tm2 <- fix_franchise(statspl05061415$tmfin)

# DIFF, DIFFG: a team's goal differential (total and per game) in a season.
# PTSG: a team's points per game in a season.
nhlstandings$DIFF <- nhlstandings$GF - nhlstandings$GA
nhlstandings$DIFFG <- nhlstandings$DIFF / nhlstandings$GP
nhlstandings$PTSG <- nhlstandings$PTS / nhlstandings$GP

# Dummy for the lockout-shortened 2012-13 season.
nhlstandings$d1213 <- 0
nhlstandings$d1213[nhlstandings$season == 20122013] <- 1

# ATOI2: average time on ice, converted from "mm:ss" text to a chron time
# by prepending a zero hour field ("00:mm:ss").
atoi_parts <- data.frame(str_split_fixed(statspl05061415$ATOI, ":", 3))
atoi_parts$X3 <- 00
atoi_parts$X4 <- do.call(paste, c(atoi_parts[c("X3", "X1", "X2")], sep = ":"))
statspl05061415$ATOI2 <- chron(times = atoi_parts$X4,
                               format = c("h:m:s"), out.format = c("h:m:s"))
rm(atoi_parts)

# pos2: collapse all forward positions to "F"; keep defensemen and goalies.
statspl05061415$pos2 <- "F"
statspl05061415$pos2[statspl05061415$pos == "D"] <- "D"
statspl05061415$pos2[statspl05061415$pos == "G"] <- "G"
# GINI Coefficients ------------------------------------------------------------
# ginis, ginispo: Create Gini coefficients for teams per season. 0 = perfect
# scoring equality since every player contributes the same amount of goals, 1 =
# perfect scoring inequality since one player alone scores all the goals.
ginis<-ddply(
subset(statspl05061415, session=="Regular"),
.(Tm2, season), summarise,
gG=ineq(G),
gA=ineq(A),
gPTS=ineq(PTS),
gG_GP=ineq(G_GP),
gA_GP=ineq(A_GP),
gPTS_GP=ineq(PTS_GP))
ginispo<-ddply(
subset(statspl05061415, session=="Playoffs"),
.(Tm2, season), summarise,
gG=ineq(G),
gA=ineq(A),
gPTS=ineq(PTS),
gG_GP=ineq(G_GP),
gA_GP=ineq(A_GP),
gPTS_GP=ineq(PTS_GP))
# nhlstandings: add gini coefficients of any team and any season
nhlstandings <- merge(nhlstandings, ginis, by=c("Tm2", "season"))
nhlstandingspo <- merge(nhlstandingspo, ginispo, by=c("Tm2", "season"))
rm(ginis, ginispo)
# GINI PLOTS -------------------------------------------------------------------
# I play a bit around looking at different relationships and different Gini
# coefficients. Later, I zoom in on the Gini Goal coefficient and run some
# regressions.
# All plots below draw jittered scatter points plus an OLS trend line
# (geom_smooth with method=lm); na.rm=TRUE silently drops missing rows.
# Scatter of Gini Goal and Goal Differential per Game
ggplot(nhlstandings) +
  geom_jitter(aes(gG,DIFFG), colour="red",shape=1,na.rm=TRUE) +
  geom_smooth(aes(gG,DIFFG), method=lm, se=FALSE,colour="orange",na.rm=TRUE) +
  # geom_jitter(aes(weakGAR,DIFFG), colour="blue",shape=1) +
  # geom_smooth(aes(weakGAR,DIFFG), method=lm, se=FALSE,colour="orange") +
  labs(x = "Gini Goals", y = "Goal Diff. per Game")
# Scatter of Gini Goal and Points won per Game
ggplot(nhlstandings) +
  geom_jitter(aes(gG, PTSG), colour="red", shape=1, na.rm=TRUE) +
  geom_smooth(aes(gG, PTSG), method=lm, se=FALSE, colour="orange", na.rm=TRUE) +
  # geom_jitter(aes(weakGAR,DIFFG), colour="blue",shape=1) +
  # geom_smooth(aes(weakGAR,DIFFG), method=lm, se=FALSE,colour="orange") +
  labs(x = "Gini Goals", y = "Points per Game won")
# Scatter of Gini Assist and Goal Differential per Game
ggplot(nhlstandings) +
  geom_jitter(aes(gA,DIFFG), colour="red",shape=1,na.rm=TRUE) +
  geom_smooth(aes(gA,DIFFG), method=lm, se=FALSE,colour="orange",na.rm=TRUE) +
  # geom_jitter(aes(weakGAR,DIFFG), colour="blue",shape=1) +
  # geom_smooth(aes(weakGAR,DIFFG), method=lm, se=FALSE,colour="orange") +
  labs(x = "Gini Assists", y = "Goal Diff. per Game")
# Scatter of Gini Points and Goal Differential per Game
ggplot(nhlstandings) +
  geom_jitter(aes(gPTS,DIFFG), colour="red",shape=1,na.rm=TRUE) +
  geom_smooth(aes(gPTS,DIFFG), method=lm, se=FALSE,colour="orange",na.rm=TRUE) +
  # geom_jitter(aes(weakGAR,DIFFG), colour="blue",shape=1) +
  # geom_smooth(aes(weakGAR,DIFFG), method=lm, se=FALSE,colour="orange") +
  labs(x = "Gini Points", y = "Goal Diff. per Game")
# Scatters of Gini Goal and Goal Differential per Game, faceted per season
ggplot(nhlstandings) + #YEARBYYEAR COMPARISON!
  geom_jitter(aes(gG,DIFFG, colour=as.character(season))) +
  geom_smooth(aes(gG,DIFFG, colour=as.character(season)), method=lm, se=FALSE) +
  facet_wrap(~season) + #, scales="free_x") +
  labs(x = "Gini Goals", y = "Goal Diff. per Game")
# Scatters of Gini Goal and Points per Game, faceted per season
ggplot(nhlstandings) + #YEARBYYEAR COMPARISON!
  geom_jitter(aes(gG,PTSG, colour=as.character(season))) +
  geom_smooth(aes(gG,PTSG, colour=as.character(season)), method=lm, se=FALSE) +
  facet_wrap(~season) + #, scales="free_x") +
  labs(x = "Gini Goals", y = "Points per Game")
# The scatterplots clearly illustrate a solid negative relationship between
# scoring inequality and team success. Time to run some regressions:
# GINI REGRESSIONS--------------------------------------------------------------
# Linear Regression of DIFFG, PTSG on gini goals
lmgG1 <- lm(DIFFG~gG,data=nhlstandings)
summary(lmgG1)
lmgG2 <- lm(PTSG~gG,data=nhlstandings)
summary(lmgG2)
# The linear regressions prove the point: There's a solid negative relationship
# between the gini coefficient of scoring and team success. To interpret the
# estimate is not straightforward since our independent var is a coefficient
# (what does a 0.1 point decrease in our gini coeff. mean?). Nonetheless, our
# results imply that secondary scoring contributes to a team's overall success.
|
a72f467e152210df7bb7d3cd9697a1b2fb246093
|
22f73b20031c768f7e6d9b20fd02504c29f60ac2
|
/scripts/generate_samples.R
|
5fea5b328d89f368110333628ad8c7c759c182bb
|
[] |
no_license
|
tlortz/forecasting-package-study
|
7944927c1e39a799560f00c0661c09e02ca50cc1
|
3ed284d4ea48180c36c7aee1af9f3a0445b8b58e
|
refs/heads/master
| 2020-04-05T05:53:49.420895
| 2019-02-09T18:25:53
| 2019-02-09T18:25:53
| 156,616,233
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,928
|
r
|
generate_samples.R
|
# Generate forecast-comparison samples for every station, one batch at a time,
# and append the combined results to a CSV. Depends on fn_get_raw_data(),
# fn_fill_ts_na() and fn_compare_forecasts_main() from compare_forecasts.R.
source('scripts/compare_forecasts.R')
library(data.table)
raw_data = fn_get_raw_data()
clean_data = fn_fill_ts_na(raw_data)  # fill time-series gaps before modelling
rm(raw_data)
gc()
# Unique station identifiers; the comparison is run once per station below.
station_names = clean_data %>% select(StationName) %>% unique()
station_names = as.vector(station_names[['StationName']])
model_results_list = list()
for (s in station_names){
  # NOTE(review): seeding from the wall clock makes each run non-reproducible
  # on purpose (fresh random samples per batch) -- confirm this is intended.
  set.seed(as.integer(Sys.time()))
  model_results_list[[s]] =
    fn_compare_forecasts_main(raw_data = clean_data[clean_data$StationName==s,],
                              arg_train_lengths = c(24,168,168*4,168*4*4),
                              arg_test_lengths = c(4,24,168,168*4),
                              arg_num_reps = 3)
  gc()
}
# had to limit the number of reps per batch to 3-5; otherwise, Prophet would almost certainly
# throw a c++ sampler error before finishing the execution. Even at the 3-5 rep range, it would
# still throw the error, but much less often. This is apparently a known issue per
# https://github.com/facebook/prophet/issues/93
model_results = do.call(rbind,model_results_list)
# Output file is bumped manually per batch; earlier batches kept for reference.
# fwrite(model_results,file = "data/output_01.csv")
# fwrite(model_results,file = "data/output_02.csv")
# fwrite(model_results,file = "data/output_03.csv")
# fwrite(model_results,file = "data/output_04.csv")
# fwrite(model_results,file = "data/output_05.csv")
fwrite(model_results,file = "data/output_06.csv")
# model_results = fn_compare_forecasts_main(raw_data = clean_data,
#                                           arg_train_lengths = c(24,168,168*4,168*4*4),
#                                           arg_test_lengths = c(4,24,168,168*4),
#                                           arg_num_reps = 10)
# model_results_2 = fn_compare_forecasts_main(raw_data = clean_data,
#                                             arg_train_lengths = c(168),
#                                             arg_test_lengths = c(24),
#                                             arg_num_reps = 1)
|
eb1007d2eb372dee7cbf17ba97eb9e2cf6916e2d
|
9781c5384830f3a9074793f42af3062b58516714
|
/man/SafeMerge.Rd
|
c32774b8aa805219930dc56615c8ed9169ee724a
|
[] |
no_license
|
ohumblet/SafeMerge
|
4404d722235ecdb41ff0cd4b71cc21beb1ce79c8
|
c04474c1ff8dc9ca0f22a8f3373ead95c77648ec
|
refs/heads/master
| 2021-01-23T07:33:48.848753
| 2015-09-25T05:49:49
| 2015-09-25T05:49:49
| 32,885,216
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,357
|
rd
|
SafeMerge.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/SafeMerge.R
\name{SafeMerge}
\alias{SafeMerge}
\title{Safer merging of two data frames.}
\usage{
SafeMerge(x, y, by = NULL, by.x = by, by.y = by, all = FALSE,
all.x = all, all.y = all, sort = TRUE, suffixes = c(".x", ".y"),
incomparables = NULL, verbose = FALSE, ...)
}
\arguments{
\item{x}{Identical to base merge function.}
\item{y}{Identical to base merge function.}
\item{by}{specifications of the columns used for merging (See Details), but differs from base function in that default is NULL, to prevent inadvertent merging without a by variable.}
\item{by.x}{Identical to base merge function.}
\item{by.y}{Identical to base merge function.}
\item{all}{Identical to base merge function.}
\item{all.x}{Identical to base merge function.}
\item{all.y}{Identical to base merge function.}
\item{sort}{Identical to base merge function.}
\item{suffixes}{Identical to base merge function.}
\item{incomparables}{Identical to base merge function.}
\item{verbose}{Logical; default = FALSE; if TRUE then more diagnostic information is printed (under development).}
}
\value{
Returns the data frame resulting from the merge of the two input data frames. The functionality is mostly identical to 'merge', except as described in 'Details'.
}
\description{
A wrapper for the merge function, for interactive use. Prints information and warnings about the merge, then conducts the merge in question (with functionality identical to 'merge', except as described in 'Details').
}
\details{
The functionality is mostly identical to 'merge', except that the by variable is set to NULL by default, and there is an additional parameter 'verbose'. The effect of the by variable being set to NULL as a default means that failure to specify a by variable will result in an error. This function will not seek to guess which variables you mean to merge on, i.e. by checking which variables are present in both data frames. The by variable must be specified. \cr
Assumes that either a 'by' variable is specified, or that both 'by.x' and 'by.y' are specified. May not handle the edge case where 'by' and one of 'by.x' or 'by.y' are provided.
}
\examples{
x <- data.frame(id = 1:10, xval = 10:1)
y <- data.frame(id = 1:5, yval = 15:11)
SafeMerge(x, y, by = "id")
}
\author{
Olivier Humblet
}
|
1bbd07bf9d91a7e2b1f2b480e33275eeaf32a2a5
|
3ff2900210f84c69f4f3a299598076c687a00d16
|
/old files/Functions/spline_state level.R
|
132b22e62841e6f4294a9bc6e1b8387dc96123ac
|
[] |
no_license
|
sjweston/Rapid-R3-Website
|
424db286b58e8b6383ed8cb5de563b38cdde38bc
|
f75e0ff4da7179e142316140b268118b525096d4
|
refs/heads/master
| 2023-05-27T06:27:41.293523
| 2021-05-24T18:20:13
| 2021-05-24T18:20:13
| 257,148,940
| 0
| 1
| null | 2021-05-24T18:20:14
| 2020-04-20T02:17:58
|
HTML
|
UTF-8
|
R
| false
| false
| 2,284
|
r
|
spline_state level.R
|
## Fit a two-piece linear spline (knot at `point`) to weekly state-level means
## of `outcome`, separately by `group`, and return the model together with a
## formatted coefficient table and an interactive plot.
##
## Args:
##   data:    data frame with columns `state`, `Week`, the outcome column and
##            the grouping column
##   outcome: unquoted outcome column name
##   point:   week at which the spline knot is placed
##   group:   unquoted grouping column name; a group coded exactly {-1, 1} is
##            treated as a contrast and plotted with a two-colour scheme
##
## Returns a list: model (lm fit), summary (kable table), plot (plotly object),
## plotdata (data behind the plot).
state_splines.groups <- function(data, outcome, group, point) {
  # Human-readable labels built from the bare column names.
  group.name <- deparse(substitute(group))
  group.label <- gsub("_", " ", group.name)
  group.label <- stringr::str_to_sentence(group.label)
  outcome.label <- deparse(substitute(outcome))
  outcome.label <- gsub("_", " ", outcome.label)
  outcome.label <- stringr::str_to_sentence(outcome.label)
  # Detect contrast coding: the group takes both values -1 and 1.
  group.levs <- unique(as.data.frame(data[, group.name]))
  contrast <- sum(group.levs[, 1] %in% c(-1, 1)) == 2
  # Weekly state-level means of the outcome, plus the spline basis:
  # SL1 counts weeks before the knot, SL2 counts weeks after it.
  data <- data %>%
    filter(!is.na({{group}})) %>%
    filter(Week > 0) %>%
    group_by(state, Week, {{group}}) %>%
    summarise_at(vars({{outcome}}), mean, na.rm = TRUE) %>%
    ungroup() %>%
    mutate(SL1 = ifelse(Week <= point, Week - point, 0),
           SL2 = ifelse(Week > point, Week - point, 0))
  # Piecewise-linear model with group interactions on both spline segments.
  reg.formula <- as.formula(paste0(deparse(substitute(outcome)),
                                   " ~ SL1*",
                                   deparse(substitute(group)),
                                   "+ SL2*",
                                   deparse(substitute(group))))
  model <- lm(reg.formula, data)
  # Tidy the coefficients, order them by spline segment for readability, and
  # format as a styled kable table.
  mod.summary <- broom::tidy(model)
  mod.summary <- mod.summary %>%
    mutate(section = case_when(
      grepl("SL1", term) ~ "SL1",
      grepl("SL2", term) ~ "SL2",
      TRUE ~ "Intercept"
    )) %>%
    arrange(section, term) %>%
    select(-section) %>%
    kable(., digits = 2) %>%
    kable_styling()
  data$pred <- predict(model)
  # Observed weekly means (points) with the fitted spline (lines), one colour
  # per group level.
  plot <- data %>%
    filter(!is.na({{outcome}})) %>%
    ggplot(aes(x = Week, color = as.factor({{group}}))) +
    geom_point(aes(y = {{outcome}}), alpha = .5) +
    geom_line(aes(y = pred)) +
    scale_x_continuous(breaks = c(1:max(data$Week))) +
    labs(y = outcome.label) +
    theme_pubclean()
  if (contrast) {
    plot <- plot +
      scale_color_manual(group.label,
                         values = c("red", "darkgrey"),
                         labels = c("Group", "Sample Average"))
  } else {
    plot <- plot +
      scale_color_brewer(group.label, palette = "Set2")
  }
  plotdata <- plot$data
  plot <- ggplotly(plot)
  list(model = model,
       summary = mod.summary,
       plot = plot,
       plotdata = plotdata)
}
|
9ae9065cba420d255f9feca66a7c332b5087e55a
|
982415a4fbff0d1291926e3a857f322531fa9a98
|
/R/number_tides.R
|
8639f897a04fbfc74bf196836bfff32098fa6884
|
[] |
no_license
|
ScenicVerve/VulnToolkit
|
89554775667054ae0cccd3dc9e596fc02a544713
|
e18da64ea4ca633eb49519473c4df1f3a0b4eee6
|
refs/heads/master
| 2021-01-17T22:28:51.142893
| 2015-08-12T13:26:28
| 2015-08-12T13:26:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,336
|
r
|
number_tides.R
|
### Number tidal cycles, flood tides, and ebb tides in a high-frequency
### water-level dataset, using HL() output to locate the high/low tides.
###
### Arguments:
###   "data":     full water level data frame
###   "datetime": date/time column of `data` (the "time" argument given to HL())
###   "hl":       HL() output, with columns `time` and `tide` ("H" or "L")
###
### Returns (invisibly) `data` with added columns:
###   ht.ind / lt.ind: 1 on rows that are a high / low tide, else 0
###   tide.no:         tidal-cycle number (increments at each low tide)
###   ebb.fld:         1 on ebb-tide rows, 0 on flood-tide rows
###   fld.no / ebb.no: tide number on flood / ebb rows only (NA elsewhere)
number.tides <- function(data, datetime, hl) {
  data$ht.ind <- ifelse(datetime %in% hl$time[hl$tide == "H"], 1, 0)
  data$lt.ind <- ifelse(datetime %in% hl$time[hl$tide == "L"], 1, 0)
  data$tide.no <- cumsum(data$lt.ind) # number tidal cycles
  # Number the flood and ebb tides: compare running counts of highs and lows
  # to decide whether each row sits on a rising (flood) or falling (ebb) limb.
  data$ht.temp <- cumsum(data$ht.ind)
  data$index <- seq_len(nrow(data))
  if (data$index[data$ht.temp == 1][1] - data$index[data$tide.no == 1][1] > 0) {
    # positive difference: the first low tide precedes the first high tide
    data$ebb.fld <- ifelse(data$ht.temp == data$tide.no, 1, 0) # 1 = ebb, 0 = flood
  } else {
    data$ebb.fld <- ifelse(data$ht.temp > data$tide.no, 1, 0)
  }
  data$fld.no <- ifelse(data$ebb.fld == 0, data$tide.no, NA)
  data$ebb.no <- ifelse(data$ebb.fld == 1, data$tide.no, NA)
  # Drop the helper columns before returning.
  drop <- c("ht.temp", "index")
  data <- data[, !(names(data) %in% drop)]
  invisible(data)
}
|
5d2766b3748fe89b5a8e314e685e9d9071547d55
|
fbabac8318e6c3a7c84d13f24244dee6536905ce
|
/rankall.R
|
780fb3788652bf855908036d9be7cc4b125f8dda
|
[] |
no_license
|
arun25/Hospital_data_analysis
|
eb60c382f55be56d1392b7982a8ece9e757871aa
|
1c9d40a063c2f06cafe7994d0c0989d7914098af
|
refs/heads/master
| 2021-01-11T22:05:20.657179
| 2017-01-16T17:44:14
| 2017-01-16T17:44:14
| 78,919,290
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,593
|
r
|
rankall.R
|
## Rank hospitals within every state by 30-day mortality for a given outcome.
##
## Args:
##   outcome: "heart attack", "heart failure", or "pneumonia". Any other value
##            falls through to the pneumonia column (no validation is done,
##            matching the original behaviour -- the checks were commented out).
##   num:     rank to report per state: "best", "worst", or an integer rank.
##
## Returns a named list (one element per state) of the hospital name holding
## the requested rank in that state; NA where the state has fewer hospitals
## than `num`. Reads "outcome-of-care-measures.csv" from the working directory.
rankall <- function(outcome, num = "best") {
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  # Map the outcome name to its mortality-rate column in the CSV.
  if (outcome == "heart attack") {
    outcome_column <- 11
  } else if (outcome == "heart failure") {
    outcome_column <- 17
  } else {
    outcome_column <- 23
  }
  # Rates are read as character; coerce to numeric ("Not Available" -> NA).
  data[, outcome_column] <- as.numeric(data[, outcome_column])
  s <- split(data, data$State)
  lapply(s, function(x) {
    # Drop rows with a missing rate, then order by rate with hospital name as
    # the tiebreaker. `[[` extracts plain vectors for order() (the original
    # passed one-column data frames, which order() does not support).
    desired_data <- x[!is.na(x[, outcome_column]), ]
    outcome_column_name <- names(desired_data)[outcome_column]
    hospital_column_name <- names(desired_data)[2]
    index <- order(desired_data[[outcome_column_name]],
                   desired_data[[hospital_column_name]])
    ordered_desired_data <- desired_data[index, ]
    # Translate "best"/"worst" into a numeric rank for this state.
    rank <- num
    if (is.character(rank)) {
      if (rank == "best") {
        rank <- 1
      } else if (rank == "worst") {
        rank <- nrow(ordered_desired_data)
      }
    }
    # Hospital name (column 2) at the requested rank; NA if out of range.
    ordered_desired_data[rank, 2]
  })
}
|
d33f124c77a563a8a96a41eb62073edf9ca0f73b
|
29bd7ec43e40263e62535f51169d95082d11832f
|
/man/taxon_richness.Rd
|
9071a401c09a51f42b0993d004d80dbc3df7791d
|
[] |
no_license
|
esocid/Benthos
|
171ab922b751f5b2d03caa5e36aedc239f7894d0
|
60ba98f6935ee70998e6061ca0891f32dc0a8d5d
|
refs/heads/master
| 2021-06-18T18:28:10.526456
| 2017-06-22T16:18:44
| 2017-06-22T16:18:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 617
|
rd
|
taxon_richness.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/diversity_metrics.R
\name{taxon_richness}
\alias{taxon_richness}
\title{Taxon Richness}
\usage{
taxon_richness(long, taxon, low.res.rank, high.res.rank)
}
\arguments{
\item{long}{Taxonomic counts arrange in a long data format (i.e., each
row represents a unique sample and taxon).}
\item{rank}{The taxonomic rank used to perform the analysis. This
function requires a rank below the Order level taxonomic classification.}
}
\value{
The number of unique taxa identified at the specified taxonomic rank.
}
\description{
Taxon Richness
}
|
707c043f50a72df06c602181b4934bc926f3405d
|
5e4199af14b7b6cac8841cbf320c88b6787b420a
|
/dplyr_data_table.R
|
cc1ab739a1f75983ce5c6c02b0c0a5fd60167540
|
[] |
no_license
|
mfarr76/DataScience
|
ed8528c572eee97423a81bb6e697ee7d2d9d1f17
|
a37bc4ef5da7a98174c36ea5835dda442cbc67ed
|
refs/heads/master
| 2021-07-10T22:38:07.734645
| 2017-12-07T13:52:39
| 2017-12-07T13:52:39
| 96,574,463
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,368
|
r
|
dplyr_data_table.R
|
#https://datascienceplus.com/best-packages-for-data-manipulation-in-r/
# Tutorial script comparing equivalent dplyr and data.table operations on a
# Medicare hospital-spending dataset fetched over the network.
# NOTE(review): rm(list = ls()) wipes the whole global environment -- a known
# anti-pattern for shared scripts; kept as-is since this is a standalone demo.
rm(list = ls())
#install.packages("compare")
library(dplyr)
library(data.table)
library(lubridate)
library(jsonlite)
library(tidyr)
library(compare)
library(ggplot2)
# Download the raw JSON (requires network access); `meta` describes columns,
# `data` holds the rows.
spending=fromJSON("https://data.medicare.gov/api/views/nrth-mfg3/rows.json?accessType=DOWNLOAD")
names(spending)
meta=spending$meta
hospital_spending=data.frame(spending$data)
# Column names come from the metadata; make.names() produces syntactic names.
colnames(hospital_spending)=make.names(meta$view$columns$name)
hospital_spending=select(hospital_spending,-c(sid:meta))  # drop bookkeeping cols
glimpse(hospital_spending)
cols = 6:11; # These are the columns to be changed to numeric.
hospital_spending[,cols] <- lapply(hospital_spending[,cols], as.character)
hospital_spending[,cols] <- lapply(hospital_spending[,cols], as.numeric)
cols = 12:13; # These are the columns to be changed to dates.
hospital_spending[,cols] <- lapply(hospital_spending[,cols], mdy)
sapply(hospital_spending,class)  # sanity-check the conversions
# data.table copy of the same data; the pairs of operations below should be
# equivalent, verified with compare().
hospital_spending_DT = data.table(hospital_spending)
class(hospital_spending_DT)
# Select one column: dplyr vs data.table.
from_dplyr = select(hospital_spending, Hospital_Name)
from_data_table = hospital_spending_DT[,.(Hospital_Name)]
compare(from_dplyr,from_data_table, allowAll=TRUE)
# Drop one column: dplyr vs data.table.
from_dplyr = select(hospital_spending, -Hospital_Name)
from_data_table = hospital_spending_DT[,!c("Hospital_Name"),with=FALSE]
compare(from_dplyr,from_data_table, allowAll=TRUE)
|
04d0480c5a335c044090aca755ccaf588921c1fd
|
1fc2af32d1b74cdaef9cc2298c4b5726b89965f0
|
/cachematrix.R
|
ca80a6d75940f360b0e464da98b1c98551e66c07
|
[] |
no_license
|
evajenseg/ProgrammingAssignment2
|
02a7e00e8e60d96674fdd2e4cc192c41e079f689
|
f604b1efe40f2f7f3cf9163d0dfb2eb3691f1c60
|
refs/heads/master
| 2021-01-20T06:42:41.762415
| 2017-05-01T12:24:31
| 2017-05-01T12:24:31
| 89,912,044
| 0
| 0
| null | 2017-05-01T09:27:59
| 2017-05-01T09:27:58
| null |
UTF-8
|
R
| false
| false
| 1,559
|
r
|
cachematrix.R
|
## x is a square invertible matrix.
## makeCacheMatrix builds the special "matrix" object used by cacheSolve;
## the object can cache its own inverse.
## cacheSolve computes the inverse of the object returned by makeCacheMatrix.
## If the inverse has already been calculated (and the matrix has not changed),
## cacheSolve retrieves the inverse from the cache instead of recomputing it.
## Create a special "matrix" object that can cache its inverse.
## Returns a list of four accessors: set()/get() for the matrix itself and
## setinv()/getinv() for its cached inverse. The cache lives in this
## function's enclosing environment via the <<- assignments.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    # Replacing the matrix invalidates any previously cached inverse.
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinv <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinv <- function() {
    cached_inverse
  }
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## Return the inverse of the special "matrix" created by makeCacheMatrix(),
## reusing a previously cached inverse when one is available; otherwise
## compute it with solve(), store it via x$setinv(), and return it.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  fresh <- solve(x$get(), ...)
  x$setinv(fresh)
  fresh
}
|
668418af7bbf7bfe1f29f232455715f5088c4d13
|
1679bf45175566aec338b46eb0c968879cae2343
|
/R/plot_cluster.R
|
e52c87c728ee9bc50ae831de3693bee2a158d01f
|
[] |
no_license
|
UW-GAC/QCpipeline
|
e485044355e74dae3de324fc5985f82b9b41cb2b
|
f60319122d0842852acaa410eb977344be2bf4dc
|
refs/heads/master
| 2021-07-10T00:17:13.832045
| 2020-05-27T00:01:50
| 2020-05-27T00:01:50
| 97,990,389
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,039
|
r
|
plot_cluster.R
|
##########
# Cluster plots for association tests
# Usage: R --args config.file < plot.cluster.R
# Produces genotype cluster plots (R vs Theta) for the lowest-p-value SNPs:
# a multi-page PDF plus a single-page PNG for the QC report.
##########
library(GWASTools)
library(QCpipeline)
sessionInfo()
# read configuration
args <- commandArgs(trailingOnly=TRUE)
if (length(args) < 1) stop("missing configuration file")
config <- readConfig(args[1])
# check config and set defaults
required <- c("annot_snp_file", "samp_geno_file", "samp_xy_file")
optional <- c("annot_snp_rsIDCol", "out_assoc_prefix", "out_plot_prefix")
default <- c("rsID", "assoc", "assoc")
config <- setConfigDefaults(config, required, optional, default)
print(config)
# variables (outer parentheses print the value while assigning)
(pathprefix <- config["out_assoc_prefix"])
(qqfname <- config["out_plot_prefix"])
# make genotypedata and intensityData
snpAnnot <- getobj(config["annot_snp_file"])
snpID <- getSnpID(snpAnnot)
chrom <- getChromosome(snpAnnot)
rsID <- getVariable(snpAnnot, config["annot_snp_rsIDCol"])
geno <- GenotypeReader(config["samp_geno_file"])
(genoData <- GenotypeData(geno, snpAnnot=snpAnnot))
xy <- IntensityReader(config["samp_xy_file"])
(xyData <- IntensityData(xy, snpAnnot=snpAnnot))
# Load association results and keep only SNPs passing the composite filter.
fname <- paste0(pathprefix, "_combined_qual_filt.RData")
combined <- getobj(fname)
combined <- combined[combined$composite.filter,]
# select pvalue to use for plots: first of LR/Wald/z that is present
(varp <- intersect(paste0(c("LR", "Wald", "z"), ".pval"), names(combined))[1])
# 27 lowest-p-value SNPs -> three 3x3 pages in the PDF.
combined.intid <- combined[order(combined[,varp]),c("snpID",varp)]
snp.intid <- combined.intid[1:27,]
pdf(paste0(qqfname, "_lowP_hits.pdf"))
par(mfrow=c(3,3))
ind <- match(snp.intid$snpID, snpID)
text <- paste(rsID[ind], "Chr", chrom[ind])
mtxt <- paste(text,"\np-value",sprintf("%.2e",snp.intid[,varp]))
## plot
genoClusterPlot(xyData,genoData, plot.type="RTheta", snp.intid$snpID, mtxt)
dev.off()
## single page png for QC report (top 9 hits only)
png(paste0(qqfname, "_lowP_hits.png"), width=720, height=720)
par(mfrow=c(3,3), mar=c(5,5,4,2)+0.1, lwd=1.5,
    cex.axis=1.5, cex.lab=1.5, cex.main=1.5)
genoClusterPlot(xyData,genoData, plot.type="RTheta", snp.intid$snpID[1:9], mtxt[1:9])
dev.off()
|
ebe4b5db74a705978ca67b88d150884f8e637c69
|
f2d0e19b55cb262c1e76dad1de21557a8f6640d1
|
/preprocessingPart1.R
|
884c512b6ec4dc74fd7e4830bf09edf7f88e5cb6
|
[] |
no_license
|
desmarais-lab/govWebsites
|
e405989a82374832e9715e08b785b13b34a85493
|
e22e3ef38d7f986c7332a6d0d04d80e8a1b0edef
|
refs/heads/master
| 2021-12-23T11:43:47.803689
| 2019-05-06T20:35:53
| 2019-05-06T20:35:53
| 80,395,954
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,209
|
r
|
preprocessingPart1.R
|
# This document creates a list of all the files that were downloaded with wget
# and then merges it with the city metadata.
# The resultant citydocs.rdata file should be used as input for convertToText.R;
# the resultant city-specific rdata files will then be processed further in
# preprocessingPart2.R.
# NOTE(review): the script uses T/F as shorthand for TRUE/FALSE throughout;
# left unchanged here, but TRUE/FALSE is safer since T/F can be reassigned.
#setwd("govWebsites")
library('stringr')
library('urltools') #used to get the domain from each url
library('tools')
# ---- #
#get metadata (loads the websiteUrls object)
load("rfiles/allURLs.rdata")
#get URLs verified through Python Selenium
websiteUrls$urls_verified <- readLines("websites/urls_verified.txt")
#get the domain from each url
websiteUrls$urls_verified <- url_parse(websiteUrls$urls_verified)$domain
#make some corrections
websiteUrls$urls_verified[websiteUrls$urls_verified=="www.atticaonline.com"] <- "attica-in.gov"
websiteUrls$urls_verified[websiteUrls$urls_verified=="www.unioncity-in.gov"] <- "unioncity-in.com"
# ---- #
path <- "/home/mneumann/hd2/govWebsites"
f <- list.files(path, recursive = T) #create a list of all files in all subdirectories
# drop paths containing non-ASCII characters
f <- f[!stringr::str_detect(f, "[^\\x00-\\x7F]")]
#file types
ext <- file_ext(f) #get file extension
# split each path at the last "/" into its folder and file name
folder <- str_split(f, "\\/(?=[^\\/]+$)", simplify = T)[,1]
filename <- str_split(f, "\\/(?=[^\\/]+$)", simplify = T)[,2]
#store objects in a data frame
d <- data.frame(path = str_c(path, f, sep = "/"),
                folder = str_c(path, folder, sep = "/"),
                filename,
                ext,
                stringsAsFactors = F)
d <- subset(d, filename != "")
#keep only txt, pdf, html, doc, or docx files
d <- d[d$ext %in% c('txt', 'pdf', 'html', 'doc', 'docx'),]
#remove some files that crash readtext
#everything in this folder causes some problems
d <- d[-which(str_detect(d$path, "/home/mneumann/hd2/govWebsites/bloomington.in.gov/trades/parcel/(.*?).pdf")),]
# escape square brackets so the paths are safe to use in regex contexts
d$path <- str_replace_all(d$path, "\\[", "\\\\[")
d$path <- str_replace_all(d$path, "\\]", "\\\\]")
#save
d$iter <- 1:nrow(d)
save(d, file = "rfiles/docnames.rdata")
# ---- #
#load the document paths
#load("rfiles/doc_chunks/docnames.rdata")
# Extract the city domain (first path component under the download root) from
# a full file path.
extractCity <- function(path){
  city <- str_replace(path, "/home/mneumann/hd2/govWebsites/", "")
  city <- str_extract(city, "^(.*?)\\/")
  city <- str_replace(city, "\\/", "")
  return(city)
}
d$city <- extractCity(d$path)
#character vector of city base urls
cities <- unique(d$city)
#kick out the documents whose city has fewer than 5 documents
citytable <- data.frame(table(d$city))
d <- d[!d$city%in%citytable$Var1[citytable$Freq<5],]
rm(citytable)
#re-do character vector of city base urls
cities <- unique(d$city)
#which chunk file is a given document in? (disabled)
# ab <- list()
# chunk_size <- 10000
# for (i in seq(1, nrow(d), chunk_size)) {
#   seq_size <- chunk_size
#   if ((i + seq_size) > nrow(d)) seq_size <- nrow(d) - i + 1
#   ab[[i]] <- paste0("rfiles/doc_chunks/parsedtexts_", i, "_", (i+seq_size-1), ".rdata")
# }
# ab <- do.call(c, ab)
# ab[length(ab)] <- "rfiles/doc_chunks/parsedtexts_1520001_1528747.rdata"
# abc <- rep(ab, each = 10000)
# d$parsedtextfile <- abc[1:nrow(d)]
# rm(ab, abc, i, seq_size, chunk_size)
# ---- #
## merge the metadata with the document paths
#test which cities still don't work: full outer join, then report cities with
#no matching metadata (NA State)
websiteUrls2 <- subset(websiteUrls, select = c(urls_verified, State))
cities2 <- data.frame(urls_verified = cities)
test <- merge(websiteUrls2, cities2, by.y = "urls_verified", all = T)
print(test$urls_verified[is.na(test$State)==T])
#manually fix these:
d$city[d$city=="charmeck.org"] <- "charlottenc.gov"
websiteUrls$urls_verified[websiteUrls$State_City=="New York_Peekskill"] <- "www.cityofpeekskill.com"
websiteUrls$urls_verified[websiteUrls$urls_verified=="springsgov.com"] <- "www.springsgov.com"
#re-do character vector of city base urls
cities <- unique(d$city)
rm(websiteUrls2, cities2, test)
#do the actual merge (full outer join keeps cities without documents)
d <- merge(d, websiteUrls, by.x = "city", by.y = "urls_verified", all = T)
#cities that still need to be downloaded/fixed (metadata but no files)
todo <- d[is.na(d$path)==T,]
#remove the above from the big data frame
d <- d[is.na(d$path)==F,]
save(d, file = "rfiles/citydocs.rdata")
save(todo, file = "rfiles/citydocs_todo.rdata")
rm(todo, websiteUrls)
|
f641ef3feb9b2b453a7362a741d03141a5e8edde
|
1c9e8cfff234c1177f36cd295f39764842bbc641
|
/man/getInvfunction-Curve-method.Rd
|
b072d5756797ffa34ddf7ca0b01fa462765e24da
|
[] |
no_license
|
cran/gestate
|
afb9e543700ad03d909336e6b08247c92423c984
|
4eda0e9fcc7af2a82cb94f1aa643330244a8bb0c
|
refs/heads/master
| 2023-05-11T15:41:19.042482
| 2023-04-26T10:02:30
| 2023-04-26T10:02:30
| 236,603,581
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 586
|
rd
|
getInvfunction-Curve-method.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/s4_architecture.R
\name{getInvfunction,Curve-method}
\alias{getInvfunction,Curve-method}
\title{Method for returning the inverse-CDF function for a Curve object}
\usage{
\S4method{getInvfunction}{Curve}(theObject, p = "p")
}
\arguments{
\item{theObject}{The name of the Curve Object}
\item{p}{The probability parameter name to use in the CDF function. Default=p}
}
\description{
This retrieves the inverse CDF function of the specified Curve object as a string
}
\examples{
getInvfunction(Weibull(100,1))
}
|
1ebb841d5dbde62d9e101e987ad018c07ae56eb9
|
20d578dc272a70fdef27325f09b1c2e2681042c4
|
/plot4.r
|
a60359a17202182666091748d7a46ae07e513675
|
[] |
no_license
|
SWest101/ExData_Plotting1
|
882e0e5cfe779abb821e1c72ac7d0774ed67ec8e
|
9adcc5bc85edb40d88472a444c9cb20f52e4589c
|
refs/heads/master
| 2021-01-12T02:48:59.800029
| 2017-01-05T12:46:18
| 2017-01-05T12:46:18
| 78,112,057
| 0
| 0
| null | 2017-01-05T12:43:02
| 2017-01-05T12:43:01
| null |
UTF-8
|
R
| false
| false
| 1,415
|
r
|
plot4.r
|
##Script to plot the graphic 'plot4.png': a 2x2 panel of time series from the
##household power consumption dataset.
#Data importing separated into its own script as it will be used for all plots.
source("dataread.R")
#Set par to a 2 x 2 matrix of graphics
par(mfrow=c(2,2))
#Top-left: timeseries plot of the Global Active Power column of the dataset
plot(date_time,raw_data$Global_active_power, type="l", ylab = "Global Active Power (kilowatts)", xlab="")
#Top-right: timeseries plot of the Voltage column of the dataset
plot(date_time,raw_data$Voltage, type="l", ylab = "Voltage", xlab="datetime")
#Bottom-left: timeseries plot of the three Sub Metering series, overlaid
plot(date_time,raw_data$Sub_metering_1, type="l", ylab = "Energy sub metering", xlab="", col="black")
lines(date_time,raw_data$Sub_metering_2, col="red")
lines(date_time,raw_data$Sub_metering_3, col="blue")
#Create the legend for the sub-metering panel
colset <- c("black","red","blue")
legend("topright", legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), bty ="n", cex = 0.8, lty = 1, col = colset)
#Bottom-right: timeseries plot of the Global Reactive Power column
plot(date_time,raw_data$Global_reactive_power, type="l", ylab = "Global Reactive Power", xlab="datetime")
#Set par back to single row & single column so that it doesn't split further graphs
par(mfrow=c(1,1))
#Copy the on-screen figure to a 480 x 480 px PNG device (plot4.png)
dev.copy(png,"plot4.png", width = 480, height = 480)
#Shut down the graphics device so the file is written
dev.off()
|
24aa9b754ed1cd7a60a7af8ce1c773a4330ed8f3
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/ChangePointTaylor/R/change_point_analyzer_function.R
|
a52b92e90de914d300fc75b79d5b4e43a9687d00
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,717
|
r
|
change_point_analyzer_function.R
|
#' @useDynLib ChangePointTaylor
#' @importFrom Rcpp sourceCpp
#' @importFrom magrittr "%>%"
#' @importFrom stats "complete.cases"
#' @importFrom stats "quantile"
#' @importFrom rlang ":="
#' @importFrom rlang "!!"
#' @importFrom rlang ".data"
NULL
if(getRversion() >= "2.15.1") utils::globalVariables(c("."))
# Build a bootstrap null distribution of the CUSUM range: each replicate
# randomly reorders the original series (sample_cpp), recomputes the CUSUM
# about the unchanged sample mean, and records its max-minus-min (S_diff).
# Returns a numeric vector of length n_smpls.
bootstrap_S_diff <- function(x, n_smpls = 1000){
  x_bar <- mean.default(x)
  n_obs <- length(x)
  vapply(
    seq_len(n_smpls),
    function(rep_ix) S_diff(cusum(sample_cpp(x, n_obs), x_bar)),
    numeric(1)
  )
}
# Estimate the confidence that at least one change in mean occurred in `x`:
# the fraction of bootstrap (reshuffled) CUSUM ranges that fall below the
# observed CUSUM range. Returns a proportion in [0, 1].
confidence_change_occurred <- function(x, n_bootstraps = 1000){
  observed_diff <- S_diff(cusum(x, mean.default(x)))
  null_diffs <- bootstrap_S_diff(x, n_bootstraps)
  sum(null_diffs < observed_diff) / n_bootstraps
}
# Once a change has been detected, an estimate of when the change occurred can be made. One such estimator is the CUSUM estimator
# Sm is the point furthest from zero in the CUSUM chart. The point m estimates last point before the change occurred. The point m+1 estimates the first point after the change.
# CUSUM estimator of the change location: the index of the point furthest
# from zero on the CUSUM chart. That index estimates the last observation
# before the change (index + 1 is the first observation after it).
Sm_ix <- function(x){
  # Drop S_0 so that position i of the path corresponds to observation i.
  cusum_path <- cusum(x, mean.default(x))[2:(length(x) + 1)]
  abs_path <- abs(cusum_path)
  # All indices attaining the maximum |S| (ties are all returned).
  which(abs_path == max(abs_path))
}
# Recursively locate candidate change points in `x` via binary segmentation.
#
# x                - numeric vector (or sub-segment) to scan.
# min_conf         - minimum bootstrap confidence for a change to be kept.
# recursive        - if TRUE, split at the detected change and scan both halves.
# mthd             - change-point estimator: "MSE" or "CUSUM".
# level            - recursion depth (1 = full series); recorded in the output.
# recursive_call   - TRUE on internal recursive invocations.
# original_indices - indices of this segment's values in the full series.
# n_bootraps       - bootstrap sample count for the confidence estimate.
#
# Returns a data.frame with columns change_ix / change_conf / level; a single
# all-NA row means no change was found in this segment.
get_change <- function(x, min_conf = 0.5, recursive = TRUE, mthd, level = 1, recursive_call = FALSE, original_indices = NA, n_bootraps = 1000){
  if(is.na(original_indices[1]) && !recursive_call){
    # Top-level call without explicit indices: assume x is the full series.
    original_x_indices <- seq_along(x)
  }else{
    original_x_indices <- original_indices
  }
  minimum_confidence <- min_conf
  change_conf <- confidence_change_occurred(x, n_bootraps)
  # Stop when confidence is too low or the segment is too short
  # (minimum segment length is 5 observations).
  if(change_conf < min_conf || length(x) < 5){
    return(data.frame('change_ix' = NA, 'change_conf' = NA, 'level' = NA))
  }
  if(mthd == "MSE"){
    change_ix <- min_MSE_ix(x)
  }else if(mthd == "CUSUM"){
    change_ix <- Sm_ix(x)
  }else{
    stop("method must be 'MSE' or 'CUSUM'")
  }
  # Translate the within-segment index back to the full-series index.
  original_x_change_ix <- original_x_indices[change_ix]
  changes <- data.frame('change_ix' = original_x_change_ix, 'change_conf' = change_conf, 'level' = level)
  if(!recursive){
    return(changes)
  }else{
    # Binary segmentation: recurse into the halves either side of the change.
    x_indices_before_change <- original_x_indices[1:(change_ix)]
    x_indices_after_change <- original_x_indices[(change_ix + 1):length(x)]
    x_before_change <- x[1:(change_ix)]
    x_after_change <- x[(change_ix + 1):length(x)]
    changes_before <- get_change(x_before_change, original_indices = x_indices_before_change, min_conf = minimum_confidence, mthd = mthd, recursive_call = TRUE, level = level + 1, n_bootraps = n_bootraps)
    changes_after <- get_change(x_after_change, original_indices = x_indices_after_change, min_conf = minimum_confidence, mthd = mthd, recursive_call = TRUE, level = level + 1, n_bootraps = n_bootraps)
    return(
      dplyr::bind_rows(changes, changes_before, changes_after) %>%
        dplyr::filter(complete.cases(.))
    )
  }
}
#make function that will speed up bootrapping CI
# Fast path used while bootstrapping confidence intervals: locate the change
# point with the requested estimator and translate the within-segment index
# back to the index in the original series. No confidence test is performed.
get_change_ix_only <- function(x,mthd, original_indices){
  section_ix <- switch(mthd,
    MSE = min_MSE_ix(x),
    CUSUM = Sm_ix(x),
    stop("method must be 'MSE' or 'CUSUM'")
  )
  original_indices[section_ix]
}
# Slice `x` into the overlapping sections needed to re-estimate each change
# point: section i runs from just after the previous change (or the start)
# through the next change (or the end), so each change sits inside a window
# bounded by its neighbours.
#
# Returns a list of parallel arguments (x, original_indices, recursive,
# min_conf, mthd, change_ix, level), filtered down to the changes selected by
# `filter_by` ("levels" or "index") and `filter_values`, ready for
# purrr::pmap over get_change().
get_change_sections <- function(x, change_df, min_conf, mthd, recursive = FALSE, filter_by, filter_values){
  sorted_change_df <- change_df %>%
    dplyr::arrange(.data$change_ix)
  change_ixs <- sorted_change_df$change_ix
  sorted_initial_change_ixs <- change_ixs
  change_sections <- list()
  # Boundary vector: section i spans boundaries i..i+2 (exclusive at the left).
  change_section_start_stops <- c(0, sorted_initial_change_ixs, length(x))
  change_section_orig_ixs <- list()
  for(i in seq_along(change_ixs)){
    change_section_seq <- (change_section_start_stops[i] + 1):(change_section_start_stops[i + 2])
    change_sections[[i]] <- x[change_section_seq]
    change_section_orig_ixs[[i]] <- change_section_seq
  }
  if(filter_by == "levels"){
    reestimate_lgl <- sorted_change_df$level %in% c(filter_values)
  }else if(filter_by == "index"){
    reestimate_lgl <- sorted_change_df$change_ix %in% c(filter_values)
  }else{
    # Previously an unrecognized filter_by fell through and failed later with
    # an "object 'reestimate_lgl' not found" error; fail fast instead.
    stop("filter_by must be 'levels' or 'index'")
  }
  return(list(x = change_sections[reestimate_lgl], original_indices = change_section_orig_ixs[reestimate_lgl]
              , recursive = recursive, min_conf = min_conf, mthd = mthd
              , change_ix = sorted_initial_change_ixs[reestimate_lgl], level = sorted_change_df$level[reestimate_lgl]))
}
# Re-estimate the change points found at the given recursion level(s).
# Changes at other levels are passed through untouched; each selected change
# is re-located on the section bounded by its neighbouring changes.
reestimate_changes_by_level <- function(x,df_changes_for_reestimation, min_conf, mthd, levels_to_reestimate){
  # Split the candidate table into levels we re-estimate vs. keep as-is.
  changes_in_non_reestimation_levels <- df_changes_for_reestimation %>%
    dplyr::filter(!(.data$level %in% c(levels_to_reestimate)))
  # NOTE(review): this subset is computed but never used below.
  changes_in_reestimation_levels <- df_changes_for_reestimation %>%
    dplyr::filter(.data$level %in% c(levels_to_reestimate))
  # Build one argument list per change: section data + its original indices.
  changes_for_reestimation <- get_change_sections(x,df_changes_for_reestimation, min_conf = min_conf, mthd = mthd, filter_values = levels_to_reestimate, filter_by = "levels")
  # print(changes_for_reestimation[c('x','original_indices','recursive','min_conf','mthd')])
  # Re-run get_change() (non-recursively, per get_change_sections' default) on
  # each section, restore the level column, and re-attach the untouched levels.
  reestimated_changes_df <- purrr::pmap_df(changes_for_reestimation[c('x','original_indices','recursive','min_conf','mthd')],get_change) %>%
    dplyr::mutate(level = changes_for_reestimation$level) %>%
    dplyr::bind_rows(changes_in_non_reestimation_levels)
  return(reestimated_changes_df)
}
# Re-estimate change points level by level following `level_sequence`
# (typically deepest level first), then collapse any duplicates that converged
# to the same index, keeping the highest confidence and shallowest level.
reestimate_change_level_seq <- function(x,df_changes_for_reestimation, min_conf, mthd, level_sequence){
  for(level_to_reestimate in level_sequence){
    df_changes_for_reestimation <- reestimate_changes_by_level(x,df_changes_for_reestimation, min_conf, mthd, level_to_reestimate) %>%
      dplyr::filter(complete.cases(.)) #sometimes we lose one due to the min 5 length segment.
  }
  # Deduplicate by change index after all levels have been re-estimated.
  df_changes_for_reestimation %>%
    dplyr::group_by(.data$change_ix) %>%
    dplyr::summarize(change_conf = max(.data$change_conf)
                     ,level = min(.data$level)) %>%
    return()
}
# Drop every change at `drop_level` whose confidence falls below the table
# threshold; changes at other levels are kept regardless of confidence.
drop_any_under_threshold_in_given_level <- function(change_df, drop_level,min_tbl_conf){
  dplyr::filter(change_df, !(.data$level == drop_level & .data$change_conf < min_tbl_conf))
}
# Backwards-elimination step: within `drop_level`, find the weakest confidence
# and drop every change at that level which matches it.
drop_lowest_change_conf_in_given_level <- function(change_df, drop_level){
  weakest_conf <- min(change_df$change_conf[change_df$level == drop_level])
  dplyr::filter(change_df, !(.data$level == drop_level & .data$change_conf == weakest_conf))
}
# Full Taylor change-point procedure: find candidate changes via recursive
# binary segmentation, then iteratively re-estimate and eliminate weak
# candidates (backwards elimination) until every remaining change clears
# `min_tbl_conf`. Returns a data.frame of changes (with labels when supplied);
# an empty data.frame means no change survived.
#
# Note: returned change_ix values are shifted by +1 so they point at the
# first observation AFTER the change.
get_all_changes <- function(x, mthd, labels = NA, n_bootraps = 1000, min_candidate_conf = 0.5, min_tbl_conf = 0.9){
  changes_for_reestimation_df <- get_change(x, min_conf = min_candidate_conf, mthd = mthd, n_bootraps = n_bootraps)
  labels_lookup_df <- data.frame('label' = labels, change_ix = 1:length(labels))
  # print(changes_for_reestimation_df %>%
  #         left_join(labels_lookup_df, by = c("change_ix"))%>%
  #         mutate(data = "Change Candidates"))
  n_change_rows <- nrow(changes_for_reestimation_df)
  # A single all-NA row is get_change()'s "no change found" sentinel.
  if(n_change_rows == 1 & is.na(changes_for_reestimation_df$change_conf[1])){
    # print("0 Changes Identified After Re-Estimation")
    return(data.frame())
  }
  # print(paste0(n_change_rows, " Candidate Change(s) Identified"))
  #if there is only one change initially, check to see if its above our tbl threshold.
  if(n_change_rows == 1){
    if(changes_for_reestimation_df$change_conf >min_tbl_conf) {
      reestimated_changes_df <- changes_for_reestimation_df %>%
        dplyr::mutate(change_ix = .data$change_ix + 1)
      changes_df_w_labels <- labels_lookup_df %>%
        dplyr::right_join(reestimated_changes_df, by = c("change_ix"))
      # print("1 Change Identified After Re-Estimation")
      return(changes_df_w_labels)
    }else{
      # print("0 Changes Identified After Re-Estimation")
      return(data.frame())
    }
  }
  # print(changes_for_reestimation_df %>%
  #         left_join(labels_lookup_df, by = c("change_ix"))%>%
  #         mutate(data = "Change Candidates"))
  # Pre-trim: at the deepest recursion level, drop anything already below the
  # table threshold before entering the elimination loop.
  changes_for_reestimation_df <- drop_any_under_threshold_in_given_level(changes_for_reestimation_df, max(changes_for_reestimation_df$level),min_tbl_conf)
  all_changepoints_above_tbl_threshold <- FALSE
  # Elimination loop: re-estimate all candidates, then drop the weakest change
  # in the deepest offending level; repeat until all survivors clear the
  # threshold. `reestimated_changes_df` is only assigned on the final pass.
  while(!all_changepoints_above_tbl_threshold){
    # print(changes_for_reestimation_df %>%
    #         left_join(labels_lookup_df, by = c("change_ix"))%>%
    #         mutate(data = "While Loop Start"))
    if(nrow(changes_for_reestimation_df) == 0){
      # All candidates were eliminated: fall back to a single non-recursive
      # scan of the whole series at the table threshold.
      # print("nrow = 0")
      changes_for_reestimation_df <- get_change(x, min_conf = min_tbl_conf, mthd = mthd, recursive = F, n_bootraps = n_bootraps) %>%
        # print() %>%
        dplyr::filter(complete.cases(.)) %>%
        dplyr::group_by(.data$change_ix) %>%
        dplyr::summarize(change_conf = max(.data$change_conf))
    }else{
      ### reestimate all change candidates top to bottom ###
      level_sequence_top_to_bottom <- max(changes_for_reestimation_df$level):min(changes_for_reestimation_df$level)
      changes_for_reestimation_df <- reestimate_change_level_seq(x,changes_for_reestimation_df, min_conf = 0, mthd = mthd, level_sequence_top_to_bottom)
    }
    all_changepoints_above_tbl_threshold <- sum(changes_for_reestimation_df$change_conf > min_tbl_conf) == nrow(changes_for_reestimation_df)
    #if all changes aren't above the change threshold, remove the change with the lowest confidence and re-estimate.
    if(!all_changepoints_above_tbl_threshold){
      # changes_for_reestimation_df <- drop_lowest_change_conf(changes_for_reestimation_df)
      #level based removal
      lowest_level_with_change_under_tbl_threshold <- max(changes_for_reestimation_df$level[changes_for_reestimation_df$change_conf < min_tbl_conf])
      # changes_for_reestimation_df <- drop_any_under_threshold_in_given_level(changes_for_reestimation_df,lowest_level_with_change_under_tbl_threshold)
      changes_for_reestimation_df <- drop_lowest_change_conf_in_given_level(changes_for_reestimation_df,lowest_level_with_change_under_tbl_threshold)
    }else{
      # Final pass: shift indices so change_ix is the first point after the change.
      reestimated_changes_df <- changes_for_reestimation_df %>%
        dplyr::mutate(change_ix = .data$change_ix + 1)
    }
  }
  message(paste0(nrow(reestimated_changes_df), " Change(s) Identified"))
  if(any(!is.na(labels))){
    changes_df_w_labels <- labels_lookup_df %>%
      dplyr::right_join(reestimated_changes_df, by = c("change_ix")) %>%
      dplyr::arrange(.data$change_ix)
    return(changes_df_w_labels)
  }else{
    message("NA supplied to 'label' argument")
    reestimated_changes_df %>%
      dplyr::mutate(label = NA) %>%
      dplyr::arrange(.data$change_ix) %>%
      return()
  }
}
# Render a proportion (e.g. 0.95) as a percentage string (e.g. "95%").
# `digits`, `format` and `...` are forwarded to formatC().
percent <- function(x, digits = 0, format = "f", ...) {
  formatted <- formatC(x * 100, format = format, digits = digits, ...)
  paste0(formatted, "%")
}
# From Dr. Taylor:
# -Again, just the two intervals each side of the change up to the closest changes are used.
# A bootstrap is a random reordering of the data on each side of the change point.
# The change-point is than reestimated. The 2.5% percentile and 97.5% percentile of the estimates are used to construct the 95% confidence interval.
# Bootstrap confidence intervals for each detected change point.
# Following Taylor: only the two segments either side of a change (up to the
# neighbouring changes) are used. Each bootstrap independently reorders the
# data on each side of the change and re-estimates the change point; the CI
# is taken from the empirical quantiles of the re-estimated positions and
# reported in terms of `labels`.
#
# chng_df  - data.frame of detected changes (must contain change_ix).
# x        - the original numeric series.
# mthd     - estimator passed to get_change_ix_only ("MSE" or "CUSUM").
# labels   - optional per-observation labels; defaults to "1".."n".
# CI_level - confidence level of the interval (e.g. 0.95).
# n_boots  - number of bootstrap reorderings per change.
get_bootstraps_CIs <- function(chng_df, x, mthd, labels = NA, CI_level = 0.95, n_boots = 1000){
  if(is.na(labels[1])){
    labels <- as.character(1:length(x))
  }
  change_ixs <- chng_df$change_ix
  sorted_initial_change_ixs <- sort(change_ixs)
  # Boundary vector: section i spans boundaries i..i+2, so every change is
  # bracketed by its neighbouring changes (or the series ends).
  change_section_start_stops <- c(1, sorted_initial_change_ixs, length(x) + 1)
  change_section_orig_ixs <- list()
  for(i in seq_along(change_ixs)){
    change_section_orig_ixs[[i]] <- (change_section_start_stops[i]):(change_section_start_stops[i + 2] - 1)
  }
  mp_bootstrap_CI <- function(change_sec_orig_ixs, chg_ix, orig_x, CI, mthd) {
    tryCatch({
      # Position of this change within its own section.
      sec_change_ix <- which(change_sec_orig_ixs == chg_ix)
      # BUG FIX: the original used `1:sec_change_ix-1`, i.e.
      # (1:sec_change_ix) - 1, which only behaved because R silently drops a
      # 0 subscript. seq_len() expresses the intent directly and is empty
      # when the change sits at the very start of the section.
      pre_change_ixs <- change_sec_orig_ixs[seq_len(sec_change_ix - 1)]
      post_change_ixs <- change_sec_orig_ixs[sec_change_ix:length(change_sec_orig_ixs)]
      # NOTE(review): sample(v) on a length-1 numeric permutes 1:v — a latent
      # base-R footgun present in the original; behaviour preserved as-is.
      bootstraped_change_ixs <- purrr::map_int(seq_len(n_boots), function(i) get_change_ix_only(orig_x[c(sample(pre_change_ixs), sample(post_change_ixs))], mthd = mthd, original_indices = change_sec_orig_ixs))
      CI_labels <- labels[quantile(bootstraped_change_ixs + 1, c((1 - CI)/2, 1 - ((1 - CI)/2)), na.rm = TRUE)]
      # If the change butts up against another change, a quantile can index
      # outside the label range and return NA; fall back to the change's own label.
      CI_labels <- tidyr::replace_na(CI_labels, labels[chg_ix])
      CI_labels <- CI_labels[order(match(CI_labels, labels))] # sort into original label order
      CI_labels_str <- paste0("(", paste0(CI_labels, collapse = " - "), ")")
      return(CI_labels_str)},
      error = function(e) conditionMessage(e))
  }
  CIs <- purrr::map2_chr(change_section_orig_ixs, sorted_initial_change_ixs, mp_bootstrap_CI, orig_x = x, CI = CI_level, mthd = mthd)
  # Attach the interval as a dynamically named column, e.g. "CI (95%)".
  chng_df %>%
    dplyr::mutate(!!as.name(paste0("CI (", percent(CI_level), ")")) := CIs)
}
# Annotate each change with the mean of the segment before it (`From`) and
# the mean of the segment after it (`To`).
#
# chng_df - data.frame of changes (must contain change_ix).
# x       - the original numeric series.
get_change_levels <- function(chng_df, x){
  change_ixs <- unique(chng_df$change_ix)
  sorted_initial_change_ixs <- sort(change_ixs)
  # n changes split x into n + 1 segments; boundary i starts segment i.
  change_section_start_stops <- c(1, sorted_initial_change_ixs, length(x) + 1)
  change_sections <- list()
  for(i in seq_len(length(change_ixs) + 1)){
    change_section_seq <- (change_section_start_stops[i]):(change_section_start_stops[i + 1] - 1)
    change_sections[[i]] <- x[change_section_seq]
  }
  section_means <- purrr::map_dbl(change_sections, mean.default)
  # Segment i is the "from" level of change i; segment i + 1 is its "to"
  # level. Vectorized (the original grew `from`/`to` from NA in a loop).
  n_changes <- length(change_ixs)
  from <- section_means[seq_len(n_changes)]
  to <- section_means[seq_len(n_changes) + 1]
  # From/To are aligned with sorted change indices, so sort the table first.
  chng_df %>%
    dplyr::arrange(.data$change_ix) %>%
    dplyr::mutate(From = from) %>%
    dplyr::mutate(To = to)
}
#' change_point_analyzer
#'
#'
#' a simple implementation of the change in mean detection \href{https://variation.com/wp-content/uploads/change-point-analyzer/change-point-analysis-a-powerful-new-tool-for-detecting-changes.pdf}{methods} developed by Wayne Taylor and utilized in his \href{https://variation.com/product/change-point-analyzer/}{Change Point Analyzer} software. The package recursively uses the 'MSE' change point calculation to identify candidate change points. Taylor's backwards elimination process is then employed to come up with a final set of change points.
#'
#' @param x a numeric vector
#' @param labels a vector the same length as \code{x}. Will generate labels for the change points in the output dataframe.
#' @param n_bootstraps an integer value. Determines the number of bootstraps when calculating the change confidence level.
#' @param min_candidate_conf a value between 0 and 1. The minimum change confidence level to become a candidate change point before re-estimation and backwards elimination.
#' @param min_tbl_conf a value between 0 and 1. The minimum change confidence level below which a candidate change point will be eliminated after re-estimation and backwards elimination.
#' @param CI a value between 0 and 1. The value of the confidence interval.
#'
#' @return a dataframe containing the change points, their confidence levels, and other relevant information
#' @export
#'
#' @references \href{https://variation.com/wp-content/uploads/change-point-analyzer/change-point-analysis-a-powerful-new-tool-for-detecting-changes.pdf}{Taylor, W. A. (2000). Change-point analysis: a powerful new tool for detecting changes.}
#'
#' @examples
#' x <- US_Trade_Deficit$deficit_billions
#' label_vals <- US_Trade_Deficit$date
#'
#' change_point_analyzer(x)
#'
#' change_point_analyzer(x, label = label_vals)
#'
#' change_point_analyzer(x, label = label_vals, n_bootstraps = 10000)
#'
#' change_point_analyzer(x, label = label_vals, min_candidate_conf = 0.66, min_tbl_conf = 0.95)
change_point_analyzer <- function(x, labels = NA, n_bootstraps = 1000, min_candidate_conf = 0.5, min_tbl_conf = 0.9, CI = 0.95){
  # ---- input validation --------------------------------------------------
  if(!is.numeric(x) | length(x) < 5){
    stop("Invalid x argument. 'x' must be a numeric vector with length(x) >= 5")
  }
  if(!is.na(labels[1]) & length(x) != length(labels)){
    stop("Invalid labels argument. length(x) != length(labels)")
  }
  if(!is.numeric(n_bootstraps) | !dplyr::between(n_bootstraps, 100,1000000)){
    stop("Invalid n_bootraps argument. n_bootraps must be a numeric value between 100 and 1,000,000")
  }
  if(!is.numeric(min_candidate_conf) | !dplyr::between(min_candidate_conf, 0.3,1)){
    stop("Invalid min_candidate_conf argument. min_candidate_conf must be a numeric value between 0.3 and 1")
  }
  if(!is.numeric(min_tbl_conf) | !dplyr::between(min_tbl_conf, 0.5,1)){
    stop("Invalid min_tbl_conf argument. min_tbl_conf must be a numeric value between 0.5 and 1")
  }
  # BUG FIX: this check previously re-validated min_tbl_conf against the
  # 0.9-0.999 range instead of CI, so a documented-valid min_tbl_conf < 0.9
  # spuriously raised the CI error while an out-of-range CI slipped through.
  # CI = NA (skip the interval — see below) remains legal.
  if(!is.na(CI) && (!is.numeric(CI) || !dplyr::between(CI, 0.9, 0.999))){
    stop("Invalid CI argument. CI must be a numeric value between 0.9 and 0.999")
  }
  # Only the MSE estimator is exposed through the public interface.
  method <- "MSE"
  tryCatch({
    # Detect changes. FIX: forward the caller's bootstrap count — previously
    # the default of 1000 was always used for detection regardless of
    # n_bootstraps (which only affected the CI bootstraps).
    all_changes_df <- get_all_changes(x, mthd = method, labels, n_bootraps = n_bootstraps, min_candidate_conf = min_candidate_conf, min_tbl_conf = min_tbl_conf)
    # No changes found: return a single all-NA row with the expected columns.
    if(nrow(all_changes_df)==0){
      data.frame(change_ix = NA, label = NA, CI_init = NA
                 , change_conf = NA, From = NA, To= NA ) %>%
        dplyr::rename(!!as.name(paste0("CI (",percent(CI),")")) := .data$CI_init) %>%
        return()
    }else{
      if(is.na(CI)){
        # CI = NA disables the bootstrap interval entirely.
        all_changes_df_with_CI <- all_changes_df %>%
          dplyr::mutate(CI = NA)
      }else{
        all_changes_df_with_CI <- all_changes_df %>%
          get_bootstraps_CIs(x, mthd = method, labels, CI , n_boots = n_bootstraps)
      }
      # Attach segment means (From/To) and order the output columns.
      all_changes_df_with_CI %>%
        get_change_levels(x) %>%
        dplyr::select(.data$change_ix, .data$label, dplyr::matches("CI"), .data$change_conf, .data$From, .data$To) %>%
        return()
    }
  },
  error = function(e) {
    # Degrade gracefully: surface the error as a warning and return a
    # sentinel row so downstream pipelines do not break.
    warning(e)
    data.frame(change_ix = NA, label = NA, CI_init = "error"
               , change_conf = NA, From = NA, To= NA ) %>%
      dplyr::rename(!!as.name(paste0("CI (",percent(CI),")")) := .data$CI_init) %>%
      return()
  }
  )
}
|
d692cbbf927023dd1757fe3bfeaf56452559f66d
|
3b759584191822a004bb060de383b140a1c84694
|
/Rcode.R
|
0c26922c79b54bcf496e44c1f9539b3f7b283b34
|
[] |
no_license
|
FunQi/CompMus
|
ab5e3172c3b9339497119f49135a370abd8f3f3e
|
3bcf93d9e386f7b34589f3031e64d1b49e167e0f
|
refs/heads/main
| 2023-03-21T01:50:57.694457
| 2021-03-28T14:50:08
| 2021-03-28T14:50:08
| 335,895,560
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 156
|
r
|
Rcode.R
|
# Setup script: install spotifyr from GitHub, then load packages and fetch a
# Spotify API access token (requires SPOTIFY_CLIENT_ID / SPOTIFY_CLIENT_SECRET
# environment variables).
library(remotes)
library(tidyverse)
library(usethis)
# FIX: install spotifyr before attempting to load it — the original called
# library(spotifyr) first, which fails on a machine where the package has
# never been installed.
remotes::install_github('charlie86/spotifyr')
library(spotifyr)
spotifyr::get_spotify_access_token()
|
2ded7a04ac9401ce794cb4fce747aa6104b39c99
|
2ffff05a55c6beef31f3e80d72614467b5caa020
|
/Discover_Paris/global.R
|
d842ccca52eb1517360e726931ad929b12b737aa
|
[] |
no_license
|
jayendrashinde91/Discover_Paris
|
8aa78cd488190649c3ac46978916f752e39d25f9
|
c71cf587bbc7dd9c780e18a5ab1aa0784f8f28cd
|
refs/heads/master
| 2021-01-12T14:40:25.632794
| 2016-10-27T20:01:56
| 2016-10-27T20:01:56
| 72,044,357
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20
|
r
|
global.R
|
# Shiny global.R: load the pre-processed data objects from pData.RData into
# the app's environment so both ui and server can use them. Assumes the file
# sits in the app directory (the working directory at launch).
load("pData.RData")
|
85c23886a6e2732b8f89cdfbc57add162ed6e04a
|
92196c33ce999f4dc04db549a6785ef67fad84db
|
/tests/testthat/test-anlz_fibmap.R
|
c03af41136639721c12d646a72b3f96bfece30df
|
[
"MIT"
] |
permissive
|
tbep-tech/tbeptools
|
2414d900643ba2871a922826343940c7a44b2e74
|
bb4489ea83509f812e0d8d09290251d97b374455
|
refs/heads/master
| 2023-08-04T13:57:09.114359
| 2023-07-31T13:44:54
| 2023-07-31T13:44:54
| 184,627,857
| 7
| 3
|
NOASSERTION
| 2020-06-22T22:03:33
| 2019-05-02T18:03:48
|
R
|
UTF-8
|
R
| false
| false
| 1,526
|
r
|
test-anlz_fibmap.R
|
# Test case 1: the annotated FIB map has exactly the documented columns.
test_that("Output has the expected columns for anlz_fibmap", {
  wanted_cols <- c("area", "epchc_station", "class", "yr", "mo",
                   "Latitude", "Longitude", "ecoli", "ecocci", "ind",
                   "cat", "col")
  fib_map <- anlz_fibmap(fibdata)
  expect_equal(colnames(fib_map), wanted_cols)
})
# Test case 2: yrsel restricts the output to the requested year.
test_that("Filtering by year works correctly for anlz_fibmap", {
  fib_map <- anlz_fibmap(fibdata, yrsel = 2020)
  expect_equal(unique(fib_map$yr), c(2020))
})
# Test case 3: mosel restricts the output to the requested month.
test_that("Filtering by month works correctly for anlz_fibmap", {
  fib_map <- anlz_fibmap(fibdata, mosel = 7)
  expect_equal(unique(fib_map$mo), c(7))
})
# Test case 4: areasel = "Alafia" excludes the Hillsborough-area stations.
test_that("Filtering by area works correctly for anlz_fibmap", {
  excluded_areas <- c('Hillsborough River', 'Hillsborough River Tributary', 'Lake Thonotosassa',
                      'Lake Thonotosassa Tributary', 'Lake Roberta')
  fib_map <- anlz_fibmap(fibdata, areasel = "Alafia")
  expect_false(any(fib_map$area %in% excluded_areas))
})
# Test case 5: an empty year/month/area combination raises an informative error.
test_that("Checking error for no data with anlz_fibmap", {
  expect_error(anlz_fibmap(fibdata, yrsel = 2020, mosel = 5, areasel = "Alafia"),
               'No FIB data for May 2020, Alafia')
})
|
d061cea3582ac4044f7f6572fc46329ec9cf244b
|
6022b903a56b6fefc799fa7d540287900916c9f8
|
/E050112/25.R
|
8c8f5067285f26ef7893b47278ceb7c3ee9b5dfd
|
[] |
no_license
|
hojinWoo/R_backup
|
f694680555a315e1c819aaf2cf1e02cb19dc2edc
|
5daee6f35581012e9871a5b65d96218b50e83880
|
refs/heads/master
| 2020-05-15T14:45:06.472050
| 2019-04-20T04:21:19
| 2019-04-20T04:21:19
| 182,341,806
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,201
|
r
|
25.R
|
# Display a map centered on an arbitrary address (Sokcho-si, Gangwon-do).
gc<-geocode(enc2utf8('강원도 속초시'))
gc
cen<-as.numeric(gc) # convert to numeric mode
cen
map<-get_googlemap(center=cen,
                   maptype="roadmap",
                   marker=gc)
ggmap(map, extent="device")
# Names and addresses of five east-coast beaches (Korean strings are data —
# left untouched).
names<-c("1.망상해수욕장", "2.속초해수욕장",
         "3.낙산해수욕장","4.송지호해수욕장",
         "5.하조대해수욕장")
addr<-c("강원도 동해시 망상동 393-16",
        "강원도 속초시 조양동 1464-11",
        "강원도 양양군 강현면 주청리 1",
        "강원도 고성군 죽왕면 8",
        "강원도 양양군 현북면 하광정리 1")
sk<-c("서울","광주")
gc<-geocode(enc2utf8(sk))
gc
gc<-geocode(enc2utf8(addr))
gc
df<-data.frame(name=names, lon=gc$lon,
               lat=gc$lat)
df
mean(df$lon)
# Center the map at the mean of the beach coordinates.
cen<-c(mean(df$lon), mean(df$lat))
cen
map<-get_googlemap(center=cen,
                   maptype="roadmap",
                   zoom=9, marker=gc)
gmap<-ggmap(map, extent="device")
# Use geom_text() from the ggplot2 package
# to print the beach names on the map.
# geom_text() returns a Layer containing
# the text.
# geom_text(data, aes, size, label,...)
# data : data drawn on the Layer,
# aes : position coordinates (latitude, longitude)
# size : text size, default is 5,
# label : text to display
gmap+geom_text(data=df,aes(x=lon, y=lat),
               size=3, label=df$name,
               hjust=-.2, vjust=-1)
df1<-read.csv("E:\\빅데이터강좌\\R\\exam\\E050112\\kang.csv",
              header=TRUE)
df1
df1$address
df1$address<-as.character(df1$address)
# convert to character mode
# gc1<-geocode(enc2utf8(df1$address))
cen<-c(mean(df1$longitude),mean(df1$latitude))
map<-get_googlemap(center=cen, maptype="roadmap",
                   zoom=9)
ggmap(map)
ggmap(map)+geom_text(data=df1,
                     aes(x=longitude, y=latitude),
                     size=3, label=df1$names)
# NOTE(review): extent="devide" below looks like a typo for "device" —
# confirm the intended value before changing behavior.
ggmap(map,extent="devide")+
  geom_point(aes(x=df1$longitude,
                 y=df1$latitude,
                 color="red", size=5),
             data=df1, shape=15)
|
1b4a32e5ec38fe1e97fa3933c83ae1e75b58ac1d
|
9c6e2ab15a4c32e6ed864807d887be8b5b467db9
|
/R/ppareto2.R
|
d1bac43164415a347731e6b7810b7959ec484eab
|
[] |
no_license
|
cran/CaDENCE
|
853b37566024fd329a0419b5e643c4b16c4d698e
|
7c754e5e408f46aa3704cdf0c06626a52a548c00
|
refs/heads/master
| 2021-01-23T13:22:25.997825
| 2017-12-05T03:05:17
| 2017-12-05T03:05:17
| 17,678,401
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 111
|
r
|
ppareto2.R
|
# CDF of the Pareto type II (Lomax) distribution:
# F(q) = 1 - (1 + q/scale)^(-shape), with F(q) = 0 for q <= 0.
ppareto2 <- function(q, scale = 1, shape = 1) {
  prob <- 1 - (1 + q/scale)^(-shape)
  # Support is (0, Inf): clamp non-positive quantiles to zero probability.
  prob[q <= 0] <- 0
  prob
}
|
6e86f29caf2c3b6bdcd72884e29a6c4d521026ef
|
5ed0a9765939a26594e7b7498b03b9480a365ce8
|
/tests/testthat.R
|
8845e331a3f2ebba5282c3e5a766ef6517af06c5
|
[] |
no_license
|
zejin/MDMeasure
|
c9cf2b8cbc6b28769a93c6051997d3dded384dd6
|
4ce1dcad66087d496b89ec24e531a99a16c7736d
|
refs/heads/master
| 2021-05-11T04:23:42.319010
| 2018-02-12T17:55:40
| 2018-02-12T17:55:40
| 117,938,068
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 62
|
r
|
testthat.R
|
# Standard testthat entry point: discovers and runs every test under
# tests/testthat/ for the MDMeasure package (invoked by R CMD check and
# devtools::test()).
library(testthat)
library(MDMeasure)
test_check("MDMeasure")
|
05755aa02472c2a60cff955f8a1c372471192fb1
|
ac9b44f711d7ddbfebfe05651feae87c70d49a5a
|
/simple-array-sum.R
|
3d32a64392120a6a2aef8ba0ce75a46cf5b348c5
|
[] |
no_license
|
mohammedabed/hackerrank-1
|
1f3da37ed99a199c4a7edb79782df281c93ecc1f
|
bf9787ebd4a3cb00b385f367ab9a9b2cb3782199
|
refs/heads/master
| 2021-09-15T18:37:06.625447
| 2015-10-20T13:51:01
| 2015-10-20T13:51:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 655
|
r
|
simple-array-sum.R
|
# Problem Statement
#
# You are given an array of integers of size N. You need to print the sum of the elements in the array.
#
# Input Format
# The first line of input consists of an integer N. The next line contains N space-separated integers contained inside the array.
#
# Constraints
# 1=N=1000
# 0=A[i]=1000
#
# Output Format
# Output a single value equal to the sum of the elements in the array.
#
# Sample Input
#
# 6
# 1 2 3 4 10 11
# Sample Output
#
# 31
# Read the problem input from stdin: line 1 holds N, line 2 holds the N
# space-separated integers. Print their sum to stdout.
# (Locals renamed: the original used `T`, shadowing the TRUE alias.)
input_lines <- suppressWarnings(readLines(file("stdin")))
input_tokens <- strsplit(input_lines, " ")
declared_n <- as.numeric(input_tokens[[1]])
value_tokens <- input_tokens[-1]
total <- sum(as.numeric(value_tokens[[1]]))
write(as.character(total), stdout())
|
47a550e37d9c778b7576b92b863122015ea18917
|
6248006ae5f4cd59bd1352c19e911666240da47f
|
/scripts/TMI-sourmash-explore.R
|
a8f118385c7ac0d813110224f19f00f3e4747a6d
|
[
"MIT"
] |
permissive
|
Arcadia-Science/metagenomics-of-cheese-rind-microbiomes
|
785829b29e9b098aa0a39fa6f1fca5cf88131487
|
7121fd7589cc93d6f9ef97645066a4e9793bcec8
|
refs/heads/main
| 2023-05-26T11:42:31.086755
| 2023-05-25T20:21:18
| 2023-05-25T20:21:18
| 598,318,496
| 1
| 0
|
MIT
| 2023-05-25T20:21:19
| 2023-02-06T21:22:53
|
Standard ML
|
UTF-8
|
R
| false
| false
| 9,703
|
r
|
TMI-sourmash-explore.R
|
# Load analysis/plotting libraries; sourmashconsumr supplies the read_* and
# plot_* helpers used throughout this script.
library(tidyverse)
library(sourmashconsumr)
library(ggpubr)
library(ArcadiaColorBrewer)
#########################################################
# TMI Illumina sourmashconsumr exploration
#########################################################
## read in reads files for TMI illumina samples
# NOTE(review): list.files(pattern=) takes a regex, not a glob — patterns
# like "*.reads.sig" work here by accident; "\\.reads\\.sig$" would be strict.
# reads signatures
tmi_illumina_sigs_directory <- ("processed_data/2023-05-23-TMI-processed-data/TMI_illumina/sketch/")
tmi_illumina_sigs_files <- list.files(path = tmi_illumina_sigs_directory, pattern = "*.reads.sig", full.names = TRUE)
tmi_illumina_reads_sigs <- read_signature(tmi_illumina_sigs_files)
# reads compare (pairwise sample similarity matrix from sourmash compare)
tmi_illumina_reads_compare_csv <- read_compare_csv("processed_data/2023-05-23-TMI-processed-data/TMI_illumina/compare/reads.comp.csv", sample_to_rownames = F)
# reads gather CSVs (per-sample database matches; small hits < 50 kbp dropped)
tmi_illumina_gather_directory <- ("processed_data/2023-05-23-TMI-processed-data/TMI_illumina/gather/")
tmi_illumina_gather_csvs <- list.files(path = tmi_illumina_gather_directory, pattern = "*.reads.gather.csv", full.names = TRUE)
tmi_illumina_reads_gather <- read_gather(tmi_illumina_gather_csvs, intersect_bp_threshold = 50000)
# reads taxonomy annotate (gather hits with lineages split into rank columns)
tmi_illumina_taxonomy_csvs <- list.files(path = "processed_data/2023-05-23-TMI-processed-data/TMI_illumina/taxonomy/", pattern = "*.reads.gather.with-lineages.csv.gz", full.names = TRUE)
tmi_illumina_reads_taxonomy <- read_taxonomy_annotate(tmi_illumina_taxonomy_csvs, intersect_bp_threshold = 50000, separate_lineage = T)
## read in assemblies files for TMI illumina samples (same layout as reads)
# assembly signatures
tmi_illumina_assembs_sigs_files <- list.files(path = tmi_illumina_sigs_directory, pattern = "*.assembly.sig", full.names = TRUE)
tmi_illumina_assembs_sigs <- read_signature(tmi_illumina_assembs_sigs_files)
# assemblies compare
tmi_illumina_assemblies_compare_csv <- read_compare_csv("processed_data/2023-05-23-TMI-processed-data/TMI_illumina/compare/assembly.comp.csv", sample_to_rownames = F)
# assemblies gather
tmi_illumina_assembs_gather_csvs <- list.files(path = tmi_illumina_gather_directory, pattern = "*.assembly.gather.csv", full.names = TRUE)
tmi_illumina_assembs_gather <- read_gather(tmi_illumina_assembs_gather_csvs, intersect_bp_threshold = 50000)
# assemblies taxonomy
tmi_illumina_assembs_taxonomy_csvs <- list.files(path = "processed_data/2023-05-23-TMI-processed-data/TMI_illumina/taxonomy/", pattern = "*.assembly.gather.with-lineages.csv.gz", full.names = TRUE)
tmi_illumina_assembs_taxonomy <- read_taxonomy_annotate(tmi_illumina_assembs_taxonomy_csvs, intersect_bp_threshold = 50000, separate_lineage = T)
## plotting TMI illumina reads and assemblies results
# plot compare mds (2-D ordination of sample similarity) and heatmap
tmi_illumina_compare_reads_mds_df <- make_compare_mds(tmi_illumina_reads_compare_csv)
tmi_illumina_compare_assemblies_mds_df <- make_compare_mds(tmi_illumina_assemblies_compare_csv)
plot_compare_mds(tmi_illumina_compare_reads_mds_df)
plot_compare_mds(tmi_illumina_compare_assemblies_mds_df)
plot_compare_heatmap(tmi_illumina_reads_compare_csv, cexRow = 0.75, cexCol = 0.75)
illumina_assemblies_compared_plot <- plot_compare_heatmap(tmi_illumina_assemblies_compare_csv, cexRow = 0.75, cexCol = 0.75)
# plotting gather results (fraction of each sample classified per database)
plot_gather_classified(tmi_illumina_reads_gather)
plot_gather_classified(tmi_illumina_assembs_gather)
# remove low hits to plant db
arcadia.pal(6, 'Accent')
# Strip the ".assembly" suffix from sample names for cleaner axis labels.
# NOTE(review): the unescaped "." in gsub(".assembly", ...) matches any
# character; harmless here but "\\.assembly" would be the strict pattern.
illumina_assemb_classified_plot <- tmi_illumina_assembs_gather %>% mutate(query_name = gsub(".assembly", "", query_name)) %>%
  plot_gather_classified() +
  scale_fill_manual(values = c("#5088C5", "#F28360", "#3B9886", "#F898AE", "#7A77AB", "#F7B846")) +
  ggtitle("Classified Sequences in Illumina Assemblies") +
  scale_y_continuous(expand = c(0,0))
illumina_assemb_classified_plot
# plotting taxonomy annotate results (lineage flow collapsed to order level)
plot_taxonomy_annotate_sankey(tmi_illumina_reads_taxonomy, tax_glom_level = "order")
plot_taxonomy_annotate_sankey(tmi_illumina_assembs_taxonomy, tax_glom_level = "order")
#########################################################
# TMI Nanopore sourmashconsumr exploration
# (mirrors the Illumina read-in above, against the TMI_nanopore outputs)
#########################################################
## read in reads files for TMI Nanopore samples
# reads signatures
tmi_nanopore_sigs_directory <- ("processed_data/2023-05-23-TMI-processed-data/TMI_nanopore/sketch/")
tmi_nanopore_sigs_files <- list.files(path = tmi_nanopore_sigs_directory, pattern = "*.reads.sig", full.names = TRUE)
tmi_nanopore_reads_sigs <- read_signature(tmi_nanopore_sigs_files)
# reads compare (pairwise sample similarity matrix)
tmi_nanopore_reads_compare_csv <- read_compare_csv("processed_data/2023-05-23-TMI-processed-data/TMI_nanopore/compare/reads.comp.csv", sample_to_rownames = F)
# reads gather CSVs (hits below 50 kbp of shared sequence are dropped)
tmi_nanopore_gather_directory <- ("processed_data/2023-05-23-TMI-processed-data/TMI_nanopore/gather/")
tmi_nanopore_gather_csvs <- list.files(path = tmi_nanopore_gather_directory, pattern = "*.reads.gather.csv", full.names = TRUE)
tmi_nanopore_reads_gather <- read_gather(tmi_nanopore_gather_csvs, intersect_bp_threshold = 50000)
# reads taxonomy annotate (lineages split into per-rank columns)
tmi_nanopore_taxonomy_csvs <- list.files(path = "processed_data/2023-05-23-TMI-processed-data/TMI_nanopore/taxonomy/", pattern = "*.reads.gather.with-lineages.csv.gz", full.names = TRUE)
tmi_nanopore_reads_taxonomy <- read_taxonomy_annotate(tmi_nanopore_taxonomy_csvs, intersect_bp_threshold = 50000, separate_lineage = T)
## read in assemblies files for TMI Nanopore samples
# assembly signatures
tmi_nanopore_assembs_sigs_files <- list.files(path = tmi_nanopore_sigs_directory, pattern = "*.assembly.sig", full.names = TRUE)
tmi_nanopore_assembs_sigs <- read_signature(tmi_nanopore_assembs_sigs_files)
# assemblies compare
tmi_nanopore_assemblies_compare_csv <- read_compare_csv("processed_data/2023-05-23-TMI-processed-data/TMI_nanopore/compare/assembly.comp.csv", sample_to_rownames = F)
# assemblies gather
tmi_nanopore_assembs_gather_csvs <- list.files(path = tmi_nanopore_gather_directory, pattern = "*.assembly.gather.csv", full.names = TRUE)
tmi_nanopore_assembs_gather <- read_gather(tmi_nanopore_assembs_gather_csvs, intersect_bp_threshold = 50000)
# assemblies taxonomy
tmi_nanopore_assembs_taxonomy_csvs <- list.files(path = "processed_data/2023-05-23-TMI-processed-data/TMI_nanopore/taxonomy/", pattern = "*.assembly.gather.with-lineages.csv.gz", full.names = TRUE)
tmi_nanopore_assembs_taxonomy <- read_taxonomy_annotate(tmi_nanopore_assembs_taxonomy_csvs, intersect_bp_threshold = 50000, separate_lineage = T)
## plotting TMI Nanopore reads and assemblies results
# plot compare mds and heatmap
tmi_nanopore_compare_reads_mds_df <- make_compare_mds(tmi_nanopore_reads_compare_csv)
tmi_nanopore_compare_assemblies_mds_df <- make_compare_mds(tmi_nanopore_assemblies_compare_csv)
plot_compare_mds(tmi_nanopore_compare_reads_mds_df)
plot_compare_mds(tmi_nanopore_compare_assemblies_mds_df)
plot_compare_heatmap(tmi_nanopore_reads_compare_csv, cexRow = 0.75, cexCol = 0.75)
plot_compare_heatmap(tmi_nanopore_assemblies_compare_csv, cexRow = 0.75, cexCol = 0.75)
# plotting gather results
plot_gather_classified(tmi_nanopore_reads_gather)
# Strip the ".assembly" suffix from sample names for cleaner axis labels.
# NOTE(review): the unescaped "." in gsub(".assembly", ...) matches any
# character; harmless here but "\\.assembly" would be the strict pattern.
nanopore_assembs_classified_plot <- tmi_nanopore_assembs_gather %>% mutate(query_name = gsub(".assembly", "", query_name)) %>%
  plot_gather_classified() +
  scale_fill_manual(values = c("#5088C5", "#F28360", "#3B9886", "#F898AE", "#7A77AB", "#F7B846")) +
  ggtitle("Classified Sequences in Nanopore Assemblies") +
  scale_y_continuous(expand = c(0,0))
nanopore_assembs_classified_plot
# plotting taxonomy annotate results: order-level sankey with a custom
# 11-color pastel palette and italic labels placed manually.
options(repr.plot.width = 8.5, repr.plot.height = 3, repr.plot.res = 300)
nanopore_assembs_tax_sankey_plot <- plot_taxonomy_annotate_sankey(tmi_nanopore_assembs_taxonomy, tax_glom_level = "order", palette = grDevices::colorRampPalette(c("#C6E7F4", "#F8C5C1", "#F5E4BE", "#B5BEA4",
                                                                                                                                                                  "#DCBFFC", "#B6C8D4", "#DA9085",
                                                                                                                                                                  "#F5CBE4", "#BABEE0", "#D1EADF", "#F1E8DA"))(n = 11),
                                                                  label = F) +
  ggforce::geom_parallel_sets_labels(colour = 'black', angle = 360, size = 3, fontface = "italic", hjust = -0.25) +
  labs(x = "Taxonomic rank") +
  scale_x_continuous(labels = c("Domain", "Phylum", "Class", "Order", ""),
                     breaks = c(1, 2, 3, 4, 5),
                     limits = c(.75, 5)) +
  theme(axis.text = element_text(size = 13),
        axis.title = element_text(size = 16))
nanopore_assembs_tax_sankey_plot
# eligo nanopore time series plots
# Subset to the EL (Eligo) samples and order them as a 2/4/12-week series.
eligo_taxonomy <- tmi_nanopore_assembs_taxonomy %>%
  filter(grepl('EL', query_name))
eligo_time_df <- data.frame(query_name = c('EL2weeks.assembly', 'EL4weeks.assembly', 'EL12weeks.assembly'),
                            time = c('1', '2', '3'))
plot_taxonomy_annotate_ts_alluvial(eligo_taxonomy, eligo_time_df, tax_glom_level = "order", fraction_threshold = 0.01) +
  ggplot2::scale_fill_brewer(palette = "Paired")
#########################################################
# Plot grids and save figures
#########################################################
classified_plot <- ggarrange(illumina_assemb_classified_plot, nanopore_assembs_classified_plot, ncol=2, labels = c("A", "B"), common.legend = TRUE, legend = "bottom", align = "h")
classified_plot
ggsave("figures/TMI_classified_plot.pdf", classified_plot, width=30, height=15, units=c("cm"))
ggsave("figures/TMI_nanopore_assembs_tax_sankey.pdf", nanopore_assembs_tax_sankey_plot, width=30, height=20, units=c("cm"))
|
32c938c73e9fdae33e724de9a75a08556941d3df
|
9b6b2dc0b505fec339b7330a25e2fd50df00f20c
|
/07_tree_structure_feature_bias.R
|
7825f367c125dece53afc1f1ef37d29f6f4cae2a
|
[] |
no_license
|
vla6/Stereotyping_ROCDS
|
bec4317132dd2fd8433e43afac1c55d96c649eed
|
b825a6a7d87eb961d72b599d6c6c69336717708a
|
refs/heads/main
| 2023-02-21T22:53:09.104274
| 2021-01-22T01:23:40
| 2021-01-22T01:23:40
| 311,982,866
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,952
|
r
|
07_tree_structure_feature_bias.R
|
#
# Examines tree structure for selected
# feature bias models. Examines female-income interactions.
# For now, examines only models Q and S
#
# NOTE(review): rm(list=ls()) wipes the caller's workspace; this script is
# evidently meant to be run standalone, not sourced from other code.
rm(list=ls())
source('00_setup.R')
library(h2o)
# Exempt model with different female definitions
kModelInclude <- c('p', 'q', 'r', 's', 't')
#
# Import data ----
#
h2o.init()
h2o.removeAll()
# Mapping of model letters to saved H2O model ids, produced by script 02
model_id_df <- fread(file.path(kOutputDir, '/02_REPORT_model_ids.csv'))
model_list = model_id_df$model[model_id_df$model %in%kModelInclude]
# Load each selected H2O model; model_load() is presumably defined in
# 00_setup.R (confirm)
model_list_h2o <- sapply(model_list,
                         function(x) model_load(x, model_id_df), USE.NAMES = T)
split_id_val <- readRDS(file.path(kOutputDir, '/01_DATA_split_id_val.rds'))
# Keep only validation rows (semi_join filters by ID without adding columns)
data_val <- readRDS(file.path(kOutputDir, '/01_DATA_base_gender_inf.rds')) %>%
  semi_join(split_id_val, by='ID')
predictors_df <- readRDS(file.path(kOutputDir, '02_DATA_predictors.rds')) %>%
  dplyr::filter(model %in% kModelInclude)
#
# Functions ----
#
# Get decision paths ending at leaf nodes only
# Recursively collect every root-to-leaf decision path of an H2O tree.
#
# Args:
#   node: an H2OSplitNode or H2OLeafNode (from h2o.getModelTree()).
#   prior_values: comma-separated string of split features seen so far,
#     or NULL at the root (paste() drops the NULL).
# Returns:
#   Character vector with one element per leaf; each element lists the
#   split features on the path, comma-separated, terminated by the leaf's
#   prediction value.
get_decision_path = function(node, prior_values) {
  # Leaf: terminate the path with the predicted value.
  # inherits() is the robust class test (works through S4 class hierarchies,
  # unlike comparing class(node)[1] to a single string).
  if (inherits(node, 'H2OLeafNode')) {
    return(paste(prior_values, as.character(node@prediction), sep=','))
  }
  # Internal node: append this node's split feature and recurse into both
  # children.
  if (!is.null(prior_values)) {
    this_values = paste(prior_values, node@split_feature, sep=',')
  } else {
    this_values = node@split_feature
  }
  left_info <- get_decision_path(node@left_child, this_values)
  right_info <- get_decision_path(node@right_child, this_values)
  c(left_info, right_info)
}
# Classify a decision path by which features of interest it contains.
#
# Args:
#   decision_path: a single comma-separated path string as produced by
#     get_decision_path() / get_decision_path_w_thresh().
#   oth_feat: name of the gender(-proxy) feature (default 'female_pq').
#   inc_feat: name of the income feature (default 'annual_inc_pq';
#     previously hard-coded — now a parameter for reuse, same default).
# Returns:
#   One of 'both', 'income', 'female', 'neither'.
dec_path_type = function(decision_path, oth_feat = 'female_pq',
                         inc_feat = 'annual_inc_pq') {
  # A feature appears either at the start of the path or right after a comma
  female_match = paste0('^', oth_feat, '|,', oth_feat)
  income_match = paste0('^', inc_feat, '|,', inc_feat)
  has_income = grepl(income_match, decision_path)
  has_female = grepl(female_match, decision_path)
  # Scalar logic: use short-circuit && / || (inputs are length-1 logicals)
  if (!(has_income || has_female)) {
    return('neither')
  } else if (has_income && has_female) {
    return('both')
  } else if (has_income) {
    return('income')
  } else {
    return('female')
  }
}
# Depth of a decision path = number of steps after the first feature,
# i.e. the number of commas in the path string. Vectorized over
# decision_path; returns an integer vector of the same length.
dec_path_depth = function(decision_path) {
  comma_positions <- gregexpr(",", decision_path, fixed = TRUE)
  # gregexpr() yields -1 when there is no match, so count positive positions
  vapply(comma_positions, function(pos) sum(pos > 0), integer(1))
}
# Like get_decision_path(), but every internal-node step is annotated with
# its numeric split threshold as 'feature#threshold' (used for model S so
# that cut points can later be recovered by dec_path_thresh()).
#
# Args:
#   node: an H2OSplitNode or H2OLeafNode (from h2o.getModelTree()).
#   prior_values: comma-separated steps so far, or NULL at the root.
# Returns:
#   Character vector, one element per leaf, of comma-joined
#   'feature#threshold' steps ending in the leaf prediction.
get_decision_path_w_thresh = function(node, prior_values) {
  # Leaf: terminate the path with the predicted value
  if (inherits(node, 'H2OLeafNode')) {
    return(paste(prior_values, as.character(node@prediction), sep=','))
  }
  # Annotate the split feature with its threshold
  this_item = paste0(node@split_feature, '#', as.character(node@threshold))
  if (!is.null(prior_values)) {
    this_values = paste(prior_values, this_item, sep=',')
  } else {
    this_values = this_item
  }
  left_info <- get_decision_path_w_thresh(node@left_child, this_values)
  right_info <- get_decision_path_w_thresh(node@right_child, this_values)
  c(left_info, right_info)
}
# Get thresholds for a feature from decision path.
# Return all
#
# Extracts every split threshold of `filt_feature` from the FIRST path
# string in data$path; designed to be called per group via dplyr::do(),
# where each group slice holds exactly one path.
#
# Args:
#   data: data frame (or grouped slice) with a character column `path`
#     whose elements are 'feature#threshold' steps joined by commas, as
#     produced by get_decision_path_w_thresh().
#   filt_feature: split-feature name whose thresholds should be returned.
# Returns:
#   Data frame with path_level (1-based position of the step in the path)
#   and the numeric threshold. The trailing leaf-prediction step has no
#   '#', so separate() leaves its threshold missing — it is dropped by the
#   feature filter either way.
dec_path_thresh <- function(data, filt_feature) {
  items_df = data.frame(item = strsplit(as.character(data$path[1]), ',')[[1]]) %>%
    mutate(path_level = seq_len(n())) %>%
    separate(item, into=c('feature', 'threshold'), sep='#') %>%
    dplyr::filter(feature == filt_feature) %>%
    mutate(threshold= as.numeric(threshold)) %>%
    dplyr::select(path_level, threshold)
  return(items_df)
}
#
# Model Q -----
# Look at all trees, and get statistics
#
this_model = 'q'
this_model_h2o = model_list_h2o[[this_model]]
num_trees = this_model_h2o@parameters$ntrees
# Loop through trees and get key statistics (one row per tree).
# NOTE(review): bind_rows() inside the loop grows the frame quadratically;
# acceptable for typical ntrees, but a list + one bind_rows() would scale.
tree_statistics = data.frame()
for (this_tree_id in seq(1, num_trees)) {
  htree = h2o.getModelTree(this_model_h2o, tree_number=this_tree_id)
  # Get the initial stats
  this_stats = data.frame(tree_number = htree@tree_number,
                          num_nodes = length(htree),
                          max_depth_model = this_model_h2o@parameters$max_depth)
  # Count total nodes with income, female features
  this_stats <- this_stats %>%
    mutate(num_nodes_female_pq = sum(htree@features == 'female_pq', na.rm=T),
           num_nodes_annual_inc_pq= sum(htree@features == 'annual_inc_pq', na.rm=T))
  # Get terminal decision paths
  term_dec_path <- get_decision_path(htree@root_node, NULL)
  # Get the types of each path ('female'/'income'/'both'/'neither'), count
  term_dec_path_types <- sapply(term_dec_path, dec_path_type, simplify = "array", USE.NAMES = F)
  term_dec_path_types_table <- table(term_dec_path_types)
  # One wide row of per-type counts, columns prefixed dec_path_type_*;
  # types absent from this tree simply produce no column (NA after bind_rows)
  term_dec_path_df <- data.frame(tname = names(term_dec_path_types_table),
                                 tval = as.vector(term_dec_path_types_table)) %>%
    pivot_wider(names_from='tname', values_from='tval') %>%
    rename_all(function(x) paste0('dec_path_type_', x))
  this_stats <- this_stats %>%
    mutate(dec_path_count = length(term_dec_path_types)) %>%
    bind_cols(term_dec_path_df)
  # Get decision path depths
  dec_path_depths <- sapply(term_dec_path, dec_path_depth, simplify = "array", USE.NAMES = F)
  this_stats <- this_stats %>%
    mutate(dec_path_depth_min = min(dec_path_depths),
           dec_path_depth_max = max(dec_path_depths),
           dec_path_depth_median = median(dec_path_depths),
           dec_path_depth_mean = mean(dec_path_depths))
  tree_statistics <- tree_statistics %>%
    bind_rows(this_stats)
}
saveRDS(tree_statistics, file.path(kOutputDir, '/07_DATA_model_q_tree_stats.rds'))
summary(tree_statistics$dec_path_count)
summary(tree_statistics$dec_path_type_female)
summary(tree_statistics$dec_path_type_income)
summary(tree_statistics$dec_path_type_both)
# Replace zeros
# Aggregate tree statistics
# (trees with no female-only path have NA counts; treated as 0 for the
# means, while the NA count itself is reported as trees_with_no_fem_only)
tree_statistics_overall <- tree_statistics %>%
  dplyr::select(-tree_number) %>%
  replace(is.na(.), 0) %>%
  summarize_all(mean) %>%
  bind_cols(trees_with_no_fem_only = sum(is.na(tree_statistics$dec_path_type_female)))
fwrite(tree_statistics_overall, file.path(kOutputDir, '/07_DATA_model_q_tree_stats_mean.csv'))
# Get some sample trees
tree_statistics %>%
  sample_n(3) %>%
  fwrite(file.path(kOutputDir, '/07_DATA_model_q_tree_stats_sample.csv'))
# Plot histogram of number of trees with female only
gp_fem_hist <- tree_statistics %>%
  replace(is.na(.), 0) %>%
  ggplot(aes(x=dec_path_type_female)) +
  geom_histogram(binwidth=3) +
  theme_minimal(base_size = 14) +
  labs(x = '# paths with female status but not income',
       y= '# trees')
print(gp_fem_hist)
ggsave(file.path(kOutputDir, '/07_PLOT_model_q_hist_female_only.png'),
       gp_fem_hist, type='cairo', width=5, height = 4)
# Scatter of female only vs both type
gp_tree_scatter <- tree_statistics %>%
  dplyr::select(tree_number, dec_path_type_both, dec_path_type_female) %>%
  replace(is.na(.), 0) %>%
  ggplot(aes(x=dec_path_type_both, y=dec_path_type_female)) +
  geom_point() +
  theme_minimal(base_size = 14) +
  labs(y = '# paths with female status but not income',
       x = '# paths with both female status and income')
print(gp_tree_scatter)
ggsave(file.path(kOutputDir, '/07_PLOT_model_q_scatter_dec_path_fem_both.png'),
       gp_tree_scatter, type='cairo', width=5, height = 4)
#
# Model S ----
# Examine split points for the income feature, and in particular
# paths with low cut points and no income interactions
#
this_model = 's'
this_model_h2o = model_list_h2o[[this_model]]
num_trees_s = this_model_h2o@parameters$ntrees
tree_statistics_s = data.frame()
decision_path_summary = data.frame()
thresholds_nonincome = data.frame()
# Loop through trees and get key statistics
for (this_tree_id in seq(1, num_trees_s)) {
  htree_s = h2o.getModelTree(this_model_h2o, tree_number=this_tree_id)
  # Get the initial stats
  this_stats = data.frame(tree_number = htree_s@tree_number,
                          num_nodes = length(htree_s),
                          max_depth_model = this_model_h2o@parameters$max_depth)
  # Get decision paths with thresholds ('feature#threshold' steps)
  term_dec_path_s <- get_decision_path_w_thresh(htree_s@root_node, NULL)
  # Finalize basic statistics
  this_stats <- this_stats %>%
    mutate(num_nodes_inc_female_pq = sum(htree_s@features == 'inc_female_pq', na.rm=T),
           num_nodes_annual_inc_pq= sum(htree_s@features == 'annual_inc_pq', na.rm=T),
           num_dec_paths = term_dec_path_s %>% length())
  tree_statistics_s <- tree_statistics_s %>%
    bind_rows(this_stats)
  # Summarize decision paths; model S uses 'inc_female_pq' as the
  # gender-proxy feature instead of 'female_pq'
  term_dec_path_types_s <- sapply(term_dec_path_s,
                                  function(x) dec_path_type(x, 'inc_female_pq'),
                                  simplify = "array", USE.NAMES = F)
  term_dec_path_types_table_s <- table(term_dec_path_types_s)
  term_dec_path_df_s <- data.frame(tname = names(term_dec_path_types_table_s),
                                   tval = as.vector(term_dec_path_types_table_s)) %>%
    pivot_wider(names_from='tname', values_from='tval') %>%
    rename_all(function(x) paste0('dec_path_type_', x)) %>%
    mutate(tree_number = this_tree_id,
           num_dec_paths = term_dec_path_s %>% length())
  # Get split points for the inc_female_pq features
  # (dec_path_thresh() is applied per path via the path_id grouping)
  this_thresholds <- data.frame(path = term_dec_path_s) %>%
    mutate(path_id = seq_len(n())) %>%
    group_by(path_id) %>%
    do(dec_path_thresh(., 'inc_female_pq')) %>%
    ungroup()
  # Find paths with income feature
  this_inc_feat <- data.frame(path = term_dec_path_s) %>%
    mutate(path_id = seq_len(n())) %>%
    dplyr::filter(grepl('annual_inc_pq', path)) %>%
    distinct(path_id)
  # Get paths without income feature, and with thresholds near 0
  this_thresholds_nonincome <- this_thresholds %>%
    anti_join(this_inc_feat, by='path_id') %>%
    mutate(tree_number = this_tree_id)
  thresholds_nonincome <- thresholds_nonincome %>%
    bind_rows(this_thresholds_nonincome)
  # Add low threshold info to the decision path summary:
  # per path, flag whether ANY inc_female_pq threshold is < 5k / < 10k,
  # then count the flagged paths for this tree
  this_thresholds_sm <- this_thresholds %>%
    mutate(low_thresh_5k = ifelse(threshold < 5000, 1, 0),
           low_thresh_10k = ifelse(threshold < 10000, 1, 0)) %>%
    group_by(path_id) %>%
    dplyr::summarize(low_thresh_5k = max(low_thresh_5k),
                     low_thresh_10k = max(low_thresh_10k)) %>%
    ungroup() %>%
    dplyr::summarize(low_thresh_5k = sum(low_thresh_5k),
                     low_thresh_10k = sum(low_thresh_10k)) %>%
    ungroup()
  decision_path_summary <- decision_path_summary %>%
    bind_rows(term_dec_path_df_s %>%
                bind_cols(this_thresholds_sm))
}
saveRDS(tree_statistics_s,
        file.path(kOutputDir, '/07_DATA_model_s_tree_stats.rds'))
saveRDS(thresholds_nonincome,
        file.path(kOutputDir, '/07_DATA_model_s_thresholds_nonincome.rds'))
saveRDS(decision_path_summary,
        file.path(kOutputDir, '/07_DATA_model_s_decision_path_summary.rds'))
# Get mean decision path info: collapse the per-tree decision-path summary
# into a single row of means (NAs treated as zero counts before averaging).
decision_path_info_mean <- decision_path_summary %>%
  replace(is.na(.), 0) %>%
  dplyr::select(-tree_number) %>%
  dplyr::summarize_all(mean)
decision_path_info_mean %>%
  # BUG FIX: was `fwrite(file,path(kOutputDir, ...))` — a typo for
  # `file.path(...)` that passed base::file as the data argument and then
  # called a nonexistent function path(), erroring at runtime.
  fwrite(file.path(kOutputDir, '/07_DATA_model_s_decision_path_summary_mean.csv'))
|
c0fafef6450595700fe2872466c53111c2523390
|
ebf2d02e0b13ae18614a5dce7203991548d5ea32
|
/scripts/spat_check_results.R
|
d9b7a6600bece764e92c65631347a2a9fde005d9
|
[
"MIT"
] |
permissive
|
weecology/mete-spatial
|
48d2917afb1b88b9e80f94e5a95b96a0cb1a18ca
|
1ed11e0661b70dfb79834058729b3ad5a7df1be8
|
refs/heads/master
| 2022-01-23T12:07:31.462049
| 2022-01-10T18:12:26
| 2022-01-10T18:12:26
| 7,699,982
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,058
|
r
|
spat_check_results.R
|
# Completeness checker for spatial analysis result files.
# NOTE(review): setwd() changes the session working directory as a side
# effect; all paths below are relative to ./sorensen.
setwd('./sorensen')
files = dir()
# Expected short site names, read as a character vector
sites = as.character(read.table('../data/shrtnames.txt', colClasses='character'))
# Read a set of result files into a list.
#
# Args:
#   filenames: character vector of file paths.
#   R_obj: if TRUE the files are .Rdata files produced by save();
#     if FALSE (default) they are CSV files read with read.csv().
#   obj_name: when R_obj is TRUE, the name of the object to extract from
#     each .Rdata file.
# Returns:
#   A list with one element per file, in the order of `filenames`.
load_list = function(filenames, R_obj=FALSE, obj_name=NULL) {
  dat = vector('list', length(filenames))
  for (i in seq_along(filenames)) {
    if (R_obj) {
      # Load into a private environment and pull the named object out with
      # [[ — avoids load()-ing into the function frame plus
      # eval(parse(text = obj_name)), which could clobber or be shadowed by
      # local variables.
      tmp_env <- new.env(parent = emptyenv())
      load(filenames[i], envir = tmp_env)
      dat[[i]] = tmp_env[[obj_name]]
    }
    else {
      dat[[i]] = read.csv(filenames[i])
    }
  }
  return(dat)
}
## Each section below selects one result category by file-name pattern,
## extracts the site code (the 2nd underscore-delimited token), and prints
## the expected sites that have NO matching result file.
## bisected binary empirical results
tmp = files[grep('_bisect_binary.Rdata', files)]
tmp = tmp[grep('_C200_', tmp, invert=TRUE)]
tmp_sites = as.character(sapply(tmp, function(x) strsplit(x, '_')[[1]][2]))
sites[!sites %in% tmp_sites]
## bisected abundance empirical results
tmp = files[grep('_bisect_abu.Rdata', files)]
tmp = tmp[grep('_C200_', tmp, invert=TRUE)]
tmp = tmp[grep('_uni_', tmp, invert=TRUE)]
tmp_sites = as.character(sapply(tmp, function(x) strsplit(x, '_')[[1]][2]))
sites[!sites %in% tmp_sites]
# Sanity check: print per-file, per-null-model variogram column counts
dat = load_list(tmp, TRUE, 'sorensen')
for(i in seq_along(dat)) {
  for(j in seq_along(dat[[i]]))
    print(paste(tmp[i], ncol(dat[[i]][[j]]$sorensenNull$vario)))
}
## bisected abundance univariate empirical results
tmp = files[grep('_bisect_abu.Rdata', files)]
tmp = tmp[grep('_C200_', tmp, invert=TRUE)]
tmp = tmp[grep('_uni_', tmp)]
tmp_sites = as.character(sapply(tmp, function(x) strsplit(x, '_')[[1]][2]))
sites[!sites %in% tmp_sites]
## mete analytical logseries results
# (site code is the 1st token for the analytical .csv outputs)
tmp = files[grep('.csv', files)]
tmp = tmp[grep('_empirSAD_', tmp, invert=T)]
tmp_sites = as.character(sapply(tmp, function(x) strsplit(x, '_')[[1]][1]))
sites[!sites %in% tmp_sites]
## mete analytical empirSAD results
tmp = files[grep('.csv', files)]
tmp = tmp[grep('_empirSAD_', tmp)]
tmp_sites = as.character(sapply(tmp, function(x) strsplit(x, '_')[[1]][1]))
sites[!sites %in% tmp_sites]
## bisected binary logseries simulated results
tmp = files[grep('_bisect_binary.Rdata', files)]
tmp = tmp[grep('_C200_', tmp)]
tmp = tmp[grep('_empirSAD_', tmp, invert=T)]
tmp_sites = as.character(sapply(tmp, function(x) strsplit(x, '_')[[1]][2]))
sites[!sites %in% tmp_sites]
# Sanity check: count non-NULL metric entries per simulated-result file
dat = load_list(tmp, TRUE, 'metrics')
for(i in seq_along(dat)) {
  print(paste(tmp[i], sum(sapply(dat[[i]], function(x) !is.null(x)))))
}
## bisected abundance logseries simulated results
tmp = files[grep('_bisect_abu.Rdata', files)]
tmp = tmp[grep('_C200_', tmp)]
tmp = tmp[grep('_empirSAD_', tmp, invert=T)]
tmp_sites = as.character(sapply(tmp, function(x) strsplit(x, '_')[[1]][2]))
sites[!sites %in% tmp_sites]
## bisected binary empirSAD simulated results
tmp = files[grep('_bisect_binary.Rdata', files)]
tmp = tmp[grep('_C200_', tmp)]
tmp = tmp[grep('_empirSAD_', tmp)]
tmp_sites = as.character(sapply(tmp, function(x) strsplit(x, '_')[[1]][2]))
sites[!sites %in% tmp_sites]
## bisected abundance empirSAD simulated results
tmp = files[grep('_bisect_abu.Rdata', files)]
tmp = tmp[grep('_C200_', tmp)]
tmp = tmp[grep('_empirSAD_', tmp)]
tmp_sites = as.character(sapply(tmp, function(x) strsplit(x, '_')[[1]][2]))
sites[!sites %in% tmp_sites]
|
5a6ef6abeba3facbc1125dd90544a4aa48e0466d
|
dd4707aabfcb0275859f77b0f60c34805ad6d4b0
|
/R/AIC.R
|
233f40cfa96d39c317e7fce3a0d9106268520f16
|
[] |
no_license
|
RobHayward/Econometrics2
|
a37eb6d5ad0130ab57a2b885db3b6cf7096b8562
|
30e5bd9c00f7c5dfea1342935c641c7fca12d954
|
refs/heads/master
| 2021-01-21T13:14:27.075082
| 2016-12-20T15:15:15
| 2016-12-20T15:15:15
| 20,849,641
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 244
|
r
|
AIC.R
|
# this comes from http://www.r-bloggers.com/computing-aic-on-a-validation-sample/
library(splines)
# AIC of a Poisson (log link) GLM of stopping distance on speed, fit on the
# training split.
# NOTE(review): `train_cars` is not defined in this file — presumably a
# training subset of datasets::cars created elsewhere (see the linked post);
# confirm before running.
AIC(glm(dist ~ speed, data = train_cars, family =
          poisson(link = "log")))
|
5a498368e6693aa7a9f453ba1c3ac9605e7904dc
|
d2fa1a7f6b3382507b628aa75c5c76387e389656
|
/R/modelEnsembling&stacking.R
|
042555bd75792d5958a7f1d2559cc45e8e55268b
|
[] |
no_license
|
himankjn/Practical-Machine-Learning
|
ac8e12405f6181c65941822a7a58a67347fcd9cf
|
4a56d800758bcd1a396f79bc9feac2552cb50382
|
refs/heads/master
| 2022-03-30T21:50:38.526319
| 2020-01-05T12:30:09
| 2020-01-05T12:30:09
| 200,061,321
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,770
|
r
|
modelEnsembling&stacking.R
|
#REGULARIZED REGRESSION:
#The train error always reduces with increased number of predictors but the test error
#decreases first and then start increasing again. this is due to overfitting.
#we use methods like PCA , regularized regression to decrease dimensions by removing correlated and unnecessary predictors
#in regularized regression we penalize/shrink the coefficients which are large.
#lambda is tuning parameter proportional to penalization of coefficients.
#Ensembling is combining classifiers/algorithms to improve accuracy. it reduces interpretability.
#RF,BOOSTING,BAGGING are also themes of ensembling.
#Model stacking uses output of initial models as input to combined model
library(ISLR)
data(Wage)
library(caret)
partition<-createDataPartition(y=Wage$wage,p=0.7,list=F)
validation<-Wage[-partition,]
builddata<-Wage[partition,]
partition<-createDataPartition(y=builddata$wage,p=0.7,list=F)
training<-builddata[partition,]
testing<-builddata[-partition,]
model1<-train(wage~.,method='glm',data=training)
model2<-train(wage~.,method='rf',data=training,trControl=trainControl(method='cv'),number=3)
pred1<-predict(model1,testing)
pred2<-predict(model2,testing)
qplot(pred1,pred2,colour=wage,data=testing)
predF<-data.frame(pred1,pred2,wage=testing$wage)
combmodel<-train(wage~.,method='gam',data=predF)
combpred<-predict(combmodel,predF)
RMSE(pred1,testing$wage)
RMSE(pred2,testing$wage)
RMSE(combpred,testing$wage)
sqrt(sum((pred1-testing$wage)^2))
sqrt(sum((pred2-testing$wage)^2))
sqrt(sum((combpred-testing$wage)^2))
#combined predictors model reduces sum of squared errors by alot
pred1v<-predict(model1,validation)
pred2v<-predict(model2,validation)
predvdf<-data.frame(pred1=pred1v,pred2=pred2v)
combpredv<-predict(combmodel,predvdf)
|
12701cced32bb1dea5012df3cccdb65705c474ae
|
96aeeffe655e13a4da83c8f73918372cc7335df5
|
/man/plot3d.ThreeDimensionalColor.Rd
|
570e7096b1cef553f51f5a290468492c7ee25ee9
|
[] |
no_license
|
HenrikBengtsson/R.colors
|
2ff1a46725d4ad00215c1a4c8e775fdbb6cb98aa
|
500d2e4ffb153f990ceedfd4b6b5251c5a206c01
|
refs/heads/master
| 2021-01-20T00:58:43.927288
| 2014-06-19T04:20:16
| 2014-06-19T04:20:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,069
|
rd
|
plot3d.ThreeDimensionalColor.Rd
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% ThreeDimensionalColor.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{plot3d.ThreeDimensionalColor}
\alias{plot3d.ThreeDimensionalColor}
\alias{ThreeDimensionalColor.plot3d}
\alias{plot3d.ThreeDimensionalColor}
\alias{plot3d,ThreeDimensionalColor-method}
\title{Plots the colors in a three-dimensional plot}
\usage{\method{plot3d}{ThreeDimensionalColor}(this, col=getColors(this), xlab=NULL, ylab=NULL, zlab=NULL, xlim=c(0, 1), ylim=xlim, zlim=xlim, ...)}
\description{
Plots the colors in a three-dimensional plot.
}
\author{Henrik Bengtsson (\url{http://www.braju.com/R/})}
\examples{
# One-dimensional colors
ncolors <- 256
x <- seq(0,1, length=ncolors)
ch1 <- x
ch2 <- rev(x)
colors <- list(
# One-dimensional colors
red = RedColor(x),
green = GreenColor(x),
blue = BlueColor(x),
gray = GrayColor(x),
spectrum = WavelengthColor(seq(380,779,length=ncolors)),
heat = HeatColor(x),
rainbow = RainbowColor(x),
topo = TopologyColor(x),
terrain = TerrainColor(x),
cm = CyanMagentaColor(x),
# Two-dimensional colors
rg = TwoChannelMicroarrayColor(ch1,ch2, maxColorValue=1),
yb = TwoChannelMicroarrayColor(ch1,ch2, hueRange=c(HsvgColor$YELLOW.HUE,HsvgColor$BLUE.HUE), maxColorValue=1),
# Three-dimensional colors
rgb = RgbColor(x,rev(x),sample(x)),
hcl = HclColor(x),
# Four-dimensional colors
hsvg = HsvgColor(x),
cmyk = CmykColor(x,sample(x),rev(x),x/2)
)
layout(matrix(1:16, ncol=4, byrow=TRUE))
opar <- par(mar=c(0,0,1,0)+0.5)
for(color in colors) {
plot3d(RgbColor(color), axes=FALSE, xlab="", ylab="", zlab="", phi=35, theta=30, pch=20, cex=2)
title(main=data.class(color), cex=0.7, line=0)
}
par(opar)
}
\keyword{color}
\seealso{
For more information see \code{\link{ThreeDimensionalColor}}.
}
\keyword{internal}
\keyword{methods}
|
7bcb5841546f2a623f1e69b9a7b0a47ddba520cc
|
56208c93517c510bbe3a25fbee15735001f3fae4
|
/OCR.R
|
747b4e769313aac10de87f2ca3a90c1fdc4ef237
|
[] |
no_license
|
AdiModi96/Hindi-Numbers-OCR
|
d83caea3fa00e7a9b8c80076fd67b9c6e73c4d43
|
c1ef253bfdd04892826d375393fe87a7e396b655
|
refs/heads/master
| 2021-07-16T14:21:09.936474
| 2017-08-11T12:20:37
| 2017-08-11T12:20:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,433
|
r
|
OCR.R
|
# Hindi-numbers OCR pipeline: PNG images -> pixel-intensity CSVs ->
# binarized CSVs -> zone/chord features (CSV + MySQL) -> random forest.
library(png)
library(DBI)
library(RMySQL)
library(party)
library(randomForest)
#Path Declarations
# NOTE(review): absolute Windows paths — adjust per machine before running.
rootFeaturesLocation <- "D:/Codes/Data-Science-OCR/Features"
rootReadLocation <- "D:/Codes/Data-Science-OCR/OCR Test Alphabets"
rootDiscretizedLocation <- "D:/Codes/Data-Science-OCR/OCR Test Alphabets-Discretized"
rootBinaritizedLocation <- "D:/Codes/Data-Science-OCR/OCR Test Alphabets-Binaritized"
#Creating Directory Hierarchy for Storing Image Pixel-Intensity Values as Discrete Values
# Mirrors the immediate sub-directory layout (one directory per character
# class) of rootReadLocation under rootWriteLocation.
#
# Args:
#   rootReadLocation: existing input root whose children define the classes.
#   rootWriteLocation: output root; created together with its children if
#     missing. Existing directories are left untouched.
createDirectoryDiscretize <- function(rootReadLocation, rootWriteLocation)
{
  if (!dir.exists(rootWriteLocation))
  {
    # Create the output root if it does not exist yet
    dir.create(rootWriteLocation)
  }
  for (dir in list.files(rootReadLocation))
  {
    # file.path() is the portable replacement for paste(..., sep = '/')
    subdir <- file.path(rootWriteLocation, dir)
    if (!dir.exists(subdir))
    {
      dir.create(subdir)
    }
  }
}
# Convert every PNG under rootReadLocation (one sub-directory per character
# class) into a headerless CSV of raw [0,1] pixel intensities in the
# mirrored directory under rootWriteLoaction, updating a progress bar.
#
# Args:
#   rootReadLocation: root of class sub-directories containing .png images.
#   rootWriteLoaction: output root created by createDirectoryDiscretize().
#     NOTE(review): parameter name is misspelled ("Loaction") but kept for
#     backward compatibility with any named-argument callers.
discretize <- function(rootReadLocation, rootWriteLoaction)
{
  dirs <- list.dirs(rootReadLocation, full.names = FALSE, recursive = FALSE)
  # Progress total derived from the actual file count instead of the
  # previous hard-coded 3000, so the bar is correct for any dataset size.
  progressTotal <- length(list.files(rootReadLocation, recursive = TRUE))
  progressValue <- 0
  # NOTE(review): winProgressBar() exists only on Windows; the D:/ paths
  # above suggest this script is Windows-only — confirm.
  progressBar <- winProgressBar(title = "Generating Pixel-Intensity Values", min = 0, max = progressTotal, width = 500)
  for (dir in dirs)
  {
    # Output files are numbered 1.csv, 2.csv, ... within each class
    filenumber <- 1
    pngs <- list.files(file.path(rootReadLocation, dir), full.names = TRUE)
    for (png in pngs)
    {
      #Reading PNG Files
      png <- readPNG(png)
      name <- paste(as.character(filenumber), ".csv", sep = "")
      #Writing CSV Files with Pixel-Intensity Values
      write.table(png, file = file.path(file.path(rootWriteLoaction, dir), name), sep = ",", row.names = FALSE, col.names = FALSE)
      progressValue <- progressValue + 1
      setWinProgressBar(progressBar, progressValue, title = paste("Generating Pixel-Intensity Values: ", round(progressValue / progressTotal * 100, 0), "% done", sep = ""))
      filenumber <- filenumber + 1
    }
  }
  close(progressBar)
}
#Creating Directory Hierarchy for Storing Image Pixel-Intensity Values as Binary Values
# Mirrors the immediate sub-directory layout (one directory per character
# class) of rootReadLocation under rootWriteLocation. Same logic as
# createDirectoryDiscretize(); kept separate to preserve the public API.
#
# Args:
#   rootReadLocation: existing input root whose children define the classes.
#   rootWriteLocation: output root; created together with its children if
#     missing. Existing directories are left untouched.
createDirectoryBinaritize <- function(rootReadLocation, rootWriteLocation)
{
  if (!dir.exists(rootWriteLocation))
  {
    # Create the output root if it does not exist yet
    dir.create(rootWriteLocation)
  }
  for (dir in list.files(rootReadLocation))
  {
    # file.path() is the portable replacement for paste(..., sep = '/')
    subdir <- file.path(rootWriteLocation, dir)
    if (!dir.exists(subdir))
    {
      dir.create(subdir)
    }
  }
}
# Threshold the discretized pixel-intensity CSVs at 0.5 into binary (0/1)
# CSVs, mirroring the class sub-directory layout, with a progress bar.
#
# Args:
#   rootReadLocation: root of class sub-directories of intensity CSVs
#     (the output of discretize()).
#   rootWriteLocation: output root created by createDirectoryBinaritize().
binaritize <- function(rootReadLocation, rootWriteLocation)
{
  dirs <- list.dirs(rootReadLocation, full.names = FALSE, recursive = FALSE)
  # Progress total derived from the actual file count instead of the
  # previous hard-coded 3000, so the bar is correct for any dataset size.
  progressTotal <- length(list.files(rootReadLocation, recursive = TRUE))
  progressValue <- 0
  # NOTE(review): winProgressBar() is Windows-only — confirm target platform.
  progressBar <- winProgressBar(title = "Generating Binary Intensity Values", min = 0, max = progressTotal, width = 500)
  for (dir in dirs)
  {
    filenumber <- 1
    csvs <- list.files(file.path(rootReadLocation, dir), full.names = TRUE)
    for (csv in csvs)
    {
      #Reading Discrete Pixel-Intensity Values
      csv <- read.table(csv, header = FALSE, sep = ',')
      csv <- as.matrix(csv)
      # Vectorized 0.5 threshold: < 0.5 -> 0, >= 0.5 -> 1
      csv <- replace(csv, csv < 0.5, 0)
      csv <- replace(csv, csv >= 0.5, 1)
      name <- paste(as.character(filenumber), ".csv", sep = "")
      #Writing Binary Pixel-Intensity Values
      write.table(csv, file = file.path(file.path(rootWriteLocation, dir), name), sep = ",", row.names = FALSE, col.names = FALSE)
      progressValue <- progressValue + 1
      setWinProgressBar(progressBar, progressValue, title = paste("Generating Binary-Intensity Values: ", round(progressValue / progressTotal * 100, 0), "% done", sep = ""))
      filenumber <- filenumber + 1
    }
  }
  close(progressBar)
}
# Ensure the root output directory for the generated feature CSVs exists.
# Does nothing if the directory is already present.
createDirectoryFeature <- function(rootWriteLocation)
{
  # Guard clause: nothing to do when the directory already exists
  if (dir.exists(rootWriteLocation))
  {
    return(invisible(NULL))
  }
  dir.create(rootWriteLocation)
}
# Build the feature set from the discretized ([0,1] intensity) 32x32 image
# CSVs: per image, 16 zone-density features (one per 8x8 zone) plus 4
# chord-intersection counts, labelled with the class (sub-directory name).
# Each feature vector is appended to featureTable, inserted into MySQL via
# insertIntoDiscretizedFeatureTable(), and the full table is finally written
# to "Discretized Features.csv".
#
# Args:
#   rootReadLocation: root directory with one sub-directory per class, each
#     containing pixel-intensity CSVs (output of discretize()).
#   rootWriteLocation: directory that receives the feature CSV.
#   databaseConnection: open DBI/MySQL connection used for row inserts.
createDiscretizedFeature <- function(rootReadLocation, rootWriteLocation, databaseConnection)
{
  dirs <- list.dirs(rootReadLocation, full.names = FALSE, recursive = FALSE)
  #Declaring essential Values
  # NOTE(review): progress total hard-coded to 3000 images; only affects the
  # progress-bar display, but confirm it matches the dataset size.
  progressTotal<-3000
  progressValue<-0
  imageDimension<-32
  widthOfZone <- 8
  # NOTE(review): `filename` (and `filenumber` below) are assigned but never
  # used in this function.
  filename <- 1
  # winProgressBar() is Windows-only
  progressBar <- winProgressBar(title = "Generating Discretized Feature-Set", min = 0, max = progressTotal, width = 500)
  # Columns: (32/8)^2 = 16 zone densities + 4 chord counts + 1 class label
  featureTable<-matrix(nrow = 0, ncol = ((imageDimension/widthOfZone)*(imageDimension/widthOfZone))+4+1)
  for(dir in dirs)
  {
    filenumber <- 1
    csvs <- list.files(file.path(rootReadLocation, dir), full.names = TRUE)
    for(csv in csvs)
    {
      csv <- read.table(csv, header = FALSE, sep = ',')
      csv <- as.matrix(csv)
      featureVector <- vector(length =0)
      # Zone densities: mean intensity of each widthOfZone x widthOfZone
      # zone, scanned row-major
      for(i in seq(1, nrow(csv), by = widthOfZone))
      {
        for(j in seq(1, ncol(csv), by = widthOfZone))
        {
          sum <- 0
          density <- 0
          for(k in csv[seq(i, i+widthOfZone-1), seq(j, j+widthOfZone-1)])
          {
            sum <- sum+k
          }
          density <- sum/(widthOfZone*widthOfZone)
          featureVector<-c(featureVector, density)
        }
      }
      #Declaring Chords and their Flags to calculate Intersection Points
      # Each chord counts 0 -> nonzero transitions along a line through the
      # image; the flag carries the previous pixel value seen on the chord.
      #   chord1: main diagonal (i == j)
      #   chord2: the line i + j == imageDimension - 1
      #     NOTE(review): with 1-based indices the true anti-diagonal is
      #     i + j == imageDimension + 1 — confirm this offset is intended.
      #   chord3: vertical mid-line (column imageDimension/2)
      #   chord4: horizontal mid-line (row imageDimension/2)
      chord1<-0
      chord1Flag<-csv[1,1]
      chord2<-0
      chord2Flag<-csv[1,imageDimension]
      chord3<-0
      chord3Flag<-csv[1,(imageDimension/2)]
      chord4<-0
      chord4Flag<-csv[(imageDimension/2),1]
      for(i in seq(1,imageDimension))
      {
        for(j in seq(1,imageDimension))
        {
          if(i == j)
          {
            if(chord1Flag==0 && csv[i,j]>0)
            {
              chord1Flag<-1
              chord1<-chord1+1
            }
            else
            {
              chord1Flag<-csv[i,j]
            }
          }
          if((i+j)==imageDimension-1)
          {
            if(chord2Flag==0 && csv[i,j]>0)
            {
              chord2Flag<-1
              chord2<-chord2+1
            }
            else
            {
              chord2Flag<-csv[i,j]
            }
          }
          if(j==(imageDimension/2))
          {
            if(chord3Flag==0 && csv[i,j]>0)
            {
              chord3Flag<-1
              chord3<-chord3+1
            }
            else
            {
              chord3Flag<-csv[i,j]
            }
          }
          if(i==(imageDimension/2))
          {
            if(chord4Flag==0 && csv[i,j]>0)
            {
              chord4Flag<-1
              chord4<-chord4+1
            }
            else
            {
              chord4Flag<-csv[i,j]
            }
          }
        }
      }
      #Appending Feature values to the Feature-Set
      featureVector<-c(featureVector, chord1)
      featureVector<-c(featureVector, chord2)
      featureVector<-c(featureVector, chord3)
      featureVector<-c(featureVector, chord4)
      featureVector<-c(featureVector, dir)
      # NOTE(review): rbind() per image grows featureTable in O(n^2); fine
      # at this scale, but collect rows in a list if the dataset grows.
      featureTable<-rbind(featureTable, featureVector)
      insertIntoDiscretizedFeatureTable(featureVector, databaseConnection)
      progressValue<-progressValue+1
      setWinProgressBar(progressBar, progressValue, title=paste("Generating Discretized Feature-Set: ", round(progressValue/progressTotal*100, 0), "% done", sep = ""))
    }
  }
  close(progressBar)
  #Defining Column Names
  # union(seq(1,20), "class") coerces to a length-21 character vector
  colnames(featureTable)<-union(seq(1,20), "class")
  #Writing the Feature-Set
  write.table(featureTable, file = file.path(paste(rootWriteLocation, "Discretized Features.csv", sep = "/")), sep = ",", col.names = TRUE, row.names = FALSE)
  print("Feature-Set Generated")
  print(">Feature 1-16 represents Zone Densities")
  print(">Feature 17-20 represents number of Chord Intersections")
  print(">Feature 21 represents the Class")
}
# Binary-image counterpart of createDiscretizedFeature(): identical zone
# density + chord-intersection feature extraction, but reads the 0/1 CSVs
# from binaritize(), inserts via insertIntoBinaritizedFeatureTable(), and
# writes "Binaritized Features.csv".
# NOTE(review): the function name is spelled "Binaratized" while everything
# else in the file uses "Binaritized" — kept for compatibility; consider a
# coordinated rename. The body duplicates createDiscretizedFeature() almost
# verbatim; see the detailed comments there.
createBinaratizedFeature <- function(rootReadLocation, rootWriteLocation, databaseConnection)
{
  dirs <- list.dirs(rootReadLocation, full.names = FALSE, recursive = FALSE)
  #Declaring essential Values
  # NOTE(review): hard-coded progress total; `filename`/`filenumber` unused.
  progressTotal<-3000
  progressValue<-0
  imageDimension<-32
  widthOfZone <- 8
  filename <- 1
  progressBar <- winProgressBar(title = "Generating Binaritized Feature-Set", min = 0, max = progressTotal, width = 500)
  # 16 zone densities + 4 chord counts + 1 class label
  featureTable<-matrix(nrow = 0, ncol = ((imageDimension/widthOfZone)*(imageDimension/widthOfZone))+4+1)
  for(dir in dirs)
  {
    filenumber <- 1
    csvs <- list.files(file.path(rootReadLocation, dir), full.names = TRUE)
    for(csv in csvs)
    {
      csv <- read.table(csv, header = FALSE, sep = ',')
      csv <- as.matrix(csv)
      featureVector <- vector(length =0)
      # Zone densities (here: fraction of set pixels, since input is 0/1)
      for(i in seq(1, nrow(csv), by = widthOfZone))
      {
        for(j in seq(1, ncol(csv), by = widthOfZone))
        {
          sum <- 0
          density <- 0
          for(k in csv[seq(i, i+widthOfZone-1), seq(j, j+widthOfZone-1)])
          {
            sum <- sum+k
          }
          density <- sum/(widthOfZone*widthOfZone)
          featureVector<-c(featureVector, density)
        }
      }
      #Declaring Chords and their Flags to calculate Intersection Points
      # (same chord definitions and anti-diagonal caveat as in
      # createDiscretizedFeature())
      chord1<-0
      chord1Flag<-csv[1,1]
      chord2<-0
      chord2Flag<-csv[1,imageDimension]
      chord3<-0
      chord3Flag<-csv[1,(imageDimension/2)]
      chord4<-0
      chord4Flag<-csv[(imageDimension/2),1]
      for(i in seq(1,imageDimension))
      {
        for(j in seq(1,imageDimension))
        {
          if(i == j)
          {
            if(chord1Flag==0 && csv[i,j]>0)
            {
              chord1Flag<-1
              chord1<-chord1+1
            }
            else
            {
              chord1Flag<-csv[i,j]
            }
          }
          if((i+j)==imageDimension-1)
          {
            if(chord2Flag==0 && csv[i,j]>0)
            {
              chord2Flag<-1
              chord2<-chord2+1
            }
            else
            {
              chord2Flag<-csv[i,j]
            }
          }
          if(j==(imageDimension/2))
          {
            if(chord3Flag==0 && csv[i,j]>0)
            {
              chord3Flag<-1
              chord3<-chord3+1
            }
            else
            {
              chord3Flag<-csv[i,j]
            }
          }
          if(i==(imageDimension/2))
          {
            if(chord4Flag==0 && csv[i,j]>0)
            {
              chord4Flag<-1
              chord4<-chord4+1
            }
            else
            {
              chord4Flag<-csv[i,j]
            }
          }
        }
      }
      #Appending Feature values to the Feature-Set
      featureVector<-c(featureVector, chord1)
      featureVector<-c(featureVector, chord2)
      featureVector<-c(featureVector, chord3)
      featureVector<-c(featureVector, chord4)
      featureVector<-c(featureVector, dir)
      featureTable<-rbind(featureTable, featureVector)
      insertIntoBinaritizedFeatureTable(featureVector, databaseConnection)
      progressValue<-progressValue+1
      setWinProgressBar(progressBar, progressValue, title=paste("Generating Binaritized Feature-Set: ", round(progressValue/progressTotal*100, 0), "% done", sep = ""))
    }
  }
  close(progressBar)
  #Defining Column Names
  colnames(featureTable)<-union(seq(1,20), "class")
  #Writing the Feature-Set
  write.table(featureTable, file = file.path(paste(rootWriteLocation, "Binaritized Features.csv", sep = "/")), sep = ",", col.names = TRUE, row.names = FALSE)
  print("Feature-Set Generated")
  print(">Feature 1-16 represents Zone Densities")
  print(">Feature 17-20 represents number of Chord Intersections")
  print(">Feature 21 represents the Class")
}
#Perform Random Forest on Feature-Set
# Fit a random forest classifier (class ~ all features) on a feature CSV file
# and print the fitted model summary. `rootLocation` is the path to the CSV.
performRandomForestFromFile <- function(rootLocation) {
  feature.data <- read.csv(rootLocation)
  fit <- randomForest(formula = class ~ ., data = feature.data)
  print(fit)
}
# Fit a random forest classifier (class ~ all features) on an in-memory
# feature table and print the fitted model summary.
performRandomForestFromData <- function(data) {
  fit <- randomForest(formula = class ~ ., data = data)
  print(fit)
}
# Open a MySQL connection to the OCR database and return it.
#
# Generalized: connection details are now parameters with the original
# hard-coded values as defaults, so existing no-argument calls behave
# identically while other credentials/databases become usable.
# NOTE(review): an empty root password is insecure — confirm deployment setup.
connectToDatabase <- function(user = "root", password = "", dbname = "HindiOCR", host = "localhost")
{
  databaseConnection <- dbConnect(MySQL(), user = user, password = password, dbname = dbname, host = host)
  print("Connection Established")
  return(databaseConnection)
}
# Close a database connection.
#
# Generalized: the connection is now an explicit parameter. The default keeps
# the original behavior (looking up the global `databaseConnection`), so
# existing no-argument calls still work, but callers can now pass any
# connection instead of relying on a global.
disconnectToDatabase <- function(connection = databaseConnection)
{
  dbDisconnect(connection)
  print("Database Disconnected")
}
# (Re)create the `discretized_features` table: 16 zone-density columns,
# 4 chord-count columns and a class label. Any existing copy is dropped
# first so the schema is always fresh.
createDiscretizedFeatureTable <- function(databaseConnection)
{
  table.name <- "discretized_features"
  if (dbExistsTable(databaseConnection, table.name)) {
    dbSendQuery(databaseConnection, paste("drop table", table.name, sep = " "))
  }
  dbSendQuery(databaseConnection, "create table discretized_features(zone1 float, zone2 float, zone3 float, zone4 float, zone5 float, zone6 float, zone7 float, zone8 float,zone9 float, zone10 float, zone11 float, zone12 float, zone13 float, zone14 float, zone15 float, zone16 float, chord1 integer, chord2 integer, chord3 integer, chord4 integer, class varchar(10))")
  print("Discretized Table Created")
}
# (Re)create the `binaritized_features` table: 16 zone-density columns,
# 4 chord-count columns and a class label. Any existing copy is dropped
# first so the schema is always fresh.
createBinaritizedFeatureTable <- function(databaseConnection)
{
  table.name <- "binaritized_features"
  if (dbExistsTable(databaseConnection, table.name)) {
    dbSendQuery(databaseConnection, paste("drop table", table.name, sep = " "))
  }
  dbSendQuery(databaseConnection, "create table binaritized_features(zone1 float, zone2 float, zone3 float, zone4 float, zone5 float, zone6 float, zone7 float, zone8 float,zone9 float, zone10 float, zone11 float, zone12 float, zone13 float, zone14 float, zone15 float, zone16 float, chord1 integer, chord2 integer, chord3 integer, chord4 integer, class varchar(10))")
  print("Binaritized Table Created")
}
# Return a character vector of the table names in the connected database.
listTables <- function(databaseConnection)
{
  dbListTables(databaseConnection)
}
# Insert one feature vector into `discretized_features`.
#
# featureVector: 16 zone densities, 4 chord counts, then the class label
#                (the last element is quoted as a SQL string).
# databaseConnection: an open DBI connection.
#
# Improvement: the element-by-element string-growing loop is replaced with a
# single paste(collapse = ", "), producing the identical query text.
# NOTE(review): values are interpolated directly into SQL. featureVector comes
# from internal feature extraction here, but parameterized queries
# (dbSendQuery with params / dbBind) would be safer if inputs were untrusted.
insertIntoDiscretizedFeatureTable <- function(featureVector, databaseConnection)
{
  n <- length(featureVector)
  numericPart <- paste(featureVector[seq_len(n - 1)], collapse = ", ")
  classPart <- paste0("'", featureVector[n], "'")
  query <- paste0(
    "insert into discretized_features(zone1, zone2, zone3, zone4, zone5, zone6, zone7, zone8, zone9, zone10, zone11, zone12, zone13, zone14, zone15, zone16, chord1, chord2, chord3, chord4, class)",
    " values (", numericPart, ", ", classPart, ");"
  )
  dbSendQuery(databaseConnection, query)
}
# Insert one feature vector into `binaritized_features`.
#
# featureVector: 16 zone densities, 4 chord counts, then the class label
#                (the last element is quoted as a SQL string).
# databaseConnection: an open DBI connection.
#
# Improvement (kept consistent with insertIntoDiscretizedFeatureTable): the
# string-growing loop is replaced with paste(collapse = ", "), producing the
# identical query text.
# NOTE(review): values are interpolated directly into SQL; parameterized
# queries would be safer if inputs were ever untrusted.
insertIntoBinaritizedFeatureTable <- function(featureVector, databaseConnection)
{
  n <- length(featureVector)
  numericPart <- paste(featureVector[seq_len(n - 1)], collapse = ", ")
  classPart <- paste0("'", featureVector[n], "'")
  query <- paste0(
    "insert into binaritized_features(zone1, zone2, zone3, zone4, zone5, zone6, zone7, zone8, zone9, zone10, zone11, zone12, zone13, zone14, zone15, zone16, chord1, chord2, chord3, chord4, class)",
    " values (", numericPart, ", ", classPart, ");"
  )
  dbSendQuery(databaseConnection, query)
}
#createDirectoryDiscretize(rootReadLocation, rootDiscretizedLocation)
#discretize(rootReadLocation,rootDiscretizedLocation)
#createDirectoryBinaritize(rootDiscretizedLocation, rootBinaritizedLocation)
#binaritize(rootDiscretizedLocation,rootBinaritizedLocation)
#databaseConnection<-connectToDatabase()
#createDiscretizedFeatureTable(databaseConnection)
#createBinaritizedFeatureTable(databaseConnection)
#listOfTables<-listTables(databaseConnection)
#createDirectoryFeature(rootFeaturesLocation)
#createDiscretizedFeature(rootDiscretizedLocation, rootFeaturesLocation, databaseConnection)
#createBinaratizedFeature(rootBinaritizedLocation, rootFeaturesLocation, databaseConnection)
#performRandomForestFromFile(paste(rootFeaturesLocation, "Discretized Features.csv", sep="/"))
#performRandomForestFromFile(paste(rootFeaturesLocation, "Binaritized Features.csv", sep="/"))
#disconnectToDatabase()
|
82c623924d2c7dcd491cbc402d25827f9ccd4b0d
|
75d303983f1789b3b26b433117d9360001cd37d9
|
/BLG 527E - Machine Learning/hw/hw1/1079026/q2d.R
|
7775eecb40dd006abae16a2d67e1aecfcc1385b4
|
[] |
no_license
|
tugrulyatagan/itu_comp_eng_lectures
|
22452ef2af569bbc89de68809595bac5f992d89a
|
c6f62e142a7df5aaef68e3345833f2092b2f2364
|
refs/heads/master
| 2022-12-15T21:18:18.519189
| 2020-09-20T22:21:46
| 2020-09-20T22:21:46
| 297,064,338
| 14
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 404
|
r
|
q2d.R
|
# BLG 527E Machine Learning HW1 Q2d
# Author: Tugrul Yatagan 504161551
# Half-width of the plotting interval [-range, range].
# NOTE(review): this shadows base::range() within this script.
range <- 10
# Log discriminant for class 1: standard normal N(0, 1) log-density plus
# log prior P(C1) = 0.2. Vectorized over x.
g1_x_func <- function(x) {
  log.norm.const <- log(1 / (sqrt(2 * pi)))
  log.prior <- log(0.2)
  log.norm.const - (x^2) / 2 + log.prior
}
# Log discriminant for class 2: normal N(1, 4) log-density (sd = 2) plus
# log prior P(C2) = 0.8. Vectorized over x.
g2_x_func <- function(x) {
  log.norm.const <- log(1 / (2 * sqrt(2 * pi)))
  log.prior <- log(0.8)
  log.norm.const - ((x - 1)^2) / 8 + log.prior
}
# Draw both discriminant curves over [-range, range]; add = T overlays g2
# on the axes created for g1.
plot(g1_x_func, -range, range, col='blue', main = "Q2d: g1(x)[Blue] and g2(x)[Red]", ylab = "gi(x)")
plot(g2_x_func, -range, range, col='red', add = T)
|
c402731dff1be6eda35eb38f40a152acf06b7769
|
1e86a06ed13301e7663d81a0c163f07d5ac74394
|
/man/getModEqn.Rd
|
7e69d84d46e26b19b26250d02cebe3e57c89f0ae
|
[] |
no_license
|
cran/modEvA
|
9184b2b9f43feb25e26e48d3731ea01b037d8c5a
|
970384a0efde2e38d24ff9fc6c0237856abd351d
|
refs/heads/master
| 2023-05-01T03:08:02.824453
| 2023-04-14T22:10:02
| 2023-04-14T22:10:02
| 63,406,050
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,830
|
rd
|
getModEqn.Rd
|
\name{getModEqn}
\alias{getModEqn}
\title{
Get model equation
}
\description{
This function retrieves the equation of a model, to print or apply elsewhere.
}
\usage{
getModEqn(model, type = "Y", digits = NULL, prefix = NULL,
suffix = NULL)
}
\arguments{
\item{model}{
a model object of class 'lm' or glm'.
}
\item{type}{
the type of equation to get; can be either "Y" (the default, for the linear model equation), "P" (for probability) or "F" (for favourability).
}
\item{digits}{
the number of significant digits to which to round the coefficient estimates in the equation.
}
\item{prefix}{
the prefix to add to each variable name in the equation.
}
\item{suffix}{
the suffix to add to each variable name in the equation.
}
}
\details{
The summary of a model in R gives you a table of the coefficient estimates and other parameters. Sometimes it may be useful to have a string of text with the model's equation, so that you can present it in an article (e.g. Real et al. 2005) or apply it in a (raster map) calculation, either in R (although here you can usually use the 'predict' function for this) or in a GIS software (e.g. Barbosa et al. 2010). The \code{getModEqn} function gets this equation for linear or generalized linear models.
By default it prints the \code{"Y"} linear equation, but for generalized linear models you can also set \code{type = "P"} (for the equation of probability) or \code{type = "F"} (for favourability, which modifies the intercept to eliminate the effect of modelled prevalence - see Real et al. 2006).
If the variables to which you want to apply the model have a prefix or suffix (e.g. something like prefix = "raster.stack$" for the R 'raster' or 'terra' package, or prefix = "mydata$" for a data frame, or suffix = "@1" in QGIS, or suffix = "@mapset" in GRASS), you can get these in the equation too, using the \code{prefix} and/or the \code{suffix} argument.
}
\value{
A character string of the model equation.
}
\references{
Barbosa A.M., Real R. & Vargas J.M. (2010) Use of coarse-resolution models of species' distributions to guide local conservation inferences. Conservation Biology 24: 1378-87
Real R., Barbosa A.M., Martinez-Solano I. & Garcia-Paris, M. (2005) Distinguishing the distributions of two cryptic frogs (Anura: Discoglossidae) using molecular data and environmental modeling. Canadian Journal of Zoology 83: 536-545
Real R., Barbosa A.M. & Vargas J.M. (2006) Obtaining environmental favourability functions from logistic regression. Environmental and Ecological Statistics 13: 237-245
}
\author{
A. Marcia Barbosa
}
\examples{
# load sample models:
data(rotif.mods)
# choose a particular model to play with:
mod <- rotif.mods$models[[1]]
getModEqn(mod)
getModEqn(mod, type = "P", digits = 3, suffix = "@mapset")
getModEqn(mod, type = "F", digits = 2)
}
|
a23f7767c8cb3c4cd4870d2a30302c677f68e3ba
|
8293de8755c004ecd43613d523327ac5065523e7
|
/Validation Phase II/Cross Ensemble/valtest_sekc20.R
|
9a4dcfc819c46903b0724e971af425660da5ca7e
|
[] |
no_license
|
HarshaDaparti/Capstone_ClusterEnsembles
|
21aa4c2c1ece720e2b797ef8f9cc740fe3b4a227
|
fe20203accf65a89d0fa4cc13e2a92036db929a5
|
refs/heads/master
| 2022-07-11T13:25:09.597640
| 2020-05-21T18:36:45
| 2020-05-21T18:36:45
| 265,625,270
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 533
|
r
|
valtest_sekc20.R
|
# Cross-ensemble validation for k = 20 (Euclidean).
# NOTE(review): assumes a data frame `g` (feature columns 1:17), a clustering
# result `k20e` (with a $cluster vector), and a palette `p2` already exist in
# the workspace, and that the 'cluster' package is attached — confirm callers.
g$cluster=k20e$cluster
# Order observations by their cluster assignment.
gke20<-g[order(g$cluster),]
# Feature-only views: drop the appended cluster column (keep columns 1:17).
gke20_pro=gke20[, 1:17]
g=g[, 1:17]
# CLARA clustering (k = 20) of the cluster-ordered feature set.
gboostekc20=clara(gke20_pro, 20, metric = "euclidean", stand = FALSE, samples = 100, sampsize = 50,
                  trace = 0, rngR = FALSE, pamLike = TRUE, correct.d = TRUE)
# Silhouette of the original k20e assignment on the raw features.
dekc20=daisy(g, metric = "euclidean")
sekc20=silhouette(k20e$cluster, dekc20)
plot(sekc20, col = p2,border=NA)
# Silhouette of the CLARA re-clustering on the ordered features.
dekc20_pro=daisy(gke20_pro, metric = "euclidean")
sekc20_pro=silhouette(gboostekc20$clustering, dekc20_pro)
plot(sekc20_pro, col = p2,border=NA)
|
e0a8ff479553f350a4fae6f8a6ac83dd9050e65d
|
37fcfce951487d3ba45d2ba3dbc22b6360939b77
|
/man/franchise_leaders.Rd
|
7ac5d803f293e2d8a79935ce271a6a249df6ca55
|
[] |
no_license
|
abresler/nbastatR
|
576155fb58378c03010590c81806515161c03eb5
|
aba9179ef644f263387c1536d6ddd26104d79cf4
|
refs/heads/master
| 2023-08-08T08:52:05.149224
| 2023-07-19T13:09:21
| 2023-07-19T13:09:21
| 43,844,510
| 307
| 94
| null | 2023-02-01T16:59:22
| 2015-10-07T21:00:59
|
R
|
UTF-8
|
R
| false
| true
| 1,144
|
rd
|
franchise_leaders.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/league_leaders.R
\name{franchise_leaders}
\alias{franchise_leaders}
\title{Franchise leaders}
\usage{
franchise_leaders(
teams = NULL,
all_teams = FALSE,
remove_inactive_teams = F,
modes = "Totals",
season_types = c("Regular Season"),
return_message = TRUE,
nest_data = FALSE
)
}
\arguments{
\item{teams}{vector of team names}
\item{all_teams}{if \code{TRUE} returns all teams}
\item{remove_inactive_teams}{if \code{TRUE} removes inactive teams}
\item{modes}{mode of search \itemize{
\item Totals
\item Per Game
}}
\item{season_types}{type of season \itemize{
\item Regular Season
\item Playoffs
\item Pre Season
}}
}
\value{
a \code{tibble}
}
\description{
Gets franchise leader information for
by specified input for specific teams
}
\examples{
franchise_leaders(teams = "Brooklyn Nets", modes = c("Totals"))
}
\seealso{
Other teams:
\code{\link{drafts}()},
\code{\link{seasons_rosters}()},
\code{\link{teams_players_stats}()},
\code{\link{teams_rankings}()}
Other leaders:
\code{\link{metrics_leaders}()}
}
\concept{leaders}
\concept{teams}
|
5559d9ae70a3525f9304a7ee091a75d68403ad6f
|
396a0112c7f3a76303324b820e131381a2a4fcae
|
/R/weblmInit.R
|
8f7f59e141a4a2d315e23af711edcd458c01ee33
|
[
"MIT"
] |
permissive
|
cran/mscsweblm4r
|
940c4895480b8e013aa48b4fb57d7fb31815cea2
|
2b33b3c3756109c604193419925d938d97b3536d
|
refs/heads/master
| 2021-01-21T14:32:58.816525
| 2016-06-15T22:02:43
| 2016-06-15T22:02:43
| 59,103,575
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,072
|
r
|
weblmInit.R
|
#' @title Initializes the \pkg{mscsweblm4r} package.
#'
#' @description This function initializes the Microsoft Cognitive Services Web
#' Language Model REST API key and URL by reading them either from a
#' configuration file or environment variables.
#'
#' This function \strong{MUST} be called right after package load and before calling
#' any \pkg{mscsweblm4r} core functions, or these functions will fail.
#'
#' The \code{\link{weblmInit}} configuration function will first check to see
#' if the variable \code{MSCS_WEBLANGUAGEMODEL_CONFIG_FILE} exists in the system
#' environment. If it does, the package will use that as the path to the
#' configuration file.
#'
#' If \code{MSCS_WEBLANGUAGEMODEL_CONFIG_FILE} doesn't exist, it will look for
#' the file \code{.mscskeys.json} in the current user's home directory (that's
#' \code{~/.mscskeys.json} on Linux, and something like \code{C:/Users/Phil/Documents/.mscskeys.json}
#' on Windows). If the file is found, the package will load the API key and URL
#' from it.
#'
#' If using a file, please make sure it has the following structure:
#'
#' \preformatted{
#' {
#' "weblanguagemodelurl": "https://api.projectoxford.ai/text/weblm/v1.0/",
#' "weblanguagemodelkey": "...MSCS Web Language Model API key goes here..."
#' }
#' }
#'
#' If no configuration file is found, \code{\link{weblmInit}} will attempt to
#' pick up its configuration information from two Sys env variables instead:
#'
#' \code{MSCS_WEBLANGUAGEMODEL_URL} - the URL for the Web LM REST API.
#'
#' \code{MSCS_WEBLANGUAGEMODEL_KEY} - your personal Web LM REST API key.
#'
#' \code{\link{weblmInit}} needs to be called \emph{only once}, after package
#' load.
#'
#' @export
#'
#' @author Phil Ferriere \email{pferriere@hotmail.com}
#'
#' @examples \dontrun{
#' weblmInit()
#' }
weblmInit <- function() {
  # Preferred source: a JSON config file (path resolved by weblmGetConfigFile:
  # env var MSCS_WEBLANGUAGEMODEL_CONFIG_FILE, else ~/.mscskeys.json).
  configFile <- weblmGetConfigFile()
  if (file.exists(configFile)) {
    weblm <- jsonlite::fromJSON(configFile)
    # Both fields must be present; on any failure the package env is reset to
    # NULL so later calls fail fast rather than using a half-built config.
    if (is.null(weblm[["weblanguagemodelkey"]])) {
      assign("weblm", NULL, envir = .weblmpkgenv)
      stop(paste0("mscsweblm4r: Field 'weblanguagemodelkey' either empty or missing from ", configFile), call. = FALSE)
    } else if (is.null(weblm[["weblanguagemodelurl"]])) {
      assign("weblm", NULL, envir = .weblmpkgenv)
      stop(paste0("mscsweblm4r: Field 'weblanguagemodelurl' either empty or missing from ", configFile), call. = FALSE)
    } else {
      # Remember where the config came from, then publish it package-wide.
      weblm[["weblanguagemodelconfig"]] <- configFile
      assign("weblm", weblm, envir = .weblmpkgenv)
    }
  } else {
    # Fallback source: individual environment variables (empty string when unset).
    weblm <- list(
      weblanguagemodelkey = Sys.getenv("MSCS_WEBLANGUAGEMODEL_KEY", ""),
      weblanguagemodelurl = Sys.getenv("MSCS_WEBLANGUAGEMODEL_URL", ""),
      weblanguagemodelconfig = ""
    )
    if (weblm[["weblanguagemodelkey"]] == "" || weblm[["weblanguagemodelurl"]] == "") {
      assign("weblm", NULL, envir = .weblmpkgenv)
      stop("mscsweblm4r: could not load config info from Sys env nor from file", call. = FALSE)
    } else {
      assign("weblm", weblm, envir = .weblmpkgenv)
    }
  }
}
## The next seven \pkg{mscsweblm4r} internal functions are used to facilitate
## configuration and assist with error handling:
##
## \itemize{
## \item API URL configuration - \code{\link{weblmGetURL}}, \code{\link{weblmSetURL}} functions
## \item API key configuration - \code{\link{weblmGetKey}}, \code{\link{weblmSetKey}} functions
## \item Package configuration file - \code{\link{weblmGetConfigFile}}, \code{\link{weblmSetConfigFile}} functions
## \item Httr assist - \code{\link{weblmHttr}} function
## }
##
## @title Retrieves the Microsoft Cognitive Services Web Language Model REST API key.
##
## Do not call this internal function outside this package.
##
## @return A character string with the value of the API key.
##
## @author Phil Ferriere \email{pferriere@hotmail.com}
##
## @examples \dontrun{
## weblmGetKey()
## }
## Internal: return the configured REST API key, or fail fast when
## weblmInit() has not populated the package environment.
weblmGetKey <- function() {
  if (is.null(.weblmpkgenv$weblm)) {
    stop("mscsweblm4r: REST API key not found in package environment.", call. = FALSE)
  }
  .weblmpkgenv$weblm[["weblanguagemodelkey"]]
}
## @title Retrieves the Microsoft Cognitive Services Web Language Model REST API base URL.
##
## @return A character string with the value of the REST API base URL.
##
## Do not call this internal function outside this package.
##
## @author Phil Ferriere \email{pferriere@hotmail.com}
##
## @examples \dontrun{
## weblmGetURL()
## }
## Internal: return the configured REST API base URL, or fail fast when
## weblmInit() has not populated the package environment.
weblmGetURL <- function() {
  if (is.null(.weblmpkgenv$weblm)) {
    stop("mscsweblm4r: REST API URL not found in package environment.", call. = FALSE)
  }
  .weblmpkgenv$weblm[["weblanguagemodelurl"]]
}
## @title Retrieves the path to the configuration file.
##
## @return A character string with the path to the configuration file. This path
## may be empty if the package was configured using environment variables.'
##
## Do not call this internal function outside this package.
##
## @author Phil Ferriere \email{pferriere@hotmail.com}
##
## @examples \dontrun{
## weblmGetConfigFile()
## }
## Internal: return the configuration-file path. If the package environment
## is initialized, use the recorded path; otherwise fall back to the
## MSCS_WEBLANGUAGEMODEL_CONFIG_FILE env var, then to ~/.mscskeys.json when
## that file exists. May return "" if nothing is configured.
weblmGetConfigFile <- function() {
  if (!is.null(.weblmpkgenv$weblm)) {
    return(.weblmpkgenv$weblm[["weblanguagemodelconfig"]])
  }
  configPath <- Sys.getenv("MSCS_WEBLANGUAGEMODEL_CONFIG_FILE", "")
  if (configPath == "" && file.exists("~/.mscskeys.json")) {
    configPath <- "~/.mscskeys.json"
  }
  configPath
}
## @title Sets the Microsoft Cognitive Services Web Language Model REST API key.
##
## @description This function sets the Microsoft Cognitive Services Web Language
## Model REST API key. It is only used for testing purposes, to make sure that
## the package fails with an error when using an invalid key.
##
## Do not call this internal function outside this package.
##
## @param key (character) REST API key to use
##
## @author Phil Ferriere \email{pferriere@hotmail.com}
##
## @examples \dontrun{
## mscsweblm4r:::weblmSetKey("invalid-key")
## }
## Internal (testing only): overwrite the stored REST API key so failure
## paths can be exercised with an invalid key.
weblmSetKey <- function(key) {
  if (is.null(.weblmpkgenv$weblm)) {
    stop("mscsweblm4r: The package wasn't initialized properly.", call. = FALSE)
  }
  .weblmpkgenv$weblm[["weblanguagemodelkey"]] <- key
}
## @title Sets the Microsoft Cognitive Services Web Language Model REST API URL.
##
## @description This function sets the Microsoft Cognitive Services Web Language
## Model REST API URL. It is only used for testing purposes, to make sure that
## the package fails with an error when the URL is misconfigured.
##
## Do not call this internal function outside this package.
##
## @param url (character) REST API URL to use
##
## @author Phil Ferriere \email{pferriere@hotmail.com}
##
## @examples \dontrun{
## mscsweblm4r:::weblmSetURL("invalid-URL")
## }
## Internal (testing only): overwrite the stored REST API URL so failure
## paths can be exercised with a misconfigured URL.
weblmSetURL <- function(url) {
  if (is.null(.weblmpkgenv$weblm)) {
    stop("mscsweblm4r: The package wasn't initialized properly.", call. = FALSE)
  }
  .weblmpkgenv$weblm[["weblanguagemodelurl"]] <- url
}
## @title Sets the file path for the configuration file.
##
## @description This function sets the file path for the configuration file. It
## is only used for testing purposes, to make sure that the package fails
## gracefully when the the configuration file is missing/compromised.
##
## Do not call this internal function outside this package.
##
## @param path (character) File path for the configuration file
##
## @author Phil Ferriere \email{pferriere@hotmail.com}
##
## @examples \dontrun{
## weblmSetConfigFile("invalid-path")
## }
## Internal (testing only): overwrite the stored configuration-file path so
## missing/compromised-config scenarios can be exercised.
weblmSetConfigFile <- function(path) {
  if (is.null(.weblmpkgenv$weblm)) {
    stop("mscsweblm4r: The package wasn't initialized properly.", call. = FALSE)
  }
  .weblmpkgenv$weblm[["weblanguagemodelconfig"]] <- path
}
|
a38849fd5f4dfa7a4ec3bd30a855fc7f52583384
|
bc73b51699fb07f7dc81ea189cfbc2e086ffa794
|
/R/plot_shapley.R
|
c898a111f56a9509b06a9f55adfacb9351af75dd
|
[] |
no_license
|
redichh/ShapleyR
|
ec6c8bfac9925ac6bdc89110c8ba48072bf5278c
|
a977c041519e19f9b7f230e47c79235b3194d4f0
|
refs/heads/master
| 2021-07-06T04:58:19.363784
| 2019-03-05T22:55:16
| 2019-03-05T22:55:16
| 113,659,609
| 29
| 10
| null | 2019-03-05T22:55:17
| 2017-12-09T10:02:17
|
R
|
UTF-8
|
R
| false
| false
| 5,908
|
r
|
plot_shapley.R
|
#' Plots the difference for a single observation. Works only with `task.type` "regr".
#'
#' @description This method draws a plot for the data.mean, the observed value
#'  and describes the influence of all features/variables for this difference.
#' @param shap.values A shapley object (generated by the shapley function) that contains
#'  the shapley.values and other important information about the task and model.
#' @param shap.id (optional) Determines what observation should be taken for plotting, if
#'  shap.values have multiple observations.
#' @export
plot.shapley.singleValue = function(shap.values, shap.id=-1) {
  # Resolve which row (`at`) to plot; fall back to the first observation when
  # the requested id is absent or when multiple observations are present
  # without a valid id.
  if(shap.id != -1 & !shap.id %in% getShapleyIds(shap.values)) {
    print(paste("Warning: Could not find _Id <", shap.id, "> in shap.values!"))
    shap.id = getShapleyIds(shap.values)[1]
    print(paste("First observation with _Id <", shap.id, "> is used from given shap.values."))
    at = which(getShapleyValues(shap.values)$"_Id" == shap.id)
  } else if(dim(getShapleyValues(shap.values))[1] == 1) {
    at = 1
  } else if(shap.id %in% getShapleyIds(shap.values)) {
    at = which(getShapleyValues(shap.values)$"_Id" == shap.id)
  } else {
    print("Warning: shap.values contains too many observations..")
    shap.id = getShapleyIds(shap.values)[1]
    at = 1
    print(paste("First observation with _Id <", shap.id, "> is used from given shap.values."))
  }
  data.mean = getShapleyDataMean(shap.values)
  shap.values = getShapleySubsetByResponseClass(shap.values)
  # Shapley values of the chosen observation, one per feature.
  data = getShapleyValues(shap.values)[at, getShapleyFeatureNames(shap.values)]
  # Cumulative label positions around the data mean (see compute.shapley.positions).
  points = compute.shapley.positions(data, data.mean)
  # Horizontal band coloured by value, feature labels alternating above/below,
  # and a black dot marking the model's predicted response.
  plot = ggplot(points, aes(x = values, y = 0)) +
    coord_cartesian(ylim = c(-.4, .4)) +
    scale_colour_gradient2(low = "#832424FF", high = "#3A3A98FF", mid = "lightgrey", midpoint = data.mean) +
    geom_line(aes(colour = values), size = 30) +
    geom_label(aes(label = names), angle = 70, nudge_y = rep(c(.1, -.1), times = nrow(points))[1:nrow(points)]) +
    geom_point(aes(x = getShapleyPredictionResponse(shap.values)[at], y = 0.1), colour = "black", size = 3) +
    theme(axis.title.y = element_blank(),
          axis.text.y = element_blank(),
          axis.ticks.y = element_blank(),
          legend.position = "none")
  return(plot)
}
#' Plots a graph that shows the expected values, observed values and their difference.
#'
#' @description This method draws a plot that shows the mean of all observations, the
#'  observed values and the estimated values over multiple observations.
#' @param shap.values A shapley object (generated by shapley(...)) that contains
#'  the shapley.values and other important information about the task and model.
#' @export
plot.shapley.multipleValues = function(shap.values) {
  # Per-observation shapley values, one column per feature.
  values = getShapleyValues(shap.values)[,getShapleyFeatureNames(shap.values)]
  data.mean = getShapleyDataMean(shap.values)
  # One row per observation: summed positive effects, summed negative effects,
  # plot position (observation id) and a colour flag.
  data.names = c("response.plus", "response.minus", "position", "color")
  data = data.frame(matrix(data = 0, nrow = nrow(values), ncol = length(data.names)))
  names(data) = data.names
  # Sum of the positive feature contributions per observation.
  data$response.plus = rowSums(apply(values, 1:2, FUN = function(x) {max(0, x)}))
  # Sum of the negative feature contributions per observation.
  data$response.minus = rowSums(apply(values, 1:2, FUN = function(x) {min(0, x)}))
  data$position = as.numeric(getShapleyIds(shap.values))
  # Red when negative effects dominate the positive ones, green otherwise.
  data$color = ifelse(data$response.plus < abs(data$response.minus), "red", "green")
  # Mean line plus upper/lower effect envelopes; the ribbon shades the net
  # effect (data mean vs. mean + summed shapley values).
  ggplot() +
    geom_line(data = data, aes(x = position, y = data.mean, colour = "data mean")) +
    geom_line(data = data, aes(x = position, y = data.mean + response.plus,
                               colour = "positive effects")) +
    geom_line(data = data, aes(x = position, y = data.mean + response.minus,
                               colour = "negative effects")) +
    geom_ribbon(data = data, aes(x = position, ymax = data.mean,
                                 ymin = data.mean + rowSums(values)), fill = "blue", alpha = .2)
}
#' Compute label positions for plot.shapley.singleValue.
#'
#' @description Splits the per-feature contributions by sign, accumulates each
#'  group into cumulative plotting coordinates around zero, applies the shift
#'  (typically the data mean), and tags every point with its feature name and
#'  a left/right label alignment.
#' @param points A named vector of shapley.values for a single observation.
#' @param shift Offset added to every position (usually the data mean).
compute.shapley.positions = function(points, shift = 0) {
  negatives <- sort(points[which(points < 0)])
  positives <- sort(points[which(points >= 0)], decreasing = TRUE)
  point.labels <- c(names(rev(negatives)), "0", names(positives))
  ordered.positions <- sort(c(cumsum(t(negatives)), 0, cumsum(t(positives))))
  out <- data.frame(ordered.positions + shift)
  names(out) <- c("values")
  out$names <- point.labels
  out$align <- ifelse(out$values > shift, "right", "left")
  return(out)
}
#' Plots a graph that shows the effect of several features over multiple observations.
#' @description This method draws a plot of the observed values and describes
#'  the influence of the selected features.
#' @param shap.values A shapley object (generated by shapley(...)) that contains
#'  the shapley.values and other important information about the task and model.
#' @param features A vector of the interesting feature names.
#' @export
plot.shapley.multipleFeatures = function(shap.values, features = c("crim", "lstat")) {
  # Shapley values restricted to the requested features.
  features.values = getShapleyValues(shap.values)[,features]
  features.numbers = ncol(features.values)
  # Wide table: one column per selected feature plus the observation position.
  data = data.frame(matrix(data = 0, nrow = nrow(getShapleyValues(shap.values)), ncol = 1 + features.numbers))
  names(data) = c(names(features.values), "position")
  data[,names(features.values)] = features.values
  data$position = as.numeric(getShapleyIds(shap.values))
  # Long format: one (position, variable, value) row per feature per observation.
  plot.data = melt(data, id.vars = "position")
  # One line per feature across observations.
  plot = ggplot(plot.data) +
    geom_line(aes(x = position, y = value, group = variable, color = variable))
  return(plot)
}
|
e3da5afa7c886e77aca57170d3a63662f15fe208
|
4efac93116c266322340995eb05640d882d32ad8
|
/scripts/to_snake_case.R
|
89220ae5e6d4ff8445643a8e3615b1d014dad593
|
[
"Apache-2.0"
] |
permissive
|
kingaa/kingaa.github.io
|
85c64870d977cb5dca7037876cf41386fc2d067b
|
8bf76dd9c5c098068354df02559c5e294d59bb9f
|
refs/heads/master
| 2023-08-17T10:13:02.635750
| 2023-08-14T14:41:14
| 2023-08-14T14:41:14
| 38,618,388
| 5
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,235
|
r
|
to_snake_case.R
|
## This script will edit files so as to replace calls
## to deprecated *pomp* functions with their proper replacements.
## Usage:
## 1. Make a directory and copy all files that you wish to edit into it.
## 2. In an R session, source this script.
## 3. Call the `to_snake_case` function with the path to your new
## directory as its sole argument
## 4. Examine the differences between the files for correctness.
##
## The script requires version R version >= 4.2
# Rewrite deprecated dotted pomp function calls (e.g. `filter.mean(`) to their
# snake_case replacements (`filter_mean(`) in every R/Rmd/Rnw file under `dir`.
#
# dir        : directory whose files are edited in place.
# extensions : file extensions to scan (default R, Rmd, Rnw).
#
# Fix: the return values of require() were previously ignored, so a missing
# package surfaced later as confusing "could not find function" errors; the
# dependency check now fails loudly up front.
to_snake_case <- function (dir, extensions = c("R", "Rmd", "Rnw")) {
  stopifnot(`insufficient R version`=getRversion()>="4.2")
  if (!require(readr, quietly = TRUE) || !require(stringi, quietly = TRUE)) {
    stop("to_snake_case() needs the 'readr' and 'stringi' packages", call. = FALSE)
  }
  # Deprecated dotted names to be rewritten.
  oldnames <- c(
    r"{filter.mean}",
    r"{pred.mean}",
    r"{pred.var}",
    r"{filter.traj}",
    r"{cond.logLik}",
    r"{save.states}",
    r"{saved.states}",
    r"{eff.sample.size}",
    r"{as.pomp}",
    r"{mvn.rw}",
    r"{mvn.diag.rw}",
    r"{mvn.rw.adaptive}",
    r"{probe.acf}",
    r"{probe.ccf}",
    r"{probe.marginal}",
    r"{probe.mean}",
    r"{probe.median}",
    r"{probe.nlar}",
    r"{probe.period}",
    r"{probe.quantile}",
    r"{probe.sd}",
    r"{probe.var}",
    r"{periodic.bspline.basis}",
    r"{bspline.basis}",
    r"{rw.sd}"
  )
  # Search patterns: escape the dots and anchor on a following "(" so only
  # call sites are matched (optionally separated by one whitespace character).
  oldnames |>
    stri_replace_all_fixed(
      pattern=r"{.}",
      replacement=r"{\.}"
    ) |>
    paste0(
      r"{(\s?)\(}"
    ) -> patt
  # Replacements: dots become underscores; "$1" preserves captured whitespace.
  oldnames |>
    stri_replace_all_fixed(
      pattern=r"{.}",
      replacement=r"{_}"
    ) |>
    paste0(
      r"{$1\(}"
    ) -> repl
  # Collect every file in `dir` with one of the requested extensions.
  lapply(
    paste0(r"{^.*\.}",extensions,r"{$}"),
    \(.) list.files(path=dir,pattern=.,full.names=TRUE)
  ) |>
    do.call(c,args=_) -> filelist
  cat(
    "scanning files:\n",
    paste("\t",filelist,sep="",collapse="\n"),
    "\n\n"
  )
  # Rewrite each file in place; record whether its contents changed.
  filelist |>
    sapply(
      \(file) {
        read_file(file) -> s
        s |>
          stri_replace_all_regex(
            pattern=patt,
            replacement=repl,
            vectorize_all=FALSE
          ) -> t
        t |> write_file(file)
        s != t
      }
    ) -> res
  if (any(res)) {
    paste(
      "modified files:\n",
      paste("\t",filelist[res],sep="",collapse="\n")
    ) |> cat("\n")
  } else {
    cat("no files modified\n")
  }
}
|
95e01aeeda274e96835fb6d29db758bc39704991
|
1fc6cdf2b36678fa0096015640ab9e6f14d7aefc
|
/man/mappabilityCalc.Rd
|
8ba910eedd698272d043356d71a1bb7dbe7bf52a
|
[] |
no_license
|
clark-lab-robot/Repitools_bioc
|
e36b4a9912f8fe3c34ab592a02069afe860a6afa
|
b838a8fd34b2ecc41dd86276bd470bfdae53d544
|
refs/heads/master
| 2021-01-01T18:37:22.034108
| 2014-04-15T01:49:48
| 2014-04-15T01:49:48
| 2,335,128
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,923
|
rd
|
mappabilityCalc.Rd
|
\name{mappabilityCalc}
\alias{mappabilityCalc}
\alias{mappabilityCalc,GRanges,MappabilitySource-method}
\alias{mappabilityCalc,data.frame,MappabilitySource-method}
\title{Calculate The Mappability of a Region}
\description{Function to calculate mappability of windows}
\usage{
\S4method{mappabilityCalc}{GRanges,MappabilitySource}(x, organism, window = NULL,
type = c("block", "TSS", "center"), verbose = TRUE)
\S4method{mappabilityCalc}{data.frame,MappabilitySource}(x, organism, window = NULL,
type = c("block", "TSS", "center"), ...)
}
\arguments{
\item{x}{A \code{GRanges} object or a \code{data.frame}, with columns \code{chr} and
either \code{position} or \code{start}, \code{end} and \code{strand}.}
\item{window}{Bases around the locations that are in the window. Calculation will
consider \code{windowSize/2} bases upstream, and \code{windowSize/2-1}
bases downstream. For unstranded features, the effect is the same as
for + strand features.}
\item{type}{What part of the interval to make the window around. If the value is
\code{"TSS"}, the the start coordinate is used for all + strand features,
and the end coordinate is used for all - strand features. If \code{"cemter"}
is chosen, then the coordinate that is half way between the start and end of
each feature will be used as the reference point. \code{"block"} results in
the use the start and end coordinates without modification.}
\item{organism}{The \code{BSgenome} object to calculate mappability upon, or
the file path to a FASTA file generated by GEM Mappability, or
the path to a bigWig file containing mappability scores.}
\item{verbose}{Whether to print the progess of processing.}
\item{...}{The \code{verbose} variable for the \code{data.frame} method,
passed onto the \code{GRanges} method.}
}
\details{
The windows considered will be \code{windowSize/2} bases upstream and
\code{windowSize/2-1} bases downstream of the given position of stranded features,
and the same number of bases towards the start and end of the chromosome for unstranded
features. The value returned for each region is a percentage of bases in that region that
are not N (any base in IUPAC nomenclature).
For any positions of a window that are off the end of a chromosome, they will be
considered as being N.
}
\value{
A vector of mappability percentages, one for each region.
}
\author{Aaron Statham}
\examples{
\dontrun{
require(BSgenome.Hsapiens36bp.UCSC.hg18mappability)
TSSTable <- data.frame(chr = paste("chr", c(1,2), sep = ""), position = c(100000, 200000))
mappabilityCalc(TSSTable, Hsapiens36bp, window = 200, type = "TSS")
}
}
|
ca79329e49aeee49d9ca5ea0d6f2ad9d83664340
|
367805d6ab8316f1ad7a1c8424847c9e9d986baf
|
/quiz4.R
|
9859ff88bd17967409c4bda07c443eea103abafc
|
[] |
no_license
|
abzaloid/stat-327
|
4e0004c9ff62e95922d19d9d7e3fe6d16df58df0
|
35e973a1b78f9aa93c7a5fa44444f9032a33000e
|
refs/heads/master
| 2021-01-21T02:24:13.529822
| 2015-07-16T04:00:37
| 2015-07-16T04:00:37
| 38,990,743
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 578
|
r
|
quiz4.R
|
# Load the quiz data; expects columns age, sex, eye.color, hair.color, weight.
s = read.csv("serekovQ4.csv")
# Mean age of females.
cat(mean(s$age[s$sex=="F"]))
# Mean age of brown-eyed males.
cat(mean(s$age[s$sex=="M" & s$eye.color=="brown"]))
# How many people have black, blond, or brown hair.
cat(sum(s$hair.color %in% c("black","blond","brown")))
# How many hazel-eyed people do not have gray hair.
cat(sum(s$eye.color=="hazel" & s$hair.color!="gray"))
# Age of the third-heaviest person.
cat(s[order(s$weight,decreasing = TRUE),][3,]$age)
# Rows ordered by eye colour then weight, both decreasing.
ordered=s[order(s$eye.color, s$weight, decreasing = TRUE),]
# Age of the third male in that ordering.
cat(ordered[which(ordered$sex=="M")[3],]$age)
# Built-in CO2 dataset. NOTE(review): `c` shadows base::c() from here on.
c = CO2
# NOTE(review): cat() flattens the summary table; print(summary(c)) is the usual form.
cat(summary(c))
# Mean and SD of uptake for Quebec and Mississippi plants.
cat(mean(c[c$Type=="Quebec",]$uptake))
cat(sd(c[c$Type=="Quebec",]$uptake))
cat(mean(c[c$Type=="Mississippi",]$uptake))
cat(sd(c[c$Type=="Mississippi",]$uptake))
|
86f8f106dfe726d9a8c8773e89411a118cb2f929
|
67a61966b03dc457e6a80c9de1913a375ae2d8c4
|
/Platform/functions/quandl.R
|
cb41851c0db249f2ce3c24d92fb2db42b7c6b95b
|
[] |
no_license
|
KarasiewiczStephane/automated_trading_with_R
|
75f6e453e99e894c26ef1003ecb9f9d5a3ae3b0f
|
628c44fce866bac969200e918d0e762c4b422089
|
refs/heads/master
| 2021-07-12T03:35:34.833375
| 2017-10-13T22:53:43
| 2017-10-13T22:53:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,316
|
r
|
quandl.R
|
# Source : https://docs.quandl.com/docs/time-series-1
# Examples
# You can get the same data in a dataframe: data <- Quandl("FRED/GDP", type="raw")
# In ts format: data_ts <- Quandl("FRED/GDP", type="ts")
# In xts format: data_xts <- Quandl("FRED/GDP", type="xts")
# In zoo format: data_zoo <- Quandl("FRED/GDP", type="zoo")
# data <- Quandl(c("FRED/GDP.1", "WIKI/AAPL.4"))
# AAPL <- Quandl("WIKI/AAPL")
# data <- Quandl("WIKI/AAPL.4")
# data_NSE_OIL <- Quandl('NSE/OIL', type = "raw")
# data_gdp_aapl <- Quandl(c("FRED/GDP.1", "WIKI/AAPL.4"))
# data_acn_aapl <- Quandl(c("WIKI/ACN", "WIKI/AAPL.4"))
# mydata = Quandl("FRED/GDP", start_date="2001-12-31", end_date="2005-12-31")
# mydata_columns <- Quandl(c("WIKI/AAPL.8", "WIKI/AAPL.9"), start_date="2017-01-01")
# Quandl API key (placeholder -- replace "MYAPIKEY" with a real key before use).
quandl_api <- "MYAPIKEY"
# Register the key with the Quandl client.
Quandl.api_key(quandl_api)

# Download the adjusted OHLCV series (Open, High, Low, Close, Volume) for one
# WIKI ticker symbol as a single zoo object.
#
# sym:        ticker symbol, e.g. "AAPL"
# start_date: first date to request (ISO string); default "2017-01-01"
#
# Returns a zoo object with five adjusted columns.
quandl_get <-
  function(sym, start_date = "2017-01-01") {
    # library() fails loudly if the package is missing, unlike require(),
    # which only warns and returns FALSE.  The previous require(devtools)
    # was removed: nothing from devtools is used at run time.
    library(Quandl)
    # Request all five adjusted columns in one call.
    # NOTE(review): this tryCatch() has no handlers, so errors still
    # propagate unchanged -- add an error = handler if recovery is intended.
    tryCatch(Quandl(c(
      paste0("WIKI/", sym, ".8"),   # Adj. Open
      paste0("WIKI/", sym, ".9"),   # Adj. High
      paste0("WIKI/", sym, ".10"),  # Adj. Low
      paste0("WIKI/", sym, ".11"),  # Adj. Close
      paste0("WIKI/", sym, ".12")), # Adj. Volume
      start_date = start_date,
      type = "zoo"
    ))
  }
|
b3ef09a48883f230a2c64bb3f922101041faffb6
|
2aa238d03328cffa2a3b41df70c1c05c4cf97c15
|
/man/bl.Rd
|
c8be9b2c7991b8c753f4daf3068bd29c8232ad6b
|
[] |
no_license
|
cran/easyreg
|
24dcc6387501e824b7e871701a120f39c6f46ea5
|
2b83e29f8632ebcf023d18c0b499f3fd13db15b6
|
refs/heads/master
| 2021-01-11T01:55:27.933134
| 2019-09-13T12:30:02
| 2019-09-13T12:30:02
| 70,836,877
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,579
|
rd
|
bl.Rd
|
\name{bl}
\alias{bl}
\title{
Analysis of broken line regression
}
\description{
The function performs analysis of broken line regression
}
\usage{
bl(data, model=1, alpha=0.05, xlab = "Explanatory Variable", ylab = "Response Variable",
position = 1, digits = 6, mean = TRUE, sd=FALSE, legend = TRUE, lty=2,
col="dark blue", pch=20, xlim="default.x",ylim="default.y", ...)
}
\arguments{
\item{data}{
data is a data.frame
The first column contain the treatments (explanatory variable) and the
second column the response variable
}
\item{model}{
model for analysis: 1=two linear; 2=linear plateau (LRP); 3= model 1 with blocks random; 4 = model 2 with blocks random
}
\item{alpha}{
significance level for confidence intervals of the estimated parameters
}
\item{xlab}{
name of explanatory variable
}
\item{ylab}{
name of response variable
}
\item{position}{
position of equation in the graph
top=1 (default)
bottomright=2
bottom=3
bottomleft=4
left=5
topleft=6
topright=7
right=8
center=9
}
\item{digits}{
number of digits (default=6)
}
\item{mean}{
mean=TRUE (plot mean of data)
mean=FALSE (plot all data)
}
\item{sd}{
sd=FALSE (plot without standard deviation)
sd=TRUE (plot with standard deviation)
}
\item{legend}{
legend=TRUE (plot legend)
legend=FALSE (not plot legend)
}
\item{lty}{
line type
}
\item{col}{
line color
}
\item{pch}{
point type
}
\item{xlim}{
limits for x
}
\item{ylim}{
limits for y
}
\item{...}{
others graphical parameters (see par)
}
}
\value{
Returns coefficients of the models, t test for coefficients, knot (break point), R squared, adjusted R squared, AIC, BIC, residuals and shapiro-wilk test for residuals.
}
\references{
KAPS, M. and LAMBERSON, W. R. Biostatistics for Animal Science: an introductory text. 2nd Edition. CABI Publishing, Wallingford, Oxfordshire, UK, 2009. 504p.
}
\author{
Emmanuel Arnhold <emmanuelarnhold@yahoo.com.br>
}
\seealso{
lm, ea1(easyanova package), er1
}
\examples{
# the growth of Zagorje turkeys (Kaps and Lamberson, 2009)
weight=c(44,66,100,150,265,370,455,605)
age=c(1,7,14,21,28,35,42,49)
data2=data.frame(age,weight)
# two linear
regplot(data2, model=5, start=c(25,6,10,20))
bl(data2, digits=2)
# linear plateau (LRP)
x=c(0,1,2,3,4,5,6)
y=c(1,2,3,6.1,5.9,6,6.1)
data=data.frame(x,y)
bl(data,model=2, lty=1, col=1, digits=2, position=8)
# effect of blocks
x=c(1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8)
y=c(4,12,9,20,16,25,21,31,28,42,33,46,33,46,34,44)
blocks=rep(c(1,2),8)
dat=data.frame(x,blocks,y)
bl(dat, 3)
bl(dat,4, sd=TRUE)
bl(dat,4, mean=FALSE)
}
|
70b2382139f3fcf8737ee368628440af5320139b
|
06d7da77c509f52f6d1629307b9c9a59ab106020
|
/Rcode/01-intro.R
|
227a8a53b3b3173cb3ecc76059de239c0901b433
|
[] |
no_license
|
happyrabbit/CE_JSM2017
|
b823883d01c7787a77e424aa099464564005aedc
|
bddf17356b156c41bfe948ce6f86ee2d1198e4f8
|
refs/heads/master
| 2021-01-17T12:00:37.183957
| 2017-07-31T21:19:07
| 2017-07-31T21:19:07
| 95,393,608
| 8
| 4
| null | 2017-07-06T16:18:56
| 2017-06-26T00:27:40
|
HTML
|
UTF-8
|
R
| false
| false
| 209
|
r
|
01-intro.R
|
### -----------------------------
### Hui Lin
### @gossip_rabbit
###
### http://scientistcafe.com
### -----------------------------
## preparations -----------------------
# Load shared course setup (packages, options); the path is relative to the
# project root, so run this script from there.
source("Rcode/00-course-setup.r")
|
a019ab09d1651a0e9f3fa339ca2d3eed0b22944c
|
caab88f01472dccf8f8e6c36d8b6686c9a69ec36
|
/nonparametric/distances.R
|
cc0516d1f2638387798d5f511044141764ff0fa7
|
[] |
no_license
|
clarkfitzg/pems_fd
|
e3e51142f9a173136ae3a41fec292082a55b5a67
|
eb379ec3fd03e23591a92de359f4efc06bef0741
|
refs/heads/master
| 2021-05-07T14:31:39.219377
| 2019-05-20T18:38:07
| 2019-05-20T18:38:07
| 109,893,320
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 632
|
r
|
distances.R
|
# Pairwise functional distances between station fundamental diagrams.
# helpers.R supplies distance(), load_station() and (apparently) `keep`.
source("helpers.R")

stn <- load_station()
N <- length(stn)

# Wrap distance() so each station's (occupancy, flow) curve is anchored at
# the origin (0, 0) before comparison.
distwrap <- function(s1, s2)
{
    distance(x1 = c(0, s1$right_end_occ)
        , y1 = c(0, s1$mean_flow)
        , x2 = c(0, s2$right_end_occ)
        , y2 = c(0, s2$mean_flow)
    )
}

# Works
distwrap(stn[[1]], stn[[2]])

# Symmetric distance matrix, filled once per unordered pair (i, j).
fd_dist <- matrix(NA, nrow = N, ncol = N)

system.time(
for (i in seq_len(N)) {
    for (j in i:N) {
        fij <- distwrap(stn[[i]], stn[[j]])
        fd_dist[i, j] <- fij
        fd_dist[j, i] <- fij
    }
}
)

# NOTE(review): `keep` is not defined in this script; presumably helpers.R
# provides the station IDs -- confirm.
colnames(fd_dist) <- rownames(fd_dist) <- keep

# The .rds extension implies readRDS(); save() writes an .RData workspace
# image that readRDS() cannot open, so use saveRDS() for a single object.
saveRDS(fd_dist, file = "~/data/pems/fd_dist.rds")
|
530acf2894bc8bb4cfb80b0dd6f067dc9ee4ebe0
|
e03a21bf93181acbd72f19e10e7f0604bb32a6df
|
/R/phystiodata.R
|
4e035c54c790e151ac89aa7854fdb2c49f359f5e
|
[] |
no_license
|
cran/seedr
|
996862f5c5069506ab1ed187c4a020fd40d105b5
|
530ed2921907b6ca067901f260dc221a583c7310
|
refs/heads/master
| 2023-01-06T08:26:14.438066
| 2020-11-03T07:30:02
| 2020-11-03T07:30:02
| 310,522,632
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,842
|
r
|
phystiodata.R
|
#' Transforms dataset to physiodata format
#'
#' \code{physiodata} takes the user's dataset and transforms it to an object of
#' class "physiodata". This object will be used by the model-fitting functions,
#' and it can also be used to explore the data.
#'
#' @usage physiodata(d, t = "times", g = "germinated", pg = "germinable", x =
#' "treatment", groups = NULL)
#' @param d a data.frame containing the results of a germination experiment. The
#' data frame should include columns with scoring times, germination counts
#' (not cumulative), number of potentially germinable seeds, and the
#' environmental variable of interest. (e.g. temperature or water potential)
#' (see \code{\link{grasses}} example dataset for appropriate structure).
#' @param t the name of a column in \code{d} containing a vector of numeric
#' scoring times.
#' @param g the name of a column in \code{d} containing a vector of integer
#' germination counts (non cumulative).
#' @param pg the name of a column in \code{d} containing a vector of integer
#' numbers of potentially germinable seeds.
#' @param x the name of a column in \code{d} containing a vector of numeric
#' values for the environmental variable of interest (e.g. temperature, water
#' potential).
#' @param groups optional, the names of columns in \code{d} containing grouping
#' variables for the experiment that have to be analysed separately (e.g.
#' different species or populations, different temperatures in a water
#' potential experiment, different treatments to break seed dormancy).
#' @return \code{physiodata} returns a S3 object of class "physiodata". The
#' object is a list containing, for each group, treatment and scoring time:
#' the cumulative germination count; the cumulative germination proportion;
#' and the lower and upper bounds of the 95 % binomial confidence interval,
#' calculated with the Wilson method as implemented in the package
#' \code{binom}. The object can be used to explore the data using the generic
#' functions \code{summary}, \code{barplot} and \code{plot}.
#' @examples
#' cent <- physiodata(centaury, x = "temperature")
#' cent
#' summary(cent) # average final germination proportions and germination rates per treatment
#' barplot(cent) # bar plots for the final germination proportions and germination rates
#' plot(cent) # cumulative germination curves
#' physiodata(grasses, x = "psi", groups = "species") # grouping dataset by species
#' @export
physiodata <- function(d, t = "times", g = "germinated", pg = "germinable",
                       x = "treatment", groups = NULL)
{
  # Work on a data.table copy so the caller's data frame is never modified.
  dd <- data.table(d)
  setnames(dd, c(t, x), c("time", "treatment"))
  # Pool replicates: total germinated / germinable per group-treatment-time.
  dd <- dd[, .(germinated = sum(get(g)), germinable = sum(get(pg))),
           by = c(groups, "treatment", "time")]
  # Use the largest germinable count in each treatment/group as the trial size.
  dd[, germinable := max(germinable), by = c("treatment", groups)]
  setorderv(dd, c(groups, "treatment", "time"))
  # Cumulative germination counts over time within each treatment/group.
  # NOTE(review): get(g) looks up the column named by `g`, but the aggregation
  # above renamed it to "germinated"; this likely fails when g != "germinated"
  # -- confirm.
  dd[, cumulative := cumsum(get(g)), by = c("treatment", groups)]
  # 95% Wilson binomial confidence interval for each cumulative proportion;
  # columns 4:6 of the binom.confint result are mean, lower, upper.
  bci <- binom::binom.confint(dd$cumulative, dd$germinable, method = "wilson")
  dd <- cbind(dd, germination = bci[4:6])
  l <- list(proportions = dd, groups = groups)
  class(l) <- "physiodata"
  l
}
# physiodata generic functions
#' @export
print.physiodata <- function(x, ...) {
  # Printing a physiodata object shows the underlying proportions table.
  print(x[["proportions"]])
}
#' @export
summary.physiodata <- function(object, ...)
{
  # Final germination per group/treatment: the inner data.table call uses .I
  # to return the row indices whose scoring time equals the within-group
  # maximum; those rows are then selected from the proportions table.
  dd <- object$proportions[object$proportions[, .I[(time == max(time))], by = c(object$groups, "treatment")]$V1]
  # Drop raw counts; only final proportions and their CI are reported.
  # The trailing [] makes the :=-modified data.table auto-printable.
  dd[, c("germinated", "germinable", "cumulative") := NULL][]
  setorderv(dd, c(object$groups, "treatment"))
  # Median germination rate (interpolated 1/t50) per group/treatment; rates()
  # receives each subgroup's rows as .SD.
  dr <- object$proportions[, .(r50 = rates(d = .SD, fractions = 0.5, extrapolate.prange = 1)), by = c(object$groups, "treatment")]
  cbind(dd, dr[, list(r50)])
}
#' @export
barplot.physiodata <- function(height, ..., x.lab = "Treatment")
{
  # Side-by-side bar plots per treatment: final germination proportion (with
  # its 95% CI drawn as error bars) and the median germination rate.  With
  # grouping variables, one page is drawn per group and par(ask) pauses
  # between pages.
  # NOTE(review): par() state is saved/restored manually; on.exit() would be
  # safer if an error interrupts plotting.
  dd <- summary(height)
  if(! is.null(height$groups)){
    listd <- split(dd, by = height$groups, drop = TRUE)
    ask.status <- par()$ask
    par(ask = TRUE)
    for(i in seq_along(listd)) {
      mfrow.status <- par()$mfrow
      oma.status <- par()$oma
      # 1x2 layout with outer top margin reserved for the group title.
      par(mfrow = c(1, 2), oma = c(0, 0, 2, 0))
      p <- barplot(listd[[i]]$germination.mean,
                   names.arg = as.numeric(listd[[i]][, treatment]),
                   ylim = c(0, 1),
                   xlab = x.lab,
                   ylab = "Final germination proportion")
      # Error bars: Wilson CI bounds drawn with flat caps (angle = 90).
      segments(p, listd[[i]]$germination.lower, p, listd[[i]]$germination.upper)
      arrows(p, listd[[i]]$germination.lower, p, listd[[i]]$germination.upper,
             lwd = 1.5, angle = 90, code = 3, length = 0.05)
      barplot(listd[[i]]$r50,
              names.arg = as.numeric(listd[[i]][, treatment]),
              xlab = x.lab,
              ylab = paste("Median germination rate"))
      # Group label in the outer margin above both panels.
      mtext(names(listd)[i], line = 0, side = 3, outer = TRUE)
      par(mfrow = mfrow.status)
      par(oma = oma.status)
    }
    par(ask = ask.status)} else{
      # Ungrouped case: single pair of panels, no outer title needed.
      mfrow.status <- par()$mfrow
      par(mfrow = c(1, 2))
      p <- barplot(dd$germination.mean,
                   names.arg = as.numeric(dd[, treatment]),
                   ylim = c(0, 1),
                   xlab = x.lab,
                   ylab = "Final germination proportion")
      segments(p, dd$germination.lower, p, dd$germination.upper)
      arrows(p, dd$germination.lower, p, dd$germination.upper,
             lwd = 1.5, angle = 90, code = 3, length = 0.05)
      barplot(dd$r50,
              names.arg = as.numeric(dd[, treatment]),
              xlab = x.lab,
              ylab = paste("Median germination rate"))
      par(mfrow = mfrow.status)
    }
}
#' @export
plot.physiodata <- function(x, ...)
{
  # Cumulative germination curves over time, one coloured line per treatment,
  # with a legend placed outside the right plot margin (hence par(xpd) and
  # the widened right margin).  With grouping variables, one page per group.
  # NOTE(review): par() state is saved/restored manually; on.exit() would be
  # safer if an error interrupts plotting.
  if(! is.null(x$groups)){
    listd <- split(x$proportions, by = x$groups, drop = TRUE)
    ask.status <- par()$ask
    par(ask = TRUE)
    for(i in seq_along(listd)) {
      # Number of distinct treatments, used to size the colour ramp.
      colnumber <- listd[[i]][, .(n = length(unique(treatment)))][[1]]
      colramp <- colorRampPalette(c("violet", "blue", "green",
                                    "yellow", "orange", "red"))
      # One (times, proportions) series per treatment.
      X <- split(listd[[i]]$time, listd[[i]][, treatment])
      y <- split(listd[[i]]$germination.mean, listd[[i]][, treatment])
      xpd.status <- par()$xpd
      mar.status <- par()$mar
      par(xpd = TRUE)
      par(mar = mar.status + c(0, 0, 0, 4))
      # Empty canvas spanning the full time range; series added below.
      plot(1 : max(unlist(X)), ylim = (c(0, 1)), type = "n",
           xlab = "Time", ylab = "Germination proportion")
      mapply(lines, X, y, col = colramp(colnumber), pch = 16, type = "o")
      # Legend 5% beyond the last time point, outside the plot region.
      legend(max(listd[[i]][, time]) + max(listd[[i]][, time])*.05, 1.1,
             title = "Treatment",
             legend = levels(as.factor(round(listd[[i]][, treatment], 1))), pch = 16,
             col = colramp(colnumber), lwd = 1, lty = 1)
      par(xpd = xpd.status)
      par(mar = mar.status)
      title(names(listd)[i])}
    par(ask = ask.status)} else{
    # Ungrouped case: same plot built once from the full proportions table.
    colnumber <- x$proportions[, .(n = length(unique(treatment)))][[1]]
    colramp <- colorRampPalette(c("violet", "blue", "green",
                                  "yellow", "orange", "red"))
    X <- split(x$proportions$time, x$proportions[, treatment])
    y <- split(x$proportions$germination.mean, x$proportions[, treatment])
    xpd.status <- par()$xpd
    mar.status <- par()$mar
    par(xpd = TRUE)
    par(mar = mar.status + c(0, 0, 0, 4))
    plot(1 : max(unlist(X)), ylim = (c(0, 1)), type = "n",
         xlab = "Time", ylab = "Germination proportion")
    mapply(lines, X, y, col = colramp(colnumber), pch = 16, type = "o")
    legend(max(x$proportions[, time]) + max(x$proportions[, time])*.05, 1.1,
           title = "Treatment",
           legend = levels(as.factor(round(x$proportions[, treatment], 1))), pch = 16,
           col = colramp(colnumber), lwd = 1, lty = 1)
    par(xpd = xpd.status)
    par(mar = mar.status)}
}
# physiodata internal functions
# For each target fraction, locate the two observations bracketing it and
# linearly interpolate the time at which the cumulative germination
# proportion crosses the fraction; the germination rate is the reciprocal
# of that time.
#
# d:                   table with columns `germination.mean` (cumulative
#                      proportion, ordered by time) and `time`
# fractions:           target germination fractions (default deciles 0.1-0.9)
# extrapolate.prange:  multiple of the last scoring time beyond which
#                      interpolated times are discarded (set to NA)
#
# Returns a numeric vector of rates (1 / interpolated time), one per fraction.
rates <- function(d, fractions = (1:9)/10, extrapolate.prange = 1)
{
  # Preallocate the bracketing-index vectors instead of growing them with
  # c() inside the loop.
  pos <- integer(length(fractions))
  pos0 <- integer(length(fractions))
  for (i in seq_along(fractions))
  {
    # First observation at or above the target fraction.
    posA <- match(FALSE, d$germination.mean < fractions[i], nomatch = NA)
    pos0A <- posA - 1
    if (is.na(posA))
    {
      # Fraction never reached: extrapolate from the last observation and
      # the last strictly smaller one.
      posA <- length(d$germination.mean)
      pos0A <- match(FALSE, d$germination.mean < d$germination.mean[posA], nomatch = NA) - 1
    }
    if (pos0A == 0)
    {
      # Fraction already met at the first observation: bracket with the
      # first strictly greater observation instead.
      posA <- match(FALSE, d$germination.mean <= fractions[i], nomatch = NA)
      pos0A <- posA - 1
    }
    pos[i] <- posA
    pos0[i] <- pos0A
  }
  p <- d$germination.mean[pos]
  q <- d$germination.mean[pos0]
  y <- d$time[pos]
  x <- d$time[pos0]
  # Linear interpolation of the crossing time between the bracketing points.
  r <- x + (fractions - q) * (y - x) / (p - q)
  # Discard times extrapolated beyond the allowed multiple of study length.
  r[r > (extrapolate.prange * max(d$time))] <- NA
  1/r
}
|
075b0eb2b303c78ab424f2c9d06cc9f1af6132b7
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/gMemoryInputStreamAddData.Rd
|
0ce5a8e7d76c7562ada095e2dd98a1513fe8d5c2
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 403
|
rd
|
gMemoryInputStreamAddData.Rd
|
\alias{gMemoryInputStreamAddData}
\name{gMemoryInputStreamAddData}
\title{gMemoryInputStreamAddData}
\description{Appends \code{data} to data that can be read from the input stream}
\usage{gMemoryInputStreamAddData(object, data)}
\arguments{
\item{\verb{object}}{a \code{\link{GMemoryInputStream}}}
\item{\verb{data}}{input data}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
64bdd292e512f4d8feff495716f7fd056f857ebb
|
e30bae69fc9d33fd2953cc93669686a52013049e
|
/tests/testthat.R
|
b59a36b69904be66ebe92f719a0cca377e9343ff
|
[
"MIT"
] |
permissive
|
Silvia-1404/Proyecto1
|
74194b9b0183ecc1237350561813ffd70664af9c
|
b57d007c8f082b6d4f91ffa714b062e664298651
|
refs/heads/main
| 2023-03-20T18:51:56.080080
| 2021-03-16T18:20:58
| 2021-03-16T18:20:58
| 348,431,076
| 0
| 0
|
NOASSERTION
| 2021-03-16T18:02:22
| 2021-03-16T17:14:19
|
R
|
UTF-8
|
R
| false
| false
| 62
|
r
|
testthat.R
|
# Standard testthat entry point: runs every test under tests/testthat/
# for the Proyecto1 package (invoked by R CMD check).
library(testthat)
library(Proyecto1)
test_check("Proyecto1")
|
bbf5b6ec094b53dc2ca1772afae593816af32094
|
522ca94e04ba06504404ff5a1d6d1570fe909097
|
/Bioconductor_RNASeqAnalysis.R
|
7338970af679a6c2b4e3f18c8431f1862ef70157
|
[] |
no_license
|
AGBioInfo/Bioconductor-RNASeqAnalysis
|
3db3cbf593abcc497f5f81820ccfe52b896645df
|
fab8c3218e3401eee5b063f40c75c2fe8ecb1b50
|
refs/heads/master
| 2020-04-01T17:15:00.267691
| 2018-10-17T10:15:01
| 2018-10-17T10:15:01
| 153,420,041
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,045
|
r
|
Bioconductor_RNASeqAnalysis.R
|
# Bioconductor RNA-seq exercise script: each section answers one question
# stated in the comment that precedes it.

# What fraction of reads in this file has an A nucleotide in the 5th base of the read?
library(ShortRead)
library(yeastRNASeq)
fastqFilePath <- system.file("reads", "wt_1_f.fastq.gz", package = "yeastRNASeq")
fqFile <- FastqFile(fastqFilePath)
reads <- readFastq(fqFile)
reads_set <- sread(reads)
# Narrow every read to base 5 only, then count the proportion equal to "A".
sum(DNAStringSet(reads_set,5,5) == "A") / length(reads_set)
#What is the average numeric quality value of the 5th base of these reads?
qm <- as(quality(reads), "matrix")
# NOTE(review): qm[,5:5] is equivalent to qm[,5].
mean(qm[,5:5])
#In this interval, how many reads are duplicated by position?
library(leeBamViews)
bamFilePath <- system.file("bam", "isowt5_13e.bam", package="leeBamViews")
bamFile <- BamFile(bamFilePath)
seqinfo(bamFile)
aln <- scanBam(bamFile)
aln <- aln[[1]]
names(aln)
lapply(aln, function(xx) xx[1])
unique(aln$rname)
# Restrict the scan to Scchr13:800000-801000.
gr <- GRanges(seqnames = "Scchr13", ranges = IRanges(start = 800000, end = 801000))
params <- ScanBamParam(which = gr, what = scanBamWhat())
aln <- scanBam(bamFile, param = params)
aln <- aln[[1]]
aln$pos
# Count all reads whose start position occurs more than once.
duplicatedValues = unique(aln$pos[duplicated(aln$pos)])
sum(aln$pos %in% duplicatedValues)
# What is the absolute value of the log foldchange ( logFC) of the gene with the lowest P.value.
library(limma)
# NOTE(review): normData is not defined anywhere in this script; it was
# presumably created in an earlier session -- confirm before running.
design <- model.matrix(~ normData$group)
fit <- lmFit(normData, design)
fit <- eBayes(fit)
topTable(fit)
abs(topTable(fit, n=1)$logFC)
#How many genes are differentially expressed between the two groups at an adj.P.value cutoff of 0.05?
topTable(fit, p.value = 0.05)
#What is the mean difference in beta values between the 3 normal samples and the 3 cancer samples, across OpenSea CpGs?
library(minfi)
require(minfiData)
data(RGsetEx)
p <- preprocessFunnorm(RGsetEx)
b <- getBeta(p)
is <- getIslandStatus(p)
pData(p)$status
# Columns 1, 2, 5 are the normal samples; 3, 4, 6 are the cancer samples
# (per pData(p)$status inspected above).
norm <- b[,c(1,2,5)]
can <- b[,c(3,4,6)]
norm_os <- norm[is == "OpenSea",]
can_os <- can[is == "OpenSea",]
mean(norm_os) - mean(can_os)
#How many of these DNase hypersensitive sites contain one or more CpGs on the 450k array?
library(AnnotationHub)
ah <- AnnotationHub()
qah_h1 <- query(ah, c("Caco2", "AWG"))
h <- qah_h1[["AH22442"]]
sum(countOverlaps(p,h))
ah_s <- subset(ah, genome == "hg19")
ah_s <- subset(ah, dataprovider == "UCSC")
# write.csv(mcols(ah), "ah.csv")
# g <- ah[["AH5018"]] # assembly
cpg <- ah_s[["AH5086"]] # CpG islands
h_cpg <- subsetByOverlaps(cpg, h)
ov <- subsetByOverlaps(h_cpg, p)
#How many features are differentially expressed between control and treatment (ie. padj <= 0.05)?
library(DESeq2)
library(zebrafishRNASeq)
data(zfGenes)
#exclude spike-in controls
tx <- zfGenes[grep("^ERCC", rownames(zfGenes), invert = T),]
counts_mat <- as.matrix(tx)
colData <- DataFrame(sampleID=colnames(tx), group=as.factor(c("control", "control", "control", "treatment", "treatment", "treatment")))
ddsMat <- DESeqDataSetFromMatrix(counts_mat, colData, design = ~ group)
ddsMat <- DESeq(ddsMat)
res <- results(ddsMat)
res <- res[order(res$padj),]
sigRes <- subset(res, padj <= 0.05)
dim(sigRes)
|
dc52f8995493bed77b7b7832e4b3257b7acc720d
|
c7e9a7fe3ee4239aad068c6c41149a4a09888275
|
/OLD_GALLERY_RSCRIPT/#25_histogram_without_border.R
|
77004c432344e7e0cdc0e511da11d6858e4a440d
|
[
"MIT"
] |
permissive
|
holtzy/R-graph-gallery
|
b0dfee965ac398fe73b3841876c6b7f95b4cbae4
|
7d266ad78c8c2d7d39f2730f79230775930e4e0b
|
refs/heads/master
| 2023-08-04T15:10:45.396112
| 2023-07-21T08:37:32
| 2023-07-21T08:37:32
| 31,253,823
| 591
| 219
|
MIT
| 2023-08-30T10:20:37
| 2015-02-24T09:53:50
|
HTML
|
UTF-8
|
R
| false
| false
| 287
|
r
|
#25_histogram_without_border.R
|
# Creating data: mixture of two normal samples (means 0 and 9, sd 2).
my_variable <- c(rnorm(1000, 0, 2), rnorm(1000, 9, 2))

# Draw the histogram with border = FALSE.  Use FALSE rather than F: F is an
# ordinary, reassignable binding and can silently change meaning.
png("#25_histogram_without_border.png", width = 480, height = 480)
par(mar = c(3, 4, 2, 2))
hist(my_variable, breaks = 40, col = rgb(0.2, 0.8, 0.5, 0.5), border = FALSE, main = "")
dev.off()
|
7573babcc72b82bfd80cef111d0904641969f20a
|
3208cc4c01370e22961fcb21f350f7557e006428
|
/plots_select.R
|
69fbadbf47fd18f6c7868e7df4e4cb58915e8d30
|
[] |
no_license
|
austinctodd/ConnectedCar
|
edadb2614577c4a0f8c25174a36c427b2b6cf8ac
|
78bcd810065d58cc1917217b4be6e48c78c8cb60
|
refs/heads/master
| 2020-12-31T05:10:34.110816
| 2016-04-25T11:26:00
| 2016-04-25T11:26:00
| 56,857,366
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,517
|
r
|
plots_select.R
|
# Shiny UI fragment for the "plots" tab: variable picker, update button and
# an interactive dygraph of the selected trip variables.  This file's value
# (the tagList) is what the sourcing app inserts into its layout.
tagList(
  br(),
  # Commented-out time-range slider kept for reference.
  # fluidRow(
  # column(width = 3),
  # column(width = 6,
  # sliderInput("map_area", "Time Range",
  # min = as.POSIXlt(min(cars_weather$GPS_UTC_Time)/1000,origin="1970-01-01",tz="EST5EDT"),
  # max = as.POSIXlt(max(cars_weather$GPS_UTC_Time)/1000,origin="1970-01-01",tz="EST5EDT"),
  # value = c(
  # as.POSIXlt(min(cars_weather$GPS_UTC_Time)/1000,origin="1970-01-01",tz="EST5EDT"),
  # as.POSIXlt(max(cars_weather$GPS_UTC_Time)/1000,origin="1970-01-01",tz="EST5EDT")
  # ))),
  # column(width = 3)
  # ),
  # Controls row: instructions, multi-select of columns, and the plot button.
  fluidRow(
    column(width=4,'Select the variables you wish to plot and click "Update Plot"'),
    column(width=5,
           selectInput("plotvars",label=NULL, choices=colnames(cars_weather), selected = "GPS_Speed", multiple = TRUE)),
    column(width=3,actionButton("makeplot", " Update Plot", icon("line-chart"),
                                style="color: #fff; background-color: #337ab7; border-color: #2e6da4"))
  ),
  br(),
  # Interactive time-series plot rendered server-side as output$dygraph.
  fluidRow(
    dygraphOutput("dygraph")
  ),
  br(),
  # Usage instructions shown under the plot.
  fluidRow(
    "To zoom in on a selected part of the trip, either use the levers to adjust the window at the bottom of the figure,
    or click and drag the mouse across the desired zoom region. Reset the zoom to the entire trip length
    by double-clicking in the plot window. "
  ),br(),
  # fluidRow(
  # verbatimTextOutput("plot_hoverinfo")
  # ),
  br()
)
|
eb643bb00105116c8d598e77ae4e5b8dbaa3f560
|
e67a32b7985e535e258a883ef71b679045429b6e
|
/4_model/src/process_models/random_walk.R
|
7f44d1f7edc6dee62ca10ebcd051389080f3c2e0
|
[] |
no_license
|
jzwart/delaware-water-temp
|
7b114d9ca92a60c5a406c18bca80553af1614003
|
4c50519980ea97512835a015cd341746fd64da80
|
refs/heads/master
| 2021-08-07T10:26:29.158373
| 2021-07-08T16:42:18
| 2021-07-08T16:42:18
| 203,851,125
| 2
| 4
| null | 2020-10-29T14:11:45
| 2019-08-22T18:28:39
|
R
|
UTF-8
|
R
| false
| false
| 250
|
r
|
random_walk.R
|
#' random walk model
#'
#' Each new state is a Gaussian perturbation centred on the previous state.
#'
#' @param states model states at the previous time step
#' @param sd state error as standard deviation
#' @return numeric vector of new states, same length as \code{states}
random_walk <- function(states, sd) {
  rnorm(n = length(states), mean = states, sd = sd)
}
|
90b2d68e4e45f6cd2c7bfcb67ee4c247ae8f7024
|
598f42e4af35f3371251fe9c01b57f13bc2318c3
|
/north_filter.R
|
5b2fd20a92c69bb288aeaf965a066ce8c43b7559
|
[] |
no_license
|
lucaskdo/Thesis
|
298fd2ba1ff63f1484dd4a0848f34cc229ef19ff
|
6cf4b177feaadf092e7486e38b6afb51e12d2e8e
|
refs/heads/master
| 2020-12-19T17:18:41.492565
| 2020-01-28T19:24:54
| 2020-01-28T19:24:54
| 235,798,499
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,211
|
r
|
north_filter.R
|
#install and set up necessary packages
# NOTE(review): installing inside a script is fragile; ideally guard with
# requireNamespace() or install once outside the script.
install.packages("tidyverse")
install.packages("janitor")
library(tidyverse)
library(janitor)

# Grid cells (0.5-degree centres) that make up the "north" region.  Listing
# the (lat, lon) pairs once replaces the 18 hand-written filter() subsets.
north_cells <- tibble(
  lat = c(rep(41.25, 7), rep(41.75, 6), rep(42.25, 5)),
  lon = c(seq(-90.75, -87.75, by = 0.5),   # lat 41.25
          seq(-90.25, -87.75, by = 0.5),   # lat 41.75
          seq(-90.25, -88.25, by = 0.5))   # lat 42.25
)

#import all data sets for mid (yield projections for each climate model)
#has to be run manually for each crop/scenario/normal combination (10 total)
yr <- c(2070:2099)
for (i in 1:5){
  mod <- switch(i, "gfdl", "hadgem", "ipsl", "miroc", "noresm")
  ifile <- paste("data/", mod, "/", mod, "_maize_north_rcp85_2070-2099.dat", sep = "")
  # Collapse repeated tabs before parsing, then restrict to the study years.
  t <- read.table(text = gsub("[\t]+", "\t", readLines(ifile), perl = TRUE),
                  sep="\t", header = TRUE, fill = TRUE) %>%
    clean_names() %>%
    filter(year >= yr[1]) %>%
    filter(year <= yr[30])
  # Keep only rows whose (lat, lon) pair belongs to the north region.  A
  # semi_join keeps matching rows of t without adding or duplicating columns,
  # so the yearly means below are identical to the old 18-subset version.
  t_subset_whole <- semi_join(t, north_cells, by = c("lat", "lon"))
  #extract mean yearly wso (yield) value for each data set
  out <- t_subset_whole %>%
    select(year, wso) %>%
    group_by(year) %>%
    summarize(mod = mean(wso)) %>%
    mutate(region = "north")
  ofile <- paste("output/", mod, "/", mod, "_m_n_85_70.csv", sep = "")
  write.csv(out, file = ofile)
}
|
75f35ed1b302f1eda282b2c2bc627f2003097802
|
6895dfd6ce7a392962790f2fdb64f8414180adb0
|
/man/prep_cross.Rd
|
9bb1c895062fd9a91a283aa201a67563a86bd234
|
[] |
no_license
|
neyhartj/fsimpute
|
957d832b0f08965596c2f467fa13aa93c1c71b8d
|
993561395c7b37b1cb2475e357cab4159dc6a1a0
|
refs/heads/master
| 2020-07-04T12:11:49.655347
| 2017-01-03T14:56:51
| 2017-01-03T14:56:51
| 74,064,977
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,107
|
rd
|
prep_cross.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prep_cross.R
\name{prep_cross}
\alias{prep_cross}
\title{Create a cross object}
\usage{
prep_cross(map, founders, finals, selfing.gen)
}
\arguments{
\item{map}{The genetic map, formatted as a \code{list} of chromosomes, where
each chromosome is a named vector of marker positions (in cM) and the names
are the marker names.}
\item{founders}{A list of founder genotypes, where each element in the list
is a \code{matrix} of genotypes on a single chromosome. Genotypes should be
coded as \code{z {0, 1, 2, NA}} where \code{z} is the number of reference alleles.
Column names should be marker names. This is the observed genotype data.}
\item{finals}{A list of progeny genotypes. The encoding and formatting should
be identical to the argument \code{founders}.}
\item{selfing.gen}{The number of selfing generations that the \code{finals}
have undergone. If the generation of the finals is \emph{F_t}, then the
argument \code{selfing.gen} would be \emph{t} - 1.}
}
\value{
An object of class \code{cross} with the normal elements \code{geno} and
\code{pheno}. The \code{geno} element is a list of chromosomes, each with
the elements:
\describe{
\item{data}{The recoded progeny genotypes}
\item{map}{The genetic map on that chromosome}
\item{founders}{The original founder genotypes}
\item{finals}{The original final genotypes}
\item{founders_unamb}{The unambiguous founder genotypes}
\item{finals_unamb}{The unambiguous final genotypes}
}
}
\description{
Takes raw founder and final genotypes and assembles a \code{cross}
object for downstream analysis. The genotypic data is first filtered
for unambiguous genotypes (see \code{Details}), then the genotypes are
recoded based on the unambiguous genotypes. Finally a \code{cross} object
is assembled and returned.
}
\details{
To force genotype data from a bi-parental family into a \code{cross} object
in \code{\link[qtl]{read.cross}}, the genotypes must be recoded into parental
states. Say two inbred parents have the observed gentypes \code{parent1 = 0} and
\code{parent2 = 2}, the parental states would be recoded as
\code{parent1 = 1} and \code{parent2 = 3}. Parent 1 is also given a parental
state of \code{1} and parent 2 is always given a parental state of \code{3}.
Among the progeny, any genotype call that is identical to the that of parent 1
would received a recoded genotype of \code{1}, and any gentype cal that is
identical to that of parent 2 would receive a recoded genotype of \code{3}.
Heterozygous genotype calls are recoded as \code{2}.
Of course, in observed genotype data, the parental states are inherently
unknown (otherwise imputation would be easy). To determine parental states at
a marker, the marker must be unambiguous. That is, the parental states must
be easily inferred. To do this, we must only look at markers for which the
parents are both observed (i.e. no NA) and polymorphic between (i.e.
\code{0} and \code{2}). Any ambiguous markers are set to missing. This is ok,
because the imputation step is easily able to impute these genotypes.
}
|
a58aa5316fd94656ee798a8091700632374427c3
|
47e05368c371a9d580dc8c1b7744450dfc6c973e
|
/tempVilnius.R
|
a242585b10d60c24653aa4a13c72668bfe11b224
|
[] |
no_license
|
Gwenael617/VilniusTemperature
|
2ad6b3f0f77996bc1c5b80cbdfba4f271b75ec94
|
c3ae0e9b8298cdc5650637ad62b63b1f3e7a6009
|
refs/heads/master
| 2021-01-10T17:44:13.250348
| 2016-05-05T20:30:38
| 2016-05-05T20:30:38
| 45,398,745
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,481
|
r
|
tempVilnius.R
|
# NOTE(review): a hard-coded working directory makes the script machine-
# specific; consider a project-relative path instead.
setwd('N:/Rwd/VilniusTemperature')
cat("\n list of files in data folder : \n \n")
print(list.files("./data")) ## check the file numbers to pick up the correct one
library(lubridate)
# Read one raw temperature export from ./data (selected by its position in
# list.files("./data")) and return a tidy data.frame with columns `date`
# (POSIXct, Europe/Vilnius) and `temperature` (numeric).
# NOTE(review): the timestamps apparently arrive as the row names of the
# read.csv2 result -- confirm against a sample file.
tempVilnius <- function(fileNumber){
  # First 6 lines of the export are metadata, hence skip=6.
  df <- read.csv2(paste0("./data/", list.files("./data")[fileNumber]),
                  skip=6, header = TRUE, stringsAsFactors = FALSE)
  df <- df[,1:2] ## keep only first two columns one with temperature
  ## + one to keep it as a data.frame
  # Row names hold the "dd-mm-yyyy hh:mm" timestamps; lift them into a column.
  df$date <- rownames(df)
  df$temperature <- as.numeric(df[,1])
  rownames(df) <- NULL
  df <- df[,c(3,4)] ## keep only column date and temperature
  # olson_time_zones()[361] ## [1] "Europe/Vilnius" (lubridate package)
  df[,1] <- dmy_hm(df[,1], tz="Europe/Vilnius") ## lubridate package
  return(df) ## to make it appear in the environment
}
cat("\n to compute use the function : tempVilnius(fileNumber)")
## example
## janvier2015 <- tempVilnius(5)
# oct31 <- dmy_hm("31-10-2015 23:00",tz="Europe/Vilnius")
# with(oct14, plot(date, temperature, ylim=c(-5, 25),
# main ="Température en octobre \n comparatif 2014-2015"))
# # abline(lm(temperature~date, oct14),col=1)
# par(new=T)
# with(oct15, plot(date, temperature, xlim=c(min(date), oct31), ylim=c(-5, 25),
# xlab="", ylab="", col="red", pch=20))
# legend("topright", legend= c(2014,2015), pch=c(1,20), col=c(1,2), cex=0.8)
# # abline(lm(temperature~date, oct15),col=2)
|
a95d49648c9e7af127fcaee7c5bd91bb8da1b8a3
|
b88e71cf62d75955fab994688502cbaf1250c55d
|
/_src/update_estado.R
|
33823b991ac3f91202668fcac3400cab3b9d162d
|
[] |
no_license
|
vikramroy622/covid19br.github.io
|
7789002e1135c5a0ecb9234dcd238210fa51e273
|
ef10128998974e23faf7ae1ebd290fbe10922cd6
|
refs/heads/master
| 2022-09-01T01:20:41.060808
| 2020-05-27T00:41:31
| 2020-05-27T00:41:31
| 267,365,257
| 2
| 0
| null | 2020-05-27T16:03:35
| 2020-05-27T16:03:34
| null |
UTF-8
|
R
| false
| false
| 3,063
|
r
|
update_estado.R
|
# Regenerates the per-state exponential-forecast plots (interactive HTML
# widget plus SVG placeholders at several text sizes) for the COVID-19 site.
# Any error aborts the whole run with a non-zero exit status.
tryCatch({
  # Libraries
  library(widgetframe)
  library(tidyverse)
  library(plotly)
  library(lubridate)
  # Helper Function: builds a named list whose names come from the
  # argument expressions themselves.
  makeNamedList <- function(...) {
    structure(list(...), names = as.list(substitute(list(...)))[-1L])
  }
  # States to be updated
  estados.para.atualizar <- c('AC', 'AL', 'AM', 'AP', 'BA', 'CE', 'DF', 'ES', 'GO', 'MA', 'MG', 'MS', 'MT', 'PA', 'PB', 'PE', 'PI', 'PR', 'RJ', 'RN', 'RO', 'RR', 'RS', 'SC', 'SE', 'SP', 'TO') # keep alphabetical order
  # Data processing (per state?)
  source('prepara_dados_estado.R')
  source('ajuste_projecao_exponencial_estado.R')
  # Plot generation (defines estados.plot.forecast.exp.br used below)
  source('plots_estados.R')
  ## Last-update timestamp
  print("Atualizando data de atualizacao...")
  # NOTE(review): `file` shadows base::file here; harmless but confusing.
  file <- file("../web/last.update.estado.txt")
  writeLines(c(paste(now())), file)
  close(file)
  ################################################################################
  ## Update plot.forecast.exp for each state
  ################################################################################
  for (st in estados.para.atualizar) {
    filepath <- paste("../web/plot.forecast.exp.", tolower(st), sep="") # plot.forecast.exp for states
    # interactive widget
    graph.html <- ggplotly(estados.plot.forecast.exp.br[[st]])
    saveWidget(frameableWidget(graph.html), file = paste(filepath,".html",sep=""), libdir="./libs")
    # svg placeholder
    graph.svg <- estados.plot.forecast.exp.br[[st]] + theme(axis.text=element_text(size=6.65), # corrects the text-size difference between svg and html
                                                            plot.margin = margin(10, 0, 0, 7, "pt")) # corrects the margin inserted by plotly
    ggsave(paste(filepath,".svg",sep=""), plot = graph.svg, device = svg, scale = 1, width = 215, height = 146, units = "mm")
    # size computed using ppi = 141.21
    # placeholder text size must be scaled by a factor of 0.665 of the original
    # large
    graph.sm.svg <- graph.svg + theme(axis.text=element_text(size=8.65)) # corrects the text-size difference between svg and html
    ggsave(paste(filepath,".lg.svg",sep=""), plot = graph.sm.svg, device = svg, scale = 1, width = 215, height = 146, units = "mm")
    # medium
    graph.sm.svg <- graph.svg + theme(axis.text=element_text(size=12.65)) # corrects the text-size difference between svg and html
    ggsave(paste(filepath,".md.svg",sep=""), plot = graph.sm.svg, device = svg, scale = 1, width = 215, height = 146, units = "mm")
    # small
    graph.sm.svg <- graph.svg + theme(axis.text=element_text(size=16.65)) # corrects the text-size difference between svg and html
    ggsave(paste(filepath,".sm.svg",sep=""), plot = graph.sm.svg, device = svg, scale = 1, width = 215, height = 146, units = "mm")
    # extra small
    graph.sm.svg <- graph.svg + theme(axis.text=element_text(size=20.65)) # corrects the text-size difference between svg and html
    ggsave(paste(filepath,".ex.svg",sep=""), plot = graph.sm.svg, device = svg, scale = 1, width = 215, height = 146, units = "mm")
  }
}, error = function(cond){
  message(cond)
  quit(status = 1)
})
|
96552d290da392db81ef56af49ef31df9e12862a
|
06169fdcd899f4e59d424cc6602d801a43f94183
|
/R/organization_api.R
|
0c0016caf0e5b3888a977795f2c9860487c7f2dd
|
[] |
no_license
|
jfontestad/tldbclr
|
56b2efa27eb4d20d893611c65341ee6074ca4efd
|
f3ad4b338235f73af073fdb84d91bcd7f1ee7064
|
refs/heads/master
| 2022-12-04T00:44:18.624015
| 2020-08-18T18:19:43
| 2020-08-18T22:06:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 57,897
|
r
|
organization_api.R
|
# TileDB Storage Platform API
#
# TileDB Storage Platform REST API
#
# The version of the OpenAPI document: 2.0.4
#
# Generated by: https://openapi-generator.tech
#' @docType class
#' @title Organization operations
#' @description tiledbcloud.Organization
#' @format An \code{R6Class} generator object
#' @field apiClient Handles the client-server communication.
#'
#' @section Methods:
#' \describe{
#' \strong{ AddAWSAccessCredentials } \emph{ }
#' Add aws keys
#'
#' \itemize{
#' \item \emph{ @param } namespace character
#' \item \emph{ @param } aws.access.credentials \link{AWSAccessCredentials}
#'
#'
#' \item status code : 204 | AWS keys added successfully
#'
#'
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 0 | error response
#'
#' \item return type : Error
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' }
#'
#' \strong{ AddUserToOrganization } \emph{ }
#' add a user to an organization
#'
#' \itemize{
#' \item \emph{ @param } organization character
#' \item \emph{ @param } user \link{OrganizationUser}
#'
#'
#' \item status code : 204 | user added to organization successfully
#'
#'
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 0 | error response
#'
#' \item return type : Error
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' }
#'
#' \strong{ CheckAWSAccessCredentials } \emph{ }
#' Check if aws keys are set
#'
#' \itemize{
#' \item \emph{ @param } namespace character
#' \item \emph{ @returnType } list( \link{AWSAccessCredentials} ) \cr
#'
#'
#' \item status code : 200 | AWS keys are set
#'
#' \item return type : array[AWSAccessCredentials]
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 0 | error response
#'
#' \item return type : Error
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' }
#'
#' \strong{ CheckAWSAccessCredentialsByName } \emph{ }
#' Check if aws keys are set by name
#'
#' \itemize{
#' \item \emph{ @param } namespace character
#' \item \emph{ @param } name character
#' \item \emph{ @returnType } \link{AWSAccessCredentials} \cr
#'
#'
#' \item status code : 200 | AWS keys are set
#'
#' \item return type : AWSAccessCredentials
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 0 | error response
#'
#' \item return type : Error
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' }
#'
#' \strong{ CreateOrganization } \emph{ }
#' create an organization; the user creating it will be listed as owner
#'
#' \itemize{
#' \item \emph{ @param } organization \link{Organization}
#'
#'
#' \item status code : 204 | organization created successfully
#'
#'
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 0 | error response
#'
#' \item return type : Error
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' }
#'
#' \strong{ DeleteAWSAccessCredentials } \emph{ }
#' delete AWS Access credentials in a namespace. This will likely cause arrays to become unreachable
#'
#' \itemize{
#' \item \emph{ @param } namespace character
#' \item \emph{ @param } name character
#'
#'
#' \item status code : 204 | AWS credentials deleted
#'
#'
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 0 | error response
#'
#' \item return type : Error
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' }
#'
#' \strong{ DeleteOrganization } \emph{ }
#' delete an organization
#'
#' \itemize{
#' \item \emph{ @param } organization character
#'
#'
#' \item status code : 204 | organization deleted
#'
#'
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 0 | error response
#'
#' \item return type : Error
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' }
#'
#' \strong{ DeleteUserFromOrganization } \emph{ }
#' delete a user from an organization
#'
#' \itemize{
#' \item \emph{ @param } organization character
#' \item \emph{ @param } username character
#'
#'
#' \item status code : 204 | user delete from organization successfully
#'
#'
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 0 | error response
#'
#' \item return type : Error
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' }
#'
#' \strong{ GetAllOrganizations } \emph{ }
#' get all organizations that the user is a member of
#'
#' \itemize{
#' \item \emph{ @returnType } list( \link{Organization} ) \cr
#'
#'
#' \item status code : 200 | array of organizations the user is member of
#'
#' \item return type : array[Organization]
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 400 | Error finding organizations
#'
#'
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 500 | Request user not found, or has empty context
#'
#'
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 0 | error response
#'
#' \item return type : Error
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' }
#'
#' \strong{ GetOrganization } \emph{ }
#' get an organization
#'
#' \itemize{
#' \item \emph{ @param } organization character
#' \item \emph{ @returnType } \link{Organization} \cr
#'
#'
#' \item status code : 200 | organization details
#'
#' \item return type : Organization
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 404 | Organization does not exist
#'
#'
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 0 | error response
#'
#' \item return type : Error
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' }
#'
#' \strong{ GetOrganizationUser } \emph{ }
#' get a user from an organization
#'
#' \itemize{
#' \item \emph{ @param } organization character
#' \item \emph{ @param } username character
#' \item \emph{ @returnType } \link{OrganizationUser} \cr
#'
#'
#' \item status code : 200 | user from organization
#'
#' \item return type : OrganizationUser
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 404 | User is not in organization
#'
#'
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 0 | error response
#'
#' \item return type : Error
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' }
#'
#' \strong{ UpdateAWSAccessCredentials } \emph{ }
#' Update aws keys or associated buckets. This will update the key associations for each array in the namespace
#'
#' \itemize{
#' \item \emph{ @param } namespace character
#' \item \emph{ @param } name character
#' \item \emph{ @param } aws.access.credentials \link{AWSAccessCredentials}
#'
#'
#' \item status code : 204 | AWS keys updated successfully
#'
#'
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 0 | error response
#'
#' \item return type : Error
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' }
#'
#' \strong{ UpdateOrganization } \emph{ }
#' update an organization
#'
#' \itemize{
#' \item \emph{ @param } organization character
#' \item \emph{ @param } organization.details \link{Organization}
#'
#'
#' \item status code : 204 | organization updated successfully
#'
#'
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 0 | error response
#'
#' \item return type : Error
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' }
#'
#' \strong{ UpdateUserInOrganization } \emph{ }
#' update a user in an organization
#'
#' \itemize{
#' \item \emph{ @param } organization character
#' \item \emph{ @param } username character
#' \item \emph{ @param } user \link{OrganizationUser}
#'
#'
#' \item status code : 204 | user update in organization successfully
#'
#'
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' \item status code : 0 | error response
#'
#' \item return type : Error
#' \item response headers :
#'
#' \tabular{ll}{
#' }
#' }
#'
#' }
#'
#'
#' @examples
#' \dontrun{
#' #################### AddAWSAccessCredentials ####################
#'
#' library(tiledbcloud)
#' var.namespace <- 'namespace_example' # character | namespace
#' var.aws.access.credentials <- AWSAccessCredentials$new() # AWSAccessCredentials | aws access credentials to store for a namespace
#'
#' api.instance <- OrganizationApi$new()
#'
#' #Configure API key authorization: ApiKeyAuth
#' api.instance$apiClient$apiKeys['X-TILEDB-REST-API-KEY'] <- 'TODO_YOUR_API_KEY';
#'
#' #Configure HTTP basic authorization: BasicAuth
#' # provide your username in the user-serial format
#' api.instance$apiClient$username <- '<user-serial>';
#' # provide your api key generated using the developer portal
#' api.instance$apiClient$password <- '<api_key>';
#'
#' result <- api.instance$AddAWSAccessCredentials(var.namespace, var.aws.access.credentials)
#'
#'
#' #################### AddUserToOrganization ####################
#'
#' library(tiledbcloud)
#' var.organization <- 'organization_example' # character | organization name
#' var.user <- OrganizationUser$new() # OrganizationUser | user to add
#'
#' api.instance <- OrganizationApi$new()
#'
#' #Configure API key authorization: ApiKeyAuth
#' api.instance$apiClient$apiKeys['X-TILEDB-REST-API-KEY'] <- 'TODO_YOUR_API_KEY';
#'
#' #Configure HTTP basic authorization: BasicAuth
#' # provide your username in the user-serial format
#' api.instance$apiClient$username <- '<user-serial>';
#' # provide your api key generated using the developer portal
#' api.instance$apiClient$password <- '<api_key>';
#'
#' result <- api.instance$AddUserToOrganization(var.organization, var.user)
#'
#'
#' #################### CheckAWSAccessCredentials ####################
#'
#' library(tiledbcloud)
#' var.namespace <- 'namespace_example' # character | namespace
#'
#' api.instance <- OrganizationApi$new()
#'
#' #Configure API key authorization: ApiKeyAuth
#' api.instance$apiClient$apiKeys['X-TILEDB-REST-API-KEY'] <- 'TODO_YOUR_API_KEY';
#'
#' #Configure HTTP basic authorization: BasicAuth
#' # provide your username in the user-serial format
#' api.instance$apiClient$username <- '<user-serial>';
#' # provide your api key generated using the developer portal
#' api.instance$apiClient$password <- '<api_key>';
#'
#' result <- api.instance$CheckAWSAccessCredentials(var.namespace)
#'
#'
#' #################### CheckAWSAccessCredentialsByName ####################
#'
#' library(tiledbcloud)
#' var.namespace <- 'namespace_example' # character | namespace
#' var.name <- 'name_example' # character | name
#'
#' api.instance <- OrganizationApi$new()
#'
#' #Configure API key authorization: ApiKeyAuth
#' api.instance$apiClient$apiKeys['X-TILEDB-REST-API-KEY'] <- 'TODO_YOUR_API_KEY';
#'
#' #Configure HTTP basic authorization: BasicAuth
#' # provide your username in the user-serial format
#' api.instance$apiClient$username <- '<user-serial>';
#' # provide your api key generated using the developer portal
#' api.instance$apiClient$password <- '<api_key>';
#'
#' result <- api.instance$CheckAWSAccessCredentialsByName(var.namespace, var.name)
#'
#'
#' #################### CreateOrganization ####################
#'
#' library(tiledbcloud)
#' var.organization <- Organization$new() # Organization | organization to create
#'
#' api.instance <- OrganizationApi$new()
#'
#' #Configure API key authorization: ApiKeyAuth
#' api.instance$apiClient$apiKeys['X-TILEDB-REST-API-KEY'] <- 'TODO_YOUR_API_KEY';
#'
#' #Configure HTTP basic authorization: BasicAuth
#' # provide your username in the user-serial format
#' api.instance$apiClient$username <- '<user-serial>';
#' # provide your api key generated using the developer portal
#' api.instance$apiClient$password <- '<api_key>';
#'
#' result <- api.instance$CreateOrganization(var.organization)
#'
#'
#' #################### DeleteAWSAccessCredentials ####################
#'
#' library(tiledbcloud)
#' var.namespace <- 'namespace_example' # character | namespace
#' var.name <- 'name_example' # character | name
#'
#' api.instance <- OrganizationApi$new()
#'
#' #Configure API key authorization: ApiKeyAuth
#' api.instance$apiClient$apiKeys['X-TILEDB-REST-API-KEY'] <- 'TODO_YOUR_API_KEY';
#'
#' #Configure HTTP basic authorization: BasicAuth
#' # provide your username in the user-serial format
#' api.instance$apiClient$username <- '<user-serial>';
#' # provide your api key generated using the developer portal
#' api.instance$apiClient$password <- '<api_key>';
#'
#' result <- api.instance$DeleteAWSAccessCredentials(var.namespace, var.name)
#'
#'
#' #################### DeleteOrganization ####################
#'
#' library(tiledbcloud)
#' var.organization <- 'organization_example' # character | organization name or id
#'
#' api.instance <- OrganizationApi$new()
#'
#' #Configure API key authorization: ApiKeyAuth
#' api.instance$apiClient$apiKeys['X-TILEDB-REST-API-KEY'] <- 'TODO_YOUR_API_KEY';
#'
#' #Configure HTTP basic authorization: BasicAuth
#' # provide your username in the user-serial format
#' api.instance$apiClient$username <- '<user-serial>';
#' # provide your api key generated using the developer portal
#' api.instance$apiClient$password <- '<api_key>';
#'
#' result <- api.instance$DeleteOrganization(var.organization)
#'
#'
#' #################### DeleteUserFromOrganization ####################
#'
#' library(tiledbcloud)
#' var.organization <- 'organization_example' # character | organization name
#' var.username <- 'username_example' # character | username to manipulate
#'
#' api.instance <- OrganizationApi$new()
#'
#' #Configure API key authorization: ApiKeyAuth
#' api.instance$apiClient$apiKeys['X-TILEDB-REST-API-KEY'] <- 'TODO_YOUR_API_KEY';
#'
#' #Configure HTTP basic authorization: BasicAuth
#' # provide your username in the user-serial format
#' api.instance$apiClient$username <- '<user-serial>';
#' # provide your api key generated using the developer portal
#' api.instance$apiClient$password <- '<api_key>';
#'
#' result <- api.instance$DeleteUserFromOrganization(var.organization, var.username)
#'
#'
#' #################### GetAllOrganizations ####################
#'
#' library(tiledbcloud)
#'
#' api.instance <- OrganizationApi$new()
#'
#' #Configure API key authorization: ApiKeyAuth
#' api.instance$apiClient$apiKeys['X-TILEDB-REST-API-KEY'] <- 'TODO_YOUR_API_KEY';
#'
#' #Configure HTTP basic authorization: BasicAuth
#' # provide your username in the user-serial format
#' api.instance$apiClient$username <- '<user-serial>';
#' # provide your api key generated using the developer portal
#' api.instance$apiClient$password <- '<api_key>';
#'
#' result <- api.instance$GetAllOrganizations()
#'
#'
#' #################### GetOrganization ####################
#'
#' library(tiledbcloud)
#' var.organization <- 'organization_example' # character | organization name or id
#'
#' api.instance <- OrganizationApi$new()
#'
#' #Configure API key authorization: ApiKeyAuth
#' api.instance$apiClient$apiKeys['X-TILEDB-REST-API-KEY'] <- 'TODO_YOUR_API_KEY';
#'
#' #Configure HTTP basic authorization: BasicAuth
#' # provide your username in the user-serial format
#' api.instance$apiClient$username <- '<user-serial>';
#' # provide your api key generated using the developer portal
#' api.instance$apiClient$password <- '<api_key>';
#'
#' result <- api.instance$GetOrganization(var.organization)
#'
#'
#' #################### GetOrganizationUser ####################
#'
#' library(tiledbcloud)
#' var.organization <- 'organization_example' # character | organization name
#' var.username <- 'username_example' # character | username to manipulate
#'
#' api.instance <- OrganizationApi$new()
#'
#' #Configure API key authorization: ApiKeyAuth
#' api.instance$apiClient$apiKeys['X-TILEDB-REST-API-KEY'] <- 'TODO_YOUR_API_KEY';
#'
#' #Configure HTTP basic authorization: BasicAuth
#' # provide your username in the user-serial format
#' api.instance$apiClient$username <- '<user-serial>';
#' # provide your api key generated using the developer portal
#' api.instance$apiClient$password <- '<api_key>';
#'
#' result <- api.instance$GetOrganizationUser(var.organization, var.username)
#'
#'
#' #################### UpdateAWSAccessCredentials ####################
#'
#' library(tiledbcloud)
#' var.namespace <- 'namespace_example' # character | namespace
#' var.name <- 'name_example' # character | name
#' var.aws.access.credentials <- AWSAccessCredentials$new() # AWSAccessCredentials | aws credentials to update
#'
#' api.instance <- OrganizationApi$new()
#'
#' #Configure API key authorization: ApiKeyAuth
#' api.instance$apiClient$apiKeys['X-TILEDB-REST-API-KEY'] <- 'TODO_YOUR_API_KEY';
#'
#' #Configure HTTP basic authorization: BasicAuth
#' # provide your username in the user-serial format
#' api.instance$apiClient$username <- '<user-serial>';
#' # provide your api key generated using the developer portal
#' api.instance$apiClient$password <- '<api_key>';
#'
#' result <- api.instance$UpdateAWSAccessCredentials(var.namespace, var.name, var.aws.access.credentials)
#'
#'
#' #################### UpdateOrganization ####################
#'
#' library(tiledbcloud)
#' var.organization <- 'organization_example' # character | organization name or id
#' var.organization.details <- Organization$new() # Organization | organization details to update
#'
#' api.instance <- OrganizationApi$new()
#'
#' #Configure API key authorization: ApiKeyAuth
#' api.instance$apiClient$apiKeys['X-TILEDB-REST-API-KEY'] <- 'TODO_YOUR_API_KEY';
#'
#' #Configure HTTP basic authorization: BasicAuth
#' # provide your username in the user-serial format
#' api.instance$apiClient$username <- '<user-serial>';
#' # provide your api key generated using the developer portal
#' api.instance$apiClient$password <- '<api_key>';
#'
#' result <- api.instance$UpdateOrganization(var.organization, var.organization.details)
#'
#'
#' #################### UpdateUserInOrganization ####################
#'
#' library(tiledbcloud)
#' var.organization <- 'organization_example' # character | organization name
#' var.username <- 'username_example' # character | username to manipulate
#' var.user <- OrganizationUser$new() # OrganizationUser | user details to update
#'
#' api.instance <- OrganizationApi$new()
#'
#' #Configure API key authorization: ApiKeyAuth
#' api.instance$apiClient$apiKeys['X-TILEDB-REST-API-KEY'] <- 'TODO_YOUR_API_KEY';
#'
#' #Configure HTTP basic authorization: BasicAuth
#' # provide your username in the user-serial format
#' api.instance$apiClient$username <- '<user-serial>';
#' # provide your api key generated using the developer portal
#' api.instance$apiClient$password <- '<api_key>';
#'
#' result <- api.instance$UpdateUserInOrganization(var.organization, var.username, var.user)
#'
#'
#' }
#' @importFrom R6 R6Class
#' @importFrom base64enc base64encode
#' @export
OrganizationApi <- R6::R6Class(
'OrganizationApi',
public = list(
apiClient = NULL,
initialize = function(apiClient){
if (!missing(apiClient)) {
self$apiClient <- apiClient
}
else {
self$apiClient <- ApiClient$new()
}
},
AddAWSAccessCredentials = function(namespace, aws.access.credentials, ...){
apiResponse <- self$AddAWSAccessCredentialsWithHttpInfo(namespace, aws.access.credentials, ...)
resp <- apiResponse$response
if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
apiResponse$content
} else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
apiResponse
} else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
apiResponse
} else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
apiResponse
}
},
AddAWSAccessCredentialsWithHttpInfo = function(namespace, aws.access.credentials, ...){
args <- list(...)
queryParams <- list()
headerParams <- c()
if (missing(`namespace`)) {
stop("Missing required parameter `namespace`.")
}
if (missing(`aws.access.credentials`)) {
stop("Missing required parameter `aws.access.credentials`.")
}
if (!missing(`aws.access.credentials`)) {
body <- `aws.access.credentials`$toJSONString()
} else {
body <- NULL
}
urlPath <- "/credentials/{namespace}/aws"
if (!missing(`namespace`)) {
urlPath <- gsub(paste0("\\{", "namespace", "\\}"), URLencode(as.character(`namespace`), reserved = TRUE), urlPath)
}
# API key authentication
if ("X-TILEDB-REST-API-KEY" %in% names(self$apiClient$apiKeys) && nchar(self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]) > 0) {
headerParams['X-TILEDB-REST-API-KEY'] <- paste(unlist(self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]), collapse='')
}
# HTTP basic auth
headerParams['Authorization'] <- paste("Basic", base64enc::base64encode(charToRaw(paste(self$apiClient$username, self$apiClient$password, sep=":"))))
resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
method = "POST",
queryParams = queryParams,
headerParams = headerParams,
body = body,
...)
if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
ApiResponse$new(NULL, resp)
} else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
ApiResponse$new(paste("Server returned " , httr::status_code(resp) , " response status code."), resp)
} else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
ApiResponse$new("API client error", resp)
} else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
ApiResponse$new("API server error", resp)
}
},
AddUserToOrganization = function(organization, user, ...){
apiResponse <- self$AddUserToOrganizationWithHttpInfo(organization, user, ...)
resp <- apiResponse$response
if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
apiResponse$content
} else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
apiResponse
} else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
apiResponse
} else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
apiResponse
}
},
AddUserToOrganizationWithHttpInfo = function(organization, user, ...){
args <- list(...)
queryParams <- list()
headerParams <- c()
if (missing(`organization`)) {
stop("Missing required parameter `organization`.")
}
if (missing(`user`)) {
stop("Missing required parameter `user`.")
}
if (!missing(`user`)) {
body <- `user`$toJSONString()
} else {
body <- NULL
}
urlPath <- "/organizations/{organization}/user"
if (!missing(`organization`)) {
urlPath <- gsub(paste0("\\{", "organization", "\\}"), URLencode(as.character(`organization`), reserved = TRUE), urlPath)
}
# API key authentication
if ("X-TILEDB-REST-API-KEY" %in% names(self$apiClient$apiKeys) && nchar(self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]) > 0) {
headerParams['X-TILEDB-REST-API-KEY'] <- paste(unlist(self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]), collapse='')
}
# HTTP basic auth
headerParams['Authorization'] <- paste("Basic", base64enc::base64encode(charToRaw(paste(self$apiClient$username, self$apiClient$password, sep=":"))))
resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
method = "POST",
queryParams = queryParams,
headerParams = headerParams,
body = body,
...)
if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
ApiResponse$new(NULL, resp)
} else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
ApiResponse$new(paste("Server returned " , httr::status_code(resp) , " response status code."), resp)
} else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
ApiResponse$new("API client error", resp)
} else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
ApiResponse$new("API server error", resp)
}
},
CheckAWSAccessCredentials = function(namespace, ...){
apiResponse <- self$CheckAWSAccessCredentialsWithHttpInfo(namespace, ...)
resp <- apiResponse$response
if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
apiResponse$content
} else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
apiResponse
} else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
apiResponse
} else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
apiResponse
}
},
CheckAWSAccessCredentialsWithHttpInfo = function(namespace, ...){
args <- list(...)
queryParams <- list()
headerParams <- c()
if (missing(`namespace`)) {
stop("Missing required parameter `namespace`.")
}
urlPath <- "/credentials/{namespace}/aws"
if (!missing(`namespace`)) {
urlPath <- gsub(paste0("\\{", "namespace", "\\}"), URLencode(as.character(`namespace`), reserved = TRUE), urlPath)
}
# API key authentication
if ("X-TILEDB-REST-API-KEY" %in% names(self$apiClient$apiKeys) && nchar(self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]) > 0) {
headerParams['X-TILEDB-REST-API-KEY'] <- paste(unlist(self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]), collapse='')
}
# HTTP basic auth
headerParams['Authorization'] <- paste("Basic", base64enc::base64encode(charToRaw(paste(self$apiClient$username, self$apiClient$password, sep=":"))))
resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
method = "GET",
queryParams = queryParams,
headerParams = headerParams,
body = body,
...)
if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
deserializedRespObj <- tryCatch(
self$apiClient$deserialize(resp, "array[AWSAccessCredentials]", loadNamespace("tiledbcloud")),
error = function(e){
stop("Failed to deserialize response")
}
)
ApiResponse$new(deserializedRespObj, resp)
} else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
ApiResponse$new(paste("Server returned " , httr::status_code(resp) , " response status code."), resp)
} else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
ApiResponse$new("API client error", resp)
} else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
ApiResponse$new("API server error", resp)
}
},
CheckAWSAccessCredentialsByName = function(namespace, name, ...){
apiResponse <- self$CheckAWSAccessCredentialsByNameWithHttpInfo(namespace, name, ...)
resp <- apiResponse$response
if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
apiResponse$content
} else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
apiResponse
} else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
apiResponse
} else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
apiResponse
}
},
CheckAWSAccessCredentialsByNameWithHttpInfo = function(namespace, name, ...){
args <- list(...)
queryParams <- list()
headerParams <- c()
if (missing(`namespace`)) {
stop("Missing required parameter `namespace`.")
}
if (missing(`name`)) {
stop("Missing required parameter `name`.")
}
urlPath <- "/credentials/{namespace}/aws/{name}"
if (!missing(`namespace`)) {
urlPath <- gsub(paste0("\\{", "namespace", "\\}"), URLencode(as.character(`namespace`), reserved = TRUE), urlPath)
}
if (!missing(`name`)) {
urlPath <- gsub(paste0("\\{", "name", "\\}"), URLencode(as.character(`name`), reserved = TRUE), urlPath)
}
# API key authentication
if ("X-TILEDB-REST-API-KEY" %in% names(self$apiClient$apiKeys) && nchar(self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]) > 0) {
headerParams['X-TILEDB-REST-API-KEY'] <- paste(unlist(self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]), collapse='')
}
# HTTP basic auth
headerParams['Authorization'] <- paste("Basic", base64enc::base64encode(charToRaw(paste(self$apiClient$username, self$apiClient$password, sep=":"))))
resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
method = "GET",
queryParams = queryParams,
headerParams = headerParams,
body = body,
...)
if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
deserializedRespObj <- tryCatch(
self$apiClient$deserialize(resp, "AWSAccessCredentials", loadNamespace("tiledbcloud")),
error = function(e){
stop("Failed to deserialize response")
}
)
ApiResponse$new(deserializedRespObj, resp)
} else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
ApiResponse$new(paste("Server returned " , httr::status_code(resp) , " response status code."), resp)
} else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
ApiResponse$new("API client error", resp)
} else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
ApiResponse$new("API server error", resp)
}
},
CreateOrganization = function(organization, ...){
apiResponse <- self$CreateOrganizationWithHttpInfo(organization, ...)
resp <- apiResponse$response
if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
apiResponse$content
} else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
apiResponse
} else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
apiResponse
} else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
apiResponse
}
},
CreateOrganizationWithHttpInfo = function(organization, ...){
args <- list(...)
queryParams <- list()
headerParams <- c()
if (missing(`organization`)) {
stop("Missing required parameter `organization`.")
}
if (!missing(`organization`)) {
body <- `organization`$toJSONString()
} else {
body <- NULL
}
urlPath <- "/organization"
# API key authentication
if ("X-TILEDB-REST-API-KEY" %in% names(self$apiClient$apiKeys) && nchar(self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]) > 0) {
headerParams['X-TILEDB-REST-API-KEY'] <- paste(unlist(self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]), collapse='')
}
# HTTP basic auth
headerParams['Authorization'] <- paste("Basic", base64enc::base64encode(charToRaw(paste(self$apiClient$username, self$apiClient$password, sep=":"))))
resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
method = "POST",
queryParams = queryParams,
headerParams = headerParams,
body = body,
...)
if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
ApiResponse$new(NULL, resp)
} else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
ApiResponse$new(paste("Server returned " , httr::status_code(resp) , " response status code."), resp)
} else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
ApiResponse$new("API client error", resp)
} else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
ApiResponse$new("API server error", resp)
}
},
DeleteAWSAccessCredentials = function(namespace, name, ...){
  # Convenience wrapper around DeleteAWSAccessCredentialsWithHttpInfo():
  # unwraps the payload for 2xx responses and returns the raw ApiResponse
  # for every other known status class (3xx/4xx/5xx).
  apiResponse <- self$DeleteAWSAccessCredentialsWithHttpInfo(namespace, name, ...)
  code <- httr::status_code(apiResponse$response)
  if (code >= 200 && code <= 299) {
    apiResponse$content
  } else if (code >= 300 && code <= 599) {
    apiResponse
  }
},
DeleteAWSAccessCredentialsWithHttpInfo = function(namespace, name, ...){
  # DELETE /credentials/{namespace}/aws/{name} — removes the named AWS
  # access credentials. Returns an ApiResponse whose content is NULL on
  # success and an error string otherwise.
  args <- list(...)
  queryParams <- list()
  headerParams <- c()
  if (missing(`namespace`)) {
    stop("Missing required parameter `namespace`.")
  }
  if (missing(`name`)) {
    stop("Missing required parameter `name`.")
  }
  # Bug fix: `body` was never assigned locally, so `body = body` below
  # resolved to base::body() (a function) via lexical scoping instead of an
  # empty request payload. A DELETE request here carries no body.
  body <- NULL
  urlPath <- "/credentials/{namespace}/aws/{name}"
  if (!missing(`namespace`)) {
    urlPath <- gsub(paste0("\\{", "namespace", "\\}"), URLencode(as.character(`namespace`), reserved = TRUE), urlPath)
  }
  if (!missing(`name`)) {
    urlPath <- gsub(paste0("\\{", "name", "\\}"), URLencode(as.character(`name`), reserved = TRUE), urlPath)
  }
  # API key authentication
  if ("X-TILEDB-REST-API-KEY" %in% names(self$apiClient$apiKeys) && nchar(self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]) > 0) {
    headerParams['X-TILEDB-REST-API-KEY'] <- paste(unlist(self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]), collapse='')
  }
  # HTTP basic auth
  headerParams['Authorization'] <- paste("Basic", base64enc::base64encode(charToRaw(paste(self$apiClient$username, self$apiClient$password, sep=":"))))
  resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                 method = "DELETE",
                                 queryParams = queryParams,
                                 headerParams = headerParams,
                                 body = body,
                                 ...)
  if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
    ApiResponse$new(NULL, resp)
  } else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
    ApiResponse$new(paste("Server returned " , httr::status_code(resp) , " response status code."), resp)
  } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
    ApiResponse$new("API client error", resp)
  } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
    ApiResponse$new("API server error", resp)
  }
},
DeleteOrganization = function(organization, ...){
  # Convenience wrapper around DeleteOrganizationWithHttpInfo():
  # unwraps the payload for 2xx responses and returns the raw ApiResponse
  # for every other known status class (3xx/4xx/5xx).
  apiResponse <- self$DeleteOrganizationWithHttpInfo(organization, ...)
  code <- httr::status_code(apiResponse$response)
  if (code >= 200 && code <= 299) {
    apiResponse$content
  } else if (code >= 300 && code <= 599) {
    apiResponse
  }
},
DeleteOrganizationWithHttpInfo = function(organization, ...){
  # DELETE /organizations/{organization} — deletes the organization.
  # Returns an ApiResponse whose content is NULL on success and an error
  # string otherwise.
  args <- list(...)
  queryParams <- list()
  headerParams <- c()
  if (missing(`organization`)) {
    stop("Missing required parameter `organization`.")
  }
  # Bug fix: `body` was never assigned locally, so `body = body` below
  # resolved to base::body() (a function) via lexical scoping instead of an
  # empty request payload. A DELETE request here carries no body.
  body <- NULL
  urlPath <- "/organizations/{organization}"
  if (!missing(`organization`)) {
    urlPath <- gsub(paste0("\\{", "organization", "\\}"), URLencode(as.character(`organization`), reserved = TRUE), urlPath)
  }
  # API key authentication
  if ("X-TILEDB-REST-API-KEY" %in% names(self$apiClient$apiKeys) && nchar(self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]) > 0) {
    headerParams['X-TILEDB-REST-API-KEY'] <- paste(unlist(self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]), collapse='')
  }
  # HTTP basic auth
  headerParams['Authorization'] <- paste("Basic", base64enc::base64encode(charToRaw(paste(self$apiClient$username, self$apiClient$password, sep=":"))))
  resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                 method = "DELETE",
                                 queryParams = queryParams,
                                 headerParams = headerParams,
                                 body = body,
                                 ...)
  if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
    ApiResponse$new(NULL, resp)
  } else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
    ApiResponse$new(paste("Server returned " , httr::status_code(resp) , " response status code."), resp)
  } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
    ApiResponse$new("API client error", resp)
  } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
    ApiResponse$new("API server error", resp)
  }
},
DeleteUserFromOrganization = function(organization, username, ...){
  # Convenience wrapper around DeleteUserFromOrganizationWithHttpInfo():
  # unwraps the payload for 2xx responses and returns the raw ApiResponse
  # for every other known status class (3xx/4xx/5xx).
  apiResponse <- self$DeleteUserFromOrganizationWithHttpInfo(organization, username, ...)
  code <- httr::status_code(apiResponse$response)
  if (code >= 200 && code <= 299) {
    apiResponse$content
  } else if (code >= 300 && code <= 599) {
    apiResponse
  }
},
DeleteUserFromOrganizationWithHttpInfo = function(organization, username, ...){
  # DELETE /organizations/{organization}/{username} — removes a user from
  # the organization. Returns an ApiResponse whose content is NULL on
  # success and an error string otherwise.
  args <- list(...)
  queryParams <- list()
  headerParams <- c()
  if (missing(`organization`)) {
    stop("Missing required parameter `organization`.")
  }
  if (missing(`username`)) {
    stop("Missing required parameter `username`.")
  }
  # Bug fix: `body` was never assigned locally, so `body = body` below
  # resolved to base::body() (a function) via lexical scoping instead of an
  # empty request payload. A DELETE request here carries no body.
  body <- NULL
  urlPath <- "/organizations/{organization}/{username}"
  if (!missing(`organization`)) {
    urlPath <- gsub(paste0("\\{", "organization", "\\}"), URLencode(as.character(`organization`), reserved = TRUE), urlPath)
  }
  if (!missing(`username`)) {
    urlPath <- gsub(paste0("\\{", "username", "\\}"), URLencode(as.character(`username`), reserved = TRUE), urlPath)
  }
  # API key authentication
  if ("X-TILEDB-REST-API-KEY" %in% names(self$apiClient$apiKeys) && nchar(self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]) > 0) {
    headerParams['X-TILEDB-REST-API-KEY'] <- paste(unlist(self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]), collapse='')
  }
  # HTTP basic auth
  headerParams['Authorization'] <- paste("Basic", base64enc::base64encode(charToRaw(paste(self$apiClient$username, self$apiClient$password, sep=":"))))
  resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                 method = "DELETE",
                                 queryParams = queryParams,
                                 headerParams = headerParams,
                                 body = body,
                                 ...)
  if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
    ApiResponse$new(NULL, resp)
  } else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
    ApiResponse$new(paste("Server returned " , httr::status_code(resp) , " response status code."), resp)
  } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
    ApiResponse$new("API client error", resp)
  } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
    ApiResponse$new("API server error", resp)
  }
},
GetAllOrganizations = function(...){
  # Convenience wrapper around GetAllOrganizationsWithHttpInfo():
  # unwraps the deserialized payload for 2xx responses and returns the raw
  # ApiResponse for every other known status class (3xx/4xx/5xx).
  apiResponse <- self$GetAllOrganizationsWithHttpInfo(...)
  code <- httr::status_code(apiResponse$response)
  if (code >= 200 && code <= 299) {
    apiResponse$content
  } else if (code >= 300 && code <= 599) {
    apiResponse
  }
},
GetAllOrganizationsWithHttpInfo = function(...){
  # GET /organizations — fetches every organization visible to the caller.
  # On success the ApiResponse content is a deserialized array[Organization];
  # otherwise the content is an error string.
  args <- list(...)
  queryParams <- list()
  headerParams <- c()
  # Bug fix: `body` was never assigned locally, so `body = body` below
  # resolved to base::body() (a function) via lexical scoping instead of an
  # empty request payload. A GET request here carries no body.
  body <- NULL
  urlPath <- "/organizations"
  # API key authentication
  if ("X-TILEDB-REST-API-KEY" %in% names(self$apiClient$apiKeys) && nchar(self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]) > 0) {
    headerParams['X-TILEDB-REST-API-KEY'] <- paste(unlist(self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]), collapse='')
  }
  # HTTP basic auth
  headerParams['Authorization'] <- paste("Basic", base64enc::base64encode(charToRaw(paste(self$apiClient$username, self$apiClient$password, sep=":"))))
  resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                 method = "GET",
                                 queryParams = queryParams,
                                 headerParams = headerParams,
                                 body = body,
                                 ...)
  if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
    deserializedRespObj <- tryCatch(
      self$apiClient$deserialize(resp, "array[Organization]", loadNamespace("tiledbcloud")),
      error = function(e){
        stop("Failed to deserialize response")
      }
    )
    ApiResponse$new(deserializedRespObj, resp)
  } else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
    ApiResponse$new(paste("Server returned " , httr::status_code(resp) , " response status code."), resp)
  } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
    ApiResponse$new("API client error", resp)
  } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
    ApiResponse$new("API server error", resp)
  }
},
GetOrganization = function(organization, ...){
  # Convenience wrapper around GetOrganizationWithHttpInfo():
  # unwraps the deserialized payload for 2xx responses and returns the raw
  # ApiResponse for every other known status class (3xx/4xx/5xx).
  apiResponse <- self$GetOrganizationWithHttpInfo(organization, ...)
  code <- httr::status_code(apiResponse$response)
  if (code >= 200 && code <= 299) {
    apiResponse$content
  } else if (code >= 300 && code <= 599) {
    apiResponse
  }
},
GetOrganizationWithHttpInfo = function(organization, ...){
  # GET /organizations/{organization} — fetches one organization.
  # On success the ApiResponse content is a deserialized Organization;
  # otherwise the content is an error string.
  args <- list(...)
  queryParams <- list()
  headerParams <- c()
  if (missing(`organization`)) {
    stop("Missing required parameter `organization`.")
  }
  # Bug fix: `body` was never assigned locally, so `body = body` below
  # resolved to base::body() (a function) via lexical scoping instead of an
  # empty request payload. A GET request here carries no body.
  body <- NULL
  urlPath <- "/organizations/{organization}"
  if (!missing(`organization`)) {
    urlPath <- gsub(paste0("\\{", "organization", "\\}"), URLencode(as.character(`organization`), reserved = TRUE), urlPath)
  }
  # API key authentication
  if ("X-TILEDB-REST-API-KEY" %in% names(self$apiClient$apiKeys) && nchar(self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]) > 0) {
    headerParams['X-TILEDB-REST-API-KEY'] <- paste(unlist(self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]), collapse='')
  }
  # HTTP basic auth
  headerParams['Authorization'] <- paste("Basic", base64enc::base64encode(charToRaw(paste(self$apiClient$username, self$apiClient$password, sep=":"))))
  resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                 method = "GET",
                                 queryParams = queryParams,
                                 headerParams = headerParams,
                                 body = body,
                                 ...)
  if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
    deserializedRespObj <- tryCatch(
      self$apiClient$deserialize(resp, "Organization", loadNamespace("tiledbcloud")),
      error = function(e){
        stop("Failed to deserialize response")
      }
    )
    ApiResponse$new(deserializedRespObj, resp)
  } else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
    ApiResponse$new(paste("Server returned " , httr::status_code(resp) , " response status code."), resp)
  } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
    ApiResponse$new("API client error", resp)
  } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
    ApiResponse$new("API server error", resp)
  }
},
GetOrganizationUser = function(organization, username, ...){
  # Convenience wrapper around GetOrganizationUserWithHttpInfo():
  # unwraps the deserialized payload for 2xx responses and returns the raw
  # ApiResponse for every other known status class (3xx/4xx/5xx).
  apiResponse <- self$GetOrganizationUserWithHttpInfo(organization, username, ...)
  code <- httr::status_code(apiResponse$response)
  if (code >= 200 && code <= 299) {
    apiResponse$content
  } else if (code >= 300 && code <= 599) {
    apiResponse
  }
},
GetOrganizationUserWithHttpInfo = function(organization, username, ...){
  # GET /organizations/{organization}/{username} — fetches a user's
  # membership record. On success the ApiResponse content is a deserialized
  # OrganizationUser; otherwise the content is an error string.
  args <- list(...)
  queryParams <- list()
  headerParams <- c()
  if (missing(`organization`)) {
    stop("Missing required parameter `organization`.")
  }
  if (missing(`username`)) {
    stop("Missing required parameter `username`.")
  }
  # Bug fix: `body` was never assigned locally, so `body = body` below
  # resolved to base::body() (a function) via lexical scoping instead of an
  # empty request payload. A GET request here carries no body.
  body <- NULL
  urlPath <- "/organizations/{organization}/{username}"
  if (!missing(`organization`)) {
    urlPath <- gsub(paste0("\\{", "organization", "\\}"), URLencode(as.character(`organization`), reserved = TRUE), urlPath)
  }
  if (!missing(`username`)) {
    urlPath <- gsub(paste0("\\{", "username", "\\}"), URLencode(as.character(`username`), reserved = TRUE), urlPath)
  }
  # API key authentication
  if ("X-TILEDB-REST-API-KEY" %in% names(self$apiClient$apiKeys) && nchar(self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]) > 0) {
    headerParams['X-TILEDB-REST-API-KEY'] <- paste(unlist(self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]), collapse='')
  }
  # HTTP basic auth
  headerParams['Authorization'] <- paste("Basic", base64enc::base64encode(charToRaw(paste(self$apiClient$username, self$apiClient$password, sep=":"))))
  resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                 method = "GET",
                                 queryParams = queryParams,
                                 headerParams = headerParams,
                                 body = body,
                                 ...)
  if (httr::status_code(resp) >= 200 && httr::status_code(resp) <= 299) {
    deserializedRespObj <- tryCatch(
      self$apiClient$deserialize(resp, "OrganizationUser", loadNamespace("tiledbcloud")),
      error = function(e){
        stop("Failed to deserialize response")
      }
    )
    ApiResponse$new(deserializedRespObj, resp)
  } else if (httr::status_code(resp) >= 300 && httr::status_code(resp) <= 399) {
    ApiResponse$new(paste("Server returned " , httr::status_code(resp) , " response status code."), resp)
  } else if (httr::status_code(resp) >= 400 && httr::status_code(resp) <= 499) {
    ApiResponse$new("API client error", resp)
  } else if (httr::status_code(resp) >= 500 && httr::status_code(resp) <= 599) {
    ApiResponse$new("API server error", resp)
  }
},
UpdateAWSAccessCredentials = function(namespace, name, aws.access.credentials, ...){
  # Convenience wrapper around UpdateAWSAccessCredentialsWithHttpInfo():
  # unwraps the payload for 2xx responses and returns the raw ApiResponse
  # for every other known status class (3xx/4xx/5xx).
  apiResponse <- self$UpdateAWSAccessCredentialsWithHttpInfo(namespace, name, aws.access.credentials, ...)
  code <- httr::status_code(apiResponse$response)
  if (code >= 200 && code <= 299) {
    apiResponse$content
  } else if (code >= 300 && code <= 599) {
    apiResponse
  }
},
UpdateAWSAccessCredentialsWithHttpInfo = function(namespace, name, aws.access.credentials, ...){
  # PATCH /credentials/{namespace}/aws/{name} — updates the named AWS
  # access credentials from the supplied model and wraps the HTTP outcome
  # in an ApiResponse (content is NULL on success).
  args <- list(...)
  queryParams <- list()
  headerParams <- c()
  if (missing(`namespace`)) {
    stop("Missing required parameter `namespace`.")
  }
  if (missing(`name`)) {
    stop("Missing required parameter `name`.")
  }
  if (missing(`aws.access.credentials`)) {
    stop("Missing required parameter `aws.access.credentials`.")
  }
  # Serialize the request payload (cannot be missing past the check above).
  body <- if (!missing(`aws.access.credentials`)) `aws.access.credentials`$toJSONString() else NULL
  # Substitute both path parameters into the URL template.
  urlPath <- "/credentials/{namespace}/aws/{name}"
  if (!missing(`namespace`)) {
    urlPath <- gsub(paste0("\\{", "namespace", "\\}"), URLencode(as.character(`namespace`), reserved = TRUE), urlPath)
  }
  if (!missing(`name`)) {
    urlPath <- gsub(paste0("\\{", "name", "\\}"), URLencode(as.character(`name`), reserved = TRUE), urlPath)
  }
  # API key authentication
  apiKey <- self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]
  if ("X-TILEDB-REST-API-KEY" %in% names(self$apiClient$apiKeys) && nchar(apiKey) > 0) {
    headerParams['X-TILEDB-REST-API-KEY'] <- paste(unlist(apiKey), collapse='')
  }
  # HTTP basic auth
  basicCreds <- paste(self$apiClient$username, self$apiClient$password, sep=":")
  headerParams['Authorization'] <- paste("Basic", base64enc::base64encode(charToRaw(basicCreds)))
  resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                 method = "PATCH",
                                 queryParams = queryParams,
                                 headerParams = headerParams,
                                 body = body,
                                 ...)
  code <- httr::status_code(resp)
  if (code >= 200 && code <= 299) {
    ApiResponse$new(NULL, resp)
  } else if (code >= 300 && code <= 399) {
    ApiResponse$new(paste("Server returned " , code , " response status code."), resp)
  } else if (code >= 400 && code <= 499) {
    ApiResponse$new("API client error", resp)
  } else if (code >= 500 && code <= 599) {
    ApiResponse$new("API server error", resp)
  }
},
UpdateOrganization = function(organization, organization.details, ...){
  # Convenience wrapper around UpdateOrganizationWithHttpInfo():
  # unwraps the payload for 2xx responses and returns the raw ApiResponse
  # for every other known status class (3xx/4xx/5xx).
  apiResponse <- self$UpdateOrganizationWithHttpInfo(organization, organization.details, ...)
  code <- httr::status_code(apiResponse$response)
  if (code >= 200 && code <= 299) {
    apiResponse$content
  } else if (code >= 300 && code <= 599) {
    apiResponse
  }
},
UpdateOrganizationWithHttpInfo = function(organization, organization.details, ...){
  # PATCH /organizations/{organization} — updates the organization from the
  # supplied details model and wraps the HTTP outcome in an ApiResponse
  # (content is NULL on success).
  args <- list(...)
  queryParams <- list()
  headerParams <- c()
  if (missing(`organization`)) {
    stop("Missing required parameter `organization`.")
  }
  if (missing(`organization.details`)) {
    stop("Missing required parameter `organization.details`.")
  }
  # Serialize the request payload (cannot be missing past the check above).
  body <- if (!missing(`organization.details`)) `organization.details`$toJSONString() else NULL
  urlPath <- "/organizations/{organization}"
  if (!missing(`organization`)) {
    urlPath <- gsub(paste0("\\{", "organization", "\\}"), URLencode(as.character(`organization`), reserved = TRUE), urlPath)
  }
  # API key authentication
  apiKey <- self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]
  if ("X-TILEDB-REST-API-KEY" %in% names(self$apiClient$apiKeys) && nchar(apiKey) > 0) {
    headerParams['X-TILEDB-REST-API-KEY'] <- paste(unlist(apiKey), collapse='')
  }
  # HTTP basic auth
  basicCreds <- paste(self$apiClient$username, self$apiClient$password, sep=":")
  headerParams['Authorization'] <- paste("Basic", base64enc::base64encode(charToRaw(basicCreds)))
  resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                 method = "PATCH",
                                 queryParams = queryParams,
                                 headerParams = headerParams,
                                 body = body,
                                 ...)
  code <- httr::status_code(resp)
  if (code >= 200 && code <= 299) {
    ApiResponse$new(NULL, resp)
  } else if (code >= 300 && code <= 399) {
    ApiResponse$new(paste("Server returned " , code , " response status code."), resp)
  } else if (code >= 400 && code <= 499) {
    ApiResponse$new("API client error", resp)
  } else if (code >= 500 && code <= 599) {
    ApiResponse$new("API server error", resp)
  }
},
UpdateUserInOrganization = function(organization, username, user, ...){
  # Convenience wrapper around UpdateUserInOrganizationWithHttpInfo():
  # unwraps the payload for 2xx responses and returns the raw ApiResponse
  # for every other known status class (3xx/4xx/5xx).
  apiResponse <- self$UpdateUserInOrganizationWithHttpInfo(organization, username, user, ...)
  code <- httr::status_code(apiResponse$response)
  if (code >= 200 && code <= 299) {
    apiResponse$content
  } else if (code >= 300 && code <= 599) {
    apiResponse
  }
},
UpdateUserInOrganizationWithHttpInfo = function(organization, username, user, ...){
  # PATCH /organizations/{organization}/{username} — updates a user's
  # membership record from the supplied model and wraps the HTTP outcome in
  # an ApiResponse (content is NULL on success).
  args <- list(...)
  queryParams <- list()
  headerParams <- c()
  if (missing(`organization`)) {
    stop("Missing required parameter `organization`.")
  }
  if (missing(`username`)) {
    stop("Missing required parameter `username`.")
  }
  if (missing(`user`)) {
    stop("Missing required parameter `user`.")
  }
  # Serialize the request payload (cannot be missing past the check above).
  body <- if (!missing(`user`)) `user`$toJSONString() else NULL
  # Substitute both path parameters into the URL template.
  urlPath <- "/organizations/{organization}/{username}"
  if (!missing(`organization`)) {
    urlPath <- gsub(paste0("\\{", "organization", "\\}"), URLencode(as.character(`organization`), reserved = TRUE), urlPath)
  }
  if (!missing(`username`)) {
    urlPath <- gsub(paste0("\\{", "username", "\\}"), URLencode(as.character(`username`), reserved = TRUE), urlPath)
  }
  # API key authentication
  apiKey <- self$apiClient$apiKeys["X-TILEDB-REST-API-KEY"]
  if ("X-TILEDB-REST-API-KEY" %in% names(self$apiClient$apiKeys) && nchar(apiKey) > 0) {
    headerParams['X-TILEDB-REST-API-KEY'] <- paste(unlist(apiKey), collapse='')
  }
  # HTTP basic auth
  basicCreds <- paste(self$apiClient$username, self$apiClient$password, sep=":")
  headerParams['Authorization'] <- paste("Basic", base64enc::base64encode(charToRaw(basicCreds)))
  resp <- self$apiClient$CallApi(url = paste0(self$apiClient$basePath, urlPath),
                                 method = "PATCH",
                                 queryParams = queryParams,
                                 headerParams = headerParams,
                                 body = body,
                                 ...)
  code <- httr::status_code(resp)
  if (code >= 200 && code <= 299) {
    ApiResponse$new(NULL, resp)
  } else if (code >= 300 && code <= 399) {
    ApiResponse$new(paste("Server returned " , code , " response status code."), resp)
  } else if (code >= 400 && code <= 499) {
    ApiResponse$new("API client error", resp)
  } else if (code >= 500 && code <= 599) {
    ApiResponse$new("API server error", resp)
  }
}
)
)
|
03257b12bee5cf952081e04e29df2c1b95b1718b
|
9ad93fb5324fbdcaada135cef25969eb381c8372
|
/analysis/R/generate_output.R
|
388988e1fe6b66f8fbe5af7e986671327765f890
|
[
"MIT"
] |
permissive
|
pennell-lab/fossil_sampling
|
4f208de90ffc34a315b892f05da92e7a387a9e96
|
dc8b5e72af5705372859ec63cd90cada7231bb70
|
refs/heads/master
| 2021-01-12T08:29:41.292371
| 2016-12-20T08:47:56
| 2016-12-20T08:47:56
| 76,596,511
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 654
|
r
|
generate_output.R
|
# Anolis convergence figures: a zoomed scatter of body vs. femur length and
# per-ecomorph mean +/- sd of snout-vent length, both saved to analysis/output.
library(ggplot2)
library(ggforce)

anolis <- read.csv("analysis/data/anolis.convergence.csv")
head(anolis)

# Scatter of SVL vs femur length, zoomed on the Crown-Giant ecomorph.
zoom_plot <- ggplot(anolis, aes(SVLength, FemurLength, colour = Island)) +
  geom_point() +
  facet_zoom(xy = Ecomorph == "Crown-Giant")
zoom_plot
ggsave("analysis/output/anolis_zoom.pdf", plot = zoom_plot)

library(dplyr)
# Mean and sd of snout-vent length per ecomorph x island.
svl_summary <- anolis %>%
  group_by(Ecomorph, Island) %>%
  summarise("mean" = mean(SVLength),
            "sd" = sd(SVLength))
svl_summary

# Point-range plot of the summary statistics.
mean_plot <- ggplot(svl_summary, aes(Ecomorph, mean, colour = Island)) +
  geom_point(stat = "identity") +
  geom_pointrange(aes(ymax = mean + sd, ymin = mean - sd))
mean_plot
ggsave("analysis/output/anolis_mean.pdf", plot = mean_plot)
|
86825b63c8811d58606ac6b26830b29c51de1372
|
b512d9d69ebfd2b9537685c4cdf405f8ad59d5be
|
/man/importNmModInput.Rd
|
e80e809ea148ca99b554ea9434bef5c1bb66d98e
|
[] |
no_license
|
MikeKSmith/RNMImport
|
ba7b64b5be198e7fac364f6c747282b729211aae
|
ba395c6da585ade86dffe953159a4f50213fd792
|
refs/heads/master
| 2020-12-28T06:56:26.201975
| 2018-04-12T17:01:00
| 2018-04-12T17:01:00
| 67,253,939
| 0
| 1
| null | 2018-01-05T10:49:43
| 2016-09-02T20:49:22
|
R
|
UTF-8
|
R
| false
| true
| 918
|
rd
|
importNmModInput.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/importNmModInput.R
\name{importNmModInput}
\alias{importNmModInput}
\alias{.importNmModInput}
\title{Parse $INPUT statement}
\usage{
.importNmModInput(txt, .extract = length(grep("\\\\$INPUT", toupper(txt))) >
0)
}
\arguments{
\item{txt}{Character vector of text containing an $INPUT statement, typically the
contents of a control file}
\item{.extract}{Flag indicating whether this is the $INPUT section itself, or whether the section must first be
extracted}
}
\value{
a 2 column matrix mapping the variables in the input
statement with the variables used by NONMEM, which can be different by setting
aliases, e.g. ID=SUBJ in the $INPUT statement. Also some may be dropped
}
\description{
Parses the $INPUT section of a NONMEM control file
}
\examples{
.importNmModInput("$INPUT ID DOSE=AMT TIME CP=DV")
}
\author{
Mango Solutions
}
|
c8c62e92556553653497daf71ffee4ffc40a59c1
|
3b361820e93c9cbaa7e740b6edbf13c03a1cfcce
|
/man/BYlogreg.Rd
|
9319fa78d5aa6b13554d30478a7274838bc32828
|
[] |
no_license
|
msalibian/RobStatTM
|
f8dabc88197be2460f1ba4c95b595e95ff53c1e9
|
d542c29816d50889f25649817e3ae5de08946141
|
refs/heads/master
| 2023-05-14T08:42:27.747789
| 2023-05-09T17:08:37
| 2023-05-09T17:08:37
| 83,067,068
| 14
| 7
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,821
|
rd
|
BYlogreg.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BYlogreg.R
\name{logregBY}
\alias{logregBY}
\alias{BYlogreg}
\title{Bianco and Yohai estimator for logistic regression}
\usage{
logregBY(x0, y, intercept = 1, const = 0.5, kmax = 1000, maxhalf = 10)
}
\arguments{
\item{x0}{matrix of explanatory variables;}
\item{y}{vector of binomial responses (0 or 1);}
\item{intercept}{1 or 0 indicating if an intercept is included or not}
\item{const}{tuning constant used in the computation of the estimator (default=0.5);}
\item{kmax}{maximum number of iterations before convergence (default=1000);}
\item{maxhalf}{max number of step-halving (default=10).}
}
\value{
A list with the following components:
\item{coefficients}{estimates for the regression coefficients}
\item{standard.deviation}{standard deviations of the coefficients}
\item{fitted.values}{fitted values}
\item{residual.deviances}{residual deviances}
\item{components}{logical value indicating whether convergence was achieved}
\item{objective}{value of the objective function at the minimum}
}
\description{
This function computes the M-estimator proposed by Bianco and Yohai for
logistic regression. By default, an intercept term is included and p
parameters are estimated. Modified by Yohai (2018) to take as initial estimator
a weighted ML estimator with weights derived from the MCD estimator.
For more details we refer to Croux, C., and Haesbroeck, G. (2002),
"Implementing the Bianco and Yohai estimator for Logistic Regression"
}
\examples{
data(skin)
Xskin <- as.matrix( skin[, 1:2] )
yskin <- skin$vasoconst
skinBY <- logregBY(Xskin, yskin, intercept=1)
skinBY$coeff
skinBY$standard.deviation
}
\references{
\url{http://www.wiley.com/go/maronna/robust}
}
\author{
Christophe Croux, Gentiane Haesbroeck, Victor Yohai
}
|
9e45ba4f456d56fc56cd4fc57471f2b12bd6c09e
|
e575932c6669de128effa5de43d815a5c5d5163c
|
/man/SummarizePosteriors.Rd
|
5c77441fe78d181371936076db14786b12840141
|
[] |
no_license
|
femoerman/PBPGM
|
03e9a9e12b00dfebed20f4be59436c1547b62d92
|
a84b7e1ff5daad0083ecebb18159c88a5fb30d67
|
refs/heads/master
| 2021-08-27T18:37:36.599988
| 2021-08-19T14:38:07
| 2021-08-19T14:38:07
| 166,230,502
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 560
|
rd
|
SummarizePosteriors.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SummarizePosteriors.R
\name{SummarizePosteriors}
\alias{SummarizePosteriors}
\title{SummarizePosteriors function}
\usage{
SummarizePosteriors(fulloutput, modelname)
}
\arguments{
\item{fulloutput}{Output generated in the BHfitting function, containing all the posterior data from the model fitting}
}
\description{
Function to summarize the posterior data
}
\examples{
SummarizePosteriors()
}
\keyword{Beverton}
\keyword{Holt}
\keyword{output}
\keyword{posteriors}
\keyword{summary}
|
9bcfb4e8d03803f374c4a6a9856f38df611c4ad1
|
a5c83798a649d5113387bb005030bc91ef578cfa
|
/Reviews.R
|
b101a3c61c9d66fb1453f9097c20a19e933e892d
|
[] |
no_license
|
tejascphadnis/visitsingapore
|
3ec78fae7db75141c3c04d52f4c4fd1394d6b663
|
059466b0c260c115f6c9464c7b15c990b6cd59ed
|
refs/heads/master
| 2021-01-09T06:27:15.487959
| 2017-03-29T23:17:18
| 2017-03-29T23:17:18
| 80,988,997
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,367
|
r
|
Reviews.R
|
# Load Data
# NOTE(review): setwd() in a script is fragile; kept to preserve the existing
# workflow, but project-relative paths would be more portable.
setwd("D:/Data Science/Data Incubator/DataSet")
reviews <- read.csv("Reviews.csv")

# Universal Studios ----
reviewsUSS <- subset(reviews, Attraction == "Universal Studios")
# Flag 4- and 5-star reviews as positive sentiment.
reviewsUSS$sentiment <- 0
reviewsUSS[reviewsUSS$Reviewer.Rating == 5 | reviewsUSS$Reviewer.Rating == 4, "sentiment"] <- 1
# Monthly totals and positive counts.
reviewsUSS_a <- aggregate(reviewsUSS$Id, list(reviewsUSS$Review.Month), length)
names(reviewsUSS_a)[names(reviewsUSS_a) == "x"] <- "Total"
temp <- aggregate(reviewsUSS$sentiment, list(reviewsUSS$Review.Month), sum)
names(temp)[names(temp) == "x"] <- "Positive"
reviewsUSS_a <- merge(reviewsUSS_a, temp, by.x = "Group.1", by.y = "Group.1")
reviewsUSS_a$Negative <- reviewsUSS_a$Total - reviewsUSS_a$Positive
reviewsUSS_a$Positive_precent <- reviewsUSS_a$Positive / reviewsUSS_a$Total * 100
names(reviewsUSS_a)[names(reviewsUSS_a) == "Group.1"] <- "Period"

library(ggplot2)
# Fix: labs() takes named arguments directly; wrapping them in list() does not
# set the axis labels/title.
ggplot(reviewsUSS_a) +
  geom_line(aes(x = Period, y = Total, colour = "Total")) +
  geom_line(aes(x = Period, y = Positive, colour = "Positive")) +
  geom_line(aes(x = Period, y = Negative, colour = "Negative")) +
  scale_color_discrete(name = "Legend") +
  labs(x = "Period", y = "Number of Reviews", title = "Universal Studio, Singapore - Reviews")

### Gardens by the Bay ----
reviewsGBB <- subset(reviews, Attraction == "Gardens by the Bay")
reviewsGBB$sentiment <- 0
reviewsGBB[reviewsGBB$Reviewer.Rating == 5 | reviewsGBB$Reviewer.Rating == 4, "sentiment"] <- 1
reviewsGBB_a <- aggregate(reviewsGBB$Id, list(reviewsGBB$Review.Month), length)
names(reviewsGBB_a)[names(reviewsGBB_a) == "x"] <- "Total"
temp <- aggregate(reviewsGBB$sentiment, list(reviewsGBB$Review.Month), sum)
names(temp)[names(temp) == "x"] <- "Positive"
reviewsGBB_a <- merge(reviewsGBB_a, temp, by.x = "Group.1", by.y = "Group.1")
reviewsGBB_a$Negative <- reviewsGBB_a$Total - reviewsGBB_a$Positive
reviewsGBB_a$Positive_precent <- reviewsGBB_a$Positive / reviewsGBB_a$Total * 100
names(reviewsGBB_a)[names(reviewsGBB_a) == "Group.1"] <- "Period"

ggplot(reviewsGBB_a) +
  geom_line(aes(x = Period, y = Total, colour = "Total")) +
  geom_line(aes(x = Period, y = Positive, colour = "Positive")) +
  geom_line(aes(x = Period, y = Negative, colour = "Negative")) +
  scale_color_discrete(name = "Legend") +
  labs(x = "Period", y = "Number of Reviews", title = "Gardens by the Bay, Singapore - Reviews")

# Comparision ----
reviews_BOTH <- merge(reviewsUSS_a, reviewsGBB_a, by.x = "Period", by.y = "Period")
ggplot(reviews_BOTH) +
  geom_line(aes(x = Period, y = Positive_precent.x, colour = "Universal Studios, Singapore")) +
  geom_line(aes(x = Period, y = Positive_precent.y, colour = "Gardens by the Bay")) +
  scale_color_discrete(name = "Legend") +
  labs(x = "Period", y = "% Positive Reviews", title = "Universal Studios, Singapore Vs. Gardens by the Bay")

# Analyze reasons for Poor Reviews ----
reviews_poor <- reviews[reviews$Reviewer.Rating < 4, c("Attraction", "Reviewer.Rating", "Reviewer.Level", "Reason.for.Poor.Rating", "Reviewer.Type")]
# Fix: the original compared the whole data frame to the string
# (`reviews_poor == "Universal Studios"`), which yields a logical matrix over
# every cell instead of selecting rows by attraction.
reviews_poorUSS <- reviews_poor[reviews_poor$Attraction == "Universal Studios", ]
reviews_poorUSS_a <- aggregate(Attraction ~ Reason.for.Poor.Rating + Reviewer.Type, data = reviews_poorUSS, length)

# Fix: the original mapped the literal string "Reviewer.Rating" to colour,
# which only created a bogus one-entry legend; dropped.
ggplot(data = reviews_poorUSS, aes(x = Reason.for.Poor.Rating,
                                   fill = Reviewer.Rating)) +
  geom_bar() +
  scale_x_discrete("Reason for Poor Rating") +
  scale_y_continuous("Count") +
  facet_grid(. ~ Reviewer.Type)

ggplot(data = reviews_poorUSS, aes(x = factor(Reason.for.Poor.Rating),
                                   fill = Reviewer.Rating)) +
  geom_bar() +
  scale_x_discrete("Reason for Poor Rating") +
  scale_y_continuous("Count") +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  facet_grid(. ~ Reviewer.Type)

ggplot(data = reviews_poorUSS, aes(x = factor(Reviewer.Type),
                                   fill = Reviewer.Rating)) +
  geom_bar() +
  scale_x_discrete("Reviewer Type") +
  scale_y_continuous("Count") +
  theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
  facet_grid(. ~ Reason.for.Poor.Rating)
|
92706a0918b7550e04668b21845c09b7a4988418
|
e2ab8b8ac40148ea44407407f71e731329e126b4
|
/Rcode.R
|
e72d7f8577d8e0c9f8085646aad70962952ee901
|
[] |
no_license
|
reemaleithan/software_bug_prediction_explanation
|
9ceab7adae7a9c4e584c934fdbf802ad9f648d9b
|
e63fd12ad6425e14af82d1ceac99c335d9568d94
|
refs/heads/main
| 2023-03-24T15:28:48.674251
| 2021-03-15T20:42:55
| 2021-03-15T20:42:55
| 310,771,447
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,571
|
r
|
Rcode.R
|
# R snippets for Break-Down with DALEX
# read more about the method at
# Explanatory Model Analysis
# https://pbiecek.github.io/ema/

# Prepare data (interactive file pickers for the training/testing CSVs).
library("DALEX")
jdt_training <- read.csv(file.choose(), header = TRUE)
jdt_testing <- read.csv(file.choose(), header = TRUE)

# Train a model
library(ranger)
set.seed(1313)
#jdt_rf <- ranger(label ~ .,
#                 data = jdt_train,
#                 probability = TRUE,
#                 classification = TRUE)
library(randomForest)
jdt_rf <- randomForest(label ~ .,
                       data = jdt_training)
jdt_rf

# Prepare an explainer
library("DALEX")
jdt_ex <- explain(jdt_rf,
                  data = jdt_training,
                  # NOTE(review): this flags rows with label == 0 as the event
                  # of interest — confirm 0 (not 1) encodes the target class.
                  y = jdt_training$label == 0,
                  # Fix: the old label ("Logistic Regression for
                  # Linux_1_instance_id= 343880") described a different model
                  # and dataset; this explainer wraps a random forest on JDT.
                  label = "Random Forest for JDT")

# Prepare an instance
# NOTE: data.frame() mangles names starting with a digit ("10019_unlock"
# becomes "X10019_unlock" under the default check.names = TRUE); read.csv
# applies the same mangling to the training columns, so the names line up.
jdt_ins <- data.frame(
  "10019_unlock" = 0,
  "10881_eth" = 0,
  "10114_free" = 0,
  "10028_lock" = 0,
  "10053_pdev" = 0,
  "10638_ifdef" = 0,
  "11078_static" = 10,
  "10441_notic" = 0,
  "403_lines_changed" = 29,
  "10633_desc" = 0,
  "11323_area" = 0,
  "414_lines_inserted" = 48,
  "11623_printk" = 18,
  "11127_share" = 0,
  "1012_firstparm" = 4,
  "10841_window" = 0,
  "1181_CASE" = 0,
  "10042_cmd" = 0,
  "11898_good" = 0,
  "label" = 1
)
jdt_ins

# Break-down attribution for the single instance, plus a raw prediction.
jdt_explanation <- predict_parts(explainer = jdt_ex,
                                 new_observation = jdt_ins,
                                 type = "break_down")
predict(jdt_ex, jdt_ins)
plot(jdt_explanation)
|
f063e1e0d9237adcec390e9215bcc8fc40a7f590
|
01a6c5a8fd11768560697e643f392bb519afdfae
|
/Scripts/Nutrition2013.R
|
d430a085888c4f7978f7ec9bc11d990e472e1332
|
[] |
no_license
|
vincentlinderhof/NutritionETH
|
05e9902c15bce6823d72dd5b38f246a20bf2d624
|
c321aa0a40f75b604f4dd138666fb8c048e454e1
|
refs/heads/master
| 2020-09-25T19:39:45.005718
| 2017-03-19T14:31:08
| 2017-03-19T14:31:08
| 66,675,689
| 0
| 1
| null | 2017-03-19T14:31:09
| 2016-08-26T19:58:22
|
R
|
UTF-8
|
R
| false
| false
| 18,015
|
r
|
Nutrition2013.R
|
# Nutrition indicators (FVS, DDS, FCS, CSI) from the 2013 Ethiopia LSMS wave.
# This section sets machine-specific data paths, loads packages, and reads
# the 7-day food diary (section 5b) used to build FVS/DDS/FCS.
# Tom
# dataPath <- "C:/Users/Tomas/Documents/LEI/data/TZA/2010/Data"
# LEI Path
# dataPath <- "W:/LEI/Internationaal Beleid (IB)/Projecten/2285000066 Africa Maize Yield Gap/SurveyData/TZA/2010/Data"
# ACT: Tom has to change his dataPath
if(Sys.info()["user"] == "Tomas"){
dataPath <- "C:/Users/Tomas/Documents/LEI/data/TZA/2013/Data"
} else {
dataPath <- "D:/Analyses/CIMMYT/NutritionETH/SurveyData/2013/Data"
}
# NOTE(review): hard-coded setwd() makes the script machine-specific; output
# files below (e.g. Results/graph1.pdf) are written relative to this path.
setwd("D:/Analyses/CIMMYT/NutritionETH")
# load packages
library(haven)
library("stringr")
library("reshape2")
library(dplyr)
library("markdown")
library(tidyr) # Necessary for spread function in mutate command
library(Deducer) # necessary for descriptives.tables
options(scipen=999)
# ***************************************************************************************************
#Creation of FVS
# ***************************************************************************************************
#FOOD2013 <- read_dta(file.path(dataPath, "sect7_hh_w1.dta")) # CSI saq08 hh_s7q02_a to hh_s7q02_h
#FOOD2013 <- read_dta(file.path(dataPath, "sect5b_hh_w1.dta")) # DDS and FCS hh_s5aq00 hh_s5aq0a hh_s5aq01 hh_s5aq02_a hh_s5aq02_b
#FOOD2013 <- read_dta(file.path(dataPath, "sect5a_hh_w1.dta")) # FVS and DDS hh_s5aq00 hh_s5aq0a hh_s5aq01 hh_s5aq02_a hh_s5aq02_b
# Wave-2 household food diary: one row per household x food item.
FOOD2013 <- read_dta(file.path(dataPath, "household/sect5b_hh_w2.dta")) # FVS and DDS hh_s5aq00 hh_s5aq0a hh_s5aq01 hh_s5aq02_a hh_s5aq02_b
# Keep item code (hh_s5bq00), item name (hh_s5bq0a), consumed yes/no
# (hh_s5bq01) and number of days consumed (hh_s5bq02).
FOOD2013 <- subset(FOOD2013, select=c(household_id, hh_s5bq00, hh_s5bq0a, hh_s5bq01, hh_s5bq02))
# How food items are connected to food groups, See FAO (2013)
# 2-13 100 cereals = mean(cereals, na.rm = TRUE),
# 14-20 200 rootsandtubers = mean(rootsandtubers, na.rm = TRUE),
# 21-23 300 vegetables = mean(vegetables, na.rm=TRUE),
# 24 400 pulsesandnuts = mean(pulsesandnuts, na.rm=TRUE),
# 25-28 500 fruits = mean(fruits, na.rm=TRUE),
# 29-31 600 meat = mean(meat, na.rm=TRUE),
# 32-35 700 eggs = mean(eggs, na.rm=TRUE),
# 36-38 800 fishandseafood= mean(fishandseafood, na.rm=TRUE),
# 39-48 900 milkandmilkproducts= mean(milkandmilkproducts, na.rm=TRUE),
# 48-50 1000 oilsandfats=mean(oilsandfats, na.rm=TRUE),
# 50-53 1100 sugar=mean(sugar, na.rm=TRUE),
# 53-60 1200 condiments=mean(condiments, na.rm=TRUE))
#aggregate(FOOD2013, by=(FOOD2013$hh_s5bq00), FUN=count, na.rm=TRUE)
# Construct dummy variables for food items: do not use as it produces wrong results
#NUTR2013 <-
# mutate(FOOD2013, count = ifelse(hh_s5bq01 == 1, hh_s5bq02, ifelse(NA))) %>%
# group_by(household_id) %>%
# spread(hh_s5bq00, count) %>%
# filter (! duplicated(household_id)) %>%
# replace(is.na(.), 0)
#NUTR2013CH <- NUTR2013[ c(1,2,3,4) ]
#NUTR2013 <- NUTR2013[ -c(2,3,4) ]
#summary(NUTR2013CH)
# Food-item consumption dummies: FIxx_<name> is 1 when the diary row refers
# to food item code xx (hh_s5bq00) AND the household reported consuming it
# (hh_s5bq01 == 1), else 0 (NA when hh_s5bq01 is NA, as in the original).
# Built in a loop instead of 16 copy-pasted assignments; the resulting
# columns are identical to the hand-written versions.
food_items <- c(
  "1"  = "FI01_Enjera",
  "2"  = "FI02_OtherCereals",
  "3"  = "FI03_Potatoes",
  "4"  = "FI04_Pasta",
  "5"  = "FI05_Sugar",
  "6"  = "FI06_PulsesandNuts",
  "7"  = "FI07_Vegetables",
  "8"  = "FI08_Fruits",
  "9"  = "FI09_RedMeat",
  "10" = "FI10_Poultry",
  "11" = "FI11_Eggs",
  "12" = "FI12_Fish",
  "13" = "FI13_FatsandOils",
  "14" = "FI14_DairyProducts",
  "15" = "FI15_Condiments",
  "16" = "FI16_KochoandBula"
)
for (item_code in names(food_items)) {
  FOOD2013[[food_items[[item_code]]]] <-
    1 * (FOOD2013$hh_s5bq00 == as.numeric(item_code) & FOOD2013$hh_s5bq01 == 1)
}
# Aggregate each food-item dummy to household level (sum over the household's
# diary rows) and join the per-item totals into one household-level table
# NUTR2013, replacing 16 copy-pasted aggregate/left_join stanzas.
#
# The original hand-written version dropped the first aggregate row (a
# placeholder household id) via "[2:3777, ]" for most items but skipped the
# drop for items 5-8.  Because every per-item table is *left*-joined onto the
# table built from item 1 (which IS subset), unmatched right-hand rows are
# discarded by the join anyway, so applying the drop uniformly reproduces the
# original result while removing the inconsistency.
fi_cols <- grep("^FI[0-9]{2}_", names(FOOD2013), value = TRUE)
NUTR2013 <- NULL
for (fi in fi_cols) {
  # Formula interface (as in the original) so rows with NA dummies are
  # dropped before summing.
  fi_agg <- aggregate(reformulate("household_id", response = fi),
                      data = FOOD2013, FUN = sum)
  fi_agg <- fi_agg[2:3777, ]  # drop the leading placeholder household row
  if (is.null(NUTR2013)) {
    NUTR2013 <- fi_agg
  } else {
    NUTR2013 <- left_join(NUTR2013, fi_agg)  # joins on household_id
  }
}
# Food Variety Score: number of distinct food items consumed during the
# recall period (columns 2:17 are the 16 FI dummies).
NUTR2013$FVS16 <- rowSums(NUTR2013[2:17])
# descriptives of food group dummy variables and FVS and DDS
descriptive.table(vars = d(FI01_Enjera, FI02_OtherCereals, FI03_Potatoes, FI04_Pasta, FI05_Sugar, FI06_PulsesandNuts,
FI07_Vegetables, FI08_Fruits, FI09_RedMeat, FI10_Poultry, FI11_Eggs, FI12_Fish,
FI13_FatsandOils, FI14_DairyProducts, FI15_Condiments, FI16_KochoandBula, FVS16),data= NUTR2013,
func.names = c("Mean","St. Deviation", "Min", "Max", "Valid N"))
# FNS2013 collects all food-and-nutrition-security indicators per household;
# started here with FVS, extended below with DDS, FCS and CSI.
FNS2013 <- NUTR2013[ c("household_id", "FVS16") ]
# ***************************************************************************************************
#Construction of DDS (Dietary Diversity Score), uses the data of the FVC construction!
# ***************************************************************************************************
# Columns correspond to list of food items!
# sum fooditems into 12 foodgroups for FVS: columns correspond to list of food items!
# Each food group is 1 if ANY of its constituent item dummies is 1.
NUTR2013$cereals <- 1*((NUTR2013$FI01_Enjera+NUTR2013$FI02_OtherCereals+NUTR2013$FI04_Pasta ) > 0 )
#NUTR2013$cereals <- 1*((NUTR2013[ c("FI01_Enjera", "FI02_OtherCereals", "FI04_Pasta") ] ) > 0 )
NUTR2013$rootsandtubers <- 1*((NUTR2013$FI03_Potatoes+NUTR2013$FI16_KochoandBula ) > 0 )
#NUTR2013$rootsandtubers <- 1*((NUTR2013[ c("FI03_Potatoes", "FI16_KochoandBula") ] ) > 0 )
NUTR2013$vegetables <- NUTR2013$FI07_Vegetables
NUTR2013$fruits <- NUTR2013$FI08_Fruits
NUTR2013$meat <- 1*((NUTR2013$FI09_RedMeat+NUTR2013$FI10_Poultry ) > 0 )
#NUTR2013$meat <- 1*((NUTR2013[ c("FI09_RedMeat", "FI10_Poultry")] ) > 0 )
NUTR2013$eggs <- NUTR2013$FI11_Eggs
NUTR2013$fish <- NUTR2013$FI12_Fish
NUTR2013$pulsesandnuts <- NUTR2013$FI06_PulsesandNuts
NUTR2013$dairyproducts <- NUTR2013$FI14_DairyProducts
NUTR2013$oilsandfats <- NUTR2013$FI13_FatsandOils
NUTR2013$condiments <- NUTR2013$FI15_Condiments
NUTR2013$sugar <- NUTR2013$FI05_Sugar
#install.packages("Hmisc")
#library(Hmisc)
#label(NUTR2013$cereals) <- "FG Cereals"
# DDS = number of the 12 food groups consumed (0-12).
NUTR2013$DDS12 <- rowSums(NUTR2013[ c("cereals", "rootsandtubers", "vegetables",
"fruits", "meat", "eggs", "fish",
"pulsesandnuts", "dairyproducts", "oilsandfats",
"sugar","condiments")] )
DDS2013 <- NUTR2013[ c("household_id", "DDS12") ]
FNS2013 <-left_join(FNS2013, DDS2013)
rm(DDS2013)
# descriptives of food group dummy variables and FVS and DDS
descriptive.table(vars = d(cereals, rootsandtubers, vegetables, fruits, meat, eggs, fish,
pulsesandnuts, dairyproducts, oilsandfats, sugar, condiments, DDS12, FVS16),data= NUTR2013,
func.names = c("Mean","St. Deviation", "Min", "Max", "Valid N"))
# ***************************************************************************************************
#Construction of FCS (Food Consumption Score): FI columns are redefined here
#as "number of days consumed" instead of 0/1 dummies, then reduced to one
#row per household and combined with WFP-style food-group weights.
# ***************************************************************************************************
# Days consumed with NA treated as 0 (item not consumed).
FOOD2013$hh_s5bq02t <- ifelse(is.na(FOOD2013$hh_s5bq02),0,FOOD2013$hh_s5bq02)
FOOD2013$FI01_Enjera <- FOOD2013$hh_s5bq02t*(FOOD2013$hh_s5bq00==1 & FOOD2013$hh_s5bq01==1)
FOOD2013$FI02_OtherCereals <- FOOD2013$hh_s5bq02t*(FOOD2013$hh_s5bq00==2 & FOOD2013$hh_s5bq01==1)
FOOD2013$FI03_Potatoes <- FOOD2013$hh_s5bq02t*(FOOD2013$hh_s5bq00==3 & FOOD2013$hh_s5bq01==1)
FOOD2013$FI04_Pasta <- FOOD2013$hh_s5bq02t*(FOOD2013$hh_s5bq00==4 & FOOD2013$hh_s5bq01==1)
FOOD2013$FI05_Sugar <- FOOD2013$hh_s5bq02t*(FOOD2013$hh_s5bq00==5 & FOOD2013$hh_s5bq01==1)
FOOD2013$FI06_PulsesandNuts <- FOOD2013$hh_s5bq02t*(FOOD2013$hh_s5bq00==6 & FOOD2013$hh_s5bq01==1)
FOOD2013$FI07_Vegetables <- FOOD2013$hh_s5bq02t*(FOOD2013$hh_s5bq00==7 & FOOD2013$hh_s5bq01==1)
FOOD2013$FI08_Fruits <- FOOD2013$hh_s5bq02t*(FOOD2013$hh_s5bq00==8 & FOOD2013$hh_s5bq01==1)
FOOD2013$FI09_RedMeat <- FOOD2013$hh_s5bq02t*(FOOD2013$hh_s5bq00==9 & FOOD2013$hh_s5bq01==1)
FOOD2013$FI10_Poultry <- FOOD2013$hh_s5bq02t*(FOOD2013$hh_s5bq00==10 & FOOD2013$hh_s5bq01==1)
FOOD2013$FI11_Eggs <- FOOD2013$hh_s5bq02t*(FOOD2013$hh_s5bq00==11 & FOOD2013$hh_s5bq01==1)
FOOD2013$FI12_Fish <- FOOD2013$hh_s5bq02t*(FOOD2013$hh_s5bq00==12 & FOOD2013$hh_s5bq01==1)
FOOD2013$FI13_FatsandOils <- FOOD2013$hh_s5bq02t*(FOOD2013$hh_s5bq00==13 & FOOD2013$hh_s5bq01==1)
FOOD2013$FI14_DairyProducts <- FOOD2013$hh_s5bq02t*(FOOD2013$hh_s5bq00==14 & FOOD2013$hh_s5bq01==1)
FOOD2013$FI15_Condiments <- FOOD2013$hh_s5bq02t*(FOOD2013$hh_s5bq00==15 & FOOD2013$hh_s5bq01==1)
FOOD2013$FI16_KochoandBula <- FOOD2013$hh_s5bq02t*(FOOD2013$hh_s5bq00==16 & FOOD2013$hh_s5bq01==1)
# Collapse to one row per household; max picks each item's days-consumed.
# NOTE(review): FUN=max is applied to EVERY column, including the character
# diary columns -- confirm this is intended.
NUTR2013 <- aggregate(FOOD2013, by=list(FOOD2013$household_id), FUN=max )
# Weighted FCS: WFP-style food-group weights applied to days consumed.
NUTR2013$FCS16w <-
NUTR2013$FI01_Enjera*2 +
NUTR2013$FI02_OtherCereals*2 +
NUTR2013$FI03_Potatoes*2 +
NUTR2013$FI04_Pasta*2 +
NUTR2013$FI05_Sugar*0.5 +
NUTR2013$FI06_PulsesandNuts*3 +
NUTR2013$FI07_Vegetables*1 +
NUTR2013$FI08_Fruits*1 +
NUTR2013$FI09_RedMeat*4 +
NUTR2013$FI10_Poultry*4 +
NUTR2013$FI11_Eggs*4 +
NUTR2013$FI12_Fish*4 +
NUTR2013$FI13_FatsandOils*0.5 +
NUTR2013$FI14_DairyProducts*4 +
NUTR2013$FI15_Condiments*0 +
NUTR2013$FI16_KochoandBula*2
# Unweighted FCS: plain sum of days consumed across all items.
NUTR2013$FCS16u <-
NUTR2013$FI01_Enjera +
NUTR2013$FI02_OtherCereals +
NUTR2013$FI03_Potatoes +
NUTR2013$FI04_Pasta +
NUTR2013$FI05_Sugar +
NUTR2013$FI06_PulsesandNuts +
NUTR2013$FI07_Vegetables +
NUTR2013$FI08_Fruits +
NUTR2013$FI09_RedMeat +
NUTR2013$FI10_Poultry +
NUTR2013$FI11_Eggs +
NUTR2013$FI12_Fish +
NUTR2013$FI13_FatsandOils +
NUTR2013$FI14_DairyProducts +
NUTR2013$FI15_Condiments +
NUTR2013$FI16_KochoandBula
#rm(NUTR2010, NUTR2013ALL, NUTR2013b, NUTR2013c, NUTR2013CH, NUTR2013d)
FCS2013 <- NUTR2013[ c("household_id", "FCS16w", "FCS16u")]
FNS2013 <- left_join(FNS2013, FCS2013)
rm(FCS2013)
# descriptives of food item days-consumed variables and both FCS variants
library("Deducer")
descriptive.table(vars = d(FI01_Enjera , FI02_OtherCereals , FI03_Potatoes ,
FI04_Pasta , FI05_Sugar , FI06_PulsesandNuts ,
FI07_Vegetables , FI08_Fruits , FI09_RedMeat ,
FI10_Poultry , FI11_Eggs , FI12_Fish ,
FI13_FatsandOils , FI14_DairyProducts , FI15_Condiments , FI16_KochoandBula,
FCS16w, FCS16u),data= NUTR2013,
func.names = c("Mean","St. Deviation", "Min", "Max", "Skew","Valid N"))
# Histograms of nutrition indicators: FCS
# BUG FIX: titles said "Freguency ... in 2010" although this script analyses
# the 2013 wave; typo and year corrected.
hist(NUTR2013$FCS16w, freq = FALSE, ylim = c(0, 0.05), xlab="FCS weighted", ylab="%", main="Frequency of FCS (weighted) in 2013")
# Histograms of nutrition indicators: FCS
hist(NUTR2013$FCS16u, freq = FALSE, ylim = c(0, 0.05), xlab="FCS unweighted", ylab="%", main="Frequency of FCS (unweighted) in 2013")
# calculation of correlation coefficient of weighted and unweighted FCS
myvars <- c("FCS16u", "FCS16w")
NUTR2013sub <- NUTR2013[myvars]
cor(NUTR2013sub, use="complete.obs", method="pearson")
rm(NUTR2013sub, myvars)
# ***************************************************************************************************
#Construction of CSI (Coping Strategies Index) from section 7 coping items
# ***************************************************************************************************
# NOTE(review): path uses "Household/..." while the earlier read used
# "household/..." -- confirm the casing works on the target file system.
CSI2013 <- read_dta(file.path(dataPath, "Household/sect7_hh_w2.dta")) # CSI hh_s7q01 hh_s7q02_a hh_s7q02_b hh_s7q02_c hh_s7q02_d hh_s7q02_e hh_s7q02_f hh_s7q02_g hh_s7q02_h
CSI2013 <-CSI2013[ c("household_id", "hh_s7q01", "hh_s7q02_a", "hh_s7q02_b", "hh_s7q02_c", "hh_s7q02_d",
"hh_s7q02_e", "hh_s7q02_f", "hh_s7q02_g", "hh_s7q02_h" )]
#D:\Analyses\CIMMYT\NutritionTZA\SurveyData\2010\Data\TZNPS2HH1DTA
#D:\Analyses\CIMMYT\NutritionTZA\SurveyData\2010\Data\TZNPS2HH1DTA
descriptive.table(vars = d(hh_s7q02_a, hh_s7q02_b, hh_s7q02_c, hh_s7q02_d, hh_s7q02_e, hh_s7q02_f,
hh_s7q02_g, hh_s7q02_h),data= CSI2013,
func.names = c("Mean","St. Deviation", "Min", "Max", "Skew","Valid N"))
# Full CSI: weighted sum of coping-strategy frequencies (weights 1/1/1/1/3/2/0/4).
CSI2013$CSI <-
CSI2013$hh_s7q02_a*1 +
CSI2013$hh_s7q02_b*1 +
CSI2013$hh_s7q02_c*1 +
CSI2013$hh_s7q02_d*1 +
CSI2013$hh_s7q02_e*3 +
CSI2013$hh_s7q02_f*2 +
CSI2013$hh_s7q02_g*0 +
CSI2013$hh_s7q02_h*4
# Reduced CSI: standard five-item variant (items a, c, d, e, f).
CSI2013$rCSI <-
CSI2013$hh_s7q02_a*1 +
CSI2013$hh_s7q02_c*1 +
CSI2013$hh_s7q02_d*1 +
CSI2013$hh_s7q02_e*3 +
CSI2013$hh_s7q02_f*2
descriptive.table(vars = d(hh_s7q02_a, hh_s7q02_b, hh_s7q02_c, hh_s7q02_d, hh_s7q02_e, hh_s7q02_f,
hh_s7q02_g, hh_s7q02_h, CSI, rCSI),data= CSI2013,
func.names = c("Mean","St. Deviation", "Min", "Max", "Skew","Valid N"))
CSI2013 <- CSI2013[ c("household_id", "CSI", "rCSI")]
# Add both CSI variants to the household-level indicator table.
FNS2013 <- left_join(FNS2013, CSI2013)
# ***************************************************************************************************
# Descriptive statistics of FNS indicators in 2013
# ***************************************************************************************************
# Histograms of nutrition indicators: DDS
hist(FNS2013$DDS12, freq = FALSE, ylim = c(0, 0.2), xlab="DDS", ylab="%", main="Frequency in 2013")
# Histograms of nutrition indicators: FVS
hist(FNS2013$FVS16, freq = FALSE, ylim = c(0, 0.2), xlab="FVS", ylab="%", main="Frequency in 2013")
# Histograms of nutrition indicators: FCS weighted
hist(FNS2013$FCS16w, freq = FALSE, ylim = c(0, 0.2), xlab="FCS weighted", ylab="%", main="Frequency in 2013")
# Histograms of nutrition indicators: FCS unweighted
hist(FNS2013$FCS16u, freq = FALSE, ylim = c(0, 0.2), xlab="FCS unweighted", ylab="%", main="Frequency in 2013")
# Histograms of nutrition indicators: CSI
hist(FNS2013$CSI, freq = FALSE, ylim = c(0, 0.2), xlab="CSI", ylab="%", main="Frequency in 2013")
# Histograms of nutrition indicators: rCSI
hist(FNS2013$rCSI, freq = FALSE, ylim = c(0, 0.2), xlab="rCSI", ylab="%", main="Frequency in 2013")
# calculation of correlation coefficient matrix of all indicators
myvars <- c("DDS12", "FVS16","FCS16w","FCS16u","CSI","rCSI")
FNS2013sub <- FNS2013[myvars]
FNS2013matrix <- cor(FNS2013sub, use="complete.obs", method="pearson")
rm(FNS2013sub, myvars)
# Simple Scatterplot of DDS and FVS
plot(FNS2013$DDS12, FNS2013$FVS16, main="Coherence between DDS and FVS in 2013",
xlab="DDS ", ylab="FVS ", pch=19)
# Simple Scatterplot of weighted vs unweighted FCS
plot(FNS2013$FCS16w, FNS2013$FCS16u, main="Coherence between FCS (weighted) and FCS (unweighted) in 2013",
xlab="FCS16w ", ylab="FCS16u ", pch=19)
# Simple Scatterplot of CSI and reduced CSI
plot(FNS2013$CSI, FNS2013$rCSI, main="Coherence between CSI and reduced CSI in 2013",
xlab="CSI ", ylab="rCSI ", pch=19)
# Histograms of nutrition indicators: rCSI
hist(FNS2013$rCSI, freq = FALSE, ylim = c(0, 0.2), xlab="rCSI", ylab="%", main="Frequency in 2013")
library(ggplot2)
# NOTE(review): assigning to `plot` shadows base::plot for the rest of the
# session; consider a different variable name.
plot=qplot(rCSI, data=FNS2013, geom="histogram")
ggsave(plot,file="Results/graph1.pdf")
#Several plots in one PDF (demo using the iris dataset)
pdf(file = "hist_and_plots.pdf")
## set up the new plotting device (pdf)
par(mfrow = c(2,2))
## draw the plot
hist(iris$Sepal.Length, main = "Plot 1")
plot(iris$Petal.Length, iris$Petal.Width, main = "Plot 2")
plot(iris$Sepal.Length, iris$Petal.Length, main = "Plot 3")
plot(iris$Sepal.Width, iris$Petal.Width, main = "Plot 4")
## close the device to do the drawing
dev.off()
|
869f860b32813863821eeb7b068e8148b2380471
|
fc91ba37fccc23969ace7822d5e8562411427965
|
/cachematrix.R
|
81c7f0ac38b64cf499b067525cc703d7727a4ba4
|
[] |
no_license
|
eamoncayo/hariharan
|
40f1d58ec58bfce2384a8f7fc74d32c3ce212af8
|
2d53083650db8eb102ce4178e8f94e4f2859ed6f
|
refs/heads/master
| 2020-12-29T00:41:35.287310
| 2014-04-27T19:53:38
| 2014-04-27T19:53:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,966
|
r
|
cachematrix.R
|
## Caching utilities for matrix inversion: a matrix is wrapped together with
## getter/setter closures so that its inverse, once computed, can be stored
## and reused instead of being recomputed on every request.

## Build a "cache matrix" object: a list of four closures that share the
## matrix `x` and its cached inverse through the enclosing environment.
## Setting a new matrix invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  inv_cache <- NULL  # cached inverse; NULL means "not computed yet"
  list(
    # Replace the stored matrix and drop any cached inverse.
    set = function(y) {
      x <<- y
      inv_cache <<- NULL
    },
    # Return the stored matrix.
    get = function() x,
    # Store a freshly computed inverse.
    setinverse = function(inverse) inv_cache <<- inverse,
    # Return the cached inverse (NULL if not yet computed).
    getinverse = function() inv_cache
  )
}
## Return the inverse of the special "cache matrix" object `x` (as created
## by makeCacheMatrix).  On a cache hit the stored inverse is returned and a
## "getting cached data" message is emitted; on a miss the inverse is
## computed with solve(), written back into the object, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute from the stored matrix and memoise the result.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
    return(cached)
  }
  message("getting cached data")
  cached
}
|
897c4b080d314ebd568568d7f86b120bde7104f1
|
ea110112ffa5b4f0adc9b024882c74502c662142
|
/GS_MultiRunFZ.R
|
cbd8a2962d00914fbc1026ca7b754d61934db760
|
[] |
no_license
|
alenxav/GenomicSelection
|
3ce22dc6442c4f06b60a4198b3e6364f9802ae4d
|
40b84f33cc3af3944de9429b3ae3cfb31a05e1d9
|
refs/heads/main
| 2023-04-09T04:26:24.772797
| 2021-03-27T18:07:27
| 2021-03-27T18:07:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,893
|
r
|
GS_MultiRunFZ.R
|
### Simulation Breeding Selection Strategy, Index of Selection and GS Models ---
## Packagers -------------------------------------------------------------------
# NOTE(review): rm(list=ls()) wipes the session; intended for batch runs only.
rm(list=ls())
# Load pre-built founder haplotypes; the else-branch shows how they were
# originally simulated with AlphaSimR.
if(TRUE){
load('Founders.RData')
}else{
library('AlphaSimR')
set.seed(123)
Ne <- 106 # Tsuda 2015 BMC
segSites <- round(20000/20,0) # Genome sequence of the palaeopolyploid soybean
founderPop <- quickHaplo(nInd=200,
nChr=20,
segSites=segSites,
genLen = 1.15,
ploidy = 2L,
inbred = TRUE)
}
# Packages loaded on the master and exported to every parallel worker.
Xpackagers <- c('AlphaSimR','bWGR','parallel','foreach','doParallel',
'reshape','ggplot2','gridExtra','lubridate','plyr',
'ranger','Rcpp','keras','verification','rrBLUP')
XXX <- lapply(Xpackagers, function(x){suppressMessages(require(x,quietly = TRUE, character.only = TRUE))})
# Simulation Parameters -------------------------------------------------------
Number_of_runs = 1
Number_of_generations = 100
# Selection intensities (proportion selected).
Intensity <- c(2.5,5,7.5,10)/100#c(0.005,0.01,0.025,0.05)
# Family-structure scenarios: c(number of F1 crosses, F2 progeny per cross).
FS <- list('1'=c(300,50),
'2'=c(250,60),
'3'=c(200,75),
'4'=c(150,100),
'5'=c(100,150))
# Genomic-selection models: each takes (y, gen) and returns list(hat=...).
# 'Gv'/'Pheno' return hat=NA as sentinels: selection then uses the true
# genetic value or the phenotype instead of EBVs (see Mysimu's useM).
GS_Model <- list(
#'XGB' = function(y,gen){require(xgboost); X = as(data.matrix(gen), "dgCMatrix"); fit0 = xgboost(data=X,label=y,params=list(subsample=0.25),nrounds=20,objective="reg:squarederror"); return(list(hat=predict(fit0,X)))},
#'DNN' = function(y,gen){FNN(as.matrix(y),gen)},
#'RF' = function(y,gen,...){list(hat=ranger::ranger(y~.,data.frame(y=y,gen),verbose = FALSE, save.memory = TRUE,write.forest = FALSE,...)$predictions)},
#'RKHS' = function(y,gen){ K = GAU(gen); diag(K)=diag(K)+0.00001; E = eigen(K,symmetric = T); fit = emML(y,E$vectors,E$values); return(fit)},
#'BayesCpi'=BayesCpi,
#'BayesDpi'=BayesDpi,
'GBLUP'=emML,
#'RR'=emRR,
'BayesA'=emBA,
'BayesB'=emBB,
#'BayesC'=emBC,
#'BayesL'=emBL,
'FLM' = emDE,
#'Randon' = function(y,gen){return(list(hat=sample(y)))},
'Gv'= function(y,gen){return(list(hat=NA))},
'Pheno' = function(y,gen){return(list(hat=NA))}
)
Sel_Strategy <- as.character(c('WBF', # Within the best family
'WIF', # Within family
'ACF' # Across Family
))
# Full factorial of scenarios; one row of POSSI = one call to Mysimu().
POSSI <- expand.grid(Sel_Strategy=as.character(Sel_Strategy),
run=1:Number_of_runs,
Intensity= Intensity,
Dmodel=1:length(GS_Model),
FS=names(FS))
FS <- ldply(FS)
colnames(FS) <- c('FS','F1','F2')
POSSI <- merge(POSSI,FS,all.x=TRUE)
cat(paste('Number of Simulation: ',nrow(POSSI),'\n'))
# Genetics parameters ----------------------------------------------------------
POPBestIntensity <- 0.3
NCgs <- 3
## Trait parameter -------------------------------------------------------------
#GxE_corr = 0.4912 ;#By_Loc_H2 = 0.0971 ; #Across_Loc_H2 = 0.5769;#h2= 0.12; #Acroos location 77; # GxE = 77; #Env = 120
# NOTE(review): `mean` and `var` shadow the base R functions of the same
# name for the rest of the session.
mean = 60 # From SoyNAN
var = 77 # From SoyNAN
varGxE = 77 # From SoyNAN
varEnv= 200 # From SoyNAN
corA = matrix(1,nrow=1)
corGxE = matrix(1,nrow=1)
nReps=1
## Founder POP and Simulation Parameters ----------------------------------------
nSnpPerChr <- round(6000/20,0) # Illumina 20K same soyNAN http://journals.atlas-publishing.org/index.php/PGGB/article/view/154
# NOTE(review): segSites is only defined in the else-branch above; when
# loading Founders.RData it must be supplied by that file -- confirm.
nQtlPerChr <- segSites * 0.7
SP <- SimParam$new(founderPop)
SP$addSnpChip(nSnpPerChr=nSnpPerChr)
SP$addTraitAEG(nQtlPerChr=nQtlPerChr,
mean = mean,
var = var,
varEnv=0,
relAA = 0.5,
varGxE = varGxE,
corA =corA,
corGxE = corGxE)
SP$setVarE(varE=varEnv) ##0.281 from soyNAN ##0.281 from soyNAN
# Run one breeding-program simulation scenario (row j of POSSI) over
# Number_of_generations cycles and return the per-generation summary
# statistics as a one-row matrix wrapped in a named list.
#
# Relies on globals defined above: POSSI, GS_Model, founderPop, SP,
# Number_of_generations, POPBestIntensity, NCgs.
Mysimu <- function(j, ...){
  # Scenario parameters for this run.
  run      <- POSSI[j, 'run']
  Dmodel   <- POSSI[j, 'Dmodel']
  Int      <- POSSI[j, 'Intensity']
  Strategy <- as.character(POSSI[j, 'Sel_Strategy'])
  NF1      <- POSSI[j, 'F1']
  NF2      <- POSSI[j, 'F2']
  pop = newPop(founderPop, simParam=SP)
  # Per-generation accumulators.
  genMean = c(); genVar = c(); H2 = c(); Accuracy = c(); nIndSel = c();
  AccuracyF = c(); AccuracyGvPhe = c(); AccuracyPheEbv = c(); CRPS = c();
  geno_MC  <- data.frame(NULL)  # genotypes of the last NCgs cycles (GS training set)
  pheno_MC <- data.frame(NULL)  # centred phenotypes of the last NCgs cycles
  Npred <- c()
  # Selection criterion: phenotype, true genetic value, or GS-estimated
  # breeding value, depending on the scenario's "model".
  useM <- ifelse(names(GS_Model)[Dmodel] == 'Pheno','pheno',
                 ifelse(names(GS_Model)[Dmodel] == 'Gv','gv',
                        'ebv'))
  pop = randCross(pop, nCrosses=20, nProgeny = NF1, simParam=SP)
  pop = makeDH(pop, nDH=1, simParam=SP)
  for(i in 1:Number_of_generations){
    Time_started <- ymd_hms(Sys.time())
    if(i==1){
      # Burn-in cycle: select on phenotype, no EBVs exist yet.
      NIndSel <- pop@nInd
      pop = selectCross(pop, nInd=pop@nInd, nCrosses=NF1, use="pheno", nProgeny=1, simParam=SP)
    }else{
      if(Strategy == 'WBF'){ # Within the best families (top POPBestIntensity share)
        pop = selectFam(pop, nFam=round(NF1*POPBestIntensity), use=useM, simParam=SP)
        pop = selectWithinFam(pop, nInd=round((NF2*Int)/POPBestIntensity), use=useM, simParam=SP)
        nIndSel = pop@nInd
        pop = selectCross(pop, nInd=pop@nInd, nCrosses=NF1, use=useM, nProgeny=1, simParam=SP)
      }
      if(Strategy == 'WIF'){ # Within every family
        pop = selectWithinFam(pop, nInd=round(NF2*Int), use=useM, simParam=SP)
        nIndSel = pop@nInd
        pop = selectCross(pop, nInd=pop@nInd, nCrosses=NF1, use=useM, nProgeny=1, simParam=SP)
      }
      if(Strategy == 'ACF'){ # Across all families
        nIndSel = round(NF2*NF1*Int)
        pop = selectCross(pop, nInd=round(NF2*NF1*Int), use=useM, nCrosses=NF1, nProgeny=1, simParam=SP) # F1
      }
    }
    # Advance generations: F2 plus two further rounds of selfing.
    pop = self(pop, nProgeny=NF2, simParam=SP) # F2
    pop = self(pop, nProgeny=1, simParam=SP)
    pop = self(pop, nProgeny=1, simParam=SP)
    gen = pullSnpGeno(pop, simParam = SP) ## SNP genotype matrix of current cycle
    ### Multi-cycle training set: prepend current cycle, keep last NCgs cycles.
    geno_MC  <- rbind(data.frame(i=i, gen), geno_MC)
    pheno_MC <- rbind(data.frame(i=i, pop@pheno[,1]-mean(pop@pheno[,1])), pheno_MC)
    cycles <- i:(i-NCgs+1)
    cycles <- cycles[cycles>0]
    geno_MC  <- geno_MC[geno_MC$i %in% cycles,]
    pheno_MC <- pheno_MC[pheno_MC$i %in% cycles,]
    if(length(unique(gen %*% rnorm(ncol(gen)))) <= 5){
      # Degenerate marker matrix (genetic variance exhausted): fall back to
      # random predictions so selection can still proceed.
      # BUG FIX: the original used length(fit$hat) here, but `fit` does not
      # exist the first time this branch is reached ("object 'fit' not
      # found"); size the noise by the number of training observations.
      fit = list(hat=rnorm(nrow(pheno_MC)))
    }else{
      fit = GS_Model[[Dmodel]](as.matrix(pheno_MC[,-1]), as.matrix(geno_MC[,-1]))
      if(anyNA(fit$hat)){
        # 'Gv'/'Pheno' models return NA on purpose; replace with noise so
        # pop@ebv is numeric (selection uses gv/pheno in those scenarios).
        fit$hat=rnorm(length(fit$hat))
      }
    }
    # EBVs of the current cycle's individuals only.
    pop@ebv <- as.matrix(fit$hat[pheno_MC$i == i], ncol=1)
    # Per-generation summaries.
    genMean = c(genMean, meanG(pop))
    genVar = c(genVar, varG(pop))
    H2 = c(H2, varG(pop)/varP(pop))
    Accuracy = c(Accuracy, cor(pop@gv, pop@ebv))
    AccuracyGvPhe = c(AccuracyGvPhe, cor(pop@gv, pop@pheno))
    AccuracyPheEbv = c(AccuracyPheEbv, cor(pop@pheno, pop@ebv))
    CRPS = c(CRPS, crps(as.numeric(pop@ebv), c(mean(pop@gv), sd(pop@gv)))$CRPS)
    NIndSel = c(NIndSel, nIndSel)
    Npred = c(Npred, nrow(pheno_MC))
    cat(paste('Simu: ',sprintf("%04i",j),'Generation: ',sprintf("%03i",i),'time: ',Sys.time(),'Processing: ',
              sprintf("%03i",round(as.numeric(as.duration(interval(Time_started,ymd_hms(Sys.time()))),"minutes"))
              ),'min model: ',names(GS_Model)[Dmodel],'\n')) ## Remove
  }
  # Flatten scenario metadata and per-generation vectors into one row,
  # keyed by "<model>_<strategy>_<intensity>_<run>".
  RES <- list()
  RES[[paste0(names(GS_Model)[Dmodel],'_',Strategy,'_',Int,'_',run)]] <- t(unlist(list(GS_Model=names(GS_Model)[Dmodel],
                                                                                       Strategy=as.character(Strategy),
                                                                                       Intensity=Int,
                                                                                       NF1=NF1,
                                                                                       NF2=NF2,
                                                                                       Mu=genMean,
                                                                                       GV=genVar,
                                                                                       He=H2,
                                                                                       Accuracy = Accuracy,
                                                                                       AccuracyGvPhe = AccuracyGvPhe,
                                                                                       AccuracyPheEbv = AccuracyPheEbv,
                                                                                       CRPS = CRPS,
                                                                                       IndSel=NIndSel,
                                                                                       Npred = Npred)))
  # BUG FIX: the original had an unreachable rm(pop) after return(); removed.
  RES
}
# Replicate loop ---------------------------------------------------------------
# Build the parallel backend (local on Windows, LSF-aware PSOCK otherwise),
# run every scenario in POSSI through Mysimu(), and save the stacked results.
sysinf <- Sys.info()
os <- sysinf['sysname']
if(os == 'Windows'){
  # Local development run: small cluster, no LSF scheduler information.
  cl <- makeCluster(6,revtunnel = TRUE)
  JOBID <- 0
}else{
  registerDoSEQ()
  # LSF batch run: one worker per allocated core, spread across hosts.
  hosts <- as.vector(unique(unlist(strsplit(as.character(Sys.getenv("LSB_HOSTS"))," "))))
  nh <- length(hosts)
  nc <- length(unlist(strsplit(as.character(Sys.getenv("LSB_HOSTS"))," ")))-1
  # BUG FIX: the original passed `outfile` twice ("debug.txt" and ''), which
  # is an R error ("formal argument matched by multiple actual arguments");
  # keep the debug-file variant only.
  cl <- parallel::makePSOCKcluster(names=rep(hosts , each = floor(nc/nh)),
                                   outfile = "debug.txt",
                                   master=nsl(Sys.info()['nodename']),
                                   revtunnel = TRUE,
                                   useXDR = TRUE)
  JOBID <- Sys.getenv("LSB_JOBID")
  # BUG FIX: these diagnostics reference hosts/nh/nc, which only exist on
  # this LSF branch; on Windows they previously raised "object 'hosts' not
  # found", so they now live inside the branch.
  print( rep(hosts , each = floor(nc/nh)))
  print(hosts)
  print(nh)
  print(nc)
}
doParallel::registerDoParallel(cl=cl)
# Run all scenarios in parallel; a failing scenario is converted to the
# sentinel string 'try-error' instead of aborting the whole sweep.
lP <- foreach(j = 1:nrow(POSSI),
              .packages = Xpackagers,
              .verbose=FALSE,
              .inorder=FALSE) %dopar% {
  tryCatch(Mysimu(j),error=function(e){return('try-error')})
}
doParallel::stopImplicitCluster()
# Keep only successful runs and stack them into one data frame.
# ROBUSTNESS FIX: `x == 'try-error'` misbehaves/errors when x is a list (a
# successful result); identical() is safe for both the sentinel string and
# list results.
RES <- plyr::ldply(lapply(lP[sapply(lP,function(x){return(!identical(x,'try-error'))})],plyr::ldply))
write.csv(RES,paste0(JOBID,'_o_Results.csv'),row.names = F)
save.image(file=paste0(JOBID,'_All.RData'))
#load('245180_All.RData')
### Get results and print ------------------------------------------------------
# Reshape the run summary to long format: one row per (parameter, generation).
RES1 <- melt(RES[,-1], id = 1:5)
RES1$par <- gsub('[0-9]', '', RES1$variable)  # parameter name, e.g. 'Mu', 'GV'
RES1$sim <- gsub('[A-z]', '', RES1$variable)  # generation index as text
RES1 <- transform(RES1,
                  value = as.numeric(as.character(value)),
                  sim = as.numeric(sim),  # simplified: double as.numeric was redundant
                  par = as.character(par),
                  FS = paste0(NF1, '_', NF2))
### Points ---------------------------
# One scatter + loess panel grid per tracked parameter. The helper replaces
# four copy-pasted ggplot recipes that differed only in the subset and ylab.
trend_plot <- function(par_name, y_lab) {
  ggplot(RES1[RES1$par %in% par_name, ], aes(x = sim, y = value, color = FS)) +
    geom_point(cex = 0.5) +
    geom_smooth(method = "loess", lwd = 1.5) +
    xlab('Generation') + ylab(y_lab) +
    facet_grid(GS_Model + Strategy ~ Intensity)
}
pMu <- trend_plot('Mu', expression(mu))
pGv <- trend_plot('GV', expression(sigma[g]^2))
pHe <- trend_plot('He', expression(H^2))
pAc <- trend_plot('Accuracy', 'Accuracy')
# Stack the four panels into a single (very tall) PDF page.
pdf(paste0(JOBID, '_ResultsAllPoints.pdf'), w = 20, h = 120)
grid.arrange(pMu, pGv, pHe, pAc, nrow = 4)
dev.off()
|
db2c9e97d96ceb545cd090942007c271fc862d10
|
c97fa9aadc45c44fad6433ae10c772060bde355c
|
/MyNotes/03 - Geting and Cleaning Data/Course Project/run_analysis.R
|
4b121f635e2a9ae8713280753599682d081622d2
|
[] |
no_license
|
vitorefigenio/datasciencecoursera
|
9866816242d39fa9fc9520bc4d543efc815afeb5
|
03722d0c7c6d219ec84f48e02065493f6657cc0a
|
refs/heads/master
| 2021-01-17T11:17:58.099767
| 2016-02-28T03:06:37
| 2016-02-28T03:06:37
| 29,034,385
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,176
|
r
|
run_analysis.R
|
# 1st: Merge the training and the test sets to create one data set.
data_dir <- "./data/UCI HAR Dataset"

# Table of feature names, used to label the measurement columns.
features <- read.table(file.path(data_dir, "features.txt"))

# Read one partition ("test" or "train"): measurements (X_*), activity
# labels (y_*) and subject IDs share row order within a partition.
read_partition <- function(part) {
  list(
    x   = read.table(file.path(data_dir, part, paste0("X_", part, ".txt")),
                     col.names = features[, 2]),
    lab = read.table(file.path(data_dir, part, paste0("y_", part, ".txt")),
                     col.names = c("Lab")),
    id  = read.table(file.path(data_dir, part, paste0("subject_", part, ".txt")),
                     col.names = c("ID"))
  )
}
test_part  <- read_partition("test")
train_part <- read_partition("train")

# Stack test rows on top of train rows, then bind subject ID, activity
# label and the measurement columns side by side.
DataMerged <- cbind(rbind(test_part$id,  train_part$id),
                    rbind(test_part$lab, train_part$lab),
                    rbind(test_part$x,   train_part$x))

# 2nd: Extract only the measurements on the mean and standard deviation
# for each measurement (hand-picked column positions).
DataMerged <- DataMerged[,c(1:8,43:48,83:88,123:128,163:168,203,204,216,217,229,230,
                            242,243,255,256,268:273,296:304,347:352,375:383,426:431,
                            454:462,505,506,515:519,528:532,541:545,554:556)]

# 3rd: Use descriptive activity names to name the activities in the data set.
activity <- read.table(file.path(data_dir, "activity_labels.txt"))
DataMerged$Lab <- activity[DataMerged$Lab, 2]

# 4th: Appropriately label the data set with descriptive variable names.
library(dplyr)
DataMerged <- rename(DataMerged, Activity = Lab, Subject = ID)

# 5th: From the data set in step 4, create a second, independent tidy data
# set with the average of each variable for each activity and each subject.
library(reshape2)
id_melt <- c("Activity", "Subject")
labels_melt <- setdiff(colnames(DataMerged), id_melt)
DataMelt <- melt(DataMerged, id = id_melt, measure.vars = labels_melt)
LastData <- dcast(DataMelt, Subject + Activity ~ variable, mean)
write.table(LastData, file = "analysis.txt", sep = ";", eol = "\n",
            row.names = TRUE, col.names = TRUE)
|
868dac3309b9b12d585d36e701cbae71f619f990
|
0866bc5078826247d959a8063ef4d18aebd7523a
|
/01_Data_Construction.r
|
9f491ba67cd2ac78cc402bed412330feb4720362
|
[] |
no_license
|
TomoyaOzawa-DA/Rust
|
7261db070d17f2026ede1e4b721d2ba082cf8aac
|
67b0b148574dc212c75daf5b56dc562101162108
|
refs/heads/main
| 2023-06-14T22:41:24.886161
| 2021-07-08T03:29:14
| 2021-07-08T03:29:14
| 375,599,693
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,463
|
r
|
01_Data_Construction.r
|
# objective: Reading datasets in Rust 1987 and Processing them to data frames.

# install packages
if (!require("tidyverse")) install.packages("tidyverse")
library(tidyverse)

# Each raw .asc file stores one bus group as a single column of values.
# Reshaping into n_bus columns and transposing yields one row per bus.
#
# read_bus_group: read one .asc file and return a tibble with one row per
# bus (columns are auto-named V1, V2, ... — make_colnames() relies on that).
read_bus_group <- function(path, n_bus) {
  read.csv(path, header = FALSE) %>%
    as.matrix() %>%
    matrix(ncol = n_bus) %>%
    t() %>%
    as_tibble()
}

# Bus groups (Rust 1987):
#   1: 1983 Grumman model 870 (15 buses)   2: 1981 Chance RT-50      (4)
#   3: 1979 GMC model t8h203  (48)         4: 1975 GMC model a5308   (37)
#   5: 1974 GMC model a5308   (12)         6: 1974 GMC model a4523   (10)
#   7: 1972 GMC model a5308   (18)         8: 1972 GMC model a4523   (18)
group_files <- c(group1 = "g870.asc",    group2 = "rt50.asc",
                 group3 = "t8h203.asc",  group4 = "a530875.asc",
                 group5 = "a530874.asc", group6 = "a452374.asc",
                 group7 = "a530872.asc", group8 = "a452372.asc")
group_sizes <- c(15, 4, 48, 37, 12, 10, 18, 18)

# make them list — replaces the eight copy-pasted read blocks of the
# original script with a single mapped call; element names and contents
# are identical (group1 ... group8).
list_01_constructed_data <- Map(read_bus_group,
                                file.path("./dat", group_files),
                                group_sizes)
names(list_01_constructed_data) <- names(group_files)

# fill column names
# define function
cols_base <- c("bus_number", "month_purchased", "year_purchased", "month_of_1st_engine_replacement", "year_of_1st_engine_replacement",
               "odometer_at_1st_replacement", "month_of_2nd_engine_replacement", "year_of_2nd_engine_replacement","odometer_at_2nd_replacement",
               "month_odometer_data_begins", "year_odometer_data_begins")
# Build the full column-name vector for one bus group: the 11 fixed
# metadata names in `cols_base`, followed by one "odometer_reading_<date>"
# column per month of odometer data, ending May 1985.
make_colnames <- function(dat){
  # Bus numbers 2386-2389 (group 2) are special-cased: their monthly series
  # is pinned to start in May 1981 instead of being derived from the
  # start-of-data columns V10/V11.
  first_bus <- dat$V1[1]
  if (first_bus %in% c(2386, 2387, 2388, 2389)) {
    date_data_begin <- as.Date("1981-5-1")
  } else {
    month_data_begin <- unique(dat$V10)
    year_data_begin <- unique(dat$V11)
    # Two-digit years in the raw data are all 19xx.
    date_data_begin <- as.Date(paste(paste0("19", year_data_begin),
                                     month_data_begin, "1", sep = "-"))
  }
  duration_data <- seq(date_data_begin, as.Date("1985-5-1"), by = "months")
  c(cols_base, paste0("odometer_reading_", duration_data))
}
# Name the odometer columns and tag each table with its bus-group index.
for (grp in seq_along(list_01_constructed_data)) {
  tbl <- list_01_constructed_data[[grp]]
  colnames(tbl) <- make_colnames(tbl)
  list_01_constructed_data[[grp]] <- mutate(tbl, Bus_group = grp)
}
# save RDS
saveRDS(list_01_constructed_data, "./intermediate/list_01_constructed_data.rds")
|
65e0ba724c5fd366c1f79c0a532a757a40ad9fb0
|
61fe2d2c573fd1a58ba00e0f2d2213e61d72f1ff
|
/cachematrix.R
|
bee2a999061c02e9aedfabda664ce5ca063f2876
|
[] |
no_license
|
vaanxy/ProgrammingAssignment2
|
81215b090ae2b3400b1275f8709a4e7f8f891de3
|
6370f6fd1ce702b7c478b8fda5c5c01aa200eb01
|
refs/heads/master
| 2021-01-20T23:48:07.340819
| 2015-03-08T02:54:55
| 2015-03-08T02:54:55
| 31,776,301
| 0
| 0
| null | 2015-03-06T15:47:26
| 2015-03-06T15:47:25
| null |
UTF-8
|
R
| false
| false
| 1,193
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
##makeCacheMatrix creates a special "matrix", which is really a list containing a function to
##set : set the value of the matrix
##get : get the value of the matrix
##setSolve: set the inverse of the matrix
##getSolve: get the inverse of the matrix
## makeCacheMatrix builds a caching wrapper around a matrix: a list of four
## closures sharing one environment that holds the matrix `x` and its
## cached inverse.
##   set      : replace the matrix (and drop any cached inverse)
##   get      : return the matrix
##   setSolve : store a computed inverse
##   getSolve : return the cached inverse, or NULL if none is stored
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # a new matrix invalidates the cached inverse
    },
    get = function() x,
    setSolve = function(solved) inv <<- solved,
    getSolve = function() inv
  )
}
## Write a short comment describing this function
## cacheSolve returns the inverse of the special "matrix" built by
## makeCacheMatrix, computing it with solve() only on a cache miss and
## storing the result for subsequent calls. Extra arguments are forwarded
## to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getSolve()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setSolve(cached)
  } else {
    message("getting cached inversed matrix")
  }
  cached
}
|
5f1941df5d83c4806c697690c9d14c6e5e49b231
|
80514a9aef5f6075d7d07151aa565540da963e74
|
/man/generateGSC.Rd
|
dd8cac24bf863d3b0fb39c3c4d78e32f09e63978
|
[] |
no_license
|
bengtssonpalme/cafe
|
99a7710fdfc8393b006a6b0727eeb439b1b2415b
|
d3c159b268001784bc709f6d87a77b5a8d999a20
|
refs/heads/master
| 2023-01-14T07:05:19.060946
| 2023-01-04T16:59:10
| 2023-01-04T16:59:10
| 238,866,904
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 922
|
rd
|
generateGSC.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateGSC.R
\name{generateGSC}
\alias{generateGSC}
\title{Generate a Gene Set Collection}
\usage{
generateGSC(file, columns = c(1, 3), header = FALSE)
}
\arguments{
\item{file}{Tab-separated table containing associations between genes and, e.g., pathways.}
\item{columns}{A pair of columns to be included in the GSC (1 and 3 by default).}
\item{header}{If TRUE, the table file is assumed to have a header line before the data.}
}
\value{
Returns a gene set collection (GSC) to be used with the genesetAnalysis function.
}
\description{
Loads a gene set collection from a tab-separated file. The function will use the specified columns, by default column 1 and 3. The first column will be assumed to be gene names and the other column will be assumed to contain gene sets, e.g. pathways. By default, no header row is assumed in the input file.
}
|
dfa611fd757d0d53e431ecb7b31b8ad207f737c1
|
d31602f21f6c627b05a6889b94aab7a07b5ceb9b
|
/man/fc_mean.Rd
|
a73da1cb4df9cb2b7f022187726d601ce9c1cbf0
|
[
"MIT"
] |
permissive
|
evandeilton/cvforecast
|
ff7162983e163a5f180deed360074145e142fd4d
|
d6d934d1eb1e036b472a16c6ea5dafdce0a36111
|
refs/heads/master
| 2020-04-25T12:26:40.028320
| 2017-05-22T02:17:45
| 2017-05-22T02:17:45
| 37,792,953
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,131
|
rd
|
fc_mean.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cvForecastExtraFunctions.R
\name{fc_mean}
\alias{fc_mean}
\title{Mean forecast wrapper}
\usage{
fc_mean(x, h, level = 95, onlyfc = TRUE, ...)
fc_mean(x, h, level = 95, onlyfc = TRUE, ...)
}
\arguments{
\item{x}{'ts' data}
\item{h}{forecast horizon}
\item{level}{confidence level. Default is 0.95}
\item{onlyfc}{if TRUE return only forecasts, otherwise returns a full forecast classed object}
\item{...}{extra args, if needed.}
\item{x}{'ts' data}
\item{h}{forecast horizon}
\item{level}{confidence level. Default is 0.95}
\item{onlyfc}{if TRUE return only forecasts, otherwise returns a full forecast classed object}
\item{...}{extra args, if needed.}
}
\value{
forecasts from ts data or an object of class 'forecast'
forecasts from ts data or an object of class 'forecast'
}
\description{
Mean forecast wrapper
Meanf forecast wrapper
}
\examples{
fit <- fc_mean(AirPassengers, h=10, level = 95, onlyfc = FALSE)
plot(fit)
Mresid(fit)
tsSummary(fit)
fit <- fc_mean(AirPassengers, h=10, onlyfc = FALSE)
plot(fit)
Mresid(fit)
tsSummary(fit)
}
|
6544852528dd8a243c7fc3b94a04fab597578c41
|
7a50607ba3e3006368b024b97288c2610421dfd6
|
/chapter1/main.R
|
3723cb7190c1a2117c4bd244e3580a9191e20f3b
|
[
"MIT"
] |
permissive
|
maxwellb995/Statistical-Computing-with-R
|
5cbc8dae76698632cbe0ba1e3a5f7b378a66b484
|
72dc6dfe3ba63fc89ebd8c467630e406f705c166
|
refs/heads/master
| 2021-05-11T03:45:50.557955
| 2018-01-18T03:51:01
| 2018-01-18T03:51:01
| 117,921,900
| 0
| 0
|
MIT
| 2018-01-18T03:51:02
| 2018-01-18T02:40:47
|
R
|
UTF-8
|
R
| false
| false
| 464
|
r
|
main.R
|
# Statistical computing with R
# NOTE(review): the working-directory path below is machine-specific, so
# this script only runs as-is on the original author's machine; consider
# relative paths instead.
# setwd("C:/Users/Maxwell Ramirez/Documents/Statistical Computing/chapter1/")
setwd("C:\\Users\\Maxwell Ramirez\\Documents\\R\\Statistical Computing\\chapter1")
getwd()  # echo the working directory as a sanity check
# install.packages("tidyverse")
# library("tidyverse")
######### Start Chapter 1
# chapter1_helper.R defines the three demo functions called below
# (not shown in this file).
source("chapter1_helper.R")
r_syntax_table_fn()
r_common_fns_fn()
r_syntax_vector_matrix_fn()
# General data structures
# test dplyr with iris data
|
afb604f9d03c1564fd83084dec932ebed9008237
|
2f6d7a99ce3155d2c635c39013a0da1418208b40
|
/tests/testthat/test_devtools.R
|
5a1d1fff4b0b4049c8642dd2f2f433b20ab219b7
|
[
"MIT"
] |
permissive
|
oganm/ogbox
|
c75eb1d8f4df00be214731e085e6c19e141992cc
|
ba99a46487836af5ab4fb5b013bc92cf35ad8e95
|
refs/heads/master
| 2020-04-04T07:07:27.383911
| 2019-07-29T23:00:12
| 2019-07-29T23:00:12
| 37,562,559
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 215
|
r
|
test_devtools.R
|
# Tests for the version helpers getVersion()/setVersion()
# (defined elsewhere in this package).
context('devtools')
# The checked-in package version is expected to be '1.0'.
test_that('getVersion',{
  testthat::expect_equal(getVersion(),'1.0')
})
# Round-trip: bump the version to 1.2, verify it took effect, then restore
# 1.0 so the test leaves the package state unchanged.
test_that('setVersion',{
  setVersion('1.2')
  testthat::expect_equal(getVersion(),'1.2')
  setVersion('1.0')
})
|
9c4d6df6eadcb3b356f21b1e5af94f3fb3c69dd6
|
dc1368704fc79912439369ac822802502781f873
|
/plot1.r
|
6c10fcdaf0d213f5a3e4e478e47745c069fd9593
|
[] |
no_license
|
chuhoaianh/ExData_Plotting1
|
a7e1f988c2d39481d822559fb4ab8023f2d7cb61
|
e29ac975837f4d3db6d33c2c1da1b76a6d44cef2
|
refs/heads/master
| 2021-01-17T14:48:44.524934
| 2015-07-12T15:01:57
| 2015-07-12T15:01:57
| 38,860,605
| 0
| 0
| null | 2015-07-10T04:18:13
| 2015-07-10T04:18:13
| null |
UTF-8
|
R
| false
| false
| 951
|
r
|
plot1.r
|
#########################################################################################
# Creator: Anh Chu
# Course: Exploratory Data Analysis
# Course Project 1 - plot1.R
# This assignment load data from
# https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
# and practice plotting base on the data
# detail is in https://github.com/rdpeng/ExData_Plotting1
#########################################################################################
# plot1: read the household power data for 1-2 Feb 2007 and write a
# histogram of Global Active Power to plot1.png.
# Expects "household_power_consumption.txt" in the working directory.
plot1 <- function(){
    #using sqldf package to load data in 1 function without using subset
    # The SQL WHERE clause keeps only the two target dates at read time.
    library(sqldf)
    fn <- "household_power_consumption.txt"
    df <- read.csv.sql(fn, sql = 'select * from file where Date = "1/2/2007" or Date = "2/2/2007"', sep = ";", header = TRUE)
    #plotting
    # Write a 480x480 PNG, as required by the assignment.
    png("plot1.png", width = 480, height = 480)
    hist(df$Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
    dev.off()
}
|
cac9b04dcac44c54fc20b9b8c09bf7419d49a212
|
bbd48af369edc0eb5df2a6b844abb9a563641a0e
|
/test/test13.R
|
ab8ceaa7b6e4b8476607a6092fc753228034bdc1
|
[] |
no_license
|
doktorschiwago/kwai
|
5ff61dfb413e1bf7880b9b45e9c6f43b20b290ac
|
6f3003578ca5747dbc423bed3115b02a4d4b68f4
|
refs/heads/master
| 2020-04-17T09:41:14.257637
| 2015-11-29T12:57:33
| 2015-11-29T12:57:33
| 33,010,238
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 490
|
r
|
test13.R
|
# Returns arg1 + 1 when that value exceeds arg2, otherwise the
# sentinel value -11.
myfunction <- function(arg1, arg2) {
  bumped <- arg1 + 1
  if (bumped > arg2) {
    bumped
  } else {
    -11
  }
}
# Wrapper that calls myfunction with arg2 = a + 1 and arg1 = a - 1.
# BUG FIX: the original passed `arg3 = a - 1`, but myfunction() has no
# formal named arg3, so any call failed with an "unused argument" error;
# `arg1` is the intended parameter.
myfunction2 <- function(a) {
  return(myfunction(arg2 = a + 1, arg1 = a - 1))
}
# Load the kwai package plus its development sources, then compile
# myfunction2 and compare the interpreted definition with a call to the
# compiled version.
library(kwai)
source("../R/compile.R")
source("../R/typelib.R")
source("../R/inferType.R")
source("../R/createIR2.R")
source("../R/llvm_helper.R")
source("../R/visitStackMachine3.R")
#debug(inferType2)
# byte2llvm presumably compiles the R function via LLVM — defined in the
# sourced kwai files above, not visible here.
llvmFunc=byte2llvm(myfunction2)
print(myfunction2)        # show the original R definition
#print(myfunction2(-11))
print(llvmFunc(-11))      # run the compiled function
|
d03c55b30cad9cd7e57ffa1c1bb7218d9fc3e887
|
306c9c5808cfbbcbfe6b9bf42c0f3ad1a9502879
|
/R/TwoPart_MultiMS.R
|
63c7f2db86537d4b4a0e8203a54266736712b990
|
[
"MIT"
] |
permissive
|
YuliyaLab/ProteoMM
|
333386474d1d9cf984fad74ab299335c52f8003e
|
3058e12d44b9f2a64f74b4165bf563b5c594ed05
|
refs/heads/master
| 2022-05-15T08:21:13.133808
| 2022-04-10T05:20:14
| 2022-04-10T05:20:14
| 138,552,525
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 54,526
|
r
|
TwoPart_MultiMS.R
|
###############################################################################
# Function to conduct multi-sample two-part test with
# permutation null distribution
# Initial implementation date: 22 July 2016
# Release date: February 2018
###############################################################################
#' hs_peptides - peptide-level intensities for human
#'
#' A dataset containing the protein and peptide information and peptide-level
#' intensities for 6 samples: 3 CG and 3 mCG groups. There are 69 proteins.
#' The columns are as follows:
#'
#' \itemize{
#' \item Sequence - peptide sequence - randomly chosen from a larger list of
#' sequences
#' \item MatchedID - numeric ID that links proteins in the two datasets,
#' unnecessary if datasets are for the same species
#' \item ProtID - protein ID, artificial protein ID, eg. Prot1, Prot2, ...
#' \item GeneID - gene ID, artificial gene ID, eg. Gene1, Gene2, ...
#' \item ProtName - artificial Protein Name
#' \item ProtIDLong - long protein ID, full protein name, here artificially
#' simulated
#' \item GeneIDLong - long gene ID, full gene name, here artificially
#' simulated
#' \item CG1 - raw intensity column for sample 1 in CG group
#' \item CG2 - raw intensity column for sample 2 in CG group
#' \item CG3 - raw intensity column for sample 3 in CG group
#' \item mCG1 - raw intensity column for sample 1 in mCG group
#' \item mCG2 - raw intensity column for sample 2 in mCG group
#' \item mCG3 - raw intensity column for sample 3 in mCG group
#' }
#'
#' @docType data
#' @keywords datasets
#' @name hs_peptides
#' @usage data(hs_peptides)
#' @format A data frame with 695 rows and 13 colummns, compiring 7 columns of
#' metadata and 6 columns of peptide intensities. 69 proteins.
NULL
#' mm_peptides - peptide-level intensities for mouse
#'
#' A dataset containing the protein and peptide information and peptide-level
#' intensities for 6 samples: 3 CG and 3 mCG groups. There are 69 proteins.
#' The columns are as follows:
#'
#' \itemize{
#' \item Sequence - peptide sequence - randomly chosen from a larger list of
#' sequences
#' \item MatchedID - numeric ID that links proteins in the two datasets,
#' unnecessary if datasets are for the same species
#' \item ProtID - protein ID, artificial protein ID, eg. Prot1, Prot2, ...
#' \item GeneID - gene ID, artificial gene ID, eg. Gene1, Gene2, ...
#' \item ProtName - artificial Protein Name
#' \item ProtIDLong - long protein ID, full protein name, here artificially
#' simulated
#' \item GeneIDLong - long gene ID, full gene name, here artificially
#' simulated
#' \item CG1 - raw intensity column for sample 1 in CG group
#' \item CG2 - raw intensity column for sample 2 in CG group
#' \item CG3 - raw intensity column for sample 3 in CG group
#' \item mCG1 - raw intensity column for sample 1 in mCG group
#' \item mCG2 - raw intensity column for sample 2 in mCG group
#' \item mCG3 - raw intensity column for sample 3 in mCG group
#' }
#'
#' @docType data
#' @keywords datasets
#' @name mm_peptides
#' @usage data(mm_peptides)
#' @format A data frame with 1102 rows and 13 colummns, compiring 7 columns of
#' metadata and 6 columns of peptide intensities. 69 proteins.
NULL
#' Subdivide data into intensities columns only
#'
#' Subset a data frame of protein metadata and intensities down to the
#' intensity columns only. No row names are set and no range checking on
#' the column indexes is performed.
#'
#' @param mm data frame of metadata and intensities as a single data frame
#' @param use_cols column numbers to subset and return; no range checking
#' on the column indexes is performed
#' @return m_ints data frame of intensities only
#' @examples
#' data(mm_peptides)
#' head(mm_peptides)
#' intsCols = 8:13 # different from parameter names as R uses outer name
#' # spaces if variable is undefined
#' m_logInts = make_intencities(mm_peptides, intsCols)
#'
#' @export
make_intencities = function(mm, use_cols) {
  mm[, use_cols]
}
#' Subdivide data into metadata columns only
#'
#' Subset a data frame of protein metadata and intensities down to the
#' metadata columns only. No range checking on the column indexes is
#' performed.
#'
#' @param mm data frame of metadata and intensities as a single data frame
#' @param use_cols column numbers to subset and return; no range checking
#' on the column indexes is performed
#' @return data frame of metadata columns only
#' @examples
#' data(mm_peptides)
#' head(mm_peptides)
#' metaCols = 1:7 # reusing this variable
#' m_prot.info = make_meta(mm_peptides, metaCols)
#' @export
make_meta = function(mm, use_cols) {
  mm[, use_cols]
}
#' Convert values in a matrix to log2 transformed values
#'
#' convert_log2 replaces 0's with NA's then does a log2 transformation.
#' Replacing 0's with NA's is the correct approach to Proteomics data
#' analysis as 0's are not values that should be left in the data where no
#' observation was made, see citation below.
#' Karpievitch et al. 2009 "Normalization of peak intensities in
#' bottom-up MS-based proteomics using singular value decomposition".
#' PMID: 19602524
#' Karpievitch et al. 2009 "A statistical framework for protein
#' quantitation in bottom-up MS-based proteomics". PMID: 19535538
#'
#' @param mm a dataframe of raw intensities in format:
#' (# peptides)x(# samples+possibly peptide & protein information (metadata))
#'
#' @param use_cols vector of column indexes that make up the intensities,
#' usually in sequential order but do not have to be.
#' Defaults to all columns of \code{mm} (the original relied on
#' missing-argument propagation into \code{[} for this; the
#' default makes that behavior explicit). The user is
#' responsible for making sure that specified columns are
#' indeed numeric and correspond to intensities for each sample
#'
#' @return log2 transformed intensities (same container type as the
#' selected columns of \code{mm}) where 0's were replaced with NA's
#' prior to transformation
#'
#' @examples
#' data(mm_peptides)
#' head(mm_peptides)
#' intsCols = 8:13
#' metaCols = 1:7
#' m_logInts = make_intencities(mm_peptides, intsCols)
#' m_prot.info = make_meta(mm_peptides, metaCols)
#' m_logInts = convert_log2(m_logInts) # 0's replaced with NAs and
#' # log2 transnform applied
#'
#' @export
convert_log2 = function(mm, use_cols = seq_len(ncol(mm))) {
  m_logInts = mm[, use_cols]
  # 0 records "no observation made" in MS data, so NA is the correct
  # representation before taking logs
  m_logInts[m_logInts == 0] = NA
  log2(m_logInts)
}
# function compute_missing
# computes the number of missing and percent missing observations
# PARAMETER
#   mm is a matrix returned by convert_log2 or matrix with NA's representing
#   missing values
# PRINTS out % missing (via message)
# RETURN
#   data frame with 3 columns (num_missing, num_total, perc_missing)
#   and 1 row of values
compute_missing = function(mm) {
  dims = dim(mm)
  nummiss = sum(is.na(mm))
  total = dims[1] * dims[2]
  perc_miss = nummiss / total
  message('Percent missing observations: ', perc_miss)
  ret = data.frame(nummiss, total, perc_miss)
  colnames(ret) = c('num_missing', 'num_total', 'perc_missing')
  # BUG FIX: the original ended on the colnames() assignment, so the
  # function invisibly returned the column-name vector instead of the
  # data frame documented above.
  ret
}
#' Volcano plot
#'
#' Function plots fold changes and p-values as a volcano plot.
#' Two lines are plotted for the p-value cutoff at p = PV_cutoff (solid line)
#' and p = 0.1 (dashed line).
#' @param FC vector of fold changes
#' @param PV vector of p-values, same length as FC
#' @param FC_cutoff fold change cutoff where to draw
#' vertical cutoff lines, default = 2
#' @param PV_cutoff p-value cutoff where to draw a
#' horizontal cutoff line, default ==.05
#' @param figtitle title to display at the top of the figure, default = ''
#' @return NULL
#' @examples
#' data(mm_peptides)
#' head(mm_peptides)
#' intsCols = 8:13 # different from parameter names as
#' # R uses outer name spaces if variable is undefined
#' metaCols = 1:7
#' m_logInts = make_intencities(mm_peptides, intsCols)
#' m_prot.info = make_meta(mm_peptides, metaCols)
#' m_logInts = convert_log2(m_logInts)
#'
#' # Normalize data
#' grps = as.factor(c('CG','CG','CG', 'mCG','mCG','mCG'))
#'
#' set.seed(123)
#' mm_m_ints_eig1 = eig_norm1(m=m_logInts,treatment=grps,prot.info=m_prot.info)
#' mm_m_ints_eig1$h.c # check the number of bias trends detected
#'
#' # Impute missing values
#' mm_m_ints_norm = eig_norm2(rv=mm_m_ints_eig1)
#' mm_prot.info = mm_m_ints_norm$normalized[,1:7]
#' mm_norm_m = mm_m_ints_norm$normalized[,8:13]
#'
#' set.seed(125) # needed for reproducibility of imputation
#' imp_mm = MBimpute(mm_norm_m, grps, prot.info=mm_prot.info,
#' pr_ppos=2, my.pi=0.05, compute_pi=FALSE)
#' DE_res = peptideLevel_DE(imp_mm$y_imputed, grps, imp_mm$imp_prot.info,
#' pr_ppos=2)
#' plot_volcano(DE_res$FC, DE_res$BH_P_val, FC_cutoff=1.5,
#' PV_cutoff=.05, figtitle='Mouse DE')
#' @return Nil
#' @export
# Base-graphics volcano plot: fold change on x, -log10(p-value) on y,
# with cutoff guide lines. Side effect only (draws on the active device).
plot_volcano = function(FC, PV, FC_cutoff=2, PV_cutoff=.05, figtitle='') {
  tmp_x = FC
  tt = PV
  # exact 0 values, replace with a small values as need to take a log
  ppos_rep = tt == 0
  tt[ppos_rep] = .001 # highly significant
  tmp_y = -log10(tt)
  # Cap extremely significant points (-log10(p) > 20) near y = 19.5 so they
  # stay on-screen; jitter() draws from the RNG, so call set.seed() first
  # for reproducible figures.
  ppos = tmp_y > 20
  num_replace = sum(ppos)
  message("number to replace ",num_replace)
  if(num_replace) {
    tmp_y[ppos] = jitter(rep(19.5, times=num_replace) )
  }
  # Symmetric x-axis around 0 with a small margin on both axes.
  xlimits = max(abs(FC)) + .2
  ylimits = max(tmp_y) + .2
  graphics::par(mfcol=c(1,1))
  # add label to significantly different proteins
  graphics::plot(tmp_x,tmp_y,pch=20,xlim=c(-xlimits,xlimits),ylim=c(0,ylimits),
       xlab='FC',ylab='-log10(p-value)',main=figtitle)
  # Horizontal line at the user p-value cutoff (solid)...
  graphics::lines(c(-(xlimits+2),(xlimits+2)), c(-log10(PV_cutoff),
                                    -log10(PV_cutoff)), col='blue')
  # also same as
  # ...and a fixed dashed line at p = 0.1.
  graphics::abline(h = -log10(.1), col='blue', lty=2) # permanent line for now
  # Vertical lines at +/- the fold-change cutoff.
  graphics::lines(c(-FC_cutoff,-FC_cutoff), c(-10, 70), col='blue')
  graphics::lines(c(FC_cutoff,FC_cutoff), c(-10, 70), col='blue')
}
#' Volcano plot with labels for the differentially expressed proteins
#'
#' Function plots fold changes and p-values as a volcano plot.
#' Two lines are plotted for the p-value cutoff at p = PV_cutoff
#' (solid line) and p = 0.1 (dashed line).
#' @param FC vector of fold changes
#' @param PV vector of p-values, same length as FC
#' @param ProtID vector of protein IDs, can be gene IDs, same length as FC & PV.
#' Names in this vector will be displayed in the volcano plot
#' for differentially expressed proteins for this reason short names
#' are preferred.
#' @param FC_cutoff fold change cutoff where to draw vertical cutoff
#' lines, default = 2
#' @param PV_cutoff p-value cutoff where to draw a horizontal cutoff line,
#' default ==.05
#' @param figtitle title to display at the top of the figure, default = ''
#' @return NULL
#' @examples
#' data(mm_peptides)
#' head(mm_peptides)
#' intsCols = 8:13 # different from parameter names as
#' # R uses outer name spaces if variable is undefined
#' metaCols = 1:7 # reusing this variable
#' m_logInts = make_intencities(mm_peptides, intsCols) # will reuse the name
#' m_prot.info = make_meta(mm_peptides, metaCols)
#' m_logInts = convert_log2(m_logInts)
#'
#' # Normalize data
#' grps = as.factor(c('CG','CG','CG', 'mCG','mCG','mCG'))
#'
#' set.seed(135)
#' mm_m_ints_eig1 = eig_norm1(m=m_logInts,treatment=grps,prot.info=m_prot.info)
#' mm_m_ints_eig1$h.c # check the number of bias trends detected
#'
#' # Impute missing values
#' mm_m_ints_norm = eig_norm2(rv=mm_m_ints_eig1)
#' mm_prot.info = mm_m_ints_norm$normalized[,1:7]
#' mm_norm_m = mm_m_ints_norm$normalized[,8:13]
#'
#' set.seed(125)
#' imp_mm = MBimpute(mm_norm_m, grps, prot.info=mm_prot.info,
#' pr_ppos=2, my.pi=0.05, compute_pi=FALSE)
#' DE_res = peptideLevel_DE(imp_mm$y_imputed, grps, imp_mm$imp_prot.info,
#' pr_ppos=2)
#' plot_volcano_wLab(DE_res$FC, DE_res$BH_P_val, DE_res$ProtID, FC_cutoff=1.5,
#' PV_cutoff=.05, figtitle='Mouse DE')
#'
#' @import ggrepel ggplot2
#' @return Nil
#' @export
# ggplot2 volcano plot with text labels (via ggrepel) on the proteins that
# pass both the fold-change and p-value cutoffs. Returns the ggplot object.
plot_volcano_wLab = function(FC, PV, ProtID,
                             FC_cutoff=2,
                             PV_cutoff=.05, figtitle='') {
  log_PV = PV
  plotdata = data.frame(FC, PV, ProtID, log_PV) # combine into 2 data frame
  # Exact-zero p-values would give infinite -log10; replace with 1e-9.
  ppos_rep = plotdata$PV == 0
  plotdata$PV[ppos_rep] = .000000001
  log_PV = -log10(plotdata$PV)
  plotdata$log_PV = log_PV
  # threshold marks the points that get a text label below.
  plotdata$threshold = (abs(plotdata$FC) >= FC_cutoff) & (plotdata$PV < PV_cutoff)
  # NOTE(review): dim() here computes and discards its result — appears to
  # be leftover debugging; it has no effect inside a function body.
  dim(plotdata)
  ggplot2::ggplot() +
    ggplot2::geom_point(data=plotdata, aes(x=FC, y=log_PV), alpha=0.5, size=1) +
    theme(legend.position = "none") +
    xlab("log2 fold change") +
    ylab("-log10 p-value") +
    #ggrepel::geom_text_repel(data=filter(plotdata, threshold==TRUE),
    # Labels only for the rows flagged by `threshold` above.
    ggrepel::geom_text_repel(data=plotdata[plotdata$threshold==TRUE,],
                             size = 3, alpha=.8, aes(x=FC, y=log_PV, label=ProtID) ) +
    ggplot2::theme_classic(base_size = 8) +
    # Solid line at the user p-value cutoff, dashed at p = 0.1, vertical
    # lines at +/- the fold-change cutoff.
    ggplot2::geom_hline(yintercept=-log10(PV_cutoff), col='blue', alpha=.7) +
    ggplot2::geom_hline(yintercept=-log10(0.1),
                        col='blue', linetype="dashed", alpha=.7) +
    ggplot2::geom_vline(xintercept=-FC_cutoff, col='blue', alpha=.7) +
    ggplot2::geom_vline(xintercept=FC_cutoff, col='blue', alpha=.7)
}
#' Multi-Matrix Differential Expression Analysis
#'
#' Multi-Matrix Differential Expression Analysis computes Model-Based
#' statistics for each dataset, the sum of individual statistics is the
#' final statistic. The significance is determined via a permutation test
#' which computed the same statistics and sums them after permuting
#' the values across treatment groups. As is outlined in Karpievitch
#' et al. 2018. Important to set the random number generator seed for
#' reproducibility with set.seed() function.
#'
#' @param mm_list list of matrices for each experiment, length = number of
#' datasets to compare internal dataset dimensions: numpeptides
#' x numsamples for each dataset
#' @param treat list of data frames with treatment information to compute
#' the statistic
#' in same order as mm_list
#' @param prot.info list of protein and peptide mapping for each matrix
#' in mm_list,
#' in same order as mm_list
#' @param prot_col_name column name in prot.info that contains protein
#'                      identifiers that link all datasets together. Note that
#' Protein IDs will differ across different organisms and
#' cannot be used as the linking identifier.
#' Function match_linker_ids() produces numeric identifiers
#' that link all datasets together
#' @param nperm number of permutations, default = 500,
#' this will take a while, test code
#' with fewer permutations
#' @param dataset_suffix vector of character strings that corresponds to the
#' dataset being analysed. Same length as mm_list. Names will be appended
#' to the columns names that will be generated for each analysed dataset.
#' For example, if analyzing mouse and human data this vector may be:
#' c('Mouse', 'Human')
#' @return data frame with the following columns
#' \describe{
#' \item{protIDused}{Column containing the protein IDs used to
#' link proteins across datasets}
#' \item{FC}{Average fold change across all datasets}
#' \item{P_val}{Permutation-based p-value for the differences
#' between the groups}
#' \item{BH_P_val}{Multiple testing adjusted p-values}
#' \item{statistic}{Statistic computed as a a sum of statistics
#' produced for each dataset}
#' \item{Protein Information}{all columns passed into the function
#' for the 1st dataset
#' in the list}
#' \item{FCs}{Fold changes for individual datasets, these values
#' should average to the
#' FC above. As many columns as there are datasets being analyzed.}
#' \item{PV}{p-values for individual datasets. As many
#' columns as there are datasets
#' being analyzed.}
#' \item{BHPV}{Multiple testing adjusted p-values for
#' individual datasets. As many
#' columns as there are datasets being analyzed.}
#' \item{NUMPEP}{Number of peptides presents in each protein
#' for each dataset. As many
#' columns as there are datasets being analyzed.}
#'}
#' @examples
#' # Load mouse dataset
#' data(mm_peptides)
#' head(mm_peptides)
#' intsCols = 8:13 # different from parameter names as R uses
#' # outer name spaces if variable is undefined
#' metaCols = 1:7 # reusing this variable
#' m_logInts = make_intencities(mm_peptides, intsCols) # will reuse the name
#' m_prot.info = make_meta(mm_peptides, metaCols)
#' m_logInts = convert_log2(m_logInts)
#' grps = as.factor(c('CG','CG','CG', 'mCG','mCG','mCG'))
#' set.seed(135)
#' mm_m_ints_eig1 = eig_norm1(m=m_logInts,treatment=grps,
#' prot.info=m_prot.info)
#' mm_m_ints_eig1$h.c # check the number of bias trends detected
#' mm_m_ints_norm = eig_norm2(rv=mm_m_ints_eig1)
#' mm_prot.info = mm_m_ints_norm$normalized[,1:7]
#' mm_norm_m = mm_m_ints_norm$normalized[,8:13]
#' set.seed(125) # Needed for reprodicibility of results
#' imp_mm = MBimpute(mm_norm_m, grps, prot.info=mm_prot.info,
#' pr_ppos=2, my.pi=0.05, compute_pi=FALSE)
#'
#' # Load human dataset
#' data(hs_peptides)
#' head(hs_peptides)
#' intsCols = 8:13 # different from parameter names as R uses
#' # outer name spaces if variable is undefined
#' metaCols = 1:7 # reusing this variable
#' m_logInts = make_intencities(hs_peptides, intsCols) # will reuse the name
#' m_prot.info = make_meta(hs_peptides, metaCols)
#' m_logInts = convert_log2(m_logInts)
#' grps = as.factor(c('CG','CG','CG', 'mCG','mCG','mCG'))
#' set.seed(1237) # needed for reproducibility
#' hs_m_ints_eig1 = eig_norm1(m=m_logInts,treatment=grps,prot.info=m_prot.info)
#' hs_m_ints_eig1$h.c # check the number of bias trends detected
#' hs_m_ints_norm = eig_norm2(rv=hs_m_ints_eig1)
#' hs_prot.info = hs_m_ints_norm$normalized[,1:7]
#' hs_norm_m = hs_m_ints_norm$normalized[,8:13]
#'
#' set.seed(125) # or any value, ex: 12345
#' imp_hs = MBimpute(hs_norm_m, grps, prot.info=hs_prot.info,
#' pr_ppos=2, my.pi=0.05,
#' compute_pi=FALSE)
#'
#' # Multi-Matrix Model-based differential expression analysis
#' # Set up needed variables
#' mms = list()
#' treats = list()
#' protinfos = list()
#' mms[[1]] = imp_mm$y_imputed
#' mms[[2]] = imp_hs$y_imputed
#' treats[[1]] = grps
#' treats[[2]] = grps
#' protinfos[[1]] = imp_mm$imp_prot.info
#' protinfos[[2]] = imp_hs$imp_prot.info
#' nperm = 50
#'
#' # ATTENTION: SET RANDOM NUMBER GENERATOR SEED FOR REPRODUCIBILITY !!
#' set.seed(131) # needed for reproducibility
#'
#' comb_MBDE = prot_level_multi_part(mm_list=mms, treat=treats,
#' prot.info=protinfos,
#' prot_col_name='ProtID', nperm=nperm,
#' dataset_suffix=c('MM', 'HS'))
#'
#' # Analysis for proteins only present in mouse,
#' # there are no proteins suitable for
#' # Model-Based analysis in human dataset
#' subset_data = subset_proteins(mm_list=mms, prot.info=protinfos, 'MatchedID')
#' mm_dd_only = subset_data$sub_unique_mm_list[[1]]
#' hs_dd_only = subset_data$sub_unique_mm_list[[2]]
#' protinfos_mm_dd = subset_data$sub_unique_prot.info[[1]]
#' DE_mCG_CG_mm_dd = peptideLevel_DE(mm_dd_only, grps,
#' prot.info=protinfos_mm_dd, pr_ppos=2)
#'
#' @importFrom stats p.adjust
#' @export
prot_level_multi_part = function(mm_list, treat, prot.info,
                                 prot_col_name, nperm=500, dataset_suffix){
  # Multi-Matrix Model-Based differential expression analysis.
  # A per-protein statistic is computed in each dataset via
  # peptideLevel_DE(); the sum of the statistics across datasets is the
  # final statistic, and significance is estimated by a permutation test.
  warning("This function uses a random number generator. For reproducibility
          use set.seed(12345) with your choice of numeric parameter",
          immediate.=TRUE)
  nsets = length(mm_list)
  # the combined statistic is a sum over datasets: need at least two
  if(nsets < 2) {
    stop("prot_level_multi_part() requires at least 2 datasets", call.=FALSE)
  }
  # select proteins that were detected in each experiment
  # make a list of unique protein IDs for each matrix in the list mm_list
  # grps will not change
  subset_data = subset_proteins(mm_list=mm_list, prot.info=prot.info,
                                prot_col_name)
  # subset_data contains: "sub_mm_list" "sub_prot.info"
  # "sub_unique_mm_list" "sub_unique_prot.info" "common_list"
  message('Computing statistics')
  sub_mm_list = subset_data$sub_mm_list
  sub_prot.info = subset_data$sub_prot.info
  tt = colnames(sub_prot.info[[1]])
  ttt = tt == prot_col_name
  # position of the protein ID column that was passed in for linking IDs
  pos_prot_id_col = seq_len(length(tt))[ttt]
  # compute statistics for the first dataset; the returned columns are:
  # ProtID FC p-val BH_p-val t_value num_peptides
  tmp = peptideLevel_DE(sub_mm_list[[1]], treat[[1]], sub_prot.info[[1]],
                        pr_ppos=pos_prot_id_col)
  tstat_all = list()
  tstat_all[[1]] = tmp
  # statistic is stored in col 5; it is an F-statistic unless the test is
  # equivalent to a t-test (2 treatment groups, 1 peptide)
  tstat = as.double(tmp[,5])
  FCs = as.double(tmp[,2])
  PV = as.double(tmp[,3])
  BHPV = as.double(tmp[,4])
  NUMPEP = as.numeric(tmp[,6])
  col_FC = paste('FC_', dataset_suffix[1], sep='')
  col_PV = paste('PV_', dataset_suffix[1], sep='')
  col_BHPV = paste('BHPV_', dataset_suffix[1], sep='')
  col_NUMPEP = paste('NUMPEP_', dataset_suffix[1], sep='')
  # protein IDs are identical across subsets, take them from dataset 1
  PROTIDS = tmp[,1]
  for(ii in 2:nsets){ # accumulate statistics for datasets 2..nsets
    tmp = peptideLevel_DE(sub_mm_list[[ii]], treat[[ii]],
                          sub_prot.info[[ii]], pr_ppos=pos_prot_id_col)
    tstat_all[[ii]] = tmp
    tstat = cbind(tstat, as.double(tmp[,5]))
    FCs = cbind(FCs, tmp[,2])
    PV = cbind(PV, tmp[,3])
    BHPV = cbind(BHPV, tmp[,4])
    NUMPEP = cbind(NUMPEP, tmp[,6])
    # per-dataset column headers, e.g. FC_MM, FC_HS, ...
    col_FC = c(col_FC, paste('FC_', dataset_suffix[ii], sep=''))
    col_PV = c(col_PV, paste('PV_', dataset_suffix[ii], sep=''))
    col_BHPV = c(col_BHPV, paste('BHPV_', dataset_suffix[ii], sep=''))
    col_NUMPEP = c(col_NUMPEP, paste('NUMPEP_', dataset_suffix[ii], sep=''))
  }
  colnames(FCs) = col_FC
  colnames(PV) = col_PV
  colnames(BHPV) = col_BHPV
  colnames(NUMPEP) = col_NUMPEP
  sum_tstat = rowSums(tstat) # combined statistic: sum across datasets
  message('Performing permutation test')
  tstat_perm = list()
  for(ii in seq_len(nsets)) {
    message('Dataset ', as.character(ii) )
    tstat_perm[[ii]] = NULL
    for(jj in seq_len(nperm)) {
      # permute treatment labels for each iteration, then recompute stats
      perm_pos = sample(length(treat[[ii]]), length(treat[[ii]]) )
      tmp = peptideLevel_DE(sub_mm_list[[ii]],
                            treat[[ii]][perm_pos], sub_prot.info[[ii]],
                            pr_ppos=pos_prot_id_col)
      if(jj == 1) {
        tstat_perm[[ii]] = as.matrix(as.double(tmp[,5]))
      } else {
        tstat_perm[[ii]] =cbind(tstat_perm[[ii]],as.matrix(as.double(tmp[,5])))
      }
    }
  }
  # sum the permutation statistics across datasets
  T_perm = tstat_perm[[1]]
  for(ii in 2:nsets) {
    T_perm = T_perm + tstat_perm[[ii]]
  }
  num_prot = dim(tstat)[1]
  p_vals = vector('numeric', length=num_prot)
  pos_stat_pos = sum_tstat >= 0
  # p-values are computed separately for positive and negative statistics:
  # count permuted sums falling in the matching tail of the null distribution
  for(ii in seq_len(2)) {
    if(ii == 1) {
      ppos = which(pos_stat_pos)
      for(kk in seq_len(length(ppos))) {
        p_vals[ppos[kk]] = (.5+sum(T_perm[ppos[kk],] >=
                                     sum_tstat[ppos[kk]])) / (nperm+1)
      }
    } else {
      ppos = which(!pos_stat_pos)
      for(kk in seq_len(length(ppos))) {
        p_vals[ppos[kk]] = (.5+ sum(T_perm[ppos[kk],] <
                                      sum_tstat[ppos[kk]])) / (nperm+1)
      }
    }
  }
  # Permutation p-values sometimes only span e.g. [0, .6], so standard
  # adjustments do not do well. Use 'fdr' in p.adjust, then rescale to
  # [0 1]; the rescaling is a no-op if values already span [0 1].
  p_vals_tmp = stats::p.adjust(p_vals, method="fdr")
  mmin = min(p_vals_tmp)
  mmax = max(p_vals_tmp)
  adj_PV = (p_vals_tmp - mmin) / (mmax-mmin) # rescales to [0 1]
  FC = rowMeans(FCs) # average fold change across datasets
  # protein info is on peptide level: deduplicate to protein level,
  # taking protein IDs from dataset 1
  unik = !duplicated(sub_prot.info[[1]][,prot_col_name])
  ppos_u_prots = seq_along(sub_prot.info[[1]][,prot_col_name])[unik] # indices
  u_prot_info = sub_prot.info[[1]][ppos_u_prots,]
  res = data.frame(protIDused=PROTIDS, FC, P_val=p_vals,
                   BH_P_val=adj_PV, statistic=sum_tstat,
                   u_prot_info, FCs, PV, BHPV, NUMPEP)
  # column names in res are inherited from the structures that are combined
  return(res)
}
#' Presence/Absence peptide-level analysis
#'
#' Presence/Absence peptide-level analysis uses
#' all peptides for a protein as IID
#' to produce 1 p-value across multiple (2+) datasets.
#' Significance is estimated using a g-test which is suitable
#' for two treatment groups only.
#'
#' @param mm m x n matrix of intensities, num peptides x num samples
#' @param treatment vector indicating the treatment
#' group of each sample ie [1 1 1 1 2 2 2 2...]
#' @param prot.info 2+ column data frame of peptide ID, protein ID, etc columns
#' @param pr_ppos - column index for protein ID in
#' prot.info. Can restrict to be #2...
#'
#' @return A list of length two items:
#' \describe{
#' \item{ProtIDused}{protein identification information taken from prot.info,
#' a column used to identify proteins}
#' \item{FC}{Approximation of the fold change computed
#' as percent missing observations
#' group 1 minus in percent missing observations group 2}
#' \item{P_val}{p-value for the comparison between
#' 2 groups (2 groups only here)}
#' \item{BH_P_val}{Benjamini-Hochberg adjusted p-values}
#' \item{statistic}{statistic returned by
#' the g-test, not very useful as depends on
#' the direction of the test and can produce all 0's}
#' \item{num_peptides}{number of peptides within a protein}
#' \item{metadata}{all columns of metadata from the passed in matrix}
#'}
#' @examples
#' # Load mouse dataset
#' data(mm_peptides)
#' head(mm_peptides)
#' intsCols = 8:13 # different from parameter names as R uses
#' # outer name spaces if variable is undefined
#' metaCols = 1:7 # reusing this variable
#' m_logInts = make_intencities(mm_peptides, intsCols) # will reuse the name
#' m_prot.info = make_meta(mm_peptides, metaCols)
#' m_logInts = convert_log2(m_logInts)
#' grps = as.factor(c('CG','CG','CG', 'mCG','mCG','mCG'))
#'
#' set.seed(135)
#' mm_m_ints_eig1 = eig_norm1(m=m_logInts,treatment=grps,prot.info=m_prot.info)
#' mm_m_ints_eig1$h.c # check the number of bias trends detected
#' mm_m_ints_norm = eig_norm2(rv=mm_m_ints_eig1)
#'
#' # Load human dataset
#' data(hs_peptides)
#' head(hs_peptides)
#' intsCols = 8:13 # different from parameter names as R
#' # uses outer name spaces if variable is undefined
#' metaCols = 1:7 # reusing this variable
#' m_logInts = make_intencities(hs_peptides, intsCols) # will reuse the name
#' m_prot.info = make_meta(hs_peptides, metaCols)
#' m_logInts = convert_log2(m_logInts)
#' grps = as.factor(c('CG','CG','CG', 'mCG','mCG','mCG'))
#'
#' set.seed(137) # different seed for different organism
#' hs_m_ints_eig1 = eig_norm1(m=m_logInts,treatment=grps,prot.info=m_prot.info)
#' hs_m_ints_eig1$h.c # check the number of bias trends detected
#' hs_m_ints_norm = eig_norm2(rv=hs_m_ints_eig1)
#'
#' # Set up for presence/absence analysis
#' raw_list = list()
#' norm_imp_prot.info_list = list()
#' raw_list[[1]] = mm_m_ints_eig1$m
#' raw_list[[2]] = hs_m_ints_eig1$m
#' norm_imp_prot.info_list[[1]] = mm_m_ints_eig1$prot.info
#' norm_imp_prot.info_list[[2]] = hs_m_ints_eig1$prot.info
#'
#' protnames_norm_list = list()
#' protnames_norm_list[[1]] = unique(mm_m_ints_norm$normalized$MatchedID)
#' protnames_norm_list[[2]] = unique(hs_m_ints_norm$normalized$MatchedID)
#'
#' presAbs_dd = get_presAbs_prots(mm_list=raw_list,
#' prot.info=norm_imp_prot.info_list,
#' protnames_norm=protnames_norm_list,
#' prot_col_name=2)
#'
#' presAbs_de = peptideLevel_PresAbsDE(presAbs_dd[[1]][[1]],
#' grps, presAbs_dd[[2]][[1]],
#' pr_ppos=2)
#' @export
peptideLevel_PresAbsDE = function(mm, treatment, prot.info, pr_ppos=2){
  # Presence/absence differential expression: for each protein, all of its
  # peptides are treated as IID and a g-test is run on the pattern of
  # missing observations between the 2 treatment groups.
  # Returned columns: ProtIDused, FC (difference in % missing between the
  # two groups), P_val, BH_P_val, statistic, num_peptides, plus the
  # per-protein metadata columns from prot.info.
  all.proteins = unique(prot.info[,pr_ppos])
  numProts = length(all.proteins)
  # per-protein results: statistic/dof, p-value, BH p-value, g-statistic,
  # number of peptides (named below)
  y_out = data.frame(matrix(NA, numProts, 5))
  # per-protein counts of missing values in each of the 2 groups
  nummiss = data.frame(matrix(NA, numProts, 2))
  u_treat = unique(treatment)
  numgrps = length(u_treat)
  numeeachgroup = vector('numeric', length=numgrps)
  for(ii in seq_len(numgrps)) {
    numeeachgroup[ii] = sum(treatment == u_treat[ii])
  } # group sizes are needed for the FC (% missing) estimate below
  de_ret = NULL
  u_prot_info = NULL
  for (kk in seq_len(length(all.proteins))) {
    prot = all.proteins[kk]
    pmid.matches = prot.info[prot.info[,pr_ppos]==prot,1]
    idx.prot = which(prot.info[,1] %in% pmid.matches)
    # need to return unique prot.info, make it as we go
    ttt = prot.info[idx.prot,]
    if(!is.null(dim(ttt))) {
      u_prot_info = rbind(u_prot_info, ttt[1,])
    } else {
      u_prot_info = rbind(u_prot_info, ttt)
    }
    y_raw = mm[idx.prot,,drop=FALSE]
    n.peptide = nrow(y_raw)
    yy = as.vector(t(y_raw))
    nn = length(yy)
    # good to know how many peptides were in a given protein
    y_out[kk,5] = n.peptide
    # replicate treatment labels so they align with the stacked peptides;
    # the original vector is restored at the end of the iteration
    treatment_hold = treatment
    treatment = rep(treatment, times=n.peptide)
    # g-test on the missingness pattern between treatment groups
    xx = is.na(yy)
    treatsX = unique(treatment)
    if(sum(xx) < nn & sum(xx) > 0) {
      res = g.test(xx,treatment)
      y_out[kk,2] = res$p.value
      y_out[kk,1] = res$parameter
      y_out[kk,4] = res$statistic
    } else {
      # all values present or all absent: g-test cannot be performed.
      # Flag with out-of-range value 2; these rows are handled below.
      y_out[kk,2] = 2
      y_out[kk,1] = 2
      y_out[kk,4] = 2 # more likely to have all NAs
    }
    # count # of missing values in each treatment group,
    # will vary depending on the number of peptides
    nummiss[kk,1] = sum(xx[treatment==treatsX[1]]==TRUE)
    nummiss[kk,2] = sum(xx[treatment==treatsX[2]]==TRUE)
    treatment = treatment_hold
  } # end for each protein
  colnames(y_out) = c('FC', 'P_val', 'BH_P_val', 'statistic', 'num_peptides')
  # BH adjustment only for valid p-values on interval [0 1];
  # flagged proteins (p-value 2) get adjusted p-value 1
  ppos = y_out[,2] <= 1
  y_out[ppos,3] = stats::p.adjust(y_out[ppos,2],"BH")
  y_out[!ppos,3] = 1 # these would have been 2 in Raw p-values
  # FC estimate below:
  # (% peptides missing in grp1) - (% peptides missing in grp2)
  DE_res = data.frame(all.proteins, y_out, stringsAsFactors=FALSE)
  de_ret$DE_res = DE_res
  de_ret$prot.info = u_prot_info
  num_obs = matrix(0, length(all.proteins), numgrps)
  for(ii in seq_len(numgrps)) {
    num_obs[,ii] = de_ret$DE_res$num_peptides * numeeachgroup[ii]
  }
  percmiss = nummiss / num_obs
  de_ret$DE_res$FC = percmiss[,1] - percmiss[,2]
  cols1 = colnames(de_ret$DE_res)
  cols1[1] = "ProtIDused"
  cols2 = colnames(de_ret$prot.info)
  de_ret = data.frame(de_ret, stringsAsFactors = FALSE)
  colnames(de_ret) = c(cols1, cols2)
  return(de_ret)
}
#########################################################################
#' Multi-Matrix Presence Absence analysis
#'
#' Multi-Matrix Presence Absence Analysis computes Model-Based
#' statistics for each dataset and sums them up to produce the final
#' statistic. The significance is determined via a permutation
#' test which computes the same statistics and sums them
#' after permuting the values across treatment groups,
#' as is outlined in Karpievitch et al. 2018. Whenever possible
#' proteins should be analysed using the Model-Based
#' Differential Expression Analysis due to higher statistical
#' power over the Presence Absence analysis.
#'
#' @param mm_list list of matrices of intensities for each experiment,
#' dimensions: numpeptides x numsamples
#' @param treat list of data frames with treatment information to
#' compute the statistic,
#' parallel to mm_list and prot.info
#' @param prot.info list of protein metadata for each matrix in
#' mm_list, data.frame
#' parallel to mm_list and treat
#' @param prot_col_name column names present in all datasets that
#' identifies protein IDs
#' across all datasets
#' @param nperm number of permutations
#' @param dataset_suffix a list of strings that will be
#' appended to the column names
#' for FC, PV, BHPV and numbers of peptides
#'
#' @return a data frame with the following columns:
#' \describe{
#' \item{protIDused}{protein metadata, peptide sequence if was
#' passed in as one of the columns is the first peptide
#' sequence encountered in the data for that protein}
#' \item{FCs}{Averages across all datasets of the approximation
#' of the fold change computed as percent missing observations
#' group 1 minus in percent missing observations group 2 in
#' peptideLevel_PresAbsDE() function}
#' \item{P_val}{p-value for the comparison between 2 groups
#' (2 groups only here) obtained from a permutation test}
#' \item{BH_P_val}{Benjamini-Hochberg adjusted p-values}
#' \item{statistic}{statistic returned by the g-test and
#' summed across all datasets,
#' not very useful as depends on the direction of the
#' test and can produce all 0's}
#'   \item{u_prot_info}{column containing protein identifiers
#' across all datasets}
#' \item{FCs}{Approximation of the fold change computed
#' as percent missing observations
#' group 1 minus in percent missing observations
#' group 2 in peptideLevel_PresAbsDE() function}
#' \item{PV}{p-values produced by g-test for individual datasets}
#' \item{BHPV}{adjusted p-values produced by g-test for individual datasets}
#' \item{NUMPEP}{number of peptides observed for
#' each protein in each of the datasets}
#' }
#' @export
#' @examples
#' # Load mouse dataset
#' data(mm_peptides)
#' head(mm_peptides)
#' intsCols = 8:13
#' metaCols = 1:7
#' m_logInts = make_intencities(mm_peptides, intsCols) # will reuse the name
#' m_prot.info = make_meta(mm_peptides, metaCols)
#' m_logInts = convert_log2(m_logInts)
#' grps = as.factor(c('CG','CG','CG', 'mCG','mCG','mCG'))
#'
#' set.seed(135)
#' mm_m_ints_eig1 = eig_norm1(m=m_logInts,treatment=grps,prot.info=m_prot.info)
#' mm_m_ints_eig1$h.c # check the number of bias trends detected
#' mm_m_ints_norm = eig_norm2(rv=mm_m_ints_eig1)
#'
#' # Load human dataset
#' data(hs_peptides)
#' head(hs_peptides)
#' intsCols = 8:13
#' metaCols = 1:7
#' m_logInts = make_intencities(hs_peptides, intsCols)
#' m_prot.info = make_meta(hs_peptides, metaCols)
#' m_logInts = convert_log2(m_logInts)
#' grps = as.factor(c('CG','CG','CG', 'mCG','mCG','mCG'))
#'
#' set.seed(137)
#' hs_m_ints_eig1 = eig_norm1(m=m_logInts,treatment=grps,prot.info=m_prot.info)
#' hs_m_ints_eig1$h.c # check the number of bias trends detected
#' hs_m_ints_norm = eig_norm2(rv=hs_m_ints_eig1)
#'
#' # Set up for presence/absence analysis
#' raw_list = list()
#' norm_imp_prot.info_list = list()
#' raw_list[[1]] = mm_m_ints_eig1$m
#' raw_list[[2]] = hs_m_ints_eig1$m
#' norm_imp_prot.info_list[[1]] = mm_m_ints_eig1$prot.info
#' norm_imp_prot.info_list[[2]] = hs_m_ints_eig1$prot.info
#'
#' protnames_norm_list = list()
#' protnames_norm_list[[1]] = unique(mm_m_ints_norm$normalized$MatchedID)
#' protnames_norm_list[[2]] = unique(hs_m_ints_norm$normalized$MatchedID)
#'
#' presAbs_dd = get_presAbs_prots(mm_list=raw_list,
#' prot.info=norm_imp_prot.info_list,
#' protnames_norm=protnames_norm_list,
#' prot_col_name=2)
#'
#' ints_presAbs = list()
#' protmeta_presAbs = list()
#' ints_presAbs[[1]] = presAbs_dd[[1]][[1]] # Mouse
#' ints_presAbs[[2]] = presAbs_dd[[1]][[2]] # HS
#' protmeta_presAbs[[1]] = presAbs_dd[[2]][[1]]
#' protmeta_presAbs[[2]] = presAbs_dd[[2]][[2]]
#'
#' treats = list()
#' treats[[1]] = grps
#' treats[[2]] = grps
#'
#' subset_presAbs = subset_proteins(mm_list=ints_presAbs,
#' prot.info=protmeta_presAbs, 'MatchedID')
#'
#' nperm = 50 # set to 500+ for publication
#' set.seed(275937)
#' presAbs_comb = prot_level_multiMat_PresAbs(
#' mm_list=subset_presAbs$sub_mm_list,
#' treat=treats,
#' prot.info=subset_presAbs$sub_prot.info,
#' prot_col_name='MatchedID', nperm=nperm,
#' dataset_suffix=c('MM', 'HS') )
#'
#' plot_volcano(presAbs_comb$FC, presAbs_comb$BH_P_val,
#' FC_cutoff=.5, PV_cutoff=.05,
#' 'Combined Pres/Abs CG vs mCG')
#'
prot_level_multiMat_PresAbs=function(mm_list, treat, prot.info, prot_col_name,
                                     nperm=500, dataset_suffix){
  # Multi-Matrix Presence/Absence analysis. Per-dataset g-test statistics
  # from peptideLevel_PresAbsDE() are summed across datasets; significance
  # is estimated with a permutation test over treatment labels.
  warning("This function uses a random number generator. For reproducibility
          use set.seed(12345) with your choice of parameter", immediate.=TRUE)
  nsets = length(mm_list)
  # the combined statistic is a sum over datasets: need at least two
  if(nsets < 2) {
    stop("prot_level_multiMat_PresAbs() requires at least 2 datasets",
         call.=FALSE)
  }
  # select proteins that were detected in each experiment
  # make a list of unique protein IDs for each matrix in the list mm_list
  subset_data = subset_proteins(mm_list=mm_list,
                                prot.info=prot.info,
                                prot_col_name) # grps will not change
  # subset_data contains: "sub_mm_list" "sub_prot.info"
  # "sub_unique_mm_list" "sub_unique_prot.info" "common_list"
  message('Computing statistics')
  sub_mm_list = subset_data$sub_mm_list
  sub_prot.info = subset_data$sub_prot.info
  tt = colnames(sub_prot.info[[1]])
  ttt = tt == prot_col_name
  # position of the protein ID column that was passed in for linking IDs
  pos_prot_id_col = seq_len(length(tt))[ttt]
  tmp = peptideLevel_PresAbsDE(sub_mm_list[[1]],
                               treat[[1]], sub_prot.info[[1]],
                               pr_ppos=pos_prot_id_col)
  tstat_all = list()
  tstat_all[[1]] = tmp
  # statistic is stored in col 5 of:
  # ProtID FC p-val BH_p-val t_value num_peptides
  tstat = as.double(tmp[,5])
  FCs = as.double(tmp[,2])
  PV = as.double(tmp[,3])
  BHPV = as.double(tmp[,4])
  NUMPEP = as.numeric(tmp[,6])
  col_FC = paste('FC_', dataset_suffix[1], sep='')
  col_PV = paste('PV_', dataset_suffix[1], sep='')
  col_BHPV = paste('BHPV_', dataset_suffix[1], sep='')
  col_NUMPEP = paste('NUMPEP_', dataset_suffix[1], sep='')
  # protein IDs are identical across subsets, take them from dataset 1
  PROTIDS = tmp[,1]
  for(ii in 2:nsets){ # accumulate statistics for datasets 2..nsets
    tmp = peptideLevel_PresAbsDE(sub_mm_list[[ii]],
                                 treat[[ii]],
                                 sub_prot.info[[ii]],
                                 pr_ppos=pos_prot_id_col)
    tstat_all[[ii]] = tmp
    tstat = cbind(tstat, as.double(tmp[,5]))
    FCs = cbind(FCs, tmp[,2])
    PV = cbind(PV, tmp[,3])
    BHPV = cbind(BHPV, tmp[,4])
    NUMPEP = cbind(NUMPEP, tmp[,6])
    # per-dataset column headers, e.g. FC_MM, FC_HS, ...
    col_FC = c(col_FC, paste('FC_', dataset_suffix[ii], sep=''))
    col_PV = c(col_PV, paste('PV_', dataset_suffix[ii], sep=''))
    col_BHPV = c(col_BHPV, paste('BHPV_', dataset_suffix[ii], sep=''))
    col_NUMPEP = c(col_NUMPEP, paste('NUMPEP_', dataset_suffix[ii], sep=''))
  }
  colnames(FCs) = col_FC
  colnames(PV) = col_PV
  colnames(BHPV) = col_BHPV
  colnames(NUMPEP) = col_NUMPEP
  sum_tstat = rowSums(tstat) # combined statistic: summed over columns
  message('Performing permutation test')
  tstat_perm = list()
  for(ii in seq_len(nsets)) {
    message('Dataset ', as.character(ii) )
    tstat_perm[[ii]] = NULL
    for(jj in seq_len(nperm)) {
      # permute treatment labels for each iteration, then recompute stats.
      # pr_ppos now matches the column used for the unpermuted statistics
      # (was hard-coded to 2)
      perm_pos = sample(length(treat[[ii]]), length(treat[[ii]]) )
      tmp = peptideLevel_PresAbsDE(sub_mm_list[[ii]],
                                   treat[[ii]][perm_pos],
                                   sub_prot.info[[ii]],
                                   pr_ppos=pos_prot_id_col)
      if(jj == 1) {
        tstat_perm[[ii]] = as.matrix(as.double(tmp[,5]))
      } else {
        tstat_perm[[ii]] = cbind(tstat_perm[[ii]],
                                 as.matrix(as.double(tmp[,5])))
      }
    }
  }
  # sum the permutation statistics across datasets
  T_perm = tstat_perm[[1]]
  for(ii in 2:nsets) {
    T_perm = T_perm + tstat_perm[[ii]]
  }
  num_prot = dim(tstat)[1]
  p_vals = vector('numeric', length=num_prot)
  pos_stat_pos = sum_tstat >= 0
  # p-values are computed separately for positive and negative statistics
  # (two passes, as in prot_level_multi_part; previously this looped over
  # seq_len(num_prot), redoing work and mis-assigning the p-value when the
  # only protein had a negative statistic)
  for(ii in seq_len(2)) {
    if(ii == 1) {
      ppos = which(pos_stat_pos)
      for(kk in seq_len(length(ppos))) {
        p_vals[ppos[kk]] = (.5+ sum(T_perm[ppos[kk],] >=
                                      sum_tstat[ppos[kk]])) / (nperm+1)
      }
    } else {
      ppos = which(!pos_stat_pos)
      for(kk in seq_len(length(ppos))) {
        p_vals[ppos[kk]] = (.5+ sum(T_perm[ppos[kk],] <
                                      sum_tstat[ppos[kk]])) / (nperm+1)
      }
    }
  }
  # multiple testing adjustment - Benjamini-Hochberg
  adj_PV = stats::p.adjust(p_vals, method = 'BH')
  FC = rowMeans(FCs) # average (difference in % missing) across datasets
  # protein info is on peptide level: deduplicate to protein level,
  # taking protein IDs from dataset 1
  unik = !duplicated(sub_prot.info[[1]][,prot_col_name])
  ppos_u_prots = seq_along(sub_prot.info[[1]][,prot_col_name])[unik] # indices
  u_prot_info = sub_prot.info[[1]][ppos_u_prots,]
  res = data.frame(protIDused=PROTIDS, FC, P_val=p_vals,
                   BH_P_val=adj_PV, statistic=sum_tstat,
                   u_prot_info, FCs, PV, BHPV, NUMPEP)
  # column names in res are inherited from the structures
  # that are combined into the data frame
  return(res)
}
#' Subset proteins
#'
#' Subset proteins into ones common to all datasets passed
#' into the function and unique to each dataset. Note: for 3+ datasets
#' no intermediate combinations of proteins are returned, only
#' proteins common to all datasets, the rest are
#' returned as unique to each dataset.
#'
#' @param mm_list list of matrices for each experiment,
#' length = number of datasets to compare
#' internal dataset dimensions:
#' numpeptides x numsamples for each dataset
#' @param prot.info list of protein and peptide mapping
#' for each matrix in mm_list,
#' in same order as mm_list
#' @param prot_col_name column name in prot.info that contains
#' protein identifiers that
#' link all datasets together.
#' Not that Protein IDs will differ across
#' different organisms and cannot be used
#' as the linking identifier.
#' Function match_linker_ids() produces
#' numeric identifiers that link all
#' datasets together
#' @return data frame with the following columns
#' \describe{
#' \item{sub_mm_list}{list of dataframes of intensities
#' for each of the datasets
#' passed in with proteins present in all datasets}
#' \item{sub_prot.info}{list of dataframes of metadata
#' for each of the datasets
#' passed in with proteins present in all datasets.
#' Same order as sub_mm_list}
#' \item{sub_unique_mm_list}{list of dataframes of
#' intensities not found in all
#' datasets}
#' \item{sub_unique_prot.info}{list of dataframes of
#' metadata not found in all
#' datasets}
#'   \item{common_list}{list of protein IDs common to all datasets}
#' }
#' @examples
#' # Load mouse dataset
#' data(mm_peptides)
#' head(mm_peptides)
#' # different from parameter names as R uses
#' # outer name spaces if variable is undefined
#' intsCols = 8:13
#' metaCols = 1:7 # reusing this variable
#' m_logInts = make_intencities(mm_peptides, intsCols) # will reuse the name
#' m_prot.info = make_meta(mm_peptides, metaCols)
#' m_logInts = convert_log2(m_logInts)
#' grps = as.factor(c('CG','CG','CG', 'mCG','mCG','mCG'))
#' set.seed(173)
#' mm_m_ints_eig1 = eig_norm1(m=m_logInts,treatment=grps,prot.info=m_prot.info)
#' mm_m_ints_eig1$h.c # check the number of bias trends detected
#' mm_m_ints_norm = eig_norm2(rv=mm_m_ints_eig1)
#' mm_prot.info = mm_m_ints_norm$normalized[,1:7]
#' mm_norm_m = mm_m_ints_norm$normalized[,8:13]
#' set.seed(131)
#' imp_mm = MBimpute(mm_norm_m, grps,
#' prot.info=mm_prot.info, pr_ppos=2, my.pi=0.05,
#' compute_pi=FALSE)
#'
#' # Load human dataset
#' data(hs_peptides)
#' head(hs_peptides)
#' intsCols = 8:13
#' metaCols = 1:7 # reusing this variable
#' m_logInts = make_intencities(hs_peptides, intsCols) # will reuse the name
#' m_prot.info = make_meta(hs_peptides, metaCols)
#' m_logInts = convert_log2(m_logInts)
#' grps = as.factor(c('CG','CG','CG', 'mCG','mCG','mCG'))
#' hs_m_ints_eig1 = eig_norm1(m=m_logInts,treatment=grps,prot.info=m_prot.info)
#' hs_m_ints_eig1$h.c # check the number of bias trends detected
#' hs_m_ints_norm = eig_norm2(rv=hs_m_ints_eig1)
#' hs_prot.info = hs_m_ints_norm$normalized[,1:7]
#' hs_norm_m = hs_m_ints_norm$normalized[,8:13]
#' set.seed(131)
#' imp_hs = MBimpute(hs_norm_m, grps,
#' prot.info=hs_prot.info, pr_ppos=2,
#' my.pi=0.05,
#' compute_pi=FALSE)
#'
#' # Multi-Matrix Model-based differential expression analysis
#' # Set up needed variables
#' mms = list()
#' treats = list()
#' protinfos = list()
#' mms[[1]] = imp_mm$y_imputed
#' mms[[2]] = imp_hs$y_imputed
#' treats[[1]] = grps
#' treats[[2]] = grps
#' protinfos[[1]] = imp_mm$imp_prot.info
#' protinfos[[2]] = imp_hs$imp_prot.info
#'
#' subset_data = subset_proteins(mm_list=mms, prot.info=protinfos, 'MatchedID')
#' mms_mm_dd = subset_data$sub_unique_mm_list[[1]]
#' protinfos_mm_dd = subset_data$sub_unique_prot.info[[1]]
#' # Differential expression analysis for mouse-specific proteins
#' DE_mCG_CG_mm_dd = peptideLevel_DE(mms_mm_dd, grps,
#' prot.info=protinfos_mm_dd, pr_ppos=2)
#'
#' @export
subset_proteins = function(mm_list, prot.info, prot_col_name) {
  # Split proteins into those common to ALL datasets and those unique to
  # each dataset. Common proteins are sorted into the same ID order in
  # every dataset so they can be compared position-by-position.
  # Returns a list with: sub_mm_list, sub_prot.info (common proteins),
  # sub_unique_mm_list, sub_unique_prot.info (dataset-specific proteins),
  # and common_list (the shared protein IDs).
  ll = length(mm_list)
  common_list = ''
  uprots = list()
  for(ii in seq_len(ll)) {
    uprots[[ii]] = unique(prot.info[[ii]][,c(prot_col_name)])
    if(ii == 1) {
      common_list = uprots[[ii]]
    } else {
      # keep only protein IDs that overlap every dataset seen so far
      common_list = intersect(common_list,uprots[[ii]])
    }
  }
  # subset each experiment matrix to the proteins that are in ALL of the
  # datasets (common_list); need each dataset's unique proteins as well,
  # so this cannot be done in the loop above
  sub_mm_list = list()
  sub_prot.info = list()
  sub_unique_mm_list = list()
  sub_unique_prot.info = list()
  for(ii in seq_len(ll)) {
    ppos = prot.info[[ii]][,c(prot_col_name)] %in% common_list
    sub_mm_list[[ii]] = mm_list[[ii]][ppos,]
    sub_prot.info[[ii]] = prot.info[[ii]][ppos,]
    sub_unique_mm_list[[ii]] = mm_list[[ii]][!ppos,]
    sub_unique_prot.info[[ii]] = prot.info[[ii]][!ppos,]
    # each list with proteins in common between datasets needs to be in
    # the same order for future comparisons: sort by ID (natural order)
    indx = gtools::mixedorder(as.character(
      sub_prot.info[[ii]][,c(prot_col_name)]))
    sub_mm_list[[ii]] = sub_mm_list[[ii]][indx,]
    sub_prot.info[[ii]] = sub_prot.info[[ii]][indx,]
  }
  ret = NULL
  ret$sub_mm_list = sub_mm_list
  ret$sub_prot.info = sub_prot.info
  ret$sub_unique_mm_list = sub_unique_mm_list
  ret$sub_unique_prot.info = sub_unique_prot.info
  ret$common_list = common_list
  return(ret)
}
#' Get Presence/Absence Proteins
#'
#' Function get_presAbs_prots() produces a
#' subset of protein meta data and intensities
#' for multiple datasets pass in as a list.
#' If a single dataset is passed in
#' (list of length one) it will be processed in the same way as longer lists.
#'
#' @param mm_list list of matrices of intensities for each experiment.
#' Dimensions: numpeptides x numsamples
#' different for each dataset.
#' @param prot.info list of protein and peptide metadata/mappings
#' for each matrix in mm_list, data.frames "parallel"
#' to matrices in mm_list.
#' @param protnames_norm list of protein identifies to be used
#' to determine peptides that will be placed into
#' Presence/Absence analysis category due to
#' too many missing peptides. Taken from the
#' return value from eig_norm2().
#' @param prot_col_name column name (string) that will be used to get
#' ProteinIDs in the raw data matrices
#'
#' @return list of lists of length 2
#' \describe{
#' \item{intensities}{list of intensities in the same
#' order and of the same length as
#' the number of datasets that were passed into the function}
#' \item{protein metadata}{list of protein metadata in the
#' same order and of the same length as
#' the number of datasets that as were passed into the function}
#' }
#'@examples
#' # Load mouse dataset
#' data(mm_peptides)
#' head(mm_peptides)
#' intsCols = 8:13
#' metaCols = 1:7 # reusing this variable
#' m_logInts = make_intencities(mm_peptides, intsCols) # will reuse the name
#' m_prot.info = make_meta(mm_peptides, metaCols)
#' m_logInts = convert_log2(m_logInts)
#' grps = as.factor(c('CG','CG','CG', 'mCG','mCG','mCG'))
#' mm_m_ints_eig1 = eig_norm1(m=m_logInts,treatment=grps,prot.info=m_prot.info)
#' mm_m_ints_eig1$h.c # check the number of bias trends detected
#' mm_m_ints_norm = eig_norm2(rv=mm_m_ints_eig1)
#'
#' # Load human dataset
#' data(hs_peptides)
#' head(hs_peptides)
#' intsCols = 8:13
#' metaCols = 1:7 # reusing this variable
#' m_logInts = make_intencities(hs_peptides, intsCols) # will reuse the name
#' m_prot.info = make_meta(hs_peptides, metaCols)
#' m_logInts = convert_log2(m_logInts)
#' grps = as.factor(c('CG','CG','CG', 'mCG','mCG','mCG'))
#' hs_m_ints_eig1 = eig_norm1(m=m_logInts,treatment=grps,prot.info=m_prot.info)
#' hs_m_ints_eig1$h.c # check the number of bias trends detected
#' hs_m_ints_norm = eig_norm2(rv=hs_m_ints_eig1)
#'
#' # Set up for presence/absence analysis
#' raw_list = list()
#' norm_imp_prot.info_list = list()
#' raw_list[[1]] = mm_m_ints_eig1$m
#' raw_list[[2]] = hs_m_ints_eig1$m
#' norm_imp_prot.info_list[[1]] = mm_m_ints_eig1$prot.info
#' norm_imp_prot.info_list[[2]] = hs_m_ints_eig1$prot.info
#'
#' protnames_norm_list = list()
#' protnames_norm_list[[1]] = unique(mm_m_ints_norm$normalized$MatchedID)
#' protnames_norm_list[[2]] = unique(hs_m_ints_norm$normalized$MatchedID)
#'
#' presAbs_dd = get_presAbs_prots(mm_list=raw_list,
#' prot.info=norm_imp_prot.info_list,
#' protnames_norm=protnames_norm_list,
#' prot_col_name=2)
#' @export
get_presAbs_prots <- function(mm_list, prot.info,
                              protnames_norm, prot_col_name) {
  # Extract the Presence/Absence portion of one or more datasets: peptides
  # whose protein IDs do NOT appear in protnames_norm (i.e. proteins dropped
  # during normalization for having too many missing values).
  #
  # Args:
  #   mm_list        - list of intensity matrices (numpeptides x numsamples)
  #   prot.info      - list of metadata data.frames parallel to mm_list
  #   protnames_norm - list of protein IDs that survived normalization
  #   prot_col_name  - column (name or index) holding the protein identifiers
  # Returns a two-element list: [[1]] Pres/Abs intensities per dataset,
  #   [[2]] the matching protein metadata per dataset, in input order.
  n_sets <- length(mm_list)
  kept_ints <- vector("list", n_sets)
  kept_meta <- vector("list", n_sets)
  for (idx in seq_len(n_sets)) {
    # TRUE for peptides belonging to normalized proteins; the Pres/Abs
    # subset is the complement of this mask.
    is_normalized <- prot.info[[idx]][, c(prot_col_name)] %in%
      protnames_norm[[idx]]
    message('Number of peptides normalized: ', sum(is_normalized))
    message('Number of peptides Pres/Abs: ', sum(!is_normalized))
    keep <- !is_normalized
    kept_meta[[idx]] <- prot.info[[idx]][keep, ]
    kept_ints[[idx]] <- mm_list[[idx]][keep, ]
  }
  list(kept_ints, kept_meta)
}
# NOTE(review): the following lines are dataset-viewer boilerplate accidentally
# appended to the scraped file, not R code; commented out to keep the file
# parseable.
# |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.