blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a4ce2f028ea218c926292f2a4da5420071af4f54 | 29585dff702209dd446c0ab52ceea046c58e384e | /expectreg/R/expectreg.ls.R | cbd5aa2fc72cb3058abca113a468e86b443a8c73 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,361 | r | expectreg.ls.R | expectreg.ls <-
function (formula, data = NULL, estimate = c("laws", "restricted",
    "bundle", "sheets"), smooth = c("schall", "gcv", "cvgrid",
    "aic", "bic", "lcurve", "fixed"), lambda = 1, expectiles = NA,
    ci = FALSE)
{
    ## Least-squares expectile regression.
    ##
    ## formula    - model formula; the right-hand side holds base learners
    ##              (rb(...)) or plain variable names.
    ## data       - optional data source for the formula variables.
    ## estimate   - estimation scheme: "laws", "restricted", "bundle" or
    ##              "sheets".
    ## smooth     - smoothing-parameter selection method; "fixed" keeps the
    ##              supplied lambda.
    ## lambda     - starting / fixed smoothing parameter.
    ## expectiles - asymmetry levels in (0,1); NA yields a default grid,
    ##              "density" a dense grid of 99 values.
    ## ci         - if TRUE, coefficient covariance matrices are computed.
    ##
    ## Returns an object of class c("expectreg", estimate).
    smooth = match.arg(smooth)
    estimate = match.arg(estimate)
    # Determine the vector of asymmetry levels pp.
    if (!is.na(charmatch(expectiles[1], "density")) && charmatch(expectiles[1],
        "density") > 0) {
        pp <- seq(0.01, 0.99, by = 0.01)
    }
    else if (any(is.na(expectiles)) || !is.vector(expectiles) ||
        any(expectiles > 1) || any(expectiles < 0)) {
        # Invalid or missing specification: fall back to a default grid.
        pp <- c(0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 0.8, 0.9, 0.95,
            0.98, 0.99)
    }
    else {
        pp <- expectiles
    }
    np <- length(pp)
    # Response vector, evaluated in 'data' with the formula environment
    # as fallback.
    yy = eval(as.expression(formula[[2]]), envir = data, enclos = environment(formula))
    attr(yy, "name") = deparse(formula[[2]])
    m = length(yy)
    # Containers collected over the base learners.
    design = list()
    x = list()
    types = list()
    bnd = list()
    Zspathelp = list()
    nb = vector()
    krig.phi = list()
    center = TRUE
    varying = list()
    Blist = list()
    Plist = list()
    # Build the list of base learners from the formula right-hand side.
    if (formula[[3]] == "1") {
        # Intercept-only model.
        design[[1]] = rb(matrix(1, nrow = m, ncol = 1), "parametric",
            center = FALSE)
        smooth = "fixed"
    }
    else if (formula[[3]] == ".") {
        # All non-response columns of 'data' enter parametrically.
        design[[1]] = rb(data[, names(data) != all.vars(formula[[2]])],
            "parametric")
        smooth = "fixed"
    }
    else for (i in 1:length(labels(terms(formula)))) {
        types[[i]] = strsplit(labels(terms(formula))[i], "(",
            fixed = TRUE)[[1]][1]
        if (types[[i]] == labels(terms(formula))[i]) {
            # Plain variable name (no rb() call): parametric effect.
            design[[i]] = rb(eval(parse(text = labels(terms(formula))[i]),
                envir = data, enclos = environment(formula)),
                "parametric")
            types[[i]] = "parametric"
            design[[i]]$xname = labels(terms(formula))[i]
        }
        else design[[i]] = eval(parse(text = labels(terms(formula))[i]),
            envir = data, enclos = environment(formula))
    }
    nterms = length(design)
    # Assemble design matrix B, block-diagonal penalty DD and constraint
    # matrix, starting with the first base learner ...
    varying[[1]] = design[[1]][[9]]
    if (any(!is.na(varying[[1]]))) {
        # Varying-coefficient term: basis is multiplied elementwise.
        B = design[[1]][[1]] * varying[[1]]
        Blist[[1]] = design[[1]][[1]] * varying[[1]]
    }
    else {
        B = design[[1]][[1]]
        Blist[[1]] = design[[1]][[1]]
    }
    DD = as.matrix(design[[1]][[2]])
    Plist[[1]] = DD
    x[[1]] = design[[1]][[3]]
    names(x)[1] = design[[1]]$xname[1]
    types[[1]] = design[[1]][[4]]
    bnd[[1]] = design[[1]][[5]]
    Zspathelp[[1]] = design[[1]][[6]]
    nb[1] = ncol(design[[1]][[1]])
    krig.phi[[1]] = design[[1]][[7]]
    center = center && design[[1]][[8]]
    constmat = as.matrix(design[[1]]$constraint)
    # ... then appending the remaining learners block-diagonally.
    if (length(design) > 1)
        for (i in 2:length(labels(terms(formula)))) {
            varying[[i]] = design[[i]][[9]]
            if (any(!is.na(varying[[i]]))) {
                B = cbind(B, design[[i]][[1]] * varying[[i]])
                Blist[[i]] = design[[i]][[1]] * varying[[i]]
            }
            else {
                B = cbind(B, design[[i]][[1]])
                Blist[[i]] = design[[i]][[1]]
            }
            design[[i]][[2]] = as.matrix(design[[i]][[2]])
            Plist[[i]] = design[[i]][[2]]
            DD = rbind(cbind(DD, matrix(0, nrow = nrow(DD), ncol = ncol(design[[i]][[2]]))),
                cbind(matrix(0, nrow = nrow(design[[i]][[2]]),
                  ncol = ncol(DD)), design[[i]][[2]]))
            constmat = rbind(cbind(constmat, matrix(0, nrow = nrow(constmat),
                ncol = ncol(design[[i]]$constraint))), cbind(matrix(0,
                nrow = nrow(design[[i]]$constraint), ncol = ncol(constmat)),
                design[[i]]$constraint))
            x[[i]] = design[[i]][[3]]
            names(x)[i] = design[[i]]$xname[1]
            types[[i]] = design[[i]][[4]]
            bnd[[i]] = design[[i]][[5]]
            Zspathelp[[i]] = design[[i]][[6]]
            nb[i] = ncol(design[[i]][[1]])
            krig.phi[[i]] = design[[i]][[7]]
            center = center && design[[i]][[8]]
        }
    if (center) {
        # Prepend a global intercept column (unpenalised, unconstrained).
        B = cbind(1, B)
        DD = rbind(0, cbind(0, DD))
        constmat = rbind(0, cbind(0, constmat))
    }
    # Fit the expectile curves with the requested estimation scheme.
    if (estimate == "laws")
        coef.vector = laws(B, DD, yy, pp, lambda, smooth, nb,
            center, constmat, types)
    else if (estimate == "restricted") {
        coef.vector = restricted(B, DD, yy, pp, lambda, smooth,
            nb, center, constmat, types)
        trend.coef = coef.vector[[4]]
        residual.coef = coef.vector[[5]]
        asymmetry = coef.vector[[6]]
    }
    else if (estimate == "bundle") {
        coef.vector = bundle(B, DD, yy, pp, lambda, smooth, nb,
            center, constmat, types)
        trend.coef = coef.vector[[4]]
        residual.coef = coef.vector[[5]]
        asymmetry = coef.vector[[6]]
    }
    else if (estimate == "sheets")
        coef.vector = sheets(Blist, Plist, yy, pp, lambda, smooth,
            nb, center, types)
    vector.a.ma.schall = coef.vector[[1]]
    lala = coef.vector[[2]]
    diag.hat = coef.vector[[3]]
    covariance = NULL
    if (ci) {
        # Sandwich-type covariance matrix of the coefficients, one matrix
        # per asymmetry level, using the expectile weights W.
        covariance = list()
        for (i in 1:np) {
            W = as.vector(ifelse(yy > B %*% vector.a.ma.schall[,
                i], pp[i], 1 - pp[i]))
            square.dev = (yy - B %*% vector.a.ma.schall[, i])^2
            correct = 1/(1 - diag.hat[, i])
            if (any(is.na(W))) {
                # Missing responses get neutral weight / zero deviation.
                correct[!is.na(W)] = correct[1:(length(correct) -
                  length(which(is.na(W))))]
                correct[is.na(W)] = 1
                W[which(is.na(W))] = 1
                square.dev[which(is.na(square.dev))] = 0
            }
            lahmda = rep(lala[, i], times = nb)
            if (center)
                lahmda = c(0, lahmda)
            K = lahmda * t(DD) %*% DD
            helpmat = solve(t(W * B) %*% B + K)
            covariance[[i]] = helpmat %*% (t(B * (W^2 * (correct^2 *
                square.dev)[, 1])) %*% B) %*% helpmat
        }
    }
    Z <- list()
    coefficients <- list()
    final.lambdas <- list()
    helper <- list()
    # Fitted values on the full (possibly intercept-augmented) design.
    fitted = B %*% vector.a.ma.schall
    if (center) {
        # Split the global intercept row off the coefficient matrix.
        intercept = vector.a.ma.schall[1, ]
        B = B[, -1, drop = FALSE]
        vector.a.ma.schall = vector.a.ma.schall[-1, , drop = FALSE]
    }
    else intercept = rep(0, np)
    # Fitted values and coefficients per base learner. Every effect type
    # shares the same computation; the types only differ in the auxiliary
    # information stored in 'helper' (boundary polygons, decomposition
    # matrices, kriging parameters).
    for (k in 1:length(design)) {
        final.lambdas[[k]] = lala[k, ]
        names(final.lambdas)[k] = design[[k]]$xname
        # Column range of learner k inside the stacked coefficient vector.
        partbasis = (sum(nb[0:(k - 1)]) + 1):(sum(nb[0:k]))
        if (types[[k]] %in% c("pspline", "markov", "2dspline",
            "radial", "krig", "random", "ridge", "parametric",
            "special")) {
            Z[[k]] <- matrix(NA, m, np)
            coefficients[[k]] = matrix(NA, nrow = nb[k], ncol = np)
            helper[[k]] = switch(types[[k]], markov = list(bnd[[k]],
                Zspathelp[[k]]), radial = Zspathelp[[k]], krig = list(krig.phi[[k]],
                Zspathelp[[k]]), NA)
            for (i in 1:np) {
                Z[[k]][, i] <- design[[k]][[1]] %*% vector.a.ma.schall[partbasis,
                  i, drop = FALSE] + intercept[i]
                coefficients[[k]][, i] = vector.a.ma.schall[partbasis,
                  i, drop = FALSE]
            }
        }
        names(Z)[k] = design[[k]]$xname[1]
        names(coefficients)[k] = design[[k]]$xname[1]
    }
    desmat = B
    if (center)
        desmat = cbind(1, B)
    result = list(lambda = final.lambdas, intercepts = intercept,
        coefficients = coefficients, values = Z, response = yy,
        covariates = x, formula = formula, asymmetries = pp,
        effects = types, helper = helper, design = desmat, bases = design,
        fitted = fitted, covmat = covariance)
    if (estimate == "restricted" || estimate == "bundle") {
        result$trend.coef = trend.coef
        result$residual.coef = residual.coef
        result$asymmetry.coef = asymmetry
    }
    # Prediction closure: evaluate every base learner at 'newdata' and
    # add the intercepts back.
    result$predict <- function(newdata = NULL) {
        BB = list()
        values = list()
        bmat = NULL
        for (k in 1:length(coefficients)) {
            BB[[k]] = predict(design[[k]], newdata)
            values[[k]] <- BB[[k]] %*% coefficients[[k]]
            values[[k]] = t(apply(values[[k]], 1, function(x) {
                x + intercept
            }))
            bmat = cbind(bmat, BB[[k]])
        }
        if (center) {
            # Local copy: re-attach the intercept row for the fit.
            bmat = cbind(1, bmat)
            vector.a.ma.schall = rbind(intercept, vector.a.ma.schall)
        }
        fitted = bmat %*% vector.a.ma.schall
        names(values) = names(coefficients)
        list(fitted = fitted, values = values)
    }
    class(result) = c("expectreg", estimate)
    result
}
|
6a0ccef30043e0ff1c6a9609210d22eae7a9caac | 75b265e23131ce614640a804d1d7a4f18695bee3 | /man/period_ends.Rd | 747abe904635ea6b8327f8873368c990569bfb92 | [
"MIT"
] | permissive | ppd-dpp/calpr | 0db70d3dc182147fa73de9d7d670f902463f8f0f | 88884e93812d9d0d5a861d8f4c96b8bb6f6a5786 | refs/heads/master | 2022-11-04T13:07:49.311293 | 2020-06-15T16:53:38 | 2020-06-15T16:53:38 | 271,619,398 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 548 | rd | period_ends.Rd | \name{period_ends}
\alias{period_ends}
\title{Period ends}
\description{
End points for calendar periods
}
\usage{
period_ends(x, period, start.mon)
}
\arguments{
\item{x}{A vector of dates.}
\item{period}{One of 'month', 'quarter', or 'year'.}
\item{start.mon}{Starting month of a year, beginning at 1 for January.}
}
\details{
This is a simple helper function to make a sequence of dates to cut \code{x}. There is no argument checking, and it should probably not be called directly.
}
\value{
A pair of POSIXlt values that bound \code{x}.
}
|
def0758566e720f97fe71cf4253ad237b5985c5e | 5db254fa65b50a095f4b44f9346bbb743aa894d1 | /R/method_EM.r | b0c6540961eb1ed09438545fbf5d3bec72bf7ca7 | [] | no_license | cran/nowcasting | 5866a701e83f61b063b337967fd64f75882499a5 | 55e3e19dad1bd6528116422b6fca2b12b4479fe7 | refs/heads/master | 2021-06-25T05:11:10.040964 | 2019-08-01T04:00:02 | 2019-08-01T04:00:02 | 100,689,288 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 27,679 | r | method_EM.r | #' @importFrom magic adiag
# Internal: treat missing values in a data matrix prior to estimation.
#
# X       - T x N numeric matrix, possibly containing NAs.
# options - list with components:
#             method - integer in 1:5 choosing the treatment (see the
#                      branches below);
#             k      - half-window of the moving-average filter used to
#                      refine the interpolated values.
#
# Returns list(X = treated matrix with possibly fewer rows,
#              indNaN = logical matrix of NA positions of the
#                       (possibly row-trimmed) input).
remNaNs_spline <-function(X,options){
  TT <- dim(X)[1]
  N <- dim(X)[2]
  k <- options$k
  indNaN <- is.na(X)
  if(options$method == 1){ # replace all missing values (method of Giannone et al. 2008)
    for (i in 1:N){
      x = X[,i]
      # Fill NAs with the series median, then smooth with a moving average
      # over a window of 2k+1 (series padded at both ends) and keep the
      # smoothed values at the originally missing positions.
      x[indNaN[,i]] = median(x,na.rm = T);
      x_MA<-filter(x = c(rep(x[1],k),x,rep(x[length(x)],k)),filter = rep(1,2*k+1)/(2*k+1),sides = 1)
      x_MA=x_MA[(2*k+1):length(x_MA)]
      x[indNaN[,i]]=x_MA[indNaN[,i]]
      X[,i]=x;
    }
  }else if(options$method == 2){ # replace missing values after removing leading and closing zeros
    # Drop rows where more than 80% of the series are missing ...
    rem1 <- (rowSums(indNaN)>N*0.8)
    nanLead <- which(rem1)
    # nanEnd <- which(rem1[length(rem1):1])
    # nanLE <- c(nanEnd,nanLead)
    nanLE<-nanLead
    X<-X[-nanLE,]
    indNaN=is.na(X)
    # ... then cubic-spline interpolate the interior gaps of each series
    # and median/moving-average fill whatever remains.
    for (i in 1:N){
      x = X[,i]
      isnanx = is.na(x)
      t1 = min(which(!isnanx))
      t2 = max(which(!isnanx))
      x1<-stats::spline(x[t1:t2],xout = 1:(t2-t1+1))
      xx<-x1$y
      x[t1:t2]<-x1$y
      isnanx<-is.na(x)
      x[isnanx] <- median(x,na.rm = T)
      x_MA<-filter(x = c(rep(x[1],k),x,rep(x[length(x)],k)),filter = rep(1,2*k+1)/(2*k+1),sides = 1)
      x_MA=x_MA[(2*k+1):length(x_MA)]
      x[indNaN[,i]]=x_MA[indNaN[,i]]
      X[,i]=x;
    }
  }else if(options$method == 3){ # only remove rows with leading and closing zeros
    # Drop rows that are entirely missing; other NAs are left untouched.
    rem1 <- (rowSums(indNaN)==N)
    nanLead <- which(rem1)
    # nanEnd <- which(rem1[length(rem1):1])
    # nanLE <- c(nanEnd,nanLead)
    nanLE<-nanLead
    if(length(nanLE) != 0){
      X <- X[-nanLE,]
    }
    indNaN=is.na(X)
  }else if(options$method == 4){ # remove rows with leading and closing zeros & replace missing values
    # Same fill procedure as method 2, but rows are dropped only when
    # they are entirely missing.
    rem1 <- (rowSums(indNaN)==N)
    nanLead <- which(rem1)
    # nanEnd <- which(rem1[length(rem1):1])
    # nanLE <- c(nanEnd,nanLead)
    nanLE<-nanLead
    X<-X[-nanLE,]
    indNaN=is.na(X)
    for (i in 1:N){
      x = X[,i]
      isnanx = is.na(x)
      t1 = min(which(!isnanx))
      t2 = max(which(!isnanx))
      x1<-stats::spline(x[t1:t2],xout = 1:(t2-t1+1))
      xx<-x1$y
      x[t1:t2]<-x1$y
      isnanx<-is.na(x)
      x[isnanx] <- median(x,na.rm = T)
      x_MA<-filter(x = c(rep(x[1],k),x,rep(x[length(x)],k)),filter = rep(1,2*k+1)/(2*k+1),sides = 1)
      x_MA=x_MA[(2*k+1):length(x_MA)]
      x[indNaN[,i]]=x_MA[indNaN[,i]]
      X[,i]=x;
    }
  }else if(options$method == 5){
    # Spline/median/moving-average fill without removing any rows.
    indNaN=is.na(X)
    for (i in 1:N){
      x = X[,i]
      isnanx = is.na(x)
      t1 = min(which(!isnanx))
      t2 = max(which(!isnanx))
      x1<-stats::spline(x[t1:t2],xout = 1:(t2-t1+1))
      xx<-x1$y
      x[t1:t2]<-x1$y
      isnanx<-is.na(x)
      x[isnanx] <- median(x,na.rm = T)
      x_MA<-filter(x = c(rep(x[1],k),x,rep(x[length(x)],k)),filter = rep(1,2*k+1)/(2*k+1),sides = 1)
      x_MA=x_MA[(2*k+1):length(x_MA)]
      x[indNaN[,i]]=x_MA[indNaN[,i]]
      X[,i]=x;
    }
  }
  return(list(X = X,indNaN=indNaN))
}
# Internal: initial conditions for the EM estimation of the block dynamic
# factor model.
#
# Computes starting values for the system matrices via principal
# components (per block) and OLS regressions on the resulting factors:
#   A, Q       - transition matrix and state innovation covariance
#   C, R       - observation (loading) matrix and observation covariance
#   initZ,initV - initial state vector and state covariance
#
# xNaN   - standardised T x N data with NAs; r - factors per block;
# p      - VAR lag order; blocks - N x n_b loading restrictions;
# optNaN - options for remNaNs_spline; R_mat, q - aggregation constraints
# for quarterly loadings; nQ - number of quarterly series (the LAST nQ
# columns of xNaN); i_idio - indicator of idiosyncratic AR components.
InitCond<-function(xNaN,r,p,blocks,optNaN,R_mat,q,nQ,i_idio){
  x<-xNaN
  Rcon<-R_mat
  # library(magic)
  pC = size(Rcon,2)
  ppC = max(p,pC)
  n_b = size(blocks,2)
  # Interpolate missing values only for the purpose of starting values.
  res_remNaNs_spline <- remNaNs_spline(x,optNaN)
  xBal<- res_remNaNs_spline$X
  indNaN <- res_remNaNs_spline$indNaN
  TT <- dim(xBal)[1] # time T of series
  N <- dim(xBal)[2] # number of series
  NM <- N-nQ # number of monthly frequency series
  xNaN = xBal
  for(i in 1:N){
    xNaN[indNaN[,i],i] <- NA
  }
  # Initialize model coefficient output
  C = {}
  A = {}
  Q = {}
  initV = {}
  res = xBal
  resNaN = xNaN
  # Set the first observations as NaNs: for the quarterly-monthly
  # aggregation scheme. (1:pC-1 parses as (1:pC)-1 = 0:(pC-1), i.e.
  # rows 1 .. pC-1 are flagged.)
  indNaN[1:pC-1,] <- T
  for(i in 1:n_b){ # loop for each block
    r_i<-r[i]
    ########################
    # Observation equation #
    ########################
    C_i = zeros(N,r_i*ppC) # Initialize state variable matrix helper
    idx_i = find(blocks[,i]) # returns the series loaded in block i
    # Note that the variables have been reshuffled so as to have all quarterly at the last columns of X
    idx_iM = idx_i[idx_i<NM+1]; # index for monthly variables
    idx_iQ = idx_i[idx_i>NM]; # index for quarterly variables
    # Principal components of the monthly series in the block: the first
    # r_i eigenvectors of the residual covariance give the loadings.
    eig<-eigen(cov(res[,idx_iM]))
    v<-eig$vectors[,1:r_i]
    d<-eig$values[1:r_i]
    C_i[idx_iM,1:r_i] = v
    f = as.matrix(res[,idx_iM])%*%as.matrix(v)
    # Build the matrix FF of current and lagged factors, column-wise.
    for(kk in 0:(max(p+1,pC)-1)){
      if(kk == 0){
        FF<-f[(pC-kk):(dim(f)[1]-kk),]
      }else{
        FF <- cbind(FF,f[(pC-kk):(dim(f)[1]-kk),])
      }
    }
    Rcon_i = kronecker(Rcon,eye(r_i))
    q_i = kronecker(q,zeros(r_i,1));
    ff = FF[,1:(r_i*pC)]
    for(j in idx_iQ){ # loadings of the quarterly variables
      xx_j = resNaN[pC:dim(resNaN)[1],j]
      # Fall back to the interpolated residuals when too few
      # observations are available for the regression.
      if(sum(!is.na(xx_j)) < size(ff,2)+2){
        xx_j = res[pC:dim(res)[1],j]
      }
      ff_j = ff[!is.na(xx_j),]
      xx_j = xx_j[!is.na(xx_j)]
      iff_j = solve(t(ff_j)%*%ff_j)
      # OLS loading, then projected onto the aggregation constraint
      # Rcon_i %*% Cc = q_i (restricted least squares).
      Cc = iff_j%*%t(ff_j)%*%xx_j
      Cc = Cc - iff_j%*%t(Rcon_i)%*%solve(Rcon_i%*%iff_j%*%t(Rcon_i))%*%(Rcon_i%*%Cc-q_i);
      C_i[j,1:(pC*r_i)] <- t(Cc)
    }
    # Remove the part explained by this block before the next block.
    ff = rbind(zeros(pC-1,pC*r_i),ff)
    res = res - ff%*%t(C_i)
    resNaN = res
    for(i_aux in 1:dim(indNaN)[2]){
      resNaN[indNaN[,i_aux],i_aux] <- NA
    }
    C <- cbind(C,C_i)
    #######################
    # Transition Equation #
    #######################
    # VAR(p) of the block factors, written in companion form.
    z <- FF[,1:r_i]
    Z <- FF[,(r_i+1):(r_i*(p+1))]
    A_i = t(zeros(r_i*ppC,r_i*ppC))
    A_temp = solve(t(Z)%*%Z)%*%t(Z)%*%z
    A_i[1:r_i,1:(r_i*p)] <- t(A_temp)
    A_i[(r_i+1):dim(A_i)[1],1:(r_i*(ppC-1))] <- eye(r_i*(ppC-1))
    ##########################
    Q_i = zeros(ppC*r_i,ppC*r_i)
    e = z - Z%*%A_temp # VAR residuals
    Q_i[1:r_i,1:r_i] = cov(e); # VAR covariance matrix
    # Unconditional state covariance from the discrete Lyapunov equation.
    initV_i = matrix(solve(eye((r_i*ppC)^2)-kronecker(A_i,A_i))%*%c(Q_i),r_i*ppC,r_i*ppC);
    # Stack blocks block-diagonally.
    if(is.null(A)){
      A<-A_i
    }else{
      A <- magic::adiag(A,A_i)
    }
    if(is.null(Q)){
      Q<-Q_i
    }else{
      Q <- magic::adiag(Q,Q_i)
    }
    if(is.null(initV)){
      initV<-initV_i
    }else{
      initV <- magic::adiag(initV,initV_i)
    }
  }
  R = diag(apply(resNaN, 2, stats::var, na.rm = T))
  eyeN = eye(N)
  eyeN<-eyeN[,i_idio]
  # Initial conditions
  C <- cbind(C,eyeN)
  ii_idio = find(i_idio)
  n_idio = length(ii_idio)
  B = zeros(n_idio)
  S = zeros(n_idio)
  BM <- zeros(n_idio)
  SM <- zeros(n_idio)
  # Loop for monthly variables
  for (i in 1:n_idio){
    # Set observation equation residual covariance matrix diagonal
    R[ii_idio[i],ii_idio[i]] <- 1e-04
    # Subsetting series residuals for series i
    res_i <- resNaN[,ii_idio[i]]
    # number of leading zeros
    # leadZero = max( find( t(1:TT) == cumsum(is.na(res_i)) ) )
    # endZero = max( find( TT:1 == cumsum(is.na(res_i[length(res_i):1])) ) );
    # res_i<-res_i[(leadZero+1):(length(res_i)-endZero)]
    res_i<-res_i[!is.na(res_i)]
    # Linear regression: AR(1) process for monthly series residuals
    BM[i,i] = solve(t(res_i[1:(length(res_i)-1)])%*%res_i[1:(length(res_i)-1)])%*%t(res_i[1:(length(res_i)-1)])%*%res_i[2:length(res_i)]
    SM[i,i] = stats::var(res_i[2:length(res_i)]-res_i[1:(length(res_i)-1)]*BM[i,i])
  }
  # blocks for covariance matrices
  initViM = diag(1/diag(eye(size(BM,1))-BM^2))%*%SM;
  if(!nQ==0){
    # Quarterly series: append the (1,2,3,2,1) aggregation loadings and
    # the AR(1) companion blocks for the quarterly idiosyncratic states.
    C<-cbind(C,rbind(zeros(NM,5*nQ),t(kronecker(eye(nQ),c(1,2,3,2,1)))))
    Rdiag<-diag(R)
    sig_e <- Rdiag[(NM+1):N]/19
    Rdiag[(NM+1):N] <- 1e-04
    R <- diag(Rdiag) # Covariance for obs matrix residuals
    # for BQ, SQ
    rho0 <- 0.1
    temp <- zeros(5)
    temp[1,1] <- 1
    # BQ and SQ
    BQ <- kronecker(eye(nQ),rbind(cbind(rho0,zeros(1,4)),cbind(eye(4),zeros(4,1))))
    if(length(sig_e)>1){
      SQ = kronecker(diag((1-rho0^2)*sig_e),temp)
    }else{
      SQ = kronecker((1-rho0^2)*sig_e,temp)
    }
    initViQ = matrix(solve(eye((5*nQ)^2)-kronecker(BQ,BQ))%*%c(SQ),5*nQ,5*nQ)
  }else{
    BQ <- NULL
    SQ <- NULL
    initViQ <- NULL
  }
  A1<-magic::adiag(A,BM,BQ)
  Q1<-magic::adiag(Q, SM, SQ)
  A<-A1 # Transition matrix (factors + idiosyncratic AR components)
  Q<-Q1 # State innovation covariance matrix
  # Initial conditions
  initZ = zeros(size(A,1),1); # States
  initV = magic::adiag(initV, initViM, initViQ) # Covariance of states
  return(list(A = A, C = C, Q = Q, R = R, initZ = initZ, initV = initV))
}
# Estimate a dynamic factor model with block loading restrictions,
# idiosyncratic AR(1) components and mixed (monthly/quarterly)
# frequencies via the EM algorithm.
#
# Arguments:
#   X   - T x N data matrix (monthly variables first, then quarterly).
#   Par - list of model settings: r (factors per block), p (factor VAR
#         lag order), max_iter, i_idio (idiosyncratic-component
#         indicator), Rconstr and q (quarterly aggregation constraints),
#         nQ (number of quarterly series) and blocks (N x n_blocks
#         loading-restriction matrix).
#
# Returns a list with the smoothed data/factors, the estimated system
# matrices (A, C, Q, R), the standardisation constants (Mx, Wx) and the
# initial state (Z_0, V_0); see the Res fields below.
EM_DFM_SS_block_idioQARMA_restrMQ <- function(X, Par) {
  # Relative log-likelihood change below which EM is declared converged.
  thresh <- 1e-4
  r <- Par$r
  p <- Par$p
  max_iter <- Par$max_iter
  i_idio <- Par$i_idio
  R_mat <- Par$Rconstr
  q <- Par$q
  nQ <- Par$nQ
  blocks <- Par$blocks

  ## Data preparation -------------------------------------------------
  TT <- dim(X)[1]
  N <- dim(X)[2]

  # Standardise each series (mean zero, unit variance), ignoring NAs.
  Mx <- colMeans(X, na.rm = TRUE)
  Wx <- vapply(seq_len(N), function(j) stats::sd(X[, j], na.rm = TRUE),
               numeric(1))
  xNaN <- (X - kronecker(t(Mx), rep(1, TT))) / kronecker(t(Wx), rep(1, TT))

  ## Initial conditions -----------------------------------------------
  # Missing values are interpolated only to compute starting values.
  optNaN <- list()
  optNaN$method <- 2  # drop leading/closing mostly-missing rows
  optNaN$k <- 3
  res_InitCond <- InitCond(xNaN, r, p, blocks, optNaN, R_mat, q, nQ, i_idio)
  A <- res_InitCond$A
  C <- res_InitCond$C
  Q <- res_InitCond$Q
  R <- res_InitCond$R
  Z_0 <- res_InitCond$initZ
  V_0 <- res_InitCond$initV

  # Auxiliary variables for the iterations.
  previous_loglik <- -Inf
  num_iter <- 0
  LL <- -Inf
  converged <- FALSE

  # y for the estimation keeps the missing data (the Kalman filter
  # handles NAs observation by observation).
  y <- t(xNaN)

  ## The EM loop ------------------------------------------------------
  # The model can be written as
  #   y = C*Z + e
  #   Z = A*Z(-1) + v
  # where y is N x T, Z is (p*r) x T, etc.
  # Remove rows that are entirely missing before estimation.
  optNaN$method <- 3
  y_est <- remNaNs_spline(xNaN, optNaN)
  y_est <- t(y_est$X)
  loglik_aux <- -Inf
  while ((num_iter < max_iter) && !converged) {
    # One EM step: E-step via the Kalman smoother, M-step updates of
    # the system matrices.
    res_EMstep <- EMstep(y_est, A, C, Q, R, Z_0, V_0, r, p, R_mat, q,
                         nQ, i_idio, blocks)
    C <- res_EMstep$C_new
    R <- res_EMstep$R_new
    A <- res_EMstep$A_new
    Q <- res_EMstep$Q_new
    Z_0 <- res_EMstep$Z_0
    V_0 <- res_EMstep$V_0
    loglik <- res_EMstep$loglik
    # Progress report every 5 iterations.
    if ((num_iter %% 5) == 0 && num_iter > 0) {
      message(num_iter, "th iteration: \nThe loglikelihood went from ",
              round(loglik_aux, 4), " to ", round(loglik, 4))
    }
    loglik_aux <- loglik
    # Convergence check (skipped during the first iterations).
    if (num_iter > 2) {
      res_em_converged <- em_converged(loglik, previous_loglik, thresh, 1)
      converged <- res_em_converged$converged
    }
    LL <- cbind(LL, loglik)
    previous_loglik <- loglik
    num_iter <- num_iter + 1
  }

  ## Final run of the Kalman filter/smoother --------------------------
  res_runKF <- runKF(y, A, C, Q, R, Z_0, V_0)
  Zsmooth <- t(res_runKF$xsmooth)
  x_sm <- Zsmooth[2:dim(Zsmooth)[1], ] %*% t(C)

  ## Loading the structure with the results ---------------------------
  Res <- list()
  Res$x_sm <- x_sm  # smoothed data, standardised units
  # Smoothed data mapped back to the original units.
  Res$X_sm <- kronecker(t(Wx), rep(1, TT)) * x_sm + kronecker(t(Mx), rep(1, TT))
  Res$FF <- Zsmooth[2:dim(Zsmooth)[1], ]  # smoothed states
  Res$C <- C
  Res$R <- R
  Res$A <- A
  Res$Q <- Q
  Res$Mx <- Mx
  Res$Wx <- Wx
  Res$Z_0 <- Z_0
  Res$V_0 <- V_0
  Res$r <- r
  Res$p <- p
  return(Res)
}
# Internal: one EM iteration for the block dynamic factor model.
#
# E-step: run the Kalman filter/smoother (runKF) with the current
# parameters to obtain smoothed states and covariances.
# M-step: update the transition matrices (per factor block and for the
# idiosyncratic components), the loadings (monthly unrestricted,
# quarterly subject to the aggregation constraint R_con * C = q_con) and
# the observation covariance.
#
# Returns list(C_new, R_new, A_new, Q_new, Z_0, V_0, loglik).
EMstep <- function(y = NULL, A = NULL, C = NULL, Q = NULL, R = NULL, Z_0 = NULL, V_0 = NULL,
                   r = NULL, p = NULL, R_mat = NULL, q = NULL, nQ = NULL, i_idio = NULL, blocks = NULL){
  n <- size(y,1)
  TT <- size(y,2)
  nM <- n - nQ   # number of monthly series
  pC <- size(R_mat,2)
  ppC <- max(p,pC)
  n_b <- size(blocks,2)
  # Compute the (expected) sufficient statistics for a single Kalman filter sequence.
  # Running the Kalman filter with the current estimates of the parameters.
  res_runKF = runKF(y, A, C, Q, R, Z_0, V_0);
  Zsmooth<-res_runKF$xsmooth
  Vsmooth<-res_runKF$Vsmooth
  VVsmooth<-res_runKF$VVsmooth
  loglik<-res_runKF$loglik
  A_new <- A
  Q_new <- Q
  V_0_new <- V_0
  # --- Transition equation update, block by block --------------------
  for(i in 1:n_b){
    r_i <- r[i]
    rp <- r_i*p
    # rp1 = offset of block i inside the stacked state vector.
    if(i == 1){
      rp1 <- 0*ppC
    }else{
      rp1 <- sum(r[1:(i-1)])*ppC
    }
    A_i <- A[(rp1+1):(rp1+r_i*ppC), (rp1+1):(rp1+r_i*ppC)]
    Q_i <- Q[(rp1+1):(rp1+r_i*ppC), (rp1+1):(rp1+r_i*ppC)]
    # Expected second moments of the smoothed states; the scalar branch
    # (rp == 1) avoids apply() on a degenerate dimension.
    if(rp==1){
      EZZ <- t(Zsmooth[(rp1+1):(rp1+rp),2:ncol(Zsmooth)]) %*% Zsmooth[(rp1+1):(rp1+rp),2:ncol(Zsmooth)] +
        sum(Vsmooth[(rp1+1):(rp1+rp),(rp1+1):(rp1+rp),2:dim(Vsmooth)[3]]) # E(Z'Z)
      EZZ_BB <- t(Zsmooth[(rp1+1):(rp1+rp),1:(ncol(Zsmooth)-1)]) %*% Zsmooth[(rp1+1):(rp1+rp),1:(ncol(Zsmooth)-1)] +
        sum(Vsmooth[(rp1+1):(rp1+rp),(rp1+1):(rp1+rp),1:(dim(Vsmooth)[3]-1)]) #E(Z(-1)'Z_(-1))
      EZZ_FB <- t(Zsmooth[(rp1+1):(rp1+rp),2:ncol(Zsmooth)]) %*% Zsmooth[(rp1+1):(rp1+rp),1:(ncol(Zsmooth)-1)] +
        sum(VVsmooth[(rp1+1):(rp1+rp),(rp1+1):(rp1+rp),]) #E(Z'Z_(-1))
    }else{
      EZZ <- (Zsmooth[(rp1+1):(rp1+rp),2:ncol(Zsmooth)]) %*% t(Zsmooth[(rp1+1):(rp1+rp),2:ncol(Zsmooth)]) +
        apply(Vsmooth[(rp1+1):(rp1+rp),(rp1+1):(rp1+rp),2:dim(Vsmooth)[3]],c(1,2),sum) # E(Z'Z)
      EZZ_BB <- (Zsmooth[(rp1+1):(rp1+rp),1:(ncol(Zsmooth)-1)]) %*% t(Zsmooth[(rp1+1):(rp1+rp),1:(ncol(Zsmooth)-1)]) +
        apply(Vsmooth[(rp1+1):(rp1+rp),(rp1+1):(rp1+rp),1:(dim(Vsmooth)[3]-1)],c(1,2),sum) #E(Z(-1)'Z_(-1))
      EZZ_FB <- (Zsmooth[(rp1+1):(rp1+rp),2:ncol(Zsmooth)]) %*% t(Zsmooth[(rp1+1):(rp1+rp),1:(ncol(Zsmooth)-1)]) +
        apply(VVsmooth[(rp1+1):(rp1+rp),(rp1+1):(rp1+rp),],c(1,2),sum) #E(Z'Z_(-1))
    }
    # GLS-type updates of the VAR coefficients and innovation covariance.
    A_i[1:r_i,1:rp] <- EZZ_FB[1:r_i,1:rp] %*% solve(EZZ_BB[1:rp,1:rp])
    Q_i[1:r_i,1:r_i] <- (EZZ[1:r_i,1:r_i] - A_i[1:r_i,1:rp] %*% t(matrix(EZZ_FB[1:r_i,1:rp],r_i,rp))) / TT
    A_new[(rp1+1):(rp1+r_i*ppC),(rp1+1):(rp1+r_i*ppC)] <- A_i
    Q_new[(rp1+1):(rp1+r_i*ppC),(rp1+1):(rp1+r_i*ppC)] <- Q_i;
    V_0_new[(rp1+1):(rp1+r_i*ppC),(rp1+1):(rp1+r_i*ppC)] <- Vsmooth[(rp1+1):(rp1+r_i*ppC),(rp1+1):(rp1+r_i*ppC),1]
  }
  # --- Idiosyncratic (monthly) AR(1) components ----------------------
  rp1 <- sum(sum(r))*ppC   # offset where the idiosyncratic states start
  niM <- sum(i_idio[1:nM])
  # idiosyncratic
  EZZ <- diag(diag(Zsmooth[(rp1+1):nrow(Zsmooth),2:ncol(Zsmooth)] %*% t(Zsmooth[(rp1+1):nrow(Zsmooth),2:ncol(Zsmooth)]))) + diag(diag(apply(Vsmooth[(rp1+1):dim(Vsmooth)[1],(rp1+1):dim(Vsmooth)[2],2:dim(Vsmooth)[3]], MARGIN = 1:2, FUN = sum))) #E(Z'Z)
  EZZ_BB <- diag(diag(Zsmooth[(rp1+1):nrow(Zsmooth),1:(ncol(Zsmooth)-1)] %*% t(Zsmooth[(rp1+1):nrow(Zsmooth),1:(ncol(Zsmooth)-1)]))) + diag(diag(apply(Vsmooth[(rp1+1):dim(Vsmooth)[1],(rp1+1):dim(Vsmooth)[2],1:(dim(Vsmooth)[3]-1)], MARGIN = 1:2, FUN = sum))) #E(Z(-1)'Z_(-1))
  EZZ_FB <- diag(diag(Zsmooth[(rp1+1):nrow(Zsmooth),2:ncol(Zsmooth)] %*% t(Zsmooth[(rp1+1):nrow(Zsmooth),1:(ncol(Zsmooth)-1)]))) + diag(diag(apply(VVsmooth[(rp1+1):dim(VVsmooth)[1],(rp1+1):dim(VVsmooth)[2],], MARGIN = 1:2, FUN = sum))) #E(Z'Z_(-1))
  A_i <- EZZ_FB %*% diag(1/diag(EZZ_BB))
  Q_i <- (EZZ - A_i %*% t(EZZ_FB)) / TT
  A_new[(rp1+1):(rp1+niM),(rp1+1):(rp1+niM)] <- A_i[1:niM,1:niM]
  Q_new[(rp1+1):(rp1+niM),(rp1+1):(rp1+niM)] <- Q_i[1:niM,1:niM]
  V_0_new[(rp1+1):(rp1+niM),(rp1+1):(rp1+niM)] <- diag(diag(Vsmooth[(rp1+1):(rp1+niM),(rp1+1):(rp1+niM),1]))
  Z_0 <- Zsmooth[,1] #zeros(size(Zsmooth,1),1); #
  # Zero out missing observations so they drop from the sums below.
  # nanY <- is.nan(y)
  nanY<-is.na(y)
  y[nanY] <- 0
  # --- LOADINGS update ----------------------------------------------
  C_new <- C
  # Blocks: state-index masks per distinct block pattern and the
  # stacked quarterly aggregation constraints.
  bl <- unique(blocks)
  n_bl <- size(bl,1)
  bl_idxM <- NULL
  bl_idxQ <- NULL
  R_con <- NULL
  q_con <- NULL
  for(i in 1:n_b){
    bl_idxQ <- cbind(bl_idxQ, repmat(bl[,i],1,r[i]*ppC))
    bl_idxM <- cbind(bl_idxM, repmat(bl[,i],1,r[i]), zeros(n_bl,r[i]*(ppC-1)))
    if(i == 1){
      R_con <- kronecker(R_mat,eye(r[i]))
    }else{
      R_con <- as.matrix(Matrix::bdiag(R_con, kronecker(R_mat,eye(r[i]))))
    }
    q_con <- rbind(q_con, zeros(r[i]*size(R_mat,1),1))
  }
  bl_idxM <- bl_idxM == 1
  bl_idxQ <- bl_idxQ == 1
  #idio
  i_idio_M <- i_idio[1:nM]
  n_idio_M <- length(find(i_idio_M))
  c_i_idio <- cumsum(i_idio)
  for(i in 1:n_bl){
    bl_i <- bl[i,]
    rs <- sum(r[bl_i == 1])
    # Series whose block pattern equals bl_i.
    idx_i <- NULL
    for(k in 1:nrow(blocks)){
      idx_i[k] <- sum(blocks[k,] == bl_i) == size(blocks,2)
    }
    idx_i <- find(idx_i)
    # MONTHLY: unrestricted GLS update of the monthly loadings.
    idx_iM <- idx_i[idx_i < (c(nM) + 1)]
    n_i <- length(idx_iM)
    if(n_i==0){
      denom <- NULL
      nom <- NULL
    } else {
      denom <- zeros(n_i*rs,n_i*rs)
      nom <- zeros(n_i,rs)
      i_idio_i <- i_idio_M[idx_iM] == 1
      i_idio_ii <- c_i_idio[idx_iM]
      i_idio_ii <- i_idio_ii[i_idio_i]
      for(t in 1:TT){
        # nanYt masks out the series missing at time t.
        nanYt <- diag(!nanY[idx_iM,t])
        nn <- sum(bl_idxM[i,])
        denom <- denom + kronecker(Zsmooth[bl_idxM[i,],t+1][1:nn] %*% t(Zsmooth[bl_idxM[i,],t+1][1:nn]) + Vsmooth[bl_idxM[i,],bl_idxM[i,],t+1][1:nn,1:nn], nanYt)
        nom <- nom + y[idx_iM,t] %*% t(Zsmooth[bl_idxM[i,],t+1][1:nn]) - nanYt[,i_idio_i] %*% (Zsmooth[rp1+i_idio_ii,t+1] %*% t(Zsmooth[bl_idxM[i,],t+1][1:nn]) + Vsmooth[rp1+i_idio_ii,bl_idxM[i,],t+1][,1:nn])
      }
      vec_C <- solve(denom) %*% c(nom)
      C_new[idx_iM,bl_idxM[i,]][,1:nn] <- matrix(vec_C,n_i,rs)
    }
    # QUARTERLY: loadings updated under the aggregation constraint
    # R_con_i %*% C = q_con_i (restricted least squares).
    idx_iQ <- idx_i[idx_i>c(nM)]
    rps <- rs*ppC
    R_con_i <- R_con[,bl_idxQ[i,]]
    q_con_i <- q_con
    # Drop all-zero constraint rows for this block pattern.
    no_c <- !(rowSums(R_con_i == 0) == ncol(R_con_i))
    R_con_i <- R_con_i[no_c,]
    q_con_i <- q_con_i[no_c,]
    for(j in idx_iQ){
      denom <- zeros(rps,rps)
      nom <- zeros(1,rps)
      idx_jQ <- j-c(nM)
      # NOTE(review): the whole quarterly update is guarded by i != 1,
      # i.e. it is skipped for the first block pattern — confirm this is
      # intentional and not a leftover guard.
      if(i != 1){
        # State indices of the quarterly idiosyncratic component of j.
        i_idio_jQ <- (rp1+n_idio_M+5*(idx_jQ-1)+1):(rp1+n_idio_M+5*idx_jQ)
        V_0_new[i_idio_jQ,i_idio_jQ] <- Vsmooth[i_idio_jQ,i_idio_jQ,1]
        A_new[i_idio_jQ[1],i_idio_jQ[1]] <- A_i[i_idio_jQ[1]-rp1,i_idio_jQ[1]-rp1]
        Q_new[i_idio_jQ[1],i_idio_jQ[1]] <- Q_i[i_idio_jQ[1]-rp1,i_idio_jQ[1]-rp1]
        for(t in 1:TT){
          nanYt <- as.vector(!nanY[j,t])*1
          nn2 <- sum(bl_idxQ[i,])
          denom <- denom + kronecker(Zsmooth[bl_idxQ[i,],t+1][1:nn2] %*% t(Zsmooth[bl_idxQ[i,],t+1][1:nn2]) + Vsmooth[bl_idxQ[i,],bl_idxQ[i,],t+1][1:nn2,1:nn2],nanYt)
          nom <- nom + y[j,t] %*% t(Zsmooth[bl_idxQ[i,],t+1][1:nn2])
          # (1,2,3,2,1) is the monthly-to-quarterly aggregation weight
          # vector applied to the idiosyncratic quarterly states.
          nom <- nom - nanYt %*% (matrix(c(1,2,3,2,1), nrow = 1) %*% Zsmooth[i_idio_jQ,t+1] %*% t(Zsmooth[bl_idxQ[i,],t+1][1:nn2]) +
                                    matrix(c(1,2,3,2,1), nrow = 1) %*% Vsmooth[i_idio_jQ,bl_idxQ[i,],t+1][,1:nn2])
        }
        C_i <- solve(denom) %*% t(nom)
        # Project the unrestricted estimate onto the constraint set.
        C_i_constr <- C_i - solve(denom) %*% t(R_con_i) %*% solve(R_con_i %*% solve(denom) %*% t(R_con_i)) %*% (R_con_i %*% C_i - q_con_i)
        nn3 <- sum(bl_idxQ[i,])
        C_new[j,bl_idxQ[i,]][1:nn3] <- C_i_constr
      }
    }
  }
  # --- Observation covariance update --------------------------------
  R_new <- zeros(n,n)
  for(t in 1:TT){
    nanYt <- diag(!nanY[,t])*1 == 1
    R_new <- R_new + (y[,t] - nanYt %*% C_new %*% Zsmooth[,t+1]) %*% t(y[,t] - nanYt %*% C_new %*% Zsmooth[,t+1]) + nanYt %*% C_new %*% Vsmooth[,,t+1] %*% t(C_new) %*% nanYt + (eye(n)-nanYt) %*% R %*% (eye(n)-nanYt)
  }
  R_new <- R_new/TT
  RR <- diag(R_new) #RR(RR<1e-2) = 1e-2;
  # Pin the variances of series with idiosyncratic states (and all
  # quarterly series) to a small constant.
  RR[i_idio_M] <- 1e-04
  if(nM<length(RR)){
    RR[(nM+1):length(RR)] <- 1e-04
  }
  R_new <- diag(RR)
  if(!is.matrix(Z_0)){
    Z_0<-matrix(Z_0,length(Z_0),1)
  }
  # output
  return(list(C_new = C_new, R_new = R_new, A_new = A_new, Q_new = Q_new, Z_0 = Z_0, V_0 = V_0, loglik = loglik))
}
# Internal: fixed-interval smoother (see Harvey, 1989, p. 154).
#
# Runs a backward pass through the Kalman-filter output and adds the
# smoothed state vector and its covariance matrices to S. Use in
# conjunction with the filter SKF.
#
# INPUT
#   Y        data (unused here; kept for a uniform signature)
#   Z, R     observation matrix and covariance (unused in the recursion)
#   TT, Q    transition matrix and state covariance
#   S        list produced by SKF with:
#              S$Am  : predicted states a_t|t-1   (m x nobs)
#              S$Pm  : predicted covariances      (m x m x nobs)
#              S$AmU : filtered states a_t|t      (m x (nobs+1))
#              S$PmU : filtered covariances       (m x m x (nobs+1))
#              S$KZ  : K_T * Z term of the last update (for PmT_1)
# OUTPUT: S with the smoothed quantities appended:
#   S$AmT   : a_t|T        (m x (nobs+1))
#   S$PmT   : Cov(a_t|T)   (m x m x (nobs+1))
#   S$PmT_1 : Cov(a_t, a_{t-1} | T)
FIS <- function(Y,Z,R,TT,Q,S){
  # library(corpcor)
  m<-dim(S$Am)[1]
  nobs<-dim(S$Am)[2]
  S$AmT = zeros(m,nobs+1)
  S$PmT = array(0,c(m,m,nobs+1))
  # Initialise the backward recursion at the last filtered estimate.
  S$AmT[,nobs+1] <- S$AmU[,nobs+1]
  S$PmT[,,nobs+1] <- S$PmU[,,nobs+1]
  S$PmT_1<-array(0,c(m,m,nobs))
  S$PmT_1[,,nobs] <- (eye(m)-S$KZ)%*%TT%*%S$PmU[,,nobs]
  # Smoothing gain J_t = P_t|t * TT' * pinv(P_{t+1}|t); the
  # pseudoinverse guards against singular predicted covariances.
  pinv<-corpcor::pseudoinverse(S$Pm[,,nobs])
  J_2 <- S$PmU[,,nobs]%*%t(TT)%*%pinv
  for (t in nobs:1){
    PmU <- S$PmU[,,t]
    Pm1 <- S$Pm[,,t]
    P_T <- S$PmT[,,t+1]
    P_T1 <- S$PmT_1[,,t]
    J_1 <- J_2
    # Smoothed state and covariance at time t.
    S$AmT[,t] <- S$AmU[,t] + J_1%*%(S$AmT[,t+1] - TT%*%S$AmU[,t])
    S$PmT[,,t] <- PmU + J_1%*%(P_T - Pm1)%*%t(J_1)
    if(t>1){
      # Gain for the next (earlier) step and lag-one covariance.
      pinv<-corpcor::pseudoinverse(S$Pm[,,t-1])
      J_2 <- S$PmU[,,t-1]%*%t(TT)%*%pinv
      S$PmT_1[,,t-1] = PmU%*%t(J_2)+J_1%*%(P_T1-TT%*%PmU)%*%t(J_2)
    }
  }
  return(S)
}
# Internal: Kalman filter for stationary systems with missing data.
#
# The model is
#   y_t     = Z * a_t + eps_t,   Var(eps_t) = R
#   a_{t+1} = TT * a_t + u_t,    Var(u_t)   = Q
#
# INPUT
#   Y        data, n x nobs (columns are time periods; may contain NAs)
#   Z, R     observation matrix and covariance
#   TT, Q    transition matrix and state covariance
#   A_0, P_0 initial state and state covariance
# OUTPUT (list S)
#   S$Am     predicted states a_t|t-1          (m x nobs)
#   S$AmU    filtered states a_t|t             (m x (nobs+1))
#   S$Pm     predicted covariances P_t|t-1     (m x m x nobs)
#   S$PmU    filtered covariances P_t|t        (m x m x (nobs+1))
#   S$loglik value of the log-likelihood
#   S$KZ     gain-times-Z term of the LAST update (used by FIS)
SKF <-function(Y,Z,R,TT,Q,A_0,P_0){
  #Y = y; Z = C; TT = A; A_0 = x_0; P_0 = Sig_0
  # Output structure & dimensions
  n <- dim(Z)[1]
  m <- dim(Z)[2]
  nobs <- size(Y,2)
  S<-list()
  S$Am <- array(NA,c(m,nobs))
  S$Pm <- array(NA,c(m,m,nobs))
  S$AmU <- array(NA,c(m,nobs+1))
  S$PmU <- array(NA,c(m,m,nobs+1))
  S$loglik <- 0
  # Initial conditions A_0|0 and P_0|0.
  Au <- A_0; # A_0|0;
  Pu <- P_0; # P_0|0
  S$AmU[,1] = Au;
  S$PmU[,,1] = Pu;
  for(t in 1:nobs){
    # Prediction step: A = A_t|t-1 and P = P_t|t-1
    A <- TT%*%Au;
    P <- TT%*%Pu%*%t(TT) + Q;
    P <- 0.5*(P+t(P))   # enforce symmetry against round-off
    # Handle missing data: MissData drops the rows of y_t/Z/R that are
    # missing at time t.
    res_MissData <- MissData(Y[,t],Z,R)
    y_t <- res_MissData$y
    Z_t <- res_MissData$C
    R_t <- res_MissData$R
    L_t <- res_MissData$L
    # if(is.null(y_t)){
    if(sum(is.na(y_t))==length(y_t)){
      # No observation at t: the filtered estimate is the prediction.
      Au <- A
      Pu <- P
    } else {
      if(!is.matrix(Z_t)){
        # Single observed series: keep Z_t a 1 x m matrix.
        Z_t<-t(as.matrix(Z_t))
      }
      # Update step: Kalman gain PZF, innovation V, filtered moments.
      PZ <- P%*%t(Z_t)
      iF <- solve(Z_t%*%PZ + R_t)
      PZF <- PZ%*%iF
      V <- y_t - Z_t%*%A
      Au <- A + PZF%*%V
      Pu <- P - PZF%*%t(PZ)
      Pu <- 0.5*(Pu+t(Pu))
      # Gaussian log-likelihood contribution of the innovation.
      S$loglik <- S$loglik + 0.5*(log(det(iF)) - t(V)%*%iF%*%V)
    }
    S$Am[,t] <- A
    S$Pm[,,t] <- P
    # Au = A_t|t & Pu = P_t|t
    S$AmU[,t+1] <- Au
    S$PmU[,,t+1] <- Pu
  } # t
  # KZ of the LAST period, consumed by the smoother FIS.
  # NOTE(review): this relies on y_t/PZF/Z_t left over from the final
  # loop iteration; if the last period is fully missing KZ is set to
  # zero, but PZF would be stale from an earlier period otherwise —
  # confirm the last period always has at least one observation.
  if(sum(is.na(y_t))==length(y_t)){
    S$KZ <- zeros(m,m)
  }else{
    S$KZ <- PZF%*%Z_t
  }
  return(S)
}
MissData <- function(y,C,R){
  # ______________________________________________________________________
  # Eliminates the rows of y -- and the corresponding rows/columns of the
  # system matrices C and R -- that hold missing data (NA) in y.
  #
  # INPUT
  #   y  vector of observations at time t     (n x 1)
  #   C  observation matrix                   (n x m)
  #   R  observation covariance               (n x n)
  # OUTPUT (list)
  #   y  reduced observation vector           (# x 1)
  #   C  reduced observation matrix           (# x m)
  #   R  reduced covariance                   (# x #)
  #   L  identity columns of the kept rows, to restore the original
  #      dimension                            (n x #)
  # where # is the number of available (non-NA) entries of y.
  #
  # NOTE(review): when exactly one observation survives, C[ix,] and
  # R[ix,ix] drop to a plain vector/scalar; the caller SKF() compensates
  # (see its !is.matrix(Z_t) branch), so that drop behaviour is kept as-is.
  # ______________________________________________________________________
  ix <- !is.na(y)
  # diag(n) builds the n x n identity matrix with base R, replacing the
  # matlab::eye() helper that was previously required just for this line.
  e <- diag(length(y))
  L <- e[,ix]
  y <- y[ix]
  C <- C[ix,]
  R <- R[ix,ix]
  return(list(y=y,C=C,R=R,L=L))
}
# %%% Replication files for:
# %%% ""Nowcasting", 2010, (by Marta Banbura, Domenico Giannone and Lucrezia Reichlin),
# %%% in Michael P. Clements and David F. Hendry, editors, Oxford Handbook on Economic Forecasting.
# %%%
# %%% The software can be freely used in applications.
# %%% Users are kindly requested to add acknowledgements to published work and
# %%% to cite the above reference in any resulting publications
# %--------------------------------------------------------------------------
# % KALMAN FILTER
# %--------------------------------------------------------------------------
runKF <- function(y, A, C, Q, R, x_0, Sig_0){
  # Kalman smoothing driver: forward pass with the filter SKF(), then the
  # backward fixed-interval smoother FIS().
  #
  # y            data (one column per period)
  # A, C, Q, R   transition / observation matrices and their covariances
  # x_0, Sig_0   initial state mean and covariance
  #
  # Returns list(xsmooth, Vsmooth, VVsmooth, loglik): smoothed states,
  # their covariances, the lag-one covariances and the filter log-likelihood.
  filtered <- SKF(y, C, R, A, Q, x_0, Sig_0)
  smoothed <- FIS(y, C, R, A, Q, filtered)
  list(xsmooth = smoothed$AmT,
       Vsmooth = smoothed$PmT,
       VVsmooth = smoothed$PmT_1,
       loglik = smoothed$loglik)
}
em_converged <- function(loglik = NULL, previous_loglik = NULL, threshold = NULL, check_increased = NULL){
  # EM_CONVERGED Has EM converged?
  #
  # We have converged if the slope of the log-likelihood function falls below
  # 'threshold', i.e. |f(t) - f(t-1)| / avg < threshold, where
  # avg = (|f(t)| + |f(t-1)|)/2 and f(t) is the log lik at iteration t.
  # This stopping criterion is from Numerical Recipes in C p423.
  #
  # If we are doing MAP estimation (using priors), the likelihood can decrease
  # even though the mode of the posterior is increasing; 'check_increased'
  # controls whether a decrease is reported.
  #
  # Arguments:
  #   loglik          log-likelihood at the current iteration
  #   previous_loglik log-likelihood at the previous iteration
  #   threshold       relative-change tolerance (default 1e-4)
  #   check_increased if truthy (default 1), warn when the likelihood decreased
  # Returns: list(converged = 0/1, decrease = 0/1)
  #
  # Defaults are now resolved per argument. The previous version emulated
  # MATLAB's nargin by counting non-NULL arguments, which broke when arguments
  # were supplied out of order (e.g. check_increased given but threshold
  # omitted left threshold == NULL, and the comparison below then errored).
  if (is.null(threshold)) {threshold <- 1e-4}
  if (is.null(check_increased)) {check_increased <- 1}
  converged <- 0
  decrease <- 0
  # Report a decrease larger than numerical noise (skipped if the caller
  # explicitly passes check_increased = 0 / FALSE).
  if (check_increased) {
    if (loglik - previous_loglik < -1e-3) { # allow for a little imprecision
      message(paste("******likelihood decreased from", round(previous_loglik, 4), "to", round(loglik, 4)))
      decrease <- 1
    }
  }
  # Relative change of the log-likelihood; 2.2204e-16 (machine epsilon) keeps
  # the denominator non-zero when both log-likelihoods are 0.
  delta_loglik <- abs(loglik - previous_loglik)
  avg_loglik <- (abs(loglik) + abs(previous_loglik) + 2.2204e-16)/2
  if ((delta_loglik / avg_loglik) < threshold) {converged <- 1}
  # output
  list(converged = converged, decrease = decrease)
}
6d42a46c99998e1abf73f918a157618b1e914653 | b6e49b79809de5adc9ade2863db75bc3200b164f | /seurat_object_analysis_v1_and_v2.R | 15584a86fef03cb2f61c3e3dc8df9244501f1395 | [] | no_license | guokai8/2018-Developmental-single-cell-RNA-sequencing | 764b32c3a0c053f20869dd2a5a9064b0ddd486b6 | 3662d349cfedc11ea1da7e6525ec07a0e31666cd | refs/heads/master | 2022-02-24T17:42:25.710686 | 2019-09-17T15:56:24 | 2019-09-17T15:56:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,511 | r | seurat_object_analysis_v1_and_v2.R | ##### Analyze Seurat Object (or Subclustered Seurat Object) ####
### sections do not necessarily need to be run in order
library(Seurat) ## version 2.3.0. Can still be used with seurat v1, some default settings change
library(dplyr)
#### Inputs for Program ####
input_file <- "path_to_seurat_object.Rdata"
output="~/Documents/"
load(input_file)
## rename seurat object
seurat_ob <- seurat_object ## if using object created in seurat v1, can update object with seurat_ob=UpdateSeuratObject(seurat_object)
#######################################################################################
########## Choose Resolution for downstream analysis, if required #########
seurat_ob <- SetAllIdent(seurat_ob, id = "res.0.8")
######### PCA Analysis ############
png(paste(output,"pca1_2.png",sep=""))
PCAPlot(seurat_ob,1,2)
dev.off()
pdf(paste(output,"pcaheatmap_top10.pdf",sep=""))
PCHeatmap(seurat_ob, pc.use = 1:10, cells.use=500,do.balanced = TRUE, cexRow=0.5)
dev.off()
ProjectPCA(seurat_ob)
PrintPCA(seurat_ob, pcs.print = 1:2, use.full=T)
png(paste(root_dir,file_name, "_vizpca_1_2.png",sep=""))
VizPCA(seurat_ob, 2:3)
dev.off()
########## Plot TSNE results ##########
png(paste(output, "_res08.png",sep=""), width=695, height=538)
TSNEPlot(seurat_ob,do.label=T,pt.size=3,do.return=T, label.size=8)
dev.off()
########## Find Markers of Clusters ################
## Find Cluster Markers ##
### seurat v1 - default is "bimod"
### seurat v2 - default is "wilcox", "MAST" is also available
### can alter with parameter test.use
all_markers <- FindAllMarkers(seurat_ob,only.pos=T, min.pct=0.25, thresh.use=0.25)
write.csv(all_markers, file=paste(output, "_res08_markers.csv",sep=""))
df <- NULL
for(x in unique(all_markers$cluster)){clusx <- all_markers[which(all_markers$cluster==x),]
sorted_clus <- clusx[order(clusx$avg_diff,decreasing=TRUE),]
df <- rbind(df, sorted_clus)}
write.csv(df, file=paste(output,"_res08_sorted_markers_avgdiff.csv",sep=""))
# Find Pairwise Markers #
## between two clusters
markers <- FindMarkers(seurat_ob,2,3,min.pct=0.25, thresh.use=0.25, only.pos=F)
sorted_markers <- markers[order(markers$avg_diff,decreasing=TRUE),]
## 1 cluster vs. 2 others
markers <- FindMarkers(seurat_ob,1,c(2:3),min.pct=0, thresh.use=0, only.pos=F)
write.csv(sorted_markers, file=paste(output, "_clus1_clus2_3_avgdiff_markers.csv",sep=""))
######## Feature and Violin Plots for Differentially Expressed Genes #######
### load in already calculated markers if needed ###
all_markers <- read.csv("path_to_markers.csv") ## from FindAllMarkers
sorted <- all_markers %>% group_by(cluster) %>% top_n(50, avg_diff)
genes <- unique(sorted$gene)
endo_genes=c("Gcg","Ins1","Fev","Neurog3")
feature_plots <- function(genes){
  # Write one FeaturePlot PDF per gene; reads `seurat_ob` and the `output`
  # path prefix from the calling environment.
  for (gene in genes) {
    out_path <- paste0(output, "feature_plot_", gene, ".pdf")
    pdf(out_path, width = 7, height = 5, useDingbats = FALSE)
    FeaturePlot(seurat_ob, gene, cols.use = c("gray", "red"), pt.size = 2, no.legend = TRUE, no.axes = TRUE)
    dev.off()
  }
}
# Render feature plots for the endocrine marker genes and the top cluster markers.
feature_plots(endo_genes)
feature_plots(genes)
violin_plots <- function(genes){
  # Write one violin-plot PNG per gene; reads `seurat_ob` and the `output`
  # path prefix from the calling environment.
  for (gene in genes) {
    out_path <- paste0(output, "_violin_", gene, ".png")
    png(out_path, width = 1000, height = 200)
    VlnPlot(seurat_ob, gene, size.x.use = 12, size.y.use = 10, size.title.use = 20)
    dev.off()
  }
}
# Render violin plots for the same two gene sets.
violin_plots(endo_genes)
violin_plots(genes)
######## Dot Plots ###########
# NOTE(review): paste() without sep="" inserts a space, so the file is named
# ".../Documents/ _dotplot.pdf"; also `myPalette` is not defined in this script.
pdf(paste(output, "_dotplot.pdf"),width=15, height=4)
DotPlot(seurat_ob,genes,cols.use=myPalette(low = "blue",high="red"), cex.use = 2)
dev.off()
########################### Change TSNE Resolution ###############################
# Keep the old identities in metadata, then re-cluster at resolution 1.
seurat_ob <- StashIdent(seurat_ob, save.name = "orig.res")
seurat_ob <- FindClusters(seurat_ob,resolution=1,print.output = F)
png(paste(output, "_res1.png",sep=""),width=695, height=538)
TSNEPlot(seurat_ob,
         do.label=T,
         do.return=T,
         pt.size=2,
         label.size=8,
         no.legend=F)
dev.off()
### get cell names of particular cluster ###
new_ob = SubsetData(seurat_ob,ident.use = 15)
new_cell_names = colnames(new_ob@data)
########################## Adding Metadata ##################################
# Map the numeric cluster ids to descriptive labels (one label per current
# identity level, in order), store them as a metadata column, and save.
current.cluster.ids <- levels(seurat_ob@ident)
new.cluster.ids <- c("Acinar","Mature Acinar","Prolif. Acinar","Prolif. Ductal","Ductal","Ngn3","Fev","Beta","Alpha","Epsilon")
seurat_ob@ident <- plyr::mapvalues(seurat_ob@ident, from = current.cluster.ids, to = new.cluster.ids)
meta <- data.frame(Cluster_Names=seurat_ob@ident)
seurat_ob <- AddMetaData(seurat_ob,metadata=meta)
save(seurat_ob, file=paste(output,"analysis.Rdata",sep=""))
|
4a699018c982a33f9854609fad8d4c9c9a7a3309 | afd52451e8845963de4ad1243005834fa0958beb | /sta_r/sta_glm.R | b13566ba37a5050b95aca00a50c5973e8223c017 | [] | no_license | plus4u/R | 7c0d867767ae948b24a15322df11b500abcfd920 | c8c25313567bd8bcf5142a04187a24e0d5ad12d1 | refs/heads/master | 2021-09-19T13:52:40.115595 | 2021-08-11T06:47:22 | 2021-08-11T06:47:22 | 155,179,952 | 1 | 0 | null | null | null | null | UHC | R | false | false | 9,448 | r | sta_glm.R |
getwd()
# NOTE(review): a hard-coded absolute setwd() makes the script machine-specific.
setwd("C:/Users/beomc/OneDrive/바탕 화면/SPSS_DATA")
data <- read.csv(file = "drama_genre.csv")
# Filter the data down to two drama genres (makjang vs melodrama)
data <- subset(data, genre %in% c("막장드라마", "멜로드라마"))
# Re-factor so the unused third genre level is dropped (3 levels -> 2)
data$genre <- as.factor(as.character(data$genre))
# Install and load the scales package for the alpha() colour-transparency helper
# NOTE(review): install.packages() inside a script re-installs on every run.
install.packages("scales")
library(scales)
# 1. Draw the scatter plot, coloured by genre
plot(formula = sum_age_mainactors ~ avg_slap_face,
     data = data,
     col = alpha(c("blue", "green"), 0.8)[data$genre],
     xlab = "회당 뺨 맞는 횟수",
     ylab = "주연배우 나이 합계",
     main = "드라마 장르 분포")
# Draw the legend
legend("topleft",
       legend = levels(data$genre),
       pch = 1,
       col = alpha(c("blue", "green"), 0.8),
       cex = 0.9,
       bty = "n")
# 2. Set the seed for reproducibility
set.seed(9876)
# Assignment index for a 6 : 4 train_valid / test split
idx <- sample(x = c("train_valid", "test"),
              size = nrow(data),
              replace = TRUE,
              prob = c(6, 4))
# Split the data according to idx
train_valid <- data[idx == "train_valid", ]
test <- data[idx == "test", ]
# Split the test data into predictors / response (column 3 is the genre
# factor, per its use as the classification target below)
test_x <- test[, -3]
test_y <- test[, 3]
# scales must be loaded before alpha() can be used.
library(scales)
# Scatter plot of the train_valid observations (open squares)
plot(formula = sum_age_mainactors ~ avg_slap_face,
     data = train_valid,
     col = alpha(c("blue", "green"), 0.8)[train_valid$genre],
     pch = 0,
     main = "드라마 장르 분포",
     xlab = "회당 뺨 맞는 횟수",
     ylab = "주연배우 나이 합계")
# Overlay the test observations (filled circles)
points(formula = sum_age_mainactors ~ avg_slap_face,
       data = test,
       pch = 16,
       cex = 1.2,
       col = alpha(c("blue", "green"), 0.5)[test$genre])
# Draw the legend
legend("topleft",
       c(paste0("train_valid ", levels(train_valid$genre)), paste0("test ", levels(test$genre))),
       pch = c(0, 0, 16, 16),
       col = c(alpha(c("blue", "green"), 0.8), alpha(c("blue", "green"), 0.5)),
       cex = 0.9,
       bty = "n")
##
result <- data.frame(fold = rep(c(1, 2, 3), each = 82),
mdl = rep(c("full", "step"), 41),
i = rep(seq(0, 1, length.out = 41), 6),
accuracy = rep(rep(NA, 41), 6))
##
idx <- sample(x = c(1:3), size = nrow(data), replace = TRUE, prob = c(1, 1, 1))
for(k in c(1, 2, 3)){
# idx에 따라 train vs. valid 데이터 나누기
valid <- train_valid[idx == k, ]
train <- train_valid[idx != k, ]
# valid 데이터 설명변수/반응변수 나누기
valid_x <- valid[, -3]
valid_y <- valid[, 3]
}
### 3.
# 로지스틱 회귀분석 모델 생성
full <- glm(formula = genre ~ .,
data = train,
family = "binomial")
# Stepwise
step <- step(object = full,
trace = F)
# full과 step 모델별 확률 예측
full_pred_p <- as.numeric(predict(object = full,
newdata = valid_x,
type = "response"))
step_pred_p <- as.numeric(predict(object = step,
newdata = valid_x,
type = "response"))
# # 4. 분류 정확도의 분모
l <- length(valid_y)
for(i in unlist(unique(result$i))){
# i를 기준으로 0 또는 1로 분류
full_pred_class <- ifelse(full_pred_p < i, levels(valid_y)[1], levels(valid_y)[2])
step_pred_class <- ifelse(step_pred_p < i, levels(valid_y)[1], levels(valid_y)[2])
# 분류 정확도 계산
full_accuracy <- sum(full_pred_class == valid_y) / l
step_accuracy <- sum(step_pred_class == valid_y) / l
# result 테이블에 분류 정확도 입력
result[result$fold == k
& result$mdl == "full"
& result$i == i, "accuracy"] <- full_accuracy
result[result$fold == k
& result$mdl == "step"
& result$i == i, "accuracy"] <- step_accuracy
}
##
# NOTE(review): this section repeats the step/predict/accuracy code from the
# previous section verbatim with the same k, so it only overwrites the same
# rows of `result` with the same values.
# Stepwise
step <- step(object = full,
             trace = F)
# Predicted probabilities from the full and step models
full_pred_p <- as.numeric(predict(object = full,
                                  newdata = valid_x,
                                  type = "response"))
step_pred_p <- as.numeric(predict(object = step,
                                  newdata = valid_x,
                                  type = "response"))
# Denominator of the classification accuracy
l <- length(valid_y)
for(i in unlist(unique(result$i))){
  # Classify into the two genre levels at threshold i
  full_pred_class <- ifelse(full_pred_p < i, levels(valid_y)[1], levels(valid_y)[2])
  step_pred_class <- ifelse(step_pred_p < i, levels(valid_y)[1], levels(valid_y)[2])
  # Classification accuracy
  full_accuracy <- sum(full_pred_class == valid_y) / l
  step_accuracy <- sum(step_pred_class == valid_y) / l
  # Write the accuracies into the result table
  result[result$fold == k
         & result$mdl == "full"
         & result$i == i, "accuracy"] <- full_accuracy
  result[result$fold == k
         & result$mdl == "step"
         & result$i == i, "accuracy"] <- step_accuracy
}
##
# Accuracy-vs-threshold curves: solid lines = full model, dashed = step model,
# one colour per fold.
# full model, fold 1
plot(accuracy ~ i,
     result[result$mdl == "full" & result$fold == 1, ],
     type = "l",
     col = alpha("purple", 0.4),
     ylim = c(0.3, 1),
     xlab = "임계치",
     ylab = "분류 정확도",
     main = "분류 정확도 in full/step 3-Fold CV")
# full model, fold 2
lines(accuracy ~ i,
      result[result$mdl == "full" & result$fold == 2, ],
      col = alpha("orange", 0.4))
# full model, fold 3
lines(accuracy ~ i,
      result[result$mdl == "full" & result$fold == 3, ],
      col = alpha("green", 0.4))
# step model, fold 1
lines(accuracy ~ i,
      result[result$mdl == "step" & result$fold == 1, ],
      col = alpha("purple", 0.5),
      lty = 2)
# step model, fold 2
lines(accuracy ~ i,
      result[result$mdl == "step" & result$fold == 2, ],
      col = alpha("orange", 0.5),
      lty = 2)
# step model, fold 3
lines(accuracy ~ i,
      result[result$mdl == "step" & result$fold == 3, ],
      col = alpha("green", 0.5),
      lty = 2)
# Draw the legend
legend("topleft",
       c("full k=1", "full k=2", "full k=3",
         "step k=1", "step k=2", "step k=3"),
       col = c(alpha(c("purple", "orange", "green"), 0.4),
               alpha(c("purple", "orange", "green"), 0.5)),
       lty = rep(c(1, 2), each = 3),
       bty = "n",
       cex = 0.9)
##
# Install and load plyr for the ddply() grouping helper
install.packages("plyr")
library(plyr)
# Mean classification accuracy per model and threshold
tmp <- ddply(result, .(mdl, i), summarise, avg_accuracy = mean(accuracy))
# Overlay the averaged curve for the full model (solid, red)
lines(avg_accuracy ~ i,
      tmp[tmp$mdl == "full", ],
      col = alpha("red", 0.7),
      lty = 1,
      type = "o",
      pch = 20)
# Averaged curve for the step model (dashed, red)
lines(avg_accuracy ~ i,
      tmp[tmp$mdl == "step", ],
      col = alpha("red", 0.7),
      lty = 2,
      type = "o",
      pch = 20)
# (a stray HTML <span> tag from the original blog post used to sit here)
# Draw the legend
legend("topright",
       c("full avg accuracy", "step avg accuracy"),
       pch = 20,
       col = alpha("red", 0.7),
       lty = rep(c(1, 2)),
       bty = "n",
       cex = 0.9)
# (matching stray </span> tag removed)
# Best model/threshold combination by average accuracy
tmp[tmp$avg_accuracy == max(tmp$avg_accuracy), ]
# Probability that each test row belongs to the second genre level, from the
# step model
test_p <- as.numeric(predict(object = step,
                             newdata = test_x,
                             type = "response"))
# Classify at the 0.45 threshold: below -> first genre level, else second
test_class <- ifelse(test_p < 0.45, levels(data$genre)[1], levels(data$genre)[2])
# Classification accuracy on the held-out test set
sum(test_class == test_y) / length(test_y)
##
# Correct/incorrect flags (drives the pch choice below): "O" right, "X" wrong
ox <- as.factor(ifelse(test_class == test_y, "O", "X"))
# Scatter plot of the train_valid observations
plot(formula = sum_age_mainactors ~ avg_slap_face,
     data = train_valid,
     col = alpha(c("blue", "green"), 0.8)[train_valid$genre],
     xlab = "회당 뺨 맞는 횟수",
     ylab = "주연배우 나이 합계",
     main = "드라마 장르 분포")
# Overlay the test points: 19 (circle) = correct, 17 (triangle) = incorrect
points(formula = sum_age_mainactors ~ avg_slap_face,
       data = test,
       col = alpha(c("blue", "green"), 0.6)[test$genre],
       pch = c(19, 17)[ox])
# Draw the legend
legend("topleft",
       legend = c(paste0("train_valid ", levels(train_valid$genre)),
                  paste0("test ", levels(test$genre), " 정답"),
                  paste0("test ", levels(test$genre), " 오답")),
       pch = c(1, 1, 19, 19, 17, 17),
       col = c(alpha(c("blue", "green"), 0.8),
               alpha(c("blue", "green"), 0.6),
               alpha(c("blue", "green"), 0.6)),
       cex = 0.9,
       bty = "n")
|
d1606f459e894c68d1d52f8912559ca5afd2d721 | 8511b55f2f66301c434fca6594d9c6df79644419 | /plot2.R | 0619cfc6fcb46a73683718787be3fa1a89e74387 | [] | no_license | hpykala/ExData_Plotting2 | 8a3965f288b407080fc6b0e69ad7860c87f95ea0 | 252d8e0237f77d70843f09b66a8f450e4ab08bf3 | refs/heads/master | 2016-09-14T08:04:51.457790 | 2016-04-25T08:08:00 | 2016-04-25T08:08:00 | 56,834,817 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,077 | r | plot2.R | ##Have total emissions from PM2.5 decreased in the Baltimore City, Maryland (fips == "24510")
##from 1999 to 2008? Use the base plotting system to make a plot answering this question.
library(data.table)
#download data if needed
if(!file.exists("summarySCC_PM25.rds")) {
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip", "FNEI_data.zip", mode="wb")
unzip("FNEI_data.zip")
}
NEI <- readRDS("summarySCC_PM25.rds")
#subset Baltimore City
baltimore <- data.table(subset(NEI, fips == "24510"))
#calculate yearly sums to find total pm2.5 emissions
yearly <- baltimore[,sum(Emissions), by = year]
setnames(yearly, "V1", "pm25tot")
#scale total emissions to thousands of tons
yearly[,pm25tot := pm25tot/10^3]
png("plot2.png")
#scatterplot
plot(yearly$year,
yearly$pm25tot,
pch = 19,
xlab = "Year",
ylab = "Total PM2.5 emissions (Thousand tons)",
main = "Total yearly PM2.5 emissions in the Baltimore City")
#add trend line to show clear downward trend
abline(lm(pm25tot ~ year, data = yearly))
dev.off()
|
e65b5249b630e052ac00404fe046282e72fe00aa | 3a91fc71b1ecfdda05598d535000444a6808e1fa | /functions/get_sri.R | 0b681d4fe3306b6207f64e475f9a8b391a4d682c | [] | no_license | qwebber/social-issa | bd870201f3266de6092c505f173b58f74b394969 | a98a4eb1d223ecc9acc0684a9a204c91e55f95bf | refs/heads/master | 2023-04-10T11:58:09.840312 | 2023-02-22T17:01:26 | 2023-02-22T17:01:26 | 340,049,482 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,644 | r | get_sri.R | #' Dynamic network
#'
#' @inheritParams hr_network
#'
#' @return Graph strength for each individual.
#' @export
#'
get_sri <- function(DT = NULL, id = NULL, by = NULL) {
if (is.null(DT) | is.null(id)) {
stop('DT, and id must be provided')
}
DT[, {
d <- data.table::dcast(.SD, formula = groupEnd ~ get(id),
fun.aggregate = length, value.var = 'groupEnd')
gbi_df <- data.matrix(d[, !'groupEnd', with = FALSE])
rownames(gbi_df) <- d$groupEnd
gbi.net_df <- asnipe::get_network(gbi_df, data_format = "GBI",
association_index = "SRI")
gbi.net_df[lower.tri(gbi.net_df)] <- NA
diag(gbi.net_df) <- NA
gbi.grph_df <- igraph::graph_from_adjacency_matrix(gbi.net_df,
mode = "undirected",
diag = FALSE,
weighted = TRUE)
out <- na.omit(reshape2::melt(gbi.net_df))
out <- data.table(ID1 = out$Var1,
ID2 = out$Var2,
sri = out$value)
#memb <- data.table(membershipID1 = membership(fastgreedy.community(gbi.grph_df)),
# ID1 = names(igraph::degree(gbi.grph_df)))
#memb2 <- data.table(membershipID2 = membership(fastgreedy.community(gbi.grph_df)),
# ID2 = names(igraph::degree(gbi.grph_df)))
#all <- merge(out, memb, by = "ID1")
#all2 <- merge(all, memb2, by = "ID2")
}, by = by]
} |
fe35e171659ee526c185b7718e7b3eb578f6fd59 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/rvgtest/examples/plot.ierror.Rd.R | ffd81c5d94446bfd98bfb6c1b8d511ef3a20c734 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 917 | r | plot.ierror.Rd.R | library(rvgtest)
### Name: plot.rvgt.ierror
### Title: Plot Errors in Inversion Methods
### Aliases: plot.rvgt.ierror
### Keywords: distribution hplot htest
### ** Examples
## Create a table of u-errors for spline interpolation of
## the inverse CDF of the standard normal distribution and
## the beta distribution
aqn <- splinefun(x=pnorm((-100:100)*0.05), y=(-100:100)*0.05,
method="monoH.FC")
uerrn <- uerror(n=1e5, aqdist=aqn, pdist=pnorm)
aqb <- splinefun(x=pbeta((0:100)*0.01,shape1=2,shape2=5),
y=(0:100)*0.01, method="monoH.FC")
uerrb <- uerror(n=1e5, aqdist=aqb, pdist=pbeta, shape1=2, shape2=5)
## Plot u-errors of the normal distribution
plot(uerrn)
## Plot maximal u-errors of the normal distribution
plot(uerrn,maxonly=TRUE)
## Compare the u-errors of these two distributions and
## draw maximal tolerated error
plot.rvgt.ierror(list(uerrn,uerrb),tol=1.e-6)
|
9b5f32e139428591c9000d802b405ea951da0ce2 | 43fd4395e3ed61a40ae4283757a9ae6fadbd6c52 | /R/fitted.uniNmix.R | efa19f9f773885e3d72c1b212df1b59dc08efe18 | [] | no_license | cran/jointNmix | f96d01a741bc466b6ee925263d62e8a65441dfaf | e142b6d98fce3c774e9498160656d42a8b234a46 | refs/heads/master | 2020-09-06T19:31:55.212810 | 2016-11-12T00:38:21 | 2016-11-12T00:38:21 | 73,520,306 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 78 | r | fitted.uniNmix.R | fitted.uniNmix <-
function(object, ...) {
return(list("sp1"=object$fv1))
}
|
14a4d51436cc074a0951cf7452a75cd645c347cf | 52ae69c4fb2943e06e335cbd22490ae5085f6ca3 | /day06.R | fe6ac028f47dc84ac97d791d798f6306579d062d | [] | no_license | kwichmann/AdventOfCode2020 | aa91d06e442a4e88adea4512952ae38a36690ed6 | 4b3fe46e426bcde0603e6ceb931c372503ee8dd1 | refs/heads/main | 2023-02-03T13:07:39.441697 | 2020-12-17T19:46:38 | 2020-12-17T19:46:38 | 318,871,274 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,060 | r | day06.R | # Make sure to setwd to the right directory
library(readr)
library(stringr)
library(dplyr)
dataString <- read_file("day06.txt")
# Part 1
questionnaires <- str_split(dataString, "\n\n")
questionnaires_some <- questionnaires %>%
sapply(function(q) gsub("\n", "", q))
q_some_df <- as.data.frame(matrix(rep(NA, 26 * length(questionnaires_some)), ncol = 26))
colnames(q_some_df) <- letters
for (i in 1:length(questionnaires_some)) {
for (letter in letters) {
q_some_df[[letter]][i] <- str_detect(questionnaires_some[i], letter)
}
}
print(sum(as.matrix(q_some_df)))
# Part 2
questionnaires_all <- (questionnaires %>%
lapply(function(q) str_split(q, "\n")))[[1]] %>%
lapply(function(q) str_split(q, "")) %>%
lapply(function(q) Reduce(intersect, q))
q_all_df <- as.data.frame(matrix(rep(NA, 26 * length(questionnaires_all)), ncol = 26))
colnames(q_all_df) <- letters
for (i in 1:length(questionnaires_all)) {
for (letter in letters) {
q_all_df[[letter]][i] <- letter %in% questionnaires_all[[i]]
}
}
print(sum(as.matrix(q_all_df)))
|
89be4e60890f5fae92501624905860e719f37c9a | 704fb9134e11ff44bbf5393bb4749c9205543f5e | /run_analysis.R | b25546debdbb781b7d93bbf7a1a72e8a22c396e5 | [] | no_license | pavarit/coursera-data-cleaning | 837cc2e2e25ac348e71ccc29d0c455d590a20720 | f82c2b7d796ffecacf10afde8e42f7ba0e4cc350 | refs/heads/master | 2020-05-22T09:05:11.828369 | 2017-03-11T23:53:22 | 2017-03-11T23:53:22 | 84,686,288 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,863 | r | run_analysis.R | ## 1) Merges the training and the test sets to create one data set.
## 2) Extracts only the measurements on the mean and standard deviation for each measurement.
## 3) Uses descriptive activity names to name the activities in the data set
## 4) Appropriately labels the data set with descriptive variable names.
## 5) From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
##Need to set working directory to the location of UCI HAR Dataset folder before running
library(dplyr)
##Read all relavant data files
setwd("UCI HAR Dataset/train")
subjtrain <- read.table("subject_train.txt", colClasses = "numeric")
xtrain <- read.table("X_train.txt", colClasses = "numeric")
ytrain <- read.table("y_train.txt", colClasses = "numeric")
setwd("..")
setwd("test")
subjtest <- read.table("subject_test.txt", colClasses = "numeric")
xtest <- read.table("X_test.txt", colClasses = "numeric")
ytest <- read.table("y_test.txt", colClasses = "numeric")
setwd("..")
actlabel <- read.table("activity_labels.txt", colClasses = "character")
features <- read.table("features.txt", colClasses = "character")
##Bind data together
alltrain <- cbind(subjtrain, ytrain, xtrain)
alltest <- cbind(subjtest, ytest, xtest)
dt <- rbind(alltrain, alltest)
colnames(dt) <- c("subject", "activity", features[, 2])
##Extract only columns with mean or std data
dtmeanstd <- dt[, c("subject", "activity", grep("mean\\(\\)|std\\(\\)", features[, 2], value = TRUE))]
##Replace activity number with activity names
for(i in 1:length(dtmeanstd$activity)) {
dtmeanstd$activity[i] <- actlabel[dtmeanstd$activity[i], 2]
}
#Create tidy data
dttidy <- dtmeanstd %>% group_by(subject, activity) %>% summarize_each(funs(mean))
##Create txt file
write.table(dttidy, file = "tidy data.txt", row.names = FALSE) |
cdc226f713e19cc09ef4c856a814d3d035ef7cdf | 5e3a624a4c869bf7a03419a71ab6fcf904d69108 | /R/optimizeMbo.R | a8e06fa8e6b0e3af3ba13c804ec2e32433c64166 | [] | no_license | ascheppach/EBO | 20296345296f15c202485dc4c18480cd14404f34 | f7d9d90a935887266d31a16269b664973d3f2e12 | refs/heads/master | 2022-12-24T13:16:22.036546 | 2020-07-16T09:21:32 | 2020-07-16T09:21:32 | 302,585,497 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 569 | r | optimizeMbo.R | optimizeMBO = function(configuration, objNormal, info, funcEvals) {
# create MBOControl and MBOsurrogate (infillcrit + surrogate)
mboControlLearner = createMboControlSurrogate(x = configuration)
# compute mlrMBO
res = configMboFunc(instance = list(objNormal, info),
funcEvals = funcEvals,
design = configuration$design,
amountDesign = configuration$amountDesign,
control = mboControlLearner[[1]],
surrogate = mboControlLearner[[2]])
return(res)
}
|
6d7ff87f28420d5647cb384f09cb9c7c215caca1 | 29e424922b3d0e510736f71314795500e8e99341 | /00a_functions_ml_approach_20180625.R | c4a49a806a8e179125fbde12432982cdff381608 | [] | no_license | holgersr/Bayesian-inference-and-simulation-of-Elo-scores-in-analysis-of-social-behaviour | f9e31801820658500b0f679d16ec40c3991c1b9b | 2a0c67188c8c1be98b7ad65de83590cbd9150022 | refs/heads/master | 2021-09-19T09:20:20.299115 | 2018-06-25T06:41:06 | 2018-06-25T06:41:06 | 95,112,337 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,220 | r | 00a_functions_ml_approach_20180625.R | ## Functions needed to implement the ML approach:
pAfun <- function(EloA, EloB){
  # Expected probability that A beats B under the logistic (Elo) model;
  # vectorised over both arguments.
  rating_gap <- EloA - EloB
  1/(1 + exp(-rating_gap))
}
Elo_pA <- function(EloStart_logk, X, show_k = FALSE){
  # Run the sequential Elo update over an interaction matrix and return the
  # per-bout winner probabilities and the score trajectory.
  #
  # Arguments:
  #   EloStart_logk numeric vector c(start scores (one per individual), log(k));
  #                 the start scores are centred (mean 0) before use.
  #   X             interaction matrix, one row per bout: the winner's column
  #                 holds 1, the loser's column holds -1, all others 0.
  #   show_k        if TRUE, print the current k (overwriting the same console
  #                 line) as a progress display during optimisation.
  # Returns: list(pA  = winner probability before each bout,
  #               Elo = matrix of post-bout scores, one row per bout,
  #                     columns named like the columns of X)
  n_par <- length(EloStart_logk)
  stopifnot(n_par == ncol(X) + 1) # one score per individual, plus log(k)
  EloStart <- EloStart_logk[-n_par]
  EloStart <- EloStart - mean(EloStart)
  k <- exp(EloStart_logk[n_par])
  if (show_k) {
    cat(paste0(round(k, 3), paste(rep(" ", 20), collapse = " ")))
    cat("\r")
  }
  EloNow <- EloStart
  Elo <- matrix(nrow = nrow(X), ncol = length(EloStart), 0)
  colnames(Elo) <- colnames(X)
  pA <- rep(0, nrow(X))
  # seq_len() (rather than 1:nrow(X)) keeps a zero-row X from iterating over
  # c(1, 0) and failing on an out-of-bounds row.
  for (i in seq_len(nrow(X))) {
    A <- which(X[i, ] == 1)
    B <- which(X[i, ] == -1)
    # Winner probability under the logistic model (same formula as pAfun()).
    pA[i] <- 1/(1 + exp(EloNow[B] - EloNow[A]))
    toAdd <- (1 - pA[i]) * k
    EloNow[A] <- EloNow[A] + toAdd
    EloNow[B] <- EloNow[B] - toAdd
    Elo[i, ] <- EloNow
  }
  return(list(pA = pA, Elo = Elo))
}
logLik <- function(EloStart_logk, X, show_k = FALSE){
  # Negative log-likelihood of the observed outcomes under the Elo model;
  # suitable as an objective for optim(). (Note: the name shadows the
  # stats::logLik generic.)
  win_probs <- Elo_pA(EloStart_logk = EloStart_logk, X = X, show_k = show_k)$pA
  -sum(log(win_probs))
}
logLik_model1 <- function(logk, X, show_k = FALSE){
  # Profile negative log-likelihood in k only: every starting score is
  # pinned at zero and only log(k) varies.
  start_and_k <- c(rep(0, ncol(X)), logk)
  win_probs <- Elo_pA(EloStart_logk = start_and_k, X = X, show_k = show_k)$pA
  -sum(log(win_probs))
}
pAfun_001factor <- function(EloA, EloB){
  # Logistic win probability with the rating gap damped by a 0.01 scale
  # factor (a 100-point gap here equals a 1-point gap in pAfun()).
  scaled_gap <- 0.01*(EloA - EloB)
  1/(1 + exp(-scaled_gap))
}
Elo_pA_001factor <- function(EloStart_logk, X, show_k = FALSE){
  # Variant of Elo_pA() whose win probability damps the rating gap by a 0.01
  # scale factor; everything else is identical.
  #
  # Arguments:
  #   EloStart_logk numeric vector c(start scores (one per individual), log(k));
  #                 the start scores are centred (mean 0) before use.
  #   X             interaction matrix, one row per bout: 1 for the winner's
  #                 column, -1 for the loser's, 0 otherwise.
  #   show_k        if TRUE, print the current k as a progress display.
  # Returns: list(pA = winner probability per bout, Elo = post-bout scores).
  n_par <- length(EloStart_logk)
  stopifnot(n_par == ncol(X) + 1) # one score per individual, plus log(k)
  EloStart <- EloStart_logk[-n_par]
  EloStart <- EloStart - mean(EloStart)
  k <- exp(EloStart_logk[n_par])
  if (show_k) {
    cat(paste0(round(k, 3), paste(rep(" ", 20), collapse = " ")))
    cat("\r")
  }
  EloNow <- EloStart
  Elo <- matrix(nrow = nrow(X), ncol = length(EloStart), 0)
  colnames(Elo) <- colnames(X)
  pA <- rep(0, nrow(X))
  # seq_len() (rather than 1:nrow(X)) keeps a zero-row X from iterating over
  # c(1, 0) and failing on an out-of-bounds row.
  for (i in seq_len(nrow(X))) {
    A <- which(X[i, ] == 1)
    B <- which(X[i, ] == -1)
    # Damped logistic win probability (same formula as pAfun_001factor()).
    pA[i] <- 1/(1 + exp(0.01*(EloNow[B] - EloNow[A])))
    toAdd <- (1 - pA[i]) * k
    EloNow[A] <- EloNow[A] + toAdd
    EloNow[B] <- EloNow[B] - toAdd
    Elo[i, ] <- EloNow
  }
  return(list(pA = pA, Elo = Elo))
}
logLik_001factor <- function(EloStart_logk, X, show_k = FALSE){
  # Negative log-likelihood under the 0.01-damped Elo model; objective for
  # optim() over the same parameterisation as logLik().
  win_probs <- Elo_pA_001factor(EloStart_logk = EloStart_logk, X = X, show_k = show_k)$pA
  -sum(log(win_probs))
}
85748a1b997fe697ea301c7ca22ef19d5af7532e | 982dcc77db4a58e81527649b33db10014fd2edb3 | /day03/day3.R | 5d513721096cf79a9dab8abb370081f07ac337aa | [] | no_license | jaewon-jun9/R-in-multi | c734aaa1bce11b583a64b2309b852989924e51c4 | acd5f828557b8c54fcfc64a799a80340d3e6a086 | refs/heads/master | 2020-12-03T01:17:14.655310 | 2020-01-01T03:10:51 | 2020-01-01T03:10:51 | 231,172,519 | 1 | 0 | null | 2020-01-01T03:47:20 | 2020-01-01T03:47:19 | null | UTF-8 | R | false | false | 3,044 | r | day3.R | y <- c(0,25,50,75,100)
z <- c(50,50,50,50,50)
y == z
y != z
y > z
y < z
y >= z
y <= z
y == 50 #알아서 복사 ex) 50 50 50 50 50
y > 50
num1 <- 11 #c(11)
num2 <- 3 #c(3)
num1/num2
num1%%num2
num1 %/% num2
#LIST 리스트
lds <- list(1,2,3) #각각 묶음
lds
lds +100 #오류
lds[1] #첫번째 보따리
lds[1]+10 #오류
lds[[1]]+10 #가능
names(lds) <- LETTERS[1:3]
lds
lds[[2]]
lds[["B"]]
lds$B
a <- list(
a=1:3,
b="a string",
c=pi,
d=list(-1,-5)
)
a
a[1]
a[[1]]
a$a
a[1]+1 #오류 unlist(a[1])+1 (O)
a[[1]]+1
a[[1]][2] +100
new_a <-unlist(a[1])
a[1]; new_a
names(new_a) <- NULL
new_a
names(a) <- NULL
a
ls()
length(ls())
save(list=ls(),file="all.rda")
rm(list=ls())
ls()
load("all.rda")
ls()
#read file data
# scan() reads whitespace-separated tokens; what="" makes them character.
nums<- scan("data/sample_num.txt", what="")
word_ansi <- scan("data/sample_ansi.txt",what="")
# encoding = "UTF-8" declares the file encoding so multi-byte text reads correctly.
word_utf8 <- scan("data/sample_utf8.txt",what="",encoding = "UTF-8")
word_utf8
# Same UTF-8 file read without declaring the encoding, for comparison
# (may show garbled characters on a non-UTF-8 locale).
word_utf8_new <- scan("data/sample_utf8.txt",what="")
word_utf8_new
# readLines() keeps whole lines instead of splitting into tokens.
lines_ansi <- readLines("data/sample_ansi.txt")
lines_utf8 <- readLines("data/sample_utf8.txt",encoding = "UTF-8")
#if else
randomNum <- sample(1:10,1)
# Scalar branch; the Korean messages mean "greater than 5" /
# "less than or equal to 5".
if(randomNum>5){
  cat(randomNum,":5보다 크군요","\n")
}else{
  cat(randomNum,":5보다 작거나 같군요","\n")
}
# randomNum %% 2 is 1 (truthy) for odd numbers; "홀수" = odd, "짝수" = even.
if(randomNum%%2){
  cat(randomNum,";홀수","\n")
}else{
  cat(randomNum,";짝수","\n")
}
# Grade banding on a fixed score ("는 X등급입니다" = "is grade X").
score <- c(50)
if (score >= 90) {
  cat(score, "는 A등급입니다","\n")
}else if (score >=80){
  cat(score, "는 B등급입니다","\n")
}else if (score >=70){
  cat(score, "는 C등급입니다","\n")
}else if (score >=60){
  cat(score, "는 D등급입니다","\n")
}else{
  cat(score, "는 F등급입니다","\n")
}
# Same banding on a random score.
score <- sample(0:100,1)
if (score >= 90){
  cat(score, "는 A등급입니다","\n")
}else if (score >=80){
  cat(score, "는 B등급입니다","\n")
}else if (score >=70){
  cat(score, "는 C등급입니다","\n")
}else if (score >=60){
  cat(score, "는 D등급입니다","\n")
}else{
  cat(score, "는 F등급입니다","\n")
}
# for-loop practice
for(data in month.name)
  print(data)
# Without braces only the first statement belongs to the loop body:
for(data in month.name) print(data); print("ㅋㅋ")
# With braces both statements run on every iteration:
for(data in month.name){print(data);print("ㅋㅋ")}
for(n in 1:5)
  cat("hello?","\n")
# Nested loops.
for(i in 1:5){
  for(j in 1:5){
    cat("i",i,"j=",j,"\n")
  }
}
# Multiplication table (gugudan), rows 1..9.
for(dan in 1:9){
  for(num in 1:9){
    cat(dan,"x",num,"=",dan*num,"\t")
  }
  cat("\n")
}
#switch 문을 대신하는 함수
month <- sample(1:12,1)
month <- paste(month,"월",sep="") #sep="" 생략하면 "3 월"로 나옴. "3 월"과"3월"은 다름.
result <- switch(EXPR = month,
"12월"=,"1월"=,"2월"="겨울",
"3월"=,"4월"=,"5월"="봄",
"6월"=,"7월"=,"8월"="여름",
"가을")
cat(month,"은"," ",result,"입니다.\n",sep="")
num <- sample(1:10,1)
num
switch(EXPR = num,"A","B","C","D")
for(num in 1:10){
cat(num,":",switch(EXPR = num,"A","B","C","D"),"\n")
}
for(num in 1:10){
num<- as.character(num)
cat(num,":",switch(EXPR = num,"7"="A","8"="B","9"="C","10"="D","ㅋ"),"\n")
}
|
0405e9525b0062293293ae421ccfe01aeac482e4 | db12b990924703cd74748d8585cd9c11fafa6746 | /h2o-r/tests/testdir_autoGen/runit_complexFilterTest_iris_test_numeric_missing_extra_40.R | 2382248085c4bf1fd5ba12800811d396ce26827c | [
"Apache-2.0"
] | permissive | h2oai/h2o-3 | 919019a8f297eec676011a9cfd2cc2d97891ce14 | d817ab90c8c47f6787604a0b9639b66234158228 | refs/heads/master | 2023-08-17T18:50:17.732191 | 2023-08-17T16:44:42 | 2023-08-17T16:44:42 | 17,371,412 | 6,872 | 2,345 | Apache-2.0 | 2023-09-14T18:05:40 | 2014-03-03T16:08:07 | Jupyter Notebook | UTF-8 | R | false | false | 1,933 | r | runit_complexFilterTest_iris_test_numeric_missing_extra_40.R | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../scripts/h2o-r-test-setup.R")
##
# Author: Autogenerated on 2013-12-18 17:01:19
# gitHash: 2581a0dfa12a51892283830529a5126ea49f0cb9
# SEED: 2481425483200553751
##
complexFilterTest_iris_test_numeric_missing_extra_40 <- function() {
Log.info("A munge-task R unit test on data <iris_test_numeric_missing_extra> testing the functional unit <['!', '!=', '&', '>=', '|', '>=']> ")
Log.info("Uploading iris_test_numeric_missing_extra")
hex <- h2o.importFile(locate("smalldata/iris/iris_test_numeric_missing_extra.csv"), "riris_test_numeric_missing_extra.hex")
Log.info("Performing compound task !( ( hex[,c(\"species\")] != 1.2108297567 ) & ( hex[,c(\"petal_len\")] >= 5.18451212374 ) | ( ( hex[,c(\"petal_len\")] >= 3.39830058306 )) ) on dataset <iris_test_numeric_missing_extra>")
filterHex <- hex[!( ( hex[,c("species")] != 1.2108297567 ) & ( hex[,c("petal_len")] >= 5.18451212374 ) | ( ( hex[,c("petal_len")] >= 3.39830058306 )) ),]
Log.info("Performing compound task !( ( hex[,c(\"petal_len\")] != 4.56344348577 ) & ( hex[,c(\"petal_len\")] >= 3.54674974992 ) | ( ( hex[,c(\"species\")] >= 2.26145385905 )) ) on dataset iris_test_numeric_missing_extra, and also subsetting columns.")
filterHex <- hex[!( ( hex[,c("petal_len")] != 4.56344348577 ) & ( hex[,c("petal_len")] >= 3.54674974992 ) | ( ( hex[,c("species")] >= 2.26145385905 )) ), c("petal_wid","petal_len","sepal_len","species")]
Log.info("Now do the same filter & subset, but select complement of columns.")
filterHex <- hex[!( ( hex[,c("petal_len")] != 4.56344348577 ) & ( hex[,c("petal_len")] >= 3.54674974992 ) | ( ( hex[,c("species")] >= 2.26145385905 )) ), c("sepal_wid")]
}
doTest("compoundFilterTest_ on data iris_test_numeric_missing_extra unit= ['!', '!=', '&', '>=', '|', '>=']", complexFilterTest_iris_test_numeric_missing_extra_40)
|
cbeecfbcdbe25fdebc364e69a83efabdf384caa6 | 73591225035cc34cf9995a857f84e8f991535ae0 | /cachematrix.R | 38242627b1da5b2528503e37c144f3693a0590dd | [] | no_license | HeinrichZiegler/ProgrammingAssignment2 | 984428339d16851a8004292f279e1d748d124236 | 8380f895b589f9d6b3cd602a33993b3f8a6880fb | refs/heads/master | 2022-12-17T02:10:59.248750 | 2020-09-15T18:22:57 | 2020-09-15T18:22:57 | 295,770,090 | 0 | 0 | null | 2020-09-15T15:26:27 | 2020-09-15T15:26:26 | null | UTF-8 | R | false | false | 1,321 | r | cachematrix.R | ## This file provides two R functions to facilitate the caching of the
## inverse matrix or a given matrix
## Assumption: the provided matrix is always valid!
## makeCacheMatrix is a function which creates an object containing the
## matrix and adding the ability to remember the inverse matrix
## it will also allow reset the inverse matrix value, or change the matrix
## Whenever the matrix contained is changed, the inverse matrix will be
## reset as well
makeCacheMatrix <- function(x = matrix()) {
  # Build a closure-based "cache matrix" object: it stores a matrix together
  # with a cached inverse, exposed through four accessor functions.
  cached_inverse <- NULL

  # Replace the stored matrix and invalidate any stale cached inverse.
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }

  # Return the stored matrix.
  get <- function() x

  # Store / retrieve the cached inverse (NULL until set).
  setInverse <- function(solve) cached_inverse <<- solve
  getInverse <- function() cached_inverse

  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## This function "cacheSolve" will calculate the inverse matrix of "special"
## matrix provided with makeCacheMatrix, and store the inverse in the object
## provided
## However, if an inverse matrix already exists it will use the buffered value
## and return it without performing the solve() operation
cacheSolve <- function(x, ...) {
  # Return the inverse of the "cache matrix" object `x` (as produced by
  # makeCacheMatrix). A previously computed inverse is reused; otherwise the
  # inverse is computed with solve(), stored back into the cache, and
  # returned. Extra arguments are forwarded to solve().
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setInverse(inverse)
  ## Return a matrix that is the inverse of 'x'
  inverse
}
|
b0173b02bb3c0f5932287b29a39f3a9ed36b64a7 | f1bc608c0dd10fba6ec997ab774c17632073933b | /Skeletons/rfe.R | d9d085dde8b2a89567213bdbc92a54a5a83b3337 | [] | no_license | junonforthis/JUSMASTERARBEIT | 9312926099bb8337fcddd384eeaf9a27aa3fec66 | 82c9ecb9867d2c9df1520af987969d8e536af69d | refs/heads/master | 2022-07-22T22:53:54.835339 | 2019-04-25T23:03:49 | 2019-04-25T23:03:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,342 | r | rfe.R | setwd("D:/")
cal <- readRDS("E:/calfinal.rds")
length(colnames(cal))
sapply(cal,function(x) any(is.na(x)))
# reorder columns of cal to set target variables first
sapply(cal,function(x) any(is.na(x)))
colnames(cal)
# choose numeric X-variables
calx <- colnames(cal)[c(3:30)]
set.seed(7)
num <- as.vector(which(sapply(cal[,c(calx)],class)=="numeric"))
# calculate correlation matrix
correlationMatrix <- cor(as.matrix(cal[,calx[num]]), method = "pearson")
# summarize the correlation matrix
#print(correlationMatrix)
# find attributes that are highly corrected (ideally >0.75)
highlyCorrelated <- findCorrelation(correlationMatrix, cutoff=0.5)
# print indexes of highly correlated attributes
print(highlyCorrelated)
sort(highlyCorrelated)
calfilter <- calx[num][-highlyCorrelated]
calgone <- calx[num][highlyCorrelated]
fac <- as.vector(which(sapply(cal[,c(calx)],class)=="factor"))
calfac <- colnames(cal[,calx])[fac]
chisq.test(table(cal[,"Month"], cal[,"Quarter"]))
chisq.test(table(cal[,"Weekday_No"], cal[,"Weekend"]))
chisq.test(table(cal[,"Holiday"], cal[,"HolidayWeek"]), simulate.p.value = T)
chis = list()
twofac <- combn(calfac,2)
for (i in 1:ncol(twofac)){
chis[i] <- chisq.test(table(cal[,twofac[1,i]], cal[,twofac[2,i]]), simulate.p.value = T)$p.value
}
sigchis <- which(chis < 0.05)
twofac[,sigchis]
# prepare training scheme
controlimp <- trainControl(method="timeslice", initialWindow = 7, horizon=1, fixedWindow=T)
# train the model
model <- train(Weight~., data=cal3, method="cforest", trControl=controlimp)
# estimate variable importance
importance <- varImp(model, scale=FALSE)
# summarize importance
print(importance)
# plot importance
plot(importance)
set.seed(84)
controlrfe <- rfeControl(functions=caretFuncs, method="timeslice")
# run the RFE algorithm
results <- rfe(x=cal3[,nonerr], y=cal3[,26], rfeControl=controlrfe, preProc = c("center", "scale"),method="svmRadial", initialWindow=1)
# summarize the results
print(results)
# list the chosen features
predictors(results)
# plot the results
plot(results, type=c("g", "o"))
myTimeControl <- trainControl(method = "timeslice",
initialWindow = 7,
horizon = 1,
fixedWindow = F)
plsFitTime <- train(Weight ~ .,
data = cal3[,c(nonerr,"Weight")],
method = "svmRadial",
preProc = c("center", "scale"),
trControl = myTimeControl)
# leave out one variable:
cal3 <- cal[,c(calx,"Weight")]
saveRDS(cal3, "E:/cal3.rds")
fac <- as.vector(which(sapply(cal3,class)=="factor"))
cal3[,c(fac)] <- sapply(cal3[,c(fac)], as.numeric)
num <- as.vector(which(sapply(cal3[,c(calx)],class)=="numeric"))
loo <- function(xvars) {
  # Build the leave-one-out combinations of `xvars`: element i of the result
  # is `xvars` with its i-th entry removed.
  #
  # Args:
  #   xvars: a vector (here: a character vector of predictor column names).
  # Returns:
  #   A list of length(xvars) vectors, each missing one element. A
  #   zero-length input yields an empty list (the original `1:length(xvars)`
  #   loop iterated over c(1, 0) and errored on `out[[0]] <-`).
  out <- vector("list", length(xvars))  # preallocate instead of growing
  for (i in seq_along(xvars)) {         # seq_along() is safe for length 0
    out[[i]] <- xvars[-i]
  }
  out
}
foo <- loo(calx)
calts <- ts(cal3)
nonerr <- colnames(cal3[,c(1:14,16:25)])
iteratevars <- function(target, cal3, nonerr) {
  # Greedy leave-one-out regressor search for an ARIMA model with external
  # regressors: fit a baseline on the full predictor set `nonerr`, then refit
  # once per predictor left out, keeping whichever fit has the lowest AIC.
  #
  # Args:
  #   target: the response series (a ts column of cal3 in this script).
  #   cal3:   data frame holding the candidate regressor columns.
  #   nonerr: character vector of regressor column names.
  # Returns: the best auto.arima fit found (baseline or one LOO variant).
  #
  # NOTE(review): `thisisit` records which predictor was dropped but is never
  # returned, so the caller cannot tell which variable was removed -- confirm
  # whether that index should be part of the return value.
  firstmod <- auto.arima(target, xreg=as.matrix(cal3[,nonerr ]))
  # `nonerr` is reused to hold the list of leave-one-out predictor subsets
  # (loo() is defined earlier in this file).
  nonerr <- loo(nonerr)
  for (j in 1:length(nonerr)) {
    model <- auto.arima(target, xreg=as.matrix(cal3[,nonerr[[j]]]))
    if (model$aic < firstmod$aic) {
      firstmod <- model
      thisisit <- j
    }
  }
  return(firstmod)
}
whazzup <- iteratevars(calts[,26], cal3, nonerr)
answer <- names(whazzup$coef)
answer <- answer[4:length(answer)]
huh <- which(calx %in% answer)
calx[huh]
answer
whazzup$aic
firstmod <- auto.arima(calts[,26], xreg=as.matrix(cal3[,nonerr ]))
firstmod$aic
far1 <- function(target,h) {forecast(auto.arima(target, xreg=as.matrix(cal3[,nonerr])), h = h)}
firstmod <- tsCV(calts[,26], forecastfunction = far1, h=1, window=1)
itvars <- function(target, cal3, nonerr) {
  # Leave-one-out regressor search like iteratevars(), but candidate models
  # are scored by cross-validated forecast error (tsCV + rmse) instead of AIC.
  #
  # Args:
  #   target: response series; cal3: data frame of regressors;
  #   nonerr: character vector of regressor column names.
  # Returns: the tsCV error object of the best-scoring predictor subset.
  #
  # NOTE(review): far1 closes over `nonerr1`, which is mutated inside the
  # loop so each tsCV call evaluates a different predictor subset -- fragile
  # but apparently intentional. Also `h = h` is passed to auto.arima() rather
  # than to forecast(); forecast(fit, h = h) was probably intended -- confirm.
  # As in iteratevars(), `thisisit` is assigned but never used.
  nonerr1 <- nonerr
  far1 <- function(target1,h) {forecast(auto.arima(target1, xreg=as.matrix(cal3[,nonerr1]), h = h))}
  firstmod <- tsCV(target, forecastfunction = far1, h = 1, window=1)
  nonerr2 <- loo(nonerr)
  for (j in 1:length(nonerr2)) {
    nonerr1 <- nonerr2[[j]]
    model <- tsCV(target, forecastfunction = far1, h = 1, window=1)
    if (rmse(model) < rmse(firstmod)) {
      firstmod <- model
      thisisit <- j
    }
  }
  return(firstmod)
}
whazzupi <- itvars(calts[,26], cal3, nonerr)
nonerr1 <- nonerr
far1 <- function(target,h) {forecast(auto.arima(target, xreg=as.matrix(cal3[,nonerr1])), h = h)}
firstmod <- tsCV(calts[,26], forecastfunction = far1, h=1, window=1)
nonerr <- loo(nonerr)
nonerr1 <- nonerr[[1]]
model <- tsCV(calts[,26], forecastfunction = far1, h = 1, window=1)
if (rmse(model) < rmse(firstmod)) {
firstmod <- model
thisisit <- j
}
}
#
# nonerr2 <- loo(nonerr1[[thisisit]])
# for (k in 1:length(nonerr2)) {
# model <- auto.arima(calts[,26], xreg=as.matrix(cal3[,nonerr2[[k]]]))
# if (model$aic < firstmod$aic) {
# firstmod <- model
# thisisit <- k
# } else{print(thisisit); break}
# }
# firstmod
# nonerr1[[j-1]]
# #print(any(is.na(cal[complete.cases(cal[,j]),j])))
#
#
# model <- auto.arima(calts, xreg=as.matrix(cal[complete.cases(cal[,foo[[1]]]),foo[[1]]]))
#
#
# calts <- ts(cal$Weight)
# sapply(cal[complete.cases(cal[,foo[[1]]]),foo[[1]]], class)
# sapply(foo, class)
# # computationally intense:
# # install.packages("sets")
# # library(sets)
# # set_power(calx)
# # set_power(calx[1:5])
|
1d5c4aec708100adaf57467c1d06cb182afb7e4f | ebe618be227430b4764203e06ae805b6d0fe0d40 | /plot3.R | 15eb3121ba526ded91eb4177763f965f300e29ff | [] | no_license | goodnick/ExData_Plotting1 | 492ed1cb679b8d14c48d936dd85925300d7a7a6d | bf55d48597d39459e06cabfe009e78af262d64b0 | refs/heads/master | 2021-01-18T05:56:53.539395 | 2016-02-08T03:01:50 | 2016-02-08T03:01:50 | 51,243,178 | 0 | 0 | null | 2016-02-07T10:07:54 | 2016-02-07T10:07:51 | null | UTF-8 | R | false | false | 906 | r | plot3.R | ## source the data loading file if it hasn't been sourced already
if(!exists("readHousePowerConsumptionData", mode="function")) {
source("ReadData.R")
}
## Create plot 3 and write it to "plot3.png" in the working directory,
## reporting the file name via message() when done.
createPlot3File <- function() {
    filename <- "plot3.png"
    png(filename = filename, bg = "transparent")
    # Close the device even if plotting fails; the original called dev.off()
    # only on the success path, leaving a dangling graphics device open
    # after an error in generatePlot3().
    on.exit(dev.off(), add = TRUE)
    generatePlot3()
    message(paste("plot3 created -", filename))
}
## Generate plot 3 on the current device.
generatePlot3 <- function () {
    # Load the household power consumption data (reader defined in
    # ReadData.R, sourced at the top of this file; assumed to return columns
    # Time and Sub_metering_1..3 -- TODO confirm against ReadData.R).
    houseData <- readHousePowerConsumptionData()
    # Base line plot for sub-metering 1 (black), then overlay series 2 (red)
    # and 3 (blue) on the same axes.
    plot(houseData$Time, houseData$Sub_metering_1, type = "l", xlab = "",
         ylab = "Energy sub metering")
    lines(houseData$Time, houseData$Sub_metering_2, col = "red")
    lines(houseData$Time, houseData$Sub_metering_3, col = "blue")
    # Legend colors/labels match the series order drawn above.
    legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
           col = c("black", "red", "blue"), lty = "solid")
}
55e777abe81e0bc6b3fbc36c3a36f331672b859d | 91c6a5300abe4c27e113582d028f5ea7442cd144 | /user_dist_DHMM.R | afe589d315ce91abd017fd639b967a926d135592 | [] | no_license | bw4sz/Nimble | fa66d7e0dfb9ece7ebaeba97be2ac0cc2be45fa5 | 4c27441edb47e88ecd7f27e4532e10b962f78a5b | refs/heads/master | 2021-01-09T20:05:38.785219 | 2016-06-27T22:40:00 | 2016-06-27T22:40:00 | 62,092,944 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,402 | r | user_dist_DHMM.R | ## ----eval = FALSE--------------------------------------------------------
## {
##
## # -------------------------------------------------
## # Parameters:
## # s: survival probability
## # psiV: transitions from vegetative
## # psiF: transitions from flowering
## # psiD: transitions from dormant
## # -------------------------------------------------
## # States (S):
## # 1 vegetative
## # 2 flowering
## # 3 dormant
## # 4 dead
## # Observations (O):
## # 1 seen vegetative
## # 2 seen flowering
## # 3 not seen
## # -------------------------------------------------
##
## # Priors and constraints
## # Survival: uniform
## for (t in 1:(n.occasions-1)){
## s[t] ~ dunif(0, 1)
## }
## # Transitions: gamma priors
## for (i in 1:3){
## a[i] ~ dgamma(1, 1)
## psiD[i] <- a[i]/sum(a[])
## b[i] ~ dgamma(1, 1)
## psiV[i] <- b[i]/sum(b[])
## c[i] ~ dgamma(1, 1)
## psiF[i] <- c[i]/sum(c[])
## }
##
## # Define state-transition and observation matrices
## for (i in 1:nind){
## # Define probabilities of state S(t+1) given S(t)
## for (t in 1:(n.occasions-1)){
## ps[1,i,t,1] <- s[t] * psiV[1]
## ps[1,i,t,2] <- s[t] * psiV[2]
## ps[1,i,t,3] <- s[t] * psiV[3]
## ps[1,i,t,4] <- 1-s[t]
## ps[2,i,t,1] <- s[t] * psiF[1]
## ps[2,i,t,2] <- s[t] * psiF[2]
## ps[2,i,t,3] <- s[t] * psiF[3]
## ps[2,i,t,4] <- 1-s[t]
## ps[3,i,t,1] <- s[t] * psiD[1]
## ps[3,i,t,2] <- s[t] * psiD[2]
## ps[3,i,t,3] <- s[t] * psiD[3]
## ps[3,i,t,4] <- 1-s[t]
## ps[4,i,t,1] <- 0
## ps[4,i,t,2] <- 0
## ps[4,i,t,3] <- 0
## ps[4,i,t,4] <- 1
##
## # Define probabilities of O(t) given S(t)
## po[1,i,t,1] <- 1
## po[1,i,t,2] <- 0
## po[1,i,t,3] <- 0
## po[2,i,t,1] <- 0
## po[2,i,t,2] <- 1
## po[2,i,t,3] <- 0
## po[3,i,t,1] <- 0
## po[3,i,t,2] <- 0
## po[3,i,t,3] <- 1
## po[4,i,t,1] <- 0
## po[4,i,t,2] <- 0
## po[4,i,t,3] <- 1
## } #t
## } #i
##
## # Likelihood
## for (i in 1:nind){
## # Define latent state at first capture
## z[i,f[i]] <- y[i,f[i]]
## for (t in (f[i]+1):n.occasions){
## # State process: draw S(t) given S(t-1)
## z[i,t] ~ dcat(ps[z[i,t-1], i, t-1,])
## # Observation process: draw O(t) given S(t)
## y[i,t] ~ dcat(po[z[i,t], i, t-1,])
## } #t
## } #i
## }
## ----eval=FALSE----------------------------------------------------------
## ## Filter MCMC for the orchid model
##
## ## load nimble library
## library(nimble)
##
## ## define custom distribution
## dDHMMorchid <- nimbleFunction(
## run = function(x = double(1), length = double(),
## prior = double(1), Z = double(2),
## T = double(3), log.p = double()) {
## pi <- prior
## logL <- 0
## for(t in 1:length) {
## Zpi <- Z[x[t], ] * pi
## sumZpi <- sum(Zpi)
## logL <- logL + log(sumZpi)
## if(t != length)
## pi <- (T[,,t] %*% Zpi / sumZpi)[ ,1]
## }
## returnType(double())
## return(logL)
## }
## )
##
## # this is just a stump that doesn't simulate anything,
## # as we know the MCMC we'll use doesn't need to simulate()
## # from DHMMorchid
## rDHMMorchid <- nimbleFunction(
## run = function(n = integer(), length = double(),
## prior = double(1), Z = double(2),
## T = double(3)) {
## declare(x, double(1, length))
## returnType(double(1))
## return(x)
## }
## )
##
## registerDistributions(list(
## dDHMMorchid = list(
## BUGSdist = 'dDHMMorchid(length, prior, Z, T)',
## types = c('value = double(1)', 'length = double()', 'prior = double(1)',
## 'Z = double(2)', 'T = double(3)'),
## discrete = TRUE
## )
## ))
##
## ## define hierarchical model
## code <- nimbleCode({
## for (t in 1:(k-1)) {
## s[t] ~ dunif(0, 1)
## }
## for (i in 1:3) {
## a[i] ~ dgamma(1, 1)
## psiD[i] <- a[i]/sum(a[1:3])
## b[i] ~ dgamma(1, 1)
## psiV[i] <- b[i]/sum(b[1:3])
## c[i] ~ dgamma(1, 1)
## psiF[i] <- c[i]/sum(c[1:3])
## }
## for (t in 1:(k-1)) {
## T[1,1,t] <- s[t] * psiV[1]
## T[2,1,t] <- s[t] * psiV[2]
## T[3,1,t] <- s[t] * psiV[3]
## T[4,1,t] <- 1-s[t]
## T[1,2,t] <- s[t] * psiF[1]
## T[2,2,t] <- s[t] * psiF[2]
## T[3,2,t] <- s[t] * psiF[3]
## T[4,2,t] <- 1-s[t]
## T[1,3,t] <- s[t] * psiD[1]
## T[2,3,t] <- s[t] * psiD[2]
## T[3,3,t] <- s[t] * psiD[3]
## T[4,3,t] <- 1-s[t]
## T[1,4,t] <- 0
## T[2,4,t] <- 0
## T[3,4,t] <- 0
## T[4,4,t] <- 1
## }
## T[1,1,k] <- 1
## T[2,1,k] <- 0
## T[3,1,k] <- 0
## T[4,1,k] <- 0
## T[1,2,k] <- 0
## T[2,2,k] <- 1
## T[3,2,k] <- 0
## T[4,2,k] <- 0
## T[1,3,k] <- 0
## T[2,3,k] <- 0
## T[3,3,k] <- 1
## T[4,3,k] <- 0
## T[1,4,k] <- 0
## T[2,4,k] <- 0
## T[3,4,k] <- 0
## T[4,4,k] <- 1
## for (i in 1:nind) {
## y[i, f[i]:k] ~ dDHMMorchid(length = k-f[i]+1,
## prior = prior[1:4],
## Z = Z[1:3,1:4],
## T = T[1:4,1:4,f[i]:k])
## }
## })
##
|
479c2797ee2ea53495d46c97afb09a87c818e158 | 6c9ca22438b72e8037abe069397b26002ad6761e | /youbike/1105youbike.R | 1dcd190b0fbdc795e36caad2f8b296feb7ff1d3c | [] | no_license | m0904104/myR | 4a5e72f537838f686b7f8eff56da1db710c97814 | a5c3d27db477966eeb7e65d4eb32e05fc675e2e1 | refs/heads/main | 2023-01-14T01:13:55.975082 | 2020-11-26T10:33:11 | 2020-11-26T10:33:11 | 306,333,536 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 595 | r | 1105youbike.R | setwd("C:/myR1027/Youbike")
install.packages('jsonlite')
library(jsonlite)
file <- "https://quality.data.gov.tw/dq_download_json.php?nid=123026&md5_url=4d8de527a0bcd8a7b1aeae91120f021d"
download.file(file,"ubikeNTP.json")
jdata <- fromJSON("ubikeNTP.json")
str(jdata)
jdata$sarea
x=table(jdata$sarea)
y=sort(x,decreasing = TRUE)
z=as.data.frame(y)
summary(z$Fr)
boxplot(z$Freq)
install.packages('data.table')
library(data.table)
file <- "https://tcgbusfs.blob.core.windows.net/blobyoubike/YouBikeTP.json"
download.file(file,"ubikeTP.json")
m <- fromJSON("ubikeTP.json")
n <- rbindlist(m$retVal) |
8b582c2280f2b8224432ee0ea688629fe92d1b4b | 347f49f7b0133687424cbd7567bdaef94ef9a02e | /02-Lab exercise - Body Image and Academic Performance of College Students/01_Question1.R | 4be8d71d1f908623825aed4b9a31759710b05a22 | [] | no_license | kayomotunde/probability-and-statistics | 48af600b5f7b320e0daec5b7129c228d2a0c6b91 | efb94675646baf28bafdb0b2896256e6abb2b182 | refs/heads/master | 2022-12-05T01:20:12.682290 | 2020-08-31T16:49:42 | 2020-08-31T16:49:42 | 291,742,911 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,865 | r | 01_Question1.R | # Question 1
# Is there a relationship between students' college GPAs and their high school GPAs?
# ----
# reflect on question
# Typically, there should be a positive trend and moderate to high strength
# of the relationship between the two variables, as it is expected that
# High School performance is a pre-cursor to results in College
# ----
# variables selected - HS_GPA and GPA
# both variables are quantitative
# HS_GPA - explanatory variable ('x')
# GPA - response variable ('y')
# Exploratory Data Analysis
# Using a scatter-plot and correlation r (if applicable),
# let's describe the relationship between the two variables
# conduct analysis
x_scale <- c(1.2, 4.0)
y_scale <- c(1.2, 4.5)
plot(data$HS_GPA, data$GPA, xlim = x_scale, ylim = y_scale) # Scatter-plot
cor(data$HS_GPA, data$GPA, use = "c")
# ----
# The results indicate that in general the students who have a higher
# high school GPA will tend to also have a higher college GPA, and vice versa.
# The results further indicate that this relationship is linear
# and that it is moderately strong.
# ----
# Since the relationship between the students' high school GPAs and college GPAs
# is linear (as displayed by the scatterplot and supported by the
# correlation coefficient r being relatively close to 1),
# it makes sense to go ahead and find the regression line -
# the line that best fits the linear pattern of our data
L=lm(data$GPA~data$HS_GPA);
abline(L);
cf=coefficients(L);
lt=paste("GPA = ",round(cf[1],2),"+",round(cf[2],2),"HS_GPA")
legend(1.7,4.3,lt)
# The results are consistent with what can be expected.
# ----
# Suggestion
# We should intervene and counsel students while still in high school.
# Knowing that students with low GPAs are at risk of not doing well in college,
# colleges should develop programs, such as peer or faculty mentors, for these students. |
a5824b406394ae4aa24b6c6113060ce9e0ed4af0 | f4f588a622c6c93e3261dbf4b90852c5a3833757 | /upload_to_trello.R | 33f3c7d1e52fd6432df7ccb37713bd4e864a025d | [] | no_license | SteveOhh/trello_trade_board | 82e9185c3e4d4cb78c3ce267e9b7353ecc8fa818 | c864b59de9d07f5a2cc568eaaa29b38e29791ae1 | refs/heads/main | 2023-03-04T16:57:12.568411 | 2021-02-04T19:54:35 | 2021-02-04T19:54:35 | 336,013,948 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,628 | r | upload_to_trello.R | require(trelloR)
### get key & secret from https://trello.com/app-key ### use personal Trello account
# Only do this once https://github.com/jchrom/trelloR/issues/32
my_token = get_token(appname = "Trade board project",
key = "30278732032ff952a83401244dd94968",
secret = NULL, # should read from cache
scope = c("read", "write"),
expiration = "never"
)
### Load most recent data
load("./Data/coffees.Rdata")
############ ############ ############
############ Set up ###########
############ ############ ############
### Navigated in browser to
# create the board
# delete the lists I don't want,
# create a list I do want ("Trade coffee list)
# get the board ID
# create custom fields
###################### Make card function #########################
### <final> Create one card for each coffee, with names a concat of roaster/product names
# For each...
# Make card name
# Make card description
# Create card
# Store card ID in the same ? dataframe (new_card$id)
##################################################################
##### ##### ##### #####
##### Prep ##### #####
##### ##### ##### #####
### Global variables
# Store board ID
board_id <- "5fe8d603949d7e3f69354b90"
# Target list
list_ids <- get_board_lists(board_id)
coffee_list_id <- list_ids[list_ids$name=="Trade coffee list","id"] %>% as.character()
# Get card custom fields' IDs
board_custom_fields <- get_board_cards(board_id)[1,1] %>% get_card_fields()
# Store field names
cfs <- c("Price", "Producer", "Sub_region", "Process", "Variety", "Elevation", "URL")
# Function for attachments
add_attachments <- function(card_id, coffee) {
  # POST two attachments to the Trello card `card_id` via the REST
  # attachments endpoint: the coffee's image (attached by URL as a PNG)
  # and the bare product URL. Relies on the global `my_token` created at
  # the top of this script; `coffee` is assumed to be a one-row record
  # with `image` and `url` fields -- TODO confirm against coffees.Rdata.
  # global variables
  token <- my_token$credentials$oauth_token
  key <- "30278732032ff952a83401244dd94968" # obviously not global, but it could be
  # defined by inputs
  put_url <- paste0("https://api.trello.com/1/cards/", card_id, "/attachments/")
  image_payload <- list(name = "PNG", url = coffee$image, mimeType = "image/png")
  url_payload <- coffee$url
  # First request attaches the image by URL...
  httr::POST(url = paste0(put_url, '?key=', key, '&token=', token),
             body = image_payload,
             encode = "json"
  )
  # ...second attaches the product URL. Both responses are discarded, so
  # failures are silent -- NOTE(review): consider checking status codes.
  httr::POST(url = paste0(put_url, '?key=', key, '&token=', token),
             body = url_payload,
             encode = "json"
  )
}
##### ##### ##### #####
##### Make a card #####
##### ##### ##### #####
make_coffee_card <- function(coffee) {
  # Create one Trello card for a single coffee (`coffee` is a one-row
  # tibble), fill in its custom fields, attach the product image/URL, and
  # add a "Decaf" label when applicable. Relies on globals defined above:
  # coffee_list_id, cfs, board_custom_fields, my_token.
  # Returns: the id of the newly created card.
  # Description function
  source("./populate_card_description.R")
  # Card title
  roaster <- coffee$roaster_name
  product_name <- coffee$product_name
  # Description
  roaster_taste_notes <- coffee$roaster_taste_notes
  trade_taste_notes <- coffee$trade_taste_notes
  taste_profile <- coffee$taste_profile
  description <- populate_card_description(roaster_taste_notes, trade_taste_notes, taste_profile)
  gps_coordinates <- coffee$source_gps_string # not sure how to handle NAs; trying it out
  # Card list, name, description, and map coordindates to send with request
  card_details <- list(name = paste0(product_name, " - ", roaster),
                       desc = description,
                       coordinates = gps_coordinates,
                       pos = "bottom")
  # Create card & store details (so you can call the ID)
  new_card <- add_card(list = coffee_list_id, body = card_details, token = my_token)
  # Update custom fields: one API call per field name in `cfs`; the matching
  # data column is assumed to be the lower-cased field name -- TODO confirm.
  for(cf in cfs) {
    column_name <- tolower(cf)
    field_id <- board_custom_fields[board_custom_fields$name==cf,"id"]%>% as.character()
    field_key <- "text"
    raw_field_value <- coffee[1,column_name] %>%
      # coerce tibble to character
      as.character() %>%
      # handle empty result
      ifelse(.=="character(0)", "", .)
    # second pass catches the sentinel if the ifelse() above left it in place
    field_value <- ifelse(raw_field_value=="character(0)", "", raw_field_value)
    # # this is deprecated, but I don't see how to use update_resource()
    update_card_field(card = new_card$id,
                      field = field_id,
                      key = field_key,
                      value = field_value,
                      token = my_token)
  }
  # Attach image and URL (* need to learn how to add attachment)
  add_attachments(card_id = new_card$id, coffee = coffee)
  # Add decaf label (*need to learn)
  if(coffee$decaf==TRUE) {
    add_label(card = new_card$id, color = "yellow", name = "Decaf")
  }
  return(new_card$id)
}
### Upload from all rows in the df
for(i in 1:nrow(coffees)) {
df <- tibble(coffees[i,])
make_coffee_card(df)
}
|
57ed6fb54c9ce904a9d17ce1b3e69b8d4d309f86 | 3f0a9af0e7d37102e6872fed916ddf0b670b0164 | /man/SAVE.controls.Rd | e9f9689a354afd1e4816e2bbdefd774f99909eca | [] | no_license | cran/SAVE | 530468b32e40f5d5031f9b37185a42cbe9cd8dae | c59892f2db42057e6be88aa895673257c7751f66 | refs/heads/master | 2016-09-06T07:51:42.532014 | 2015-03-14T00:00:00 | 2015-03-14T00:00:00 | 17,693,483 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,707 | rd | SAVE.controls.Rd | \name{SAVE.controls}
\alias{SAVE.controls}
\title{ Control parameters for kriging process. }
\description{ Returns a list suitable as the \code{kriging.controls} argument of the \code{SAVE} function.}
\usage{SAVE.controls(lower = NULL, upper = NULL, optim.method = "BFGS", parinit = NULL,...)}
\arguments{
\item{lower}{(see below)}
\item{upper}{optional vector containing the bounds of the correlation
parameters for optimization in function \code{\link[DiceKriging]{km}} in package \code{DiceKriging}. The default values are given by \code{\link[DiceKriging]{covParametersBounds}}.}
\item{optim.method}{an optional character string indicating which
optimization method is chosen for the likelihood
maximization. \code{"BFGS"} is the \code{optim} quasi-Newton procedure
of package \code{stats}, with the method "L-BFGS-B". \code{"gen"} is
the \code{genoud} genetic algorithm (using derivatives) from
package \code{rgenoud} (>= 5.3.3).}
\item{parinit}{an optional vector containing the initial values for the
variables to be optimized over. If no vector is given, an
initial point is generated as follows. For method \code{"gen"},
the initial point is generated uniformly inside the
hyper-rectangle domain defined by \code{lower} and \code{upper}. For
method \code{"BFGS"}, some points (see \code{control} below) are
generated uniformly in the domain. Then the best point with
respect to the likelihood (or penalized likelihood, see
\code{penalty}) criterion is chosen.}
  \item{\dots}{arguments for \code{SAVE.controls} if they are not given explicitly. The current version ignores any arguments other than those listed above.}
}
\value{ a list of parameters that are allowed to be passed to the \code{\link[DiceKriging]{km}} function in package \code{\link[DiceKriging]{DiceKriging}}.
\item{lower}{(see above)}
\item{upper}{(see above)}
\item{optim.method}{(see above)}
\item{parinit}{(see above)}
}
\author{
Jesus Palomo, Rui Paulo and Gonzalo Garcia-Donato
}
\seealso{See also \code{\link[DiceKriging]{DiceKriging}} for more details on the parameters.}
\examples{
\dontrun{
library(SAVE)
sc <- SAVE.controls(lower = 0.2, upper = 0.7, optim.method = "BFGS")
sc1 <- SAVE.controls(lower = 0.2, upper = 0.7, optim.method = "BFGS",
coef.var=1, coef.trend=0)
### In this case, the parameters \code{coef.var} and \code{coef.trend}
### will be deprecated since \code{\link{SAVE}} does not allow to fix
### any parameter different than: \code{lower}, \code{upper},
### \code{optim.method} and \code{parinit}.
}
}
\keyword{ internal }
|
353baa8b9c99a98c90407c591492cb15aa114a85 | 50899f7d432f8604984301a86936f580cab012cd | /analyses/examine_batch_ethnicity_effects_by_gene_wrap_parallelized.R | 0024d18c98e7e682c68a962e62c0a90ad4ae5fb4 | [
"MIT"
] | permissive | Huang-lab/SequenceVariantCallEvaluation | e4476419b1cbdaaf4fbd4eadbcb87c7fd9b0ac15 | 994e52d87f40c781ebe55a80fd6109f8aa9ec7de | refs/heads/master | 2020-04-04T20:18:38.711915 | 2018-12-04T19:53:21 | 2018-12-04T19:53:21 | 156,241,982 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,807 | r | examine_batch_ethnicity_effects_by_gene_wrap_parallelized.R | ##### examine_batch_ethnicity_effects.R #####
# Kuan-lin Huang @ MSSM
# implementation of LMG to find independent contribution of regressors
# library("relaimpo") # couldn't get this to work, seems to require all quantitative variables
library("hier.part")
system("mkdir out")
# take input arguments
args = commandArgs(trailingOnly=TRUE)
# read in files
if (length(args)<1) {
stop("At least two argument must be supplied [input file] [output tag name].n", call.=FALSE)
}
input_file = args[1]
out_name = args[2]
# read in clinical data file
clin_f = "PanCan_ClinicalData_V4_wAIM_filtered10389.txt"
clin = read.table(header=T, quote = "", sep="\t", fill =T, file = clin_f, stringsAsFactors=FALSE)
# read in and process meta data file
meta_f = "tcga_meta_data.normals_only.Cases_N10389.txt"
meta = read.table(header=F, sep="\t", file=meta_f, fill=T)
colnames(meta) = c("bcr_patient_barcode","ID","cancer","X","assay","center","date","platform","Y","reagent","capture")
meta$capture_brief = gsub("\\|.*","",meta$capture)
meta$year = gsub(".*/.*/([0-9]+)","\\1",meta$date)
meta = meta[order(meta$year,decreasing = T),]# keep only the later year
meta_uniq = meta[!duplicated(meta$bcr_patient_barcode),]
meta_uniq$analyte = gsub("TCGA-..-....-...-..(.)-.*","\\1",meta_uniq$ID)
meta_uniq$analyte[meta_uniq$analyte=="X"] = "D"
# read in and process istat variant count file
istat_all = read.table(header=F, sep="\t", file=gzfile(input_file), fill=T)
colnames(istat_all) = c("ID","NALT","NMIN","NHET","NVAR","RATE","SING","TITV","PASS","PASS_S","QUAL","DP","geneName","fileName")
istat_all$geneName = gsub("istat/","",istat_all$geneName)
istat_all$bcr_patient_barcode = substr(istat_all$ID,1,12)
##### individual level stats #####
for (gene in unique(istat_all$geneName)){
tryCatch({
# gene = unique(istat_all$geneName)[1] # trouble-shoot
istat = istat_all[istat_all$geneName==gene,]
istat_clin = merge(istat,clin[,c("bcr_patient_barcode","consensus_call")],by="bcr_patient_barcode")
istat_clin_meta = merge(istat_clin, meta_uniq[,-which(colnames(meta_uniq) %in% c("bcr_sample_barcode","X","Y","assay"))], by=c("ID"))
# note: can consider using capture_brief instead to avoid over-fitting/attribution
# LMG test
istat_clin_eur = istat_clin_meta[istat_clin_meta$consensus_call=="eur",]
# pdf("variation_BRCA1.pdf",width=10)
variation_explained = hier.part(istat_clin_meta$NVAR, istat_clin_meta[,c("capture","platform","reagent","year","center","analyte","consensus_call")], family = "gaussian", gof = "Rsqu")$I.perc
# dev.off()
variation_explained_eur = hier.part(istat_clin_eur$NVAR, istat_clin_eur[,c("capture","platform","reagent","year","center","analyte")], family = "gaussian", gof = "Rsqu")$I.perc
# # regression test
# fit = lm(data = istat_clin, NVAR ~ consensus_call + type + Center + Analyte) # the order of importance found using LMG
# summary(fit)
# results = data.frame(anova(fit))
#
# fit = lm(data = istat_clin, NVAR ~ consensus_call + type + Center + Analyte) # the order of importance found using LMG
# summary(fit)
# results = data.frame(anova(fit))
#
# fit = lm(data = istat_clin_eur, NVAR ~ type + Center + Analyte) # the order of importance found using LMG
# summary(fit)
# results = data.frame(anova(fit))
# concatenate gene name into output
variation_explained$gene = gene
variation_explained_eur$gene = gene
write.table(variation_explained, quote=F,col.names = F,file = paste("out/all_gene_istat_full_exon_",out_name,".tsv",sep=""),
append = TRUE, sep = "\t")
write.table(variation_explained_eur, quote=F,col.names = F,file = paste("out/all_gene_istat_eur_full_exon_",out_name,".tsv",sep=""),
append = TRUE, sep = "\t")
}, error=function(e){cat("ERROR for gene:",gene, "\n")})
}
|
6606ba45b4ee4543f88f4cd75c0e8771db390573 | 8d119dec4eee3d38e8a72456b09e2f95002bbf93 | /Bike_SF/pull.R | c5e52d3f991df990de37d7f1f138f75eeeb40c91 | [] | no_license | ksk8/SFbiking | a918470c7abd873d568864c280da94441d76fd4c | 7cd0a8d508b3c0a0281f5f46a15030e45193e670 | refs/heads/master | 2020-04-01T20:27:37.625868 | 2016-07-11T19:22:41 | 2016-07-11T19:22:41 | 63,095,624 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 104 | r | pull.R | pull <- function(x,y) {x[,if(is.name(substitute(y))) deparse(substitute(y)) else y, drop = FALSE][[1]]}
|
0cc3b2cde1d20e0bc5ba521e1dc0b6aea37f1eba | 62ee1070ede548d33df78210a1aafa0831309dc5 | /global.R | 0239f847071b0cdc1bbb45473ff5bb94bb705e0d | [] | no_license | brianmwangy/Teen-pregnancy-application | fd4666283dab4cf0c0e7185b4ba06e2af3c2bd8f | ed8b8fe357e8ee16b73a36533e1a236377ff657e | refs/heads/main | 2023-01-13T17:32:46.603982 | 2020-11-21T06:16:21 | 2020-11-21T06:16:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 848 | r | global.R | library(dplyr)
library(readr)
library(zoo)
library(tidyr)
library(rgdal)
library(raster)
#loading national teen pregnancy stats
national<-read_csv("./www/nation.csv")
national$period<-as.yearqtr(national$period,"%Y Q%q")
#loading ward teen pregnancy stats
wardf<-read_csv("./www/wardf2.csv")
wardf$period<-as.yearqtr(wardf$period,"%Y Q%q")
yr<-unique(wardf$period)
cnty<-unique(wardf$county)
preg<-unique(wardf$Type)
#loading county stats
county<-read_csv("./www/county.csv")
county$period<-as.yearqtr(county$period,"%Y Q%q")
cnty2<-unique(county$county)
#loading subcounty stats
subcounty<-read_csv("./www/subcounty.csv")
subcounty$period<-as.yearqtr(subcounty$period,"%Y Q%q")
cnty3<-unique(subcounty$county)
preg2<-unique(subcounty$type)
#loading the kenya basemap
bsmap<-readOGR("./www/year2.shp")
|
6d4a9d9b40580c1f5e4361ad1c6c429c7eaf95c9 | 58f46e3a93ef5369ad8dc73ea3b21db829f1dcac | /tests/test-all.R | fe37b93d3a6f37bc90cfeb9db3902c734d95cf3d | [] | no_license | firebitsbr/sslsaran | 4702023b7eae80a60e95e8021aa7ee67b488e332 | 059dce4fb35e18596642216e05895ca3e18f9e6c | refs/heads/master | 2020-04-07T21:23:27.975802 | 2018-02-20T17:51:43 | 2018-02-20T17:51:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 41 | r | test-all.R | library(testthat)
test_check("sslsaran")
|
b4c7304abf4b1c8e960bf5b7b9dad3b3aba625eb | f18e1210ca120c9116e356a8549e89e04219dc75 | /tests/by_hand_tests/test_trelliscope.R | 5ba37aa8d5cc00b277e78a8664705b3f066660db | [
"BSD-2-Clause"
] | permissive | EMSL-Computing/ftmsRanalysis | 46c73a727d7c5d5a5320bf97a07e9dac72abd281 | dd3bc3afbf6d1250d1f86e22b936dcc154f4101d | refs/heads/master | 2023-07-21T18:13:26.355313 | 2023-02-09T17:03:09 | 2023-02-09T17:03:09 | 122,233,846 | 14 | 10 | NOASSERTION | 2023-07-11T16:34:15 | 2018-02-20T17:52:18 | R | UTF-8 | R | false | false | 4,837 | r | test_trelliscope.R | ## Tests of creating trelliscope displays with the convenience functions in ftmsRanalysis
library(ftmsRanalysis)
library(trelliscope)
vdbDir <- vdbConn(file.path(tempdir(), "trell_test"), autoYes = TRUE)
data('exampleProcessedPeakData')
##########################################################
## SAMPLE PLOTS
sampleDdo <- divideBySample(exampleProcessedPeakData)
## TEST S-1: VK plot, color by VK category
panelFnS1 <- panelFunctionGenerator("vanKrevelenPlot", vkBoundarySet="bs2", title="Test")
makeDisplay(sampleDdo,
panelFn=panelFnS1,
cogFn=vanKrevelenCognostics(vkBoundarySet="bs2"),
name = "Trelliscope test S_1 with VK",
group = "Sample")
## TEST S-2: Kendrick plot, color by VK category
panelFnS2 <- panelFunctionGenerator("kendrickPlot", vkBoundarySet="bs1")
makeDisplay(sampleDdo,
panelFn=panelFnS2,
cogFn=kendrickCognostics(vkBoundarySet="bs1"),
name = "Trelliscope test S_2 with Kendrick",
group = "Sample")
## TEST S-3: VK plot, color by Intensity
panelFnS3 <- panelFunctionGenerator("vanKrevelenPlot", colorCName="Intensity")
makeDisplay(sampleDdo,
panelFn=panelFnS3,
cogFn=vanKrevelenCognostics(),
name = "Trelliscope test S_3 with VK",
group = "Sample")
## TEST S-4: densityPlot of NOSC
panelFn4 <- panelFunctionGenerator("densityPlot", variable="NOSC")
makeDisplay(sampleDdo,
panelFn=panelFn4,
cogFn=densityCognostics("NOSC"),
name = "Trelliscope test S_4 with density",
group = "Sample")
##########################################################
## GROUP PLOTS
groupDdo <- divideByGroup(exampleProcessedPeakData)
groupSummaryDdo <- summarizeGroups(groupDdo, summary_functions = c("prop_present", "n_present"))
## TEST G-1: VK group plot, color by proportion present
panelFnG1 <- panelFunctionGenerator("vanKrevelenPlot", colorCName=expr(paste0(getSplitVar(v, "Group"), "_prop_present")),
legendTitle="Proportion<br>Present")
makeDisplay(groupSummaryDdo,
panelFn=panelFnG1,
cogFn=vanKrevelenCognostics(),
name = "Trelliscope test G_1 with VK plot per group",
group = "Group")
## TEST G-2: Kendrick group plot, color by n present
panelFnG2 <- panelFunctionGenerator("kendrickPlot", colorCName=expr(paste0(getSplitVar(v, "Group"), "_n_present")),
legendTitle="Number<br>Present")
makeDisplay(groupSummaryDdo,
panelFn=panelFnG2,
cogFn=kendrickCognostics(),
name = "Trelliscope test G_2 with Kendrick plot per group",
group = "Group")
## TEST G-3: classes plot for each group
panelFnG3 <- panelFunctionGenerator("classesPlot")
makeDisplay(groupDdo,
panelFn=panelFnG3,
name = "Trelliscope test G_3 with classesPlot",
group = "Group")
## TEST G-4: density plot for each group
panelFnG4 <- panelFunctionGenerator("densityPlot", variable="NOSC", groups=NA)
makeDisplay(groupDdo,
panelFn=panelFnG4,
cogFn=densityCognostics("NOSC"),
name = "Trelliscope test G_4 with densityPlot",
group = "Group")
view()
##########################################################
## GROUP COMPARISON PLOTS
exampleProcessedPeakData <- assign_class(exampleProcessedPeakData, "bs1")
grpCompDdo <- divideByGroupComparisons(exampleProcessedPeakData, "all")
grpCompSummaryDdo <- summarizeGroupComparisons(grpCompDdo, summary_functions="uniqueness_gtest",
summary_function_params=list(uniqueness_gtest=list(pres_fn="nsamps", pres_thresh=2, pvalue_thresh=0.05)))
## TEST GC-1: VK group comparison plot
panelFnGC1 <- panelFunctionGenerator("vanKrevelenPlot", colorCName="uniqueness_gtest")
makeDisplay(grpCompSummaryDdo,
panelFn=panelFnGC1,
cogFn=vanKrevelenCognostics(),
name = "Trelliscope test GC_1 with VK group comparison",
group = "Group_Comparison")
## TEST GC-2: Kendrick group comparison plot
panelFnGC2 <- panelFunctionGenerator("kendrickPlot", colorCName="uniqueness_gtest")
makeDisplay(grpCompSummaryDdo,
panelFn=panelFnGC2,
cogFn=kendrickCognostics(),
name = "Trelliscope test GC_2 with Kendrick group comparison",
group = "Group_Comparison")
## TEST GC-3: Density group comparison plot
panelFnGC3 <- panelFunctionGenerator("densityPlot", variable="NOSC", samples=FALSE, groups=NA)
makeDisplay(grpCompDdo,
panelFn=panelFnGC3,
cogFn=densityCognostics("NOSC"),
name = "Trelliscope test GC_3 with density on group comparison",
group = "Group_Comparison")
|
9fd435e8320de866c44a3947a51a83e4e9077202 | 747564318a56ac683165b03ee6f1157712c2e4ce | /inst/examples/list_experiments.R | 78e8d5bd92d6155c4e73659821804e3b32250311 | [] | no_license | dataandcrowd/gamar | 626a9758ec58b81db71f3363a77e732cf092eecc | 3e9dd2b61b55816f79c16d152d5de32d6e9ea71d | refs/heads/master | 2023-04-02T18:00:01.423668 | 2021-04-09T14:44:43 | 2021-04-09T14:44:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 94 | r | list_experiments.R | gaml_file <- system.file("models", "sir.gaml", package = "gamar")
list_experiments(gaml_file)
|
df1980c294cb72c38e496743aad1c58eec9cacba | 3a5fa834091a8fd9d9749fcd6cb2a0bfea46ac62 | /trying_so_hard.R | 147a3899e2bc1c9c6043b577d54f8be33d67a082 | [] | no_license | foundinblank/study1adults | 47100eb05ef9237ef452bb8e6e5497a6a17241e2 | 16239dbdb1a2b88678a072eb398a6d1a025c7d9c | refs/heads/master | 2022-08-03T17:41:14.491830 | 2020-05-23T12:11:23 | 2020-05-23T12:11:23 | 102,015,531 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,949 | r | trying_so_hard.R | # Run to line 300
monica_y <- monica %>% pull(y_ma5)
rhand_y <- rhand_expanded %>% pull(y)
monica_new <- monica_y[121:2400]
rhand_new <- rhand_y[121:2400]
mlpar <- list(lgM = 120,
radiusspan = 100,
radiussample = 10,
normalize = 2,
rescale = 2,
mindiagline = 2,
minvertline = 2,
tw = 0,
whiteline = FALSE,
recpt = FALSE,
fnnpercent = 10,
typeami = "maxlag")
lists <- data_ma %>%
group_by(name) %>%
fill(y_ma5, .direction = "down") %>%
fill(y_ma5, .direction = "up") %>%
# slice(121:(n()-120)) %>%
summarise(y = list(y_ma5))
library(furrr)
plan(multiprocess)
optimize_params <- lists %>%
mutate(params = future_map(y, ~ optimizeParam(.x, rhand_y, mlpar)))
output_params <- optimize_params %>%
group_by(name) %>%
mutate(r = pluck(params, 1, 1),
dim = pluck(params, 1, 2),
delay = pluck(params, 1, 3))
param_means <- output_params %>%
ungroup() %>%
summarise(r_mean = ceiling(mean(r)),
dim_mean = ceiling(mean(dim)),
delay_mean = ceiling(mean(delay)))
crqa_data <- lists %>%
group_by(name) %>%
mutate(rhand = future_map(y, ~ crqa(.x, rhand_y,
delay = param_means$delay_mean,
embed = param_means$dim_mean,
rescale = 2,
radius = param_means$r_mean+2,
normalize = 2,
mindiagline = 2,
minvertline = 2,
tw = 0,
whiteline = FALSE,
recpt = FALSE,
side = 'both')))
write_rds(crqa_data, "~/Desktop/crqa.rds")
crqa_results <- crqa_data %>%
mutate(rhand_rr = map_dbl(rhand, pluck, "RR"),
rhand_det = map_dbl(rhand, pluck, "DET")) %>%
left_join(participants) %>%
select(name, maingroup, rhand_rr, rhand_det)
crqa_results %>%
ggplot(aes(x = maingroup, y = rhand_rr, color = maingroup)) +
geom_jitter()
crqa_results %>%
ggplot(aes(x = rhand_rr)) + geom_histogram()
params_and_output <- crqa_results %>%
left_join(output_params, by = "name") %>%
select(-y, -params)
params_and_output %>%
ggplot(aes(x = rhand_det, y = rhand_rr, label = name)) +
geom_point()
library(GGally)
ggpairs(params_and_output[,3:7])
# fitler out one extreme value...
params_and_output %>%
ungroup() %>%
filter(rhand_rr < 60) %>%
ggstatsplot::ggbetweenstats(x = maingroup, y = rhand_rr)
that1 <- optimizeParam(monica_y, rhand_y, mlpar)
that1
rqaAns <- crqa(monica_y, rhand_y, radius = 33, embed = 14, delay = 117, rescale = 2, normalize = 2, mindiagline = 2, minvertline = 2, tw = 0, whiteline = FALSE, recpt = FALSE, side = "both")
rqaMetrics <- c(rqaAns[1], rqaAns[2], rqaAns[5]); rqaMetrics
mRP <- data.table::melt(as.matrix(rqaAns$RP), varnames=c("TimeV1", "TimeV2"), value.name="Recurrence")
binary <- ggplot(mRP, aes(x = TimeV1, y = TimeV2, fill = Recurrence)) +
geom_raster() +
theme(axis.line = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank(),
axis.title = element_blank()) +
ggtitle("Binary Cross Recurrence Plot") +
scale_fill_manual(values = c("#9999ff","#000066"),
breaks = c(TRUE, FALSE)) +
theme(legend.position = "none", plot.title = element_text(size = 16, face = "bold"))
binary
library(tseriesChaos)
lists %>%
group_by(name) %>%
mutate(ami = map(y, ~ mutual(.x, lag.max = 240, plot = F))) %>%
mutate(that = map_dbl(ami, min))
OKAY BREAKTHROUGH |
42bada5f61d1b7f0587017bb00a44f42c2f036e8 | 441f9a4ab2fd14bd529dc2edcc12b15495c034fb | /src/eval_shared_pathway_auc.R | 10c67321d9878c05854257b9f04555e7cdf3df65 | [
"MIT"
] | permissive | alorchhota/spice_analysis | 9e6670225c69be7ece92ed9b5f81d1c0659b995b | f66b43f9aa75e4bed416abdaaa5327bc3c27c6f9 | refs/heads/master | 2023-03-22T04:07:41.109076 | 2021-03-11T22:10:46 | 2021-03-11T22:10:46 | 300,128,702 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,168 | r | eval_shared_pathway_auc.R | library(argparser)
library(spice)
args <- arg_parser("program");
args <- add_argument(args, "--net", help="network file (rds)", default="/Users/ashissaha/github/spice_analysis/results/spice_results/gtex_v8/results/Muscle_Skeletal/corrected/AXPVAR/1500/pearson_network.rds")
args <- add_argument(args, "--pathway", help="pathway file (rds)", default="/Users/ashissaha/github/spice_analysis/results/spice_results/shared_data/msigdb/kegg_genesets.rds")
args <- add_argument(args, "--curve", help="Should the curves be returned?", default=TRUE)
args <- add_argument(args, "--max", help="compute max auc?", default=TRUE)
args <- add_argument(args, "--min", help="compute min auc?", default=TRUE)
args <- add_argument(args, "--rand", help="compute random auc?", default=FALSE)
args <- add_argument(args, "--dg", help="compute auc using Davis and Goadrich algorithm?", default=FALSE)
args <- add_argument(args, "--na_rm", help="remove NA?", default=FALSE)
args <- add_argument(args, "--neg", help="negative value handling", default="error")
args <- add_argument(args, "--o", help="Output file (rds)", default="results/shared_pathway_auc.rds")
### parse args
argv = parse_args(args)
net_fn = argv$net
pathway_fn = argv$pathway
curve = as.logical(argv$curve)
max_compute = as.logical(argv$max)
min_compute = as.logical(argv$min)
rand_compute = as.logical(argv$rand)
dg_compute = as.logical(argv$dg)
na_rm = as.logical(argv$na_rm)
neg_treat = argv$neg
out_fn = argv$o
### check variables
stopifnot(file.exists(net_fn))
stopifnot(file.exists(pathway_fn))
stopifnot(neg_treat %in% c("none", "warn", "error"))
stopifnot(dir.exists(dirname(out_fn)))
### load from inputs
net_mat = readRDS(net_fn)
pathways = readRDS(pathway_fn)
### check network
stopifnot(is.matrix(net_mat))
stopifnot(length(rownames(net_mat))>0)
### compute shared-pathway auc
shared_pathway_auc = spice::coexpression_shared_pathway_auc(
net = net_mat,
pathways = pathways,
curve = curve,
max.compute = max_compute,
min.compute = min_compute,
rand.compute = rand_compute,
dg.compute = dg_compute,
na.rm = na_rm,
neg.treat = neg_treat)
### save
saveRDS(shared_pathway_auc, file = out_fn)
|
0af33358309e438be8e1511cadaee92107651510 | 4d07eecae0429dc15066b34fbe512b8ff2ae53ea | /mds/aral/pointplot.R | 6a1880e4a0f8852a5a9b16302231f4148d681669 | [] | no_license | distanceModling/phd-smoothing | 7ff8ba7bace1a7d1fa9e2fcbd4096b82a126c53c | 80305f504865ce6afbc817fff83382678864b11d | refs/heads/master | 2020-12-01T09:31:24.448615 | 2012-03-27T18:35:45 | 2012-03-27T18:35:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 348 | r | pointplot.R | # load the fitted model data file and look at what happened.
load("aral/aralfit.RData")
pdf(file="../thesis/mds/figs/aral-pp.pdf",width=6,height=3)
par(mfrow=c(1,2),las=1,mgp=c(1.5,0.75,0),mar=c(3,3,2,2),cex.axis=0.5,cex.lab=0.7)
plot(pred.grid,pch=".",xlab="x",ylab="y",asp=1)
plot(pred.grid.mds,pch=".",xlab="x*",ylab="y*",asp=1)
dev.off()
|
23ae3ae9b2e0b92ec844d4b92d7247385a3c6d0a | d97e2169ce9cd893920a54cffa3e754d1e309e6f | /R/gen_coreDataManip.r | 0767953b7b157a3508bb2c246bb0ea52c9111741 | [] | no_license | bpollner/aquap2 | 5ccef0ba4423413e56df77a1d2d83967bffd1d04 | 7f7e2cf84056aad4c8a66f55d099b7bdaa42c0be | refs/heads/master | 2021-07-22T15:07:22.912086 | 2021-05-27T12:50:22 | 2021-05-27T12:50:22 | 30,932,899 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 36,467 | r | gen_coreDataManip.r | #' @title Perform Smoothing and / or m-th Derivative
#' @description Performs a smoothing and / or the calculation of the m-th
#' derivative on the given dataset using the Savitzky-Golay filter.
#' @details The underlying function is \code{\link[signal]{sgolayfilt}}.
#' @param dataset An object of class 'aquap_data" as produced by
#' \code{\link{gfd}} or as can be extracted from a 'cube' via
#' \code{\link{getcd}}.
#' @param p Numeric length one, the order of the filter. Default = 2.
#' @param n Numeric length one, the filter length, must be odd. Default = 21.
#' @param m Numeric length one, Return the m-th derivative of the filter
#' coefficients. Default = 0.
#' @param exportModel Logical. If a possible model should be stored in the set,
#' (leave at \code{FALSE}).
#' @return Returns the dataset with the NIR-data smoothed or transformed.
#' @examples
#' \dontrun{
#' fd <- gfd() # get the full dataset
#' fd <- selectWls(fd, 1100, 1800)
#' fd_avg <- do_avg(fd, n=71) # apply strong smoothing
#' plot(fd - fd_avg, pg.where="", pg.main="| smoothed subtracted")
#' }
#' @family Data pre-treatment functions
#' @family dpt modules documentation
#' @export
do_sgolay <- function(dataset, p=2, n=21, m=0, exportModel=FALSE) {
autoUpS()
cns <- colnames(dataset$NIR)
rns <- rownames(dataset$NIR)
NIR <- t(apply(dataset$NIR, 1, signal::sgolayfilt, p=p, n=n, m=m))
exportAdditionalModelToAp2Env(doExport=exportModel, thisMod=NULL, thisType=pv_dptModules[1]) # sgol
colnames(NIR) <- cns
rownames(NIR) <- rns
dataset$NIR <- I(NIR)
return(dataset)
} # EOF
# Variant of do_sgolay for situations without access to the .ap2 environment,
# e.g. inside parallel workers (no autoUpS, no model export).
do_sgolay_sys <- function(dataset, p=2, n=21, m=0) {
	nirFiltered <- t(apply(dataset$NIR, 1, signal::sgolayfilt, p=p, n=n, m=m))
	dimnames(nirFiltered) <- dimnames(dataset$NIR) # apply() drops the wavelength names
	dataset$NIR <- I(nirFiltered)
	return(dataset)
} # EOF
### for baseline removal
#' @title Calculate Standard Normal Variation SNV
#' @description Calculate the standard normal variation (SNV) by autoscaling
#' every single spectrum (row) of the NIR-data, i.e. centering and scaling
#' each observation over its wavelengths.
#' @inheritParams do_sgolay
#' @return Returns the dataset with the transformed NIR data.
#' @examples
#' \dontrun{
#' fd <- gfd() # get the full dataset
#' fd <- selectWls(fd, 1100, 1800)
#' fd_snv <- do_snv(fd)
#' plot(fd - fd_snv, pg.where="", pg.main="| snv subtracted")
#' }
#' @family Data pre-treatment function
#' @family dpt modules documentation
#' @export
do_snv <- function(dataset, exportModel=FALSE) {
	autoUpS()
	# transpose so that scale() works per observation, then transpose back
	nirScaled <- t(scale(t(dataset$NIR), center=TRUE, scale=TRUE))
	exportAdditionalModelToAp2Env(doExport=exportModel, thisMod=NULL, thisType=pv_dptModules[2]) # snv
	dimnames(nirScaled) <- dimnames(dataset$NIR)
	dataset$NIR <- I(nirScaled)
	return(dataset)
} # EOF
#' @title Perform MSC
#' @description Performs MSC (multiplicative scatter correction) on the
#' provided dataset.
#' @details If no reference is provided, the average of all spectra of the provided
#' dataset is used as a reference for the baseline correction. Provide a dataset
#' with a single spectrum (as e.g. produced by \code{\link{do_avg}}) to use this
#' as a reference for baseline correction. Internally, the function
#' \code{\link[pls]{msc}} is used.
#' @inheritParams do_sgolay
#' @param ref An object of class 'aquap_data' containing a single spectrum,
#' i.e. a single row (as e.g. produced by \code{\link{do_avg}}, or by
#' subscripting via '[]'). If no reference is provided, the average of all spectra
#' of the provided dataset is used as a reference for the baseline correction.
#' @param extMscModel Provide an external msc model to use this for predicting
#' the data instead of running msc on the data. Experimental feature.
#' @return Returns the dataset with the transformed NIR data.
#' @seealso \code{\link{ssc}} for sub-selecting in a dataset using a specific
#' criterion.
#' @examples
#' \dontrun{
#' fd <- gfd() # get the full dataset
#' fd <- selectWls(fd, 1100, 1800)
#' fd_msc <- do_msc(fd)
#' plot(fd - fd_msc, pg.where="", pg.main="| msc subtracted")
#' fd_msc_ref <- do_msc(fd, ref=fd[4]) # use the 4th row of fd as a reference
#' plot(fd - fd_msc_ref, pg.where="", pg.main="| 4th row as reference")
#' fd_msc_mq <- do_msc(fd, ref=do_avg(ssc(fd, C_Group=="MQ"))) # is assuming the
#' # existence of a column name "C_Group" and one or more of its values being "MQ"
#' plot(fd - fd_msc_mq, pg.where="", pg.main="| average of MQ as reference")
#' }
#' @seealso \code{\link{getcd}}
#' @family Data pre-treatment functions
#' @family dpt modules documentation
#' @export
do_msc <- function(dataset, ref=NULL, extMscModel=NULL, exportModel=FALSE) {
	autoUpS()
	if (!is.null(ref)) {
		# use is() instead of comparing class() directly: 'aquap_data' is used as
		# an S4 class elsewhere in this file (@.Data), and is() also accepts
		# possible subclasses (backward-compatible)
		if (!is(ref, "aquap_data")) {
			stop("Please provide an object of class 'aquap_data' to the argument 'ref'.", call.=FALSE)
		}
		if (nrow(ref) != 1) {
			stop("Please provide a dataset with only one single row, i.e. only one single spectrum, to the argument 'ref'.", call.=FALSE)
		}
		if (ncol(ref$NIR) != ncol(dataset$NIR)) {
			stop("Please provide a dataset containing the same number of wavelengths to the argument 'ref'", call.=FALSE)
		}
		refInput <- as.numeric(ref$NIR)
	} else {
		refInput <- NULL # pls::msc then uses the column means of the data as reference
	}
	if (is.null(extMscModel)) {
		NIR <- pls::msc(dataset$NIR, refInput)
	} else {
		# experimental: apply a previously obtained msc model instead of re-fitting
		NIR <- predict(extMscModel, as.matrix(dataset$NIR))
	}
	exportAdditionalModelToAp2Env(doExport=exportModel, thisMod=NIR, thisType=pv_dptModules[3]) # msc
	colnames(NIR) <- colnames(dataset$NIR)
	rownames(NIR) <- rownames(dataset$NIR)
	dataset$NIR <- I(NIR)
	return(dataset)
} # EOF
#' @title Average spectra in a dataset
#' @description Calculate the average spectrum of all the given spectra in a
#' dataset, or define groups to be averaged by providing one or more
#' class-variable.
#' @details The total and group-wise average spectrum is returned each in a
#' single row with the header-data taken from the first row of the subset of
#' the original dataset as defined by the grouping. If parameter \code{clv} is
#' left at the default NULL, all the spectra in the dataset will be averaged
#' together into a single row.
#' @inheritParams do_sgolay
#' @param clv Character vector, one or more valid class-variables defining the
#' subsets of the dataset to be averaged.
#' @param inParallel Logical, if the averaging of spectra should be done in
#' parallel. Defaults to TRUE.
#' @return The transformed dataset.
#' @examples
#' \dontrun{
#' fd <- gfd() # get the full dataset
#' fd <- selectWls(fd, 1100, 1800)
#' fd_avg <- do_avg(fd)
#' fd_avg2 <- do_avg(ssc(fd, C_water=="MQ")) # assumes the existence of column "C_water"
#' plot(fd_avg, pg.where="", pg.main="| averaged")
#' plot(fd - fd_avg, pg.where="", pg.main="| avg subtracted")
#' #######
#' fd_avg_group <- do_avg(fd, "C_Group") # averaging within groups
#' fd_avg_time_group <- do_avg(fd, c("C_Time", "C_Group")) # averaging within a
#' # subset defined by "C_Time" and "C_Group"
#' }
#' @family Data pre-treatment functions
#' @seealso \code{\link{getcd}}
#' @export
do_avg <- function(dataset, clv=NULL, inParallel=TRUE) {
	autoUpS()
	if (is.null(clv)) { # no grouping: average all spectra into one single row
		NIR <- matrix(apply(dataset$NIR, 2, mean), nrow=1)
		colnames(NIR) <- colnames(dataset$NIR)
		dsNew <- dataset[1,] # keep the header-data of the first row
		dsNew$NIR <- I(NIR)
		return(dsNew)
	} # end is.null(clv)
	#
	cns <- colnames(dataset$header)
	if (!all(clv %in% cns)) {
		ind <- which(!clv %in% cns)
		stop(paste0("Sorry, the class-variables '", paste(clv[ind], collapse="', '"), "' seem not to be present in the provided dataset."), call.=FALSE)
	}
	fdf <- makeFlatDataFrameMulti(dataset, clv)
	if (inParallel) {
		registerParallelBackend()
	} else {
		registerDoSEQ()
	}
	##
	fdfMean <- plyr::ddply(fdf, .variables=clv, .fun=plyr::colwise(mean), .parallel=inParallel) ##### CORE #####
	NIR <- as.matrix(fdfMean[,-(1:length(clv))])
	# drop=FALSE keeps 'smh' a data frame even with a single class-variable;
	# without it, the subsetting collapsed to a vector and nrow(smh) / smh[i,k]
	# below failed ("XXX problem with collapsing" fixed)
	smh <- fdfMean[, 1:length(clv), drop=FALSE]
	##
	# re-construct header and colRep: for every averaged group, take the data of
	# the first matching row of the original dataset; start with one NA seed row
	# (removed again below) so that rbind has something to bind to
	header <- as.data.frame(matrix(rep(NA, ncol(dataset$header)), nrow=1))
	colnames(header) <- colnames(dataset$header)
	colRep <- as.data.frame(matrix(rep(NA, ncol(dataset$colRep)), nrow=1))
	colnames(colRep) <- colnames(dataset$colRep)
	ds <- dataset
	for (i in seq_len(nrow(smh))) {
		for (k in seq_len(ncol(smh))) {
			ds <- ssc_s(ds, clv[k], smh[i,k]) # search the matching data to construct the header
		} # end for k
		ds <- ds[1,] # only take the first row
		siH <- ds$header
		class(siH) <- "data.frame"
		header <- rbind(header, siH)
		siCol <- ds$colRep
		class(siCol) <- "data.frame"
		colRep <- rbind(colRep, siCol)
		ds <- dataset # reset for the next group
	} # end for i
	header <- header[-1,] # remove the NA seed row again
	colRep <- colRep[-1,]
	rownames(NIR) <- rownames(header)
	newData <- data.frame(I(header), I(colRep), I(NIR))
	dataset@.Data <- newData
	dataset@row.names <- rownames(header)
	for (i in seq_len(ncol(dataset$header))) { # re-factor the character columns
		if (all(is.character(dataset$header[,i]))) {
			dataset$header[i] <- factor(dataset$header[,i])
		}
	}
	return(dataset)
} # EOF
### input is a data frame with one or 2 loading vectors or one regression vector
# For every spectrum, a linear model against an intercept, the mean spectrum
# and the provided vector(s) is fitted; the fitted contribution of the
# vector(s) is then removed and the spectrum re-scaled by the coefficient of
# the mean spectrum. The result matrix is preallocated (previously it was
# grown row-wise via rbind, which is O(n^2)).
calc_emsc <- function(dataset, input) { ## this one possibly used "external"
	autoUpS()
	cnsWls <- colnames(dataset$NIR)
	rns <- rownames(dataset$NIR)
	NIRdata <- dataset$NIR
	class(NIRdata) <- "matrix" # strip the "AsIs" class
	ColMeansNIR <- colMeans(NIRdata)
	Ycor2 <- matrix(NA_real_, nrow=nrow(NIRdata), ncol=ncol(NIRdata)) # preallocate
	if (ncol(input) == 1) {
		Xcal1 <- cbind(rep(1, ncol(NIRdata)), ColMeansNIR, input[1])
		for (i in seq_len(nrow(NIRdata))) {
			Ycal <- as.data.frame(matrix(NIRdata[i,], ncol=1))
			tmp <- lm(Ycal$V1 ~ Xcal1[,1] + Xcal1[,2] + Xcal1[,3])
			# Xcal1[,1] is constant and thus collinear with the implicit intercept;
			# lm returns NA for its coefficient, which is dropped here via [-2]
			b <- tmp$coefficients[-2]
			Ycor2[i,] <- as.numeric(t(((Ycal-b[1])-b[3]*Xcal1[,3])/b[2])) ## here things are happening !!
		} # end for i
	} else {
		Xcal1 <- cbind(rep(1, ncol(NIRdata)), ColMeansNIR, input[1], input[2])
		for (i in seq_len(nrow(NIRdata))) {
			Ycal <- as.data.frame(matrix(NIRdata[i,], ncol=1))
			tmp <- lm(Ycal$V1 ~ Xcal1[,1] + Xcal1[,2] + Xcal1[,3] + Xcal1[,4])
			b <- tmp$coefficients[-2] # drop the NA coefficient of the constant column, see above
			Ycor2[i,] <- as.numeric(t(((Ycal-b[1]) - b[3]*Xcal1[,3]-b[4]*Xcal1[,4]) / b[2]))
		} # end for i
	} # all is done
	colnames(Ycor2) <- cnsWls
	rownames(Ycor2) <- rns
	out <- as.data.frame(Ycor2)
	return(out)
} # EOF
### input the vecLoad directly from the model via: pcaModel$loadings[, c(x,y)]
#' @title Perform EMSC
#' @description Performs EMSC with up to two signals on the provided dataset
#' @details A data frame with one or two loadings or with one regression vector
#' can be used as input to (try to) remove the effect on the data described by
#' said loadings / regr. vector. For example, from a pca-model the first and
#' second loading vector can be extracted with \code{pcaModel$loadings[, c(1,2)]}.
#' @inheritParams do_sgolay
#' @param vecLoad A data frame x (\code{ncol(x) <= 2}) with one or two loading
#' vectors or one regression vector.
#' @return Returns the dataset with the transformed NIR data.
#' @seealso \code{\link{getcm}} for easy extraction of single models where
#' loading vectors or a regression vector can be obtained.
#' @examples
#' \dontrun{
#' fd <- gfd()
#' cu <- gdmm(fd, getap(do.pca=TRUE)) # assumes no split
#' loadings <- getcm(cu, 1, what="pca")$loadings[, c(1,2)]
#' fd_emsc <- do_emsc(fd, loadings)
#' }
#' @family Data pre-treatment functions
#' @family dpt modules documentation
#' @export
do_emsc <- function(dataset, vecLoad=NULL, exportModel=FALSE) {
	autoUpS()
	# validate the NULL-default first, before the input is used in any way
	if (is.null(vecLoad)) {
		stop("Please provide a data frame with one or two loading vectors or one regression vector to the argument 'vecLoad' (do_emsc).", call.=FALSE)
	}
	input <- as.data.frame(vecLoad)
	if (ncol(input) > 2) {
		stop("At the moment, not more than 2 effects can be removed from the data. Please be a bit more content.", call.=FALSE)
	}
	NIR <- as.matrix(calc_emsc(dataset, input))
	exportAdditionalModelToAp2Env(doExport=exportModel, thisMod=NULL, thisType=pv_dptModules[4]) # emsc
	rownames(NIR) <- rownames(dataset)
	colnames(NIR) <- colnames(dataset$NIR)
	dataset$NIR <- I(NIR)
	return(dataset)
} #EOF
# Column-wise normalization of the NIR data via som::normalize.
# NOTE: now wraps the result in I() like every other dpt function in this
# file, so that the matrix keeps its "AsIs" behaviour inside the data frame.
do_scale <- function(dataset) {
	NIR <- som::normalize(dataset$NIR, byrow=FALSE)
	colnames(NIR) <- colnames(dataset$NIR)
	rownames(NIR) <- rownames(dataset$NIR)
	dataset$NIR <- I(NIR)
	return(dataset)
} # EOF
# Scale the NIR data around a "foreign center": the column means of the
# provided calibration table are used for centering, scaling is done by the
# root-mean-square of the centered columns (scale=TRUE with numeric center).
do_scale_fc <- function(dataset, calibAvgTable) { # used in aquagram norm foreign center
	centerVec <- colMeans(calibAvgTable)
	nirScaled <- scale(as.matrix(dataset$NIR), center=centerVec, scale=TRUE)
	dimnames(nirScaled) <- dimnames(dataset$NIR)
	dataset$NIR <- I(nirScaled)
	return(dataset)
} # EOF
#' @title Perform gap-segment derivatives
#' @description Performs gap-segment derivatives on the provided dataset. The
#' behaviour of the filter can be modified via its arguments, please see also
#' the documentation for \code{\link[prospectr]{gapDer}}.
#' @details The underlying function is \code{\link[prospectr]{gapDer}}.
#' @inheritParams do_sgolay
#' @param m Numeric length one. The order of the derivative, can be between 1 and
#' 4. Default is 1.
#' @param w Numeric length one. The filter length (should be odd), ie. the spacing
#' between points
#' over which the derivative is computed. Default is 1.
#' @param s Numeric length one. The segment size, i.e. the range over which the
#' points are averaged.Default is 1.
#' @param deltaW Numeric length one. The sampling interval / band spacing. Default
#' is 1.
#' @section Note:
#' The documentation for the parameters was mostly taken from
#' \code{\link[prospectr]{gapDer}} by Antoine Stevens.
#' @seealso \code{\link[prospectr]{gapDer}}
#' @examples
#' \dontrun{
#' fd <- gfd()
#' fd_gsd <- do_gapDer(fd)
#' fd_gsd2 <- do_gapDer(fd, 1,11,13,1)
#' plot(fd_gsd - fd_gsd2)
#' }
#' @family Data pre-treatment functions
#' @family dpt modules documentation
#' @export
do_gapDer <- function(dataset, m=1, w=1, s=1, deltaW, exportModel=FALSE) {
	autoUpS()
	nirIn <- getNIR(dataset)
	# gapDer gives back a matrix with one column less at the beginning and at
	# the end, so in total two wavelengths are missing compared to the input
	NIR <- prospectr::gapDer(nirIn, m=m, w=w, s=s, delta.wav=deltaW)
	# --- legacy code, kept for reference: it padded the result by duplicating
	# the first and the last column so that the output had the same number of
	# wavelengths as the input ---
#	NIR <- as.matrix(NIR)
#	wlsGap <- as.numeric(gsub("w", "", colnames(NIR)))
#	wlsD <- getWavelengths(dataset)
#	indLow <- which(wlsD == min(wlsGap))
#	indHigh <- which(wlsD == max(wlsGap))
#	nrLostLow <- length(1:indLow)
#	nrLostHigh <- length(indHigh: length(wlsD))
#	cat(paste("Low wls lost:", nrLostLow, "\n"))
#	cat(paste("High wls lost:", nrLostHigh, "\n"))
#	return(NIR)
#	first <- as.data.frame(NIR[,1])
#	last <- as.data.frame(NIR[, ncol(NIR)])
#	NIR <- cbind(first, NIR, last)
#	colnames(NIR) <- colnames(dataset$NIR)
#	rownames(NIR) <- rownames(dataset$NIR)
#	NIR <- as.matrix(NIR) # because otherwise the "AsIs" is behaving strange
	exportAdditionalModelToAp2Env(doExport=exportModel, thisMod=NULL, thisType=pv_dptModules[7]) # gapDer
	dataset$NIR <- I(NIR)
	return(dataset)
} # EOF
# Validates the 'src' (source) and 'trg' (target) wavelength-range arguments
# of the de-trend operation and resolves the character shortcuts of 'trg'
# ("src" -> same range as src, "all" -> full wavelength range of the dataset).
# SIDE EFFECT: the resolved 'trg' is written back into the CALLER's frame via
# assign(..., pos=parent.frame(n=1)) at the bottom -- there is no return value.
checkDeTrendSrcTrgInput <- function(dataset, src=NULL, trg="src") {
	addInfo <- "\n(For the latter, please see ?dpt_modules for further information.)"
	# deparse(substitute(arg)) recovers the name of the variable the caller
	# passed in, so the error messages can refer to 'src' resp. 'trg'
	checkNumLengthTwo <- function(arg) { # arg must be numeric length two
		argName <- deparse(substitute(arg))
		if (!all(is.numeric(arg)) | length(arg) != 2) {
			stop(paste0("Please provide a numeric length two to the argument '", argName, "' resp. in the de-trend argument in the analysis procedure / your input.", addInfo), call.=FALSE)
		}
	} # EOIF
	##
	checkWls <- function(arg, allWls= getWavelengths(dataset)) { # arg must lie within the dataset's wavelengths
		argName <- deparse(substitute(arg))
		if (min(arg) < min(allWls) | max(arg) > max(allWls)) {
			stop(paste0("Sorry, the range in the argument '", argName, "' is not within the wavelength-range of the provided dataset [", min(allWls) , " to ", max(allWls), "]. Please check your input at the argument '", argName, "' resp. in the de-trend argument in the analysis procedure / your input.", addInfo), call.=FALSE)
		}
	} # EOIF
	checkRange <- function(arg) { # arg must span a range greater than zero
		argName <- deparse(substitute(arg))
		if (diff(arg) <= 0) {
			stop(paste0("Please provide a wavelength range greater than zero for the argument '", argName, "' resp. in the de-trend argument in the analysis procedure / your input.", addInfo), call.=FALSE)
		}
	}
	if (!is.null(src)) { # src==NULL means: use the full wavelength range (checked by the caller)
		checkNumLengthTwo(src)
		checkWls(src)
		checkRange(src)
	}
	if (any(is.character(trg))) { # character trg must be one of the two shortcuts
		if (!trg %in% c("src", "all")) {
			stop(paste0("Please provide one of 'src', 'all' or a numeric length two to the argument 'trg' resp. in the de-trend argument in the analysis procedure / your input.", addInfo), call.=FALSE)
		}
	}
	if (all(trg == "src")) { # shortcut: target is the same range as the source
		trg <- src
	}
	if (all(trg == "all")) { # shortcut: target is the full wavelength range
		trg <- range(getWavelengths(dataset))
	}
	if (!is.null(trg)) { # re-check the resolved (possibly numeric) target range
		checkNumLengthTwo(trg)
		checkWls(trg)
		checkRange(trg)
	}
	# write the resolved 'trg' back into the calling function's environment
	assign("trg", trg, pos=parent.frame(n=1))
	##
} # EOF
#' @title Perform De-Trend
#' @description Perform de-trending of the given dataset. For each observation,
#' the linear model \code{lm(absorbance ~ wavelength)} is calculated. The
#' coefficients of the resulting model are then used to modify the absorbance
#' values after the formula
#' \code{newAbsorbance = absorbance - intercept - k*wavelength}. It is possible
#' to separately specify the source and the target of the de-trend operation.
#' @details Via the arguments \code{src} ('source') and \code{trg} ('target')
#' it is possible to specify separately from which wavelength-range the values
#' for the de-trend should be taken resp. to which wavelength-range the
#' resulting de-trend should be applied. Please see documentation for the
#' arguments \code{src} and \code{trg} for further details. If the target
#' wavelengths are only a part of the whole wavelength-range in the dataset,
#' the de-trend will be applied only to this wavelengths, and the rest of the
#' wavelengths will remain unchanged. Abrupt steps in the resulting spectra can
#' be the result of this. If both arguments \code{src} and \code{trg} are left
#' at their defaults, the full range of wavelengths in the dataset is used for
#' calculating the de-trend values, i.e. the linear models, and the resulting
#' de-trend is also applied to the full range of wavelengths present in the
#' dataset.
#' @inheritParams do_sgolay
#' @param dataset An object of class 'aquap_data' as produced e.g. by
#' \code{\link{gfd}}.
#' @param src 'source'; the wavelength-range from where the values for de-trend
#' should be calculated (i.e. the linear models). Leave at the default NULL
#' to use the full range of wavelengths in the provided dataset, or provide
#' a numeric length two to use this wavelength range for calculating the values
#' for de-trend.
#' @param trg 'target'; character length one or numeric length two. The wavelengths
#' where the de-trend should be applied. If left at the default 'src' the same
#' wavelength as specified in \code{src} is used. Possible values are:
#' \describe{
#' \item{src}{Apply the de-trend to the same wavelength-range as specified in
#' argument \code{src} (the default).}
#' \item{all}{Apply the de-trend to all the wavelengths in the provided dataset.}
#' \item{Numeric length two}{Provide a numeric length two to the argument
#' \code{trg} to apply the de-trend only to this wavelength range.}
#' }
#' @param exportModel Logical. If TRUE, a de-trend entry is handed to the
#' internal model-export mechanism of the package. Defaults to FALSE.
#' @return The transformed dataset
#' @examples
#' \dontrun{
#' fd <- gfd()
#' fdDT <- do_detrend(fd) # use the whole wavelength range of 'fd' as source and
#' # target for the de-trend
#' plot(fd)
#' plot(fdDT)
#' ###
#' fdc <- selectWls(fd, 1300, 1600)
#' plot(fdc)
#' plot(do_detrend(fdc)) # whole range as source and target
#' plot(do_detrend(fdc, src=c(1400, 1500))) # same target as source
#' plot(do_detrend(fdc, src=c(1400, 1500), trg="all")) # apply to full range
#' plot(do_detrend(fdc, src=c(1400, 1500), trg=c(1300, 1600))) # same as above
#' plot(do_detrend(fdc, src=c(1300, 1400), trg=c(1380, 1580)))
#' }
#' @family Data pre-treatment functions
#' @family dpt modules documentation
#' @export
do_detrend<- function(dataset, src=NULL, trg="src", exportModel=FALSE) {
	# De-trend the spectra: for every observation fit lm(absorbance ~ wavelength)
	# on the `src` wavelength range, then subtract intercept + slope*wavelength
	# from the `trg` wavelength range. See the roxygen block above for the
	# meaning of `src` and `trg`.
	autoUpS()
	checkDeTrendSrcTrgInput(dataset, src, trg) # validates input; also RE-ASSIGNS `trg` in this frame (via assign() in the checker): "src"/"all" get resolved to NULL resp. a numeric range
	# source: data used to fit the per-observation linear models
	if (is.null(src)) {
		absSrc <- getNIR(dataset)
		wlsSrc <- getWavelengths(dataset)
	} else {
		ds <- selectWls(dataset, src[1], src[2])
		absSrc <- getNIR(ds)
		wlsSrc <- getWavelengths(ds)
	}
	mods <- apply(absSrc, 1, function(x) lm(x ~ wlsSrc)$coefficients) ### calculate the models !! one for every observation ### gives back a matrix with two rows (intercept, slope) and one column for every observation
	# target: wavelength range the de-trend gets applied to
	if (is.null(trg)) {
		nirTrg <- getNIR(dataset)
		wlsTrg <- getWavelengths(dataset)
	} else {
		if (is.numeric(trg)) {
			ds <- selectWls(dataset, trg[1], trg[2])
			nirTrg <- getNIR(ds)
			wlsTrg <- getWavelengths(ds)
		} else {
			# no-op: after the input check above, `trg` is either NULL or numeric,
			# so this branch should never be reached
		}
	}
	# NIR <- matrix(NA, nrow(nirTrg), ncol(nirTrg))
	# for (i in 1: nrow(nirTrg)) {
	#	NIR[i,] <- as.numeric(nirTrg[i,]) - mods[1,i] - mods[2,i]*wlsTrg
	# }
	NIRnew <- t(sapply(1:nrow(nirTrg), function(i) as.numeric(nirTrg[i,]) - mods[1,i] - mods[2,i]*wlsTrg)) # newAbsorbance = absorbance - intercept - slope*wavelength, per observation
	exportAdditionalModelToAp2Env(doExport=exportModel, thisMod=NULL, thisType=pv_dptModules[6]) # deTrend
	colnames(NIRnew) <- cnsNew <- colnames(nirTrg)
	cnsOld <- colnames(dataset$NIR)
	indHere <- which(cnsOld %in% cnsNew)
	dataset$NIR[, indHere] <- NIRnew # only the target wavelengths get replaced; the rest stays unchanged (can produce visible steps in the spectra)
	return(dataset)
} # EOF
#' @title Perform Data-Pretreatment Sequence
#' @description Manually perform a sequence of data pre-treatments defined in a
#' string.
#' @details For internal use.
#' @param dataset An object of class 'aquap_data'.
#' @param dptSeq A character holding at least one valid string for data pre-treatment.
#' @param extraModelList A list of possible external models to hand over to the
#' data pre-treatment process.
#' @param silent Logical. If status info should be printed or not.
#' @return The transformed dataset.
#' @export
do_dptSeq <- function(dataset, dptSeq, extraModelList=NULL, silent=TRUE) {
	# Manually run a data pre-treatment sequence (string `dptSeq`) on a dataset;
	# thin wrapper around the internal performDPT_Core().
	autoUpS(cfs=FALSE)
	.ap2$stn$allSilent <- silent # NOTE(review): overwrites the package-wide silent setting and does not restore it afterwards -- confirm intended
	return(performDPT_Core(dataset, dptSeq, extraModelList, allExportModels=FALSE))
} # EOF
#' @title Resample data to new Wavelengths
#' @description Resample the data in the provided dataset to new, possibly
#' evenly spaced, wavelengths.
#' @details If nothing is provided for the argument \code{targetWls}, an evenly
#' spaced vector from the lowest to the highest available wavelength
#' is automatically generated as the target wavelengths to be resampled to.
#' @param dataset An object of class 'aquap_data' as produced e.g. by
#' \code{\link{gfd}}.
#' @param targetWls Numeric vector, the target wavelengths. If left at the
#' default \code{NULL}, an evenly spaced vector from the lowest to the
#' highest available wavelength is automatically generated as the target
#' wavelengths to be resampled to.
#' @param tby Numeric length one, the spacing in nm of the automatically
#' generated target wavelength. Only applies if \code{targetWls} is left at
#' \code{NULL}.
#' @param method The resampling method. For details see
#' \code{\link[pracma]{interp1}}. Defaults to 'cubic'; the default can be
#' changed in the settings.r file (parameter \code{gen_resample_method}).
#' @return The resampled dataset.
#' @examples
#' \dontrun{
#' fd <- gfd()
#' fdR <- do_resampleNIR(fd)
#' }
#' @family Data pre-treatment functions
#' @export
do_resampleNIR <- function(dataset, targetWls=NULL, tby=0.5, method=get("stn", envir=.ap2)$gen_resample_method) {
	# Re-sample every spectrum in the dataset onto the wavelength grid `targetWls`;
	# if none is given, an evenly spaced grid (step `tby`) spanning the available
	# wavelengths (endpoints rounded to even numbers inside the range) is used.
	ncpwl <- dataset@ncpwl
	wlPrefix <- substr(colnames(dataset$NIR)[1], 1, ncpwl) # non-numeric prefix of the wavelength column names
	#
	oldWls <- getWavelengths(dataset)
	if (is.null(targetWls)) {
		newWls <- seq(ceiling(oldWls[1] / 2) * 2, floor(oldWls[length(oldWls)] / 2) * 2, tby)
	} else {
		newWls <- targetWls
	} # end else
	resampled <- t(apply(dataset$NIR, 1, pracma::interp1, x = oldWls, xi = newWls, method = method))
	colnames(resampled) <- paste0(wlPrefix, newWls)
	dataset$NIR <- resampled
	return(dataset)
} # EOF
checkBlowUpInput_grouping <- function(header, grp) {
	# Check that every name in `grp` is an existing class-variable column (i.e. a
	# column whose name carries the class-variable prefix) of the provided header
	# data frame; stops with an informative message otherwise.
	cns <- colnames(header)
	cPref <- .ap2$stn$p_ClassVarPref
	cns <- cns[grep(cPref, cns)] # keep only the class-variable columns
	###
	checkClassExistence <- function(X, char, ppv=TRUE) {
		# is.character() already returns a single logical; the former
		# all(is.character(X)) wrapper was a no-op
		if (!is.character(X)) {
			stop(paste0("Please only provide characters as input for the argument '", char, "'. Please check your input."), call.=FALSE)
		}
		for (i in seq_along(X)) { # seq_along: `1:length(X)` would iterate on zero-length input and raise a spurious "class-variable `NA`" error
			if (!X[i] %in% cns) {
				msg1 <- paste0("Sorry, it seems that the class-variable `", X[i], "` as grouping variable (", char, ") does not exist in the provided dataset.")
				msg2 <- ""; if (ppv) { msg2 <- paste0("\nPossible values are: '", paste(cns, collapse="', '"), ".") }
				stop(paste0(msg1, msg2), call.=FALSE)
			}
		} # end for i
	} # EOIF
	checkClassExistence(grp, "grp")
} # EOF
checkTransformBlowUpInput_TnAn <- function(minPart, tn, an) {
	# Normalize and validate the target-number `tn` and average-number `an` for
	# do_blowup(), relative to the smallest subgroup size `minPart`.
	#   tn: numeric (absolute target) or "xN" (N-fold of minPart)
	#   an: numeric (absolute draw count) or "N%" (percentage of minPart)
	# Returns list(tn=..., an=...) with both values resolved to numbers.
	if (is.character(tn)) {
		if (substr(tn, 1, 1) != "x") {
			stop(paste0("Please provide `tn` in the format `xN`, with N being a positive integer"), call.=FALSE)
		}
		# as.numeric() yields NA (which IS numeric) on unparseable input, so the
		# malformed case must be caught with is.na(), not !is.numeric().
		tn <- round(suppressWarnings(as.numeric(substr(tn, 2, nchar(tn))))) * minPart # multiply with minPart, as we want to have it xFold
		if (is.na(tn)) {
			stop(paste0("Please provide an integer after the character `x` in the argument `tn`."), call.=FALSE)
		}
	} else {
		if (!is.numeric(tn)) {
			stop(paste0("Please provide either an integer or a character in the format `xN`, with N being a positive integer, to the argument `tn`."), call.=FALSE)
		}
		tn <- round(tn) # was `rn <- round(tn)`: the rounded value was discarded, leaving tn fractional
	} # end else
	if (tn < minPart) {tn <- minPart} # never shrink below the smallest subgroup
	#
	if (is.character(an)) {
		if (substr(an, nchar(an), nchar(an)) != "%") {
			stop(paste0("Please provide `an` in the format `N%`, with N being a positive integer"), call.=FALSE)
		}
		an <- round(suppressWarnings(as.numeric(substr(an, 1, nchar(an)-1))))
		if (is.na(an)) { # must come before the `an > 100` comparison (NA would crash it)
			stop(paste0("Please provide an integer before the character `%` in the argument `an`."), call.=FALSE)
		}
		if (an > 100) { an <- 100}
		an <- round((an * minPart) / 100) # as we want to have a percentage
	} else {
		if (!is.numeric(an)) {
			stop(paste0("Please provide either an integer or a character in the format `N%`, with N being a positive integer, to the argument `an`."), call.=FALSE)
		}
		an <- round(an)
		if (an < 1) {an <- 1}
	} # end else
	###
	return(list(tn=tn, an=an))
} # EOF
lotto_loop <- function(tn, an, size, n=2000) {
	# Monte-Carlo estimate of the percentage of duplicate draws when `tn`
	# combinations of `an` numbers are each sampled (with replacement) out of
	# 1:size. Used by do_blowup() to predict the expected share of identical
	# averaged spectra. Returns (invisibly) the mean duplicate percentage over
	# `n` repetitions.
	lotto <- function(tn, an=3, size=8) {
		# draw `tn` sorted combinations and encode each one as an "a-b-c" string
		nums <- lapply(seq_len(tn), function(i) sort(sample(seq_len(size), an, replace = TRUE)))
		numsChar <- vapply(nums, paste, character(1), collapse="-")
		# a combination counts as duplicate if it occurs anywhere else in the draw
		isDup <- vapply(seq_along(numsChar), function(i) numsChar[i] %in% numsChar[-i], logical(1))
		return(invisible(round((sum(isDup) / tn)*100, 0)))
	} # EOIF
	# seq_len() guards the degenerate n=0 / tn=0 cases; `1:n` would iterate twice
	percOut <- vapply(seq_len(n), function(i) lotto(tn, an, size), numeric(1))
	return(invisible(round(mean(percOut), 1)))
} # EOF
#' @title Increase the Numbers of Observation in Dataset
#' @description Use random observations of the provided dataset (within a possible
#' grouping), calculate their average and add the resulting spectrum as new
#' observation to the dataset.
#' @details The random observations are sampled with replacement. The provenience
#' of each observation is marked in an additional column in the header named
#' `blowup`, where the character `orig` denotes an original observation, while the
#' character `artif` is marking an artificially generated observation.
#' @param dataset An object of class 'aquap_data' as produced e.g. by
#' \code{\link{gfd}}.
#' @param tn Numeric or character length one. If numeric, \code{tn} is denoting
#' the target-number, i.e. the desired number of observations in the expanded
#' dataset. If a grouping is provided via \code{grp}, the target number is
#' defining the desired number of observations within each subgroup obtained
#' by the grouping. If \code{tn} is a character it hast to be in the format
#' \code{xN}, with \code{N} being a positive integer. In this case it means
#' an N-fold increase of the dataset resp. of each subgroup as defined by the
#' grouping in \code{grp}. Defaults to \code{x10}.
#' @param an Numeric or character length one. If numeric, \code{an} is denoting
#' the "average-number", i.e. the number of observations resp. their spectra that
#' should be drawn (with replacement) from the dataset resp. from the respective
#' subgroup. These drawn samples then are averaged together into a new spectrum.
#' If \code{an} is a character it has to be in the format \code{N\%}, with
#' \code{N} being a positive integer. In this case it denotes the percentage of
#' observations in the dataset resp. in each of the subgroups possibly obtained
#' via \code{grp} that should be drawn (with replacement) and averaged together
#' into a new observation. Defaults to \code{100\%}, what is the recommended value.
#' @param grp Character. One or more valid class-variable names that should be
#' used to form subgroups within the dataset. Similar to the \code{spl.var}
#' argument in the analysis procedure.
#' @param cst Logical. If consecutive scans should always be kept together.
#' NOT YET IMPLEMENTED - value is ignored, and consec. scans are NOT kept together.
#' @param conf Logical. If numbers should be presented and confirmation requested
#' before commencing the actual calculations.
#' @section Warning: Do take care of a correct grouping, otherwise the inclusion
#' of observations into the same group that do not belong together will destroy
#' any information.
#' @param replace Logical. If the sample drawing should be done with replacement.
#' Recommended value is TRUE.
#' @param pred Logical. If an estimation of the number of identical spectra should
#' be made. Only presented if \code{conf} is TRUE.
#' @return The dataset with increased numbers of observations.
#' @examples
#' \dontrun{
#' fd <- gfd()
#' fdPlus <- do_blowup(fd, tn="x4", an=4)
#' fdPlus <- do_blowup(fd, tn="x8", an="10%", grp=c("C_Foo", "C_Bar"))
#' fdPlus <- do_blowup(fd, tn=1000, an=5)
#' }
#' @family Data pre-treatment functions
#' @export
do_blowup <- function(dataset, grp=NULL, tn="x10", an="100%", cst=TRUE, conf=TRUE, pred=TRUE, replace=TRUE) {
	# Expand the dataset with artificial observations: within every subgroup
	# (defined via `grp`, or the whole dataset), repeatedly draw `an` observations
	# (with replacement if `replace`), average their spectra, and append the
	# result until the subgroup holds `tn` observations. New rows get flagged
	# "artif" in an added class column "blowup". See the roxygen block above for
	# the `tn` ("xN") and `an` ("N%") formats.
	# NOTE(review): `cst` (keep consecutive scans together) is currently ignored,
	# see the comment at the very end of the function.
	cPref <- .ap2$stn$p_ClassVarPref
	yPref <- .ap2$stn$p_yVarPref
	snColSet <- .ap2$stn$p_sampleNrCol
	snCol <- paste0(yPref, snColSet) # name of the sample-number column
	txtOrig <- "orig" # marker for original observations
	txtBlow <- "artif" # marker for artificially generated observations
	colNameBlow <- "blowup"
	colOrig <- 1 # color-representation value for original rows
	colBlow <- 2 # color-representation value for artificial rows
	lottoLoopN <- .ap2$stn$cl_extDatPred_N_lottoLoop # nr of repeats for the duplicate prediction
	#
	header <- headerFac <- getHeader(dataset) # headerFac only used for grouping
	colRep <- getColRep(dataset)
	blowupDf <- data.frame(X=rep(txtOrig, nrow(header))) ## add a new column to the header telling us what samples are new and what are old
	colnames(blowupDf) <- paste0(cPref, colNameBlow)
	header <- cbind(header, blowupDf)
	blowCol <- data.frame(X=rep(colOrig, nrow(header))) # matching column for the color representation
	colnames(blowCol) <- paste0(cPref, colNameBlow)
	colRep <- cbind(colRep, blowCol)
	#
	# first get out the factors: remember which header columns are factors and
	# temporarily convert them to character (re-factorized at the end below);
	# presumably this avoids factor-level issues when filling the NA data frames
	facInd <- vector("logical", length=ncol(header))
	for (i in 1: ncol(header)) {
		facInd[i] <- isFac <- is.factor(header[,i])
		if (isFac) {
			header[,i] <- as.character(header[,i])
		}
	} # end for i
	#
	# split header, NIR and color representation by the grouping (if any);
	# without a grouping, everything forms one single group
	minPart <- nrow(header)
	splitList <- list(header)
	NIR <- getNIR(dataset)
	NIRsplitList <- list(NIR)
	colRepSplitList <- list(colRep)
	if (!is.null(grp)) {
		checkBlowUpInput_grouping(header, grp) # stops if wrong or so
		splitList <- split(header, headerFac[,grp]) # returns a list with a dataframe in each element
		NIRsplitList <- split(NIR, headerFac[,grp])
		colRepSplitList <- split(colRep, headerFac[,grp])
		minPart <- min(unlist(lapply(splitList, nrow))) # get the minimum number of participants from all the split elements
	}
	aa <- checkTransformBlowUpInput_TnAn(minPart, tn, an) # resolves "xN" / "N%" into absolute numbers
	tn <- aa$tn
	an <- aa$an
#	print(tn); print(an); print(minPart); print("-----")
	# now we have the correct target and average number
	requiredN <- tn - minPart # nr of observations to add to every subgroup
	if (requiredN == 0) {
		message("No dataset expansion has been performed.")
		return(dataset)
	}
	eachNPart <- lapply(splitList, function(x) 1: nrow(x)) # gives a list with a vector from 1:N, with N being the number of participants from each element of the splitList
	indList <- newNirList <- newHeaderList <- newColRepList <- vector("list", length=length(splitList))
	for (i in 1: length(splitList)) { # now for every element within the splitList (could be only 1) we perform tn iterations of the resampling
		###### CORE ######
		indList[[i]] <- lapply(1:requiredN, function(x, npa, ana) sample(npa[[i]], ana, replace=replace), npa=eachNPart, ana=an) # gives a list for each element of the splitList
		###### CORE ######
		newNirList[[i]] <- matrix(NA, nrow=requiredN , ncol=ncol(NIR)) # preallocate the result containers
		newHeaderList[[i]] <- data.frame(matrix(NA, nrow=requiredN, ncol=ncol(header)))
		newColRepList[[i]] <- data.frame(matrix(NA, nrow=requiredN, ncol=ncol(colRep)))
	} # end i
	if (conf) {
		# present the numbers (and, if `pred`, the expected share of identical
		# spectra) and wait for the user to confirm via enter
		avgDoubleMsg <- ""
		if (pred) {
			avgDoubles <- lotto_loop(tn=requiredN, an=an, size=minPart, n=lottoLoopN)
			avgDoubleMsg <- paste0("The expected percentage of identical spectra is ~", avgDoubles, "%.")
		}
		cat(paste0(requiredN, " observations will be added to each of ", length(indList), " subgroups (the smallest containing ", minPart, " observations) by drawing from ", an, " observations.\n", avgDoubleMsg, "\n\nPress enter to continue or escape to abort:"))
		scan(file = "", n = 1, quiet = TRUE)
	}
	# future: check here the IndList for doubles !!
	# now we have indList containing the header segments, NIRsplitList containing the NIR segments, and the indList that contains the indices to be averaged for each segment
	if (!.ap2$stn$allSilent) {cat(paste0("Calculating (draw from ", an, ") and adding ", requiredN, " new observations to each of ", length(indList), " subgroups:\n"))}
	for (i in 1: length(indList)) { # i is the number of the splitSegment as defined by the grouping
		for (k in 1: length(indList[[i]])) { # k is the number of spectra we will add to each subgroup (k can be very large) ######(parallelize this, NOT above !!)
			inds <- indList[[i]][[k]]
			newNirList[[i]][k,] <- apply(NIRsplitList[[i]][inds,], 2, mean) # subselect from the NIR segment and calculate colwise average
			newHeaderList[[i]][k,] <- splitList[[i]][inds[1],] # (splitList contains the header segments) just get the first of the observations to be averaged
			newColRepList[[i]][k,] <- colRepSplitList[[i]][inds[1],] # same
		} # end for k
		if (!.ap2$stn$allSilent) {cat(".")}
	} # end for i
	# now we have to get the new elements out of the list into each one object
	allNewHeader <- do.call("rbind", newHeaderList)
	allNewHeader[,ncol(allNewHeader)] <- rep(txtBlow, nrow(allNewHeader)) # flag all new rows as artificial (the "blowup" column is the last one)
	colnames(allNewHeader) <- colnames(header)
	allNewColRep <- do.call("rbind", newColRepList)
	allNewColRep[,ncol(allNewColRep)] <- rep(colBlow, nrow(allNewColRep))
	colnames(allNewColRep) <- colnames(colRep)
	allNewNir <- do.call("rbind", newNirList)
	colnames(allNewNir) <- colnames(NIR)
	# (? maybe a problem with rownames?)
	# now fuse together with the original data
	tsi <- which(colnames(header) == "Timestamp")
	if (length(tsi) != 0) {
		allNewHeader[,tsi] <- as.POSIXct(allNewHeader[,tsi], origin="1970-01-01") # convert the Timestamp column back to POSIXct (it is numeric after the matrix-based fill above)
	}
	header <- rbind(header, allNewHeader)
	snrs <- range(header[,snCol])
	header[,snCol] <- seq(min(snrs), max(snrs), length.out=nrow(header)) # re-spread the sample numbers evenly over the enlarged dataset
	colRep <- rbind(colRep, allNewColRep)
	NIR <- as.matrix(rbind(NIR, allNewNir))
	for (i in 1: ncol(header)) {
		if (facInd[i]) {
			header[,i] <- as.factor(header[,i]) # re-factorize if necessary
		}
	} # end for i
	out <- new("aquap_data", data.frame(I(header), I(colRep), I(NIR)), metadata=dataset@metadata, anproc=dataset@anproc, ncpwl=dataset@ncpwl, version=dataset@version)
	if (!.ap2$stn$allSilent) {cat(" ok.\n")}
	return(out)
	## XXX still required:
	# include consec. scans together
} # EOF
|
074697051eac2b3e23737f2360c4a25fdb4d686e | ee77fb8b2c8a4de8aad82de953a462ae9b38668e | /man/update_rating.Rd | 5aef30aee34499d319b0fd183a50a22393fc34d2 | [] | no_license | lazappi/aflelo | 700a281c9eb086887128f3ea968de8d0bbbd87a4 | a5c3d964140fbcd092b13a6672fffd474fb67692 | refs/heads/master | 2021-04-12T12:24:49.305211 | 2018-04-19T11:45:22 | 2018-04-19T11:45:22 | 126,563,142 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 520 | rd | update_rating.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.R
\name{update_rating}
\alias{update_rating}
\title{Update rating}
\usage{
update_rating(model, team, new_rating)
}
\arguments{
\item{model}{aflelo_model to update}
\item{team}{name of team to update}
\item{new_rating}{new rating value}
}
\value{
aflelo_object with updated rating
}
\description{
Update rating for a single team in an AFLELO Model
}
\examples{
model <- aflelo_model()
aflelo:::update_rating(model, "Richmond", 1600)
}
|
22b40186010c52e9b99153e8f055081039087d82 | 39e89c03c14590143dc3543b58766ec12ca7432f | /man/pdb.quaternary.Rd | 225c5d15620b0e818fe51fe6fbdf704a355376ad | [] | no_license | jcaledo/ptm_0.1.1 | 65a4733284a3d2e11bfe4a949fb54dfe356b05b8 | ad4f1f04b87a53d6e49ab90d3fe391ec011cced7 | refs/heads/master | 2021-03-25T22:56:30.250750 | 2020-03-27T13:19:31 | 2020-03-27T13:19:31 | 247,651,998 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 877 | rd | pdb.quaternary.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pdb_ancillary.R
\name{pdb.quaternary}
\alias{pdb.quaternary}
\title{Protein Subunit Composition}
\usage{
pdb.quaternary(pdb, keepfiles = FALSE)
}
\arguments{
\item{pdb}{the path to the PDB of interest or a 4-letter identifier.}
\item{keepfiles}{logical, if TRUE the fasta file containing the alignment of the subunits is saved in the current directory, as well as the split pdb files.}
}
\value{
This function returns a list with four elements: (i) a distances matrix, (ii) the sequences, (iii) chains id, (iv) the PDB ID used.
}
\description{
Determines the subunit composition of a given protein.
}
\details{
A fasta file containing the alignment among the subunit sequences can be saved in the current directory if required.
}
\examples{
pdb.quaternary('1bpl')
}
\author{
Juan Carlos Aledo
}
|
38bfcb3222e5b58b2413d6938d347b452ffabb35 | 8ac06e475183e8519f543fce41e72ec0e7226309 | /R/shadePlot.R | d61ec913f638314503abc27ba766d21c5bad6dc7 | [] | no_license | kashenfelter/Dmisc | 0a43b7fbd83c874996501c83f54b2f46ca050af7 | 7e8ed7c1477f67376de6832fa1bfaf20170e5136 | refs/heads/master | 2020-03-13T22:06:00.018345 | 2017-08-21T17:08:19 | 2017-08-21T17:08:19 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,035 | r | shadePlot.R |
#' Shade a section of a pdf
#'
#' Plot a pdf and shade a section of it.
#'
#' @param func The density function to be plotted (dnorm, dt, df)
#' @param xlim The x-limits for the plotting region
#' @param ylim The y-limits for the plotting region. If left
#' as NULL then it will automatically use a reasonable ylimit
#' for the upper bound by figuring out the highest point
#' in your plotting region
#' @param shadelim A vector containing the upper endpoint
#' and the lower endpoint of the region you want to shade
#' @param col The color of the shaded region
#' @param xlab Label for the x-axis
#' @param ylab Label for the y-axis
#' @param main Title for the plot
#' @param \ldots Additional parameters to pass onto the density
#' function. For instance if you wanted to plot a normal density
#' with a mean of 5 and sd of 2 you would also pass in mean=5, sd=2
#' in the parameter list (assuming you used dnorm as your 'func').
#'
#' @export
shadePlot <- function(func = dnorm,
                      xlim = c(-3,3),
                      ylim = NULL,
                      shadelim = c(0, 1),
                      col = "red",
                      xlab = "x",
                      ylab = expression(paste(f[X](x))),
                      main = "",
                      ...){
  # Evaluate the density on a fine grid over the plotting range
  grid_x <- seq(xlim[1], xlim[2], length.out = 1000)
  grid_y <- func(grid_x, ...)
  # Clamp the shading interval to the visible x-range
  lo <- max(shadelim[1], xlim[1])
  hi <- min(shadelim[2], xlim[2])
  # Default y-limits: from 0 up to the highest finite density on the grid
  if (is.null(ylim)) {
    coords <- xy.coords(grid_x, grid_y)
    ylim <- c(0, max(coords$y[is.finite(coords$y)]))
  }
  # Draw the density curve
  plot(grid_x, grid_y, type = "l", xlim = xlim, ylim = ylim,
       xlab = xlab, ylab = ylab, main = main)
  # Shade the area under the curve between lo and hi
  inside <- grid_x >= lo & grid_x <= hi
  polygon(c(lo, lo, grid_x[inside], hi, hi),
          c(0, func(lo, ...), grid_y[inside], func(hi, ...), 0),
          col = col)
}
3a4b823471088e83557c757758162e2b88cca823 | ed1ace377a08f57aa4f5ba25797861ff67598f7e | /UK/analyse.R | cedf67db7eaf3fafc6c83ff00c857a32a258423d | [] | no_license | haosifan/turnout_rightwingpopulists | a66702dbe3c85f59d1d4512679d93a99e3191fd4 | e8fec2c18f06b0b3ecd86677fb1fb7cc3f73547c | refs/heads/master | 2021-01-21T11:23:15.512271 | 2017-03-01T14:45:21 | 2017-03-01T14:45:21 | 83,562,062 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,478 | r | analyse.R | #############################################
##### Grafiken UK ###########################
#############################################
# Scatter plots relating turnout to UKIP vote share for the 2015 UK General
# Election (levels, and first differences vs. the previous election), each
# annotated with the Pearson correlation and written to ../zgfx/ as TIFF.
# NOTE(review): depends on `uk_df` being created by datenaufbereitung.R, and on
# ggsave() picking up the most recently created ggplot object -- the plots
# p_uk1 / p_uk2 are assigned but never printed or passed to ggsave explicitly.
source("datenaufbereitung.R")
library(ggplot2)
library(ggthemes)
# Pearson correlation of turnout and UKIP share, constituencies of 2015
corr_df <- uk_df[uk_df$Election.Year==2015,]
corr_uk1 <- round(cor(corr_df$turnout,corr_df$UKIP, use = "complete.obs"),2)
p_uk1 <- ggplot(uk_df[uk_df$Election.Year==2015,], aes(x = turnout, y = UKIP)) +
  geom_point(alpha = 0.7, size=2, color="gray25") +
  geom_smooth(method = 'lm', se = F, color = 'black', show.legend = F, linetype="dashed") +
  geom_text(aes(label=paste("Pearson R: ", corr_uk1)),x=-Inf, y=Inf, hjust=-0.2, vjust=1.2) +
  theme_minimal() + scale_color_economist() +
  xlab('Wahlbeteiligung (%)') + ylab('UKIP-Stimmanteil (%)')
  #ggtitle('General Election UK')
ggsave("../zgfx/UK_GE2015_wb.tif", dpi = 300, device = "tiff")
# Pearson correlation of the first differences (change vs. previous election)
corr_uk2 <- round(cor(corr_df$diff_turnout,corr_df$diff_ukip, use = "complete.obs"),2)
p_uk2 <- ggplot(uk_df, aes(x = diff_turnout, y = diff_ukip)) +
  geom_point(alpha = 0.7, size=2, color="gray25") +
  geom_smooth(method = 'lm', se = F, color = 'black', show.legend = F, linetype="dashed") +
  geom_text(aes(label=paste("Pearson R: ", corr_uk2)),x=-Inf, y=Inf, hjust=-0.2, vjust=1.2) +
  theme_minimal() + scale_color_economist() +
  xlab('Differenz in der Wahlbeteiligung (%)') + ylab('Differenz UKIP-Stimmanteil (%)')
  #ggtitle('General Election UK')
ggsave("../zgfx/UK_GE2015_diff.tif", dpi = 300, device = "tiff")
fbb3cd931f80f1c68b25b4eba48be7850ade7963 | 9cce02f026cbdb383bed192efdf8ffb03af1234d | /Mapper/Mapper examples 1.R | 83a6c86e30943b56eca53f7e14f3b67514335809 | [
"MIT"
] | permissive | lancelotdacosta/CMP_project | 32669f2ef8029a9d4e75131d48b08b1aab4e6485 | 0eb6befc2aec485a92187be5de76c3cf9876e339 | refs/heads/master | 2020-03-25T02:07:08.612571 | 2018-08-10T14:48:09 | 2018-08-10T14:48:09 | 143,274,534 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,187 | r | Mapper examples 1.R | library(networkD3)
library(TDAmapper)
library(igraph)

# Toy point cloud: a sine curve sampled at the integers 1..1000.
sine_data <- data.frame(x = 1:1000, y = sin(1:1000))
plot(sine_data$x, sine_data$y)

sine_dist <- dist(sine_data)

# 1-D Mapper with the y-coordinate as filter function.
sine_mapper <- mapper1D(distance_matrix = sine_dist,
                        filter_values = sine_data$y,
                        num_intervals = 20,
                        percent_overlap = 50,
                        num_bins_when_clustering = 2)

sine_graph <- graph.adjacency(sine_mapper$adjacency, mode = "undirected")
plot(sine_graph, layout = layout.auto(sine_graph))

# Mean value of sine_data$x over the points in each Mapper vertex.
# NOTE(review): the variable is named y.mean.vertex although it averages the
# x-column -- confirm which coordinate was intended for the coloring.
y.mean.vertex <- vapply(seq_len(sine_mapper$num_vertices), function(v) {
  mean(sine_data$x[sine_mapper$points_in_vertex[[v]]])
}, numeric(1))

# Vertex size = number of data points falling into each vertex.
vertex.size <- vapply(seq_len(sine_mapper$num_vertices), function(v) {
  length(sine_mapper$points_in_vertex[[v]])
}, numeric(1))

# Mapper graph with vertices greyscale-colored by the mean value (darker =
# larger) and vertex size proportional to the number of points inside.
y.mean.vertex.grey <- grey(1 - (y.mean.vertex - min(y.mean.vertex)) / (max(y.mean.vertex) - min(y.mean.vertex)))
V(sine_graph)$color <- y.mean.vertex.grey
V(sine_graph)$size <- vertex.size
plot(sine_graph, main = "Mapper Graph")
legend(x = -2, y = -1, c("y small", "y medium", "large y"), pch = 21,
       col = "#777777", pt.bg = grey(c(1, 0.5, 0)), pt.cex = 2, cex = .8, bty = "n", ncol = 1)

# Interactive force-network rendering of the Mapper graph.
# NOTE(review): labels 1:2000 are passed although the data has 1000 rows -- confirm.
library(networkD3)
MapperNodes <- mapperVertices(sine_mapper, 1:2000)
MapperLinks <- mapperEdges(sine_mapper)
forceNetwork(Nodes = MapperNodes, Links = MapperLinks,
             Source = "Linksource", Target = "Linktarget",
             Value = "Linkvalue", NodeID = "Nodename",
             Group = "Nodegroup", opacity = 1,
             linkDistance = -1, charge = -400)
519330bdb3325f7a6c27eb03c7c022e66507446a | d08d401957330f5a5326544c02d53ecfaa1fa6ae | /macro/tests/testthat/test-continuous_step.R | cbd9c4031e557c94bdf436fd1300edfc6f597cdf | [] | no_license | dd-harp/MASH | 103733fa1b0cc0f0f2f4fdeef41b84e33b828349 | 3786b6904a82fc25cd5709df73f195302a9d9b08 | refs/heads/main | 2021-08-11T09:15:46.371074 | 2021-06-08T18:44:48 | 2021-06-08T18:44:48 | 204,227,705 | 0 | 0 | null | 2021-06-08T18:44:49 | 2019-08-25T00:32:06 | C++ | UTF-8 | R | false | false | 4,131 | r | test-continuous_step.R | test_that("individual two state bounce", {
set.seed(32432)
  # Two-state machine: "infect" is enabled while S, "recover" while I.
  is_enabled <- list(
    infect = function(state, time) with(state, disease == "S"),
    recover = function(state, time) with(state, disease == "I")
  )
  # Exponential waiting times for each transition.
  when <- list(
    infect = function(state, time) rexp(1, 1/50),
    recover = function(state, time) rexp(1, 1/200)
  )
  fire <- list(
    infect = function(state, time) {within(state, {disease = "I"})},
    recover = function(state, time) {within(state, {disease = "S"})}
  )
  transitions <- list(
    is_enabled = is_enabled, when = when, fire = fire
  )
  # Susceptible individual whose "infect" event is due at t = 0.32.
  individual <- list(disease = "S", when = 0.32, infect = 0.32, recover = Inf)
  status <- update_individual(individual, transitions, observe_continuous)$individual
  expect_equal(status$disease, "I")
  expect_equal(status$infect, Inf)
  # Recovery must now be scheduled strictly after the infection time.
  expect_gt(status$recover, 0.32)
  expect_equal(status$when, status$recover)
  # Mirror case: an infected individual whose recovery fires at t = 0.44.
  individual <- list(disease = "I", when = 0.44, infect = Inf, recover = 0.44)
  status <- update_individual(individual, transitions, observe_continuous)$individual
  expect_equal(status$disease, "S")
  expect_equal(status$recover, Inf)
  expect_gt(status$infect, 0.44)
  expect_equal(status$when, status$infect)
})
#' The individual is in an absorbing state, so there is nothing
#' else to fire. Check that it goes to Inf for the next time.
test_that("individual removed goes to inf", {
  set.seed(32432)
  # A single transition, S -> I, with no way back out of I.
  transitions <- list(
    is_enabled = list(infect = function(state, time) with(state, disease == "S")),
    when = list(infect = function(state, time) rexp(1, 1/50)),
    fire = list(infect = function(state, time) {within(state, {disease = "I"})})
  )
  susceptible <- list(disease = "S", when = 0.32, infect = 0.32)
  updated <- update_individual(susceptible, transitions, observe_continuous)$individual
  # Once infected, nothing is enabled, so no event can ever be scheduled.
  expect_equal(updated$disease, "I")
  expect_equal(updated$infect, Inf)
  expect_equal(updated$when, Inf)
})
#' A transition was enabled and fired, but it's now enabled again
#' in the new state. Check that it gets scheduled again.
test_that("individual transition fires again", {
  set.seed(32432)
  # The "infect" transition leaves the individual in S, so it is
  # immediately re-enabled after firing.
  transitions <- list(
    is_enabled = list(infect = function(state, time) with(state, disease == "S")),
    when = list(infect = function(state, time) rexp(1, 1/50)),
    fire = list(infect = function(state, time) {within(state, {disease = "S"})})
  )
  person <- list(disease = "S", when = 0.32, infect = 0.32)
  updated <- update_individual(person, transitions, observe_continuous)$individual
  expect_equal(updated$disease, "S")
  # A fresh firing time must have been drawn past the previous one.
  expect_gt(updated$infect, 0.32)
  expect_equal(updated$when, updated$infect)
})
test_that("can use data.table", {
  set.seed(97234)
  # This sets up the transitions.
  # `program_globals` is the environment handed to the simulation; the
  # `<<-` assignments in the fire functions are presumably re-bound into
  # it by continuous_simulation() rather than writing into this test
  # frame — TODO confirm against the framework.
  program_globals <- new.env()
  program_globals$b <- 0.2
  program_globals$c <- 3.7
  transitions <- list(
    infect = list(
      is_enabled = function(state, time) with(state, disease == "S"),
      when = function(state, time) rexp(1, 1/50),
      fire = function(state, time) {
        b <<- 0.3
        within(state, {disease = "I"})
      }
    ),
    recover = list(
      is_enabled = function(state, time) with(state, disease == "I"),
      when = function(state, time) rexp(1, 1/200),
      fire = function(state, time) {
        b <<- 0.25
        within(state, {disease = "S"})
      }
    )
  )
  # This sets up the data.table of individuals.
  individuals <- data.table::data.table(
    disease = factor(c("S", "I", "S"), levels = c("S", "I"))
  )
  simulation <- continuous_simulation(
    individuals,
    transitions,
    observe_continuous,
    program_globals
  )
  simulation <- init_continuous(simulation)
  simulation <- run_continuous(simulation, 200)
  # Only the filled-in prefix of the trajectory buffer is meaningful.
  trajectory <- simulation$trajectory[1:simulation$trajectory_cnt]
  expect_gt(length(trajectory), 1)
  # Every recorded event must be one of the two declared transitions.
  # (1:length(...) is safe here: the expect_gt above guarantees >= 2.)
  for (check_idx in 1:length(trajectory)) {
    expect_true(trajectory[[check_idx]]$name %in% c("infect", "recover"))
  }
})
|
c63213881e4648b6e61578666aefd4270968e7a4 | de15a984ea7b066a452ad48b01c3b7b6eca2d12d | /scripts/01-data_cleaning-post-strat.R | 35d9661209be64c41b01ad594ed6d04793f8f3d9 | [
"MIT"
] | permissive | Xeon0312/forecasting_us_election | e092f1ccf3362aaf322f0f68d91383b413114149 | 376e7ba5025ae8efb9ca0ef386a2612cdfcbfe73 | refs/heads/forecasting_us_election | 2023-01-03T12:52:18.103725 | 2020-11-04T03:54:19 | 2020-11-04T03:54:19 | 308,555,098 | 0 | 1 | MIT | 2020-11-04T02:53:25 | 2020-10-30T07:25:10 | R | UTF-8 | R | false | false | 10,247 | r | 01-data_cleaning-post-strat.R | #### Preamble ####
# Purpose: Prepare and clean the survey data downloaded from IPUMS
# Author: Boyu Cao
# Date: 22 October 2020
# Contact: boyu.cao@mail.utoronto.ca
# License: MIT
# Pre-requisites:
# - Need to have downloaded the ACS data and saved it to inputs/data
# - Don't forget to gitignore it!

#### Workspace setup ####
library(haven)
library(tidyverse)
# Read in the raw data (Stata .dta extract downloaded from IPUMS USA).
raw_data <- read_dta("../../inputs/data/usa_00001.dta"
                     )
# Add the labels: turn the labelled columns into factors carrying their
# IPUMS value labels (the string comparisons below rely on these labels).
raw_data <- labelled::to_factor(raw_data)
# Just keep some variables that may be of interest (change
# this depending on your interests)
# names(raw_data) only prints the available column names to the console.
names(raw_data)
reduced_data_p <-
  raw_data %>%
  select(age,
         sex,
         educd,
         stateicp,
         inctot,
         race,
         perwt,
         citizen,
         labforce)
# Cleaning data
# Remove people who can't vote.
# NOTE(review): `age` is a factor at this point (labelled::to_factor was
# applied above); as.numeric() on a factor returns the level index, not
# the label value — confirm the level order makes index == age in years.
reduced_data_p$age<-as.numeric(reduced_data_p$age)
# NOTE(review): this keeps only naturalized citizens and citizens born
# abroad of American parents; US-born citizens (also eligible voters) are
# excluded — verify this restriction is intentional.
reduced_data_p<-reduced_data_p %>% filter(age>=18 &
                                            (citizen=="naturalized citizen"|citizen=="born abroad of american parents")
)
# Adjust the NA: 9999999 is treated as missing income (IPUMS N/A code —
# confirm), and the "n/a" labour-force label becomes a real NA.
reduced_data_p$inctot<-ifelse(reduced_data_p$inctot==9999999,
                              NaN,reduced_data_p$inctot)
reduced_data_p$labforce<-ifelse(reduced_data_p$labforce=="n/a",
                                NA,reduced_data_p$labforce)
# Drop NA rows (this also removes the NaN incomes, since is.na(NaN) is TRUE).
reduced_data_p<-na.omit(reduced_data_p)
reduced_data_p <- labelled::to_factor(reduced_data_p)
# Clean memory
rm(raw_data)
#### What's next? ####
# Making some age-groups (decade-wide bins; 18-20 year-olds land in the
# first bin because under-18s were filtered out above).
reduced_data_p<-reduced_data_p %>%
  mutate(agegroup = case_when(age <=20 ~ '20 or less',
                              age >20 & age <= 30 ~ '21 to 30',
                              age >30 & age <= 40 ~ '31 to 40',
                              age >40 & age <= 50 ~ '41 to 50',
                              age >50 & age <= 60 ~ '51 to 60',
                              age >60 & age <= 70 ~ '61 to 70',
                              age >70 & age <= 80 ~ '71 to 80',
                              age >80 ~ 'above 80'
  ))
# Unified the columns name
## gender: recode to capitalised "Female"/"Male" labels.
reduced_data_p$sex<-ifelse(reduced_data_p$sex=="female","Female","Male")
# NOTE(review): labforce is a factor here, yet it is compared with the
# number 2 — this compares against the level index, not the label.
# Confirm which coding ("no, not in the labor force" etc.) is intended.
reduced_data_p$labforce<-ifelse(reduced_data_p$labforce==2,"No","Yes")
reduced_data_p<-rename(reduced_data_p,gender=sex)
## education
# Groups of raw IPUMS `educd` labels that collapse into one output category.
grade_3toless<-c("no schooling completed","nursery school, preschool","kindergarten","grade 1","grade 2","grade 3")
grade_4to8<-c("grade 4","grade 5","grade 6","grade 7","grade 8")
grade_9to12<-c("grade 9","grade 10","grade 11","12th grade, no diploma")
high_school_grad<-c("ged or alternative credential","regular high school diploma")
col_not_grad<-c("some college, but less than 1 year",
                "1 or more years of college credit, no degree")
# Map educd onto the target education categories; any label not covered
# by a branch silently becomes NA.
reduced_data_p<-reduced_data_p %>%
  mutate(educd2 = case_when(educd =="associate's degree, type not specified" ~ 'Associate Degree',
                            educd=="doctoral degree"~'Doctorate degree',
                            educd =="master's degree" ~ 'Masters degree',
                            educd=="professional degree beyond a bachelor's degree" ~ "College Degree (such as B.A., B.S.)",
                            educd =="bachelor's degree" ~ "College Degree (such as B.A., B.S.)",
                            educd %in% col_not_grad~"Completed some college, but no degree",
                            educd %in% high_school_grad~"High school graduate",
                            educd %in% grade_9to12~"Completed some high school",
                            educd %in% grade_4to8~"Middle School - Grades 4 - 8",
                            educd %in% grade_3toless ~"3rd Grade or less"
  ))
### drop educd & rename educd2
reduced_data_p<-rename(reduced_data_p,education=educd2)
reduced_data_p$educd<-NULL
## race
# Collapse the raw IPUMS race labels into the target categories; all
# multi-race and "other" responses map to "Other race".
reduced_data_p<-reduced_data_p %>%
  mutate(race2 = case_when(race=="white"~"White",
                           race=="chinese"~"Chinese",
                           race=="black/african american/negro"~"Black, or African American",
                           race=="two major races"~"Other race",
                           race=="other race, nec"~"Other race",
                           race=="japanese"~"Japanese",
                           race=="american indian or alaska native"~"American Indian or Alaska Native",
                           race=="three or more major races"~"Other race",
                           race=="other asian or pacific islander"~"other asian or pacific islander"
  ))
# NOTE(review): "other asian or pacific islander" stays lower-case while
# every other category is capitalised — confirm it matches the category
# labels used on the survey side of the post-stratification.
reduced_data_p$race<-reduced_data_p$race2
reduced_data_p$race2<-NULL
## Shorten the state names to two-letter postal abbreviations.
## The built-in `state.name`/`state.abb` constants (datasets package,
## attached by default) replace the previous 51-branch case_when; DC is
## appended by hand because it is not part of the 50-state constants.
## stateicp values that match nothing become NA, exactly as before.
state_abbreviations <- c(setNames(state.abb, tolower(state.name)),
                         "district of columbia" = "DC")
reduced_data_p <- reduced_data_p %>%
  mutate(state = unname(state_abbreviations[as.character(stateicp)]))
reduced_data_p$stateicp<-NULL
# reference: https://www.50states.com/abbreviations.htm
## House hold income
# NOTE(review): `inctot` is the respondent's *personal* total income in
# IPUMS; labelling the bins "household_income" may be a mismatch with the
# survey variable being matched against — confirm.
# Non-integer incomes falling between bins (e.g. 14999.5) would become NA.
reduced_data_p<-reduced_data_p %>%
  mutate(household_income = case_when(inctot<=14999 ~ "Less than $14,999",
                                      inctot>=15000 & inctot<=19999~"$15,000 to $19,999",
                                      inctot>=20000 & inctot<=24999~"$20,000 to $24,999",
                                      inctot>=25000 & inctot<=29999~"$25,000 to $29,999",
                                      inctot>=30000 & inctot<=34999~"$30,000 to $34,999",
                                      inctot>=35000 & inctot<=39999~"$35,000 to $39,999",
                                      inctot>=40000 & inctot<=44999~"$40,000 to $44,999",
                                      inctot>=45000 & inctot<=49999~"$45,000 to $49,999",
                                      inctot>=50000 & inctot<=54999~"$50,000 to $54,999",
                                      inctot>=55000 & inctot<=59999~"$55,000 to $59,999",
                                      inctot>=60000 & inctot<=64999~"$60,000 to $64,999",
                                      inctot>=65000 & inctot<=69999~"$65,000 to $69,999",
                                      inctot>=70000 & inctot<=74999~"$70,000 to $74,999",
                                      inctot>=75000 & inctot<=79999~"$75,000 to $79,999",
                                      inctot>=80000 & inctot<=84999~"$80,000 to $84,999",
                                      inctot>=85000 & inctot<=89999~"$85,000 to $89,999",
                                      inctot>=90000 & inctot<=94999~"$90,000 to $94,999",
                                      inctot>=95000 & inctot<=99999~"$95,000 to $99,999",
                                      inctot>=100000 & inctot<=124999~"$100,000 to $124,999",
                                      inctot>=125000 & inctot<=149999~"$125,000 to $149,999",
                                      inctot>=150000 & inctot<=174999~"$150,000 to $174,999",
                                      inctot>=175000 & inctot<=199999~"$175,000 to $199,999",
                                      inctot>=200000 & inctot<=249999~"$200,000 to $249,999",
                                      inctot>=250000~"$250,000 and above"
  ))
reduced_data_p$inctot<-NULL
# Keep only the columns needed for post-stratification.
reduced_data_p%>% select(perwt,agegroup,gender,education,state,household_income,race,labforce)->census_data
# Add cells: one post-stratification cell per agegroup x gender combination.
census_data$cell<-paste(census_data$agegroup,census_data$gender)
# Convert variables to factors
f.cols.census<-c("agegroup","gender","education","state","household_income" ,"race", "cell","labforce")
census_data[f.cols.census] <- lapply(census_data[f.cols.census], factor)
# Clean memory
rm(reduced_data_p)
|
23c47f62694db63cd76df9ede30edea833574f1c | 74651f98951cbaf136fc2d2d8ad03b6f8bc0f958 | /zi_pi.r | 4d5a96fd82a6d3743124ef8fdc4dd30b99f34bb1 | [] | no_license | Xuyifei-NAU/MyScript | 65de9c1876ac3ed1c30a9be98d58c2ba296c492b | c7e63b661029a16cbe076ea2859942482b6af4d5 | refs/heads/main | 2023-06-26T10:40:41.103806 | 2021-07-23T00:45:16 | 2021-07-23T00:45:16 | 311,651,078 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,959 | r | zi_pi.r | ##定义函数
# Compute, for every node of a network, the within-module degree z-score
# (Zi) and the among-module connectivity (Pi) given a node table and the
# network's (correlation/adjacency) matrix.
#
# Arguments:
#   nodes_bulk       - node attribute table; rownames are node ids
#   z.bulk           - square matrix with rownames/colnames = node ids
#   modularity_class - name of the nodes_bulk column holding the module id
#   degree           - name of the nodes_bulk column holding the node degree
#
# Returns nodes_bulk merged (by node id) with two new columns:
#   within_module_connectivities (z) and among_module_connectivities (c).
#
# NOTE(review): the code assumes module ids are consecutive integers
# 1..max(module). With a single module, the `2:max(...)` loops below
# iterate c(2, 1) and fail — confirm upstream guarantees >= 2 modules.
zi.pi<-function(nodes_bulk, z.bulk, modularity_class, degree){
  # Binarise the matrix: any non-zero weight counts as one edge.
  z.bulk[abs(z.bulk)>0]<-1
  module<-which(colnames(nodes_bulk)==modularity_class)
  module.max<-max(nodes_bulk[,module])
  degree<-which(colnames(nodes_bulk)==degree)
  # Split the matrix into one square block per module (rows and columns
  # restricted to that module's nodes).
  bulk.module<-list(NA)
  length(bulk.module)<-module.max
  for(i in 1:max(nodes_bulk[,module])){
    bulk.module[[i]]<-z.bulk[which(nodes_bulk[,module]==i),which(nodes_bulk[,module]==i)]
    bulk.module[[i]]<-as.data.frame(bulk.module[[i]])
    rownames(bulk.module[[i]])<-rownames(z.bulk)[which(nodes_bulk[,module]==i)]
    colnames(bulk.module[[i]])<-colnames(z.bulk)[which(nodes_bulk[,module]==i)]
  }
  # within-module degree z
  # Initialise one single-column data frame per module (values are
  # placeholders; overwritten below).
  z_bulk<-list(NA)
  length(z_bulk)<-module.max
  for(i in 1:length(z_bulk)){
    z_bulk[[i]]<-bulk.module[[i]][,1]
    z_bulk[[i]]<-as.data.frame(z_bulk[[i]])
    colnames(z_bulk[[i]])<-"z"
    rownames(z_bulk[[i]])<-rownames(bulk.module[[i]])
  }
  # Compute the z-score of each node's within-module degree.
  for(i in 1:max(nodes_bulk[,module])){
    if(length(bulk.module[[i]])==1){
      # Single-node module: z is defined as 0.
      z_bulk[[i]][,1]<-0
    }else if(sum(bulk.module[[i]])==0){
      # No within-module edges at all.
      z_bulk[[i]][,1]<-0
    }else{
      k<-rowSums(bulk.module[[i]])
      mean<-mean(k)
      sd<-sd(k)
      if (sd==0){
        # All nodes have identical within-module degree.
        z_bulk[[i]][,1]<-0
      }else{
        z_bulk[[i]][,1]<-(k-mean)/sd
      }
    }
  }
  # Stack the per-module z tables into one table (cumulative rbind).
  for(i in 2:max(nodes_bulk[,module])) {
    z_bulk[[i]]<-rbind(z_bulk[[i-1]],z_bulk[[i]])
  }
  z_bulk<-z_bulk[[module.max]]
  # Split the matrix columns by module (rows keep every node), so
  # rowSums gives each node's link count into a given module.
  bulk.module1<-list(NA)
  length(bulk.module1)<-module.max
  for(i in 1:max(nodes_bulk[,module])){
    bulk.module1[[i]]<-z.bulk[,which(nodes_bulk[,module]==i)]
    bulk.module1[[i]]<-as.data.frame(bulk.module1[[i]])
    rownames(bulk.module1[[i]])<-rownames(z.bulk)
    colnames(bulk.module1[[i]])<-colnames(z.bulk)[which(nodes_bulk[,module]==i)]
  }
  # among-module connectivity c
  # Placeholder one-column matrices, one per module (overwritten below).
  c_bulk<-list(NA)
  length(c_bulk)<-module.max
  for(i in 1:length(c_bulk)){
    c_bulk[[i]]<-z.bulk[,1]
    c_bulk[[i]]<-as.matrix(c_bulk[[i]])
    colnames(c_bulk[[i]])<-"c"
    rownames(c_bulk[[i]])<-rownames(z.bulk)
    c_bulk[[i]][,1]<-NA
  }
  # Square of each node's link count into every module.
  for(i in 1:max(nodes_bulk[,module])){
    c_bulk[[i]]<-rowSums(bulk.module1[[i]])
    c_bulk[[i]]<-as.matrix(c_bulk[[i]])
    c_bulk[[i]]<-c_bulk[[i]]*c_bulk[[i]]
    colnames(c_bulk[[i]])<-"c"
    rownames(c_bulk[[i]])<-rownames(z.bulk)
  }
  # Sum of squares across modules (cumulative sum).
  for(i in 2:max(nodes_bulk[,module])){
    c_bulk[[i]]<-c_bulk[[i]]+c_bulk[[i-1]]
  }
  c_bulk<-c_bulk[[module.max]]
  # c = 1 - sum_s (k_is)^2 / k_i^2, using the degree column of nodes_bulk.
  c_bulk1<-1-(c_bulk/(nodes_bulk[,degree]*nodes_bulk[,degree]))
  colnames(c_bulk1)<-"c"
  # Combine z and c into one table and merge back onto nodes_bulk.
  z_c_bulk<-c_bulk1
  z_c_bulk<-as.data.frame(z_c_bulk)
  z_c_bulk$z<-z_bulk[match(rownames(c_bulk1),rownames(z_bulk)),]
  z_c_bulk<-z_c_bulk[,c(2,1)]
  names(z_c_bulk)[1:2]<-c('within_module_connectivities','among_module_connectivities')
  z_c_bulk$nodes_id<-rownames(z_c_bulk)
  nodes_bulk$nodes_id<-rownames(nodes_bulk)
  z_c_bulk<-merge(z_c_bulk,nodes_bulk,by='nodes_id')
  z_c_bulk
}
|
b1da38b273f4ad7d13a7606e0a1f552bc1494657 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/yorkr/examples/bowlerWicketsAgainstOpposition.Rd.R | 59be0cf55e6299f78e2ca8d9c501d808c6b3a44b | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 376 | r | bowlerWicketsAgainstOpposition.Rd.R | library(yorkr)
### Name: bowlerWicketsAgainstOpposition
### Title: Bowler wickets versus different teams
### Aliases: bowlerWicketsAgainstOpposition
### ** Examples
## Not run:
##D # Get the data frame for RA Jadeja
##D jadeja <- getBowlerWicketDetails(team="India",name="Jadeja",dir=pathToFile)
##D bowlerWicketsAgainstOpposition(jadeja,"RA Jadeja")
## End(Not run)
|
73735b73816713afe37be15f71fa767bc177360b | 503284431cfffe00693e6b52931c614b235887fb | /QuantStuff.r | baf14857da3dfc9244229fb2b2c583db564cbd4b | [] | no_license | tpopenfoose/quantstuff | 5cb37f4c7b4a8bc148a97d6b173aef9867caaf0b | 031775f639394276882b7557e6dbd6e4fedbe857 | refs/heads/master | 2021-01-13T03:46:55.359239 | 2016-06-30T15:43:49 | 2016-06-30T15:43:49 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,849 | r | QuantStuff.r | library('quantmod')
#library('zoo')
#library('xts')
#library('PerformanceAnalytics')
# Tickers to download; the commented tail lists the rest of the sector.
techSector <- c("XLK","AAPL","GOOG") #,"MSFT","VZ","IBM","T","ORCL","QCOM","CSCO","INTC","V","FB","MA","EBAY","EMC","TXN","ACN","HPQ","ADP","YHOO","CRM")
#loadSymbols <- c("XPH","XBI","IHF","IHI","JO","MSFT")
# Downloads OHLC data and binds one xts object per ticker (XLK, AAPL, GOOG).
getSymbols(techSector,src="yahoo")
mySymbol <- XLK
dayRange <- 500
#candleChart(last(mySymbol,dayRange),dn.col="black",up.col="white",theme="white")
# Candlestick chart of the last `dayRange` sessions.
chartSeries(last(mySymbol,dayRange),
            type="candlesticks",
            name="XLK Tech Sector SPDR Fund",
            theme=chartTheme("white"),
            up.col="white",
            dn.col="black",
            grid.col="gray"
)
addBBands()
addMACD()
#chartStock("XLK","90")
# NOTE(review): getChart() is not defined in this file — presumably a
# helper defined elsewhere in the project; confirm it exists.
getChart("XLK",500,XLK)
#My Indicators
# not used: addTA(LoHi(last(mySymbol,dayRange)))
#Absolute daysBody <- abs(XLK$XLK.Close - XLK$XLK.Open) # Body Size
daysBody <- XLK$XLK.Close - XLK$XLK.Open # Body Size (signed; abs version above is commented out)
daysRange <- XLK$XLK.High - XLK$XLK.Low # Low to High
daysUpShadow <- ifelse(XLK$XLK.Close > XLK$XLK.Open, XLK$XLK.High - XLK$XLK.Close, XLK$XLK.High - XLK$XLK.Open) # Upper Shadow Size
daysLoShadow <- ifelse(XLK$XLK.Close > XLK$XLK.Open, XLK$XLK.Open - XLK$XLK.Low, XLK$XLK.Close - XLK$XLK.Low) # Lower Shadow Size
# NOTE(review): daysRange already equals both shadows plus the absolute
# body, so this sum double-counts each component — confirm intended.
daySummary <- daysBody + daysRange + daysUpShadow + daysLoShadow
addTA(last(daysBody,dayRange), col="blue", overlay=TRUE)
addTA(last(daysRange,dayRange), col="brown")
addTA(last(daysUpShadow,dayRange), col="green")
addTA(last(daysLoShadow,dayRange), col="red")
addTA(last(daySummary,dayRange), col="purple")
# Cross Correlation
#ccf(last(drop(AUY$AUY.Open), 90),last(drop(daysRange), 90))
#ccf(last(drop(AUY$AUY.Open), 400),last(drop(GLD$GLD.Open), 400))
#Plot various metrics...
#plot(last(daysRange,90),col="blue")
#lines(last(daysBody,90),col="red")
|
7264d6f7b8a312f0ad0f8052099a877fbaf47440 | 3be7a6917ae3e03701954a4360a6c5b612ddbf32 | /R/nature.R | bfda9e058d3b0a585d6101bdd48c2aff42e29cbb | [
"MIT"
] | permissive | zambujo/editorials | 9fa6abcb86dedea268ef949d7d9d40f44c9c4ba1 | 2f4c8c71e93c5d59748f750324350b814d15a932 | refs/heads/main | 2023-01-25T04:06:51.366937 | 2020-12-09T11:16:57 | 2020-12-09T11:16:57 | 311,373,661 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,117 | r | nature.R | #' Retrieve selected data from a Nature journal URL
#'
#' Each helper downloads one page from the journal site via the supplied
#' polite session and returns the scraped rows.
#'
#' @param addr Relative path of the page to scrape.
#' @param polite_bow An HTTP session object created with `polite::bow()`.
#' @param csv_path Path of a CSV file the result is written to.
#'
#' @return A tibble with scraped data, if `csv_path` is missing.
#' @family nature scraping function
#' @name get_nature
#' @examples
#' \dontrun{
#' get_nature_volumes(bow("https://www.nature.com"))
#' }
#'
NULL
#' @rdname get_nature
#' @export
get_nature_volumes <- function(addr = "nature/volumes",
                               polite_bow,
                               csv_path) {
  # One row per volume link on the decade-grouped "volumes" page.
  # (The default argument replaces the previous missing()/reassign idiom;
  # behaviour is unchanged for all callers.)
  volumes_html <- get_page(addr, polite_bow)
  volumes <- rvest::html_nodes(volumes_html, "#volume-decade-list")
  res <- tibble::tibble(
    volume_key = volumes %>%
      rvest::html_nodes("a") %>%
      rvest::html_attr("href"),
    date = volumes %>%
      rvest::html_nodes("time") %>%
      rvest::html_text()
  )
  # cast time: pull the four-digit year out of the displayed date label
  res <- dplyr::mutate(res,
    year = as.numeric(stringr::str_extract(date, "\\d{4}")))
  return_df(res, csv_path)
}
#' @rdname get_nature
#' @export
get_nature_issues <- function(addr, polite_bow, csv_path) {
  # Parse the volume page and collect one row per issue tile.
  page <- get_page(addr, polite_bow)
  issue_nodes <- rvest::html_nodes(page, ".flex-box-item")
  hrefs <- rvest::html_attr(issue_nodes, "href")
  dates <- rvest::html_text(rvest::html_nodes(issue_nodes, ".text-gray"))
  res <- tibble::tibble(issue_key = hrefs, issue_date = dates)
  return_df(res, csv_path)
}
#' @rdname get_nature
#' @export
get_nature_contents <- function(addr, polite_bow, csv_path) {
  # Table of contents of one issue: prefer the "This Week" section and
  # fall back to "Research Highlights" for issues that lack it.
  toc_html <- get_page(addr, polite_bow)
  contents <- rvest::html_nodes(toc_html, "#ThisWeek-content")
  if (length(contents) == 0) {
    contents <- rvest::html_nodes(toc_html, "#ResearchHighlights-section")
  }
  all_links <- rvest::html_attr(rvest::html_nodes(contents, "a"), "href")
  res <- tibble::tibble(
    issue_key = addr,
    article_key = stringr::str_subset(all_links, "^/articles/"),
    contents_labels = rvest::html_text(
      rvest::html_nodes(contents, ".mb4 span:nth-child(1)")
    )
  )
  # Nothing found (neither section present): return invisibly, as before.
  if (nrow(res) > 0) {
    return_df(res, csv_path)
  }
}
#' @rdname get_nature
#' @export
get_nature_articles <- function(addr, polite_bow, csv_path) {
  article_html <- get_page(addr, polite_bow)
  # Title and subject; each selector pair covers the two page layouts
  # (regular article page vs article list item).
  article_title <- article_html %>%
    rvest::html_node(".c-article-title, .article-item__title") %>%
    rvest::html_text()
  article_subject <- article_html %>%
    rvest::html_node(".c-article-title__super, .article-item__subject") %>%
    rvest::html_text()
  doi_txt <- NA_character_
  doi_link <- NA_character_
  if (stringr::str_to_lower(article_title) == "research highlights") {
    # early volumes -----------------------------------------
    # "Research Highlights" pages bundle several items as sections; the
    # section titles in `coda` mark the page boilerplate after the items.
    coda <- c("journal club",
              "rights and permissions",
              "about this article",
              "comments")
    # NOTE(review): html_text (and unnest below) are called unqualified,
    # unlike the rvest::-prefixed calls elsewhere — this relies on the
    # packages being attached; confirm the package imports cover it.
    article_title <- article_html %>%
      rvest::html_nodes(".c-article-section__title") %>%
      html_text
    stop_at <-
      which(stringr::str_to_lower(article_title) %in% coda) %>% min()
    ## stop short of journal club
    article_title <- head(article_title, stop_at - 1)
    external_refs <- article_html %>%
      rvest::html_nodes(".c-article-section__content")
    # Per kept section: all link hrefs, and the squished text of the
    # first paragraph.
    link_list <- external_refs %>%
      purrr::map(rvest::html_nodes, css = "a") %>%
      head(length(article_title)) %>%
      purrr::map(rvest::html_attr, name = "href")
    txt_list <- external_refs %>%
      purrr::map(rvest::html_node, css = "p") %>%
      head(length(article_title)) %>%
      purrr::map(rvest::html_text) %>%
      stringr::str_squish()
    # One output row per (section, link) pair; link-less sections are
    # kept as NA rows thanks to keep_empty.
    df_refs <- tibble::tibble(ttl = article_title,
                              txt = txt_list,
                              a = link_list) %>%
      unnest(a, keep_empty = TRUE)
    article_title <- dplyr::pull(df_refs, ttl)
    doi_txt <- dplyr::pull(df_refs, txt)
    doi_link <- dplyr::pull(df_refs, a)
  } else {
    # later volumes -----------------------------------------
    # Look for an outbound doi.org link inside the article body.
    external_refs <- article_html %>%
      rvest::html_nodes(".c-article-section__content a, .serif")
    doi_idx <- external_refs %>%
      rvest::html_attr("href") %>%
      stringr::str_detect("doi[.]org")
    if (any(doi_idx)) {
      doi_txt <- external_refs %>%
        rvest::html_text() %>%
        subset(doi_idx) %>%
        head(1) %>% ## ! leap of faith
        stringr::str_squish()
      doi_link <- external_refs %>%
        rvest::html_attr("href") %>%
        subset(doi_idx) %>%
        head(1) ## ! leap of faith
    }
  }
  if (is.na(article_subject)) {
    # try to parse subject from title ("Subject: Title" convention)
    subject_title <- stringr::str_split(article_title, ": ", n = 2)
    article_subject <- purrr::map_chr(subject_title, head, 1)
    article_title <- purrr::map_chr(subject_title, tail, 1)
    # No colon in the title: the split returned the whole title twice.
    if (identical(article_subject, article_title))
      article_subject <- NA_character_
  }
  res <- tibble::tibble(
    article_key = addr,
    title = article_title,
    topic = article_subject,
    citation = doi_txt,
    doi = doi_link
  )
  return_df(res, csv_path)
}
|
7e4869d1f844482512fa50639c7abb6b720ffed7 | ddb6801f17ad79f09dfccb1a17af980dc432bedc | /usefulFunctions/numGenesChanged.R | f0f5aaeae95b4368166d3d6817d6c1c87dd89cec | [] | no_license | alexsanchezpla/scripts | 49177da789965d71a4bf9453a740482070d37f6b | d833be5653d1aac657379f0e8ef49511d5fa5082 | refs/heads/master | 2023-03-05T02:49:56.718712 | 2023-02-19T11:10:14 | 2023-02-19T11:10:14 | 41,293,582 | 0 | 11 | null | 2016-12-14T11:20:46 | 2015-08-24T09:11:10 | HTML | UTF-8 | R | false | false | 2,098 | r | numGenesChanged.R | #' Given a limma-outputted topTable (TT) it returns the number of up or down regulated genes that woud be returned if the cutoff was set at different values.
#' @param TT A top table object such as produced by the limma package or the UEB
#'   pipeline. Must contain columns `t`, `B`, `adj.P.Val` and `P.Value`.
#' @param cName Name of the comparison that produced the top table. Defaults to
#'   "comparison". Used as the column name of the result.
#' @return A one-column data.frame; each row is the count of genes selected by
#'   one cutoff (B > 0, adjusted p < 0.01/0.05/0.25, raw p < 0.01/0.05),
#'   split into up- (t > 0) and down-regulated genes.
#' @keywords genelists, filtering
#' @seealso limma
#' @export
#' @examples
#' AvsB <- read.table("https://raw.githubusercontent.com/alexsanchezpla/scripts/master/Gene_List_Management/dades/ExpressAndTop_AvsB.csv2", head=T, sep=";", dec=",", row.names=1)
#' genesChanged <- numGenesChanged (AvsB, "Group A vs group B")
numGenesChanged <- function (TT, cName="comparison"){
  up <- TT$t > 0
  # NOTE: the B criterion historically counts t == 0 as down-regulated
  # (t <= 0), while the p-value criteria use a strict t < 0; this quirk
  # is preserved for backward compatibility.
  down_b <- TT$t <= 0
  down <- TT$t < 0
  counts <- c(
    sum(up & TT$B > 0), sum(down_b & TT$B > 0),
    sum(up & TT$adj.P.Val < 0.01), sum(down & TT$adj.P.Val < 0.01),
    sum(up & TT$adj.P.Val < 0.05), sum(down & TT$adj.P.Val < 0.05),
    sum(up & TT$adj.P.Val < 0.25), sum(down & TT$adj.P.Val < 0.25),
    sum(up & TT$P.Value < 0.01), sum(down & TT$P.Value < 0.01),
    sum(up & TT$P.Value < 0.05), sum(down & TT$P.Value < 0.05)
  )
  nGenes <- data.frame(counts)
  # Row labels kept byte-identical to the original output (including the
  # stray trailing space after "0.01 ") so downstream parsing is unchanged.
  rownames(nGenes) <- c("upReg-B>0", "downReg-B>0",
                        "upReg-Adjusted-p-val < 0.01", "downReg-Adjusted-p-val < 0.01",
                        "upReg-Adjusted-p-val < 0.05", "downReg-Adjusted-p-val < 0.05",
                        "upReg-Adjusted-p-val < 0.25", "downReg-Adjusted-p-val < 0.25",
                        "upReg-P value < 0.01 ", "downReg-P value < 0.01",
                        "upReg-P value < 0.05", "downReg-P value < 0.05")
  colnames(nGenes)[1] <- cName
  return(nGenes)
}
|
d2c556c551566fb12ff81d5bbb3daf304b4b0f0b | 0ab0d9377260d2a6f37503fc1de64f1111cbc6b4 | /RProgramming/week4.R | 474380d110d8a24ff68be23ebac59ba50747c5ae | [] | no_license | a-igo/DataAnalysis | 4aaa7df63714431f02d0b8f060be303cb7dbbe1b | 55a7f5f865325fa1a998d4914ee5f3be31a89f94 | refs/heads/master | 2020-04-17T09:17:34.376359 | 2016-04-10T07:53:56 | 2016-04-10T07:53:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 294 | r | week4.R | outcome <- read.csv('outcome-of-care-measures.csv', colClasses = 'character')
head(outcome)
#Hist of "Hospital.30.Day.Death..Mortality..Rates.from.Heart.Attack"
# Column 11 was read as character (colClasses = 'character'); coerce to
# numeric so it can be plotted (non-numeric entries become NA with a warning).
outcome[, 11] <- as.numeric(outcome[, 11])
hist(outcome[, 11])
# NOTE(review): this suppresses warnings globally for the rest of the
# session, not just the coercion warning above — consider suppressWarnings().
options(warn = -1)
source('rankall.R')
# Presumably returns the 10th-ranked heart-failure hospital per state —
# see rankall.R for the exact contract.
rankall("heart failure", 10)
|
c9a055bd728ef74eaa50a95047d110c1336ca1be | 9fe31d58c07d8d667e08e41c0213d84d4587e721 | /viz_stacked_bar.R | 2e92637e2110274a80d423db1d4e33c9ec62f7c8 | [] | no_license | Ecboxer/roi_sports | b8fe46f3c05052d01fd87080e79e5e8304e02766 | 6cc8c506b00a5d82df90f7f92d1bdb1f86730eb2 | refs/heads/master | 2020-04-26T11:28:44.211328 | 2019-04-09T16:42:57 | 2019-04-09T16:42:57 | 173,517,846 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,972 | r | viz_stacked_bar.R | library(tidyverse)
library(Lahman)
library(baseballr)
library(plotly)
# Pre-computed inputs: per-sport action/commercial shares, MLB game
# metadata, and per-inning game timings.
df_action <- read_csv('df_action.csv')
# Exploratory print of the baseball rows.
df_action %>% filter(Sport == 'Baseball')
action_dur <- df_action %>% filter(Sport == 'Baseball') %>%
  select(action_duration)
action_min <- action_dur$action_duration / 60
df_mlb <- read_csv('mlb_misc.csv')
df_mlb %>% head()
df_innings <- read_csv('innings.csv')
df_innings %>% head()
# Baseball-Reference team codes.
# NOTE(review): br_teams and action_min are not used anywhere below —
# confirm whether they are still needed.
br_teams <- c('LAA','ARI','ATL','BAL','BOS','CHC',
              'CHW','CIN','CLE','COL','DET','MIA',
              'HOU','KCR','LAD','MIL','MIN','NYM',
              'NYY','OAK','PHI','PIT','SDP','SEA',
              'SFG','STL','TBR','TEX','TOR','WSN')
# Quick single-bar look at the action / commercials / other split.
df_action %>% filter(Sport=='Baseball') %>%
  select(perc_action, perc_comm) %>%
  mutate(perc_other=1-perc_action-perc_comm) %>%
  gather(key=Key, value=Value) %>%
  ggplot() +
  geom_bar(aes(x=1, y=Value, fill=Key),
           stat='identity',
           position='fill')
# Game lengths for recent seasons (exploratory print).
df_mlb %>% filter(Year > 2013) %>%
  select(Year, Time)
# Seconds of in-play action pulled from df_action.
# NOTE(review): this assumes df_action's baseball rows align 1:1 with the
# rows of df_innings — confirm.
action_duration_sec <- df_action %>%
  filter(Sport=='Baseball') %>%
  select(action_duration) %>%
  pull
df_innings$`Seconds of Action` <- action_duration_sec
# Exploratory print of the columns used in the plots below.
df_innings %>%
  filter(Inning==9) %>%
  select(Year,
         `Commercials`=`Commercial time (game)`,
         `Action`=`Seconds of Action`,
         `Other`=`Time of game`)
# Stacked (filled) bar chart: share of the game that is action,
# commercials, or other, for the seasons 1984 and 2014.
g_stacked <- df_innings %>%
  filter(Inning==9) %>%
  select(Year,
         `Commercials`=`Commercial time (game)`,
         `Action`=`Seconds of Action`,
         `Other`=`Time of game`) %>%
  gather(key=Category, value=Value, -Year) %>%
  ggplot() +
  geom_bar(aes(x=Year, y=Value, fill=Category),
           width=28,
           stat='identity', position='fill') +
  theme_eric() + # NOTE(review): theme_eric() is not defined here — presumably a personal theme sourced elsewhere; confirm.
  xlab('Year') + ylab('Proportion of the game') +
  ggtitle('Have modern baseball broadcasts lost action?') +
  scale_fill_brewer(type='qual', palette=2, 'Share') +
  scale_x_discrete(limits=c(1984,2014)) +
  theme(legend.title=element_blank())
# Interactive version; the manual annotation re-creates the legend title
# that ggplotly drops.
g_stacked_p <- ggplotly(g_stacked) %>%
  add_annotations(text="Category",
                  xref="paper", yref="paper",
                  x=1.03, xanchor="left",
                  y=0.81, yanchor="bottom",
                  legendtitle=T, showarrow=F) %>%
  layout(legend=list(y=0.8, yanchor='top'))
g_stacked_p
# Slopegraph
# Same data as the stacked bars, drawn as a two-point slopegraph of each
# category's share of the game in 1984 vs 2014.
h_slope <- df_innings %>%
  filter(Inning==9) %>%
  select(Year,
         `Commercials`=`Commercial time (game)`,
         `Action`=`Seconds of Action`,
         `Other`=`Time of game`) %>%
  gather(key=Category, value=Value, -Year) %>%
  group_by(Year) %>%
  mutate(Total=sum(Value)) %>%
  ungroup() %>%
  mutate(Proportion=round(Value/Total,2)) %>%
  ggplot() +
  geom_line(aes(x=Year,
                y=Proportion,
                color=Category),
            size=4, alpha=.5) +
  geom_point(aes(x=Year,
                 y=Proportion,
                 fill=Category),
             size=8, shape=21, color='black') +
  theme_eric() +
  xlab('Year') + ylab('Proportion of the game') +
  ggtitle('Have modern baseball broadcasts lost action?') +
  scale_color_brewer(type='qual',
                     palette=2, 'Share',
                     labels=c('Action',
                              'Commercials',
                              'Other')) +
  scale_fill_brewer(type='qual',
                    palette=2, 'Share',
                    labels=c('Action',
                             'Commercials',
                             'Other')) +
  scale_x_continuous(limits=c(1980,2018),
                     breaks=c(1984, 2014)) +
  scale_y_continuous(breaks=seq(0.1, 0.8, by=0.1)) +
  theme(legend.title=element_blank(),
        axis.title.x=element_text(size=20))
# Interactive version with the same manual legend-title annotation.
h_slope_p <- ggplotly(h_slope) %>%
  add_annotations(text="Category",
                  xref="paper", yref="paper",
                  x=1.023, xanchor="left",
                  y=0.81, yanchor="bottom",
                  legendtitle=T, showarrow=F) %>%
  layout(legend=list(y=0.8, yanchor='top'))
h_slope_p
b8d5267bf4bef3e15a0ea92552ffce00e6ea6f43 | 7c4ecb3b2102146d5dfbca7dcd11500e9536c42f | /cachematrix.R | 756010593b1db86aae1e644597be0a8130ca6bf6 | [] | no_license | vldmr-voznyuk/ProgrammingAssignment2 | 08e979def662a059cf5b3d5f34bc88293334fe72 | 3f1fb015d27b02f56e85a861555882b882cd80e2 | refs/heads/master | 2020-09-07T17:11:06.876694 | 2019-11-12T07:22:46 | 2019-11-12T07:22:46 | 220,856,151 | 0 | 0 | null | 2019-11-10T21:51:52 | 2019-11-10T21:51:51 | null | UTF-8 | R | false | false | 2,062 | r | cachematrix.R |
# This pair of functions avoids costly re-computation of a matrix
# inverse: makeCacheMatrix() wraps a matrix together with a memoised
# inverse, and cacheSolve() (below) fills and reuses that cache.
#
# makeCacheMatrix returns a list of four accessors sharing one enclosing
# environment:
#   set(y)           store a new matrix (and invalidate the cached inverse)
#   get()            return the stored matrix
#   setinverse(inv)  store a computed inverse
#   getinverse()     return the cached inverse, or NULL if not set yet
# The accessors write into the shared environment with `<<-`.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached_inverse <<- NULL # the old inverse no longer matches
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
# The "cacheSolve" function takes the output of "makeCacheMatrix" as an argument. It checks
# if the inverse of the matrix was stored in cache, and either takes the result from cache or
# calculates the inverse of the matrix and stores it there.
# If "makeCacheMatrix" is called with a matrix as an argument, the inverse of that matrix is
# calculated. If the "makeCacheMatrix" is called without an argument (or for calculating the
# inverse of a new matrix), the "set" function from the list should be called before executing
# the "cacheSolve" function. Please, see use-examples below.
# NOTE: it is assumed the matrix supplied is always invertible.
# Example 1. ---------------
# > mx<-makeCacheMatrix()
# > mx$set(matrix(c(1:4),2,2))
# > cacheSolve(mx)
# Example 2. ---------------
# > mx<-cacheSolve( makeCacheMatrix( matrix( c(1:4),2,2) ) )
# Now, when the inverse of the same matrix is needed again, the "cacheSolve" function
# may be called or the $getinverse function used. In both cases the cashed value will be used.
# > cacheSolve(mx)
# > mx$getinverse()
cacheSolve <- function(x, ...) {
m <- x$getinverse()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
# Return a matrix that is the inverse of 'x'
} |
531ba296bf37ea9aee9bf266709e91568e2cff0e | b50c76b10eeae84c3e972deab7effdecb22ebb43 | /02_ dados/02_ RFB/scripts/function extract table unid.r | 04ad332a5d5a5f3a2595d6b8cd09559a0580aa60 | [
"MIT"
] | permissive | matth3us/tccENAP | 9002cfcd3825e989a235ba5190d9112ebd9d46ca | bd3b607dee01ea82cef2a45b48815dd147ff5113 | refs/heads/master | 2022-05-25T17:45:34.538088 | 2020-04-26T22:33:26 | 2020-04-26T22:33:26 | 176,340,283 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 322 | r | function extract table unid.r | tabularUnid <- function(url){
remDr$navigate(url)  # `remDr` is a global remote-browser driver created elsewhere (presumably RSelenium — confirm)
  # Parse the first HTML table of the rendered page. The raw table appears
  # to be key/value pairs (V1 = field name, V2 = value); spread() pivots
  # them into columns, then the fields of interest are kept.
  df <- readHTMLTable(htmlParse(remDr$getPageSource()[[1]], encoding = 'UTF-8')) [[1]] %>%
    spread(key = V1, value = V2) %>%
    select(Unidade, Estado, Cidade, Bairro, CEP, Logradouro, Atendimento, Titular, 'Telefone(s)', Observações)
  return(df)
}
|
7053fad7d8ab5536dd3b63560a3b5cbf786ab2a7 | a94fafa0e1ba5fefc8c711db78468baca7179374 | /Code/Data Analysis/main_doube_stance_analysis.R | 2f5fc9f5c833960e25dc54e537a1b2b9b31d4138 | [] | no_license | CatStrain/Cat_skin | 1c76fe7870e1ace2e1f9612fd166ac5135771785 | 9b6823cf68467a87d3af39b3e1cb0709dd432815 | refs/heads/master | 2023-04-10T11:16:52.024737 | 2021-02-25T04:01:43 | 2021-02-25T04:01:43 | 273,569,581 | 1 | 2 | null | 2021-04-26T00:33:50 | 2020-06-19T19:16:47 | C++ | UTF-8 | R | false | false | 5,167 | r | main_doube_stance_analysis.R | #Initialization:
rm(list = ls()); # clear workspace variables (NOTE(review): wiping the global environment inside a script is risky for anyone sourcing it)
cat("\014") # clear window (form-feed clears the RStudio console)
library(rgl)
library(class)
library(ggplot2)
library(caret) # "R package that provides general interface to +- 150 ML alg"
library("lattice")
#######
# loading RAW files:
# NOTE(review): absolute Windows paths — the script only runs on the
# original author's machine as written.
mypath_1 <- "C:/Users/dario/Documents/Github/Cat_skin/Data/Backup/Biped/4_points_5V_Sensors_in_Center_V2/initial_force_plate_data_2_021321.txt"
forceplate_trainingdata <- read.csv(mypath_1) # Creating dataframe from csv file
mypath_2 <- "C:/Users/dario/Documents/Github/Cat_skin/Data/Backup/Biped/4_points_5V_Sensors_in_Center_V2/force_plate_with_biped_data_2_021321.txt"
labels_CoP <- read.csv(mypath_2)
mypath_3 <- "C:/Users/dario/Documents/Github/Cat_skin/Data/Backup/Biped/4_points_5V_Sensors_in_Center_V2/biped_leg_test_simut_0213_2.txt"
features_strainin_signals <- read.csv(mypath_3)
########
#DOWNSAMPLING data, and adding column names
downsample_with_labels <- function(x){
  # Down-sample `x` to every 25th row (starting at row 12) and append a
  # column of centre-of-pressure location labels cycling 1,2,3,4.
  #
  # x: data frame of raw force-plate samples (one column per load cell).
  # Returns: the down-sampled data frame with the label column appended
  #          as column 5.
  data.downsampled <- x[seq(12, nrow(x), 25), ]
  # FIX: the original built the label vector with a hard-coded `[1:250]`,
  # which errored (or silently recycled) whenever the input was not exactly
  # 250 rows after down-sampling.  rep_len() sizes the cyclic 1,2,3,4
  # pattern to the actual number of retained rows.
  data.downsampled[, 5] <- rep_len(1:4, nrow(data.downsampled))
  data.downsampled
}
downsample_no_labels <- function(x){
  # Keep every 25th row starting at row 12 (sensor stream decimation);
  # no label column is attached.
  x[seq(12, nrow(x), by = 25), ]
}
forceplate_trainingdata <- downsample_with_labels(forceplate_trainingdata) #Downsampling with labels for Force Plate Data
labels_CoP <- downsample_no_labels(labels_CoP) #Downsampling with no labels for using as ground truth provided by Force Plate
features_strainin_signals <- downsample_no_labels(features_strainin_signals) #Downsampling with no labels for left foot biped
# Name the load-cell (LC_*) and strain-sensor (SL_*) channels.
newheaders <- c("LC_1", "LC_2", "LC_3", "LC_4","CoP_location")
colnames(forceplate_trainingdata) <- newheaders
newheaders <- c("LC_1", "LC_2", "LC_3", "LC_4")
colnames(labels_CoP) <- newheaders
biped_data <- features_strainin_signals
newheaders <- c("SL_1", "SL_2", "SL_3", "SL_4","SL_5", "SL_6", "SL_7", "SL_8")
colnames(biped_data) <- newheaders
########
#Reprocessing (normalizing)
# caret::preProcess(method = "range") rescales each numeric column to [0, 1].
trans_1 <- preProcess(forceplate_trainingdata, method = c("range"))
forceplate_trainingdata = predict(trans_1, forceplate_trainingdata[,1:5])
trans_2 <- preProcess(labels_CoP, method = c("range"))
labels_CoP = predict(trans_2, labels_CoP[,1:4])
trans_3 <- preProcess(biped_data, method = c("range"))
biped_data = predict(trans_3, biped_data[,1:8])
######
# Training force plate (with KNN):
set.seed(99) # required to reproduce the results
forceplate_trainingdata['CoP_location'] = factor(forceplate_trainingdata[,'CoP_location'])
trControl <- trainControl(method = "cv", number = 5) # 5 fold Cross-Validation
fit <- train(CoP_location ~ .,
             method = "knn",
             tuneGrid = expand.grid(k = 1:20),
             trControl = trControl,
             metric = "Accuracy",
             data = forceplate_trainingdata) # test KNN for K values: 1:20
print(fit) # print results
print(confusionMatrix(fit))
levelplot(confusionMatrix(fit)$table) # show the confusion matrix
#########
#Using force plate as ground truth for the incoming biped data
# The force-plate KNN labels the simultaneous biped recording; those
# predicted labels then serve as training targets for the biped classifier.
test_pred <- predict(fit, newdata = labels_CoP) #Labels for biped sensory data (GROUND TRUTH)
#test_pred
print(test_pred)
biped_data[,9] = test_pred # adding labels to biped data
newheaders <- c("SL_1", "SL_2", "SL_3", "SL_4","SL_5", "SL_6", "SL_7", "SL_8","CoP_location")
colnames(biped_data) <- newheaders
#########
# Training and testing biped
# Same 5-fold CV / KNN grid as above, now on the 8 strain channels.
set.seed(99)
biped_data['CoP_location'] = factor(biped_data[,'CoP_location'])
trControl <- trainControl(method = "cv", number = 5)
fit <- train(CoP_location ~ .,
             method = "knn",
             tuneGrid = expand.grid(k = 1:20),
             trControl = trControl,
             metric = "Accuracy",
             data = biped_data)
print(fit)
print(confusionMatrix(fit))
levelplot(confusionMatrix(fit)$table)
|
90de2cb61190f6f53e9471ba84984578f2f038cb | 1e9c9f2a9639db7cdb032aae69cb4d99aef1d3a5 | /codeSchool/tryR/chapter-2.R | fd85ab4119ac4d5cdc911bf87980880d636b3601 | [
"MIT"
] | permissive | sagarnikam123/learnNPractice | f0da3f8acf653e56c591353ab342765a6831698c | 1b3b0cb2cff2f478006626a4c37a99102acbb628 | refs/heads/master | 2023-02-04T11:21:18.211654 | 2023-01-24T14:47:52 | 2023-01-24T14:47:52 | 61,184,927 | 2 | 1 | MIT | 2022-03-06T11:07:18 | 2016-06-15T06:57:19 | Python | UTF-8 | R | false | false | 10,280 | r | chapter-2.R | #################
# Chapter-2 #
#################
# - basics of manipulating vectors - creating and accessing them, doing math with them, and making sequences.
# - how to make bar plots and scatter plots with vectors.
# - how R treats vectors where one or more values are not available
# Data structures in R
# All R objects have a type or mode, as well as a class, which can be determined with typeof, mode & class.
# 1- Vectors & its Access
# Vector is a basic structure,it is list of values
# in the following atomic modes (data types):
# numeric, integer, character, logical, complex, raw
# A vector's values can be numbers, strings, logical values, or any other type, as long as they're all the same type
# (Tutorial script: each bare expression below auto-prints when run at the
# console or via source(..., echo = TRUE).)
c(12,39,45) # "c" function (c is short for Combine) creates a new vector by combining/concanating a list of values.
c('a','b','c')
a = c(1, 2, 3, 4)
# Basic Arithmetic Operations
# All of these are vectorized: they operate element-wise on `a`.
a+5
a - 10
a/5
b <- a - 10
exp(a)
log(a)
sin(a)
a^2 # squre
a*b # multiplication
#Basic Statistics
min(a,b)
max(a,b)
mean(a)
median(a)
quantile(a)
var(a)
sd(b)
# Vectors cannot hold values with different modes (types)
# All the values were converted to a single mode (characters) so that the vector can hold them all.
# FIX: this section was a pasted interactive console transcript ("> "
# prompts plus printed "[1] ..." output lines), which is not valid R and
# made the whole file unparseable.  Prompts are stripped and the printed
# results are preserved as comments so the script sources cleanly.
c(22,"Trying ...R",'z')
# [1] "22"          "Trying ...R" "z"
#Accessing:- retrieve an individual value within a vector by providing its numeric index in square brackets.
alphaList <- c('a','b','c','d','e','f')
alphaList
# [1] "a" "b" "c" "d" "e" "f"
str(alphaList) # internal structure of alphaList
# chr [1:6] "a" "b" "c" "d" "e" "f"
mode(alphaList) #view storage mode of object passed i.e. alphaList
# Many languages start array indices at 0, but R's vector indices start at 1
alphaList[1]
# [1] "a"
alphaList[2:5]
# [1] "b" "c" "d" "e"
#assign new values within an existing vector
alphaList[1] <- 'FirstElement'
alphaList
# [1] "FirstElement" "b" "c" "d" "e" "f"
# Writing past the end grows the vector to the new length.
alphaList[7] = "Seventh Element"
alphaList
# [1] "FirstElement" "b" "c" "d" "e" "f" "Seventh Element"
line= c ('one','two','three','four','five','six')
line
# [1] "one" "two" "three" "four" "five" "six"
line[2:6] # retrieve ranges of values
# [1] "two" "three" "four" "five" "six"
line[c(1,3)]# You can use a vector within the square brackets to access multiple values
# [1] "one" "three"
line[5:6] <- c("Fifth-5","Sixth-6") # set ranges of values; just provide the values in a vector.
line
# [1] "one" "two" "three" "four" "Fifth-5" "Sixth-6"
# Assigning names to a vector's elements (can be used as labels for your data.)
# by passing a second vector filled with names to the names assignment function
numbers<- c(1,2,3,4,5)
names(numbers)<-c('one','two','three','four','five')
numbers
# You can also use the names to access the vector's values
numbers['four']
# 2- Sequence Vectors
#vector with a sequence of numbers, with start:end notation
3:8
seq(40,47,by=1) # using seq function
seq(1,4,by =0.5) # you can increments elements in seq other than 1
20:15 # reverse vector
# 3- Plotting One Vector
# barplot function draws a bar chart with a vector's values.
plotValues <- c(4, 5, 1)
barplot(plotValues)
# Element names become the bar labels.
names(plotValues) <- c("ShriLanka", "Australia", "India")
barplot(plotValues)
barplot(1:20)
# 4- Vector Math
# If you add a scalar (a single value) to a vector,
# the scalar will be added to each value in the vector, returning a new vector with the results.
a <- c(1, 2, 3)
a + 1 #addition
a / 2 # division
a * 2 #Multiplication
# If you add two vectors, R will take each value from each vector and add them
b <- c(4, 5, 6)
a + b
a - b
#Compairing two vectors
# `==` is element-wise here, returning a logical vector, not a single value.
a == c(1, 99, 3)
# Notice that R didn't test whether the whole vectors were equal; it checked each value in the a vector against the value at the same index in our new vector.
sin(a) # Trignometric function
# 5- Scatter Plots
# takes two vectors, one for X values and one for Y values, and draws a graph of them.
x <- seq(1, 20, 0.1)
y <- sin(x)
plot(x, y) # first argument (x) are used for the horizontal axis, and values from the second (y) for the vertical.
values <- -10:10
absolutes <- abs(values)
plot(values, absolutes)
# 6 -NA Values (dealing with Missing Data)
# when working with sample data, a given value isn't available
# R explicitly indicates value as 'NA' if sample was not available
a <- c(1, 3, NA, 7, 9)
is.na(a) # returns FALSE FALSE TRUE FALSE FALSE
# NA propagates: sum() of a vector containing NA is NA.
sum(a)
# We can explicitly tell sum (and many other functions) to remove NA values before they do their calculations,
sum(a, na.rm = TRUE)
mean(a,na.rm=T)
newA = na.omit(a) # Handle Missing Values in Objects
na.exclude(a)
# 7- Histogram
# First, generate a set of 10,000 Gaussian distributed random numbers
data <- rnorm(1e4) # Gaussian distributed numbers with mean=0 & sigma=1
hist(data) # Plots a histogram, with sensible bin choices by default
hist(data, breaks=7)
# 8 -Plotting
# NOTE: the `?topic` lines open help pages -- interactive use only.
?plot # Help page for plot command
?par # Help page for graphics parameter control
?Devices # or "?device"
# (R can output in postscript, PDF, bitmap, PNG, JPEG and more formats)
dev.list() # list graphics devices
colours() # or "colors()" List all available colours
?plotmath
# To create an output file copy of a plot for printing or including in a document etc.
# 9- Functions
cat # Type function name without brackets to list contents
args(cat) # Return arguments of any function
body(cat) # Return main body of function
# NOTE(review): `fun` is not defined until the next section, so these two
# lines error if the script is sourced top to bottom.
formals(fun) # Get or set the formal arguments of a function
debug(fun); undebug(fun) # Set or unset the debugging flag on a function
# Create your own function
# Quadratic in x with coefficients a, b, c: a*x^2 + b*x + c.
# FIX: the original computed (a*x^2) + (b*x^2) + c -- the b term repeated
# x^2, so the named coefficients did not describe the quadratic they imply.
fun <- function(x, a, b, c) (a*x^2) + (b*x) + c
fun(3, 1, 2, 3)   # 1*9  + 2*3 + 3 = 18
fun(5, 1, 2, 3)   # 1*25 + 2*5 + 3 = 38
# A more complicated example of a function. First, create some data:
set.seed(123) # allow reproducible random numbers
a <- rnorm(1000, mean=10, sd=1) # 1000 Gaussian random numbers
b <- rnorm(100, mean=50, sd=15) # smaller population of higher numbers
x <- c(a, b) # Combine datasets
hist(x) # Shows outlier population clearly
sd(x) # Strongly biased by outliers
mad(x) # Robustly estimates sd of main sample
mean(x) # biased
median(x) # robust
# 10- Simple Graphs
# Dot Plots
# Simple Dotplot
dotchart(mtcars$mpg,labels=row.names(mtcars),cex=.7,main="Gas Milage for Car Models", xlab="Miles Per Gallon")
# Dotplot: Grouped Sorted and Colored
# Sort by mpg, group and color by cylinder
x <- mtcars[order(mtcars$mpg),] # sort by mpg
x$cyl <- factor(x$cyl) # it must be a factor
x$color[x$cyl==4] <- "red"
x$color[x$cyl==6] <- "blue"
x$color[x$cyl==8] <- "darkgreen"
dotchart(x$mpg,labels=row.names(x),cex=.7,groups= x$cyl,main="Gas Milage for Car Models\ngrouped by cylinder", xlab="Miles Per Gallon", gcolor="black", color=x$color)
# Bar Plots
# Simple Bar Plot
counts <- table(mtcars$gear)
barplot(counts, main="Car Distribution",xlab="Number of Gears")
# Stacked Bar Plot with Colors and Legend
counts <- table(mtcars$vs, mtcars$gear)
barplot(counts, main="Car Distribution by Gears and VS", xlab="Number of Gears", col=c("darkblue","red"),legend = rownames(counts))
# Grouped Bar Plot
counts <- table(mtcars$vs, mtcars$gear)
barplot(counts, main="Car Distribution by Gears and VS", xlab="Number of Gears", col=c("darkblue","red"),legend = rownames(counts), beside=TRUE)
# Line Graphs
# Define the cars vector with 5 values
cars <- c(1, 3, 6, 4, 9) #
trucks <- c(2, 5, 4, 5, 12)
# Graph the cars vector with all defaults
plot(cars)
# Graph cars using blue points overlayed by a line
plot(cars, type="o", col="blue")
# Create a title with a red, bold/italic font
title(main="Autos", col.main="red", font.main=4)
# Graph trucks with red dashed line and square points
lines(trucks, type="o", pch=22, lty=2, col="red")
# Create a title with a red, bold/italic font
title(main="Autos", col.main="red", font.main=4)
# Start PNG device driver to save output to figure.png
# NOTE(review): this device is never closed with dev.off(), so the PNG
# file stays open/empty until the session ends.
png(filename="LinePlot.png", height=295, width=300,bg="white")
#More Advanced Line Graphs
x <- c(1:5); y <- x # create some data
par(pch=22, col="red") # plotting symbol and color
par(mfrow=c(2,4)) # all plots on one page
opts = c("p","l","o","b","c","s","S","h")
# One panel per plot `type` option, in the 2x4 grid set up above.
for(i in 1:length(opts)){
  heading = paste("type=",opts[i])
  plot(x, y, type="n", main=heading)
  lines(x, y, type=opts[i])
}
#Pie chart
# Pie Chart with Percentages
slices <- c(10, 12, 4, 16, 8)
lbls <- c("US", "UK", "Australia", "Germany", "France")
pct <- round(slices/sum(slices)*100)
lbls <- paste(lbls, pct) # add percents to labels
lbls <- paste(lbls,"%",sep="") # ad % to labels
pie(slices,labels = lbls, col=rainbow(length(lbls)),main="Pie Chart of Countries")
#Box plots
# Boxplots can be created for individual variables or for variables by group.
# Boxplot of MPG by Car Cylinders
boxplot(mpg~cyl,data=mtcars, main="Car Milage Data",xlab="Number of Cylinders", ylab="Miles Per Gallon")
# Notched Boxplot of Tooth Growth Against 2 Crossed Factors
# boxes colored for ease of interpretation
boxplot(len~supp*dose, data=ToothGrowth, notch=TRUE, col=(c("gold","darkgreen")),main="Tooth Growth", xlab="Suppliment and Dose")
# Histograms
# Colored Histogram with Different Number of Bins
hist(mtcars$mpg, breaks=12, col="red")
# Add a Normal Curve (Thanks to Peter Dalgaard)
x <- mtcars$mpg
h<-hist(x, breaks=10, col="red", xlab="Miles Per Gallon",
        main="Histogram with Normal Curve")
# Scale the normal density to the histogram's count axis
# (bin width * sample size).
xfit<-seq(min(x),max(x),length=40)
yfit<-dnorm(xfit,mean=mean(x),sd=sd(x))
yfit <- yfit*diff(h$mids[1:2])*length(x)
lines(xfit, yfit, col="blue", lwd=2)
# Scatterplots
# Simple Scatterplot
# NOTE(review): attach() puts mtcars columns on the search path and is
# discouraged; `with(mtcars, ...)` or `data=` arguments are safer.
attach(mtcars)
plot(wt, mpg, main="Scatterplot Example",xlab="Car Weight ", ylab="Miles Per Gallon ", pch=19)
# Add fit lines
abline(lm(mpg~wt), col="red") # regression line (y~x)
lines(lowess(wt,mpg), col="blue") # lowess line (x,y)
|
a2014f7d98be626d2fb489735a9f8416d4dd4227 | dec53b9fae27d2e2f71b937f6e77a7d8c82b7aec | /Biomod2_singleSpecies.R | dc2ada09bc38819f4bdc2ebf5c905758ab753c32 | [] | no_license | npcastaneda/_playground | 6219eb7b09e81a12a866e7ca4cf213aa77a03643 | 25e7df35e47193df29e5a1208a0ebf00a07313e5 | refs/heads/master | 2021-01-22T06:36:57.914690 | 2014-10-03T09:03:53 | 2014-10-03T09:03:53 | 21,495,995 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,020 | r | Biomod2_singleSpecies.R | ##################################################
# SINGLE-SPECIES DISTRIBUTION MODELING WITH BIOMOD2
##################################################
# set dir
# NOTE(review): hard-coded Windows path + setwd() makes the script
# machine-specific.
dir <- "D:/CWR/Biomod2/Test.apr.24.2013"
setwd(dir)
# load the library
library(biomod2)
# load our species data
# Demo presence/absence table shipped inside the biomod2 package.
DataSpecies <- read.csv(system.file("external/species/mammals_table.csv",
                                    package="biomod2"))
head(DataSpecies)
# the name of studied species
myRespName <- 'GuloGulo'
# the presence/absences data for our species
myResp <- as.numeric(DataSpecies[,myRespName])
# the XY coordinates of species data
myRespXY <- DataSpecies[,c("X_WGS84","Y_WGS84")]
# load the environmental raster layers (could be .img, ArcGIS
# rasters or any supported format by the raster package)
# Environmental variables extracted from Worldclim (bio_3, bio_4,
# bio_7, bio_11 & bio_12)
myExpl = stack( system.file( "external/bioclim/current/bio3.grd",
                             package="biomod2"),
                system.file( "external/bioclim/current/bio4.grd",
                             package="biomod2"),
                system.file( "external/bioclim/current/bio7.grd",
                             package="biomod2"),
                system.file( "external/bioclim/current/bio11.grd",
                             package="biomod2"),
                system.file( "external/bioclim/current/bio12.grd",
                             package="biomod2"))
# Formating data
# NOTE(review): this first call is immediately overwritten by the
# fully-parameterised call below; it could be deleted.
myBiomodData <- BIOMOD_FormatingData(resp.var = myResp,
                                     expl.var = myExpl,
                                     resp.xy = myRespXY,
                                     resp.name = myRespName)
# This is my version
myBiomodData <- BIOMOD_FormatingData(resp.var = myResp,
                                     expl.var = myExpl,
                                     resp.xy = myRespXY,
                                     resp.name = myRespName,
                                     eval.resp.var = NULL,
                                     eval.expl.var = NULL,
                                     eval.resp.xy = NULL,
                                     PA.nb.rep = 0,
                                     PA.nb.absences = 1000,
                                     PA.strategy = 'SRE',
                                     PA.dist.min = 0,
                                     PA.dist.max = NULL,
                                     PA.sre.quant = 0.025,
                                     PA.table = NULL,
                                     na.rm = TRUE)
# Check the format of your current data
myBiomodData
plot(myBiomodData)
# ----------- OJO: the function BIOMOD_FormatingData prepares the pseudo-abscences of data
# (use preferably: SRE)
#?BIOMOD_FormatingData
# Modeling
#------------ OJO: BIOMOD_ModelingOptions: Use this for setting the parameters of each model
?BIOMOD_ModelingOptions
#------------------------------------------------------
# DO NOT RUN THIS PART!
# default BIOMOD.model.option object
myBiomodOptions <- BIOMOD_ModelingOptions()
# print the object
myBiomodOptions
# you can copy a part of the print, change it and custom your options
# here we want to compute quadratic GLM and select best model with 'BIC' criterium
myBiomodOptions <- BIOMOD_ModelingOptions(
  GLM = list( type = 'quadratic',
              interaction.level = 0,
              myFormula = NULL,
              test = 'BIC',
              family = 'binomial',
              control = glm.control(epsilon = 1e-08, maxit = 1000, trace = FALSE) ))
#------------------------------------------------------
# 2. Defining Models Options using default options.
myBiomodOption <- BIOMOD_ModelingOptions()
# 3. Computing the models
# Five algorithms, 3 evaluation runs with an 80/20 data split each.
myBiomodModelOut <- BIOMOD_Modeling(
  myBiomodData,
  models = c('SRE','CTA','RF','MARS','FDA'),
  models.options = myBiomodOption,
  NbRunEval=3,
  DataSplit=80,
  Prevalence=0.5,
  VarImport=3,
  models.eval.meth = c('TSS','ROC'),
  SaveObj = TRUE,
  rescal.all.models = TRUE,
  do.full.models = FALSE,
  modeling.id = paste(myRespName,"FirstModeling",sep=""))
# Modeling summary
myBiomodModelOut
# get all models evaluation
myBiomodModelEval <- getModelsEvaluations(myBiomodModelOut)
# print the dimnames of this object
dimnames(myBiomodModelEval)
# let's print the TSS scores of Random Forest
myBiomodModelEval["TSS","Testing.data","RF",,]
# let's print the ROC scores of all selected models
myBiomodModelEval["ROC","Testing.data",,,]
# print variable importances
getModelsVarImport(myBiomodModelOut)
# Ensamble modeling
# NOTE(review): T used as shorthand for TRUE below; TRUE is safer since
# T can be reassigned.
myBiomodEM <- BIOMOD_EnsembleModeling(
  modeling.output = myBiomodModelOut,
  chosen.models = 'all',
  em.by='all',
  eval.metric = c('TSS'),
  eval.metric.quality.threshold = c(0.7),
  prob.mean = T,
  prob.cv = T,
  prob.ci = T,
  prob.ci.alpha = 0.05,
  prob.median = T,
  committee.averaging = T,
  prob.mean.weight = T,
  prob.mean.weight.decay = 'proportional' )
# print summary
myBiomodEM
# get evaluation scores
getEMeval(myBiomodEM)
# Model projection
# projection over the globe under current conditions
myBiomodProj <- BIOMOD_Projection(
  modeling.output = myBiomodModelOut,
  new.env = myExpl,
  proj.name = 'current',
  selected.models = 'all',
  binary.meth = 'TSS',
  compress = 'xz',
  clamping.mask = F,
  output.format = '.grd')
# summary of crated projections
myBiomodProj
# files created on hard drive
list.files("GuloGulo/proj_current/")
# make some plots sub-selected by str.grep argument
plot(myBiomodProj, str.grep = 'MARS')
# if you want to make custom plots, you can also get the projected map
myCurrentProj <- getProjection(myBiomodProj)
myCurrentProj
# load environmental variables for the future.
myExplFuture = stack( system.file( "external/bioclim/future/bio3.grd",
                                   package="biomod2"),
                      system.file( "external/bioclim/future/bio4.grd",
                                   package="biomod2"),
                      system.file( "external/bioclim/future/bio7.grd",
                                   package="biomod2"),
                      system.file( "external/bioclim/future/bio11.grd",
                                   package="biomod2"),
                      system.file( "external/bioclim/future/bio12.grd",
                                   package="biomod2"))
myBiomodProjFuture <- BIOMOD_Projection(
  modeling.output = myBiomodModelOut,
  new.env = myExplFuture,
  proj.name = 'future',
  selected.models = 'all',
  binary.meth = 'TSS',
  compress = 'xz',
  clamping.mask = T,
  output.format = '.grd')
# make some plots, sub-selected by str.grep argument
plot(myBiomodProjFuture, str.grep = 'MARS')
# Ensemble forecast
myBiomodEF <- BIOMOD_EnsembleForecasting(
  projection.output = myBiomodProj,
  EM.output = myBiomodEM,
  binary.meth = 'TSS')
proj_current_GuloGulo_TotalConsensus_EMbyTSS <-
  stack("GuloGulo/proj_current/proj_current_GuloGulo_TotalConsensus_EMbyTSS.grd")
proj_current_GuloGulo_TotalConsensus_EMbyTSS
# reduce layer names for plotting convegences
names(proj_current_GuloGulo_TotalConsensus_EMbyTSS) <-
  sapply(strsplit(names(proj_current_GuloGulo_TotalConsensus_EMbyTSS),"_"), tail, n=1)
levelplot(proj_current_GuloGulo_TotalConsensus_EMbyTSS)
a4f42c3e88a90125107735e7489795ea90cb3fba | 364dcb95aac6dff3f8548768dc99bba945ec81b6 | /R/plot.R | 4494b774bc682546f526d3a7e5e7b08834969a56 | [
"MIT"
] | permissive | tidyverse/ggplot2 | 3ef62b72861c246b13ffc2d95678079984fe65c0 | c76b9aeda648e9b6022b7169021e854c3d3890cb | refs/heads/main | 2023-08-31T07:08:20.846510 | 2023-08-17T16:19:44 | 2023-08-17T16:19:44 | 19,438 | 4,632 | 1,971 | NOASSERTION | 2023-09-14T13:25:40 | 2008-05-25T01:21:32 | R | UTF-8 | R | false | false | 7,531 | r | plot.R | #' Create a new ggplot
#'
#' `ggplot()` initializes a ggplot object. It can be used to
#' declare the input data frame for a graphic and to specify the
#' set of plot aesthetics intended to be common throughout all
#' subsequent layers unless specifically overridden.
#'
#' `ggplot()` is used to construct the initial plot object,
#' and is almost always followed by a plus sign (`+`) to add
#' components to the plot.
#'
#' There are three common patterns used to invoke `ggplot()`:
#'
#' * `ggplot(data = df, mapping = aes(x, y, other aesthetics))`
#' * `ggplot(data = df)`
#' * `ggplot()`
#'
#' The first pattern is recommended if all layers use the same
#' data and the same set of aesthetics, although this method
#' can also be used when adding a layer using data from another
#' data frame.
#'
#' The second pattern specifies the default data frame to use
#' for the plot, but no aesthetics are defined up front. This
#' is useful when one data frame is used predominantly for the
#' plot, but the aesthetics vary from one layer to another.
#'
#' The third pattern initializes a skeleton `ggplot` object, which
#' is fleshed out as layers are added. This is useful when
#' multiple data frames are used to produce different layers, as
#' is often the case in complex graphics.
#'
#' The `data =` and `mapping =` specifications in the arguments are optional
#' (and are often omitted in practice), so long as the data and the mapping
#' values are passed into the function in the right order. In the examples
#' below, however, they are left in place for clarity.
#'
#' @param data Default dataset to use for plot. If not already a data.frame,
#' will be converted to one by [fortify()]. If not specified,
#' must be supplied in each layer added to the plot.
#' @param mapping Default list of aesthetic mappings to use for plot.
#' If not specified, must be supplied in each layer added to the plot.
#' @param ... Other arguments passed on to methods. Not currently used.
#' @param environment `r lifecycle::badge("deprecated")` Used prior to tidy
#' evaluation.
#' @export
#' @examples
#' # Create a data frame with some sample data, then create a data frame
#' # containing the mean value for each group in the sample data.
#' set.seed(1)
#'
#' sample_df <- data.frame(
#' group = factor(rep(letters[1:3], each = 10)),
#' value = rnorm(30)
#' )
#'
#' group_means_df <- setNames(
#' aggregate(value ~ group, sample_df, mean),
#' c("group", "group_mean")
#' )
#'
#' # The following three code blocks create the same graphic, each using one
#' # of the three patterns specified above. In each graphic, the sample data
#' # are plotted in the first layer and the group means data frame is used to
#' # plot larger red points on top of the sample data in the second layer.
#'
#' # Pattern 1
#' # Both the `data` and `mapping` arguments are passed into the `ggplot()`
#' # call. Those arguments are omitted in the first `geom_point()` layer
#' # because they get passed along from the `ggplot()` call. Note that the
#' # second `geom_point()` layer re-uses the `x = group` aesthetic through
#' # that mechanism but overrides the y-position aesthetic.
#' ggplot(data = sample_df, mapping = aes(x = group, y = value)) +
#' geom_point() +
#' geom_point(
#' mapping = aes(y = group_mean), data = group_means_df,
#' colour = 'red', size = 3
#' )
#'
#' # Pattern 2
#' # Same plot as above, passing only the `data` argument into the `ggplot()`
#' # call. The `mapping` arguments are now required in each `geom_point()`
#' # layer because there is no `mapping` argument passed along from the
#' # `ggplot()` call.
#' ggplot(data = sample_df) +
#' geom_point(mapping = aes(x = group, y = value)) +
#' geom_point(
#' mapping = aes(x = group, y = group_mean), data = group_means_df,
#' colour = 'red', size = 3
#' )
#'
#' # Pattern 3
#' # Same plot as above, passing neither the `data` or `mapping` arguments
#' # into the `ggplot()` call. Both those arguments are now required in
#' # each `geom_point()` layer. This pattern can be particularly useful when
#' # creating more complex graphics with many layers using data from multiple
#' # data frames.
#' ggplot() +
#' geom_point(mapping = aes(x = group, y = value), data = sample_df) +
#' geom_point(
#' mapping = aes(x = group, y = group_mean), data = group_means_df,
#' colour = 'red', size = 3
#' )
ggplot <- function(data = NULL, mapping = aes(), ...,
                   environment = parent.frame()) {
  # S3 generic: dispatches on the class of `data` (see ggplot.default /
  # ggplot.function methods below).
  UseMethod("ggplot")
}
#' @export
ggplot.default <- function(data = NULL, mapping = aes(), ...,
                           environment = parent.frame()) {
  # Guard against a common mistake: passing something other than an
  # aes() mapping (class "uneval") as the second argument.
  if (!missing(mapping) && !inherits(mapping, "uneval")) {
    cli::cli_abort(c(
      "{.arg mapping} should be created with {.fn aes}.",
      "x" = "You've supplied a {.cls {class(mapping)[1]}} object"
    ))
  }
  # Coerce `data` to a data frame (fortify() handles models, spatial
  # objects, etc.); extra arguments are forwarded to the fortify method.
  data <- fortify(data, ...)
  # Assemble the skeleton plot object: empty layers/theme, default
  # cartesian coordinates and null facetting, to be extended with `+`.
  p <- structure(list(
    data = data,
    layers = list(),
    scales = scales_list(),
    guides = guides_list(),
    mapping = mapping,
    theme = list(),
    coordinates = coord_cartesian(default = TRUE),
    facet = facet_null(),
    plot_env = environment
  ), class = c("gg", "ggplot"))
  # Derive default axis/legend labels from the aesthetic mapping.
  p$labels <- make_labels(mapping)
  # Remember this plot so last_plot()/ggsave() can retrieve it.
  set_last_plot(p)
  p
}
#' @export
# Method for functions: always an error.  Without it, a function passed as
# `data` would fall through to ggplot.default and fail confusingly inside
# fortify().
ggplot.function <- function(data = NULL, mapping = aes(), ...,
                            environment = parent.frame()) {
  # Added to avoid functions end in ggplot.default
  cli::cli_abort(c(
    "{.arg data} cannot be a function.",
    "i" = "Have you misspelled the {.arg data} argument in {.fn ggplot}"
  ))
}
# Copy a ggplot object, deep-cloning its scales list (the one mutable,
# reference-semantics component) so the copy can be modified without
# affecting the original plot.
plot_clone <- function(plot) {
  duplicate <- plot
  duplicate$scales <- plot$scales$clone()
  duplicate
}
#' Reports whether x is a ggplot object
#' @param x An object to test
#' @keywords internal
#' @export
is.ggplot <- function(x) {
  inherits(x, "ggplot")
}
#' Explicitly draw plot
#'
#' Generally, you do not need to print or plot a ggplot2 plot explicitly: the
#' default top-level print method will do it for you. You will, however, need
#' to call `print()` explicitly if you want to draw a plot inside a
#' function or for loop.
#'
#' @param x plot to display
#' @param newpage draw new (empty) page first?
#' @param vp viewport to draw plot in
#' @param ... other arguments not used by this method
#' @keywords hplot
#' @return Invisibly returns the original plot.
#' @export
#' @method print ggplot
#' @examples
#' colours <- list(~class, ~drv, ~fl)
#'
#' # Doesn't seem to do anything!
#' for (colour in colours) {
#' ggplot(mpg, aes_(~ displ, ~ hwy, colour = colour)) +
#' geom_point()
#' }
#'
#' # Works when we explicitly print the plots
#' for (colour in colours) {
#' print(ggplot(mpg, aes_(~ displ, ~ hwy, colour = colour)) +
#' geom_point())
#' }
print.ggplot <- function(x, newpage = is.null(vp), vp = NULL, ...) {
  # Remember the plot for last_plot()/ggsave().
  set_last_plot(x)
  if (newpage) grid.newpage()
  # Record dependency on 'ggplot2' on the display list
  # (AFTER grid.newpage())
  grDevices::recordGraphics(
    requireNamespace("ggplot2", quietly = TRUE),
    list(),
    getNamespace("ggplot2")
  )
  # Two-phase render: build the data for each layer, then lay it out
  # as a gtable of grobs.
  data <- ggplot_build(x)
  gtable <- ggplot_gtable(data)
  # Draw either on the current page or inside the requested viewport
  # (named viewports are looked up with seekViewport).
  if (is.null(vp)) {
    grid.draw(gtable)
  } else {
    if (is.character(vp)) seekViewport(vp) else pushViewport(vp)
    grid.draw(gtable)
    upViewport()
  }
  # Opt-in accessibility: print a BrailleR text description when enabled.
  if (isTRUE(getOption("BrailleR.VI")) && rlang::is_installed("BrailleR")) {
    print(asNamespace("BrailleR")$VI(x))
  }
  # Return the plot itself so printing can be chained/piped silently.
  invisible(x)
}
#' @rdname print.ggplot
#' @method plot ggplot
#' @export
# plot() on a ggplot object is the same operation as printing it.
plot.ggplot <- print.ggplot
|
0b0446adc37e55ef5cf7064ac13a5fd1928e0a16 | 77e9997f1d751ec975c98d4a01b29cf512c47688 | /factors.R | adeb178395b6ab99e97db88391aa10b47882352c | [] | no_license | Khan996/Hello-R | e52ef1ee3f0a73ed608f079299a58f4ed3ff5a57 | acb2f22ca68d5a72b6c1647356692f1368133135 | refs/heads/main | 2023-08-27T13:54:03.935191 | 2021-10-27T14:34:49 | 2021-10-27T14:34:49 | 410,268,353 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 621 | r | factors.R | x <- factor(c("Yes", "Yes", "No", "No", "Yes"))
# Inspect the factor: print it, tabulate level counts, and reveal the
# underlying integer codes.
x
table(x)
unclass(x)
# Levels default to alphabetical order.
z <- factor(c("Punab", "KPk", "KPk", "Sindh"))
z
unclass(z)
# Explicit `levels` controls the level order ("Yes" before "No").
# FIX: the original bound this factor to a variable named `c`, shadowing
# the base c() constructor; renamed to `responses`.
responses <- factor(c("Yes","Yes","No","No", "Yes"),
                    levels = c("Yes", "No"))
responses
# Explicit level order keeps weekdays in calendar order rather than
# alphabetical order.
weeks <- factor(c("Monday","Tuesday","Saturday",
                  "Wednesday","Friday", "Thursday",
                  "Sunday"),
                levels = c("Monday","Tuesday","Wednesday",
                           "Thursday","Friday","Saturday",
                           "Sunday"))
weeks
#NA & NaN are used for Null where data is not present
D <- c(1,2,3, NA, 3, NaN, 4, NA)
D
is.na(D)   # TRUE for both NA and NaN
is.nan(D)  # TRUE only for NaN
|
0dd5f225f76f76511bf9b6521837c15d277bbc8c | c4ca20a12315b278cda1875867d399b15adc4337 | /Analysis.R | 48ceec23ad46f7fecc9f39fc9afc544438cae6ba | [] | no_license | StefanoAllesina/stima_nepo | 167fca5420c1da01154a11f6a6161bb110a032cd | 87caaeba0b05a7e0606f20e8b840323b57599f82 | refs/heads/master | 2021-01-18T17:57:37.537850 | 2016-09-26T14:37:37 | 2016-09-26T14:37:37 | 69,259,339 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 746 | r | Analysis.R | library(dplyr)
# Read names
# NOTE(review): expects ITA-Names.csv in the working directory; no
# set.seed() is taken, so the simulation varies between runs.
# FIX: renamed the table from `names` (which shadowed base::names()) and
# replaced the 1:nrow(...) loop ranges with seq_len(), which is safe when
# the table is empty.
surnames <- read.csv("ITA-Names.csv", stringsAsFactors = FALSE)
# How many unique names in a sample of x academics?
# Row i of `for_random` is one random draw order of all academics;
# entry [i, j] = number of distinct last names among the first j drawn.
for_random <- matrix(0, 1000, nrow(surnames))
for (i in seq_len(nrow(for_random))) for_random[i,] <- cumsum(!duplicated(sample(surnames$Last)))
# Read the input
sectors <- read.csv("Input.csv")
# Calculate expected number of people
sectors$Expected_people <- rep(NA, nrow(sectors))
for (i in seq_len(nrow(sectors))){
  num_names <- sectors$Names[i]
  # First position in each simulated draw at which exactly `num_names`
  # distinct names have been seen (NA if a draw never hits that count,
  # which would propagate NA into the mean -- intentionally visible).
  tmp <- apply(for_random, 1, function(x) which(x == num_names)[1])
  sectors$Expected_people[i] <- mean(tmp)
  print(sectors[i,])
}
# Observed minus expected head-count: the "excess" attributable to
# shared surnames.
sectors$Excess_people <- sectors$People - sectors$Expected_people
#Save output
write.csv(sectors, file = "Output.csv", row.names = FALSE)
|
2f1a2c8503137990f8dd717f3677308cacfd2f6f | 11641bb10056da1bf416315b255cb9326553d667 | /R/rmd2ipynb.R | fcebc457cdbb3c8e2a4ae8be2af1ccaef490405e | [
"MIT"
] | permissive | jullybobble/rmd2ipynb | ee655d8db2f3a85ec3df92ecd7ad82e6917bc79f | 14b2cc494b68eeece21df4ccb6753052983ab33d | refs/heads/master | 2023-03-06T12:01:51.897302 | 2021-02-19T18:06:56 | 2021-02-19T18:06:56 | 336,350,624 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,927 | r | rmd2ipynb.R | #' Convert an RMarkdown HTML notebook to an IPython (or Jupyter) notebook.
#'
#' Note that you must pass the HTML file with extension `.nb.html` that is rendered
#' automatically for RMarkdown documents with output with `html_notebook` output.
#' Passing the RMarkdown document directly will fail.
#'
#' @param html_notebook_file path to the RMarkdown HTML document to be converted
#' @param ipynb_file path to the desired output IPython notebook to be generated
#'
#' @return the resulting JSON of the IPython notebook, invisibly, as a character
#' vector
#'
#' @export
#' @importFrom magrittr `%>%`
#' @importFrom readr write_file
#' @importFrom purrr map map_df
#' @importFrom stringr str_replace
#' @importFrom rmarkdown parse_html_notebook
#' @importFrom xml2 read_html xml_find_first
#' @importFrom rvest html_text
#' @import dplyr tidyr
html_nb_to_ipynb <- function(html_notebook_file, ipynb_file = str_replace(html_notebook_file, "\\.nb\\.html$", ".ipynb"), verbose = F, remove_html_comments = T) {
stopifnot(file.exists(html_notebook_file))
parsed <- parse_html_notebook(html_notebook_file)
title <- parsed$header %>% parse_html() %>% xml_find_first("/html/head/title") %>% html_text()
json <- parsed %>%
rmd_annotations() %>%
ipynb_from_rmd(title, remove_html_comments)
write_file(json, ipynb_file)
if(verbose) {
message("wrote file ", ipynb_file)
}
invisible(json)
}
#' @importFrom jsonlite toJSON
# Build the full IPython-notebook (nbformat 4, minor 4) list structure from
# parsed RMarkdown annotations and serialise it to pretty-printed JSON.
#
# annotations: row-wise tibble of annotations (label/source/outputs columns),
#   as produced upstream; each row is converted into at most one cell.
# title: optional document title; when non-NULL it is prepended as a
#   markdown "# title" cell.
# remove_html_comments: forwarded to ipynb_cell_from_rmd_part(), which strips
#   pandoc ":::" fenced-div lines from markdown cells.
# Returns: the notebook as a JSON character string.
ipynb_from_rmd <- function(annotations, title = NULL, remove_html_comments = T) {
  # Convert each annotation row into a cell; rows that map to nothing
  # (e.g. empty text blocks) return NULL and are dropped.
  cells <- annotations %>%
    rowwise %>%
    group_map( ~ ipynb_cell_from_rmd_part(.x$label, .x$source, .x$outputs, remove_html_comments)) %>%
    discard(is.null)
  # Optional leading markdown cell holding the document title.
  title_cell <- if(is.null(title)) {
    list()
  } else {
    list(ipynb_cell_markdown(paste("#", title)))
  }
  # Notebook skeleton: metadata advertises the IRkernel so Jupyter opens the
  # file with an R kernel; the running R version is recorded verbatim.
  list(
    nbformat = 4,
    nbformat_minor = 4,
    metadata = list(
      kernelspec = list(
        display_name = "R",
        language = "R",
        name = "ir"
      ),
      language_info = list(
        codemirror_mode = "r",
        file_extension = ".r",
        mimetype = "text/x-r-source",
        name = "R",
        pygments_lexer = "r",
        version = paste(version$major, sep = ".", version$minor)
      )
    ),
    cells = c(title_cell, cells)
  ) %>%
    toJSON(auto_unbox = T,
           pretty = T,
           na = "null")
}
#' @importFrom xml2 xml_find_first
#' @importFrom rvest html_text html_attr
#' @importFrom stringr str_detect str_remove str_remove_all
#' @importFrom purrr discard
# Convert one parsed-notebook annotation into an IPython cell (or output
# object), dispatching on its label:
#   "text"   -> markdown cell (HTML converted to markdown)
#   "chunk"  -> code cell; its nested outputs are converted recursively
#   "plot"   -> base64 PNG image output
#   "output" -> text (stream) output
#   "frame"  -> data-frame (pagedtable) output
#   anything else -> NULL (dropped by the caller)
ipynb_cell_from_rmd_part <- function(label, source, outputs, remove_html_comments) {
  if (label == "text") {
    if(source %>% str_detect('^<div id="rmd-source-code">')) {
      # The embedded Rmd source payload is not a real cell; skip it.
      NULL
    } else {
      md <- html_to_md(source)
      if(remove_html_comments) {
        # Strip pandoc ":::" fenced-div marker lines from the markdown.
        md <- str_remove_all(md, "(?m)^:::.*\n")
      }
      if(str_detect(md, "^\\s*$")) {
        # Nothing left after cleaning -- emit no cell at all.
        NULL
      } else {
        ipynb_cell_markdown(md)
      }
    }
  } else if (label == "chunk") {
    # The chunk's source is HTML-escaped; recover the plain code text.
    code <- if(source == "") {
      NULL
    } else {
      source %>% read_html() %>% html_text()
    }
    # A chunk's outputs are themselves annotation rows; convert each one
    # recursively and drop any that map to NULL.
    out <- if(!is.null(outputs[[1]])) {
      outputs[[1]] %>%
        rowwise %>%
        group_map( ~ ipynb_cell_from_rmd_part(.x$label, .x$source, .x$outputs, remove_html_comments)) %>%
        discard(is.null)
    } else {
      NULL
    }
    ipynb_cell_code(code, out)
  } else if (label == "plot") {
    # Recover the raw base64 PNG payload from the <img> data URI.
    img_base64 <- source %>%
      read_html %>%
      xml_find_first("//img") %>%
      html_attr("src") %>%
      str_remove("data:image/png;base64,")
    ipynb_out_img(img_base64)
  } else if (label == "output") {
    # Console output is wrapped in <pre><code> in the rendered HTML.
    text <- source %>%
      read_html %>%
      xml_find_first("//pre/code") %>%
      html_text()
    ipynb_out_stream(text)
  } else if (label == "frame") {
    # Paged data frames are embedded as pagedtable JSON inside the HTML.
    table <- source %>%
      read_html() %>%
      html_text() %>%
      parse_pagedtable_json()
    ipynb_out_table(table)
  } else {
    NULL
  }
}
|
9c9dfd69a2299dfa4f4dee7ff426328af084439c | 7ab0b6d8bad7e7824528d1f05c10792759cabab1 | /R/get-scene.R | 430eacf2998b32e19f8187946380cc31c3500e43 | [] | permissive | tbradley1013/dundermifflin | a4711e3cd02d494885a30a34ddca877f04cb2ff9 | 691045dbfe6ab526caa4db4240ea378f81f5262d | refs/heads/master | 2020-05-01T06:33:37.546458 | 2020-02-27T13:18:15 | 2020-02-27T13:18:15 | 177,332,858 | 20 | 1 | MIT | 2020-02-04T14:45:54 | 2019-03-23T19:42:59 | R | UTF-8 | R | false | false | 2,421 | r | get-scene.R | # Get Scene
#' Get a random scene dialog from the Office
#'
#' @examples
#'
#' \dontrun{random_scene()}
#'
#' @export
random_scene <- function(){
ep_n_scene <- dundermifflin::ep_n_scene
office_quotes <- dundermifflin::office_quotes
season <- sample(c(1:4, 6:9), 1)
episode <- sample(ep_n_scene$episode[ep_n_scene$season == season], 1)
scene <- sample(ep_n_scene$scene[ep_n_scene$season == season & ep_n_scene$episode == episode], 1)
scene_quotes <- office_quotes[office_quotes$season == season & office_quotes$episode == episode & office_quotes$scene == scene,]
class(scene_quotes) <- c("dunder_scene", class(scene_quotes))
return(scene_quotes)
}
#' @export
# Pretty-print a scene: a ruled header naming the episode, then one line per
# quote with the speaker's name coloured and the quotes column-aligned.
print.dunder_scene <- function(x, ...){
  # Console width; caps the length of the "=" ruler lines.
  screen_width <- options()$width
  # Width of the longest speaker name, used to align the quote column.
  max_char_length <- max(nchar(x$character))
  # Assign each speaker a colour from a 7-colour palette; colours are
  # reused (sampled with replacement) only when there are more than 7
  # distinct speakers.
  speakers <- unique(x$character)
  palette <- c("red", "green", "yellow", "blue", "magenta", "cyan", "silver")
  cols <- sample(palette, length(speakers), replace = length(speakers) > 7)
  names(cols) <- speakers
  # Plain-text header is built only to measure the ruler length; the
  # printed header below colours the episode name.
  header <- paste0("THE OFFICE - ", unique(x$name), " (Season ", unique(x$season),
                   ", Episode ", unique(x$episode), ", Scene ", unique(x$scene), ")\n")
  header_len <- nchar(header)
  ruler <- strrep("=", min(header_len, screen_width))
  cat(ruler, "\n", sep = "")
  cat("THE OFFICE - ", crayon::red(unique(x$name)), " (Season ", unique(x$season),
      ", Episode ", unique(x$episode), ", Scene ", unique(x$scene), ")\n", sep = "")
  cat(ruler, "\n", sep = "")
  # One line per quote, padding after "name:" so quotes start in the same
  # column.
  for (i in seq_len(nrow(x))) {
    qt <- x[i, ]
    cat(
      char_color(qt$character, cols),
      ": ", rep(" ", (max_char_length - nchar(qt$character))),
      qt$quote,
      "\n",
      sep = ""
    )
  }
}
#' @export
# Flatten a scene to a single plain-text string: one "name: quote" line per
# row, with quotes column-aligned, joined by newlines. Returns "" for a
# zero-row scene (the original 1:nrow loop misbehaved on empty input).
as.character.dunder_scene <- function(x, ...){
  # Width of the longest speaker name; shorter names are padded so that
  # every quote starts in the same column.
  pad_width <- max(nchar(x$character))
  lines <- vapply(seq_len(nrow(x)), function(i) {
    speaker <- x$character[i]
    paste0(speaker, ": ", strrep(" ", pad_width - nchar(speaker)), x$quote[i])
  }, character(1))
  paste(lines, collapse = "\n")
}
# Render `character` in the palette colour registered under its name in the
# named vector `cols` (built by print.dunder_scene).
char_color <- function(character, cols){
  speaker_colour <- cols[names(cols) == character]
  crayon::style(character, crayon::make_style(speaker_colour))
}
|
6fbabf808672411b693c059201a82aa7c5f48fa4 | edf2d3864db8751074133b2c66a7e7995a960c6b | /man/MCNearestMeanClassifier.Rd | 5efbeb3f5d38e471f1a89472e9a255de9f8f89ae | [] | no_license | jkrijthe/RSSL | 78a565b587388941ba1c8ad8af3179bfb18091bb | 344e91fce7a1e209e57d4d7f2e35438015f1d08a | refs/heads/master | 2023-04-03T12:12:26.960320 | 2023-03-13T19:21:31 | 2023-03-13T19:21:31 | 7,248,018 | 65 | 24 | null | 2023-03-28T06:46:23 | 2012-12-19T21:55:39 | R | UTF-8 | R | false | true | 2,611 | rd | MCNearestMeanClassifier.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MCNearestMeanClassifier.R
\name{MCNearestMeanClassifier}
\alias{MCNearestMeanClassifier}
\title{Moment Constrained Semi-supervised Nearest Mean Classifier}
\usage{
MCNearestMeanClassifier(X, y, X_u, update_sigma = FALSE, prior = NULL,
x_center = FALSE, scale = FALSE)
}
\arguments{
\item{X}{matrix; Design matrix for labeled data}
\item{y}{factor or integer vector; Label vector}
\item{X_u}{matrix; Design matrix for unlabeled data}
\item{update_sigma}{logical; Whether the estimate of the variance should be updated after the means have been updated using the unlabeled data}
\item{prior}{matrix; Class priors for the classes}
\item{x_center}{logical; Should the features be centered?}
\item{scale}{logical; Should the features be normalized? (default: FALSE)}
}
\description{
Update the means based on the moment constraints as defined in Loog (2010). The means estimated using the labeled data are updated by making sure their weighted mean corresponds to the overall mean on all (labeled and unlabeled) data. Optionally, the estimated variance of the classes can be re-estimated after this update is applied by setting update_sigma to \code{TRUE}. To get the true nearest mean classifier, rather than estimate the class priors, set them to equal priors using, for instance \code{prior=matrix(0.5,2)}.
}
\references{
Loog, M., 2010. Constrained Parameter Estimation for Semi-Supervised Learning: The Case of the Nearest Mean Classifier. In Proceedings of the 2010 European Conference on Machine learning and Knowledge Discovery in Databases. pp. 291-304.
}
\seealso{
Other RSSL classifiers:
\code{\link{EMLeastSquaresClassifier}},
\code{\link{EMLinearDiscriminantClassifier}},
\code{\link{GRFClassifier}},
\code{\link{ICLeastSquaresClassifier}},
\code{\link{ICLinearDiscriminantClassifier}},
\code{\link{KernelLeastSquaresClassifier}},
\code{\link{LaplacianKernelLeastSquaresClassifier}()},
\code{\link{LaplacianSVM}},
\code{\link{LeastSquaresClassifier}},
\code{\link{LinearDiscriminantClassifier}},
\code{\link{LinearSVM}},
\code{\link{LinearTSVM}()},
\code{\link{LogisticLossClassifier}},
\code{\link{LogisticRegression}},
\code{\link{MCLinearDiscriminantClassifier}},
\code{\link{MCPLDA}},
\code{\link{MajorityClassClassifier}},
\code{\link{NearestMeanClassifier}},
\code{\link{QuadraticDiscriminantClassifier}},
\code{\link{S4VM}},
\code{\link{SVM}},
\code{\link{SelfLearning}},
\code{\link{TSVM}},
\code{\link{USMLeastSquaresClassifier}},
\code{\link{WellSVM}},
\code{\link{svmlin}()}
}
\concept{RSSL classifiers}
|
9b66537874dbd64f89f0948955119b02e5a3980f | f43ff1e09138649558c2e90a75bd2d4f3cbbdbb6 | /source/Windows/R-Portable-Win/library/plotly/examples/shiny/proxy_relayout/app.R | 8f4da760fdffa1cf9b9f5930edafdc81402bf816 | [
"MIT",
"CC-BY-3.0",
"GPL-2.0-only",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | romanhaa/Cerebro | 5b2d9371403c52f60341894f84cd0f6a006cc930 | 946ed178c986027d60af6013e63d1fc51ae8b371 | refs/heads/master | 2022-12-02T15:49:57.705873 | 2021-11-20T11:47:12 | 2021-11-21T17:09:37 | 164,686,297 | 87 | 23 | MIT | 2022-11-10T18:21:44 | 2019-01-08T16:09:59 | HTML | UTF-8 | R | false | false | 1,317 | r | app.R | library(shiny)
library(plotly)
# UI: a single plotly widget.
ui <- fluidPage(
  plotlyOutput("plot")
)
# Server: plots median house price per Texas city over time and keeps the
# y-axis range synced to the x-range selected via the rangeslider.
server <- function(input, output, session) {
  # One line per city from plotly's bundled txhousing dataset.
  p <- ggplot(txhousing) +
    geom_line(aes(date, median, group = city))
  output$plot <- renderPlotly({
    # dynamicTicks recomputes axis ticks while zooming; rangeslider() adds
    # the draggable x-range control beneath the plot.
    ggplotly(p, dynamicTicks = TRUE) %>%
      rangeslider()
  })
  # Whenever the x-range changes, rescale the y-axis to the visible data.
  observeEvent(event_data("plotly_relayout"), {
    d <- event_data("plotly_relayout")
    # unfortunately, the data structure emitted is different depending on
    # whether the relayout is triggered from the rangeslider or the plot
    xmin <- if (length(d[["xaxis.range[0]"]])) d[["xaxis.range[0]"]] else d[["xaxis.range"]][1]
    xmax <- if (length(d[["xaxis.range[1]"]])) d[["xaxis.range[1]"]] else d[["xaxis.range"]][2]
    if (is.null(xmin) || is.null(xmax)) return(NULL)
    # compute the y-range based on the new x-range
    idx <- with(txhousing, xmin <= date & date <= xmax)
    yrng <- extendrange(txhousing$median[idx])
    # Update only the y-axis through the proxy so the plot is not redrawn.
    plotlyProxy("plot", session) %>%
      plotlyProxyInvoke("relayout", list(yaxis = list(range = yrng)))
  })
  # Full y-range, restored on double-click (plotly's "reset axes" gesture).
  yRange <- range(txhousing$median, na.rm = TRUE)
  observeEvent(event_data("plotly_doubleclick"), {
    plotlyProxy("plot", session) %>%
      plotlyProxyInvoke("relayout", list(yaxis = list(range = yRange)))
  })
}
shinyApp(ui, server)
|
4b0a4ce981c0b9876c1b29689bd219cb04c50cc8 | 49373eee9ed92cbe9db5229309d591b0a8d54b27 | /R/app_server.R | aa4fe0e2e4573e65fca2452c7ee08a58d7e19004 | [
"MIT"
] | permissive | cparsania/FungiExpresZ | 263520cf9c4f02b6d3110a9e73759aff5722631f | 8a18f17feedade5ecf4e9a4fe995530998f3b527 | refs/heads/master | 2023-08-24T17:38:06.221566 | 2023-07-21T12:47:58 | 2023-07-21T12:47:58 | 212,790,225 | 12 | 0 | NOASSERTION | 2023-05-26T01:29:03 | 2019-10-04T10:30:38 | R | UTF-8 | R | false | false | 159,219 | r | app_server.R |
#' app server
#'
#' @param input session input
#' @param output session output
#' @param session session
#' @importFrom dplyr %>%
#' @importFrom dplyr add_count
#' @importFrom dplyr add_row
#' @importFrom dplyr add_rownames
#' @importFrom dplyr add_tally
#' @importFrom dplyr all_vars
#' @importFrom dplyr any_vars
#' @importFrom dplyr arrange
#' @importFrom dplyr arrange_all
#' @importFrom dplyr arrange_at
#' @importFrom dplyr arrange_if
#' @importFrom dplyr as_data_frame
#' @importFrom dplyr as_tibble
#' @importFrom dplyr bind_cols
#' @importFrom dplyr bind_rows
#' @importFrom dplyr case_when
#' @importFrom dplyr count
#' @importFrom dplyr count_
#' @importFrom dplyr data_frame
#' @importFrom dplyr distinct
#' @importFrom dplyr distinct_all
#' @importFrom dplyr distinct_at
#' @importFrom dplyr distinct_if
#' @importFrom dplyr distinct_prepare
#' @importFrom dplyr enquo
#' @importFrom dplyr enquos
#' @importFrom dplyr everything
#' @importFrom dplyr expr
#' @importFrom dplyr filter
#' @importFrom dplyr filter_all
#' @importFrom dplyr filter_at
#' @importFrom dplyr filter_if
#' @importFrom dplyr first
#' @importFrom dplyr funs
#' @importFrom dplyr glimpse
#' @importFrom dplyr group_by
#' @importFrom dplyr group_by_
#' @importFrom dplyr group_by_all
#' @importFrom dplyr group_by_at
#' @importFrom dplyr if_else
#' @importFrom dplyr intersect
#' @importFrom dplyr last
#' @importFrom dplyr last_col
#' @importFrom dplyr left_join
#' @importFrom dplyr mutate
#' @importFrom dplyr mutate_
#' @importFrom dplyr mutate_all
#' @importFrom dplyr mutate_at
#' @importFrom dplyr mutate_each
#' @importFrom dplyr mutate_if
#' @importFrom dplyr n
#' @importFrom dplyr n_distinct
#' @importFrom dplyr n_groups
#' @importFrom dplyr na_if
#' @importFrom dplyr order_by
#' @importFrom dplyr pull
#' @importFrom dplyr quo
#' @importFrom dplyr quo_name
#' @importFrom dplyr quos
#' @importFrom dplyr rename
#' @importFrom dplyr rename_all
#' @importFrom dplyr rename_at
#' @importFrom dplyr rename_if
#' @importFrom dplyr rename_vars
#' @importFrom dplyr rename_vars_
#' @importFrom dplyr right_join
#' @importFrom dplyr row_number
#' @importFrom dplyr rowwise
#' @importFrom dplyr same_src
#' @importFrom dplyr sample_frac
#' @importFrom dplyr sample_n
#' @importFrom dplyr select
#' @importFrom dplyr select_
#' @importFrom dplyr select_all
#' @importFrom dplyr select_at
#' @importFrom dplyr select_if
#' @importFrom dplyr select_var
#' @importFrom dplyr select_vars
#' @importFrom dplyr select_vars_
#' @importFrom dplyr slice
#' @importFrom dplyr slice_
#' @importFrom dplyr setdiff
#' @importFrom dplyr starts_with
#' @importFrom dplyr summarise
#' @importFrom dplyr summarise_
#' @importFrom dplyr summarise_all
#' @importFrom dplyr summarise_at
#' @importFrom dplyr summarise_each
#' @importFrom dplyr summarise_each_
#' @importFrom dplyr summarise_if
#' @importFrom dplyr summarize
#' @importFrom dplyr summarize_
#' @importFrom dplyr summarize_all
#' @importFrom dplyr summarize_at
#' @importFrom dplyr summarize_each
#' @importFrom dplyr summarize_each_
#' @importFrom dplyr summarize_if
#' @importFrom dplyr tally
#' @importFrom dplyr tally_
#' @importFrom dplyr tbl
#' @importFrom dplyr tbl_df
#' @importFrom dplyr tibble
#' @importFrom dplyr top_n
#' @importFrom dplyr tribble
#' @importFrom dplyr ungroup
#' @importFrom dplyr union
#' @importFrom dplyr union_all
#' @importFrom dplyr vars
#' @importFrom dplyr contains
#' @importFrom dplyr if_else
#' @importFrom tidyr %>%
#' @importFrom tidyr drop_na
#' @importFrom tidyr gather
#' @importFrom tidyr nest
#' @importFrom tidyr replace_na
#' @importFrom tidyr separate
#' @importFrom tidyr separate_rows
#' @importFrom tidyr spread
#' @importFrom tidyr unite
#' @importFrom tidyr unnest
#' @importFrom magrittr %>%
#' @importFrom readr read_csv
#' @importFrom readr read_delim
#' @importFrom readr read_file
#' @importFrom readr read_lines
#' @importFrom readr read_rds
#' @importFrom readr read_tsv
#' @importFrom readr type_convert
#' @importFrom readr write_csv
#' @importFrom readr write_csv2
#' @importFrom readr write_delim
#' @importFrom readr write_file
#' @importFrom readr write_lines
#' @importFrom readr write_rds
#' @importFrom readr write_tsv
#' @importFrom rlang as_function
#' @importFrom purrr as_mapper
#' @importFrom purrr as_vector
#' @importFrom purrr compact
#' @importFrom purrr compose
#' @importFrom purrr is_list
#' @importFrom purrr keep
#' @importFrom purrr map
#' @importFrom purrr map_at
#' @importFrom purrr map_chr
#' @importFrom purrr map_dbl
#' @importFrom purrr map_depth
#' @importFrom purrr map_df
#' @importFrom purrr map_dfc
#' @importFrom purrr map_dfr
#' @importFrom purrr map_if
#' @importFrom purrr map_int
#' @importFrom purrr map_lgl
#' @importFrom purrr map_raw
#' @importFrom purrr map2
#' @importFrom purrr map2_chr
#' @importFrom purrr map2_dbl
#' @importFrom purrr map2_df
#' @importFrom purrr map2_dfc
#' @importFrom purrr map2_dfr
#' @importFrom purrr map2_int
#' @importFrom purrr map2_lgl
#' @importFrom purrr map2_raw
#' @importFrom purrr negate
#' @importFrom purrr partial
#' @importFrom purrr safely
#' @importFrom purrr set_names
#' @importFrom purrr splice
#' @importFrom tibble add_column
#' @importFrom tibble add_row
#' @importFrom tibble as_data_frame
#' @importFrom tibble as_tibble
#' @importFrom tibble as.tibble
#' @importFrom tibble column_to_rownames
#' @importFrom tibble data_frame
#' @importFrom tibble enframe
#' @importFrom tibble glimpse
#' @importFrom tibble repair_names
#' @importFrom tibble rowid_to_column
#' @importFrom tibble rownames_to_column
#' @importFrom tibble tibble
#' @importFrom glue glue
#' @importFrom stringr str_c
#' @importFrom stringr str_extract
#' @importFrom stringr str_extract_all
#' @importFrom stringr str_glue
#' @importFrom stringr str_length
#' @importFrom stringr str_match
#' @importFrom stringr str_match_all
#' @importFrom stringr str_order
#' @importFrom stringr str_pad
#' @importFrom stringr str_remove
#' @importFrom stringr str_remove_all
#' @importFrom stringr str_replace
#' @importFrom stringr str_replace_all
#' @importFrom stringr str_replace_na
#' @importFrom stringr str_sort
#' @importFrom stringr str_split
#' @importFrom stringr str_wrap
#' @importFrom forcats %>%
#' @importFrom forcats as_factor
#' @importFrom forcats fct_c
#' @importFrom forcats fct_drop
#' @importFrom forcats fct_expand
#' @importFrom forcats fct_inorder
#' @importFrom forcats fct_match
#' @importFrom forcats fct_other
#' @importFrom forcats fct_recode
#' @importFrom forcats fct_relabel
#' @importFrom forcats fct_relevel
#' @importFrom forcats fct_reorder
#' @importFrom forcats fct_reorder2
#' @importFrom clipr clear_clip
#' @importFrom clipr read_clip
#' @importFrom clipr read_clip_tbl
#' @importFrom clipr write_clip
#' @importFrom clipr write_last_clip
#' @importFrom rlang :=
#' @importFrom rlang !!
#' @importFrom rlang !!!
#' @importFrom rlang %@%
#' @importFrom rlang %@%<-
#' @importFrom rlang %|%
#' @importFrom rlang %||%
#' @importFrom rlang enquo
#' @importFrom rlang enquos
#' @importFrom rlang quo
#' @importFrom rlang quos
#' @importFrom rlang set_names
#' @importFrom tm Corpus
#' @importFrom tm Docs
#' @importFrom tm DocumentTermMatrix
#' @importFrom tm removeNumbers
#' @importFrom tm removePunctuation
#' @importFrom tm removeSparseTerms
#' @importFrom tm removeWords
#' @importFrom tm SimpleCorpus
#' @importFrom tm stemDocument
#' @importFrom tm stopwords
#' @importFrom tm TermDocumentMatrix
#' @importFrom tm termFreq
#' @importFrom tm Terms
#' @importFrom tm tm_filter
#' @importFrom tm tm_index
#' @importFrom tm tm_map
#' @importFrom tm tm_parLapply
#' @importFrom tm tm_parLapply_engine
#' @importFrom tm tm_reduce
#' @importFrom tm tm_term_score
#' @importFrom tm VCorpus
#' @importFrom tm VectorSource
#' @importFrom tm content_transformer
#' @importFrom tm stripWhitespace
#' @importFrom MASS kde2d
#' @importFrom janitor clean_names
#' @importFrom broom tidy
#' @importFrom rio convert
#' @importFrom scales squish
#' @import ggupset
#' @keywords internal
app_server <- function(input, output,session) {
# List the first level callModules here
options(shiny.maxRequestSize = 30 * 1024^2)
## prepare user applied filters' (while selecting pre loaded data) expressions .
## NOTE : following filter expressions will be used to filter the sample_info (pre loaded data) table.
## Therefore, column names - `Organism` and `strain` (given as names of vector) must be identical to the column names of the sample_info table.
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## Display, filter and select existing data ----
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
### strains by species
strains_by_species <- reactive({
req(sample_info)
sample_info %>%
dplyr::select(species, strain, genotype) %>%
dplyr::group_by(species) %>%
dplyr::summarise(genotype = list(genotype) , strain = list(strain))
})
## Update species on sra sample info table
observe({
req(strains_by_species())
## all species
available_species <- base::unique(strains_by_species()$species)
shinyWidgets::updatePickerInput(session = session,
inputId = "select_species",
choices = available_species,
selected = "Aspergillus nidulans")
})
## get selected species strain
selected_species_strains <- reactive({
req(input$select_species ,strains_by_species())
## strains for selected species
available_strains <- strains_by_species() %>%
dplyr::filter(species == input$select_species) %>%
dplyr::select(strain) %>%
tidyr::unnest(cols = strain) %>%
tidyr::drop_na() %>%
pull(1) %>%
unique() %>%
sort()
return(available_strains)
})
## update strain info
observe({
req(selected_species_strains())
shinyWidgets::updatePickerInput(session = session,
inputId = "select_strain",
choices = selected_species_strains())
})
## reset strain filter
observeEvent(input$reset_strain_filter , {
shinyWidgets::updatePickerInput(session = session,
inputId = "select_strain",
choices = selected_species_strains())
})
## get genotypes for selected species
selected_species_genotype <- reactive({
req(input$select_species ,strains_by_species())
## genotype for selected species
available_genotype <- strains_by_species() %>%
dplyr::filter(species == input$select_species) %>%
dplyr::select(genotype) %>%
tidyr::unnest(cols = genotype) %>%
tidyr::drop_na() %>%
pull(1) %>%
unique() %>%
sort()
return(available_genotype)
})
## update genotype info
observe({
req(selected_species_genotype())
shinyWidgets::updatePickerInput(session = session,
inputId = "select_genotype",
choices = selected_species_genotype())
})
## reset genotype filter
observeEvent(input$reset_genotype_filter , {
shinyWidgets::updatePickerInput(session = session,
inputId = "select_genotype",
choices = selected_species_genotype())
})
## show number of rows selected
user_selected_rows <- reactive({
if(is.null(input$pre_loaded_data_sra_sample_info_rows_selected)){
return(0)
}else{
return(length(input$pre_loaded_data_sra_sample_info_rows_selected))
}
})
## Render text for number of rows selected
output$sra_info_number_of_rows_selected <- renderText(
paste(user_selected_rows() ,"row(s) selected")
)
## Update strain on sra sample info table
user_filters_expressions <- reactive({
u_filters_vals <- c(species = input$select_species,
strain = input$select_strain ,
genotype = input$select_genotype)
u_filters_expr <- lapply(names(u_filters_vals), function(col) {
my_filter(col, "==", value = u_filters_vals[[col]])
})
return(u_filters_expr)
})
## SRA sample info table user display
sra_sample_info_user_display <- reactive({
## add ncbi urls whereever possible
dd <- sample_info %>%
dplyr::filter(!!!user_filters_expressions())
return(dd)
})
## pre loaded data
output$pre_loaded_data_sra_sample_info <- DT::renderDataTable({
return(sra_sample_info_user_display()) #%>%
# dplyr::mutate(run_accession =
# map_chr(run_accession ,
# ~ tags$a( .x, href = paste("https://www.ncbi.nlm.nih.gov/sra/" ,
# .x ,
# sep = "") , target = "blank") %>%
# as.character()
# ))
},
selection = "multiple",
class = "cell-border stripe",
rownames = FALSE,
extensions = c("Buttons"),
server = F,
options = list(
scrollX = TRUE,
dom = "Blfrtip",
autoWidth = TRUE,
searchHighlight = TRUE,
columnDefs = list(
list(width = '700px', targets = c(5, 6,9)), # Fix width columns
list(targets = c(2,3, 8:18), visible = FALSE)# The column number must be identical to the columns in colvis extension
),
buttons =
list("copy",
list(extend = "collection", buttons = c("csv", "excel", "pdf"), text = "Download"),
list(extend = "colvis", columns = c(2,3,8:18))
), # end of buttons customization
lengthMenu = list(
c(10, 20 ,50, 70, 100,500 ) # declare values
, c(10, 20 ,50, 70, 100,500 ) # declare titles
),
pageLength = 10
), ## end of options
)
## reset rows selection in preloaded data sample information
observeEvent(input$deselect_all_rows, {
proxy <- DT::dataTableProxy("pre_loaded_data_sra_sample_info")
DT::selectRows(proxy = proxy, NULL)
})
## select all rows in preloaded data sample information
observe({
#req(pre_loaded_data_sra_sample_info())
proxy = DT::dataTableProxy("pre_loaded_data_sra_sample_info")
if (is.null(input$sra_sample_info_select_all_rows)) {
DT::selectRows(proxy = proxy, NULL)
} else if(input$sra_sample_info_select_all_rows == "TRUE") {
DT::selectRows(proxy = proxy, input$pre_loaded_data_sra_sample_info_rows_all)
}
})
## user selected SRA id sample info
user_selected_sra_id_sample_info <- eventReactive(input$submit_user_sra_samples, {
req(input$pre_loaded_data_sra_sample_info_rows_selected)
selected_rows <- input$pre_loaded_data_sra_sample_info_rows_selected
n_select <- selected_rows %>% length()
n_max_sra_limit <-1000
if(n_select > n_max_sra_limit){
shinyWidgets::sendSweetAlert(session = session ,
type = "warning",
title = "Warning...!!" ,
text = paste("Due to memory limit, current version allows maximum",
n_max_sra_limit,
"samples. You have selected " ,
n_select, "." ,
"Application will continue with first",
n_max_sra_limit, "samples.",
sep = " "))
selected_rows <- selected_rows[1:n_max_sra_limit]
}
## process user applied filter expressions
user_selected_data_sra_ids <- sra_sample_info_user_display() %>%
dplyr::slice(selected_rows)
})
## prepare tibble having sra_id and bioproject_id columns
user_selected_sra_id_to_bioproject_id <- reactive({
user_selected_sra_id_sample_info() %>% dplyr::select(bio_project , run_accession) %>%
dplyr::mutate_all( ~replace_na(. ,"--NA--"))
})
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## User selected SRA data ----
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## user selected data
user_selected_sra_id_data <- eventReactive(input$submit_user_sra_samples,{
req(user_selected_sra_id_sample_info() )
## process user applied filter expressions
user_selected_data_sra_ids <- user_selected_sra_id_sample_info()%>%
dplyr::select(23) %>% ## column 23 is Run/SRA id. IMP: change this, if column order in the sample_info table changed.
dplyr::pull(1)
## Objective : load species specific data.
## Reference annotation name of respective species will be used to load species specific data
## 1st step is to get the user reference annotation name (e.g FungiDB-39_AnidulansFGSCA4 for Aspergillus nidulans and FungiDB-39_CalbicansSC5314 for Candida albicans)
## Once reference annotation name obtained, use it to get the respective species expression .rds file name.
## Reference annotations to .rds file name mapping given in the rds object "reference_annotation_to_expr_map.rds"
## once the name of .rds file obtained, load it and get the user selected data
user_selected_species_ref_annot <- sra_sample_info_user_display()%>%
dplyr::select(2) %>% ## column 2 is get the name of ref annot
dplyr::pull(1) %>%
as.character() %>%
.[1]
#ref_annot_to_expr_rds contains two cols ---> 'reference_annotation' , 'expression_mat_data_file'
## load respective expression mat
expr_data_mat_rds_file <- ref_annot_to_expr_rds %>%
dplyr::filter(.data$reference_annotation == !!user_selected_species_ref_annot) %>%
dplyr::select(2) %>% ## 2nd column is expression_mat_data_file
pull(1) %>%
.[1] ### it make sure that if reference annot match to multiple rows, it only returns .rds data mat file for first match.
## load expr data
withProgress(message = "Loadind data from .rds" , {
incProgress(0.5)
#print("expr_mat_file_path")
#print(paste(get_expression_mats_dir_path() , expr_data_mat_rds_file , sep = "/"))
expr_data_mat <- readr::read_rds(paste(get_expression_mats_dir_path() , expr_data_mat_rds_file , sep = "/"))
incProgress(1)
})
user_selected_sra_id_data <- expr_data_mat %>%
dplyr::select(1, !!user_selected_data_sra_ids)
return(user_selected_sra_id_data)
})
##@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
### display active group info in the in side panel ----
##@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#observe(group_info,{
callModule(module = display_active_groups_server ,
id = "display_active_groups",
group_data = reactive(group_info)
)
##@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
### display sample info for user selected sample info in side panel ----
##@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
observeEvent(user_selected_sra_id_sample_info(),{
req(user_selected_sra_id_sample_info())
callModule(module = display_sra_sample_info_server ,
id = "user_selected_sra_id_sample_info" ,
user_selected_sra_sample_info_tibble = reactive(user_selected_sra_id_sample_info())
)
})
## set output status for sra sample info display on the side panel.
## UI will displayed only if sra samples selected by users
output$display_sra_sample_info_in_side_panel <- reactive({
req(user_selected_sra_id_sample_info())
return(TRUE)
})
outputOptions(x = output , name = "display_sra_sample_info_in_side_panel" , suspendWhenHidden = FALSE)
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## Process user uploaded data ----
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## user upoaded data
user_uploaded_data <- eventReactive(input$submit_user_uploaded_data,{
req(input$upload_data_source)
upload_data_source <- input$upload_data_source
data_from_user = tryCatch({
if(upload_data_source == "upload_from_file"){
req(input$file1)
data_from_user <- readr::read_delim(file = input$file1$datapath, delim = input$sep, col_names = T, trim_ws = T)
}else if(upload_data_source == "upload_from_clipboard") {
data_from_user <- text_to_tibble(text = input$user_pasted_data , sep = input$sep)
}
}, error = function(e) {
print(paste("File uploading error." , e, sep = " "))
return(NULL)
} , warning = function(w){
print(paste("File uploaded with warnings." , w, sep = " "))
return(NULL)
})
## show alert/error if error produced while reading file.
if(is.null(data_from_user)){
shinyWidgets::sendSweetAlert(session = session ,type = "error",title = "Error...!!" ,text = paste("Error while reading the data. Probably unsupported file format (if uploaded from file) or uncorrect pasted data.", sep = " "))
return(NULL)
}
## error if first column is not type char
if(!purrr::map_lgl(data_from_user[1], is.character) ){
shinyWidgets::sendSweetAlert(session = session ,type = "error",title = "Error...!!" ,text = paste("First column must be of type character.", sep = " "))
return(NULL)
}
## other than 1st column, all must be of type nueric
if( ! all(purrr::map_lgl(data_from_user[-1], is.numeric)) ){
shinyWidgets::sendSweetAlert(session = session ,type = "error",title = "Error...!!" ,text = paste("Other than first column, all must be of type numeric.", sep = " "))
return(NULL)
}
## column 1 must not have duplicates.
if(any(base::duplicated(data_from_user[[1]]))){
shinyWidgets::sendSweetAlert(session = session ,type = "error" ,title = "Error...!!" ,text = paste("Column 1 must have unique values.", sep = " "))
return(NULL)
}
## uploded data must have atleast two columns
if(base::ncol(data_from_user) < 2){
shinyWidgets::sendSweetAlert(session = session ,type = "error",title = "Error...!!" ,text = paste("Uploaded data must have atleast 2 columns.\n Make sure that you have selected correct separator.", sep = " "))
return(NULL)
}
## remove genes which are 0 in all samples
processed_data <- data_from_user %>%
tidyr::drop_na() %>% ## genes will be dropped if any column contain NA
dplyr::filter_if(is.numeric , .vars_predicate = dplyr::any_vars(. != 0 )) ## select genes which have atleast one non zero value
## log2 trasform user uploaded data
if(input$log_transform_user_upload_data == "log2"){
processed_data <- processed_data %>%
dplyr::filter_if(is.numeric , dplyr::any_vars( . > 0)) %>% ## remove genes having negative values (value < 0) in any sample
dplyr::mutate_if(is.numeric , ~(log2( . + 1)))
}
## log10 trasform user uploaded data
if(input$log_transform_user_upload_data == "log10"){
processed_data <- processed_data %>%
dplyr::filter_if(is.numeric , dplyr::any_vars( . > 0)) %>%## remove genes having negative values (value < 0) in any sample
dplyr::mutate_if(is.numeric , ~(log10( . + 1)))
}
## remove na and round the values.
processed_data <- processed_data %>%
dplyr::mutate_if(is.numeric, round, 4) %>% ## 4 digits after decimal
as_tibble() %>%
tidyr::drop_na() ## rows containing NA (after log transform) will be removed
## check the number of rows in the processed data. if number of row remain zero throw error.
if(base::nrow(processed_data) == 0 ){
warning_text <- paste("All genes have value NA after log transformation (if selected) in one of the sample or value 0 in all the samples. Please upload correct data. If log transformation used, make sure that values are non-negative. ")
shinyWidgets::sendSweetAlert(session = session ,
type = "error",
title = "Error...!!" ,
text = warning_text)
return(NULL)
}
## Update alert type depending on number of genes remained after removing NA.
## if number of rows in the processed data less than user uploaded rows, throw warning.
if(base::nrow(processed_data) < base::nrow(data_from_user)){
genes_removed <- dplyr::setdiff(data_from_user[[1]] , processed_data[[1]])
type = "warning"
title = "Data upload with warning...!!!"
numb_of_genes_removed <- base::nrow(data_from_user) - base::nrow(processed_data)
text <- tags$h5(numb_of_genes_removed,
" genes have been removed due to one of the following reasons...",
tags$br(),tags$br(),
"1). Genes have value NA in one of the sample. NA found either directly from uploaded data or due to log transformation (if selected).",
tags$br(),tags$br(),
"2). Genes have value 0 in all the samples.",
tags$br(),tags$br(),
"Removed genes are ...",
tags$br(),tags$br(),
tags$h5(paste0(genes_removed , collapse = ","),
style = "height: 50px;white-space:nowrap;overflow-x:auto;"), style = "text-align:justify;")
## if number of rows in the processed data equal to the user uploaded rows, show success.
} else if(base::nrow(processed_data) == base::nrow(data_from_user)) {
type = "success"
title = "Success...!!!"
text <- paste("Data uploaded successfully.")
}
shinyWidgets::sendSweetAlert(session = session ,
type = type,
title = title ,
text = text )
## column names replace space to "_", this must be applied to groups also when group info uploaded from file
user_uploaded_processed_data <- processed_data %>%
rename_all(function(.){gsub(pattern = " " , "_" , .)})
return(user_uploaded_processed_data)
})
## Keep the data-upload UI in sync with the "use example data" checkbox:
## example mode hides the manual upload section and fills the progress bar;
## otherwise the section is shown and the bar reset. The file input is only
## enabled while "upload from file" is the chosen source.
observe({
  example_mode <- input$upload_sample_data
  if (!example_mode) {
    shinyjs::show(id ="user_data_upload_section")
    shinyWidgets::updateProgressBar(session = session , id = "upload_sample_data_pb" , value = 0)
  } else {
    shinyjs::hide(id ="user_data_upload_section")
    shinyWidgets::updateProgressBar(session = session , id = "upload_sample_data_pb" , value = 100)
  }
  ## Exactly one upload source is active at a time.
  if (input$upload_data_source != "upload_from_file") {
    shinyjs::disable(id = "file1")
  } else {
    shinyjs::enable(id = "file1")
  }
})
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## Finalize active plot data ----
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## Central reactive container: the data set currently active for ALL plot
## panels (NULL until the user selects SRA samples, uploads a file, or
## enables the example data).
plot_data <- reactiveVal(NULL)
## When the user confirms the SRA sample selection, clean the selected data
## (drop NA rows, keep genes with at least one non-zero value) and publish it
## as the active plot data.
observeEvent(input$submit_user_sra_samples ,{
  req(user_selected_sra_id_data())
  final_data <- user_selected_sra_id_data() %>%
    tidyr::drop_na() %>%
    dplyr::filter_if(is.numeric , .vars_predicate = dplyr::any_vars(. != 0 )) ## select genes which have atleast one non zero value
  if (nrow(final_data) == 0) {
    ## BUG FIX: the original fell through after this branch, so the success
    ## alert was still shown and plot_data() was overwritten with the empty
    ## tibble. The error branch must terminate handling here.
    sendSweetAlert(
      session = session,
      title = "Error...",
      text = "All the rows have atleast one missing or NA value found. Input file needs to be fixed.",
      type = "error"
    )
    plot_data(NULL)
  } else {
    ## display success alert when data uploaded
    sendSweetAlert(
      session = session,
      title = paste(final_data %>% ncol() - 1 , "data selected"),
      text = paste("Selected data now available for analysis.\n\nRun accession will be used as identifier for downstream analysis.\n\nClose selection window to use data further.\n\n"),
      type = "success"
    )
    plot_data(final_data)
  }
})
## If the user uploads their own data, clean it and make it the active plot data.
observeEvent(user_uploaded_data(),{
req(user_uploaded_data()) ## to execute this user data must be available
final_data <- user_uploaded_data() %>%
tidyr::drop_na() %>%
dplyr::filter_if(is.numeric , .vars_predicate = dplyr::any_vars(. != 0 )) ## select genes which have atleast one non zero value
plot_data(final_data)
})
## Make plot data the bundled example data when the checkbox
## input$upload_sample_data is toggled on; clear it when toggled off.
observeEvent(input$upload_sample_data,{
if(input$upload_sample_data){
## example file shipped inside the package installation
example_data_file = system.file("app", "cartoon_data" ,"cartoon_log2fpkm_log2fc.txt" ,package = "FungiExpresZ" )
example_data <- readr::read_delim(file = example_data_file , delim = "\t") %>%
tidyr::drop_na() %>%
dplyr::filter_if(is.numeric , .vars_predicate = dplyr::any_vars(. != 0 )) ## select genes which have atleast one non zero value
plot_data(example_data)
}
## if input$upload_sample_data == FALSE set NULL
else {
plot_data(NULL)
}
})
## When submit_user_uploaded_data is triggered and the "join data" option is
## on: left-join the uploaded table (by its first column) with the selected
## SRA expression data, then publish the merged result as plot data.
## Three mutually exclusive outcomes: partial match (warning), no match
## (warning, fall back to uploaded data), full match (success).
observeEvent(input$submit_user_uploaded_data,{
req(input$join_user_data, user_uploaded_data()) ## to execute this user data must be available
## the first uploaded column is the join key against the SRA "geneName" column
user_upload_join_by <- colnames(user_uploaded_data())[1]
merge_data <- user_uploaded_data() %>%
dplyr::left_join(user_selected_sra_id_data() , by = set_names("geneName" , user_upload_join_by))
merge_data <- merge_data %>% tidyr::drop_na() ## rows with NA after merger will be removed.
## send warning if number of rows remained after removing na is < user_uploaded_data()
if(merge_data %>% nrow() < user_uploaded_data() %>% nrow() && merge_data %>% nrow() > 0){
sendSweetAlert(
session = session,
title = "Warning...!",
text = paste0("Only ", merge_data %>% nrow(), " out of " , user_uploaded_data() %>% nrow() , " genes matched to database ID. Application will continue with ", merge_data %>% nrow() , " genes."),
type = "warning"
)
final_data <- merge_data %>%
dplyr::filter_if(is.numeric , .vars_predicate = dplyr::any_vars(. != 0 )) ## select genes which have atleast one non zero value
plot_data(final_data)
}
## send warning if number of rows remained after removing na is 0; return user uploaded data
if(nrow(merge_data) == 0 ){
sendSweetAlert(
session = session,
title = "Warning...",
text = "None of the ID from first column of uploaded data match with database id. Either fix the input id or application continue with uploaded data.",
type = "warning"
)
final_data <- user_uploaded_data() %>%
tidyr::drop_na() %>%
dplyr::filter_if(is.numeric , .vars_predicate = dplyr::any_vars(. != 0 )) ## select genes which have atleast one non zero value
plot_data(final_data)
}
## send success message if all the IDs from the first column of uploaded data
## matched with database id (>= covers possible join fan-out on duplicate keys)
if(merge_data %>% nrow() >= user_uploaded_data() %>% nrow()){
sendSweetAlert(
session = session,
title = "Data joined successfully...",
text = "All the IDs from the first column of uploaded data matched with database id.",
type = "success"
)
final_data <- merge_data %>% tidyr::drop_na() %>%
dplyr::filter_if(is.numeric , .vars_predicate = dplyr::any_vars(. != 0 )) ## select genes which have atleast one non zero value
plot_data(final_data)
}
})
## Debug hook: prints of the final plot data are currently disabled.
observe({
req(plot_data())
# print("plot_data dims")
# print(dim(plot_data()))
# print("plot_data")
# print(plot_data())
})
## Preserve plot data's original row order: first column plus a row-number
## column with a randomly generated (collision-avoiding) name.
## NOTE(review): generate_random_strings() is a project helper defined elsewhere.
plot_data_original_row_order <- reactive({
req(plot_data())
orig_row_ord <- plot_data() %>%
dplyr::select(1) %>%
dplyr::mutate(!!generate_random_strings(1) := dplyr::row_number())
return(orig_row_ord)
})
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## Define groups ----
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## Reactive values holding user-defined grouping of columns (samples) and
## rows (genes), filled in by the two modules below.
group_info <- reactiveValues()
## User defined column (sample) groups, collected via the add_sample_group module
user_defined_column_groups <- callModule(module = add_sample_group , id = "sample_group" ,
sample_names = reactive(base::colnames(plot_data())[-1]) ,
bioproject_groups = reactive(user_selected_sra_id_to_bioproject_id()))
## Assign user groups to reactive values. This observe re-runs whenever
## user_defined_column_groups() changes.
observe({
group_info$column_groups <- user_defined_column_groups()
})
## User defined row (gene) groups, collected via the add_gene_groups module;
## gene names come from the first column of the active plot data.
user_defined_row_groups <- callModule(module = add_gene_groups , session = session,
id = "gene_groups" ,
gene_names = reactive( plot_data() %>% .[[1]] ))
## Assign user defined row groups whenever they change.
observe({
group_info$row_groups <- user_defined_row_groups()
})
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## Reset UI elems of each plot panel based on data being uploaded or selected from pre-loaded ----
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
### if no rows selected, remove x/y choices
## Clear every plot panel's x/y variable selector whenever no plot data is
## available (nothing selected, nothing uploaded).
## DRY: the original repeated updateSelectInput() thirteen times (plus a
## large dead commented-out duplicate); a single loop over the input ids is
## behaviorally identical and far easier to extend.
observe({
  if (is.null(plot_data())) {
    ## One selector id per plot panel that must be emptied.
    axis_input_ids <- c(
      "scatter_x", "scatter_y",        # scatter plot
      "multi_scatter_vars",            # multi scatter
      "density_x",                     # density
      "box_x",                         # box
      "lineplot_x",                    # line
      "joy_plot_x",                    # joy
      "heatmap_vars",                  # heatmap
      "pca_plot_vars",                 # PCA
      "corr_heatbox_vars",             # corr heat box
      "violin_x",                      # violin
      "histogram_x",                   # histogram
      "barplot_vars"                   # bar
    )
    lapply(axis_input_ids, function(id) {
      updateSelectInput(
        session = session,
        inputId = id,
        choices = character(0)
      )
    })
  }
})
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## Fix reference annotations ----
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## Reactive holding the genome name used to fetch reference annotations.
genome_for_annotations <- reactiveVal(NULL)
## Reference annotation when the user chooses SRA data: read the genome name
## for the rows the user selected in the pre-loaded sample table.
observeEvent(input$submit_user_sra_samples , {
  req(input$pre_loaded_data_sra_sample_info_rows_selected)
  ## FIX: the original assigned this to a local variable also named
  ## `genome_for_annotations`, shadowing the reactiveVal above (it only worked
  ## because R skips non-function bindings during call lookup). Renamed.
  selected_genomes <- sample_info %>% ## sample_info is pre loaded data
    dplyr::filter(!!!user_filters_expressions()) %>% ## filter by user applied filters
    dplyr::select(2) %>% ## column 2 is the reference_annotation column; genome names match fungidb annotations. IMP: change this if the column order in sample_info changes.
    dplyr::slice(input$pre_loaded_data_sra_sample_info_rows_selected) %>% ## user selected rows
    dplyr::pull(1)
  ## all selected rows are expected to share a genome; use the first
  genome_for_annotations(selected_genomes[1])
})
## Reference annotation when the user uploads data or selects example data:
## default to the species picked in the UI; the example data is always
## C. albicans SC5314, so that genome overrides when example mode is on.
observe({
genome_for_annotations(input$user_selected_species)
if(input$upload_sample_data){
genome_for_annotations("FungiDB-64_CalbicansSC5314")
}
})
## Debug hook: print of the currently selected genome is disabled.
observe({
req(genome_for_annotations())
#print(paste("User selected geneome", genome_for_annotations() ,sep = " "))
})
## Show a random sample of gene IDs for the currently selected genome, so the
## user can check their uploaded IDs against the expected format.
output$sample_selected_species_gene_id <- renderText({
  req(genome_for_annotations())
  sample_ids_to_display <- tryCatch({
    ids <- ah_data_summary2 %>%
      dplyr::filter(genome == genome_for_annotations()) %>%
      dplyr::select(gr_cols) %>%
      tidyr::unnest(cols = gr_cols) %>%
      dplyr::pull(ID)
    ## FIX: sample(ids, 20) errored (masked by tryCatch) whenever a genome
    ## had fewer than 20 IDs; cap the sample size at the number available.
    sample(ids, min(20, length(ids))) %>% paste0(collapse = ", ")
  }, error = function(x){
    return("Trouble to get Ids")
  })
  return(sample_ids_to_display)
})
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# scatter plot server----
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#
## Question : Both evenReactive and observeEvent can be invalidated by action button.
## Then, why am I using eventReactive here ? Why not observeEvent ??
## Ans :: eventReactive is there to delay the reactions. Given the reactive value, it triggers the chunk of code and
## return some values, which can be cached and used further in the app. While, observeEvent is also there to trigger the action
## on the basis of given reactive values but it does not return anything. It has side effects. Side effects are the actions due
## to functions but without returning anything.
## Validate the scatter plot's x and y selections when "Generate" is pressed.
## Returns TRUE when both selected columns are numeric; otherwise shows an
## error alert and returns FALSE (downstream reactives req() this value).
scatter_vars_validated <- eventReactive(input$generate_scatter, {
req(input$scatter_x, input$scatter_y, plot_data())
x_vals <- plot_data()[, input$scatter_x][[1]]
y_vals <- plot_data()[, input$scatter_y][[1]]
## validate user selected variables
if (!is.numeric(x_vals)) {
shinyWidgets::sendSweetAlert(
session = session,
title = "Error...",
text = "X must be of type numeric",
type = "error"
)
return(FALSE)
} else if (!is.numeric(y_vals)) {
shinyWidgets::sendSweetAlert(
session = session,
title = "Error...",
text = "Y must be of type numeric",
type = "error"
)
return(FALSE)
}
return(TRUE)
})
## Update the scatter plot's axis-limit numeric inputs whenever the user
## clicks "Generate Plot". observeEvent (not eventReactive) is used because
## this only has side effects — nothing needs to be cached.
observeEvent(input$generate_scatter, {
  req(scatter_vars_validated())
  ## data ranges of the selected x/y columns, rounded to 3 decimals
  x_range <- round(range(plot_data()[, input$scatter_x][[1]]), 3)
  y_range <- round(range(plot_data()[, input$scatter_y][[1]]), 3)
  ## DRY: one helper instead of four copy-pasted updateNumericInput() calls.
  ## The min-input starts at the lower bound, the max-input at the upper bound.
  set_axis_limit_inputs <- function(min_id, max_id, rng) {
    updateNumericInput(
      session = session,
      inputId = min_id,
      min = rng[1],
      max = rng[2],
      value = rng[1]
    )
    updateNumericInput(
      session = session,
      inputId = max_id,
      min = rng[1],
      max = rng[2],
      value = rng[2]
    )
  }
  set_axis_limit_inputs("plot_xmin", "plot_xmax", x_range)
  set_axis_limit_inputs("plot_ymin", "plot_ymax", y_range)
})
## Refresh the scatter plot's x and y pickers whenever the plot data changes;
## both default to the first data column. DRY: one loop instead of two
## identical updatePickerInput() calls.
observe({
  lapply(c("scatter_x", "scatter_y"), function(id) {
    updatePickerInput(
      session = session,
      inputId = id,
      choices = base::colnames(plot_data())[-1],
      selected = base::colnames(plot_data())[2]
    )
  })
})
## Scatter plot data restricted to the user-selected gene groups
## (gene_group_selection module returns the filtered matrix).
scatter_plot_data <- callModule(module = gene_group_selection ,
id = "scatter_plot_select_gene_groups" ,
gene_group_info = reactive(group_info$row_groups),
generate_plot_action = reactive(input$generate_scatter),
current_session_data_matrix = plot_data)
## Snapshot of the annotation genome for the current scatter plot; updates
## only when "Generate" is clicked, so a later genome change does not
## silently re-annotate an existing plot.
current_scatter_plot_genome <- eventReactive(input$generate_scatter , {
return(genome_for_annotations())
})
## Compute and cache the scatter plot building blocks when "Generate" is
## pressed: linear-model parameters, Pearson correlation, 2-D point density,
## the base ggplot object, and GFF gene features for the active genome.
scatter_plot_elems <- eventReactive(input$generate_scatter, {
  req(scatter_plot_data())
  ## FIX: xvar/yvar were pulled twice in the original; compute once and reuse.
  xvar <- scatter_plot_data() %>% dplyr::pull(input$scatter_x)
  yvar <- scatter_plot_data() %>% dplyr::pull(input$scatter_y)
  # linear model parameters (intercept/slope for the optional regression line)
  mm <- lm(formula = yvar ~ xvar)
  linear_model_param <- broom::tidy(mm)
  # Pearson correlation between the selected columns
  corr <- cor(xvar, yvar)
  scatter_xy_corr <- format(round(corr, 3), nsmall = 2) ## restrict digits after decimal
  # 2-D density used to colour points
  x_y_density <- get_density(xvar, yvar)
  # base xy scatter plot; geoms are layered on in final_scatter
  gp <- ggplot(scatter_plot_data(), aes_string(x = as.symbol(input$scatter_x), y = as.symbol(input$scatter_y)))
  ## gene features (coordinates, description) for the plot's genome
  gff_feat = ah_data_summary2 %>%
    dplyr::filter(genome == current_scatter_plot_genome()) %>%
    dplyr::select(gr_cols) %>%
    tidyr::unnest(cols = gr_cols)
  return(list(linear_model_param = linear_model_param,
              scatter_xy_corr = scatter_xy_corr, gp = gp,
              x_y_density = x_y_density,
              gff_feat = gff_feat))
})
# observe({
# req(scatter_plot_data())
# #print(scatter_plot_data())
# })
## Scatter plot conditional-panel flag: TRUE only once a final scatter plot
## exists, so the surrounding UI panel is shown only after a successful plot.
output$scatter_plot_status <- reactive({
req(final_scatter())
return(TRUE)
})
## keep evaluating even while the panel is hidden, otherwise the flag never flips
outputOptions(output, "scatter_plot_status", suspendWhenHidden = FALSE)
## Assemble the final scatter plot from the cached elements: point layer
## (coloured by density or gene group), live axis limits, title/labels via
## the decoration module, and an optional regression line.
final_scatter <- reactive({
req(scatter_vars_validated() , scatter_plot_elems() ,
input$plot_xmin, input$plot_xmax,input$plot_ymin, input$plot_ymax
)
# req(input$plot_xmax,input$plot_xmin) ## all four xmin, xmax, ymin and ymax must be there to generate the plot
withProgress(message = "Preparing scatter plot", {
incProgress(0.5)
## color by density, default (viridis) colors
if (input$scatter_color_chooser == "default") {
gp <- scatter_plot_elems()$gp +
geom_point(aes(color = scatter_plot_elems()$x_y_density),
alpha = input$scatter_alpha , size= input$scatter_point_size) +
viridis::scale_color_viridis()
}
## color by density, user defined low/medium/high colors
if (input$scatter_color_chooser == "manual") {
gp <- scatter_plot_elems()$gp +
geom_point(aes(color = scatter_plot_elems()$x_y_density), alpha = input$scatter_alpha , size= input$scatter_point_size) +
scale_color_gradientn(colours = c(input$scatter_col_low, input$scatter_col_medium, input$scatter_col_high))
}
## color by gene groups, default discrete palette
if (input$scatter_color_chooser == "by_gene_groups") {
gp <- scatter_plot_elems()$gp +
geom_point(aes(color = gene_groups), alpha = input$scatter_alpha , size= input$scatter_point_size) +
scale_color_discrete()
}
# real-time change of axis limits (inputs were initialised from the data range)
gp <- gp +
xlim(input$plot_xmin, input$plot_xmax) +
ylim(input$plot_ymin, input$plot_ymax)
## default plot title: the Pearson correlation; x/y labels are isolated so
## changing the pickers alone does not retitle an existing plot
gp <- callModule(module = plot_title_and_axis_label_server ,
id = "scatter_plot_title_and_labels",
my_ggplot = gp ,
plot_title = base::paste("Pearson correlation:",
scatter_plot_elems()$scatter_xy_corr, sep = ""),
axis_x_title = shiny::isolate(input$scatter_x),
axis_y_title = shiny::isolate(input$scatter_y),
x_tick_angle = 0,
color_legend_title = ifelse(input$scatter_color_chooser == "by_gene_groups" , "Groups", "Density")
)
## add / remove regression line: user-supplied slope/intercept, or the
## fitted values from the cached linear model
if (input$scatter_diagonal_line == "manual") {
gp <- gp + geom_abline(slope = input$manual_slope, intercept = input$manual_intercept)
}
if (input$scatter_diagonal_line == "from_data") {
i_cept <- scatter_plot_elems()$linear_model_param %>% dplyr::filter(term == "(Intercept)") %>% pull(estimate)
slope <- scatter_plot_elems()$linear_model_param %>% dplyr::filter(term == "xvar") %>% pull(estimate)
gp <- gp + geom_abline(intercept = i_cept, slope = slope )
}
incProgress(1)
})
return(gp)
})
## Render scatter plot; height is tied to the rendered width so the plot
## stays square and resizes with the browser window.
output$scatter_plot <- renderPlot({
req(final_scatter())
final_scatter()
},
height = function() {
return(session$clientData$output_scatter_plot_width) ## dynamic height
},
width = function() {
return(session$clientData$output_scatter_plot_width) ## dynamic height
}, res = 96
) ## dynamic resolution
## render the Pearson correlation value shown beside the plot
output$scatter_xy_corr <- renderText({
return(scatter_plot_elems()$scatter_xy_corr)
})
## Rows the user selected by brushing on the scatter plot, joined with the
## gene annotation (GFF) features so the table shows coordinates/description.
scatter_plot_brushed_points <- eventReactive(input$plot_brush, {
req(scatter_plot_data())
## brush axis variables are read from the ggplot labels so they always match
## what is actually plotted (after decoration), not the current picker state
scatter_plot_brushed_points <- brushedPoints(
df = scatter_plot_data(),
brush = input$plot_brush,
xvar = scatter_plot_elems()$gp$labels$x, #input$scatter_x
yvar = scatter_plot_elems()$gp$labels$y
)
# map annotation metadata onto the brushed gene names
join_col_x <- base::colnames(scatter_plot_brushed_points)[1] ## first column containing geneNames
join_col_y <- base::colnames(scatter_plot_elems()$gff_feat)[7] ## "id" column of the GFF feature table
display_data <- scatter_plot_brushed_points %>%
left_join(scatter_plot_elems()$gff_feat, by = setNames(join_col_y, join_col_x)) %>%
dplyr::select(1,c("seqnames", "start", "end","width", "strand", "description"), dplyr::everything())
return(display_data)
})
## Render the brushed-points table with copy/CSV/Excel/PDF export buttons.
output$brush_table <- DT::renderDataTable({
  req(scatter_plot_brushed_points())
  ## FIX 1: round numeric columns BEFORE converting everything to character —
  ## the original converted all columns to character first, which made the
  ## subsequent mutate_if(is.numeric, ...) a no-op, so nothing was rounded.
  ## FIX 2: funs() is defunct in current dplyr; use a lambda instead.
  return(scatter_plot_brushed_points() %>%
    dplyr::mutate_if(is.numeric , round , 2) %>%
    dplyr::mutate_all(~ replace_na(as.character(.), "--NA--")))
},selection = "none",
server = F,
extensions = c("Buttons"),
options = list(
  deferRender = TRUE,
  scrollX = TRUE,
  dom = "Blfrtip",
  searchHighlight = TRUE,
  buttons =
    list("copy",
      list(extend ="collection",
        buttons = c("csv", "excel", "pdf"), text = "Download"
      )), # end of buttons customization
  # customize the length menu
  lengthMenu = list(
    c(10, 50, 100, 500 ) # declare values
    , c(10, 50, 100, 500) # declare titles
  ),
  pageLength = 10
)
)
## Module: download/export the final scatter plot as an image file.
callModule(module = export_plot,
id = "export_scatter",
file_name = "scatter",
plot = final_scatter
)
## Module: GO/functional analysis of the brushed genes; all brushed genes are
## passed as a single set (split by the constant "1") against the current genome.
callModule(module = functional_analysis_server ,
id = "scatterplot_functional_analysis_ui" ,
ui_id = "scatterplot_functional_analysis_ui",
session = session,
gene_set = reactive(split(x = scatter_plot_brushed_points()[[1]], ## genename
f = "1")) ,
genome = reactive(genome_for_annotations()) )
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# multi scatter plot server----
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## Validate the multi-scatter variable selection when "Generate" is pressed:
## every selected column must be numeric, and at most 5 variables are allowed
## (GGally::ggpairs is memory hungry). Returns TRUE/FALSE; shows an alert on
## failure.
multi_scatter_vars_validated <- eventReactive(input$generate_multi_scatter, {
  req(input$multi_scatter_vars)
  ## FIX: removed `selected_vars`, which was computed but never used.
  ## TRUE/FALSE per selected column: is it numeric?
  col_class <- plot_data() %>%
    dplyr::select(input$multi_scatter_vars) %>%
    dplyr::summarise_all(is.numeric) %>%
    tidyr::gather()
  if (!all(col_class$value)) {
    non_numeric_cols <- col_class %>% dplyr::filter(!value)
    shinyWidgets::sendSweetAlert(
      session = session,
      title = "Error...",
      text = paste0(paste0(non_numeric_cols$key, collapse = " "), " must be of type numeric", collapse = ""),
      type = "error"
    )
    return(FALSE)
  }
  if (length(input$multi_scatter_vars) > 5) {
    shinyWidgets::sendSweetAlert(
      session = session,
      title = "Error...",
      text = paste0("Due to memory limit, maximum 5 variables allowed at moment. Try CorrHeatBox for more than 5 variables."),
      type = "error"
    )
    return(FALSE)
  }
  else {
    return(TRUE)
  }
})
## Multi-scatter conditional-panel flag: TRUE once a final plot exists.
output$multi_scatter_plot_status <- reactive({
req(final_multi_scatter_plot())
return(TRUE)
})
## keep evaluating even while the panel is hidden
outputOptions(output, "multi_scatter_plot_status", suspendWhenHidden = FALSE)
## Refresh the multi-scatter variable picker when plot data changes.
observe({
req(plot_data())
## update x
updatePickerInput(
session = session,
inputId = "multi_scatter_vars",
choices = base::colnames(plot_data())[-1],
selected = base::colnames(plot_data())[2]
)
})
## Multi-scatter data restricted to the user-selected gene groups.
multi_scatter_plot_data <- callModule(module = gene_group_selection ,
id = "multi_scatter_plot_gene_group_selection" ,
gene_group_info = reactive(group_info$row_groups),
generate_plot_action = reactive(input$generate_multi_scatter),
current_session_data_matrix = plot_data
)
## Build the pairwise scatter matrix (GGally::ggpairs) for the validated
## selection; the upper triangle shows correlation values with user-chosen
## text size/colour.
ggpair_plot <- eventReactive(input$generate_multi_scatter, {
req(multi_scatter_plot_data() , multi_scatter_vars_validated())
gp <- GGally::ggpairs(multi_scatter_plot_data(),
columns = c(input$multi_scatter_vars),
upper = list(continuous = GGally::wrap("cor",
size = input$multi_scatter_corr_text_text_size ,
color = input$multi_scatter_corr_text_col)))
return(gp)
})
## Decorate the pair plot (title/labels) via the shared decoration module.
final_multi_scatter_plot <- reactive({
req(ggpair_plot())
multi_scatter_gp <- ggpair_plot()
## decorate multi scatter through module
multi_scatter_gp <- callModule(module = plot_title_and_axis_label_server ,
id = "multi_scatter_plot_title_and_labels" ,
my_ggplot = multi_scatter_gp ,
axis_x_title = "Value" ,
axis_y_title = "Value",
x_tick_angle = 0 ,
aspect_ratio =NULL)
return(multi_scatter_gp)
})
## Render the pair plot; width and height are tied to the rendered width so
## the matrix stays square.
output$multi_scatter_plot <- renderPlot({
req(final_multi_scatter_plot())
return(
withProgress(message = "Display multi-scatter plot in progress",{
incProgress(0.5)
print(final_multi_scatter_plot())
incProgress(1)
})
)
},
height = function() {
return(session$clientData$output_multi_scatter_plot_width)
}, ## dynamic height
width = function() {
return(session$clientData$output_multi_scatter_plot_width)
}
) ## dynamic width
## Module: download/export the multi-scatter plot.
callModule(module = export_plot,
id = "export_multi_scatter" ,
file_name = "multi_scatter_plot",
plot = final_multi_scatter_plot )
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# CorrHeatBox server----
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## Refresh the CorrHeatBox variable picker when plot data changes; defaults
## to the first data column.
observe({
## update x
updatePickerInput(
session = session,
inputId = "corr_heatbox_vars",
choices = base::colnames(plot_data())[-1],
selected = base::colnames(plot_data())[2]
)
})
## Validate that every variable selected for the CorrHeatBox is numeric.
## Returns TRUE when valid; otherwise shows an alert naming the offending
## column(s) and returns FALSE.
corr_heatbox_vars_validated <- eventReactive(input$generate_corr_heatbox, {
  req(input$corr_heatbox_vars)
  ## selected columns that are NOT numeric
  selected_non_num_vars <- plot_data() %>%
    dplyr::select(c(input$corr_heatbox_vars)) %>%
    dplyr::summarise_all(is.numeric) %>%
    tidyr::gather() %>%
    dplyr::filter(value == F)
  if (nrow(selected_non_num_vars) >= 1) {
    shinyWidgets::sendSweetAlert(
      session = session,
      title = "Error...",
      ## FIX: corrected typo/grammar in the user-facing message
      ## ("is non numeric varibales" -> "is a non-numeric variable")
      text = ifelse(length(selected_non_num_vars$key) == 1,
        base::paste0(selected_non_num_vars$key, " is a non-numeric variable"),
        base::paste0(base::paste0(selected_non_num_vars$key, collapse = ","), " are non-numeric variables")
      ),
      type = "error"
    )
    return(FALSE)
  } else {
    return(TRUE)
  }
})
## CorrHeatBox conditional-panel flag: TRUE once the final plot exists.
output$corr_heatbox_status <- reactive({
req(final_corr_heatbox())
return(TRUE)
})
## keep evaluating even while the panel is hidden
outputOptions(output, "corr_heatbox_status", suspendWhenHidden = FALSE)
## CorrHeatBox data restricted to the user-selected gene groups.
corr_heatbox_user_selected_gene_group_data <- callModule(module = gene_group_selection ,
id = "corr_heat_box_gene_group_selection" ,
gene_group_info = reactive(group_info$row_groups),
generate_plot_action = reactive(input$generate_corr_heatbox),
current_session_data_matrix = plot_data
)
## Compute the sample-by-sample correlation matrix for the CorrHeatBox.
## Returns the matrix, or NULL if stats::cor() fails (handled downstream).
corr_heatbox_data <- eventReactive(input$generate_corr_heatbox, {
  req(input$corr_heatbox_vars, corr_heatbox_vars_validated() , corr_heatbox_user_selected_gene_group_data())
  withProgress(message = "Preparing CorrHeatBox " , {
    incProgress(0.5)
    ## tibble -> data.frame with gene ids (first column) as row names.
    ## FIX: renamed the local from `corr_heatbox_data`, which shadowed the
    ## reactive being defined here.
    heatbox_df <- corr_heatbox_user_selected_gene_group_data() %>%
      dplyr::select(1,c(input$corr_heatbox_vars)) %>%
      as.data.frame() %>%
      tibble::column_to_rownames(colnames(corr_heatbox_user_selected_gene_group_data())[1])
    ## calculate correlation; NULL on failure (e.g. constant column)
    corr_mat <- tryCatch({
      stats::cor(heatbox_df)
    },error = function(e){
      return(NULL)
    })
    incProgress(1)
  })
  ## withProgress evaluates its expr in this environment, so corr_mat is visible
  return(corr_mat)
})
## Show an error alert when the correlation computation returned NULL.
observe({
  if(is.null(corr_heatbox_data())){
    ## FIX: added type = "error" for consistency with the sibling alert for
    ## plot failures (sendSweetAlert otherwise shows a neutral dialog).
    shinyWidgets::sendSweetAlert(session = session,title = "Error..!",
                                 type = "error",
                                 text = "Error while calculating correlation (stats::cor). Please check the data.")
  }
})
## Assemble the final CorrHeatBox from the correlation matrix: ggcorrplot
## with user-chosen method/type/clustering, a viridis fill scale (auto or
## manually bounded), and title/label decoration.
final_corr_heatbox <- reactive({
req(corr_heatbox_data())
chb <- tryCatch({
chb <- ggcorrplot::ggcorrplot(corr = corr_heatbox_data() ,
method = input$heatbox_method ,
type = input$heatbox_type,
hc.order = input$cluster_heatbox,
show.diag = as.logical(input$heatbox_show_diag),
lab_col = input$heatbox_corr_text_col ,
lab_size = input$heatbox_corr_text_text_size,
lab = input$heatbox_show_corr_value
)
},error = function(e){
return(NULL)
})
## return NULL (from this reactive) if ggcorrplot::ggcorrplot throws an error;
## the observe below surfaces the error to the user
if(is.null(chb)){
return(NULL)
}
## fill scale, automatic limits
if(input$corr_heatbox_scale_manual == "auto"){
chb <- chb +
scale_fill_gradientn(colours = viridis::viridis(n = input$corr_heatbox_colors) %>% rev())
}
## fill scale with user-supplied limits; out-of-bounds values are squished
## to the limits rather than dropped
if(input$corr_heatbox_scale_manual == "manual"){
chb <- chb +
scale_fill_gradientn(colours = viridis::viridis(n = input$corr_heatbox_colors) %>% rev(),
limits = c(input$corr_heatbox_scale_manual_min , input$corr_heatbox_scale_manual_max) , oob=scales::squish)
}
## decorate plot (title, labels); ggcorrplot's own theme is kept
chb <- callModule(plot_title_and_axis_label_server,
id = "corr_heatbox_title_and_labels" ,
my_ggplot = chb, x_tick_angle = 45 ,
fill_legend_title = "Corr",
apply_theme =FALSE)
return(chb)
})
## Show an error alert when plot construction returned NULL.
observe({
if(is.null(final_corr_heatbox())){
shinyWidgets::sendSweetAlert(session = session,title = "Error..!",
type = "error",
text = "Error while generating corr plot (ggcorrplot::ggcorrplot). Make sure that you selected at least two samples or else check input data.")
}
})
## Render the CorrHeatBox; square, resizing with the browser window.
output$corr_heatbox <- renderPlot({
req(final_corr_heatbox())
return(print(final_corr_heatbox()))
}, height = function() {
return(session$clientData$output_corr_heatbox_width)
}, width = function() {
return(session$clientData$output_corr_heatbox_width)
}, res = 90)
## Render the correlation matrix as a downloadable table (values rounded to
## 2 decimals; row names exposed as a "RowNames" column).
output$corr_heatbox_data <- DT::renderDataTable({
req(corr_heatbox_data())
return(corr_heatbox_data() %>%
as.data.frame() %>%
rownames_to_column("RowNames") %>%
as_tibble() %>%
dplyr::mutate_if(is.numeric, function(i){round(i,2)}))
},selection = "none",
server = F,
extensions = "Buttons",
options = list(
scrollX = TRUE,
dom = "Blfrtip",
searchHighlight = TRUE,
buttons =
list("copy",
list(extend ="collection",
buttons = c("csv", "excel", "pdf"), text = "Download"
)), # end of buttons customization
# customize the length menu
lengthMenu = list(
c(10, 50, 100, 500, -1) # declare values
, c(10, 50, 100, 500, "All") # declare titles
),
pageLength = 10
)
)
## Module: download/export the CorrHeatBox plot.
callModule(module = export_plot,
id = "export_corr_heatbox" ,
file_name = "corr_heatbox",
plot = final_corr_heatbox )
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# density plot server----
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## Refresh the density plot variable picker when plot data changes; defaults
## to the first data column.
observe({
## update x
updatePickerInput(
session = session,
inputId = "density_x",
choices = base::colnames(plot_data())[-1],
selected = base::colnames(plot_data())[2]
)
})
## Validate that every variable selected for the density plot is numeric.
## Returns TRUE when valid; otherwise shows an alert naming the offending
## column(s) and returns FALSE.
density_vars_validated <- eventReactive(input$generate_density, {
  req(input$density_x)
  ## selected columns that are NOT numeric
  selected_non_num_vars <- plot_data() %>%
    dplyr::select(c(input$density_x)) %>%
    dplyr::summarise_all(is.numeric) %>%
    tidyr::gather() %>%
    dplyr::filter(value == F)
  if (nrow(selected_non_num_vars) >= 1) {
    shinyWidgets::sendSweetAlert(
      session = session,
      title = "Error...",
      ## FIX: corrected typo/grammar in the user-facing message
      ## ("is non numeric varibales" -> "is a non-numeric variable")
      text = ifelse(length(selected_non_num_vars$key) == 1,
        base::paste0(selected_non_num_vars$key, " is a non-numeric variable"),
        base::paste0(base::paste0(selected_non_num_vars$key, collapse = ","), " are non-numeric variables")
      ),
      type = "error"
    )
    return(FALSE)
  } else {
    return(TRUE)
  }
})
## TRUE once a density plot has been built; drives the conditionalPanel
## that reveals the density-plot UI.
output$density_plot_status <- reactive({
  req(final_density_plot())
  return(TRUE)
})
## evaluate even while hidden so the conditionalPanel condition updates
outputOptions(output, "density_plot_status", suspendWhenHidden = FALSE)
## rows (genes) restricted to the user-selected gene group(s)
density_plot_data <- callModule(module = gene_group_selection ,
                                id = "density_plot_gene_group_selection" ,
                                gene_group_info = reactive(group_info$row_groups),
                                generate_plot_action = reactive(input$generate_density),
                                current_session_data_matrix = plot_data
)
## base density ggplot, rebuilt only when the "generate" button is pressed
dp <- eventReactive(input$generate_density, {
  req(density_plot_data() , density_vars_validated())
  withProgress(message = "Preparing density plot" , {
    incProgress(0.5)
    ## NOTE(review): local `plot_data` shadows the reactive of the same name
    plot_data <- density_plot_data() %>%
      dplyr::select(c(input$density_x , !!as.symbol("gene_groups")))
    ## long format: one row per (gene, sample), with sample-group labels
    melted <- plot_data %>%
      tidyr::gather(samples, value , -gene_groups) %>%
      left_join(group_info$column_groups , by = c (samples = "group_members")) %>% ## add sample groups
      tidyr::replace_na(list(groups = "No groups assigned")) %>% ## NA will be converted to "No groups assigned"
      dplyr::arrange(groups , samples)%>%
      dplyr::mutate(samples = forcats::fct_inorder(samples)) ## keep sorted order on the plot
    incProgress(1)
  })
  ## base density plot; geoms/scales are layered on in final_density_plot
  dp <- ggplot(melted, aes(x = value, fill = samples))
  return(dp)
})
## final density plot: alpha, fill/facet module, titles and axis labels
final_density_plot <- reactive({
  ## dynamic alpha
  req(dp())
  dp <- dp() + geom_density(alpha = input$density_plot_alpha) #+ theme_bw()
  ## fill and facet density plot (module may change the fill variable)
  out <- callModule(module = ggplot_fill_and_facet ,
                    id = "density_plot_fill_and_facet" ,
                    ggplot = dp , allow_x_var_selection = FALSE)
  dp <- out$plot
  ## titles / axis labels / legend titles
  dp <- callModule(plot_title_and_axis_label_server ,
                   id = "decorate_density_plot" ,
                   my_ggplot = dp ,
                   axis_x_title = "Value",
                   x_tick_angle = 0,
                   axis_y_title = "Density",
                   fill_legend_title = out$fill_var,
                   color_legend_title = out$fill_var
  )
  return(dp)
})
## render a square density plot (height tied to the rendered width)
output$density_plot <- renderPlot({
  return(print(final_density_plot()))
}, height = function() {
  return(session$clientData$output_density_plot_width)
}, width = function() {
  return(session$clientData$output_density_plot_width)
}, res = 90)
## data backing the density plot, refreshed whenever the base plot changes
## NOTE(review): returns the full plot_data() matrix, not only the columns
## selected for the plot — confirm whether filtering was intended.
density_filtered_column <- eventReactive(dp(), {
  dd <- plot_data()
  return(dd)
})
## render density plot data
output$density_filtered_column_display <- DT::renderDataTable({
  return(density_filtered_column())
})
## export density plot
callModule(module = export_plot, id = "export_density" , file_name = "density_plot", plot = final_density_plot )
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# histogram server----
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## Keep the histogram x-variable picker in sync with the current data:
## every column except the first (gene identifiers) is offered, the
## first data column is preselected.
observe({
  data_cols <- base::colnames(plot_data())
  updatePickerInput(
    session = session,
    inputId = "histogram_x",
    choices = data_cols[-1],
    selected = data_cols[2]
  )
})
## Validate the histogram x selection: every chosen column must be
## numeric. On failure a sweet-alert names the offending column(s) and
## FALSE is returned so downstream reactives abort.
histogram_vars_validated <- eventReactive(input$generate_histogram, {
  req(input$histogram_x)
  ## columns among the selection whose values are not numeric
  selected_non_num_vars <- plot_data() %>%
    dplyr::select(c(input$histogram_x)) %>%
    dplyr::summarise_all(is.numeric) %>%
    tidyr::gather() %>%
    dplyr::filter(value == FALSE)
  if (nrow(selected_non_num_vars) >= 1) {
    ## singular/plural user-facing message (typo "varibales" fixed)
    offending <- selected_non_num_vars$key
    msg <- if (length(offending) == 1) {
      base::paste0(offending, " is a non-numeric variable")
    } else {
      base::paste0(base::paste0(offending, collapse = ","),
                   " are non-numeric variables")
    }
    shinyWidgets::sendSweetAlert(
      session = session,
      title = "Error...",
      text = msg,
      type = "error"
    )
    return(FALSE)
  }
  TRUE
})
## TRUE once a histogram has been built; drives the conditionalPanel.
output$histogram_status <- reactive({
  req(final_histogram())
  return(TRUE)
})
## evaluate even while hidden so the conditionalPanel condition updates
outputOptions(output, "histogram_status", suspendWhenHidden = FALSE)
## rows (genes) restricted to the user-selected gene group(s)
histogram_user_selected_gene_group_data <- callModule(module = gene_group_selection ,
                                                      id = "histogram_gene_group_selection" ,
                                                      gene_group_info = reactive(group_info$row_groups),
                                                      generate_plot_action = reactive(input$generate_histogram),
                                                      current_session_data_matrix = plot_data)
## base histogram ggplot, rebuilt on the "generate" button
hg <- eventReactive(input$generate_histogram, {
  req(input$histogram_x, histogram_vars_validated() , histogram_user_selected_gene_group_data())
  withProgress(message = "Preparing histogram" , {
    incProgress(0.5)
    ## NOTE(review): local `plot_data` shadows the reactive of the same name
    plot_data <- histogram_user_selected_gene_group_data() %>%
      dplyr::select(c(input$histogram_x , !!as.symbol("gene_groups")))
    ## long format: one row per (gene, sample), with sample-group labels
    melted <- plot_data %>%
      tidyr::gather(samples, value , -gene_groups) %>%
      left_join(group_info$column_groups , by = c (samples = "group_members")) %>%
      tidyr::replace_na(list(groups = "No groups assigned")) %>% ## NA will be converted to "No groups assigned"
      dplyr::arrange(groups , samples)%>%
      dplyr::mutate(samples = forcats::fct_inorder(samples))
    incProgress(1)
  })
  ## geoms are layered on in final_histogram
  hg <- ggplot(melted, aes(x = value, fill = samples, col = samples))
  return(hg)
})
## final histogram: bins/position/alpha, fill & facet, titles and labels
final_histogram <- reactive({
  ## dynamic alpha
  req(hg())
  hg <- hg() + geom_histogram(alpha = input$histogram_alpha ,
                              position = input$histogram_positions ,
                              bins = input$histogram_number_of_bins , col ="black")
  ## fill and facet histogram
  out <- callModule(module = ggplot_fill_and_facet ,
                    id = "histogram_fill_and_facet" ,
                    ggplot = hg , allow_x_var_selection = FALSE)
  hg <- out$plot
  ## titles / axis labels / legend titles
  hg <- callModule(plot_title_and_axis_label_server ,
                   id = "decorate_histogram" ,
                   my_ggplot = hg ,
                   axis_x_title = "Value",
                   x_tick_angle = 0,
                   axis_y_title = "Count",
                   fill_legend_title = out$fill_var,
                   color_legend_title = out$fill_var
  )
  ## remove legend title
  return(hg)
})
## render a square histogram (height tied to the rendered width)
output$histogram <- renderPlot({
  return(print(final_histogram()))
}, height = function() {
  return(session$clientData$output_histogram_width)
}, width = function() {
  return(session$clientData$output_histogram_width)
}, res = 90)
## export histogram
callModule(module = export_plot, id = "export_histogram" , file_name = "histogram", plot = final_histogram)
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## Joy plot server----
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## Keep the joy-plot x-variable picker in sync with the current data:
## every column except the first (gene identifiers) is offered, the
## first data column is preselected.
observe({
  data_cols <- base::colnames(plot_data())
  updatePickerInput(
    session = session,
    inputId = "joy_plot_x",
    choices = data_cols[-1],
    selected = data_cols[2]
  )
})
## Validate the joy-plot x selection: every chosen column must be
## numeric. On failure a sweet-alert names the offending column(s) and
## FALSE is returned so downstream reactives abort.
joy_plot_vars_validated <- eventReactive(input$generate_joy_plot, {
  req(input$joy_plot_x)
  ## columns among the selection whose values are not numeric
  selected_non_num_vars <- plot_data() %>%
    dplyr::select(c(input$joy_plot_x)) %>%
    dplyr::summarise_all(is.numeric) %>%
    tidyr::gather() %>%
    dplyr::filter(value == FALSE)
  if (nrow(selected_non_num_vars) >= 1) {
    ## singular/plural user-facing message (typo "varibales" fixed)
    offending <- selected_non_num_vars$key
    msg <- if (length(offending) == 1) {
      base::paste0(offending, " is a non-numeric variable")
    } else {
      base::paste0(base::paste0(offending, collapse = ","),
                   " are non-numeric variables")
    }
    shinyWidgets::sendSweetAlert(
      session = session,
      title = "Error...",
      text = msg,
      type = "error"
    )
    return(FALSE)
  }
  TRUE
})
## joy-plot condition panel: show only once the plot has been built
output$joy_plot_status <- reactive({
  req(final_joy_plot())
  return(TRUE)
})
outputOptions(output, "joy_plot_status", suspendWhenHidden = FALSE)
## rows (genes) restricted to the user-selected gene group(s)
joy_plot_user_selected_gene_group_data <- callModule(module = gene_group_selection ,
                                                     id = "joy_plot_gene_group_selection" ,
                                                     gene_group_info = reactive(group_info$row_groups),
                                                     generate_plot_action = reactive(input$generate_joy_plot),
                                                     current_session_data_matrix = plot_data)
## base joy (ridgeline) ggplot, rebuilt on the "generate" button
ggjoy_plot <- eventReactive(input$generate_joy_plot, {
  req(input$joy_plot_x, joy_plot_vars_validated() , joy_plot_user_selected_gene_group_data())
  ## long format with sample-group labels; first-appearance order preserved
  plot_data <- joy_plot_user_selected_gene_group_data() %>%
    dplyr::select(c(input$joy_plot_x) , gene_groups) %>%
    tidyr::gather(samples, value , -gene_groups) %>%
    left_join(group_info$column_groups , by = c(samples = "group_members")) %>%
    tidyr::replace_na(list(groups = "No groups assigned")) %>% ## NA will be converted to "No groups assigned"
    dplyr::mutate(samples = forcats::fct_inorder(samples)) %>% ## preserve original order
    dplyr::mutate(groups = forcats::fct_inorder(groups))# %>%
  ggjoy_plot <- ggplot(plot_data) + aes(x = value)
  return(ggjoy_plot)
})
## final joy plot: fill/facet module, tightened scales, titles and labels
final_joy_plot <- reactive({
  req(ggjoy_plot())
  ## fill and facet
  ggjoy_plot <- ggjoy_plot()
  out <- callModule(module = joyplot_fill_and_facet , id = "joy_plot_fill_and_facet" , joyplot = ggjoy_plot)
  ggjoy_plot <- out$plot
  withProgress(message = "Preparing joy plot",{
    incProgress(0.5)
    ## shrink padding around the panel
    ggjoy_plot <- ggjoy_plot +
      scale_x_continuous(expand = c(0.01, 0)) +
      scale_y_discrete(expand = c(0.01, 0)) #+
    #theme_bw()
    ## titles / axis labels derived from the plot's own aesthetic mappings
    ggjoy_plot <- callModule(plot_title_and_axis_label_server ,
                             id = "decorate_joy_plot" ,
                             my_ggplot = ggjoy_plot ,
                             axis_x_title = ggjoy_plot$mapping$x %>% rlang::quo_text(),
                             axis_y_title = ggjoy_plot$mapping$y %>% rlang::quo_text(),
                             x_tick_angle = 0,
                             fill_legend_title = out$fill_var)
    incProgress(1)
  })
  return(ggjoy_plot)
})
## render a square joy plot (height tied to the rendered width)
output$joy_plot <- renderPlot({
  ## dynamic alpha
  req(final_joy_plot())
  return(print(final_joy_plot()))
}, height = function() {
  return(session$clientData$output_joy_plot_width)
}, width = function() {
  return(session$clientData$output_joy_plot_width)
}, res = 90)
## export joy plot
callModule(module = export_plot , id = "export_joy_plot" , file_name = "joy_plot" , plot = final_joy_plot)
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# box plot server----
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## Keep the box-plot variable picker in sync with the current data:
## every column except the first (gene identifiers) is offered, the
## first data column is preselected.
observeEvent(plot_data(), {
  data_cols <- base::colnames(plot_data())
  updatePickerInput(
    session = session,
    inputId = "box_x",
    choices = data_cols[-1],
    selected = data_cols[2]
  )
})
## Validate the box-plot selection: every chosen column must be numeric.
## On failure a sweet-alert names the offending column(s) and FALSE is
## returned so downstream reactives abort.
box_vars_validated <- eventReactive(input$generate_box, {
  req(input$box_x)
  ## columns among the selection whose values are not numeric
  selected_non_num_vars <- plot_data() %>%
    dplyr::select(c(input$box_x)) %>%
    dplyr::summarise_all(is.numeric) %>%
    tidyr::gather() %>%
    dplyr::filter(value == FALSE)
  if (nrow(selected_non_num_vars) >= 1) {
    ## singular/plural user-facing message (typo "varibales" fixed)
    offending <- selected_non_num_vars$key
    msg <- if (length(offending) == 1) {
      base::paste0(offending, " is a non-numeric variable")
    } else {
      base::paste0(base::paste0(offending, collapse = ","),
                   " are non-numeric variables")
    }
    shinyWidgets::sendSweetAlert(
      session = session,
      title = "Error...",
      text = msg,
      type = "error"
    )
    return(FALSE)
  }
  TRUE
})
## boxplot condition panel : show only if the plot has been built
output$box_plot_status <- reactive({
  req(final_box_plot())
  return(TRUE)
})
## evaluate even while hidden so the conditionalPanel condition updates
outputOptions(output, "box_plot_status", suspendWhenHidden = FALSE)
## rows (genes) restricted to the user-selected gene group(s)
box_plot_user_selected_gene_group_data <- callModule(module = gene_group_selection ,
                                                     id = "box_plot_gene_group_selection" ,
                                                     gene_group_info = reactive(group_info$row_groups),
                                                     generate_plot_action = reactive(input$generate_box),
                                                     current_session_data_matrix = plot_data)
## base box-plot ggplot, rebuilt on the "generate" button
boxplt <- eventReactive(input$generate_box, {
  req(input$box_x , box_vars_validated() , box_plot_user_selected_gene_group_data())
  ## long format: one row per (gene, sample), with sample-group labels
  box_plot_data <- box_plot_user_selected_gene_group_data() %>%
    dplyr::select(c(input$box_x , "gene_groups")) %>%
    tidyr::gather(key = samples , value = value , -gene_groups) %>%
    dplyr::left_join(group_info$column_groups , by = c(samples = "group_members")) %>%
    tidyr::replace_na(list(groups = "No groups assigned")) %>% ## NA will be converted to "No groups assigned"
    dplyr::mutate(samples = forcats::fct_inorder(samples)) %>%
    dplyr::mutate(groups = forcats::fct_inorder(groups))# %>%
  ## base plot; geoms are added in final_box_plot
  bp <- ggplot(box_plot_data)
  return(bp)
})
## Final box plot: alpha, fill/facet module, optional pairwise p-values,
## optional jittered raw data points, then titles and axis labels.
final_box_plot <- reactive({
  req(boxplt())
  withProgress(message = "Preparing box plot" , {
    incProgress(0.5)
    boxplt <- boxplt()
    ## y axis and alpha
    boxplt <- boxplt + geom_boxplot(alpha = input$box_plot_alpha) +
      aes(y = value)
    ## fill and facet box plot (module decides x / fill / facet variables)
    out <- callModule(module = ggplot_fill_and_facet ,
                      id = "box_plot_fill_and_facet" ,
                      ggplot = boxplt)
    boxplt <- out$plot
    ## optionally overlay pairwise significance tests for every pair of
    ## x-axis levels (needs at least two levels)
    if (input$box_plot_pvalue == "TRUE") {
      box_x_var_type <- boxplt$data %>%
        dplyr::pull(!!boxplt$mapping$x) %>%
        unique() %>% as.character()
      if (length(box_x_var_type) >= 2) {
        all_combin <- combn(box_x_var_type, 2, simplify = FALSE)
        boxplt <- boxplt +
          ggpubr::stat_compare_means(comparisons = all_combin,
                                     method = input$box_plot_pval_method)
      }
    }
    ## optionally overlay the raw data points
    if (input$box_plot_show_dots %>% as.logical()) {
      boxplt <- boxplt + geom_jitter(width = input$boxplot_dots_width , color = input$boxplot_dots_color , alpha = input$boxplot_dots_transprancy)
    }
    ## titles and axis labels
    ## (fixed: removed a trailing comma that passed an empty argument
    ## through callModule's `...`)
    boxplt <- callModule(module = plot_title_and_axis_label_server ,
                         id = "box_plot_label_and_title",
                         my_ggplot = boxplt,
                         axis_x_title = boxplt$mapping$x %>% rlang::quo_text(),
                         axis_y_title = boxplt$mapping$y %>% rlang::quo_text(),
                         fill_legend_title = boxplt$mapping$fill %>% rlang::quo_text(),
                         x_tick_angle = 90
    )
    incProgress(1)
  })
  return(boxplt)
})
## render a square box plot (height tied to the rendered width)
output$box_plot <- renderPlot({
  req(final_box_plot())
  return(print(final_box_plot()))
}, height = function() {
  return(session$clientData$output_box_plot_width)
}, width = function() {
  return(session$clientData$output_box_plot_width)
}, res = 96)
## export box plot
callModule(module = export_plot , id = "export_box_plot" , file_name = "box_plot" ,plot = final_box_plot)
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# violin plot server----
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## Keep the violin-plot variable picker in sync with the current data:
## every column except the first (gene identifiers) is offered, the
## first data column is preselected.
observeEvent(plot_data(), {
  data_cols <- base::colnames(plot_data())
  updatePickerInput(
    session = session,
    inputId = "violin_x",
    choices = data_cols[-1],
    selected = data_cols[2]
  )
})
## Validate the violin-plot selection: every chosen column must be
## numeric. On failure a sweet-alert names the offending column(s) and
## FALSE is returned so downstream reactives abort.
violin_vars_validated <- eventReactive(input$generate_violin, {
  req(input$violin_x)
  ## columns among the selection whose values are not numeric
  selected_non_num_vars <- plot_data() %>%
    dplyr::select(c(input$violin_x)) %>%
    dplyr::summarise_all(is.numeric) %>%
    tidyr::gather() %>%
    dplyr::filter(value == FALSE)
  if (nrow(selected_non_num_vars) >= 1) {
    ## singular/plural user-facing message (typo "varibales" fixed)
    offending <- selected_non_num_vars$key
    msg <- if (length(offending) == 1) {
      base::paste0(offending, " is a non-numeric variable")
    } else {
      base::paste0(base::paste0(offending, collapse = ","),
                   " are non-numeric variables")
    }
    shinyWidgets::sendSweetAlert(
      session = session,
      title = "Error...",
      text = msg,
      type = "error"
    )
    return(FALSE)
  }
  TRUE
})
## violin plot condition panel : show only if the plot has been built
output$violin_plot_status <- reactive({
  req(final_violin_plot())
  return(TRUE)
})
outputOptions(output, "violin_plot_status", suspendWhenHidden = FALSE)
## rows (genes) restricted to the user-selected gene group(s)
violin_plot_user_selected_gene_group_data <- callModule(module = gene_group_selection ,
                                                        id = "violin_plot_gene_group_selection" ,
                                                        gene_group_info = reactive(group_info$row_groups),
                                                        generate_plot_action = reactive(input$generate_violin),
                                                        current_session_data_matrix = plot_data)
## base violin-plot ggplot, rebuilt on the "generate" button
violinplt <- eventReactive(input$generate_violin, {
  req(input$violin_x , violin_vars_validated() , violin_plot_user_selected_gene_group_data())
  ## long format: one row per (gene, sample), with sample-group labels
  violin_plot_data <- violin_plot_user_selected_gene_group_data() %>%
    dplyr::select(c(input$violin_x) , gene_groups) %>%
    tidyr::gather(key = samples , value = value , -gene_groups) %>%
    left_join(group_info$column_groups , by = c(samples = "group_members")) %>%
    tidyr::replace_na(list(groups = "No groups assigned")) %>% ## NA will be converted to "No groups assigned"
    dplyr::mutate(samples = forcats::fct_inorder(samples)) %>%
    dplyr::mutate(groups = forcats::fct_inorder(groups))# %>%
  ## base plot; geoms are added in final_violin_plot
  vp <- ggplot(violin_plot_data)
  return(vp)
})
## final violin plot: alpha/quantiles, fill & facet, optional p-values,
## then titles and axis labels
final_violin_plot <- reactive({
  req(violinplt())
  withProgress(message = "Preparing violin plot" , {
    incProgress(0.5)
    ## NOTE(review): violin_plot_vars is assigned but never used below
    violin_plot_vars <- isolate(input$violin_x)
    violinplt <- violinplt()
    #violinplt <- violinplt + aes(x = samples, y = value , fill = samples )
    ## y axis and alpha
    violinplt <- violinplt +
      geom_violin(alpha = input$violin_plot_alpha , draw_quantiles = c(input$violin_show_quantile))+
      aes(y = value)
    # # dynamic fill and alpha
    # violinplt <- violinplt +
    #   geom_violin(alpha = input$violin_plot_alpha , draw_quantiles = c(input$violin_show_quantile))
    #
    #
    ## fill and facet violin plot (module decides x / fill / facet vars)
    out <- callModule(module = ggplot_fill_and_facet ,
                      id = "violin_plot_fill_and_facet" ,
                      ggplot = violinplt)
    violinplt <- out$plot
    ## optionally overlay pairwise significance tests for every pair of
    ## x-axis levels (needs at least two levels)
    if (input$violin_plot_pvalue == "TRUE") {
      violin_x_var_type <- violinplt$data %>%
        dplyr::pull(!!violinplt$mapping$x) %>%
        unique() %>% as.character()
      if(length(violin_x_var_type) >= 2){
        all_combin <- combn(violin_x_var_type, 2, simplify = F)
        violinplt <- violinplt +
          ggpubr::stat_compare_means(comparisons = all_combin, method = input$violin_plot_pval_method)
      }
    }
    ## titles and axis labels
    violinplt <- callModule(module = plot_title_and_axis_label_server ,
                            id = "violin_plot_label_and_title",
                            my_ggplot = violinplt,
                            axis_x_title = violinplt$mapping$x %>% rlang::quo_text(),
                            axis_y_title = violinplt$mapping$y %>% rlang::quo_text(),
                            x_tick_angle = 90,
                            fill_legend_title = violinplt$mapping$fill %>% rlang::quo_text()
    )
    incProgress(1)
  })
  return(violinplt)
})
## render a square violin plot (height tied to the rendered width)
output$violin_plot <- renderPlot({
  req(final_violin_plot())
  return(print(final_violin_plot()))
}, height = function() {
  return(session$clientData$output_violin_plot_width)
}, width = function() {
  return(session$clientData$output_violin_plot_width)
}, res = 96)
## export violin plot
callModule(module = export_plot , id = "export_violin_plot" , file_name = "violin_plot" ,plot = final_violin_plot)
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# bar plot server----
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## Keep the bar-plot variable picker in sync with the current data:
## every column except the first (gene identifiers) is offered, the
## first data column is preselected.
observeEvent(plot_data(), {
  data_cols <- base::colnames(plot_data())
  updatePickerInput(
    session = session,
    inputId = "barplot_vars",
    choices = data_cols[-1],
    selected = data_cols[2]
  )
})
## Keep the bar-plot gene picker in sync with the data; the first
## column holds gene names, and the first gene is preselected.
observeEvent(plot_data(), {
  gene_names <- plot_data() %>% dplyr::pull(1)
  updatePickerInput(
    session = session,
    inputId = "barplot_select_genes",
    choices = gene_names,
    selected = gene_names[1]
  )
})
## Validate the bar-plot selection: every chosen column must be numeric.
## On failure a sweet-alert names the offending column(s) and FALSE is
## returned so downstream reactives abort.
barplot_vars_validated <- eventReactive(input$generate_barplot, {
  req(input$barplot_vars)
  ## columns among the selection whose values are not numeric
  selected_non_num_vars <- plot_data() %>%
    dplyr::select(c(input$barplot_vars)) %>%
    dplyr::summarise_all(is.numeric) %>%
    tidyr::gather() %>%
    dplyr::filter(value == FALSE)
  if (nrow(selected_non_num_vars) >= 1) {
    ## singular/plural user-facing message (typo "varibales" fixed)
    offending <- selected_non_num_vars$key
    msg <- if (length(offending) == 1) {
      base::paste0(offending, " is a non-numeric variable")
    } else {
      base::paste0(base::paste0(offending, collapse = ","),
                   " are non-numeric variables")
    }
    shinyWidgets::sendSweetAlert(
      session = session,
      title = "Error...",
      text = msg,
      type = "error"
    )
    return(FALSE)
  }
  TRUE
})
## barplot condition panel : show only if the plot has been built
output$barplot_status <- reactive({
  req(final_barplot())
  return(TRUE)
})
outputOptions(output, "barplot_status", suspendWhenHidden = FALSE)
## base bar-plot ggplot, rebuilt on the "generate" button
barplot <- eventReactive(input$generate_barplot, {
  req(input$barplot_vars , barplot_vars_validated(),input$barplot_select_genes)
  ## long format restricted to the selected genes and samples
  ## NOTE(review): dplyr::funs() is deprecated; prefer ~ "GeneNames"
  barplot_data <- plot_data() %>%
    dplyr::rename_at(1 , funs(return("GeneNames"))) %>% ## set first column name as GeneNames
    dplyr::select(1, c(input$barplot_vars)) %>%
    dplyr::filter(GeneNames %in% input$barplot_select_genes) %>%
    tidyr::gather(key = samples , value = value , -GeneNames) %>%
    left_join(group_info$column_groups , by = c(samples = "group_members")) %>%
    tidyr::replace_na(list(groups = "No groups assigned")) %>% ## NA will be converted to "No groups assigned"
    dplyr::mutate(samples = forcats::fct_inorder(samples)) %>% ## order by first appearance
    dplyr::mutate(groups = forcats::fct_inorder(groups))# %>% ## order by first appearance
  ## when user groups are uploaded, keep sample order as uploaded
  ## NOTE(review): `if(TRUE)` looks like a placeholder for a future toggle
  if(TRUE){ ## original (user supplied) sample order
    barplot_data <- barplot_data %>%
      dplyr::mutate(samples = forcats::fct_relevel(samples ,group_info$column_groups %>% pull(2) %>%
                                                     as.character() %>% unique() )) #%>% ## order by group members
  }
  if(TRUE){ ## original (user supplied) column group order
    barplot_data <- barplot_data %>%
      dplyr::mutate(groups = forcats::fct_relevel(groups ,group_info$column_groups %>% pull(1) %>%
                                                    as.character() %>% unique()))
  }
  ## barplot base ggplot; geoms are added in final_barplot
  barplot <- ggplot(barplot_data)
  return(barplot)
})
## final bar plot: dodge columns, fill by group/variable or a single
## user-chosen colour, optional faceting, then titles and labels
final_barplot <- reactive({
  req(barplot())
  withProgress(message = "Preparing bar plot" , {
    incProgress(0.5)
    barplot <- barplot()
    ## current bar plot data
    current_bar_plot_data <- barplot$data
    ## the outline-colour aesthetic is the opposite of the x aesthetic;
    ## one of "samples" or "GeneNames"
    color_by_var <- ifelse(input$barplot_xaxis_choices == "samples" , "GeneNames" , "samples")
    ## NOTE(review): guides(color = FALSE) is deprecated in ggplot2 >= 3.3.4
    ## (use guides(color = "none")) — behaviour unchanged for now
    barplot <- barplot +
      geom_col(aes(x = !!as.symbol(input$barplot_xaxis_choices),
                   y = value , col = !!(as.symbol(color_by_var)) ) , position = "dodge" ,
               alpha = input$barplot_alpha) +
      scale_color_manual(breaks = current_bar_plot_data[[color_by_var]] %>% unique(),
                         values = c(rep("black", current_bar_plot_data[[color_by_var]] %>%
                                          unique() %>% length() ))) +
      guides(color = FALSE, size = FALSE)
    ## fill by groups or variable.
    if(input$fill_barplot != "identical"){
      barplot <- barplot + aes(fill = !!as.name(input$fill_barplot))
    }
    ## identical fill: map a constant aesthetic, then force every level
    ## to the single user-chosen colour
    if (input$fill_barplot == "identical") {
      barplot <- barplot +
        aes(fill = input$barplot_color_chooser) + ## fake aesthetic
        scale_fill_manual(breaks = current_bar_plot_data[[color_by_var]] %>% unique(),
                          values = c(rep(input$barplot_color_chooser, current_bar_plot_data[[color_by_var]] %>%
                                           unique() %>% length() )))
    }
    ## facet bar plot
    if(input$barplot_facet_value != "none") {
      barplot <- barplot +
        facet_wrap(~ eval(as.symbol(input$barplot_facet_value)) , scales = c(input$barplot_yscale ))
    }
    ## titles and axis labels
    barplot <- callModule(module = plot_title_and_axis_label_server ,
                          id = "barplot_label_and_title",
                          my_ggplot = barplot,
                          axis_x_title = input$barplot_xaxis_choices,
                          axis_y_title = "Value",
                          x_tick_angle = 90,
                          fill_legend_title = input$fill_barplot
    )
    incProgress(1)
  })
  return(barplot)
})
## render a square bar plot (height tied to the rendered width)
output$barplot <- renderPlot({
  req(final_barplot())
  return(print(final_barplot()))
}, height = function() {
  return(session$clientData$output_barplot_width)
}, width = function() {
  return(session$clientData$output_barplot_width)
}, res = 96)
## export barplot
callModule(module = export_plot , id = "export_barplot" , file_name = "barplot" ,plot = final_barplot)
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# line plot server----
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## Keep the line-plot x-variable picker in sync with the current data:
## every column except the first (gene identifiers) is offered, the
## first data column is preselected.
observe({
  data_cols <- base::colnames(plot_data())
  updatePickerInput(
    session = session,
    inputId = "lineplot_x",
    choices = data_cols[-1],
    selected = data_cols[2]
  )
})
## Validate line-plot parameters before building the plot: all selected
## columns must be numeric, at least two columns are required, and the
## "top-n genes" and "k-means k" settings must be numeric. Each failure
## shows a sweet-alert and returns FALSE (early exit); TRUE otherwise.
line_plot_params_validated <- eventReactive(input$generate_lineplot, {
  req(input$lineplot_x)
  ## columns among the selection whose values are not numeric
  selected_non_num_vars <- plot_data() %>%
    dplyr::select(c(input$lineplot_x)) %>%
    dplyr::summarise_all(is.numeric) %>%
    tidyr::gather() %>%
    dplyr::filter(value == FALSE)
  ## check if selected variables are of type numeric
  if (nrow(selected_non_num_vars) >= 1) {
    ## singular/plural user-facing message (typo "varibales" fixed)
    offending <- selected_non_num_vars$key
    msg <- if (length(offending) == 1) {
      base::paste0(offending, " is a non-numeric variable")
    } else {
      base::paste0(base::paste0(offending, collapse = ","),
                   " are non-numeric variables")
    }
    shinyWidgets::sendSweetAlert(
      session = session,
      title = "Error...",
      text = msg,
      type = "error"
    )
    return(FALSE)
  }
  ## at least 2 variables are required to draw a line plot
  if (length(input$lineplot_x) == 1) {
    shinyWidgets::sendSweetAlert(
      session = session,
      title = "Error...",
      text = paste0("Select at least two columns to draw line plot"),
      type = "error"
    )
    return(FALSE)
  }
  ## `# top variable genes to show` must be numeric
  if (!is.numeric(input$lineplot_top_n_genes)) {
    shinyWidgets::sendSweetAlert(
      session = session,
      title = "Error...",
      text = paste0("`# top variable genes to show` value must be of type numeric"),
      type = "error"
    )
    return(FALSE)
  }
  ## `# of clusters (k-means)` must be numeric
  if (!is.numeric(input$line_plot_nclust)) {
    shinyWidgets::sendSweetAlert(
      session = session,
      title = "Error...",
      text = paste0("`# of clusters (k-means)` value must be of type numeric"),
      type = "error"
    )
    return(FALSE)
  }
  TRUE
})
## line plot condition panel : show only if the base plot exists
output$line_plot_status <- reactive({
  req(line_plt())
  return(TRUE)
})
outputOptions(output, "line_plot_status", suspendWhenHidden = FALSE)
## rows (genes) restricted to the user-selected gene group(s)
line_plot_user_selected_gene_group_data <- callModule(module = gene_group_selection ,
                                                      id = "line_plot_gene_group_selection" ,
                                                      gene_group_info = reactive(group_info$row_groups),
                                                      generate_plot_action = reactive(input$generate_lineplot),
                                                      current_session_data_matrix = plot_data)
## Prepare line-plot data: cluster genes (k-means or user gene groups),
## choose z-score vs raw values for display, optionally collapse each
## cluster to its average line, then melt to long format.
line_plot_data <- eventReactive(eventExpr = input$generate_lineplot, {
  req(line_plot_params_validated())
  withProgress(message = 'Preparing line plot data', {
    # Update progress
    incProgress(0.5) ## display 50% task finished when it starts
    ## fixed seed so k-means clustering is reproducible across runs
    set.seed(1234)
    ## handle any error raised while performing clustering
    clustered <- tryCatch({
      clustered <- tibble_to_row_clusters(x = line_plot_user_selected_gene_group_data(),
                                          row_ident = 1 ,
                                          cols_to_use = input$lineplot_x,
                                          use_z_score_for_clustering = ifelse(input$lineplot_cluster_value_type == 'zscore' ,
                                                                              TRUE, FALSE) ,
                                          num_of_top_var_rows = ifelse(input$lineplot_genes_selection == 'top_variable_genes',input$lineplot_top_n_genes , -1),
                                          nclust = ifelse(input$line_plot_cluster_genes_by == "kmeans",
                                                          as.numeric(input$line_plot_nclust) , 1 ) ## if user selects cluster by gene groups, value of nclust = 1
      )
      ## gene count is appended to the cluster name further below
    }, error = function(x){
      ## surface the clustering error to the user and yield NULL
      shinyWidgets::sendSweetAlert(session = session ,
                                   type = "error",
                                   title = "Error" ,
                                   text = tags$h5(tags$code(x)))
      return(NULL)
    })
    # Update progress
    incProgress(1)
  }) ## withProgress ends
  ## return NULL if object `clustered` is NULL (clustering failed)
  if(is.null(clustered)){
    return(NULL)
  }
  ## select which value to display on plot: z-score or raw value
  if(input$lineplot_display_value_type == "zscore"){
    forPlot <- clustered$zscore %>% left_join(clustered$clusters)
  } else{
    forPlot <- clustered$raw_value %>% left_join(clustered$clusters)
  }
  ## if clustering by gene_groups, replace cluster ids with the user's groups
  if(input$line_plot_cluster_genes_by == "gene_groups"){
    ## tibble of two cols: gene names + user supplied gene_groups
    user_gene_groups <- line_plot_user_selected_gene_group_data() %>%
      dplyr::select(1, dplyr::last_col())
    ## column names used inside left_join
    left_col <- colnames(forPlot)[1]
    right_col <- colnames(user_gene_groups)[1]
    ## name of the column produced by the join, used below to drop it
    ## again after copying it into `clust`
    column_to_be_joined <- colnames(user_gene_groups)[2]
    forPlot <- forPlot %>%
      left_join(user_gene_groups , setNames(right_col, left_col)) %>%
      dplyr::mutate(clust = !!as.symbol(column_to_be_joined)) %>%
      dplyr::select(-!!as.symbol(column_to_be_joined))
  }
  ## add gene count to each cluster label, e.g. "1:(N=42)"
  forPlot <- forPlot %>%
    dplyr::group_by(clust) %>%
    dplyr::add_count() %>%
    dplyr::ungroup() %>%
    dplyr::mutate(clust = paste(clust, ":(N=" ,n , ")" , sep = "")) %>%
    dplyr::select(-n)
  ## plot all lines vs one summary line per cluster
  if (input$activate_avg_line_plot == "average") {
    forPlot <- forPlot %>%
      dplyr::group_by(clust) %>%
      dplyr::summarise_if(is.numeric, input$avg_line_type)
  }
  ## melt to long format; sample order preserved as selected
  forPlot <- forPlot %>%
    tidyr::gather(key = "variable", value = "value" , input$lineplot_x) %>%
    dplyr::mutate(variable = forcats::fct_inorder(variable)) %>% ## preserve original order of samples
    dplyr::group_by(variable)
  return(forPlot)
})
## base line-plot ggplot: one line per gene (row), x = sample, y = value
line_plt <- eventReactive(eventExpr = input$generate_lineplot, {
  req(line_plot_data())
  ### plot
  gp <- line_plot_data() %>%
    dplyr::mutate(clust = forcats::fct_relevel(clust, as.numeric(clust) %>%
                                                 unique %>%
                                                 sort %>% as.character)) %>% ## numeric cluster to ordered factor
    dplyr::mutate(row_num = 1:n()) %>% ## unique id per row so each gene gets its own line
    ggplot2::ggplot(aes(x = variable,
                        y = value, group = row_num , alpha = 1))
  return(gp)
})
## final line plot: points + smoothed splines, optional identical colour,
## transparency, optional cluster faceting, then titles and labels
final_line_plot <- reactive({
  req(line_plt())
  validate(need(is.numeric(input$line_plot_ncols) ,
                message = "Number of columns must be numeric"))
  ## line plot geoms (straight geom_line kept for reference below)
  line_plt <- line_plt() +
    geom_point(size = input$lineplot_point_size) +
    #geom_line(aes(col = clust) , size = input$lineplot_line_size) +
    ggalt::geom_xspline(aes(col = clust),
                        spline_shape=input$line_plot_splin_shape,
                        size=input$lineplot_line_size)+
    theme(legend.position = "none", axis.text.x = element_text(angle = 90, vjust = 0.4))
  ## single user-chosen colour for every cluster
  if(input$line_plot_color_by == 'identical'){
    ## number of clusters
    n_clust <- line_plt$data %>%
      dplyr::pull(clust) %>% ## pull cluster var column
      base::unique() %>%
      as.character() %>% length()
    ## one identical colour per cluster
    colrs <- c(rep(input$line_plot_color_chooser, n_clust))
    ## color and fill manual
    line_plt <- line_plt +
      scale_fill_manual(values = colrs) +
      scale_color_manual(values = colrs)
  }
  ## manual alpha; NOTE(review): guides(alpha = FALSE) is deprecated in
  ## ggplot2 >= 3.3.4 (use "none") — behaviour unchanged for now
  line_plt <- line_plt +
    scale_alpha(range = input$line_plot_line_transparancy) + ## override alpha in the plot
    guides(alpha = FALSE ,
           color = guide_legend(override.aes = list(alpha = input$line_plot_line_transparancy))) ## override alpha in the legend
  ## line plot facet (one panel per cluster)
  if(input$line_plot_separate_by == 'gene_groups'){
    line_plt <- line_plt + facet_wrap(~clust, ncol = input$line_plot_ncols ,
                                      scales = input$line_plot_facet_scale_free)
  }
  ## titles and axis labels
  line_plot <- callModule(module = plot_title_and_axis_label_server ,
                          id = "decorate_line_plot" ,
                          my_ggplot = line_plt ,
                          axis_x_title = "Samples",
                          axis_y_title = "Value",
                          color_legend_title = "Cluster",
                          x_tick_angle = 90
  )
  # ## fill color identical
  # line_plot <- callModule(module = ggplot_fill_colour , "line_plot_color_identical" ,
  #                         gp = line_plot , times = input$line_plot_nclust)
  return(line_plot)
})
## render line plot
output$line_plot <- renderPlot({
req(final_line_plot())
return(print(final_line_plot()))
#return(
# withProgress(message = "Display line plot in progress", {
# incProgress(0.5)
#print(final_line_plot())
#incProgress(1)
#})
#)
}, height = function() {
return(session$clientData$output_line_plot_width)
}, width = function() {
return(session$clientData$output_line_plot_width)
}, res = 96)
## lineplot data to display
line_plot_data_disply <- eventReactive(input$generate_lineplot , {
req(line_plot_data())
display_data <- line_plot_data() %>%
tidyr::spread(key = "variable" , value = "value")%>%
dplyr::select(1,input$lineplot_x,dplyr::everything())
})
    ## Genome annotation for the line plot.
    ## The global genome-annotation object cannot be used here directly.
    ## Example: the user has generated one line plot, then (accidentally or on
    ## purpose) selects/uploads data for a different species. The global
    ## reference genome would be updated, and annotations shown in the existing
    ## line plot would change under the user's feet. To prevent this, the line
    ## plot keeps its own snapshot of the reference annotation, refreshed only
    ## when the "generate line plot" button is pressed.
    lineplot_reference_annot <- eventReactive(input$generate_lineplot , {
      req(genome_for_annotations())
      return(genome_for_annotations())
    })
## line plot output table
output$line_plot_clustred_data <- DT::renderDataTable({
## map gene features if average line not asked
if(isolate(input$activate_avg_line_plot != "average")){
user_selected_species_annot <- ah_data_summary2 %>%
dplyr::filter(genome == lineplot_reference_annot())%>%
dplyr::select(gr_cols) %>%
tidyr::unnest(cols = gr_cols)
join_col_x <- base::colnames(line_plot_data_disply())[1] ## first column containing geneNames
join_col_y <- base::colnames(user_selected_species_annot)[7] ## "id" column
line_plot_data_to_show <- line_plot_data_disply()%>%
left_join(user_selected_species_annot, by = setNames(join_col_y, join_col_x)) %>%
dplyr::select(1,c("seqnames", "start", "end", "strand", "description"), colnames(line_plot_data_disply())) %>%
dplyr::mutate_if(is.numeric, round, 4)%>%
dplyr::mutate_all(funs(replace_na(as.character(.),"--NA--")))
}else{
line_plot_data_to_show <- line_plot_data_disply()
}
return(line_plot_data_to_show %>% dplyr::arrange(clust) %>%
dplyr::mutate_if(is.numeric , round , 3))
},
selection = "none",
server = F,
extensions = "Buttons",
options = list(
scrollX = TRUE,
dom = "Blfrtip",
searchHighlight = TRUE,
buttons =
list("copy", list(
extend =
"collection", buttons =
c("csv", "excel", "pdf"), text = "Download"
)), # end of buttons customization
# customize the length menu
lengthMenu = list(
c(10, 50, 100, 500, -1) # declare values
, c(10, 50, 100, 500, "All") # declare titles
),
pageLength = 10
) ## end of options
)
    ## Wire the line plot into the shared export module (file downloads).
    callModule(module = export_plot ,id = "export_line_plot" ,file_name = "line_plot" , plot = final_line_plot)
    ## Functional analysis of the line-plot clusters: gene identifiers (column 1
    ## of the display data) split by cluster label, against the line plot's own
    ## reference-annotation snapshot.
    callModule(module = functional_analysis_server , id = "lineplot_functional_analysis_ui" ,
               ui_id = "lineplot_functional_analysis_ui",
               session = session,
               gene_set = reactive(split(x = line_plot_data_disply()[[1]], ## gene names
                                         f = line_plot_data_disply()$clust)) ,
               genome = reactive({lineplot_reference_annot()}))
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# PCA plot server----
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## check whether x and y are numeric type
pca_plot_vars_validated <- eventReactive(input$generate_pca_plot, {
req(input$pca_plot_vars)
withProgress(message = "validate data" , {
incProgress(0.3)
selected_vars <- plot_data() %>% dplyr::select(input$pca_plot_vars)
col_class <- plot_data() %>% dplyr::select(input$pca_plot_vars) %>%
dplyr::summarise_all(is.numeric) %>%
tidyr::gather()
incProgress(0.5)
if (!all(col_class$value)) {
non_numeric_cols <- col_class %>% dplyr::filter(!value)
shinyWidgets::sendSweetAlert(
session = session,
title = "Error...",
text = paste0(paste0(non_numeric_cols$key, collapse = " "), " must be of type numeric", collapse = ""),
type = "error"
)
return(FALSE)
}
incProgress(0.7)
if(length(input$pca_plot_vars) == 1) {
shinyWidgets::sendSweetAlert(
session = session,
title = "Error...",
text = paste0("Select atleast two columns to perfrom PCA."),
type = "error"
)
return(FALSE)
}
else {
return(TRUE)
}
incProgress(1)
})
})
## update x variables
observe({
req(plot_data())
# update x
updatePickerInput(
session = session,
inputId = "pca_plot_vars",
choices = base::colnames(plot_data())[-1],
selected = base::colnames(plot_data())[2]
)
})
    ## Data subset for the user-selected gene group (shared module); refreshed
    ## only when the "generate PCA plot" button is pressed.
    pca_plot_user_selected_gene_group_data <- callModule(module = gene_group_selection ,
                                                         id = "pca_plot_gene_group_selection" ,
                                                         gene_group_info = reactive(group_info$row_groups),
                                                         generate_plot_action = reactive(input$generate_pca_plot),
                                                         current_session_data_matrix = plot_data)
    ## Prepare the PCA input matrix: rows are sample names, columns are genes.
    ## Columns whose values are all zero are dropped (they carry no variance).
    pca_input_matrix <- eventReactive(eventExpr = input$generate_pca_plot,{
      req(pca_plot_vars_validated() , pca_plot_user_selected_gene_group_data())
      withProgress(message = "Prepare PCA data",{
        incProgress(0.3)
        plot_data_t <- pca_plot_user_selected_gene_group_data() %>%
          dplyr::select(1, input$pca_plot_vars) %>%
          dplyr::select_if(function(.) {
            !all(. == 0)
          }) %>% ## drop samples whose values are all zero
          tidyr::gather(key = sname, value, -1) %>%
          tidyr::spread(1, value) ## transpose: genes become columns, samples become rows
        ## convert the tibble to a data.frame with sample rownames (prcomp input)
        pca_input_data <- plot_data_t %>%
          as.data.frame() %>%
          column_to_rownames("sname")
      })
      ## the assignment above persists outside withProgress() because the
      ## expression is evaluated in this reactive's environment
      return(pca_input_data)
    })
    ## Perform PCA on the sample-by-gene matrix. Returns the prcomp object, or
    ## NULL (after alerting the user) if prcomp() fails.
    pca_plot_pr_comp <- eventReactive(eventExpr = input$generate_pca_plot,{
      req(pca_input_matrix())
      withProgress(message = "Perform PCA" , {
        # perform pca
        pr <- tryCatch({
          prcomp(pca_input_matrix()) ## samples are rows, genes are columns
        } , error = function(x){
          sendSweetAlert(session = session ,title = "Error while performing PCA" ,
                         text = tags$h4("Error message from function :" ,
                                        tags$code("prcomp()"),br(),br(), tags$code(x)) ,
                         type = "error")
          return(NULL)
        })
      })
      return(pr)
    })
    ## K-means clustering of samples for PCA coloring. Returns a two-column
    ## tibble (sname, kmeans_clust); if kmeans() fails (e.g. more centers than
    ## samples) the user is alerted and all samples fall into one group.
    pca_data_sample_kmeans <- reactive({
      req(pca_input_matrix())
      validate(
        need(is.numeric(input$pca_sample_sample_kmeans_n) &&
               (input$pca_sample_sample_kmeans_n > 0 ) , message = "Number of cluster must be numeric")
      )
      ## perform k-means (fixed seed for reproducible cluster assignment)
      km_data <- tryCatch({
        set.seed(12345)
        km <- kmeans(pca_input_matrix() , centers = input$pca_sample_sample_kmeans_n )
        tt <- tibble::tibble(sname = rownames(pca_input_matrix()) , kmeans_clust = km$cluster )
      }, error = function(x){
        sendSweetAlert(session = session ,title = "PCA sample K-means error!!!" ,
                       text = tags$h4("Putting all samples in one group.",br(),
                                      "Error message from function :" ,
                                      tags$code("kmeans()"),br(),br(), tags$code(x)) ,
                       type = "error")
        ## fallback: a single catch-all cluster so the plot can still render
        tt <- tibble::tibble(sname = rownames(pca_input_matrix()) , kmeans_clust = "All samples")
        return(tt)
      })
      return(km_data)
    })
    ## Build axis labels that attach the % variance explained to each PC.
    ## Returns a named character vector: names are plain PC ids ("PC1", ...),
    ## values are labels like "PC1 (42.3%)".
    my_pr_comp_names <- reactive({
      req(pca_plot_pr_comp())
      ## get the proportion of variance for each PC from summary(prcomp)$importance
      pc_prop_of_var <- summary(pca_plot_pr_comp())$importance %>%
        #summary(pr)$importance %>%
        as.data.frame() %>%
        rownames_to_column(var = "feature_type") %>%
        tidyr::gather(pc , value , -feature_type) %>%
        dplyr::filter(feature_type == "Proportion of Variance") %>%
        dplyr::select(-feature_type) %>%
        dplyr::mutate(value = round(value * 100 ,1)) %>% ## convert to percentage
        dplyr::mutate(with_var = paste(pc ," (" , value ,"%",")" , sep = "")) %>%
        dplyr::select(-value)
      ## return the labels as a named vector keyed by plain PC id
      pc_prop_of_var %>% dplyr::pull(with_var) %>% rlang::set_names( pc_prop_of_var$pc)
    })
    ## Tibble of PC scores: one row per sample, one column per PC (columns are
    ## renamed to the "% variance" labels), plus sname, the user-assigned sample
    ## group ("groups") and the k-means cluster ("kmeans_clust").
    pr_comp_derieved_tibble <- reactive({
      req(pca_plot_pr_comp())
      ## get the scores tibble; rename columns with '% of variance' attached
      pr_comp_tbl <- pca_plot_pr_comp()$x %>%
        as.data.frame() %>%
        dplyr::rename_all(~(my_pr_comp_names())) %>%
        tibble::rownames_to_column("sname") %>%
        as_tibble() ## sname column is created from the matrix rownames
      ## isolate() so edits to the global sample groups do not re-trigger
      ## rendering of an already-generated PCA plot
      isolate({
        pr_comp_tbl_grps <- pr_comp_tbl %>%
          left_join(group_info$column_groups , by = c (sname = "group_members")) %>% ## attach sample-group labels
          tidyr::replace_na(list(groups = "No groups assigned")) ## ungrouped samples get a placeholder label
      })
      ## attach the k-means cluster per sample
      pr_comp_tbl_grps <- pr_comp_tbl_grps %>%
        left_join(pca_data_sample_kmeans() , by = "sname") %>%
        tidyr::replace_na(list(kmeans_clust = "No groups assigned")) ## placeholder for unmatched samples
      return(pr_comp_tbl_grps)
    })
## PCA plot update show / hide sample groups. It allows user to exclude sample groups directly from PCA plot
observe({
req(pr_comp_derieved_tibble())
if(input$pca_plot_colour == "groups"){
shinyWidgets::updateMultiInput(inputId = "pca_plot_hide_sample_groups" , session = session,
choices = pr_comp_derieved_tibble() %>% pull("groups") %>% unique()
)
}
if(input$pca_plot_colour == "kmeans"){
shinyWidgets::updateMultiInput(inputId = "pca_plot_hide_sample_groups" , session = session,
choices = pr_comp_derieved_tibble() %>% pull("kmeans_clust") %>% unique()
)
}
})
    ### Remove user-excluded groups before the final PCA plot.
    pca_data_after_groups_excluded_by_user <- reactive({
      #req(input$pca_plot_hide_sample_groups)
      user_excluded_groups <- input$pca_plot_hide_sample_groups
      ## NOTE(review): !xor(a, b) keeps rows where BOTH or NEITHER of
      ## groups/kmeans_clust match the excluded set — i.e. a row whose sample
      ## group AND k-means cluster are both excluded is kept. The likely intent
      ## is to drop rows whose *active* grouping is excluded; confirm before
      ## changing, as both color modes share this one filter.
      plot_data <- pr_comp_derieved_tibble() %>%
        dplyr::filter(! xor((groups %in% user_excluded_groups),(kmeans_clust %in% user_excluded_groups)))
      ## require at least one remaining sample group; this guards against the
      ## user excluding every group at once
      validate(
        need(plot_data %>% nrow() > 1 , "Atleast one sample group must be selected" )
      )
      return(plot_data)
    })
## update PC choices
observe({
req(my_pr_comp_names())
available_pc <- my_pr_comp_names()
default_pc <- available_pc[1:2] ## default PCs
updateSelectizeInput(session = session ,
inputId = "select_pcs_to_plot" ,
choices = available_pc %>% unname(),
selected = default_pc ,server = TRUE)
})
## get names of PC columns
pcs_to_plot <- reactive({
validate(
need(input$select_pcs_to_plot %>% length() == 2 , "Select 2 Pcs.")
)
return(input$select_pcs_to_plot)
})
    ## Build the base PCA scatter plot (ggplot): points for the two selected
    ## PCs, optional sample-name labels, and one of three color modes
    ## (sample groups / k-means clusters / a single user-picked color).
    pca_gp <- reactive({
      req(all(pcs_to_plot() %in% my_pr_comp_names()))
      withProgress(message = "Preparing PCA plot",{
        incProgress(0.5)
        ## scores + group information, with user-excluded groups removed
        plot_data <- pca_data_after_groups_excluded_by_user()
        #print("PCA plot data")
        #print(plot_data %>% dplyr::select(sname, groups))
        ## PCs to plot
        pc_x = pcs_to_plot()[1]## PC1
        pc_y = pcs_to_plot()[2] ## PC2
        ## as.symbol() keeps the "%"-containing column labels intact
        ## NOTE(review): aes_string() is deprecated in recent ggplot2 — confirm
        ## the pinned ggplot2 version before modernizing to .data[[...]]
        pca_gp <- ggplot(data = plot_data ,
                         aes_string(x = as.symbol(pc_x), y = as.symbol(pc_y) )) +
          geom_point(size = input$pca_sample_dot_size) #+ theme_bw()
        ## optionally label each point with its sample name
        if(input$pca_display_sample_names) {
          pca_gp <- pca_gp +
            ggrepel::geom_text_repel(aes(label = sname),
                                     size = input$pca_plot_sample_names_size)
        }
        ### color by user-assigned sample groups
        if(input$pca_plot_colour == "groups") {
          pca_gp <- pca_gp + aes(color = groups) +
            scale_color_manual(values = get_gg_colors(pr_comp_derieved_tibble() %>%
                                                        pull(groups) %>%
                                                        as_factor() %>% levels()) )
        }
        ## color by sample k-means clusters
        if(input$pca_plot_colour == "kmeans") {
          pca_gp <- pca_gp + aes(color = factor(kmeans_clust)) +
            scale_color_manual(values = get_gg_colors(pr_comp_derieved_tibble() %>%
                                                        dplyr::pull(kmeans_clust) %>%
                                                        forcats::as_factor() %>% levels()))
        }
        ## single color for all samples; breaks = NULL suppresses the legend
        if(input$pca_plot_colour == "identical") {
          pca_gp <- pca_gp + aes(col = "") +
            scale_color_manual( breaks = NULL , values = input$pca_sample_name_color)
        }
        incProgress(1)
      })
      return(pca_gp)
    })
    ## Decorate the PCA plot: titles/axis labels via the shared decoration
    ## module, then widen both axes by 20% so repelled labels fit.
    final_pca_plot <- reactive({
      req(pca_gp())
      withProgress(message = "Decorate PCA plot",{
        incProgress(0.5)
        pca_gp <- callModule(module = plot_title_and_axis_label_server ,
                             id = "decorate_pca_plot" ,
                             my_ggplot = pca_gp() ,
                             axis_x_title = pca_gp()$labels$x,
                             axis_y_title = pca_gp()$labels$y ,
                             x_tick_angle = 0,
                             color_legend_title = input$pca_plot_colour)
        ## NOTE(review): expand_scale() is deprecated since ggplot2 3.3 in
        ## favor of expansion() — still works, but worth migrating
        pca_gp <- pca_gp +
          scale_x_continuous(expand = expand_scale(mult = 0.2)) + # expand x scale
          scale_y_continuous(expand = expand_scale(mult = 0.2)) # expand y scale
        incProgress(1)
        return(pca_gp)
      })
    })
## render pca
output$pca_plot <- renderPlot({
req(final_pca_plot())
final_pca_plot()
#return(print(final_pca_plot()))
},
height = function() {
req(final_pca_plot())
return(session$clientData$output_pca_plot_width)
}, ## dynamic height
width = function() {
req(final_pca_plot())
return(session$clientData$output_pca_plot_width)
}, res = 96
) ## dynamic width
    ## Wire the PCA plot into the shared export module (file downloads).
    callModule(module = export_plot , id = "export_pca_plot" , file_name = "pca_plot" , plot = final_pca_plot)
## current pca plot SRA sample info, update only when generate pca plot submit button hit
current_pca_plot_sra_sample_info <- eventReactive(eventExpr = input$generate_pca_plot,{
return(user_selected_sra_id_sample_info())
})
    ### PCA plot brush data: sample info for the points the user drag-selected.
    pca_brushed_data <- eventReactive(input$pca_plot_brush , {
      pc_x = pcs_to_plot()[1] ## PC1
      pc_y = pcs_to_plot()[2] ## PC2
      ## subset the plotted data to the brushed points
      bp <- brushedPoints(df = pca_data_after_groups_excluded_by_user() ,
                          brush = input$pca_plot_brush ,
                          xvar = pc_x,
                          yvar = pc_y)
      ## map the brushed sample names to their SRA metadata, dropping columns
      ## not useful in the on-screen table
      display_pca_data <- current_pca_plot_sra_sample_info() %>%
        dplyr::filter(run_accession %in% bp$sname) %>%
        dplyr::select(-c("reference_annot" , "taxon_id","scientific_name" , "instrument_model" , "updated_date"))
      return(display_pca_data)
    })
## render PCA brushed data
output$pca_brushed_datatable <- DT::renderDataTable({
req(pca_brushed_data())
return(pca_brushed_data() %>%
dplyr::mutate_all(funs(replace_na(as.character(.),"--NA--"))) %>%
dplyr::mutate_if(is.numeric , round , 2))
},selection = "none",
server = F,
extensions = c("Buttons"),
options = list(
deferRender = TRUE,
scrollX = TRUE,
dom = "Blfrtip",
searchHighlight = TRUE,
columnDefs = list(
list(targets = c(6:15), visible = FALSE) # The column number must be identical to the columns in colvis extension
),
buttons =
list("copy",
list(extend = "collection",
buttons = c("csv", "excel", "pdf"), text = "Download"
),
list(extend = "colvis",
columns = c(6:15)
)), # end of buttons customization
# customize the length menu
lengthMenu = list(
c(10, 50, 100, 500, -1) # declare values
, c(10, 50, 100, 500, "All") # declare titles
),
pageLength = 10
)
)
## pca plot brushed data table status , decide whether should be displayed or not
output$pac_plot_brushed_data_table_status <- reactive({
req(pca_brushed_data())
return(TRUE)
})
outputOptions(output,name = "pac_plot_brushed_data_table_status" , suspendWhenHidden = FALSE)
## pca plot condition panel:show only if variable (x, y) validated and displayed on UI
output$pca_plot_status <- reactive({
req(pca_plot_pr_comp())
return(TRUE)
})
outputOptions(output, "pca_plot_status", suspendWhenHidden = FALSE)
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# heatmap server----
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## check whether x and y are numeric type
heatmap_parmas_validated <- eventReactive(input$generate_heatmap, {
req(input$heatmap_vars)
selected_vars <- plot_data() %>% dplyr::select(input$heatmap_vars)
col_class <- plot_data() %>% dplyr::select(input$heatmap_vars) %>%
dplyr::summarise_all(is.numeric) %>%
tidyr::gather()
withProgress(message = "Validating inputs", {
incProgress(1/10)
if (!all(col_class$value)) {
non_numeric_cols <- col_class %>% dplyr::filter(!value)
shinyWidgets::sendSweetAlert(
session = session,
title = "Error...",
text = paste0(paste0(non_numeric_cols$key, collapse = " "), " must be of type numeric", collapse = ""),
type = "error"
)
return(FALSE)
}
incProgress(2/10)
## atleast two vars req to generate hm
if(length(input$heatmap_vars) == 1) {
shinyWidgets::sendSweetAlert(
session = session,
title = "Error...",
text = paste0("Select atleast two columns to draw heatmap"),
type = "error"
)
return(FALSE)
}
incProgress(3/10)
# check if number of genes given are of type numeric
if ((!is.numeric(input$heatmap_top_n_genes)) & (input$hm_genes_selection == "top_variable_genes")) {
sendSweetAlert(
session = session,
title = "Error...",
text = "'# top variable genes to show' must be numeric",
type = "error"
)
return(NULL)
}
incProgress(4/10)
## check if user given color scale min is valid
if (!is.numeric(input$heatmap_scale_min)) {
sendSweetAlert(
session = session,
title = "Error...",
text = "Heatmap color scale (minimum) must be numeric value",
type = "error"
)
return(NULL)
}
incProgress(5/10)
## check if user given color scale max valid or not
if (!is.numeric(input$heatmap_scale_max)) {
sendSweetAlert(
session = session,
title = "Error...",
text = "Heatmap color scale (maximum) must be numeric value",
type = "error"
)
return(NULL)
}
incProgress(6/10)
## check if number of cluster given are of type numeric.
if (!is.numeric(input$heatmap_row_nclust) || !is.numeric(input$heatmap_coulm_nclust)) {
sendSweetAlert(
session = session,
title = "Error...",
text = "'# of clusters (column / row k-means)' must be of type numeric",
type = "error"
)
return(NULL)
}
incProgress(7/10)
## check if number of cluster given are > 0 and <= total rows in the data or
if (input$heatmap_row_nclust == 0 ||
input$heatmap_coulm_nclust == 0 ||
input$heatmap_row_nclust >= nrow(plot_data()) ||
input$heatmap_row_nclust >= input$heatmap_top_n_genes ||
input$heatmap_coulm_nclust >= ncol(plot_data()) ||
input$heatmap_coulm_nclust >= length(input$heatmap_vars)
) {
sendSweetAlert(
session = session,
title = "Error...",
text = "'# of clusters (column / row k-means)' must be > 0 and < total rows (row clusters) or total columns (column clusters)",
type = "error"
)
return(NULL)
}
else {
return(TRUE)
}
incProgress(10/10)
})
})
## heatmap conditional panel
output$heatmap_status <- reactive({
req(heatmap_to_display())
return(TRUE)
})
outputOptions(output, "heatmap_status", suspendWhenHidden = FALSE)
## update x variables
observe({
req(plot_data())
## update x
updatePickerInput(
session = session,
inputId = "heatmap_vars",
choices = base::colnames(plot_data())[-1],
selected = base::colnames(plot_data())[2]
)
})
    ## Data subset for the user-selected gene group (shared module); refreshed
    ## only when the "generate heatmap" button is pressed.
    heatmap_user_selected_gene_group_data <- callModule(module = gene_group_selection ,
                                                        id = "heatmap_select_gene_groups" ,
                                                        gene_group_info = reactive(group_info$row_groups),
                                                        generate_plot_action = reactive(input$generate_heatmap),
                                                        current_session_data_matrix = plot_data)
    ## Prepare heatmap data. Subsets to the selected gene group / top variable
    ## genes, performs row-wise clustering, restores the uploaded row order,
    ## derives the row/column split labels and per-gene standard deviations.
    ## Returns a list: mat (display matrix, genes x samples), clust (row split
    ## labels), column_clusters (column split labels), row_sd (one-column SD
    ## matrix aligned to mat's rows). Returns NULL (after alerting) on error.
    heatmap_top_variable_genes_df <- eventReactive(eventExpr = input$generate_heatmap, {
      req(heatmap_parmas_validated(), heatmap_user_selected_gene_group_data())
      withProgress(message = "Preparing heatmap data " , {
        incProgress(0.1)
        # fix the number of genes to plot; default is all genes (if a gene group is selected, all genes of that group are plotted)
        num_of_genes_for_hm <- nrow(heatmap_user_selected_gene_group_data())
        if (input$hm_genes_selection == "top_variable_genes") {
          ## if the requested count exceeds the available genes, plot all genes
          if (nrow(heatmap_user_selected_gene_group_data()) < input$heatmap_top_n_genes) {
            num_of_genes_for_hm <- nrow(heatmap_user_selected_gene_group_data())
          } else {
            num_of_genes_for_hm <- input$heatmap_top_n_genes
          }
        }
        # hard cap: at most 20k genes in one heatmap
        max_hm_genes <- 20000
        if (num_of_genes_for_hm > max_hm_genes) {
          sendSweetAlert(
            session = session,
            title = "Error...",
            text = paste("Number of genes cannot be higher than ", max_hm_genes, ".", sep = ""),
            type = "error"
          )
          return(NULL)
        }
        incProgress(0.3 , message = "row wise clustering")
        ## get row clusters, z-scores and SDs (tibble_to_row_clusters is a
        ## project helper; returns list with raw_value / zscore / clusters / std_dev)
        clustered <- tryCatch({
          tibble_to_row_clusters(x = heatmap_user_selected_gene_group_data(),
                                 row_ident = 1 ,
                                 cols_to_use = input$heatmap_vars,
                                 use_z_score_for_clustering = ifelse(input$heatmap_cluster_value_type == 'zscore' , TRUE, FALSE),
                                 #use_z_score_for_clustering = input$hm_use_zscore ,
                                 num_of_top_var_rows = ifelse(input$hm_genes_selection == 'top_variable_genes',
                                                              num_of_genes_for_hm , -1),
                                 nclust = as.numeric(input$heatmap_row_nclust)
          )
        } , error = function(x){
          return(x)
        })
        ## clustering failed: alert and abort
        if(inherits(clustered , "error")){
          sendSweetAlert(
            session = session,
            title = "Error...",
            text = paste0("Row k-means error : ", clustered, collapse = " "),
            type = "error"
          )
          return(NULL)
        }
        ## choose which value to display on the plot: z-score or raw value
        if(input$heatmap_display_value_type == "zscore"){
          heatmap_data <- clustered$zscore
          #print(heatmap_data)
        } else{
          heatmap_data <- clustered$raw_value
          #print(heatmap_data)
        }
        ##@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
        ## Restore the originally uploaded row order so the heatmap can show genes
        ## in the same order the user supplied. Keep in mind:
        ## 1) tibble_to_row_clusters returns raw data, z-scores and row clusters;
        ##    if the row order of the heatmap matrix (z-score or raw) changes,
        ##    the row-cluster vector must be reordered identically.
        ##@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
        gene_name_col_names <- plot_data_original_row_order() %>% colnames() %>% .[1]
        row_order_col_name <- plot_data_original_row_order() %>% colnames() %>% .[2]
        heatmap_data <- heatmap_data %>%
          left_join(plot_data_original_row_order() , by = setNames(gene_name_col_names , colnames(heatmap_data)[1])) %>%
          dplyr::arrange(.data[[row_order_col_name]]) %>% ## arrange by original gene order
          dplyr::select(-.data[[row_order_col_name]]) ## drop the helper order column
        ## reorder the row-cluster assignments the same way (see note above)
        row_splt <- clustered$clusters %>%
          left_join(plot_data_original_row_order() , by = setNames(gene_name_col_names , colnames(clustered$clusters)[1])) %>%
          dplyr::arrange(.data[[row_order_col_name]]) %>%
          dplyr::select(-.data[[row_order_col_name]])
        ### which row clusters to use: k-means or user-defined gene groups?
        if(input$heatmap_row_clusters_choices == "kmeans") {
          row_splt <- row_splt%>%
            pull(2)
        } else if(input$heatmap_row_clusters_choices == "gene_groups"){
          ## map each heatmap row to its user-given gene group
          row_splt = heatmap_data %>%
            as_tibble() %>%
            dplyr::left_join(group_info$row_groups ,
                             by = setNames(colnames(group_info$row_groups)[2] ,
                                           colnames(heatmap_data)[1])) %>%
            pull(colnames(group_info$row_groups)[1])
          ## set factor levels in the original order of the user-supplied groups
          row_splt <- row_splt %>%
            as.factor() %>%
            fct_relevel(., group_info$row_groups %>%
                          pull(1) %>% unique())
        }
        ## number of genes in each cluster
        ## NOTE(review): gene_count is never used below — the counts are
        ## recomputed via add_count() in the next step
        gene_count <- table(row_splt)
        ## build display names for the row clusters: "<prefix><id>:(N=<count>)"
        row_splt <- row_splt %>%
          tibble(x = . ) %>%
          dplyr::add_count(x) %>%
          dplyr::mutate(cluster_name = paste(input$hm_cluster_prefix , x ,":(N=", n,")" , sep = "")) %>%
          pull(cluster_name)
        ## convert tibble to data frame
        heatmap_data <- heatmap_data %>%
          as.data.frame()
        ## move gene names into rownames (matrix-like input for ComplexHeatmap)
        rownames(heatmap_data) <- heatmap_data[[1]]
        heatmap_data <- heatmap_data[-1]
        incProgress(0.7 , message = "column wise clustering")
        ## which column clusters to use: k-means or user-defined sample groups?
        if(input$heatmap_column_clusters_choices == "kmeans"){ #
          column_splt <- tryCatch({
            set.seed(1234)
            column_km_out <- kmeans(t(heatmap_data) ,centers = input$heatmap_coulm_nclust)
            column_km_out$cluster
          } , error = function(x){
            return(x)
          })
        } else if(input$heatmap_column_clusters_choices == "sample_groups"){ # use sample groups as column clusters
          ## map heatmap column names to the user-supplied group names
          column_splt_ss = heatmap_data %>%
            colnames() %>%
            tibble(sname = .) %>%
            left_join(group_info$column_groups , by = c (sname = "group_members")) #%>%
          ## order samples by the user-supplied column order
          column_splt_ss <- column_splt_ss %>%
            dplyr::mutate(sname = forcats::fct_relevel(as.character(sname) , group_info$column_groups %>% pull(2) %>% unique() ))
          ## named vector: values are group names, names are heatmap column names
          column_splt <- column_splt_ss %>%
            pull(colnames(group_info$column_groups)[1]) %>% ## pull user supplied group columns
            rlang::set_names( . ,column_splt_ss$sname)
          ## rearrange the heatmap columns to match the new split order
          heatmap_data <- heatmap_data %>% dplyr:: select(names(column_splt))
        }
        ## build display names for the column clusters: "<prefix><id>\n(N=<count>)"
        ## NOTE(review): if column k-means failed above, column_splt is an error
        ## object here and this pipeline will fail BEFORE the inherits() check
        ## below — the error check should precede this step; confirm and fix.
        column_splt <- column_splt %>%
          tibble(x = .) %>%
          dplyr::add_count(x) %>%
          dplyr::mutate(cluster_name = paste(input$hm_column_cluster_prefix , x ,"\n(N=", n,")" , sep = "")) %>%
          pull(cluster_name) %>%
          set_names(names(column_splt)) ## preserve original names if present
        ## handle column split error
        if(inherits(column_splt , "error")){
          sendSweetAlert(
            session = session,
            title = "Error...",
            text = paste0("Column k-means error : ", column_splt, collapse = " "),
            type = "error"
          )
          return(NULL)
        }
        incProgress(0.9 , message = "final data")
        ## assemble the result list consumed by the heatmap reactives
        rr= NULL
        rr$mat <- heatmap_data
        rr$clust <- row_splt
        rr$column_clusters <- column_splt
        ## one-column SD matrix aligned to mat's row order
        rr$row_sd <- rr$mat %>%
          rownames %>%
          tibble(geneName = . ) %>%
          left_join(clustered$std_dev , by =c("geneName" = colnames(clustered$std_dev)[1])) %>%
          as.data.frame() %>%
          column_to_rownames("geneName") %>% as.matrix()
        incProgress(0.1)
        return(rr)
      })
    })
## update heatmap legend range
observe({
#req(heatmap_top_variable_genes_df())
if(input$heatmap_display_value_type == "zscore"){
updateNumericInput(session = session ,
inputId = "heatmap_scale_min",
value = -1.5)
updateNumericInput(session = session ,
inputId = "heatmap_scale_max",
value = 1.5)
}
if(input$heatmap_display_value_type != "zscore"){
updateNumericInput(session = session ,
inputId = "heatmap_scale_min",
value = 0)
updateNumericInput(session = session ,
inputId = "heatmap_scale_max",
value = 10)
}
})
    ## Build the main ComplexHeatmap object from the prepared data: color ramp
    ## (user-defined, with a fallback), optional column annotations (sample
    ## groups and/or per-column boxplots), row/column splits and all display
    ## toggles taken from the UI inputs.
    heatmap <- eventReactive(input$generate_heatmap, {
      req(heatmap_top_variable_genes_df())
      withProgress(message = "Preparing heatmap",{
        incProgress(0.5)
        # generate heatmap
        hm_row_threshold <- 2000 ## above this many genes, row names are disabled
        # ## heatmap colors: 3-point ramp over the user-chosen scale bounds
        hm_colors <- tryCatch({
          circlize::colorRamp2(
            breaks = seq(from = input$heatmap_scale_min,
                         to = input$heatmap_scale_max , length.out = 3),
            colors = c(input$heatmap_col_low, input$heatmap_col_medium, input$heatmap_col_high)
          )
        }, error = function(e){
          ## warn the user if the color function could not be built
          sendSweetAlert(session = session , title = "Warning" ,
                         type = "warning" ,
                         text = tags$h4("Error while generating user defined colors",
                                        br(),
                                        "Error from" , tags$code("circlize::colorRamp2()"),
                                        br(),
                                        tags$code(e),br(),
                                        "Default scale (-2 to 2) will be used."
                         ))
          ## fallback: fixed -2..2 scale with the user's colors
          circlize::colorRamp2(
            breaks = seq(from = -2,
                         to = 2 , length.out = 3),
            colors = c(input$heatmap_col_low, input$heatmap_col_medium, input$heatmap_col_high)
          )
        })
        ## prepare column annotations
        column_ha = NULL ## default: no column annotations
        ## boxplot and sample-group annotations
        if(!is.null(input$heatmap_anno_type)){ ## at least one annotation selected
          ## label used for samples without an assigned group
          no_grp_label <- "NA"
          ## group label per heatmap column
          hm_selected_column_groups <- heatmap_top_variable_genes_df()$mat %>%
            colnames() %>%
            tibble(hm_cols = . ) %>%
            left_join(group_info$column_groups , by = c(hm_cols = "group_members")) %>%
            pull(groups) %>%
            replace_na(no_grp_label)
          ### colors for the group annotation
          grp_colors <- get_gg_colors(x = hm_selected_column_groups %>% as_factor() %>% levels())
          ## per-column boxplot annotation
          column_box_anno <- ComplexHeatmap::anno_boxplot(heatmap_top_variable_genes_df()$mat ,
                                                          axis = T ,
                                                          border = T ,
                                                          axis_param = list(gp = grid::gpar(fontsize = 8),
                                                                            side = "left",
                                                                            facing = "outside"
                                                          ),
                                                          height = unit(input$heatmap_top_annot_height,"cm")
          )
          ## both group and boxplot annotations requested
          if(all(c("show_heatmap_column_groups" , "show_heatmap_column_boxplot") %in% input$heatmap_anno_type)){ ## all annotations
            column_ha = ComplexHeatmap::HeatmapAnnotation(Groups = hm_selected_column_groups ,
                                                          Box = column_box_anno,
                                                          col = list(Groups = grp_colors)
            )
            ## group annotation only
          }else if(input$heatmap_anno_type == "show_heatmap_column_groups") {## sample group annotations
            column_ha = ComplexHeatmap::HeatmapAnnotation(Groups = hm_selected_column_groups , col = list(Groups = grp_colors))
            ## boxplot annotation only
          }else if(input$heatmap_anno_type == "show_heatmap_column_boxplot"){ ## box plot annotations
            column_ha = ComplexHeatmap::HeatmapAnnotation(box = column_box_anno)
          }
        }
        ## final heatmap object
        hm <- ComplexHeatmap::Heatmap(heatmap_top_variable_genes_df()$mat,
                                      #row_title = "cluster_%s",
                                      cluster_column_slices = F,#input$hm_cluster_column_slice %>% as.logical() , ## keep column clusters in their given order
                                      column_split = heatmap_top_variable_genes_df()$column_clusters %>%
                                        factor(., levels = gtools::mixedsort(unique(.))),
                                      show_column_names = input$show_hm_colum_names %>% as.logical(),
                                      show_column_dend = input$show_hm_column_dend %>% as.logical(),
                                      cluster_columns = input$hm_cluster_columns %>% as.logical(),
                                      column_names_gp = grid::gpar(fontsize = input$hm_column_names_font_size),
                                      top_annotation = column_ha,
                                      column_title_rot = 90,
                                      column_title_gp = grid::gpar(fontsize = 10),
                                      # if the SD heatmap AND sort-by-SD are both on, turn off row clustering
                                      cluster_rows = ifelse(input$sort_hm_by_std_dev %>% as.logical() &&
                                                              input$show_std_dev_hm %>% as.logical() , FALSE , as.logical(input$hm_cluster_rows)),
                                      # row names are disabled above hm_row_threshold genes
                                      show_row_names = ifelse(nrow(heatmap_top_variable_genes_df()$mat) <= hm_row_threshold, as.logical(input$show_hm_row_names), FALSE),
                                      show_row_dend = input$show_hm_row_dend %>% as.logical() ,
                                      #row_split = paste0("Clust_",heatmap_top_variable_genes_df()$clust),
                                      row_split = heatmap_top_variable_genes_df()$clust %>% factor(., levels = gtools::mixedsort(unique(.))) ,
                                      row_names_gp = grid::gpar(fontsize = input$hm_row_names_font_size),
                                      ## explicit row order by SD only when sorting by SD with the SD heatmap shown
                                      row_order = if(all(input$sort_hm_by_std_dev %>% as.logical() , input$show_std_dev_hm %>% as.logical())) order(heatmap_top_variable_genes_df()$row_sd[,1]) else NULL,
                                      cluster_row_slices = F, #input$hm_cluster_row_slice,
                                      row_title_rot = 0,
                                      row_title_gp = grid::gpar(fontsize = 10),
                                      name = ifelse(input$heatmap_display_value_type == "zscore" , "zscore", input$heatmap_legend_name),
                                      heatmap_legend_param = list(legend_direction = input$heatmap_legened_direction),
                                      col = hm_colors,
                                      border = input$hm_border %>% as.logical()
                                      #width = unit(5 , "in"),
                                      #height = unit(5 , "in")
        )
        incProgress(1)
      })
      return(hm)
    })
    ## Companion one-column heatmap of per-gene standard deviations, split by
    ## the same row clusters as the main heatmap so the two align side by side.
    sd_heatmap <- eventReactive(input$generate_heatmap, {
      req(heatmap_top_variable_genes_df())
      ## same row-name threshold as the main heatmap
      hm_row_threshold = 2000
      sd_hm <- ComplexHeatmap::Heatmap(heatmap_top_variable_genes_df()$row_sd,
                                       row_split = heatmap_top_variable_genes_df()$clust,
                                       show_row_names = ifelse(nrow(heatmap_top_variable_genes_df()$mat) <= hm_row_threshold, as.logical(input$show_hm_row_names), FALSE),
                                       row_names_gp = grid::gpar(fontsize = input$hm_row_names_font_size),
                                       heatmap_legend_param = list(legend_direction = input$heatmap_legened_direction),
                                       name = "std_dev")
      return(sd_hm)
    })
## final heatmap : data heatmap or combined with SD heatmap
heatmap_to_display <- eventReactive(input$generate_heatmap , {
req(heatmap())
if(input$show_std_dev_hm %>% as.logical()) {
show_hm <- heatmap() + sd_heatmap() ## temparory SD heatmap de activted
}else{
show_hm <- heatmap()
}
return(show_hm)
})
## Render the combined heatmap. Width/height are computed dynamically from the
## matrix dimensions so large matrices get a proportionally larger canvas.
output$heatmap <- renderPlot({
#req(heatmap_to_display())
# heatmap display in progress
# withProgress(message = "Display heatmap in progress" , {
# incProgress(0.5)
# set.seed(123)
# #hm <- shiny::isolate(ComplexHeatmap::draw(heatmap_to_display() , heatmap_legend_side = input$heatmap_legend_pos))
# incProgress(1)
# })
return(heatmap_to_display())
},
# Dynamic height: a fixed base plus a user-controlled number of pixels per row.
height = function() {
req(heatmap_top_variable_genes_df())
minimum_height <- 400
ht <- minimum_height + (input$hm_row_height * nrow(heatmap_top_variable_genes_df()$mat))
return(ht)
}, ## dynamic height
# Dynamic width: a fixed base plus a user-controlled number of pixels per column.
width = function() {
req(heatmap_top_variable_genes_df())
minimum_width <- 500
wd <- minimum_width + (input$hm_col_width * ncol(heatmap_top_variable_genes_df()$mat))
return(wd)
}, res = 108
) ## dynamic width
# Export heatmap: wire up the shared base-graphics export module so the user
# can download the current heatmap from the UI.
callModule(module = export_base_graphics ,
id = "export_heatmap" ,
file_name = "heatmap" ,
# The plot is passed as a zero-argument function so the module controls when
# (and on which device) it is drawn, instead of drawing it here.
plot = as_mapper(~heatmap_to_display()), ## pass as a function does not open device locally
legend_pos = reactive(input$heatmap_legend_pos))
## Tabular version of the displayed heatmap: the original expression matrix,
## columns reordered to match the heatmap, annotated with the row cluster each
## gene was assigned to and with the per-gene standard deviation.
active_heatmap_data <- reactive({
  req(heatmap_to_display())
  # `ComplexHeatmap::row_order()` returns a plain vector when there is only a
  # single row cluster, but a named list otherwise. Normalise to a list so the
  # code below can treat both cases identically.
  if (heatmap()@matrix_param$row_split %>% pull(row_split) %>% unique() %>% length() == 1) { ## this can be improved
    clusts <- list("1" = ComplexHeatmap::row_order(heatmap()))
  } else {
    ## named list where each name is the cluster label displayed in the heatmap
    clusts <- ComplexHeatmap::row_order(heatmap())
  }
  ## Two-column tibble: cluster label + row number of the original data matrix
  ## given to ComplexHeatmap::Heatmap().
  ## (fix: data_frame()/as_data_frame()/bare unnest() are deprecated; use
  ## tibble()/as_tibble()/unnest(cols =) as done elsewhere in this file.)
  names(clusts) <- as.character(names(clusts))
  hm_clust <- tibble(clust = names(clusts), row_num = clusts) %>%
    tidyr::unnest(cols = row_num)
  ## column order as displayed in the heatmap
  hm_coloum_ord <- ComplexHeatmap::column_order(heatmap()) %>% unlist(use.names = FALSE)
  ## final data to be shown below the heatmap as "heatmap data"
  hm_data <- heatmap_top_variable_genes_df()$mat %>% ## the original data supplied to the heatmap
    as.data.frame() %>%
    dplyr::select(hm_coloum_ord) %>%
    rownames_to_column(var = "gene_name") %>%
    tibble::as_tibble() %>%
    dplyr::mutate(row_num = dplyr::row_number()) %>%
    right_join(hm_clust, by = "row_num") %>% ## attach cluster labels via row number
    dplyr::select(-row_num) %>%
    left_join(heatmap_top_variable_genes_df()$row_sd %>%
                as.data.frame() %>%
                tibble::rownames_to_column("gene_name"), by = "gene_name") ## add std dev
  return(hm_data %>%
    dplyr::mutate_if(is.numeric, round, 4))
})
## Genome annotation snapshot for the heatmap.
## The global genome-annotations object cannot be used directly. For example:
## the user has generated one heatmap, then (accidentally or on purpose)
## selects/uploads data for a different species. The global reference genome
## would update immediately, and the annotations shown for the existing heatmap
## would silently change with it.
## To prevent this, the heatmap keeps its own copy of the reference genome,
## refreshed only when the "generate heatmap" button is pressed.
heatmap_reference_annot <- eventReactive(input$generate_heatmap , {
req(genome_for_annotations())
return(genome_for_annotations())
})
## Render the heatmap data table, joining per-gene genome annotations
## (coordinates + description) for the species snapshotted when the heatmap
## was generated.
output$heatmap_data <- DT::renderDataTable({
  ## annotations for the species the heatmap was generated against
  user_selected_species_annot <- ah_data_summary2 %>%
    dplyr::filter(genome == heatmap_reference_annot()) %>%
    dplyr::select(gr_cols) %>%
    tidyr::unnest(cols = gr_cols)
  # Join keys are addressed positionally: the first column of the heatmap data
  # holds gene names; column 7 of the annotation table is the gene "id".
  join_col_x <- base::colnames(active_heatmap_data())[1] ## first column containing geneNames
  join_col_y <- base::colnames(user_selected_species_annot)[7] ## "id" column
  heat_map_data_to_show <- active_heatmap_data() %>%
    left_join(user_selected_species_annot, by = setNames(join_col_y, join_col_x)) %>%
    dplyr::select(1, c("seqnames", "start", "end", "strand", "description"), colnames(active_heatmap_data())) %>%
    # fix: funs() is deprecated -- equivalent lambda keeps the behaviour:
    # every column rendered as character, with NA shown as "--NA--"
    dplyr::mutate_all(~ replace_na(as.character(.), "--NA--"))
  return(heat_map_data_to_show)
}, rownames = TRUE,
selection = "none",
server = TRUE,
extensions = "Buttons",
options = list(
  scrollX = TRUE,
  dom = "Blfrtip",
  searchHighlight = TRUE,
  buttons =
    list("copy", list(
      extend =
        "collection", buttons =
        c("csv", "excel"), text = "Download"
    )), # end of buttons customization
  # customize the length menu
  lengthMenu = list(
    c(10, 50, 100, 500, -1) # declare values
    , c(10, 50, 100, 500, "All") # declare titles
  ),
  pageLength = 10
)
)
## Column-cluster membership tables (wide + long formats), rebuilt whenever a
## new heatmap is generated.
heatmap_column_cluster <- eventReactive(input$generate_heatmap, {
  req(heatmap())
  hm_column_labels <- heatmap()@column_names_param$labels
  hm_column_ord <- ComplexHeatmap::column_order(heatmap())
  # column_order() returns a bare vector when the columns form a single
  # cluster; normalise to a named list in that case.
  named_list_from_vector <- purrr::as_mapper(~
    if (!is_list(.x)) {
      list(`1` = .x)
    } else {
      .x
    })
  ## given a list of char vectors, collapse each element into one comma-separated string
  colum_list_to_chr = as_mapper(~map_chr(., paste0, collapse = ","))
  hm_column_ord_lst <- named_list_from_vector(hm_column_ord)
  ## wide format: one row per cluster, member labels collapsed into one string
  wide <- tibble(cluster = names(hm_column_ord_lst), column_order = hm_column_ord_lst) %>%
    tidyr::unnest(cols = column_order) %>% # fix: bare unnest() is deprecated
    dplyr::mutate(column_labels = hm_column_labels[column_order]) %>%
    dplyr::select(-column_order) %>%
    dplyr::group_by(cluster) %>%
    dplyr::summarise(column_labels = list(column_labels)) %>%
    dplyr::mutate(count = lengths(column_labels)) %>%
    dplyr::mutate(column_labels = colum_list_to_chr(column_labels)) %>%
    dplyr::select(cluster, count, column_labels) %>%
    dplyr::slice(match(names(hm_column_ord_lst), .$cluster)) ## arrange clusters in original heatmap order
  ## given a char vector, split each element on "," back into a vector
  colum_chr_to_list = as_mapper(~map(., function(.) { strsplit(., split = ",") %>% unlist() }))
  ## long format: one row per (cluster, column label)
  long <- wide %>%
    dplyr::mutate(column_labels = colum_chr_to_list(column_labels)) %>%
    tidyr::unnest(cols = column_labels)
  list(wide = wide, long = long)
})
## Row-cluster membership tables (wide + long formats), derived from the
## heatmap data table.
heatmap_row_cluster <- eventReactive(input$generate_heatmap, {
  req(active_heatmap_data())
  ## get data from reactive elems
  active_hm_data <- active_heatmap_data()
  # first column = gene name; "clust" = row-cluster assignment
  cols_of_interest <- c(colnames(active_hm_data)[1], "clust")
  wide <- active_hm_data %>%
    # fix: selecting with a bare external character vector is ambiguous in
    # newer dplyr; one_of() makes the external-vector intent explicit
    dplyr::select(dplyr::one_of(cols_of_interest)) %>%
    dplyr::group_by(clust) %>%
    dplyr::summarise(row_labels = list(!!as.symbol(cols_of_interest[1]))) %>%
    dplyr::mutate(count = lengths(row_labels)) %>%
    dplyr::mutate(row_labels = map_chr(row_labels, ~paste0(.x, collapse = ","))) %>%
    dplyr::select(clust, count, row_labels) %>%
    dplyr::slice(match(active_hm_data$clust %>% unique(), .$clust)) ## arrange in original order
  ## given a char vector, split each element on "," back into a vector
  colum_chr_to_list = as_mapper(~map(., function(.) { strsplit(., split = ",") %>% unlist() }))
  long <- wide %>%
    dplyr::mutate(row_labels = colum_chr_to_list(row_labels)) %>%
    tidyr::unnest(cols = row_labels)
  list(wide = wide, long = long)
})
# Render the row- or column-side cluster membership table, in the format
# (wide/long) currently chosen by the user.
output$heatmap_display_cluster_data <- DT::renderDataTable({
  cluster_tables <- switch(input$heatmap_cluster_type,
    show_hm_row_side_clusters = heatmap_row_cluster(),
    show_hm_column_side_clusters = heatmap_column_cluster()
  )
  # Unknown selection: render nothing (matches the original if/else-if chain).
  if (is.null(cluster_tables)) {
    return(NULL)
  }
  cluster_tables[[input$heatmap_cluster_data_format]]
}, rownames = TRUE,
selection = "none",
server = TRUE,
extensions = "Buttons",
options = list(
  scrollX = TRUE,
  dom = "Blfrtip",
  searchHighlight = TRUE,
  # copy button plus a csv/excel download dropdown
  buttons =
    list("copy",
      list(extend = "collection",
        buttons = c("csv", "excel"),
        text = "Download"
      )
    ), # end of buttons customization
  # customize the length menu
  pageLength = 10
))
## Heatmap functional analysis: hand each row cluster's gene set to the
## functional-analysis module, using the genome snapshotted at heatmap time.
callModule(module = functional_analysis_server ,
id = "heatmap_functional_analysis_ui" ,
ui_id = "heatmap_functional_analysis_ui",
session = session,
# One gene vector per row cluster, in the cluster order shown in the heatmap.
gene_set = reactive(split(x = active_heatmap_data()$gene_name,
f = factor(active_heatmap_data()$clust , levels = active_heatmap_data()$clust %>% unique()))) ,
genome = reactive({heatmap_reference_annot()}) )
## Refresh the word-cloud cluster picker with the column-cluster names of the
## freshly generated heatmap (natural sort so "Clust_10" follows "Clust_9").
observeEvent(input$generate_heatmap,{
shinyWidgets::updatePickerInput(session = session ,
inputId = "wordcloud_column_cluster",
choices = heatmap_column_cluster()$wide$cluster %>% gtools::mixedsort())
})
## Heatmap word cloud: per column-cluster sample information, as a named list
## of tibbles (one element per cluster).
hm_cluster_wise_sample_infor_ll <- eventReactive(input$generate_heatmap, {
## get samples / column labels for user selected cluster
cluster_name_column_lable_tbl <- heatmap_column_cluster()$long %>%
dplyr::select(cluster, column_labels )
## get abstract from selected labels
# NOTE(review): join keys are positional -- column 2 is the heatmap column
# label, and column 23 of the sample-info table is presumably the SRA run
# id; confirm against the sample-info schema before changing it.
join_col_x <- cluster_name_column_lable_tbl %>% colnames() %>% .[2] ## column having hm column labels
join_col_y <- user_selected_sra_id_sample_info() %>% colnames()%>% .[23] ## column having SRA id
cluster_wise_sample_infor_tbl <- cluster_name_column_lable_tbl %>%
left_join(user_selected_sra_id_sample_info() ,
by = setNames(join_col_y,join_col_x)) %>%
dplyr::group_by(cluster) %>%
tidyr::nest()
# Convert the nested tibble into a named list keyed by cluster name.
cluster_wise_sample_infor_ll <- cluster_wise_sample_infor_tbl %>%
dplyr::pull(2)
names(cluster_wise_sample_infor_ll) <- cluster_wise_sample_infor_tbl %>%
pull(1)
return(cluster_wise_sample_infor_ll)
})
# Feed the per-cluster sample information into the sample-information module.
observe({
callModule(id = "hm_sample_infor" ,
module = cluster_wise_sample_information_server , parent_id = "hm_sample_infor",
cluster_wise_sample_information = reactive(hm_cluster_wise_sample_infor_ll()))
})
####@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## Download page server code ----
####@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## download GO data (module browses the annotation-hub summary table)
callModule(module = download_go_data_server , id = "download_go_data" , ah_data = ah_data_summary2)
## download gene expression matrix
callModule(module = download_gene_expression_matrix_server , id = "download_gene_expression_data")
}
|
0b2d4717d566a4f4aa87f43062c552089b41bc2c | 471b0bb52ba7fce30bbdf908362646ea352fddfa | /plot3.R | 548c2fefea048a801e06f169a5c06948d84ee013 | [] | no_license | ShankerGCEO/ExploratoryDataCourseProject1 | 32958c768ab60b8f7a84a31bdc5b93267ca9ec57 | f48a172c9771eedd24b4bfa91048bf59ffa40409 | refs/heads/master | 2021-01-23T19:44:14.052094 | 2014-07-13T15:49:14 | 2014-07-13T15:53:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 838 | r | plot3.R | setwd("V:/Data Analytics/RPractice/coursera")
# Reads the UCI "Individual household electric power consumption" data, keeps
# the two-day window 2007-02-01/02 and draws the sub-metering line chart as
# plot3.png.
# fix: the original 'heade=T' only worked through partial argument matching
a <- read.table("household_power_consumption.txt", sep = ";",
                header = TRUE, na.strings = "?")
a[, "Date"] <- as.Date(a$Date, format = "%d/%m/%Y")
# Date == "YYYY-MM-DD" compares via as.character(), so the string literals work
a1 <- a[a[, "Date"] == "2007-02-01" | a[, "Date"] == "2007-02-02", ]
a1$DateTime <- strptime(do.call(paste, c(a1[c("Date", "Time")], sep = " ")),
                        format = "%Y-%m-%d %H:%M:%S", tz = "")
png("plot3.png", width = 480, height = 480)
with(a1, plot(DateTime, Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering"))
with(a1, points(DateTime, as.numeric(Sub_metering_1), col = "black", type = "l"))
with(a1, points(DateTime, as.numeric(Sub_metering_2), col = "red", type = "l"))
with(a1, points(DateTime, as.numeric(Sub_metering_3), col = "blue", type = "l"))
# fix: the series are drawn as lines, so the legend uses line keys (lty = 1)
# rather than the pch = "______" point-character hack
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
0c984732ae8f019a94f9c4e2ae962d58777c9258 | 9420cae72566bc3881ddadac1728f1b75c967d35 | /man/deweather-package.Rd | acafec89a35b3f243ce9e240f51e9289f5732fd6 | [] | no_license | davidcarslaw/deweather | 6b9482c81914a4a0d9e18a528f770b2b7f7ae76d | 12d5a11f70223f8c528575274929f721162642e0 | refs/heads/master | 2023-08-27T16:47:03.618721 | 2023-08-07T15:10:58 | 2023-08-07T15:10:58 | 30,747,167 | 29 | 9 | null | 2023-07-20T06:28:23 | 2015-02-13T08:22:40 | R | UTF-8 | R | false | true | 903 | rd | deweather-package.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deweather-package.R
\docType{package}
\name{deweather-package}
\alias{deweather}
\alias{deweather-package}
\title{deweather: Remove the influence of weather on air quality data}
\description{
\if{html}{\figure{logo.png}{options: style='float: right' alt='logo' width='120'}}
Model and account for (or remove) the effect of meteorology on atmospheric composition data. The technique uses boosted regression trees based on the gbm package. Code can also be run using multiple cores if available.
}
\seealso{
Useful links:
\itemize{
\item \url{https://davidcarslaw.github.io/deweather/}
\item \url{https://github.com/davidcarslaw/deweather}
\item Report bugs at \url{https://github.com/davidcarslaw/deweather/issues}
}
}
\author{
\strong{Maintainer}: David Carslaw \email{david.carslaw@york.ac.uk}
}
\keyword{internal}
|
18f2c628782172c4ea434ceeb06cea89889b0a57 | e6eae169a196ee2a9e71e54620bde8f145a73fa7 | /scale.R | ae3fa9670c88ee2fe7254025bda0c65d6e14dfc1 | [] | no_license | rhesus123/rcns_scripts | 1871dad8b09a5eea79a876023fd77bf788787f5a | 5e351aaf5f52c5278018e360c19d1b2381e065a0 | refs/heads/master | 2020-04-27T07:40:49.166066 | 2019-03-05T10:19:53 | 2019-03-05T10:19:53 | 174,143,960 | 0 | 0 | null | 2019-03-06T12:46:53 | 2019-03-06T12:46:52 | null | UTF-8 | R | false | false | 424 | r | scale.R |
for(i in list.files(pattern = "exp.+tsv")){
data <- read.table(i, header = T, check.names = F, sep = "\t")
keep <- grep("-", data[,1], invert = T)
data <- data[keep,]
for(col in 2:ncol(data)){
data[, col] <- floor(data[, col] / mean(data[, col]) * 1000)
}
data <- data[rowSums(data[,2:ncol(data)]) > 50,]
write.table(data, paste("../scaled/", i, sep = ""), quote = F, sep = "\t", row.names = F)
cat(i,sep="\n")
}
|
5a1b1363c742ab3a4a601071be041b207b2a7d59 | a1e2905336de2218b7198456ccc18c4c5a774e79 | /man/SimulateSMD.Rd | ca9076815317cfdb4548276dd19b2a2327b3f483 | [] | no_license | cran/metaforest | c07e7e5b8033293a4faddc275598cdd2bbf302c6 | 75bd2037e1a6bb2019a664d9030b5bb80519d12f | refs/heads/master | 2022-04-01T21:51:43.638562 | 2020-01-08T03:50:02 | 2020-01-08T03:50:02 | 102,970,968 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 3,277 | rd | SimulateSMD.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SimulateSMD.R
\name{SimulateSMD}
\alias{SimulateSMD}
\title{Simulates a meta-analytic dataset}
\usage{
SimulateSMD(k_train = 20, k_test = 100, mean_n = 40, es = 0.5,
tau2 = 0.04, moderators = 5, distribution = "normal", model = es
* x[, 1])
}
\arguments{
\item{k_train}{Atomic integer. The number of studies in the training dataset.
Defaults to 20.}
\item{k_test}{Atomic integer. The number of studies in the testing dataset.
Defaults to 100.}
\item{mean_n}{Atomic integer. The mean sample size of each simulated study in
the meta-analytic dataset. Defaults to 40. For each simulated study, the
sample size n is randomly drawn from a normal distribution with mean mean_n,
and sd mean_n/3.}
\item{es}{Atomic numeric vector. The effect size, also known as beta, used in
the model statement. Defaults to .5.}
\item{tau2}{Atomic numeric vector. The residual heterogeneity. For a range of
realistic values encountered in psychological research, see Van Erp,
Verhagen, Grasman, & Wagenmakers, 2017. Defaults to 0.04.}
\item{moderators}{Atomic integer. The number of moderators to simulate for
each study. Make sure that the number of moderators to be simulated is at
least as large as the number of moderators referred to in the model
parameter. Internally, the matrix of moderators is referred to as "x".
Defaults to 5.}
\item{distribution}{Atomic character. The distribution of the moderators.
Can be set to either "normal" or "bernoulli". Defaults to "normal".}
\item{model}{Expression. An expression to specify the model from which to
simulate the mean true effect size, mu. This formula may use the terms "es"
(referring to the es parameter of the call to SimulateSMD), and "x[, ]"
(referring to the matrix of moderators, x). Thus, to specify that the mean
effect size, mu, is a function of the effect size and the first moderator,
one would pass the value \code{model = es * x[ , 1]}.
Defaults to es * x[ , 1].}
}
\value{
List of length 4. The "training" element of this list is a data.frame
with k_train rows. The columns are the variance of the effect size, vi; the
effect size, yi, and the moderators, X. The "testing" element of this list is
a data.frame with k_test rows. The columns are the effect size, yi, and the
moderators, X. The "housekeeping" element of this list is a data.frame with
k_train + k_test rows. The columns are n, the sample size n for each
simulated study; mu_i, the mean true effect size for each simulated study;
and theta_i, the true effect size for each simulated study.
}
\description{
This function simulates a meta-analytic dataset based on the random-effects
model. The simulated effect size is Hedges' G, an estimator of the
Standardized Mean Difference (Hedges, 1981; Li, Dusseldorp, & Meulman, 2017).
The functional form of the model can be specified, and moderators can be
either normally distributed or Bernoulli-distributed. See Van Lissa, in
preparation, for a detailed explanation of the simulation procedure.
}
\examples{
set.seed(8)
SimulateSMD()
SimulateSMD(k_train = 50, distribution = "bernoulli")
SimulateSMD(distribution = "bernoulli", model = es * x[ ,1] * x[ ,2])
}
|
efa42aed320921ee2c1b519540c538199e5778b8 | 99a695f64f28d9da2981e3d8447b5b6a287d69d0 | /Plot3.R | 8393dfad66656b2c6bd61241ada0c280c746f12e | [] | no_license | JimCallahanOrlando/ExData_Plotting2 | 5ce37891809a7aa36ae076d18034449e01997871 | 50b4c3267665e8d1cd42fc95410fd9a855238592 | refs/heads/master | 2020-04-21T09:02:43.874118 | 2015-09-23T18:55:52 | 2015-09-23T18:55:52 | 41,051,321 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,711 | r | Plot3.R | # Plot3.R
# PURPOSE: Load EPA Air Pollution Data files into R AND draw plot 3
# Set Directory to project directory for plots
setwd("~/GitHub/ExData_Plotting2/")
getwd()
## This first line will likely take a few seconds. Be patient!
NEI <- readRDS(".data/summarySCC_PM25.rds")
SCC <- readRDS(".data/Source_Classification_Code.rds")
str(NEI) # before removal
# Convert all of the NEI columns EXCEPT Emissions, fips and SCC to factor.
# NEI$fips <- factor(NEI$fips)
# NEI$SCC <- factor(NEI$SCC)
NEI$Pollutant <- factor(NEI$Pollutant)
NEI$type <- factor(NEI$type)
NEI$year <- factor(NEI$year)
str(NEI) # after removal
# Number of observations for each year (before NA and INF removal).
table(NEI$year)
# Remove incomplete, missing, infinite, negative and zero observations.
# Only computing sums(not averages) and zeros appear to be missing data.
NEI <- NEI[complete.cases(NEI), ]
NEI <- NEI[!is.na(NEI$Emissions), ]
NEI <- NEI[!is.infinite(NEI$Emissions), ]
NEI <- NEI[NEI$Emissions > 0, ] # will be taking log() -- can't have neg value.
# Number of observations for each year (after NA and INF removal).
table(NEI$year)
# Summarize NEI$Emissions by NEI$year
NEI$year <- factor(NEI$year)
Year <- levels(NEI$year)
summary(NEI$Emissions)
# Question 3:
# Of the four types of sources indicated by the type
# (point, nonpoint, onroad, nonroad) variable, which of these four sources
# have seen decreases in emissions from 1999–2008 for Baltimore City?
# Which have seen increases in emissions from 1999–2008?
# Use the ggplot2 plotting system to make a plot answer this question.
# NEI$type factor (point, nonpoint, onroad, nonroad)
NEI$type <- factor(NEI$type)
NEI$year <- factor(NEI$year)
Year <- levels(NEI$year)
BaltimoreEI <- NEI[NEI$fips == "24510", ]
BaltimoreEI <- BaltimoreEI[!is.na(BaltimoreEI$Emissions), ]
require(ggplot2)
# use same box plot, but faceted.
# use facets to display type.
# This is same boxplot as above; need to add facets for "type"
# BaltimoreEI$EmissionsLog10 <- log10(BaltimoreEI$Emissions)
# HONOR CODE: "R Graphics Cookbook" by Winston Chang for PNG parameters and qplot
# Argument to log must be in quotes: log = "y"
par(mar = c(4, 4, 4, 1) )
ppi <- 150
png(filename = "plot3.png", height = 3.5*ppi, width=3.5*ppi, units = "px", pointsize = 14)
bp2 <- qplot( data = BaltimoreEI, x = year, y = Emissions, log = "y" ) + geom_boxplot()
# Add facets (note: tilda, "~" is required!)
bp3 <- bp2 + facet_wrap( ~ type ) + ggtitle("Baltimore Particulate Matter 2.5 Emission Trends \nBy Source")
bp3 <- bp3 + ylab("Emissions - log scale") + xlab("Year\nSource: US EPA National Emissions Inventory (NEI)")
bp3
dev.off()
# End of Plot3.R |
201812d72ad6a6c1b4c75dc0d15cea4cb92deddf | 1ce5a3554f494371e909ddfd18f3463168522afd | /Week1_assignment_project_1/plot1.R | 315c63679d2a32f05aec95bf4ab4af7e69274000 | [] | no_license | SriNithin965/ExData_Plotting1 | 336ac77e88c3508c0bf8d6afb018731a718d295c | c510c84f15d9935b8084044b09684a61440aaf9f | refs/heads/master | 2022-06-21T07:43:27.197004 | 2020-05-01T06:14:15 | 2020-05-01T06:14:15 | 260,389,729 | 0 | 0 | null | 2020-05-01T05:53:37 | 2020-05-01T05:53:37 | null | UTF-8 | R | false | false | 451 | r | plot1.R | library(data.table)
library(dplyr)
library(lubridate)
data<-fread("household_power_consumption.txt",na.strings="?")
data[, dateTime := as.POSIXct(paste(Date, Time), format = "%d/%m/%Y %H:%M:%S")]
data <- data[(dateTime >= "2007-02-01") & (dateTime < "2007-02-03")]
png("plot1.png",width=480,height=480)
with(data,hist(data[["Global_active_power"]],main="Global active power",col="red",xlab="Global active power(KiloWatts)",ylab="frequency"))
dev.off() |
b49b6ebc0518a2254d96b42312da75d334261708 | 90570592e0b39ed04999798d6a2b294762004838 | /data/Rate_1.data.R | 6fa31caaa89964816e3fac013e07fa6a492b5e85 | [] | no_license | wmmurrah/cognitivemodeling | d7fa6d081ee16b4c45642e807f495345c7926fa1 | 38e98cabdfaa3f659f821e2fdb27396e49d16f84 | refs/heads/master | 2021-07-12T06:05:07.816085 | 2020-10-11T23:01:54 | 2020-10-11T23:01:54 | 210,068,736 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 68 | r | Rate_1.data.R | k <- 50
n <- 100
# beta_prior_shape1 <- 1
# beta_prior_shape2 <- 1
|
183914d55926fc95b56f44981f35b17c7ae52393 | 0053e50cff6a87c3190dc00fbeb4b931b53a162c | /server.R | bedc842ec8ccb94c1399bd9d83ff3f0104bfe897 | [] | no_license | flow-r/flow_creator | 6eec0373cdb951e05d534ea581f1706b164de57e | b7f3d7a3693242768f9f9e54756badff5f7d4a8f | refs/heads/master | 2021-01-10T11:41:42.571883 | 2015-11-25T02:04:51 | 2015-11-25T02:04:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,632 | r | server.R |

# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
library(gplots)
# fix: library() errors loudly when a package is missing; require() only
# returns FALSE and would let the app start in a broken state
library(reshape2)
library(flowr)
source("funcs2.R")
# shorthand used throughout this file
as.c=as.character
#source("~/Dropbox/public/github_flow/R/plot-funcs.R")
## http://shiny.rstudio.com/gallery/widgets-gallery.html
## http://stackoverflow.com/questions/22160372/r-shiny-how-to-build-dynamic-ui-text-input
## for each job we have:
## jobname
## cmd
## previous_job
## submission_type
## dependency_type
shinyServer(func = function(input, output, session){
##print(url_fields_to_sync)
firstTime <- TRUE
source("funcs2.R")
# Keep the browser URL hash in sync with selected inputs so app state can be
# bookmarked. On the very first render the incoming hash (if any) is echoed
# back instead, so a bookmarked state is not immediately overwritten.
output$hash <- renderText({
#url_fields_to_sync <- as.c(names(input))
#if(length(url_fields_to_sync) == 0)
url_fields_to_sync = c("num_jobs", "refresh")
# Build "field=value,field=value" from the tracked inputs.
newHash = paste(collapse=",",
Map(function(field) {
paste(sep="=",
field,
input[[field]])
},
url_fields_to_sync))
# the VERY FIRST time we pass the input hash up.
return(
if (!firstTime) {
newHash
} else {
if (is.null(input$hash)) {
NULL
} else {
# flip the module-level flag (defined above) so later renders emit newHash
firstTime<<-F;
isolate(input$hash)
}
}
)
})
# Rebuild the per-job input forms whenever the job count changes or the user
# presses refresh. The form generation itself is isolate()d so other input
# changes do not retrigger a full rebuild.
output$jobs = renderUI({
## make a new UI, when ever either of these change their value
input$num_jobs;
input$refresh
#print(get_jobnames(input))
isolate({
get_jobforms(input)
})
})
#outputOptions(output, 'jobs', suspendWhenHidden=FALSE)
# ----------------------------------------------------------------------
# Current flow definition built from the form inputs; consumed by the
# summary table, the diagram and the download handlers below.
# ----------------------------------------------------------------------
current_def <- reactive({
  # With no jobs configured there is nothing to convert yet.
  if (input$num_jobs < 1) {
    return("")
  }
  message("class of input is: ")
  to_flowdef(input)
})
# --------- flow summary table, this is really a flowdef
# Rendered empty until at least one job exists.
output$flow_def <- renderTable({
## need at least one job
#browser()
if (input$num_jobs < 1)
return()
def <- current_def()
return(as.data.frame(def))
}, include.rownames=FALSE)
# ------------------------------------- output flowdef -------------------------------
# Download the current flow definition as a <flow_name>.def sheet.
output$download_flowdef <- downloadHandler(
filename = function() {
paste(input$flow_name, '.def', sep='')
},
content = function(file) {
# regenerate the definition from the current inputs and write it out
def <- current_def()
params::write_sheet(def, file)
})
# -------------------------- flow summary plot ----------------------------
# Diagram of the current flow definition. Shows a hint until there are enough
# jobs, and a "please refresh" placeholder if building the definition fails.
output$flow_diagram <- renderPlot({
  #browser()
  # ------- proceed only when we have more than 2 jobs
  if (input$num_jobs < 2)
    return(textplot(c('Add some jobs using the slider below\n', 'Say more than 3 ...')))
  def <- try(current_def())
  # fix: inherits() is the robust way to detect a try() failure;
  # class(x)[1] == "try-error" breaks if another class is ever prepended
  if (inherits(def, "try-error")) {
    textplot("please refresh ...")
  } else {
    plot_flow(def)
  }
})
# ------------------------------------- download the plot made earlier -------------------------------
# Write the current flow diagram to a PDF named after the flow.
output$downloadPlot <- downloadHandler(
  filename = function() { paste(input$flow_name, '.pdf', sep='') },
  content = function(file) {
    # get this only if we have more than 2 jobs
    if (input$num_jobs < 2)
      return("")
    pdf(file)
    # fix: close the device even if plot_flow() errors, so the graphics
    # device is never leaked
    on.exit(dev.off(), add = TRUE)
    def <- current_def()
    plot_flow(def)
  })
# R source code equivalent of the current flow, regenerated from the inputs;
# used by both the on-screen code panel and the script download below.
current_flow_code <- reactive({
to_flowcode(input=input)
})
# ------------------------------------- output code -------------------------------
# Render the generated R code as a syntax-highlighted <pre><code> block.
output$code <- renderUI({
#debug(get_dat_flowcode)
flow_code <- current_flow_code()
#print(flow_code)
ret <- pre(class="shiny-code",
# we need to prevent the indentation of <code> ... </code>
HTML(format(tags$code(
class="language-r",
#paste(readLines(file.path.ci(getwd(), rFile), warn=FALSE),collapse="\n")
paste(flow_code, collapse = "\n")
), indent = FALSE)))
return(ret)
})
# Download the generated flow code as a runnable <flow_name>.R script.
output$rscript <- downloadHandler(
filename = function () {
paste(input$flow_name, '.R', sep='')
},
content = function(file) {
flow_code <- current_flow_code()
write(flow_code, file)
})
}) ## server
|
a6c6b0e3bb220ec3fd4179a12f4f183f558ee305 | f1000eab041e19d6194b84a11d24297115332dda | /man/add.zeros.noCount.Rd | fcbbdf3533846312ec92d6bde8b551fddacc2ec3 | [] | no_license | pointblue/RMN.functions | 902a1a41e0463bf871f2eaacb321622a69f6f77c | ef89702f869ea3a9b338dccc0c7f520b498bac8c | refs/heads/master | 2020-09-22T12:57:28.781442 | 2020-04-10T01:10:14 | 2020-04-10T01:10:14 | 225,205,393 | 0 | 1 | null | 2020-01-25T18:49:16 | 2019-12-01T18:06:43 | R | UTF-8 | R | false | true | 514 | rd | add.zeros.noCount.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add.zeros.noCount.r
\name{add.zeros.noCount}
\alias{add.zeros.noCount}
\title{Adds zeros to dataframe for statistical analysis}
\usage{
add.zeros.noCount(pc)
}
\arguments{
\item{df}{A data frame object}
}
\value{
A data frame
}
\description{
Takes bird data that has been prepared by bird_prepare function and turned into the same information but with zeros where no detections were found.
}
\examples{
df2 = bird_species.common(pc)
}
|
41ef464fd158f393e4552ba8df901cfb1e8c32c6 | 19526246d42b3fd8fdf67750b57909e6e6a99114 | /man/rmspfc.Rd | 3647a00d08c773a5e70852134485ce1d1eb8dc6a | [] | no_license | cran/nlirms | 3b864e5b6a54bd39bf9a0530ef6fe40fd8776f79 | 4c4e32251c750735ae29a5a2eaf88eddf6b0e1d0 | refs/heads/master | 2020-03-10T13:05:38.733788 | 2018-04-13T10:16:48 | 2018-04-13T10:16:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,975 | rd | rmspfc.Rd | \name{rmspfc}
\alias{rmspfc}
\alias{rmspfc}
\title{Rate-making system based on the a posteriori frequency component}
\usage{
rmspfc(time = 5, claim = 5, fmu = .2, fsigma = 2, fnu = 1,
family = "NO", round = 2, size = 8, padlength = 4, padwidth = 2,
...)
}
\arguments{
\item{time}{time period for designing the rate-making system based on the a posteriori frequency component}
\item{claim}{number of claims for designing the rate-making system based on the a posteriori frequency component}
\item{fmu}{mu parameter of frequency model in designing of rate-making system}
\item{fsigma}{sigma parameter of frequency model in designing of rate-making system}
\item{fnu}{nu parameter of frequency model in designing of rate-making system}
\item{family}{a nlirms.family object, which is used to define the frequency model to designing of rate-making system}
\item{round}{rounds the rate-making system values to the specified number of decimal places }
\item{size}{indicates the size of graphical table for rate-making system}
\item{padlength}{indicates the length of each graphical table cells}
\item{padwidth}{indicates the width of each graphical table cells}
\item{...}{for further arguments}
}
\value{
rmspfc() function returns the expected number of claims of policyholders based on the different models.
}
\description{
rmspfc() function gives the rate-making system based on the a posteriori frequency component. The values given by the rmspfc() function are equal to the expected number of claims given by the enc.family functions (i.e. enc.PGA, enc.PIGA, enc.PGIG) for different amounts of time and claims.
}
\details{
rmspfc() function gives the rate-making system in the form of a table where each cell corresponds to one combination of time and claim. For example, the cell with time=2 and claim=1 shows the expected number of claims in the next year for a policyholder who had one claim in the past two years.
}
\author{
Saeed Mohammadpour (s.mohammadpour1111@gmail.com), Soodabeh Mohammadpoor Golojeh (s.mohammadpour@gmail.com)
}
\references{
Frangos, N. E., & Vrontos, S. D. (2001). Design of optimal bonus-malus systems with a frequency and a severity component on an individual basis in automobile insurance. ASTIN Bulletin: The Journal of the IAA, 31(1), 1-22.
Lemaire, J. (1995) Bonus-Malus Systems in Automobile Insurance, Kluwer Academic Publishers, Massachusetts.
MohammadPour, S., Saeedi, K., & Mahmoudvand, R. (2017). Bonus-Malus System Using Finite Mixture Models. Statistics, Optimization & Information Computing, 5(3), 179-187.
Najafabadi, A. T. P., & MohammadPour, S. (2017). A k-Inflated Negative Binomial Mixture Regression Model: Application to Rate--Making Systems. Asia-Pacific Journal of Risk and Insurance, 12.
Rigby, R. A., & Stasinopoulos, D. M. (2005). Generalized additive models for location, scale and shape. Journal of the Royal Statistical Society: Series C (Applied Statistics), 54(3), 507-554.
Stasinopoulos, D. M., Rigby, B. A., Akantziliotou, C., Heller, G., Ospina, R., & Motpan, N. (2010). gamlss. dist: Distributions to Be Used for GAMLSS Modelling. R package version, 4-0.
Stasinopoulos, D. M., & Rigby, R. A. (2007). Generalized additive models for location scale and shape (GAMLSS) in R. Journal of Statistical Software, 23(7), 1-46.
}
\examples{
# rate-Making system based on the Poisson-Gamma model for frequency component
rmspfc(time = 5, claim = 5, fmu = .2, fsigma = 2, fnu = 1, family = "PGA", round
= 2, size = 8, padlength = 4, padwidth = 2)
# rate-Making system based on the Poisson-Inverse Gamma model for frequency component
rmspfc(time = 5, claim = 5, fmu = .2, fsigma = 2, fnu = 1, family = "PIGA",
round = 2, size = 8, padlength = 4, padwidth = 2)
# rate-Making system based on the Poisson-Generalized Inverse Gaussian model for frequency
rmspfc(time = 5, claim = 5, fmu = .2, fsigma = 2, fnu = 1, family = "PGIG",
round = 2, size = 8, padlength = 4, padwidth = 2)
}
|
5dc21cd3083b622047f5a4a5dcdfe1a31ab857bc | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/pvar/man/ChangePoints.Rd | 9b3399e71ed20c633e1e2ad28085184547bc46f7 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 591 | rd | ChangePoints.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{ChangePoints}
\alias{ChangePoints}
\title{Change Points of a \code{numeric} vector}
\usage{
ChangePoints(x)
}
\arguments{
\item{x}{\code{numeric} vector.}
}
\value{
The vector of index of change points.
}
\description{
Finds change points (i.e., corners) in the \code{numeric} vector.
}
\details{
The end points of the vector will be always included in the results.
}
\examples{
x <- rwiener(100)
cid <- ChangePoints(x)
plot(x, type="l")
points(time(x)[cid], x[cid], cex=0.5, col=2, pch=19)
}
|
6b856fa609e048152ac1a2f8347f3ffbde8b0273 | 4ba396a905823ed536d62bfaa45aafe6dc6b2715 | /man/loadHMM.Rd | 38d330852ba97bb0321bf34fea15ec878e0d3efe | [] | no_license | msesia/snpknock | 2570aa5c9f0c4af9d0dc4953a3f9f51a65df65d9 | f0d027b4891fa9a0ec2a541c231fb80a8dbbabc8 | refs/heads/master | 2020-05-18T14:13:26.987921 | 2019-11-07T02:22:08 | 2019-11-07T02:46:05 | 184,463,949 | 2 | 2 | null | null | null | null | UTF-8 | R | false | true | 2,533 | rd | loadHMM.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fastphase.R
\name{loadHMM}
\alias{loadHMM}
\title{Load HMM parameters fitted by fastPHASE}
\usage{
loadHMM(r_file, alpha_file, theta_file, char_file, compact = TRUE,
phased = FALSE)
}
\arguments{
\item{r_file}{a string with the path of the "_rhat.txt" file produced by fastPHASE.}
\item{alpha_file}{a string with the path of the "_alphahat.txt" file produced by fastPHASE.}
\item{theta_file}{a string with the path of the "_thetahat.txt" file produced by fastPHASE.}
\item{char_file}{a string with the path of the "_origchars" file produced by fastPHASE.}
\item{compact}{if TRUE (the default, as shown in the usage above), return only the compact parameter arrays; if FALSE, also assemble the explicit transition and emission matrices for the HMM.}
\item{phased}{whether to assemble a model for phased haplotypes, if compact==FALSE (default: FALSE).}
}
\value{
A structure containing the parameters from the Li and Stephens HMM for phased haplotypes.
}
\description{
This function loads the parameter estimates obtained by fastPHASE (see \link{runFastPhase})
and assembles the Li and Stephens HMM, in the format required by the knockoff generation functions
\link{knockoffHaplotypes} and \link{knockoffGenotypes}.
}
\details{
This function by default returns a structure with three fields:
\itemize{
\item{"r": a numerical array of length p.}
\item{"alpha": a numerical array of size (p,K).}
\item{"theta": a numerical array of size (p,K).}
}
If the parameter compact is FALSE, this function assembles the HMM model for the genotype data
(either unphased or phased), in the format required by the knockoff generation function \link{knockoffHMM}.
}
\examples{
# Specify the location of the fastPHASE output files containing the parameter estimates.
# Example files can be found in the package installation directory.
r_file = system.file("extdata", "genotypes_rhat.txt", package = "SNPknock")
alpha_file = system.file("extdata", "genotypes_alphahat.txt", package = "SNPknock")
theta_file = system.file("extdata", "genotypes_thetahat.txt", package = "SNPknock")
char_file = system.file("extdata", "genotypes_origchars", package = "SNPknock")
# Read the parameter files and load the HMM
hmm = loadHMM(r_file, alpha_file, theta_file, char_file)
# Read the parameter files again and load the full HMM (explicit transition/emission matrices)
hmm.large = loadHMM(r_file, alpha_file, theta_file, char_file, compact=FALSE)
}
\references{
\insertRef{scheet2006}{SNPknock}
}
\seealso{
Other fastPHASE: \code{\link{runFastPhase}},
\code{\link{writeXtoInp}}
}
\concept{fastPHASE}
|
967336c14ce76b8209d8321ba77875147cd32409 | fe7a611013804f9ba61a53816ec3e60b96491547 | /e_practice in text mining_w4/datacamp_online practice.R | 943d7c63b7490cddb83122641e8dfc2da635868d | [] | no_license | cafepeng/R-course-2018 | 9dba30ad1a073dfa64bbd111e6bf3bbd658fd7e2 | 87c46eeb79c8cbc02f6bc9bd0513626f645751eb | refs/heads/master | 2021-01-24T12:12:18.482070 | 2018-04-25T14:22:31 | 2018-04-25T14:22:31 | 123,124,274 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,365 | r | datacamp_online practice.R | ##learn from datacamp (text mining Bag of Words)
#need package qdap
#install.packages("qdap")
library(qdap)
new_Text<-"DataCamp is the first online learning platform that focuses on building the best learning experience specifically for Data Science. We have offices in Boston and Belgium and to date, we trained over 250,000 (aspiring) data scientists in over 150 countries. These data science enthusiasts completed more than 9 million exercises. You can take free beginner courses, or subscribe for $25/month to get access to all premium courses"
new_Text2<-read.csv(new_Text)
#check is it is classified as character
class(new_Text2)
#count the frequency of words
require(qdap)
term_count<-freq_terms(new_Text2,1)
##---parsing from tweet of starbucks-----------##
library(rvest)
#輸入網址,爬取此網頁
title<-read_html("https://mobile.twitter.com/starbucks")
#標題內含css
title<-html_nodes(title,".dir-ltr")
title<-html_text(title)
title<-list(title)
str(title)
##------------------------------------------##
#practice on datacamp#
##1ST TRY##
# Create the object: text
text<-"<b>She</b> woke up at 6 A.M. It\'s so early! She was only 10% awake and began drinking coffee in front of her computer."
library(tm)
library(qdap)
# All lowercase
text<-tolower(text)
text
# Remove punctuation
text<-removePunctuation(text)
text
# Remove numbers
text<-removeNumbers(text)
text
# Remove whitespace
text<-stripWhitespace(text)
text
##2ND TRY##
# Create the object: text
library(tm)
library(qdap)
text00<-list("<b>She</b> woke up at 6 A.M. It\'s so early! She was only 10% awake and began drinking coffee in front of her computer.")
class(text)
text01<-VectorSource(text00)
text02<-VCorpus(text01)
# All lowercase
text02<-tm_map(text02,content_transformer(tolower))
text02
# Remove punctuation
text02<-tm_map(text02,removePunctuation)
text02
# Remove numbers
text02<-tm_map(text02,removeNumbers)
text02
# Remove whitespace
text02<-tm_map(text02,stripWhitespace)
text02
#=====RIGHT ANSWER TO ABOVE=============#
# Create the object: text
text <- "<b>She</b> woke up at 6 A.M. It\'s so early! She was only 10% awake and began drinking coffee in front of her computer."
# All lowercase
tolower(text)
# Remove punctuation
removePunctuation(text)
# Remove numbers
removeNumbers(text)
# Remove whitespace
stripWhitespace(text)
|
43be123b9544b3730089223e2627a2c5895c5adc | b61458d49215f3208eaa855a04f6ee730f68733d | /man/trackNames.Rd | 46025f7cd690e6b543803acf506177e554deae05 | [] | no_license | pneuvial/c3co | 2c22de1990f29755cfb0b8547f65c320d6316cf8 | c20d9de94476bc7a6f4f405d04437ed6876fd088 | refs/heads/master | 2021-08-16T17:13:10.765020 | 2019-03-03T16:10:54 | 2019-03-03T16:10:54 | 59,739,091 | 3 | 2 | null | null | null | null | UTF-8 | R | false | true | 249 | rd | trackNames.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/segmentData.R
\name{trackNames}
\alias{trackNames}
\title{Get track names}
\usage{
trackNames(x)
}
\arguments{
\item{x}{A segmentation.}
}
\description{
Get track names
}
|
c1da95bbbca55f7da9143284d77b92e188a889ba | 5a9d2ee3ee9e1f5153108ba5c9a9ffbd7647ee1d | /run_analysis.R | eda7c3a471b1756fb6afb8c2e3e0f333a92168de | [] | no_license | mattsedlar/GetData_CourseProject | 7af69806706285c567af028d9cd7f7f6ecbdb53e | 99e2a68cd54b5aa18c8001e12a9ae765b5e3d2eb | refs/heads/master | 2021-01-19T06:36:41.690603 | 2015-07-23T17:29:57 | 2015-07-23T17:29:57 | 39,090,997 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,475 | r | run_analysis.R | library(dplyr)
source("acquire_data.R")
# label and merge an individual dataset.
label_n_merge <- function(df, lbl, subj) {
names(df) <- make.names(features[,2], unique=TRUE)
df <- cbind(lbl,df)
colnames(df)[1] <- "Activities"
df <- cbind(subj,df)
colnames(df)[1] <- "Subjects"
df
}
test_df <- label_n_merge(test_data, test_labels, test_subjects)
train_df <- label_n_merge(train_data, train_labels, train_subjects)
# combine both sets
df <- rbind(test_df, train_df)
# extract only subjects, activities, means and standard deviations
df <- df %>% select(Subjects,Activities, contains(".mean."), contains(".std."))
# descriptive activity names
y <- 1
while (y <= length(activities)) {
df$Activities[df$Activities == y] <- activities[y]
y <- y + 1
}
# clean up the column names
names(df) <- gsub("^t", "Time ", names(df))
names(df) <- gsub("^f", "Frequency ", names(df))
names(df) <- gsub("Acc", " Accelerometer ", names(df))
names(df) <- gsub("Gyro", " Gyroscope ", names(df))
names(df) <- gsub("Mag", " Magnitude ", names(df))
names(df) <- gsub(".mean", "Mean ", names(df))
names(df) <- gsub(".std", "Standard Deviation ", names(df))
names(df) <- gsub("...X", "X", names(df))
names(df) <- gsub("...Y", "Y", names(df))
names(df) <- gsub("...Z", "Z", names(df))
# TIDY DATA SET
tidy_df <- df %>% group_by(Subjects, Activities) %>% summarise_each(funs(mean))
# Write table
write.table(tidy_df, "tidydata.txt", sep="\t", row.names=FALSE, quote=FALSE) |
39d7dc5ad430d20a6057ad6a3661a572b8d7df01 | 1e39fc5bc9e4f53f63655269d203fd896caeeb00 | /R/write.transects.R | b1e10928a18dd55d263788acd32233eb4d2d8459 | [] | no_license | m-murchie/dht-bootstrap | 0405c921dd2e5dffee3b32e5123fe7eeb672ebb8 | 9b64c13593b63cfbfbea6756c756b0ea8177fb64 | refs/heads/master | 2020-04-05T23:14:16.058224 | 2016-07-07T15:35:08 | 2016-07-07T15:35:08 | 60,344,043 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,090 | r | write.transects.R | ## taken from DL Miller, spatlaugh repo
# write out a transect as a shapefile
# this is a completely Byzantine process
library(sp)
library(rgdal)
# transect should have the following columns:
# x,y x,y coordinates of **endpoints**
# leg survey leg associated
# file filename (well actually foldername) to write to
write.transects <- function(transect, file){
llist <- list()
Sample.Label <- c()
# make each leg a different Line object
for(this_leg in unique(transect$leg)){
# get the transect bits for this leg
tr <- transect[transect$leg==this_leg,]
for(i in 1:(nrow(tr)-1)){
this_label <- paste0(this_leg, "-", i)
ll <- Line(tr[,c("x","y")][i:(i+1),])
llist <- c(llist, Lines(ll, ID=this_label))
Sample.Label <- c(Sample.Label, this_label)
}
}
ll <- SpatialLines(llist)
dat <- data.frame(Sample.Label=Sample.Label)
rownames(dat) <- Sample.Label
ll <- SpatialLinesDataFrame(ll, data=dat)
writeOGR(ll, file, "data", "ESRI Shapefile" )
} |
eb4c5e181cd29d04758beeb26ea7063303138dce | 86535689d126480c5982255305c1de70deb8e09c | /air_pollution_stunting/scripts/nfhs_data_analysis.R | f4c94ba261f33cadaf9645510c9b5683b66d6229 | [] | no_license | prateek149/data_visualization | fc2469bff447bc83f50d5b3b61f5e06f7500cf0f | 368bec576f5aad7df38ead0193701d190b378478 | refs/heads/master | 2021-04-22T06:47:20.530780 | 2018-04-03T08:17:50 | 2018-04-03T08:17:50 | 58,259,378 | 0 | 2 | null | 2017-03-30T09:10:54 | 2016-05-07T09:32:26 | HTML | UTF-8 | R | false | false | 2,592 | r | nfhs_data_analysis.R | library("plyr")
library("sqldf")
library("readstata13")
library("foreign")
setwd("/Users/prateekmittal/Dropbox/IndiaSpend_Prateek/Datasets")
a <- read.dta("newdataset.dta")
a$haz <- a$hw5/100
a <- a[which(a$haz<6),]
a$stunting <- 0
a[which(a$haz<=-2),]$stunting <- 1
a$fuel_type <- "-999"
a[which(a$v161 == 2),]$fuel_type <- "LPG"
a[which(a$v161 == 5),]$fuel_type <- "Kerosene"
a[which(a$v161 == 8),]$fuel_type <- "Wood"
a[which(a$v161 == 10),]$fuel_type <- "Crop Residue"
a[which(a$v161 == 9),]$fuel_type <- "Crop Residue"
a[which(a$v161 == 11),]$fuel_type <- "Dung"
a$separate_kitchen <- 0
a[which(a$hv242==1),]$separate_kitchen <- 1
a$window <- 0
a[which(a$s56a==1),]$window <- 1
a$chimney <- 0
a[which(a$s50==1),]$chimney <- 1
b <- sqldf("SELECT fuel_type, COUNT(fuel_type) AS total,
SUM(stunting) AS stunting
FROM a GROUP BY fuel_type")
b$stunting_rate <- b$stunting/b$total
b$pm10 <- -999
b[which(b$fuel_type=="Dung"),]$pm10 <- 1.6
b[which(b$fuel_type=="Wood"),]$pm10 <- 0.4
b[which(b$fuel_type=="Crop Residue"),]$pm10 <- 1.4
b[which(b$fuel_type=="Kerosene"),]$pm10 <- 0.1
b[which(b$fuel_type=="LPG"),]$pm10 <- 0
write.csv(b[which(b$fuel_type!="-999"),],
"/Users/prateekmittal/Dropbox/data_visualization/air_pollution_stunting/data/stunting_fuel.csv")
c <- sqldf("SELECT fuel_type, separate_kitchen, window,COUNT(fuel_type) AS total,
SUM(stunting) AS stunting
FROM a GROUP BY fuel_type,separate_kitchen, window ")
c <- c[which(c$fuel_type != "-999"),]
for (i in unique(c$fuel_type))
{
total <- sum(c[which(c$fuel_type==i),]$total)
stunting <- sum(c[which(c$fuel_type==i),]$stunting)
c <- rbind(c, data.frame(fuel_type=i,separate_kitchen=-1,window=-1,total=total,
stunting=stunting))
#Window records
total <- sum(c[which(c$fuel_type==i & c$window==1),]$total)
stunting <- sum(c[which(c$fuel_type==i & c$window==1),]$stunting)
stunting_rate <- stunting/total
c <- rbind(c, data.frame(fuel_type=i,separate_kitchen=-1,window=1,total=total,
stunting=stunting))
total <- sum(c[which(c$fuel_type==i & c$separate_kitchen==1),]$total)
stunting <- sum(c[which(c$fuel_type==i & c$separate_kitchen==1),]$stunting)
stunting_rate <- stunting/total
c <- rbind(c, data.frame(fuel_type=i,separate_kitchen=1,window=-1,total=total,
stunting=stunting))
}
c$stunting_rate <- c$stunting/c$total
write.csv(c,
"/Users/prateekmittal/Dropbox/data_visualization/air_pollution_stunting/data/stunting_fuel_ventilation.csv")
|
ce5120a26c951d63277e187d2743bbf11c0c07e1 | 5aa6740910caf6d233fd9d9995c1ba779ca71e69 | /R/mscale.R | d01872c46d1089a9401ac1a9cfd18d95f71c0212 | [] | no_license | jdgonzalezwork/ktaucenters | 39f812e231235d4e0417cdc8e7ec078801f9b7bd | 7cdeb89bf534bba595edbb00e1a6b7d957026233 | refs/heads/master | 2023-02-10T01:33:09.251935 | 2023-01-29T23:04:18 | 2023-01-29T23:04:18 | 198,319,763 | 0 | 1 | null | 2023-01-31T01:42:08 | 2019-07-23T00:10:09 | R | UTF-8 | R | false | false | 7,110 | r | mscale.R | #-----------------------------------------------------------------------------------
constC1=function(p){rep(1,length(p))};
constC2=function(p){
aMaronna <- 2.9987
bMaronna <- -0.4647
aMaronna*p^bMaronna
}
#-----------------------------------------------------------------------------------
# CODIGO desarrollado por mi, (y mas rapido)
Mscale <- function(u, b=0.5, c){
#' Mscale
#' the M scale of an univariate sample (see reference below)
#'
#' @param u an univariate sample of size n.
#' @param b the desired break down point
#' @param c a tuning constant, if consistency to standard normal distribution is desired use
#' \code{\link{normal_consistency_constants}}
#' @return the Mscale value
#' @examples
#' Mscale(u=rnorm(100),c=1)
#'
#' @importFrom stats median
#' @references Maronna, R. A., Martin, R. D., Yohai, V. J., & Salibian-Barrera, M. (2018).
#' Robust statistics: theory and methods (with R). Wiley.
#' @export
sn <- median(abs(u)) / .6745
if (sn==0){return(sn)}
quantity <- mean(rhoOpt(u/sn,cc=c)) - b # indicates if the root is on the right
while (quantity>0){
sn <- 1.5 * sn
quantity=mean(rhoOpt(u/sn,cc=c)) - b #
}
if (quantity==0){return(sn)}
i <- 0
err <- 1
while (( i < 1000 ) & (err > 1e-10)) {
var <- u/sn;
AA <- mean(rhoOpt(var,cc=c));
BB <- mean(psiOpt(var,cc=c)*var);
factorAB <- (AA -BB -b) / (2*AA-BB-2*b);
snplus1 <- sn*factorAB;
err <- abs(snplus1/sn - 1)
sn <- abs(snplus1)
i <- i+1
}
sn
}
#-----------------------------------------------------------------------------------
#' normal_consistency_constants
#'
#' @description constants previously computed in order the M scale value is consistent
#' with the standard normal distribution for the optimal rho function considered in \code{\link{rhoOpt}}.
#' (Constant were computed from p=1 till p =400)
#'
#' @param p dimension where observation lives
#' @return cvalue
#'
#' @examples
#' p=5;
#' n=1000
#' X=matrix(rnorm(n*p), ncol=p)
#' dist=apply(X,1,function(t){sqrt(sum(t^2))})
#' s= Mscale(dist,b=0.5, c=normal_consistency_constants(p))
#'
#' ### variable s should be near from one for all p values between 1 and 400.
#'
#' @references [1] Maronna, R. A., Martin, R. D., Yohai, V. J., & Salibián-Barrera, M. (2018).
#' Robust statistics: theory and methods (with R). Wiley.
#' [2] Salibian-Barrera, M., Willems, G., & Zamar, R. (2008).
#' The fast-tau estimator for regression.
#' Journal of Computational and Graphical Statistics, 17(3), 659-682.
#'
#'
#'
#' @export
normal_consistency_constants <- function(p){
vaux= c(0.404629,0.6944748,0.8985921,1.063144,1.204321,1.329791,
1.443817,1.548994,1.647149,1.739537,1.827075,1.910406,
1.99017,2.066772,2.140529,2.211772,2.280742,2.347639,2.412622,
2.475882,2.537545,2.597723,2.656494,2.714016,2.770276,2.825434,
2.879547,2.932612,2.984741,3.035955,3.08632,3.135869,3.184648,
3.232684,3.279986,3.326633,3.372634,3.418005,3.462781,3.506981,
3.550627,3.593741,3.636342,3.678449,3.720075,3.761236,3.80195,
3.842231,3.88208,3.921557,3.960609,3.999286,4.037581,4.075532,
4.113132,4.15039,4.187306,4.223932,4.260211,4.296178,4.331845,
4.367245,4.402347,4.437173,4.471721,4.506001,4.540023,4.573796,
4.60732,4.6406,4.673648,4.706454,4.739037,4.771396,4.803549,
4.835481,4.867181,4.898696,4.930009,4.961124,4.992046,5.022776,
5.053319,5.083678,5.113838,5.143877,5.173678,5.203352,5.232814,
5.262137,5.291296,5.320304,5.34915,5.377833,5.406382,5.434777,
5.463004,5.491069,5.519042,5.546857,5.574513,5.602029,5.629455,
5.656713,5.683843,5.710848,5.737726,5.764478,5.791107,5.817614,
5.844001,5.870269,5.89642,5.922455,5.948373,5.974178,5.999901,
6.025461,6.05095,6.076328,6.101599,6.126763,6.151823,6.176781,
6.201638,6.226395,6.251055,6.275624,6.300084,6.324457,6.348727,
6.372907,6.397029,6.421037,6.444946,6.468764,6.492492,6.516168,
6.539737,6.563222,6.586623,6.609943,6.63318,6.656336,6.679371,
6.702367,6.725285,6.748124,6.770886,6.793581,6.816188,6.83873,
6.861204,6.883556,6.905896,6.928129,6.950294,6.972394,6.994428,
7.016405,7.038284,7.060111,7.081862,7.103564,7.12519,7.146749,
7.168252,7.189681,7.211047,7.232349,7.253587,7.274746,7.295884,
7.316926,7.337911,7.358844,7.379737,7.400556,7.421316,7.442017,
7.462661,7.483248,7.503778,7.524252,7.54467,7.565039,7.585339,
7.605592,7.625797,7.645945,7.666039,7.68608,7.706069,7.726007,
7.74591,7.765748,7.785535,7.805273,7.82496,7.844558,7.864147,
7.883689,7.903179,7.922623,7.942019,7.961367,7.980669,7.999924,
8.019132,8.038295,8.057414,8.076486,8.095513,8.114495,8.133433,
8.152326,8.171176,8.189983,8.208746,8.227467,8.246149,8.264782,
8.283357,8.301944,8.320429,8.338918,8.35733,8.375742,8.394071,
8.412383,8.430632,8.448894,8.46707,8.485221,8.503334,8.521408,
8.539443,8.557449,8.575411,8.593294,8.61118,8.629047,8.646855,
8.664627,8.682362,8.700059,8.717721,8.735375,8.752975,8.7705,
8.788031,8.805526,8.822969,8.840417,8.857788,8.875119,8.892458,
8.909744,8.926998,8.94422,8.961409,8.978539,8.995658,9.012743,
9.029797,9.046818,9.063807,9.080764,9.097691,9.114586,9.131449,
9.148281,9.165083,9.181853,9.198593,9.215303,9.231982,9.248631,
9.265251,9.28184,9.2984,9.314931,9.331432,9.347904,9.364348,
9.380762,9.397148,9.413505,9.429834,9.446134,9.462407,9.478651,
9.494868,9.511057,9.527219,9.543353,9.55946,9.57554,9.591593,
9.607619,9.623618,9.639591,9.655538,9.671458,9.687352,9.70322,
9.719062,9.734879,9.75067,9.766435,9.782175,9.797895,9.813593,
9.829224,9.84487,9.86049,9.876084,9.891652,9.907196,9.922714,
9.938208,9.953677,9.969121,9.984542,9.999938,10.01531,10.03066,
10.04598,10.06129,10.07656,10.09182,10.10705,10.12226,10.13745,
10.15261,10.16776,10.18288,10.19798,10.21304,10.22809,10.24312,
10.25813,10.27312,10.2881,10.30303,10.31794,10.33285,10.34773,
10.3626,10.37743,10.39222,10.40702,10.42181,10.43656,10.45129,
10.46599,10.48068,10.49537,10.51,10.52463,10.53923,10.55382,
10.56839,10.58294,10.59746,10.61197,10.62646,10.64092,10.65537,
10.6698,10.68421,10.6986,10.71297,10.72732,10.74165,10.75597,
10.77026,10.78454,10.79879,10.81303,10.82725,10.84145,10.85564,
10.8698,10.88395,10.89807,10.91218,10.92627,10.9403,10.95436,
10.96841,10.98243,10.99643,11.01041,11.02437,11.03832,11.05225,
11.06617,11.08006,11.09393)
ret1=vaux[length(vaux)];
if (p <length(vaux) ){
ret1 = vaux[p]
}
ret1
}
|
72b88c569a7f5ac3e65515b7a991d9fe7f6f979d | 6dd8ef4c301d743bf479f5ba1c302d56c179b113 | /tcga_codes/Figures_for_Poster.R | aa5418788c7fe8540391d4bede282ff62509e9b7 | [] | no_license | Fengithub/Genomics_Project_DCGs | 00b55c2fd226461a291d154f658b0d661f886b70 | 677f2291705973ccdbb5b957c26cafb9a118800d | refs/heads/master | 2020-05-03T05:33:10.633680 | 2019-09-17T19:18:56 | 2019-09-17T19:18:56 | 178,450,154 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,195 | r | Figures_for_Poster.R | #source("Figures_for_Poster.R")
#####################
## Set Working Dir ##
#####################
#install.packages("ggrepel")
library(ggplot2)
library(ggrepel)
setwd("~/Box Sync/Genomics_Project/")
source("dirinfo.R")
##########
## Data ##
##########
## Covariance Matrix Summary
cov.normal <- read.csv(file.path(sparsematrixdir, "GLASSO_step1_cov_normal_summary.csv"), header = T)
cov.cancer <- read.csv(file.path(sparsematrixdir, "GLASSO_step1_cov_cancer_summary.csv"), header = T)
cov.normal2 <- read.csv(file.path(sparsematrixdir, "GLASSO_step1_cov_normal_summary2.csv"), header = T)
cov.cancer2 <- read.csv(file.path(sparsematrixdir, "GLASSO_step1_cov_cancer_summary2.csv"), header = T)
cov.normal <- as.data.frame(cbind(cov.normal, cov.normal2))
cov.cancer <- as.data.frame(cbind(cov.cancer, cov.cancer2))
## Commuting Time Distance (ECTD) Summary
ctd.normal <- read.csv(file.path(ECTDdir, "ECTD_MoorePenrose_normal_summary.csv"), header = T)
ctd.cancer <- read.csv(file.path(ECTDdir, "ECTD_MoorePenrose_cancer_summary.csv"), header = T)
ctd.normal2 <- read.csv(file.path(ECTDdir, "ECTD_MoorePenrose_normal_summary2.csv"), header = T)
ctd.cancer2 <- read.csv(file.path(ECTDdir, "ECTD_MoorePenrose_cancer_summary2.csv"), header = T)
ctd.normal <- as.data.frame(cbind(ctd.normal, ctd.normal2))
ctd.cancer <- as.data.frame(cbind(ctd.cancer, ctd.cancer2))
## Matrix Measures
cov.measure <- read.csv(file.path(resultdir, "Cov_GraphicalMeasures.csv"), header = T)
ctd.measure <- read.csv(file.path(resultdir, "ECTD_GraphicalMeasures.csv"), header = T)
#Gene DensityStat DensityStatRank BtwnessStat BtwnessStatRank ClonessStat ClonessStatRank ClonessStatscale
# Gene DensityStat DensityStatRank BtwnessStat BtwnessStatRank ClonessStat ClonessStatRank ClonessStatscale ClonessStatRankscale CluCoefStat CluCoefStatRank
#1 CREB3L1 0.05268447 3477 0 2002 483.3401 982 0.27198879 2110 0.1630802 3335
#2 C10orf90 0.03660332 3655 0 2002 483.4422 857 0.37416214 1840 0.1307714 3504
###########
## Graph ##
###########
## Ven Diagram
thres = 20
for (thres in c(10, 20, 30, 40, 50, 100)) {
venda <- ctd.measure
venda$DensityStatvenda <- 0
idx = c(order(ctd.measure[,3])[1:thres])
venda$DensityStatvenda[idx] <- 1
venda$BtwnessStatvenda <- 0
idx = order(cov.measure[,5])[1:thres]
venda$BtwnessStatvenda[idx] <- 1
venda$ClonessStatvenda <- 0
idx = order(ctd.measure[,9])[1:thres]
venda$ClonessStatvenda[idx] <- 1
venda$CluCoefStatvenda <- 0
idx = order(ctd.measure[,11])[1:thres]
venda$CluCoefStatvenda[idx] <- 1
myvenda = venda[c("Gene", "DensityStatvenda", "BtwnessStatvenda", "ClonessStatvenda", "CluCoefStatvenda")]
myvenda$sum = apply(myvenda[,2:5], 1, sum)
myvenda = myvenda[myvenda$sum>0,]
myvenda = myvenda[order(-myvenda$sum),]
apply(myvenda[,2:5], 2, sum)
dim(myvenda)
deg.res <- read.table(file.path(datdir, "tcga_DEGresult.txt"), header = T)
colnames(deg.res) <- c("Gene", "Statistic", "Pvalue", "BHPvalue", "BonferPvalue")
deg.res$DEGrank <- rank(-abs(deg.res$Statistic))
myvenda <- merge(myvenda, deg.res, by = "Gene")
write.csv(myvenda, file.path(resultdir, paste0("VenDiagram_overappedGenes_top",thres,".csv")), row.names = F, quote = F)
}
ven10 <- read.csv(file.path(resultdir, "VenDiagram_overappedGenes_top10.csv"), header = T)
ven20 <- read.csv(file.path(resultdir, "VenDiagram_overappedGenes_top20.csv"), header = T)
ven30 <- read.csv(file.path(resultdir, "VenDiagram_overappedGenes_top30.csv"), header = T)
ven40 <- read.csv(file.path(resultdir, "VenDiagram_overappedGenes_top40.csv"), header = T)
ven50 <- read.csv(file.path(resultdir, "VenDiagram_overappedGenes_top50.csv"), header = T)
ven100 <- read.csv(file.path(resultdir, "VenDiagram_overappedGenes_top100.csv"), header = T)
## Graph of Measures
cov.measure <- read.csv(file.path(resultdir, "Cov_GraphicalMeasures.csv"), header = T)
ctd.measure <- read.csv(file.path(resultdir, "ECTD_GraphicalMeasures.csv"), header = T)
# plot1 Btw vs Den
thre = cov.measure$BtwnessStat[order(-cov.measure$BtwnessStat)]
thre = thre[20]
dat <- data.frame(gene = as.character(cov.measure$Gene), btw = cov.measure$BtwnessStat, den = ctd.measure$DensityStat)
rownames(dat) = dat$gene
dat$Significant <- ifelse(dat$btw >= thre, "Y", "N")
png("densityVSbtwness.png", height = 1000, width = 1000)
p1<-ggplot(dat, aes(x = den, y = btw)) +
theme(legend.position = "none") +
geom_point(aes(color = Significant), size = 5) +
labs(x = "Density Stat.", y = "Betweenness Stat.") +
scale_color_manual(values = c("grey", "red")) +
theme_bw(base_size = 20) +
geom_text_repel(
data = subset(dat, btw >= thre),
aes(label = gene),
size = 10,
box.padding = unit(0.35, "lines"),
point.padding = unit(0.5, "lines")
)
p1
dev.off()
thre = ctd.measure$DensityStat[order(-ctd.measure$DensityStat)]
thre = thre[20]
dat <- data.frame(gene = as.character(cov.measure$Gene), den = cov.measure$BtwnessStat, btw = ctd.measure$DensityStat)
rownames(dat) = dat$gene
dat$Significant <- ifelse(dat$btw >= thre, "Y", "N")
png("densityVSbtwness_inv.png", height = 1000, width = 1000)
p1<-ggplot(dat, aes(x = den, y = btw)) +
theme(legend.position = "none") +
geom_point(aes(color = Significant), size = 5) +
labs(y = "Density Stat.", x = "Betweenness Stat.") +
scale_color_manual(values = c("grey", "red")) +
theme_bw(base_size = 20) +
geom_text_repel(
data = subset(dat, btw >= thre),
aes(label = gene),
size = 10,
box.padding = unit(0.35, "lines"),
point.padding = unit(0.5, "lines")
)
p1
dev.off()
# plot2: Clo vs Den
thre = ctd.measure$ClonessStatscale[order(-ctd.measure$ClonessStatscale)]
thre = thre[20]
dat <- data.frame(gene = as.character(cov.measure$Gene), btw = ctd.measure$ClonessStatscale, den = ctd.measure$DensityStat)
rownames(dat) = dat$gene
dat$Significant <- ifelse(dat$btw >= thre, "Y", "N")
png("densityVScloness.png", height = 1000, width = 1000)
p2 <- ggplot(dat, aes(x = den, y = btw)) +
theme(legend.position = "none") +
geom_point(aes(color = Significant), size = 5) +
labs(x = "Density Stat.", y = "Closenness Stat. (Scaled)") +
scale_color_manual(values = c("grey", "red")) +
theme_bw(base_size = 20) +
geom_text_repel(
data = subset(dat, btw >= thre),
aes(label = gene),
size = 10,
box.padding = unit(0.35, "lines"),
point.padding = unit(0.5, "lines")
)
p2
dev.off()
thre = ctd.measure$DensityStat[order(-ctd.measure$DensityStat)]
thre = thre[20]
dat <- data.frame(gene = as.character(cov.measure$Gene), den = ctd.measure$ClonessStatscale, btw = ctd.measure$DensityStat)
rownames(dat) = dat$gene
dat$Significant <- ifelse(dat$btw >= thre, "Y", "N")
png("densityVScloness_inv.png", height = 1000, width = 1000)
p2 <- ggplot(dat, aes(x = den, y = btw)) +
theme(legend.position = "none") +
geom_point(aes(color = Significant), size = 5) +
labs(y = "Density Stat.", x = "Closenness Stat. (Scaled)") +
scale_color_manual(values = c("grey", "red")) +
theme_bw(base_size = 20) +
geom_text_repel(
data = subset(dat, btw >= thre),
aes(label = gene),
size = 10,
box.padding = unit(0.35, "lines"),
point.padding = unit(0.5, "lines")
)
p2
dev.off()
# plot3: CC vs Den
thre = ctd.measure$CluCoefStat[order(-ctd.measure$CluCoefStat)]
thre = thre[20]
dat <- data.frame(gene = as.character(cov.measure$Gene), btw = ctd.measure$CluCoefStat, den = ctd.measure$DensityStat)
rownames(dat) = dat$gene
dat$Significant <- ifelse(dat$btw >= thre, "Y", "N")
png("densityVScluscoef.png", height = 1000, width = 1000)
p3 <- ggplot(dat, aes(x = den, y = btw)) +
theme(legend.position = "none") +
geom_point(aes(color = Significant), size = 5) +
labs(x = "Density Stat.", y = "Clustering Coefficient Stat.") +
scale_color_manual(values = c("grey", "red")) +
theme_bw(base_size = 20) +
geom_text_repel(
data = subset(dat, btw >= thre),
aes(label = gene),
size = 10,
box.padding = unit(0.35, "lines"),
point.padding = unit(0.5, "lines")
)
p3
dev.off()
thre = ctd.measure$DensityStat[order(-ctd.measure$DensityStat)]
thre = thre[20]
dat <- data.frame(gene = as.character(cov.measure$Gene), den = ctd.measure$CluCoefStat, btw = ctd.measure$DensityStat)
rownames(dat) = dat$gene
dat$Significant <- ifelse(dat$btw >= thre, "Y", "N")
png("densityVScluscoef_inv.png", height = 1000, width = 1000)
p3 <- ggplot(dat, aes(x = den, y = btw)) +
theme(legend.position = "none") +
geom_point(aes(color = Significant), size = 5) +
labs(y = "Density Stat.", x = "Clustering Coefficient Stat.") +
scale_color_manual(values = c("grey", "red")) +
theme_bw(base_size = 20) +
geom_text_repel(
data = subset(dat, btw >= thre),
aes(label = gene),
size = 10,
box.padding = unit(0.35, "lines"),
point.padding = unit(0.5, "lines")
)
p3
dev.off()
# plot4: CC vs gene
thre = ctd.measure$CluCoefStat[order(-ctd.measure$CluCoefStat)]
thre = thre[20]
dat <- data.frame(gene = as.character(cov.measure$Gene), cc = ctd.measure$CluCoefStat, ccidx = 1:length(ctd.measure$CluCoefStat))
rownames(dat) = dat$gene
dat$Significant <- ifelse(ctd.measure$CluCoefStat >= thre, "Y", "N")
png("ClusteringCoefStatPlot.png", height = 800, width = 1200)
p4<-ggplot(dat, aes(x = ccidx, y = cc)) +
theme(legend.position = "none") +
geom_point(aes(color = Significant), size = 5) +
labs(y = "Clustering Coefficient Stat.", x = "Gene") +
scale_color_manual(values = c("grey", "red")) +
theme_bw(base_size = 15) +
geom_text_repel(
data = subset(dat, cc >= thre),
aes(label = gene),
size = 10,
box.padding = unit(0.5, "lines"),
point.padding = unit(0.3, "lines")
)
p4
dev.off()
# plot5: Btw vs Btw
dat <- data.frame(gene = as.character(cov.measure$Gene), btw = cov.measure$BtwnessStat, den = ctd.measure$DensityStat, btw2 = ctd.measure$BtwnessStat)
rownames(dat) = dat$gene
dat$Significant <- ifelse(dat$btw2 > 10^4, "Y", "N")
png("btwnessVSbtwness.png", height = 1000, width = 1000)
p5<-ggplot(dat, aes(x = btw, y = btw2)) +
theme(legend.position = "none", axis.text = element_text(size=40)) +
geom_point(aes(color = Significant), size = 5) +
labs(x = "Betweenness Stat. (Covariance Mat)", y = "Betweenness Stat. (ECTD)") +
scale_color_manual(values = c("grey", "red")) +
theme_bw(base_size = 30) +
geom_text_repel(
data = subset(dat, btw2 > 10^4),
aes(label = gene),
size = 25,
box.padding = unit(0.8, "lines"),
point.padding = unit(0.3, "lines")
)
p5
dev.off()
# plot6: Density Plot
thre = ctd.measure$DensityStat[order(-ctd.measure$DensityStat)]
thre = thre[20]
dat <- data.frame(gene = as.character(cov.measure$Gene), cc = ctd.measure$DensityStat, ccidx = 1:length(ctd.measure$DensityStat))
rownames(dat) = dat$gene
dat$Significant <- ifelse(ctd.measure$DensityStat >= thre, "Y", "N")
png("DensityStatPlot.png", height = 800, width = 1200)
p6<-ggplot(dat, aes(x = ccidx, y = cc)) +
theme(legend.position = "none") +
geom_point(aes(color = Significant), size = 5) +
labs(y = "Density Stat.", x = "Gene") +
scale_color_manual(values = c("grey", "red")) +
theme_bw(base_size = 15) +
geom_text_repel(
data = subset(dat, cc >= thre),
aes(label = gene),
size = 10,
box.padding = unit(0.5, "lines"),
point.padding = unit(0.3, "lines")
)
p6
dev.off()
# Plot 7: covariance-matrix betweenness statistic per gene, labelling the
# top-20 genes (threshold = 20th-largest value).
thre <- cov.measure$BtwnessStat[order(-cov.measure$BtwnessStat)]
thre <- thre[20]
dat <- data.frame(gene = as.character(cov.measure$Gene), cc = cov.measure$BtwnessStat, ccidx = seq_along(cov.measure$BtwnessStat))
rownames(dat) <- dat$gene
dat$Significant <- ifelse(cov.measure$BtwnessStat >= thre, "Y", "N")
png("BetweennessStatPlot.png", height = 800, width = 1200)
p7 <- ggplot(dat, aes(x = ccidx, y = cc)) +
  theme(legend.position = "none") +
  geom_point(aes(color = Significant), size = 5) +
  labs(y = "Betweenness Stat.", x = "Gene") +
  scale_color_manual(values = c("grey", "red")) +
  theme_bw(base_size = 15) +
  geom_text_repel(
    data = subset(dat, cc >= thre),
    aes(label = gene),
    size = 10,
    box.padding = unit(0.5, "lines"),
    point.padding = unit(0.3, "lines")
  )
# Explicit print() so the plot is rendered even under source() (R FAQ 7.22).
print(p7)
dev.off()
# Plot 8: scaled closeness statistic per gene, labelling the top-20 genes
# (threshold = 20th-largest value).
thre <- ctd.measure$ClonessStatscale[order(-ctd.measure$ClonessStatscale)]
thre <- thre[20]
dat <- data.frame(gene = as.character(cov.measure$Gene), cc = ctd.measure$ClonessStatscale, ccidx = seq_along(ctd.measure$ClonessStatscale))
rownames(dat) <- dat$gene
dat$Significant <- ifelse(ctd.measure$ClonessStatscale >= thre, "Y", "N")
png("ClonessStatPlot.png", height = 800, width = 1200)
p8 <- ggplot(dat, aes(x = ccidx, y = cc)) +
  theme(legend.position = "none") +
  geom_point(aes(color = Significant), size = 5) +
  labs(y = "Closeness Stat. (Scaled)", x = "Gene") +
  scale_color_manual(values = c("grey", "red")) +
  theme_bw(base_size = 15) +
  geom_text_repel(
    data = subset(dat, cc >= thre),
    aes(label = gene),
    size = 10,
    box.padding = unit(0.5, "lines"),
    point.padding = unit(0.3, "lines")
  )
# Explicit print() so the plot is rendered even under source() (R FAQ 7.22).
print(p8)
dev.off()
# Plot 9: scaled closeness vs. betweenness, produced twice -- first labelling
# the top-20 genes by closeness, then (the "_inv" figure) with the axes swapped
# and the top-20 genes by betweenness labelled instead.
thre <- ctd.measure$ClonessStatscale[order(-ctd.measure$ClonessStatscale)]
thre <- thre[20]
dat <- data.frame(gene = as.character(cov.measure$Gene), btw = ctd.measure$ClonessStatscale, den = cov.measure$BtwnessStat)
rownames(dat) <- dat$gene
dat$Significant <- ifelse(dat$btw >= thre, "Y", "N")
png("btwnessVScloness.png", height = 1000, width = 1000)
p9 <- ggplot(dat, aes(x = den, y = btw)) +
  theme(legend.position = "none") +
  geom_point(aes(color = Significant), size = 5) +
  labs(x = "Betweenness Stat.", y = "Closenness Stat. (Scaled)") +
  scale_color_manual(values = c("grey", "red")) +
  theme_bw(base_size = 20) +
  geom_text_repel(
    data = subset(dat, btw >= thre),
    aes(label = gene),
    size = 10,
    box.padding = unit(0.35, "lines"),
    point.padding = unit(0.5, "lines")
  )
# Explicit print() so the plot is rendered even under source() (R FAQ 7.22).
print(p9)
dev.off()
# Second panel: highlight by covariance-matrix betweenness instead.
thre <- cov.measure$BtwnessStat[order(-cov.measure$BtwnessStat)]
thre <- thre[20]
dat <- data.frame(gene = as.character(cov.measure$Gene), den = ctd.measure$ClonessStatscale, btw = cov.measure$BtwnessStat)
rownames(dat) <- dat$gene
dat$Significant <- ifelse(dat$btw >= thre, "Y", "N")
png("btwnessVScloness_inv.png", height = 1000, width = 1000)
p9 <- ggplot(dat, aes(x = den, y = btw)) +
  theme(legend.position = "none") +
  geom_point(aes(color = Significant), size = 5) +
  labs(y = "Betweenness Stat.", x = "Closenness Stat. (Scaled)") +
  scale_color_manual(values = c("grey", "red")) +
  theme_bw(base_size = 20) +
  geom_text_repel(
    data = subset(dat, btw >= thre),
    aes(label = gene),
    size = 10,
    box.padding = unit(0.35, "lines"),
    point.padding = unit(0.5, "lines")
  )
print(p9)
dev.off()
# Plot 10: clustering-coefficient statistic vs. betweenness, produced twice --
# first labelling the top-20 genes by clustering coefficient, then ("_inv")
# with axes swapped and the top-20 genes by betweenness labelled.
thre <- ctd.measure$CluCoefStat[order(-ctd.measure$CluCoefStat)]
thre <- thre[20]
dat <- data.frame(gene = as.character(cov.measure$Gene), btw = ctd.measure$CluCoefStat, den = cov.measure$BtwnessStat)
rownames(dat) <- dat$gene
dat$Significant <- ifelse(dat$btw >= thre, "Y", "N")
png("btwnessVScluscoef.png", height = 1000, width = 1000)
p10 <- ggplot(dat, aes(x = den, y = btw)) +
  theme(legend.position = "none") +
  geom_point(aes(color = Significant), size = 5) +
  labs(x = "Betweenness Stat.", y = "Clustering Coefficient Stat.") +
  scale_color_manual(values = c("grey", "red")) +
  theme_bw(base_size = 20) +
  geom_text_repel(
    data = subset(dat, btw >= thre),
    aes(label = gene),
    size = 10,
    box.padding = unit(0.35, "lines"),
    point.padding = unit(0.5, "lines")
  )
# Explicit print() so the plot is rendered even under source() (R FAQ 7.22).
print(p10)
dev.off()
# Second panel: highlight by covariance-matrix betweenness instead.
thre <- cov.measure$BtwnessStat[order(-cov.measure$BtwnessStat)]
thre <- thre[20]
dat <- data.frame(gene = as.character(cov.measure$Gene), den = ctd.measure$CluCoefStat, btw = cov.measure$BtwnessStat)
rownames(dat) <- dat$gene
dat$Significant <- ifelse(dat$btw >= thre, "Y", "N")
png("btwnessVScluscoef_inv.png", height = 1000, width = 1000)
p10 <- ggplot(dat, aes(x = den, y = btw)) +
  theme(legend.position = "none") +
  geom_point(aes(color = Significant), size = 5) +
  labs(y = "Betweenness Stat.", x = "Clustering Coefficient Stat.") +
  scale_color_manual(values = c("grey", "red")) +
  theme_bw(base_size = 20) +
  geom_text_repel(
    data = subset(dat, btw >= thre),
    aes(label = gene),
    size = 10,
    box.padding = unit(0.35, "lines"),
    point.padding = unit(0.5, "lines")
  )
print(p10)
dev.off()
# Plot 11: clustering-coefficient statistic vs. scaled closeness, produced
# twice -- first labelling the top-20 genes by clustering coefficient, then
# ("_inv") with axes swapped and the top-20 genes by closeness labelled.
thre <- ctd.measure$CluCoefStat[order(-ctd.measure$CluCoefStat)]
thre <- thre[20]
dat <- data.frame(gene = as.character(cov.measure$Gene), btw = ctd.measure$CluCoefStat, den = ctd.measure$ClonessStatscale)
rownames(dat) <- dat$gene
dat$Significant <- ifelse(dat$btw >= thre, "Y", "N")
png("cluscoeffVScloseness.png", height = 1000, width = 1000)
p11 <- ggplot(dat, aes(x = den, y = btw)) +
  theme(legend.position = "none") +
  geom_point(aes(color = Significant), size = 5) +
  labs(x = "Closenness Stat. (Scaled).", y = "Clustering Coefficient Stat.") +
  scale_color_manual(values = c("grey", "red")) +
  theme_bw(base_size = 20) +
  geom_text_repel(
    data = subset(dat, btw >= thre),
    aes(label = gene),
    size = 10,
    box.padding = unit(0.35, "lines"),
    point.padding = unit(0.5, "lines")
  )
# Explicit print() so the plot is rendered even under source() (R FAQ 7.22).
print(p11)
dev.off()
# Second panel: highlight by scaled closeness instead.
thre <- ctd.measure$ClonessStatscale[order(-ctd.measure$ClonessStatscale)]
thre <- thre[20]
dat <- data.frame(gene = as.character(cov.measure$Gene), den = ctd.measure$CluCoefStat, btw = ctd.measure$ClonessStatscale)
rownames(dat) <- dat$gene
dat$Significant <- ifelse(dat$btw >= thre, "Y", "N")
png("cluscoeffVScloseness_inv.png", height = 1000, width = 1000)
p11 <- ggplot(dat, aes(x = den, y = btw)) +
  theme(legend.position = "none") +
  geom_point(aes(color = Significant), size = 5) +
  labs(y = "Closenness Stat. (Scaled).", x = "Clustering Coefficient Stat.") +
  scale_color_manual(values = c("grey", "red")) +
  theme_bw(base_size = 20) +
  geom_text_repel(
    data = subset(dat, btw >= thre),
    aes(label = gene),
    size = 10,
    box.padding = unit(0.35, "lines"),
    point.padding = unit(0.5, "lines")
  )
print(p11)
dev.off()
|
0d95a63d913140a2e55999a3bd59dba7ced6cfa1 | f6990f4746aca0291bc6970286ad6d46b9866a55 | /man/mle.Rd | 8300423b6f07aff91581d1c9e12f17e453759f37 | [] | no_license | cran/merror | 3c730db7c55bb309b055f14a23ce120647aacd3d | 9b606d2e3ed3b6f89df35c0a4e73a5b8b50584a2 | refs/heads/master | 2023-08-31T05:10:07.313831 | 2023-08-29T13:20:02 | 2023-08-29T14:30:47 | 17,697,402 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 824 | rd | mle.Rd | \name{mle}
\alias{mle}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Compute maximum likelihood estimates of precision. }
\description{
This is an internal function that computes the maximum likelihood estimates of precision for the constant
bias model using paired data.
}
\usage{
mle(v, r, ni)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{v}{ Variance-Covariance matrix for the n x N items by methods measurement data. }
\item{r}{ Initial estimates of imprecision, usually Grubbs. }
\item{ni}{ No. of items measured. }
}
\value{
An N vector containing the maximum likelihood estimates of precision.
}
\references{ Jaech, J. L. (1985) \emph{Statistical Analysis of Measurement Errors}. New York: Wiley. }
\author{ Richard A. Bilonick }
\keyword{ htest }
|
e0dfe4007f7d5a45c7a42971b45ed52bfdc009d5 | 390ad289fbf96b3dcf90f88acfc71531801391ec | /man/dot-get_reduced_dims.Rd | f99a42f070cca50e312e638a27949bb13670e392 | [
"MIT"
] | permissive | keshav-motwani/scanalysis | 8f7e30f1207b35dca0424c6c6856e15c624d36fa | 2276832c42224efd5d059b5d327aa6468e62a053 | refs/heads/master | 2023-02-27T14:43:44.118883 | 2021-02-03T04:24:13 | 2021-02-03T04:24:13 | 232,159,917 | 4 | 1 | null | null | null | null | UTF-8 | R | false | true | 438 | rd | dot-get_reduced_dims.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_reduced_dimensions.R
\name{.get_reduced_dims}
\alias{.get_reduced_dims}
\title{Get reduced dimensions of object}
\usage{
.get_reduced_dims(sce, type)
}
\arguments{
\item{sce}{SingleCellExperiment object}
\item{type}{Name of reduction type in reducedDims}
}
\value{
}
\description{
Get reduced dimensions of object
}
\examples{
NULL
}
\keyword{internal}
|
d95ab5c8a7847902bbc16e35162e824db5ab8290 | cb96d3d279e96a8803fa0fb04b20328c4f603c1e | /R/duplicateFinder.R | 4e0874996e3206685ec13d190005f0634970681c | [] | no_license | Hossein-Fallahi/MetaGx | 06b599f6739409705726a8eaef31478e7804b42a | 53e9c40891c5f0f8ab94fe1fda693e48d17a6aff | refs/heads/master | 2023-03-15T08:39:33.740280 | 2017-01-02T22:45:16 | 2017-01-02T22:45:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,991 | r | duplicateFinder.R | ########################
## Benjamin Haibe-Kains
## All rights Reserved
## September 1, 2013
########################
`duplicateFinder` <-
function (eset, topvar.genes=1000, dupl.cor=0.99, method=c("pearson", "spearman", "kendall"), cor.matrix=FALSE, nthread=1) {
  ## Find duplicated samples based on correlation of gene expression profiles.
  #
  # Args:
  #   eset: an ExpressionSet object
  #   topvar.genes: number of most variant genes used to define the expression profiles
  #   dupl.cor: correlation threshold above which two samples are flagged as duplicates
  #   method: correlation estimator (forwarded to mRMRe::mim as continuous_estimator)
  #   cor.matrix: if TRUE, return the full sample-by-sample correlation matrix instead
  #   nthread: number of threads handed to mRMRe
  #
  # Returns:
  #   A named list mapping each sample that has at least one duplicate to the
  #   names of the samples correlated with it above dupl.cor (or the raw
  #   correlation matrix when cor.matrix=TRUE).
  method <- match.arg(method)
  # Fewer than 3 genes is not a usable profile; fall back to all genes.
  if (topvar.genes < 3) { topvar.genes <- length(featureNames(eset)) }
  ## select the most variant genes among those measured in at least 80% of the
  ## samples (i.e. missing-value fraction strictly below 20%)
  iix <- apply(exprs(eset), 1, function (x, y) {
    return ((sum(is.na(x)) / length(x)) < ( 1 - y))
  }, y=0.8)
  # NOTE(review): if topvar.genes exceeds the number of genes passing the
  # missingness filter, `[1:topvar.genes]` yields NA indices -- confirm callers
  # never request more genes than are available.
  varg <- Biobase::featureNames(eset)[iix][order(apply(exprs(eset)[iix, , drop=FALSE], 1, var, na.rm=TRUE), decreasing=TRUE)[1:topvar.genes]]
  ## alternative, inefficient approach (kept for reference)
  # pairs <- t(combn(1:length(Biobase::sampleNames(eset)), 2, simplify=TRUE))
  # splitix <- parallel::splitIndices(nx=nrow(pairs), ncl=nthread)
  # splitix <- splitix[sapply(splitix, length) > 0]
  # mcres <- parallel::mclapply(splitix, function(x, pairs, expr, method) {
  #   res <- apply(pairs[x, , drop=FALSE], 1, function (x, expr, method) {
  #     return (cor(x=expr[ , x[1], drop=FALSE], y=expr[ , x[2], drop=FALSE], method=method, use="complete.obs"))
  #   }, expr=expr, method=method)
  #   return (res)
  # }, pairs=pairs, expr=Biobase::exprs(eset)[varg, , drop=FALSE], method=method)
  # res <- t(do.call(cbind, mcres))
  # res <- unlist(apply(res, 2, function (x, y, gid) {
  #   rr <- matrix(NA, nrow=length(gid), ncol=length(gid), dimnames=list(gid, gid))
  #   rr[y] <- x
  #   rr[y[ , 2:1]] <- x
  #   diag(rr) <- 1
  #   return (list(rr))
  # }, y=pairs, gid), recursive=FALSE)
  ## more efficient but still slow approach (kept for reference)
  # splitix <- parallel::splitIndices(nx=length(Biobase::sampleNames(eset)), ncl=nthread)
  # splitix <- splitix[sapply(splitix, length) > 0]
  # mcres <- parallel::mclapply(splitix, function(splitix, expr, method) {
  #   cores <- cor(x=expr[ , splitix, drop=FALSE], y=expr, method=method, use="pairwise.complete.obs")
  # }, expr=Biobase::exprs(eset)[varg, , drop=FALSE], method=method)
  # cor.samples <- do.call(rbind, mcres)
  ## using mRMRe for the pairwise sample correlations (multi-threaded)
  nn <- mRMRe::get.thread.count()
  mRMRe::set.thread.count(nthread)
  expr <- mRMRe::mRMR.data(data=data.frame(Biobase::exprs(eset)[varg, , drop=FALSE]))
  cor.samples <- mRMRe::mim(object=expr, continuous_estimator=method, method="cor")
  # restore the caller's mRMRe thread count
  mRMRe::set.thread.count(nn)
  if (cor.matrix) { return (cor.samples) }
  # a sample is trivially correlated with itself; mask the diagonal
  diag(cor.samples) <- NA
  ## create list of duplicates for each sample: names of all other samples
  ## whose correlation with it exceeds dupl.cor
  duplix <- apply(cor.samples, 1, function (x, y) {
    res <- names(x)[!is.na(x) & x > y]
    return (res)
  }, y=dupl.cor)
  # keep only samples that actually have at least one duplicate
  duplix <- duplix[sapply(duplix, length) > 0]
  return (duplix)
}
|
ffef10f787225cd49420f78027b57259ec987930 | f6328efe81b7bd86c6e160fad49359f32568a8c1 | /R_Scripts/analysisforMarch16.R | 1af5950db81c76df99c05101a95d6f52edd2a214 | [] | no_license | robertipk/Entrainment | 7ef84fb37b93210f2f9c81236b31fe6bba190cef | 69566591d166e265f2efa49fc324cac75ab306cd | refs/heads/master | 2021-01-22T08:39:01.090811 | 2016-08-23T18:45:27 | 2016-08-23T18:45:27 | 66,334,475 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,716 | r | analysisforMarch16.R | library(dplyr)
# Entrainment analysis: per-speaker feature averages (loudness, F0, jitter,
# shimmer) are computed, merged into a partner template, and fed to t-tests.
# Helper functions (findValueSelf, findValuePartner, calcPartnerDifference,
# calcNonPartnerDifferenceMeanOfDifferences, addOutlierColumn, calcNormdiff,
# all_ttests) are defined in superscript.R.
source("superscript.R")
origTable <- read.csv("correcteddata.csv",header=TRUE)
# Speaker IDs are disambiguated by appending the session letter.
origTable$speaker = paste(origTable$speaker,origTable$letter,sep="")
origTable$letter <- NULL
head(origTable)
# One speaker/feature table per acoustic measure.
loudness <- subset(origTable,select=c("speaker","loudness"))
F0finEnv <- subset(origTable,select=c("speaker","F0finEnv"))
jitterLocal <- subset(origTable,select=c("speaker","jitterLocal"))
shimmerLocal <- subset(origTable,select=c("speaker","shimmerLocal"))
# Zeros are treated as missing measurements and dropped (loudness keeps zeros).
F0finEnv <- F0finEnv %>% filter(F0finEnv != 0)
jitterLocal <- jitterLocal %>% filter(jitterLocal != 0)
shimmerLocal <- shimmerLocal %>% filter(shimmerLocal != 0)
# Per-speaker means. NOTE: aggregate() names its output columns "Group.1"/"x".
loudnessAvg <- aggregate(loudness$loudness, by=list(loudness$speaker), FUN=mean)
F0finEnvAvg <- aggregate(F0finEnv$F0finEnv, by=list(F0finEnv$speaker), FUN=mean)
jitterLocalAvg <- aggregate(jitterLocal$jitterLocal, by=list(jitterLocal$speaker), FUN=mean)
shimmerLocalAvg <- aggregate(shimmerLocal$shimmerLocal, by=list(shimmerLocal$speaker), FUN=mean)
template<-read.csv("newtoplevel.csv",header=TRUE)
# NOTE(review): loudnessAvg's columns at this point are Group.1/x, not
# name/loudness, so select(loudnessAvg, name, loudness) looks like it would
# fail -- confirm whether superscript.R renames them before this line runs.
Avg <- select(loudnessAvg,name,loudness)
colnames(Avg)[1]<-"name"
colnames(Avg)[2]<-"loudness"
# Trim speaker IDs to characters 4-8 (presumably the bare speaker code --
# TODO confirm against the CSV format).
loudnessAvg$name<-lapply(loudnessAvg$name,function(x)substr(x,4,8))
# Attach per-row entrainment features to the template.
template$self_feature <- apply(template,MARGIN=1,function(x)findValueSelf(Avg,x))
template$partner_feature <- apply(template,MARGIN=1,function(x)findValuePartner(Avg,x))
template$partner_diff <- apply(template,MARGIN=1,function(x)calcPartnerDifference(x))
template$nonpartner_diff <- apply(template,MARGIN=1,function(x)calcNonPartnerDifferenceMeanOfDifferences(template,x))
template$outlier<-addOutlierColumn(Avg,template)
template$normdiff <-apply(template,MARGIN=1,function(x)calcNormdiff(x))
# Run the full battery of t-tests defined in superscript.R.
all_ttests(template)
e8594434d286a7f453194c7365c7d6d5209d9fd6 | bbe69e27263454120aa4ada60eaed9116fccb6da | /R/import_from_base.R | 78a94126669165cef73122c10edd3100fb9131be | [] | no_license | ropensci-archive/outsider.devtools | 702812e099b9983f022f985ac607e5004c620489 | 787f92887435e7aedf4721170511ca4e790d2df2 | refs/heads/master | 2023-04-17T17:31:03.227335 | 2022-06-17T07:11:38 | 2022-06-17T07:11:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,763 | r | import_from_base.R | # import outsider.base functions
# Re-export the public API of 'outsider.base' from this package.
# Each @importFrom/@export pair below imports one function from outsider.base
# into this namespace and re-exports it, so users of this package can call the
# function without attaching outsider.base themselves.
#' @importFrom outsider.base arglist_get
#' @export
outsider.base::arglist_get
#' @importFrom outsider.base arglist_parse
#' @export
outsider.base::arglist_parse
#' @importFrom outsider.base dirpath_get
#' @export
outsider.base::dirpath_get
#' @importFrom outsider.base filestosend_get
#' @export
outsider.base::filestosend_get
#' @importFrom outsider.base wd_get
#' @export
outsider.base::wd_get
#' @importFrom outsider.base docker_img_ls
#' @export
outsider.base::docker_img_ls
#' @importFrom outsider.base image_install
#' @export
outsider.base::image_install
#' @importFrom outsider.base is_docker_available
#' @export
outsider.base::is_docker_available
#' @importFrom outsider.base outsider_init
#' @export
outsider.base::outsider_init
#' @importFrom outsider.base run
#' @export
outsider.base::run
#' @importFrom outsider.base is_installed
#' @export
outsider.base::is_installed
#' @importFrom outsider.base meta_get
#' @export
outsider.base::meta_get
#' @importFrom outsider.base modules_list
#' @export
outsider.base::modules_list
#' @importFrom outsider.base pkg_install
#' @export
outsider.base::pkg_install
#' @importFrom outsider.base uninstall
#' @export
outsider.base::uninstall
#' @importFrom outsider.base cat_line
#' @export
outsider.base::cat_line
#' @importFrom outsider.base char
#' @export
outsider.base::char
#' @importFrom outsider.base func
#' @export
outsider.base::func
#' @importFrom outsider.base log_set
#' @export
outsider.base::log_set
#' @importFrom outsider.base stat
#' @export
outsider.base::stat
#' @importFrom outsider.base server_connect
#' @export
outsider.base::server_connect
#' @importFrom outsider.base server_disconnect
#' @export
outsider.base::server_disconnect
|
a8044abcf7acdeba9c753b01dea55ec0cee3134c | 11d8fbce4f2c9a45a1fa68d0a00919d6a063cba7 | /Data-Science/3_Getting_and_Cleaning_Data/Lecture/Week_2/Examples/Reading from HDF5.R | 58519667d5663e840f1bf560c91bd96d19d17558 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | shanky0507/Coursera-John-Hopkins | e03c9c8234ff634a047aaf0aea9912c8b96358bd | 1b555bf9d3aaef3bfdd5d4bcd0e3b5c0e734cf66 | refs/heads/master | 2020-08-12T06:42:10.942297 | 2019-10-12T20:08:52 | 2019-10-12T20:08:52 | 214,708,337 | 0 | 0 | MIT | 2019-10-12T20:06:29 | 2019-10-12T20:06:28 | null | UTF-8 | R | false | false | 853 | r | Reading from HDF5.R | ## Install Package
# Lecture example: reading/writing HDF5 files with the Bioconductor rhdf5
# package. Creates example.h5, writes groups, matrices and a data frame into
# it, then reads them back.
# NOTE(review): biocLite() is the legacy Bioconductor installer; current
# Bioconductor documentation recommends BiocManager::install() -- confirm
# which R/Bioconductor version this course targets.
source("http://bioconductor.org/biocLite.R")
biocLite("rhdf5")
## Create the file and a small group hierarchy (foo, baa, foo/foobaa).
library(rhdf5)
created = h5createFile("example.h5")
created
created = h5createGroup("example.h5","foo")
created = h5createGroup("example.h5","baa")
created = h5createGroup("example.h5","foo/foobaa")
h5ls("example.h5")
## Write a matrix and a 3-d array (with an attribute) into the groups.
A = matrix(1:10,nr=5,nc=2)
h5write(A, "example.h5","foo/A")
B = array(seq(0.1,2.0,by=0.1),dim=c(5,2,2))
attr(B, "scale") <- "liter"
h5write(B, "example.h5","foo/foobaa/B")
h5ls("example.h5")
## Write a mixed-type data frame as a top-level dataset.
df = data.frame(1L:5L,seq(0,1,length.out=5),
                c("ab","cde","fghi","a","s"), stringsAsFactors=FALSE)
h5write(df, "example.h5","df")
h5ls("example.h5")
## Read the objects back from the file.
readA = h5read("example.h5","foo/A")
readB = h5read("example.h5","foo/foobaa/B")
readdf= h5read("example.h5","df")
readA
65702cdc6924dc7db936a38304b7da4b5aa09cd8 | 12a10c22bf3d08e70f9830a22e94a47632a71d82 | /man/allmods.Rd | 7b7561e38069176c3eca3ff314c05a91c38846a4 | [] | no_license | dsambrano/PsyTools | 973136448bd3c3eb5127cf13152a811846cc4133 | 7884c5b51cdc853fc7377d9737eafee2640dec14 | refs/heads/master | 2021-01-20T12:41:17.671181 | 2017-05-05T15:46:40 | 2017-05-05T15:46:40 | 90,392,060 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,082 | rd | allmods.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/allmods.R
\name{allmods}
\alias{allmods}
\title{All Possible Models}
\usage{
allmods(dv="dv", ivs=c("iv1","iv2","iv3"),
data=dataset, plot = FALSE)
}
\arguments{
\item{dv}{A string of the variable name of you dependent variable.}
\item{ivs}{A list of strings of the names of you independent variables.}
\item{data}{A \code{data.frame} object.}
\item{plot}{A \code{logical} statement indicating whether you want the affiliated
plots.}
}
\value{
A matrix with the R^2, adj R^2, PRESS, and Mallow Cp for all possible
(non interaction) models given the variables.
}
\description{
Fits all possible (non-interaction) regression models for the given
dependent variable and predictors, and summarizes their fit statistics.
}
\examples{
dv <- rnorm(500, 47, 12)
iv1 <- rnorm(500, 0, 6)
iv2 <- rnorm(500, 100, 5)
iv3 <- rnorm(500, 12, 3)
iv4 <- rnorm(500, 12, 100)
dataset <- data.frame(dv, iv1, iv2, iv3, iv4)
allmods(dv ="dv", ivs=c("iv1", "iv2", "iv3"),
data = dataset, plots = FALSE)
}
\author{
K. Preston and D. Sambrano
}
|
31d59dacf7b683afd0a294522be0dc24aa2f106d | c1d359cdf0281885744cdcd85d41a21e91218b43 | /man/KYCG_plotMetaEnrichment.Rd | e8faa4f8bad5367d690022c5eccb1568b4ef7e33 | [
"MIT"
] | permissive | zwdzwd/sesame | 20b2d29578661487db53432c8991d3c4478aa2c1 | 62fe6ef99a02e7f94b121fb601c3f368b8a4c1a8 | refs/heads/master | 2023-08-08T01:45:02.112492 | 2023-07-26T13:23:03 | 2023-07-26T13:23:03 | 122,086,019 | 37 | 26 | MIT | 2023-01-05T16:02:38 | 2018-02-19T16:00:34 | R | UTF-8 | R | false | true | 656 | rd | KYCG_plotMetaEnrichment.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/KYCG_plot.R
\name{KYCG_plotMetaEnrichment}
\alias{KYCG_plotMetaEnrichment}
\title{Plot meta gene or other meta genomic features}
\usage{
KYCG_plotMetaEnrichment(result_list)
}
\arguments{
\item{result_list}{one or a list of testEnrichment}
}
\value{
a grid plot object
}
\description{
Plot meta gene or other meta genomic features
}
\examples{
cg_lists <- KYCG_getDBs("MM285.TFBS")
queries <- cg_lists[(sapply(cg_lists, length) > 40000)]
result_list <- lapply(queries, testEnrichment,
"MM285.metagene", silent=TRUE, platform="MM285")
KYCG_plotMetaEnrichment(result_list)
}
|
64fb8b1bd25216ed1c154813e38600c0843c78fb | 43e6ce29a417f19475aecb90945e80dd2e797040 | /shopping-cart.R | 61446ab381047b2971d547e8c3b0bc80106cdf1d | [] | no_license | wolfkden/BigData-R | 8e5725a5d9da2bc65124dc8d14a8e9cc837e1be4 | 51392eaf3e01930e9157a387fca6fc042090c94a | refs/heads/master | 2021-01-13T14:27:57.819783 | 2015-02-18T00:48:38 | 2015-02-18T00:48:38 | 30,945,645 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,788 | r | shopping-cart.R | for(x in as.character(alldays$ITEM_NAME)) nlist[length(nlist)+1]<-paste(unlist(strsplit(x,",")),collapse="+")
# Market-basket style analysis of the `alldays` check-level data: collapse
# item IDs per check into "__"-joined baskets, count basket sizes, and build
# item-pair tables. This reads like a saved console transcript -- several
# intermediate results are recomputed/overwritten below (flagged inline).
# Build one row per check: sorted, de-duplicated item IDs joined by "__".
ax<-aggregate(cbind(ITEM_LIST=ITEM_ID,ITEM_COUNT=ITEM_ID,ITEM_NAME=ITEM_NAME)~CHK_NUM,alldays,FUN=function(x) { return(paste(sort(union(NULL,x)),collapse="__")) })
# Replace the placeholder ITEM_COUNT with the number of distinct items.
ax$ITEM_COUNT<-unlist(lapply(strsplit(ax$ITEM_LIST,"__"),FUN=length))
agg<-aggregate(CHK_NUM~ITEM_COUNT,ax,FUN=length)
agg<-agg[order(agg$CHK_NUM,decreasing=TRUE),]
# NOTE(review): the two lines above are repeated verbatim here -- redundant.
agg<-aggregate(CHK_NUM~ITEM_COUNT,ax,FUN=length)
agg<-agg[order(agg$CHK_NUM,decreasing=TRUE),]
# NOTE(review): each of the next aggregates overwrites the previous `agg`;
# only the last assignment (ITEM_LIST+ITEM_COUNT+ITEM_NAME) survives.
agg<-aggregate(CHK_NUM~ITEM_LIST+ITEM_COUNT+ITEM_NAME,ax,FUN=length)
agg<-aggregate(CHK_NUM~ITEM_LIST,ax,FUN=length)
agg<-aggregate(CHK_NUM~ITEM_LIST+ITEM_COUNT+ITEM_NAME,ax,FUN=length)
agg<-agg[order(agg$CHK_NUM,decreasing=TRUE),]
# NOTE(review): recomputes `ax` identically to the first line -- redundant.
ax<-aggregate(cbind(ITEM_LIST=ITEM_ID,ITEM_COUNT=ITEM_ID,ITEM_NAME=ITEM_NAME)~CHK_NUM,alldays,FUN=function(x) { return(paste(sort(union(NULL,x)),collapse="__")) })
# Checks where every item appears exactly once (max count 1, one record).
a1<-aggregate(CHECK_COUNT~CHK_NUM+ITEM_ID,alldays,FUN=function(x) { return(c(mx=max(x),n=length(x))) })
a1<-cbind(a1,a1$CHECK_COUNT)
a1<-subset(a1,n==1 & mx==1)
a1$CHECK_COUNT<-NULL
# Reduce to single-item checks only.
a1<-aggregate(ITEM_ID~CHK_NUM, a1, FUN=function(x) { return(c(ITEM_ID=max(x), n=length(x))) })
a1<-cbind(a1,a1$ITEM_ID)
a1[[2]]<-NULL
a1<-subset(a1,n==1)
# NOTE(review): `axx` is used below but never defined in this script --
# presumably created in an earlier session; verify before re-running.
x1<-subset(axx[,c(1:2,5)],ITEM_COUNT>3)
# Long-format item-pair and item-weight tables for 3-item baskets.
x1<-melt(data.frame(t(apply(subset(axx[,c(1:2,5)],ITEM_COUNT==3), 1,
                            FUN=function(x) { return(unlist(x)) }))),id=c("CHK_NUM","ITEM_COUNT"))
x2<-melt(data.frame(t(apply(subset(axx[,c(1:2,6)],ITEM_COUNT==3), 1,
                            FUN=function(x) { return(unlist(x)) }))),id=c("CHK_NUM","ITEM_COUNT"))
x1$variable<-NULL
x2$variable<-NULL
names(x1)[3]<-"ITEM_PAIR"
names(x2)[3]<-"ITEM_WTS"
# Join pairs with their weights on CHK_NUM/ITEM_COUNT.
xx<-merge(x1,x2)
|
8debb9de8673ac3d8a7dbfbb6112a5546186bc1a | b626eca042800ee5572344262a165c1989235f73 | /paper/Rcode_eps/03-contour-plot.R | 0500fd350b1ba0b241d215be2409803d18787803 | [] | no_license | nicoballarini/SubgrPlots | 38f04f07478c672cae761b1a9cd1fa5f79062d6e | 61229c9f36c9650f71b178b970a80d2a7bf9471d | refs/heads/master | 2020-03-13T12:59:46.419273 | 2020-01-29T02:26:45 | 2020-01-29T02:26:45 | 131,130,461 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,712 | r | 03-contour-plot.R | ###############################################################################-
##
## This program creates the figures for the manuscript using the
## prca data that is included in this package
##
##
## Instead of using rm(list = ls()), make sure you start with a fresh R
## by restarting R -> Control+Shift+F10
# cat("\014") # Cleans the console
## Load needed libraries
## If SubgrPlots package is not installed, then open project and use following lines or
## in the build window, click Install and Restart
# devtools::build()
# devtools::install()
library(SubgrPlots) # Loads this package. Install it first
library(dplyr)
library(grid)
library(gridGraphics)
# Load the data to be used
data(prca)
dat <- prca
setup.ss = c(10,60,15,30)
dat %>%
rename(Weight = weight,
Age = age) -> dat
setEPS()
postscript(file = "paper/figures_eps/03-contour-plot.eps", width = 7.1, height = 3)
graphics::layout(matrix(c(6,6,7,8,8,
1,2,3,4,5), nrow=2, ncol=5, byrow = TRUE),
heights = c(.075, .925),
widths = c(4, .9, .25, 4, .9))
# graphics::layout(matrix(c(1, 2), nrow=1, ncol=2), widths=c(4,1))
plot_contour(dat,
covari.sel = c(8,9),
trt.sel = 3,
resp.sel = c(1,2),
outcome.type = "survival",
setup.ss = setup.ss,
n.grid = c(100,100),
brk.es = seq(-4.5,4.5,length.out = 101),
n.brk.axis = 7,
para.plot = c(0.5, 2, 6),
font.size = c(1, 1, .75, 1, .9),
title = NULL,
strip = paste("Treatment effect size (log hazard ratio)"),
show.overall = T,show.points = T,
filled = T, palette = "hcl", col.power = 0.75,
new.layout = FALSE)
graphics::par(mar=c(0,0,0,0))
plot(0, type = "n", axes = F, xlab = "", ylab = "")
plot_contour_localreg(dat,
covari.sel = c(8,9),
trt.sel = 3,
resp.sel = c(1,2),
n.grid = c(100,100),
font.size = c(1, 1, .75, 1, .9),
brk.es = seq(-4.5,4.5,length.out = 101),
n.brk.axis = 7,
strip = "Treatment effect size (log hazard ratio)",
outcome.type = "survival",
new.layout = FALSE)
gridGraphics::grid.echo()
p = grid.grab()
graphics::par(mar=c(0,0,0,0), xpd=TRUE)
plot(0, 0, type = "n", axes = F, xlab = "", ylab = "")
text(-1, -.5, "(a)", cex = 1.5)
plot(0, 0, type = "n", axes = F, xlab = "", ylab = "")
plot(0, 0, type = "n", axes = F, xlab = "", ylab = "")
text(-1, -.5, "(b)", cex = 1.5)
dev.off()
|
e327aac8af9e5696be11832f4d167b824cd7e584 | 567d8f2a240cd3b7f3899b3fd5e3bd9328b2b895 | /R/simulate_cvd.R | 870bb87a39e4e646e5aba901a8d1d117819b9069 | [] | no_license | cran/colorspace | ec9123555d9a820c2a2c639b94d28734563df6e0 | fadb043aeb85048a0a2b9daddbb258002d0a4dfc | refs/heads/master | 2023-01-24T18:54:37.214672 | 2023-01-23T10:40:02 | 2023-01-23T10:40:02 | 17,695,192 | 7 | 3 | null | 2017-08-29T03:43:34 | 2014-03-13T04:18:48 | R | UTF-8 | R | false | false | 9,060 | r | simulate_cvd.R | #' Simulate Color Vision Deficiency
#'
#' Transformation of R colors by simulating color vision deficiencies,
#' based on a CVD transform matrix.
#'
#' Using the physiologically-based model for simulating color vision deficiency (CVD)
#' of Machado et al. (2009), different kinds of limitations can be
#' emulated: deuteranope (green cone cells defective), protanope (red cone cells defective),
#' and tritanope (blue cone cells defective).
#' The workhorse function to do so is \code{simulate_cvd} which can take any vector
#' of valid R colors and transform them according to a certain CVD transformation
#' matrix (see \code{\link{cvd}}) and transformation equation.
#'
#' The functions \code{deutan}, \code{protan}, and \code{tritan} are the high-level functions for
#' simulating the corresponding kind of colorblindness with a given severity.
#' Internally, they all call \code{simulate_cvd} along with a (possibly interpolated)
#' version of the matrices from \code{\link{cvd}}. Matrix interpolation can be carried out with
#' the function \code{interpolate_cvd_transform} (see examples).
#'
#' If input \code{col} is a matrix with three rows named \code{R}, \code{G}, and
#' \code{B} (top down) they are interpreted as Red-Green-Blue values within the
#' range \code{[0-255]}. Then the CVD transformation is applied directly to these
#' coordinates avoiding any further conversions.
#'
#' Finally, if \code{col} is a formal \code{\link[colorspace]{color-class}} object, then its
#' coordinates are transformed to (s)RGB coordinates, as described above, and returned as a formal
#' object of the same class after the color vision deficiency simulation.
#'
#' Up to version 2.0-3 of the package, the CVD transformations had been applied
#' directly to the gamma-corrected sRGB coordinates (corresponding to the hex coordinates
#' of the colors), following the illustrations of Machado et al. (2009). However,
#' the paper implicitly relies on a linear RGB space (see page 1294, column 1) where their
#' linear matrix transformations for simulating color vision deficiencies are applied.
#' Therefore, starting from version 2.1-0 of the package, a new argument \code{linear = TRUE}
#' has been added that first maps the provided colors to linearized RGB coordinates, applies
#' the color vision deficiency transformation, and then maps back to gamma-corrected sRGB
#' coordinates. Optionally, \code{linear = FALSE} can be used to restore the behavior
#' from previous versions. For most colors the difference between the two strategies is
#' negligible but for some highly-saturated colors it becomes more noticable, e.g., for
#' red, purple, or orange.
#'
#' @param col vector of R colors. Can be any of the three kinds of R colors,
#' i.e., either a color name (an element of \code{\link[grDevices]{colors}}), a hexadecimal (hex)
#' string of the form \code{"#rrggbb"} or \code{"#rrggbbaa"} (see \code{\link[grDevices]{rgb}}), or
#' an integer \code{i} meaning \code{palette()[i]}. Additionally, \code{col} can be
#' a formal \code{\link[colorspace]{color-class}} object or a matrix with three named
#' rows (or columns) containing R/G/B (0-255) values.
#' @param severity numeric. Severity of the color vision defect, a number between 0 and 1.
#' @param cvd_transform numeric 3x3 matrix, specifying the color vision deficiency transform matrix.
#' @param linear logical. Should the color vision deficiency transformation be applied to the
#' linearized RGB coordinates (default)? If \code{FALSE}, the transformation is applied to the
#' gamma-corrected sRGB coordinates (which was the default up to version 2.0-3 of the package).
#' @param cvd list of cvd transformation matrices. See \code{\link{cvd}} for available options.
#'
#' @return A color object as specified in the input \code{col} (hexadecimal string, RGB matrix,
#' or formal color class) with simulated color vision deficiency.
#'
#' @references Machado GM, Oliveira MM, Fernandes LAF (2009).
#' \dQuote{A Physiologically-Based Model for Simulation of Color Vision Deficiency.}
#' \emph{IEEE Transactions on Visualization and Computer Graphics}. \bold{15}(6), 1291--1298.
#' \doi{10.1109/TVCG.2009.113}
#' Online version with supplements at
#' \url{http://www.inf.ufrgs.br/~oliveira/pubs_files/CVD_Simulation/CVD_Simulation.html}.
#'
#' Zeileis A, Fisher JC, Hornik K, Ihaka R, McWhite CD, Murrell P, Stauffer R, Wilke CO (2020).
#' \dQuote{colorspace: A Toolbox for Manipulating and Assessing Colors and Palettes.}
#' \emph{Journal of Statistical Software}, \bold{96}(1), 1--49.
#' \doi{10.18637/jss.v096.i01}
#' @keywords colors cvd colorblind
#' @seealso \code{\link{cvd}}
#' @export
#' @examples
#' # simulate color-vision deficiency by calling `simulate_cvd` with specified matrix
#' simulate_cvd(c("#005000", "blue", "#00BB00"), tritanomaly_cvd["6"][[1]])
#'
#' # simulate color-vision deficiency by calling the shortcut high-level function
#' tritan(c("#005000", "blue", "#00BB00"), severity = 0.6)
#'
#' # simulate color-vision deficiency by calling `simulate_cvd` with interpolated cvd matrix
#' simulate_cvd(c("#005000", "blue", "#00BB00"),
#' interpolate_cvd_transform(tritanomaly_cvd, severity = 0.6))
#'
#' # apply CVD directly on wide RGB matrix (with R/G/B channels in rows)
#' RGB <- diag(3) * 255
#' rownames(RGB) <- c("R", "G", "B")
#' deutan(RGB)
#'
#' @importFrom grDevices col2rgb
simulate_cvd <- function(col, cvd_transform, linear = TRUE) {
  ## Apply a 3x3 color-vision-deficiency transform to colors given in any of
  ## four representations (colorspace S4 object, named RGB matrix, hex string,
  ## built-in color name) and return the result in the same representation.
  ## When linear = TRUE, hex/named colors are first de-gamma'd to linear RGB,
  ## transformed, and re-companded (see the roxygen header for rationale).
  ## determine input type
  input_type <- if (inherits(col, "color")) {
    ## S4 colorspace class
    "colorspace"
  } else if (is.matrix(col)) {
    ## named RGB matrix (0-255)
    "matrix"
  } else if (is.character(col) && (all(substr(col, 1L, 1L) == "#") & all(nchar(col) %in% c(7L, 9L)))) {
    ## all hex: "#rrggbb" or "#rrggbbaa"
    "hex"
  } else {
    ## assume built-in colors (names resolvable by col2rgb)
    "other"
  }
  ## indexes of missing values (if hex/other); restored as NA at the end
  NAidx <- NULL
  ## convert input to wide RGB matrix (0-255), rows = R/G/B, columns = colors
  if (input_type == "colorspace") {
    color_class <- class(col)
    # coords() gives colors in rows; transpose to the wide layout used below
    col <- t(coords(as(col, if(linear) "RGB" else "sRGB"))) * 255
  } else if (input_type == "matrix") {
    # accept a 3-column R/G/B matrix too; remember orientation for the output
    if(NROW(col) != 3L && NCOL(col) == 3L && all(toupper(colnames(col)) == c("R", "G", "B"))) {
      col <- t(col)
      transpose <- TRUE
    } else {
      transpose <- FALSE
    }
    stopifnot(all(toupper(rownames(col)) == c("R", "G", "B")))
  } else if (input_type == "hex") {
    # Save transparency value for later (chars 8-9; empty for 7-char hex)
    alpha <- substr(col, 8L, 9L)
    # keep indices of NA colors
    NAidx <- which(is.na(col))
    col <- substr(col, 1L, 7L)
    col <- grDevices::col2rgb(col)
  } else {
    # keep indices of NA colors
    NAidx <- which(is.na(col))
    col <- grDevices::col2rgb(col, alpha = TRUE)
    ## extract alpha values (if non-FF); fully opaque is dropped from output
    alpha <- format(as.hexmode(col[4L, ]), width = 2L, upper.case = TRUE)
    alpha[alpha == "FF"] <- ""
    ## retain only RGB
    col <- col[1L:3L, ]
  }
  ## de-gamma: sRGB transfer function inverse (linear segment below 0.03928)
  if (linear && input_type %in% c("hex", "other")) {
    sRGB_to_linearRGB <- function(x) {
      x <- x/255
      y <- ((x + 0.055)/1.055)^2.4
      small <- x <= 0.03928
      y[small] <- x[small]/12.92
      return(y * 255)
    }
    col <- sRGB_to_linearRGB(col)
  }
  ## transform color: matrix product applies the CVD model to every column
  RGB <- cvd_transform %*% col
  rownames(RGB) <- c("R", "G", "B")
  ## bound RGB values: the linear transform can leave the displayable gamut
  RGB[RGB < 0] <- 0
  RGB[RGB > 255] <- 255
  ## re-gamma: forward sRGB transfer function (inverse of the block above)
  if (linear && input_type %in% c("hex", "other")) {
    linearRGB_to_sRGB <- function(y) {
      y <- y/255
      x <- 1.055 * y^(1/2.4) - 0.055
      small <- y <= 0.03928/12.92
      x[small] <- 12.92 * y[small]
      return(x * 255)
    }
    RGB <- linearRGB_to_sRGB(RGB)
  }
  ## convert back to input type
  if (input_type == "colorspace") {
    col <- t(RGB/255)
    col <- if(linear) RGB(col) else sRGB(col)
    col <- as(col, color_class)
  } else if (input_type == "matrix") {
    col <- if(transpose) t(RGB) else RGB
  } else {
    # hex/other: re-encode as "#rrggbb", re-attach the saved alpha and NAs
    RGB <- round(RGB)
    col <- paste(grDevices::rgb(RGB[1L, ], RGB[2L, ], RGB[3L, ], maxColorValue = 255), alpha, sep = "")
    if(length(NAidx) > 0L) col[NAidx] <- NA
  }
  return(col)
}
#' @rdname simulate_cvd
#' @export
deutan <- function(col, severity = 1, linear = TRUE) {
  # Interpolate the deuteranomaly transform at the requested severity, then
  # apply it to the input colors.
  transform <- interpolate_cvd_transform(deutanomaly_cvd, severity)
  simulate_cvd(col, cvd_transform = transform, linear = linear)
}
#' @rdname simulate_cvd
#' @export
protan <- function(col, severity = 1, linear = TRUE) {
  # Interpolate the protanomaly transform at the requested severity, then
  # apply it to the input colors.
  transform <- interpolate_cvd_transform(protanomaly_cvd, severity)
  simulate_cvd(col, cvd_transform = transform, linear = linear)
}
#' @rdname simulate_cvd
#' @export
tritan <- function(col, severity = 1, linear = TRUE) {
  # Interpolate the tritanomaly transform at the requested severity, then
  # apply it to the input colors.
  transform <- interpolate_cvd_transform(tritanomaly_cvd, severity)
  simulate_cvd(col, cvd_transform = transform, linear = linear)
}
#' @rdname simulate_cvd
#' @export
interpolate_cvd_transform <- function(cvd, severity = 1) {
  # Map severity in [0, 1] onto the 11-entry transform list (steps of 0.1),
  # linearly blending the two neighbouring matrices for in-between values.
  # Severities outside [0, 1] are clamped to the first/last entry.
  step <- max(0, min(10, 10 * severity))
  lower <- floor(step)
  upper <- ceiling(step)
  if (lower == upper) {
    # Severity falls exactly on a tabulated transform; no blending needed.
    return(cvd[[lower + 1]])
  }
  weight <- step - lower
  (1 - weight) * cvd[[lower + 1]] + weight * cvd[[upper + 1]]
}
|
99a09d7182ae3578277d7fe48fd6fc0c39c1f1ef | f2384c2f83c77b900e493be75d711fc5e8a561f9 | /K_Nearest_Neigbour.R | 32c3e7c5649bb336e114d28650673f8fa8664dc1 | [] | no_license | RituUW/Data-mining-using-R | d0e32a38daad8a2b62076cdc0e2653c730bdca06 | 4039e4f7be08042b110d941bc49533257b5c1d8f | refs/heads/main | 2023-02-02T08:56:37.962907 | 2020-12-18T06:56:13 | 2020-12-18T06:56:13 | 322,501,531 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 892 | r | K_Nearest_Neigbour.R | #K NEAREST NEIGBOURS
# K-NEAREST NEIGHBOURS
# Classify riding-mower ownership with k-NN (k = 5): split the data 60/40,
# z-score-normalize the two predictor columns using training-set statistics
# only (avoids leakage into the validation split), then report a confusion
# matrix on the validation set.

# load data
mower.df <- read.csv("RidingMowers.csv", stringsAsFactors = TRUE)
# Fix the RNG seed so the train/validation split is reproducible.
set.seed(11)
# partition data: 60% of rows for training, the rest for validation.
# seq_len() is safer than 1:nrow() (1:n silently yields c(1, 0) when n == 0).
train.index <- sample(seq_len(nrow(mower.df)), 0.6*nrow(mower.df))
train.df <- mower.df[train.index, ]
valid.df <- mower.df[-train.index,]
# Copies that will hold the normalized predictor columns.
train.norm.df <- train.df
valid.norm.df <- valid.df
# normalize data
library(caret)
# compute mean and standard deviation of each predictor column from the
# TRAINING set only, then apply the same transform to both splits
norm.values <- preProcess(train.df[, 1:2], method=c("center", "scale"))
train.norm.df[, 1:2] <- predict(norm.values, train.df[, 1:2])
valid.norm.df[, 1:2] <- predict(norm.values, valid.df[, 1:2])
#install.packages("FNN")
library(FNN)
# use ?knn to find out more information
# It is worth noting that the input argument cl must be a factor!
knn.pred <- knn(train.norm.df[, 1:2], valid.norm.df[, 1:2],
                cl = train.norm.df[, 3], k = 5)
confusionMatrix(knn.pred, valid.norm.df[, 3])
|
b5b76a06b15b3082d70de6d7956114f0789903cf | a3db37c4dfebd618aeeb78065dd38af3f3490869 | /cachematrix.R | 2345c4aa7f144b570e945a7f8208f01ecdead501 | [] | no_license | souvik82/ProgrammingAssignment2 | ae9dae0da1081381328192ba4aca15e5dec722c8 | 9e171dc10d4856fc5249799b22cc28a7a69f9f4a | refs/heads/master | 2021-01-13T16:16:09.901463 | 2016-04-29T09:36:45 | 2016-04-29T09:36:45 | 57,369,380 | 1 | 0 | null | 2016-04-29T08:36:03 | 2016-04-29T08:36:03 | null | UTF-8 | R | false | false | 1,133 | r | cachematrix.R | ## makeCacheMatrix(): creates a special “matrix” object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix together with a cache slot for its inverse.
  # NULL means "inverse not computed yet".
  cached_inverse <- NULL

  # Replace the stored matrix and invalidate any stale cached inverse.
  # `<<-` assigns into the enclosing (constructor) environment, not the
  # local one, so the state persists across calls.
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }

  # Accessors for the stored matrix and its cached inverse.
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }

  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## cacheSolve(): computes the inverse of the "matrix" object returned by
## makeCacheMatrix(). If the inverse was already computed (and the matrix
## has not changed since), the cached value is returned without recomputing.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of the matrix wrapped by 'x'.
  cached <- x$getinverse()
  if (!is.null(cached)) {
    ## Cache hit: skip the computation entirely.
    message("getting cached data")
    return(cached)
  }
  ## Cache miss: compute the inverse, store it for next time, and return it.
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
## Test the result
## r <- rnorm(48)
## x <- matrix(r, nrow=4, ncol=4)
## y <- makeCacheMatrix(x)
## cacheSolve(y)
|
7e2881986d2bd118ea536fc8288e3389eed5f545 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/hdrcde/examples/plot.cde.Rd.R | cd79162f910b063ac236418d2ae37eeb01788015 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 318 | r | plot.cde.Rd.R | library(hdrcde)
### Name: plot.cde
### Title: Plots conditional densities
### Aliases: plot.cde
### Keywords: distribution hplot smooth
### ** Examples
# Estimate the conditional density of eruption duration given waiting time
# for the Old Faithful geyser data (cde() comes from the hdrcde package,
# loaded at the top of this file).
faithful.cde <- cde(faithful$waiting,faithful$eruptions,
             x.name="Waiting time", y.name="Duration time")
# Default display of the fitted conditional density estimate.
plot(faithful.cde)
# Alternative display: highest density region (HDR) view of the same fit.
plot(faithful.cde,plot.fn="hdr")
|
ec3326d6356c9fda4748fa785b2dee44ed1b364e | c2f8c87cf965423cd563a5fcd7bd7a38bf9200d0 | /man/run_civis.Rd | 75d0756dcc6e50a97585dbe0c7aaee0528bd105f | [] | no_license | mheilman/civis-r | 18f6a1831879f1d85daa45e6d05a8d5fa004248d | 68acbc93bb2168bccfea93ab0494121c220c93ce | refs/heads/master | 2020-12-19T22:14:11.218094 | 2019-10-01T14:10:00 | 2019-10-01T14:10:00 | 235,868,128 | 0 | 0 | null | 2020-01-23T19:18:05 | 2020-01-23T19:18:04 | null | UTF-8 | R | false | true | 1,036 | rd | run_civis.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scripts.R
\name{run_civis}
\alias{run_civis}
\title{Evaluate an R expression in a Civis Platform container}
\usage{
run_civis(expr, ...)
}
\arguments{
\item{expr}{code to evaluate}
\item{...}{arguments to \code{\link{CivisFuture}}}
}
\description{
Evaluate an R expression in a Civis Platform container
}
\details{
\code{run_civis} blocks until completion. For non-blocking calls,
use futures directly with \code{\link{civis_platform}}.
Attempts are made at detecting and installing necessary packages
within the container, and detecting global variables required in \code{expr}.
}
\examples{
\dontrun{
run_civis(2+2)
# specify required resources, and a specific image
run_civis(2+2,
required_resources = list(cpu = 1024, memory = 2048),
docker_image_name='image',
docker_image_tag = 'latest')
}
}
\seealso{
Other script_utils: \code{\link{civis_script}},
\code{\link{fetch_output_file_ids}},
\code{\link{run_template}}
}
\concept{script_utils}
|
1cdb58e38f9396cfeaf32a91165fef531f5c7f90 | 67eea017c474ab14755b8476ad29c19124766038 | /misc/evaluate_glm_logistic.R | 9a14b3de2b35894796180b0a102503ad4f8be5d9 | [
"MIT"
] | permissive | mutazag/mdsi | dd6a457c7269401d552cda162df8a37df92ca6fc | efecc8f650ddf6866154389f98d4ce0a9803db18 | refs/heads/master | 2021-06-01T20:23:12.920707 | 2020-03-29T01:21:32 | 2020-03-29T01:21:32 | 125,200,143 | 0 | 0 | MIT | 2018-05-26T16:58:12 | 2018-03-14T11:07:29 | R | UTF-8 | R | false | false | 1,792 | r | evaluate_glm_logistic.R | ## evalulate a simple logistic regression using glm()
# https://stats.stackexchange.com/questions/25389/obtaining-predicted-values-y-1-or-0-from-a-logistic-regression-model-fit
# data y simulated from a logistic regression model
# with three predictors, n=10000
x = matrix(rnorm(30000),10000,3)
# Linear predictor: all three main effects plus every interaction term.
# BUG FIX: the second main effect was written `1.42*x[2]` -- a single scalar
# element of the matrix, which merely shifted the intercept by a random
# constant. It is now `1.42*x[,2]`, the intended predictor column (all the
# interaction terms already used x[,2]).
lp = 0 + x[,1] - 1.42*x[,2] + .67*x[,3] + 1.1*x[,1]*x[,2] - 1.5*x[,1]*x[,3] +2.2*x[,2]*x[,3] + x[,1]*x[,2]*x[,3]
# Inverse-logit to get success probabilities, then draw Bernoulli outcomes.
p = 1/(1+exp(-lp))
y = runif(10000)<p
# fit a logistic regression model with the full three-way interaction
mod = glm(y~x[,1]*x[,2]*x[,3],family="binomial")
# perf(): given a probability cutoff, compute sensitivity, specificity,
# the overall classification rate, and the Euclidean distance of
# (sensitivity, specificity) from the ideal point (1, 1).
# `mod` must expose fitted probabilities via mod$fit; `y` is the 0/1 truth.
perf = function(cut, mod, y)
{
  # Predicted class: TRUE when the fitted probability exceeds the cutoff.
  predicted = mod$fit > cut
  positives = which(y == 1)
  # Fraction of true positives correctly flagged.
  sensitivity = mean(predicted[positives] == 1)
  # Fraction of true negatives correctly passed over.
  specificity = mean(predicted[-positives] == 0)
  # Overall agreement between truth and prediction.
  c.rate = mean(y == predicted)
  # Distance from the perfect classifier at (1, 1).
  gap = c(sensitivity, specificity) - c(1, 1)
  d = sqrt(sum(gap^2))
  out = t(as.matrix(c(sensitivity, specificity, c.rate, d)))
  colnames(out) = c("sensitivity", "specificity", "c.rate", "distance")
  return(out)
}
# Sweep 1000 cutoffs across (0, 1) and record the four metrics at each one.
s = seq(.01,.99,length=1000)
OUT = matrix(0,1000,4)
for(i in 1:1000) OUT[i,]=perf(s[i],mod,y)
# Plot all four metrics against the cutoff on one set of axes.
plot(s,OUT[,1],xlab="Cutoff",ylab="Value",cex.lab=1.5,cex.axis=1.5,ylim=c(0,1),type="l",lwd=2,axes=FALSE,col=2)
axis(1,seq(0,1,length=5),seq(0,1,length=5),cex.lab=1.5)
axis(2,seq(0,1,length=5),seq(0,1,length=5),cex.lab=1.5)
lines(s,OUT[,2],col="darkgreen",lwd=2)
lines(s,OUT[,3],col=4,lwd=2)
lines(s,OUT[,4],col="darkred",lwd=2)
box()
legend(0.5,.2,col=c(2,"darkgreen",4,"darkred"),
       lwd=c(2,2,2,2),
       c("Sensitivity","Specificity","Classification Rate","Distance"))
colnames(OUT) <- c("Sensitivity","Specificity","Classification Rate","Distance")
library(dplyr)
# BUG FIX: the columns were just renamed to the capitalized labels above,
# so the original lowercase subscripts ("sensitivity"/"specificity") raised
# "subscript out of bounds". Use the new column names instead.
OUT[OUT[,"Sensitivity"] == OUT[,"Specificity"]]
# Fraction of cutoffs where sensitivity and specificity coincide exactly.
mean(OUT[,"Sensitivity"] == OUT[,"Specificity"])
|
79cae1c023cd5457b0faa9e1658f8d1f973a54ca | ee976af89b21fa5a3b16c5eb9ffb99fd9b11ad91 | /plot4.R | 97423d453b6e942d8ea34a1a0087e5de7a128e2c | [] | no_license | jbrant/ExData_Project2 | 51b95f1cc4ff74fc21716874b61546b270960c3d | 75510d85f93f61ccb1152dfc88e71a25efc1d691 | refs/heads/master | 2021-01-19T16:58:44.074063 | 2014-07-26T03:40:34 | 2014-07-26T03:40:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,981 | r | plot4.R | #######################################################
## Plot 4
#######################################################
## Total PM2.5 emissions from coal-combustion-related sources across the
## U.S., per year, saved to plot4.png.
### Begin Setup ###
## Load Required libraries
library(ggplot2)
library(grid)
library(data.table)
## Read in National Emissions Inventory Dataset
## (cached: only re-read from disk if not already in the workspace)
if (!exists("NEI.data")) {
  NEI.data <- as.data.table(readRDS("summarySCC_PM25.rds"))
}
## Read in Source Code Classification Dataset
if (!exists("SCC.data")) {
  SCC.data <- as.data.table(readRDS("Source_Classification_Code.rds"))
}
### End Setup ###
## Merge the NEI and SCC datasets, including only the following columns:
##  1. SCC
##  2. Emissions
##  3. year
##  4. EI.Sector
data.merged <- merge(
  x = NEI.data[, c("SCC", "Emissions", "year"), with = FALSE],
  y = SCC.data[, c("SCC", "EI.Sector"), with = FALSE],
  by = "SCC")
## Extract emissions data for coal combustion sources.
## NOTE(review): grep("Coal", ...) is case-sensitive, so it keeps rows whose
## EI.Sector contains "Coal" exactly; sectors spelled differently would be
## missed -- confirm against the actual SCC sector names.
data.coal <- data.merged[EI.Sector %in% data.merged[grep("Coal", data.merged$EI.Sector)]$EI.Sector]
## Calculate total emissions from coal combustion sources per year
total.coal.emissions <- aggregate(data.coal$Emissions, list(data.coal$year), sum)
## Reset the names
names(total.coal.emissions) <- c("Year", "Emissions")
## Open the PNG file device
png(filename = "plot4.png", width = 640, height = 640, units = "px")
## Plot the total emissions from coal for each of the four years.
## Emissions/1e5 rescales tons to units of 100,000 tons for the y axis.
ggplot(total.coal.emissions, aes(x = Year, y = Emissions/1e5)) +
  geom_point(color = "indianred", size = 3) +
  geom_line(color = "steelblue") +
  theme(panel.margin = unit(0.8, "lines"),
        plot.title = element_text(face = "bold", size = 16, vjust = 1),
        axis.title.x = element_text(face = "bold"),
        axis.title.y = element_text(face = "bold")) +
  scale_x_continuous(breaks = seq(1999, 2008, by = 3)) +
  scale_y_continuous(limits = c(0, 6)) +
  labs(x = "Year",
       y = "Total Emissions (in 100,000 tons)",
       title = "Total Emissions from Coal Combustion (for U.S.)")
## Close the file device
dev.off() |
6d11fa558921e1222daa9e80f1aa35165e20ec2f | 622c68ff62c4bf91664c4157fdcadcc1d7359a0a | /man/multiply.p.s.epsilon.Rd | 19f7e5b6e8735389f2fe8f7863785890639f4575 | [] | no_license | kdkorthauer/MADGiC | 791b40130ea9df515cbd1f1ccd9aed90e730e378 | 4e763b526ebb8cbc87bdd6688761794e55b854cb | refs/heads/master | 2021-01-10T08:52:45.487014 | 2020-06-09T16:38:51 | 2020-06-09T16:38:51 | 47,279,073 | 3 | 2 | null | null | null | null | UTF-8 | R | false | false | 2,419 | rd | multiply.p.s.epsilon.Rd | \name{multiply.p.s.epsilon}
\alias{multiply.p.s.epsilon}
\title{Multiply the constants in \code{exome.constants} by the relative rate
parameters of the background mutation model}
\usage{
multiply.p.s.epsilon(X, p, s, epsilon, delta, gene)
}
\arguments{
\item{X}{a list with one entry for each chromosome, where
each entry is a matrix containing a row for each coding
base pair and the following columns: 1. base pair
position, 2. nucleotide number (see
\code{\link{convert.seq.to.num}}), 3. number of possible
nonsilent transitions, 4. number of possible nonsilent
transversions, 5. number of possible silent transitions,
6. number of possible silent transversions, and 7.
whether the position is in a CpG dinucleotide.}
\item{p}{a vector of length 7 containing the mutation
type relative rate parameters in the following order: A/T
transition, A/T transversion, non-CpG transition, non-CpG
transversion, CpG transition, CpG transversion, and
Indel.}
\item{s}{a vector of length 3 containing the relative
rate parameters for the three replication timing regions
(1=Early, 2=Middle, 3=Late)}
\item{epsilon}{a vector of length 3 containing the
relative rate parameters for the three expression levels
(1=Low, 2=Medium, 3=High)}
\item{delta}{vector of length 2 where the second element
represents the relative rate of mutation in olfactory
receptor (OR) genes compared to all others within a
similar replication timing and expression level category.
First element set to 1 (reference category).}
\item{gene}{a list with one entry for each gene, each
entry is another list of 5 elements: Ensembl name,
chromosome, base pairs, replication timing region
(1=Early, 2=Middle, 3=Late), and expression level (1=Low,
2=Medium, 3=High).}
}
\value{
an object of the same structure as exome.constants, but
columns 3-6 (e, f, c, d) have been multiplied by relative
rate parameters \code{p}, \code{s}, and \code{epsilon}.
}
\description{
A function to multiply the constants e, f, c, and d in
\code{exome.constants} by the relative rate parameters of
the background mutation model. The parameters used
depend on the mutation type, nucleotide context of the
position, and the replication timing region and
expression level of the gene that the position resides
in.
}
\note{
This internal function is not intended to be called by
the user.
}
|
46582ce4101f2d03f91de3bb2d2ee32f92c647c7 | 386708f184c5e25d44f1fb877fae9b4ebc3de286 | /ui.R | 7b21a5a070429986acd9d04b35a24640af92f241 | [] | no_license | DevinEnglish/bad-drivers-project | 7aeda0eb5834014911040eaea68f829abba8aab4 | 62f88cf7f667ed0b2a1afbb36cb3fcae7e7fb26c | refs/heads/master | 2020-05-21T13:19:28.064371 | 2019-06-05T23:14:14 | 2019-06-05T23:14:14 | 186,068,779 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 19,595 | r | ui.R | #
#
# User-interface definition of the "Bad Drivers" Shiny web application:
# U.S. bad-driver and insurance statistics (NHTSA 2017 data).
# Run the application by clicking 'Run App' in RStudio.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(shiny)
library(leaflet)
library(htmltools)
library(plotly)

# Data sets are read once at app start-up (shared with server.R).
bad_driving <- read.csv("data/bad-drivers.csv", stringsAsFactors = FALSE)
state_accidents <- read.csv("data/accidents-per-state-2017.csv", stringsAsFactors = FALSE)
alcohol_levels <- read.csv("data/BAC-levels-of-drivers-in-accidents.csv", stringsAsFactors = FALSE)
month <- read.csv("data/crashes-by-month-2017.csv", stringsAsFactors = FALSE)
severity <- read.csv("data/Crashes-by-severity.csv", stringsAsFactors = FALSE)
time_of_day <- read.csv("data/crashes-time-of-day-2017.csv", stringsAsFactors = FALSE)
driver_demographic <- read.csv("data/driver-demographic-2017.csv", stringsAsFactors = FALSE)
victim_types <- read.csv("data/injurys-and-victim-type.csv", stringsAsFactors = FALSE)

# Define the UI: one navbar tab per topic. This revision corrects
# user-visible typos in the display text only ("anual" -> "annual",
# "brinking" -> "drinking", "intoxiated" -> "intoxicated", "achohol" ->
# "alcohol", "Lift" -> "Lyft", "Demogaphic" -> "Demographic", etc.).
# All input/output ids and server-facing choice VALUES are unchanged.
shinyUI(fluidPage(
  div(img(src = 'banner.png'), align = "center"),
  navbarPage(
    theme = shinythemes::shinytheme("slate"),
    "Bad Drivers",
    # Introduction page
    tabPanel(
      "Home",
      fluidRow(column(7, offset = 3,
        h1("Bad Drivers and Insurance Statistics in the United States of America 2017", align = "center")
      )),
      p("On average 3,287 people die from a car accident every day, that is a lot of lives lost. Bad driving is a big problem in the United States,
        taking the 3rd place spot on leading causes of death. While we have come a long way, increasing car safety through seatbelts and airbags,
        and educating more of the population on the importance of safe driving habits, there is still more to be done. The information in this application
        was gathered by the National Highway Traffic Safety Administration 2017 annual motor safety report. This application's aim is to educate insurance companies
        and other services related to road safety on trends to better implement their services, as well as provide information to the general public to be more
        aware of their own safety efforts."),
      div(img(src = 'photo1.jpg', width = "1000px", height = "500px"), align = "center")
    ),
    # Interactive maps
    tabPanel(
      "State Data",
      h2("Number of drivers involved in fatal collisions (per billion miles)"),
      leafletOutput("map1"),
      div("As shown in this map, Southern, Central, and Central-Northern states have the highest number of drivers
          involved in fatal collisions per 1 billion miles. As a reminder, this is just correlation so further investigation
          is necessary in order to establish a well-established conclusion."),
      h2("Percentage of drivers who were speeding"),
      leafletOutput("map2"),
      div("While many drivers were speeding at the time of the accident, only Pennsylvania had a percentage that was 50 percent or above.
          Perhaps this suggests that most acts of incompetence that cause accidents are not speeding-related and that there are other signifiers
          to take into account when it comes to predicting what kinds of accidents to expect."),
      h2("Percentage of drivers who were alcohol impaired"),
      leafletOutput("map3"),
      div("On average, between 20 and 35 percent of fatal accidents were attributed to alcohol impairment in most states. However, states
          like Texas, South Carolina, Hawaii, Montana, Rhode Island and North Dakota had percentages that were in the high thirties or even
          low forties."),
      h2("Percentage of drivers who were NOT distracted"),
      leafletOutput("map4"),
      div("Surprisingly, the vast majority of drivers at the time of accidents were not classified as distracted. Wisconsin and
          Mississippi are exceptions as only 39 and 10 percent of drivers in accidents respectively were not classified as distracted."),
      h2("Percentage of drivers who had not been involved in previous accidents"),
      leafletOutput("map5"),
      div("The majority of drivers involved in such accidents do not have a history of accidents in the past. However, the percentage
          of those who were not involved in previous accidents does vary by state. The East Coast seems to have noticeably lower percentages
          of drivers who were not involved in previous accidents than other states. Maybe this could be due to higher population density leading
          to a higher concentration of motor vehicles on the roads that is responsible for people being more likely to be involved in accidents in the past."),
      h2("Price of car insurance premiums"),
      leafletOutput("map6"),
      div("Most of the North Eastern USA, Florida, and Louisiana have the highest car insurance premiums.
          Overall, states in the south have higher insurance premiums than states in the north. The midwest is
          notable for having relatively low car insurance premiums in comparison to other states. Another interesting
          detail is that Washington and Montana are notable for having higher insurance premiums in comparison to
          nearby states. "),
      h2("Cost of losses incurred by insurance companies per insured driver"),
      leafletOutput("map7"),
      div("The East coast and Southern states are shown to have the highest costs of losses incurred by insurance
          companies per insured drivers. This could be linked to the number of drivers involved in fatal collisions
          per 1 billion miles per state. However, Montana, North Dakota, and South Dakota have some of the highest
          numbers of drivers involved in fatal collisions per 1 billion miles but still have some of the lowest costs
          of losses incurred by insurance. Further investigation would be necessary to find a better sense of correlation
          and causation.")
    ),
    tabPanel(
      "Drunk Driving",
      navbarPage(
        "The Data",
        tabPanel(
          "Drunk Driving",
          sidebarLayout(
            sidebarPanel(
              sliderInput("yearForAlcGraph", label = h3("Year"), min = 1988, #fix scaling to be in years
                          max = 2017, value = 2000, animate = TRUE),
              p("While many accidents happened when the driver had a BAC of 0, these accidents were likely caused
                by other distractions such as texting while driving or other distracted driving. While those accidents are a majority,
                accidents caused by those legally intoxicated have large numbers as well. Since 1988 the number of accidents caused by drunk driving
                has gone down significantly as research and awareness of alcohol's harm has increased. Services such as Uber and Lyft have also helped to
                decrease the number of intoxicated people on the roads, by providing them a safe way to get home.")),
            mainPanel(
              plotlyOutput("alcoholLevelsGraph"),
              p(" Getting behind the wheel of a vehicle after a night of drinking is
                a crime. Driving under the influence (DUI) is defined as operating a vehicle with a blood alcohol content of 0.08% or
                above. A DUI is classified as a misdemeanor in all states, and punishment usually means up to six months in jail on a first offense, time may be increased by situation.
                Even drinking a small amount of alcohol can be harmful. The effects of alcohol put you at a higher risk of accident
                or road injury. It takes a lot of ability to drive safely such as concentration and quick judgments,
                both of which alcohol affects."),
              p(" Slow reaction times: Drinking slows your response time which increases the likelihood of an accident."),
              p(" Reduced coordination: Drinking affects your vision, and hand and foot coordination, all of which are very important when driving."),
              p(" Decreased concentration: Alcohol greatly affects concentration, something very important when driving such as staying in your lane,
                paying attention to other cars and understanding traffic signs.")
            )
          )
        ),
        tabPanel("Drunk Driving Table", mainPanel(tableOutput("alcoholLevelsTable")))
      )
    ),
    tabPanel(
      "Trends in 2017",
      navbarPage(
        "The Data",
        tabPanel(
          "Monthly Trends",
          sidebarLayout(
            sidebarPanel(
              radioButtons("byMonthAccidentType", label = h3("Type of Accident"),
                           choices = list("Fatal" = "Fatal", "Injury Only" = "Injury Only", "Property Damage" = "Property Damage"),
                           selected = "Fatal"),
              p("Fatal accidents seem to decline when transitioning from Fall to winter and rising significantly from early winter to mid summer.
                Injuries also appear to be more common during the spring, summer, and fall compared to winter months. Another interesting observation is that mid summer
                shows noticeably fewer instances of property damage compared to winter, spring, and fall. ")
            ),
            mainPanel(plotlyOutput("byMonthGraph"))
          )
        ),
        tabPanel(
          "Weekly Trends",
          sidebarLayout(
            sidebarPanel(
              selectInput("dayOfWeek", label = h3("Day of the Week"),
                          choices = list("Monday" = 3, "Tuesday" = 4, "Wednesday" = 5, "Thursday" = 6, "Friday" = 7,
                                         "Saturday" = 8, "Sunday" = 2), selected = 3),
              p("The average number of accidents seems to rise from early morning to 3-6 PM and declines shortly after. Perhaps this is due to rush hour leading
                to a higher concentration of cars on the road.")
            ),
            mainPanel(plotlyOutput("TODGraph"))
          )
        ),
        navbarMenu(
          "Tables",
          tabPanel("Monthly Trends Table",
                   tableOutput("monthTable")
          ),
          tabPanel("Weekly Trends Table",
                   tableOutput("timeOfDayTable"))
        )
      )
    ),
    tabPanel(
      "Trends Through the Years",
      navbarPage(
        "The Data",
        tabPanel(
          "Accident Severity",
          sidebarLayout(
            sidebarPanel(
              radioButtons("severityAccidentType", label = h3("Type of Accident"),
                           choices = list("Fatal" = "Fatal", "Injury Only" = "Injury Only", "Property Damage" = "Property Damage"),
                           selected = "Fatal"),
              p("Over the years, fatal injuries have decreased and/or plateaued. However, starting in 2010, the number of fatal accidents has begun to rise. Injury-only
                accidents also experienced similar declines and rises except without the same plateauing phenomenon. However, for instances of property damage, relatively consistent
                numbers have been maintained between the mid 1990s and late 2000's. ")
            ),
            mainPanel(
              plotlyOutput("severityGraph"),
              p("Many innovative technologies have helped reduce the number of accidents on our roads. Some of those technologies are..."),
              p("Passenger restraints such as seat belts"), p("Airbags"),
              p("Crash avoidance equipment such as lights and reflectors"),
              p("Driver assistance systems such as Electronic Stability Control"),
              p("Safety glass"),
              p("Car services such as Uber help get intoxicated people off the road"))
          )
        ),
        tabPanel("Severity Table",
                 tableOutput("severityTable"))
      )),
    tabPanel(
      "Accident Demographic",
      navbarPage(
        "The Data",
        tabPanel(
          "Driver Demographic",
          sidebarLayout(
            sidebarPanel(
              radioButtons("age", label = h3("Age"),
                           # Label fix: the oldest bracket previously displayed ">75" but its
                           # server-side value (and data category) is ">74"; the value is unchanged.
                           choices = list("<16" = "<16", "16-20" = "16-20", "21-24" = "21-24", "25-34" = "25-34",
                                          "35-44" = "35-44", "45-54" = "45-54", "55-64" = "55-64", "65-74" = "65-74", ">74" = ">74"),
                           selected = "<16"),
              p("In any given age group, males were more likely to be involved in car accidents than females. The age group that had the most number of accidents were males and females age 25-34.")
            ),
            mainPanel(plotlyOutput("demoGraph"))
          )
        ),
        tabPanel(
          "Victim Demographic",
          sidebarLayout(
            sidebarPanel(
              radioButtons("victimType", label = h3("Type of Transportation Victim Was Using"),
                           choices = list("Passenger Car" = "Passenger Car", "Light Truck" = "Light Truck",
                                          "Large Truck" = "Large Truck", "Bus" = "Bus", "Motorcycle" = "Motorcycle",
                                          "Pedestrian" = "Pedestrian", "Cyclist" = "Cyclist", "Total" = "Total"), selected = "Passenger Car"),
              p("In 1997, the total number of vehicles victimized in accidents peaked at 3.348 million vehicles. Every year, the majority of vehicles victimized in accidents
                were Light Trucks.")
            ),
            mainPanel(plotlyOutput("victimDemo"))
          )),
        navbarMenu(
          "Tables",
          tabPanel("Driver Demographic Table",
                   tableOutput("driverDemographicTable")
          ),
          tabPanel("Victim Demographic",
                   tableOutput("victimTypeTable")
          )
        )
      )
    )
  )
))
|
dbe36f579fd6ba0db7f89ef5bb2fc9d0ad4eb41d | b05bd9f10c51f8115043fcfd0e42caf8c7a0e555 | /assignment5/run_periodically.R | 1f2cb636667667d095394949742c1143c51b8433 | [] | no_license | johngonz/datascience_uwash | 56e440d34e95bb2899033624d978fd7f4b527918 | 9f2a11f0083e1885fb0d591c1136b31d0cd3a981 | refs/heads/master | 2021-01-10T09:55:50.717129 | 2015-12-09T09:24:22 | 2015-12-09T09:24:22 | 43,198,338 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 345 | r | run_periodically.R | # Run this code periodically
# Update R itself (installr is Windows-oriented; it is installed on first
# use, then loaded).
if(!require(installr)) {install.packages("installr"); require(installr)} #load / install+load installr
updateR()
# Install any CRAN packages that are available but not yet installed
# (the original comment said "updates all R packages", which overstated it).
x <- packageStatus(repositories="http://cran.r-project.org/src/contrib")
# FIX: use the full component name `avail`; the original `x$avai` worked
# only through fragile partial matching of the list element name.
st <- x$avail["Status"]
install.packages(rownames(st)[which(st$Status=="not installed")]) |
8498fef2b90b1d815942ba54bb17096365fe055d | e68e99f52f3869c60d6488f0492905af4165aa64 | /man/torch_greater.Rd | 887819a8ad43c7104a9243f22610c1a295557684 | [
"MIT"
] | permissive | mlverse/torch | a6a47e1defe44b9c041bc66504125ad6ee9c6db3 | f957d601c0295d31df96f8be7732b95917371acd | refs/heads/main | 2023-09-01T00:06:13.550381 | 2023-08-30T17:44:46 | 2023-08-30T17:44:46 | 232,347,878 | 448 | 86 | NOASSERTION | 2023-09-11T15:22:22 | 2020-01-07T14:56:32 | C++ | UTF-8 | R | false | true | 501 | rd | torch_greater.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gen-namespace-docs.R,
% R/gen-namespace-examples.R, R/gen-namespace.R
\name{torch_greater}
\alias{torch_greater}
\title{Greater}
\usage{
torch_greater(self, other)
}
\arguments{
\item{self}{(Tensor) the tensor to compare}
\item{other}{(Tensor or float) the tensor or value to compare}
}
\description{
Greater
}
\section{greater(input, other, *, out=None) -> Tensor }{
Alias for \code{\link[=torch_gt]{torch_gt()}}.
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.