blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
81e2e40551ad35c7736d08912cbc8295098282bb
|
229e9aba1aeab0e0d9aa6c5e391ab502bd94e74a
|
/R/datadocumentation.R
|
e3448b722ad395af95d58ee2fd85d7cacc2661b9
|
[] |
no_license
|
l1994z1116q3/qmj
|
ecb855e7dc45754076b0380b7230466e2da95c39
|
9312d91aeb380771626f779ec0dd91b7185d477b
|
refs/heads/master
| 2021-01-17T20:13:55.030418
| 2015-01-21T05:18:19
| 2015-01-21T05:18:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,714
|
r
|
datadocumentation.R
|
## Roxygen documentation stubs for datasets shipped with the package.
## Each block documents one data object; the trailing NULL is the
## standard placeholder that carries dataset-level roxygen docs.

#' Balance sheets of all companies
#'
#' A dataset containing all balance sheets, where available,
#' for all companies in the companies.RData file. Among other things,
#' stores:
#' \itemize{
#' \item Cash & Equivalents.
#' \item Short Term Investments.
#' \item Total Inventory.
#' \item Prepaid Expenses.
#' \item Total Current Assets.
#' }
#'
#' @name balancesheets
#' @docType data
#' @keywords data
NULL
#' Cashflow statements of all companies
#'
#' A dataset containing all cashflow statements, where available,
#' for all companies in the companies.RData file. Among other things,
#' stores:
#' \itemize{
#' \item Net Income/Starting Line.
#' \item Depreciation/Depletion.
#' \item Amortization.
#' \item Deferred Taxes.
#' \item Changes in Working Capital.
#' }
#'
#' @name cashflows
#' @docType data
#' @keywords data
NULL
#' Income statements of all companies
#'
#' A dataset containing all income statements, where available,
#' for all companies in the companies.RData file. Among other things,
#' stores:
#' \itemize{
#' \item Total Revenue.
#' \item Gross Profit.
#' \item Research & Development.
#' \item Selling/General/Admin. Expenses, Total.
#' \item Operating Income.
#' }
#'
#' @name incomestatements
#' @docType data
#' @keywords data
NULL
#' A list of all companies of interest
#'
#' Stores a sample set of companies (namely, all those starting with the letter "A" as found on investorguide.com).
#'
#' @name companies
#' @docType data
#' @keywords data
NULL
#' A dataframe of price returns for companies
#'
#' Stores calculated price returns (pret) for the companies in companies.RData.
#'
#' @name dailydata
#' @docType data
#' @keywords data
NULL
|
12b5a9f1dbc3aaad0112d34278418f21a6237b16
|
1501d8cf2be17d8df0a981cbf50819e12a45f137
|
/FinalExam/RScript_Final_6.R
|
9186031d73dcd21b15c00d956ce03d639450adc8
|
[] |
no_license
|
snandi/Stat641_Fall2015
|
b93bdfbe2916f514eaf3672fbcfbd24fccbf809f
|
2efc75f457357c865b04dc1fe618c48702079e73
|
refs/heads/master
| 2021-01-21T21:55:30.539662
| 2016-03-30T20:05:22
| 2016-03-30T20:05:22
| 42,122,464
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,784
|
r
|
RScript_Final_6.R
|
########################################################################
## This script is Prob 5, Final Exam, Stat 641, Fall 2015
## Group-sequential survival analysis: per-period log-rank tests,
## Lan-DeMets error-spending boundaries, drift/exit probabilities, and
## conditional power at the final analysis.
########################################################################
## NOTE(review): clearing the workspace inside a script is discouraged;
## retained to reproduce the original exam workflow. (The original also
## called rm(list = objects(...)), which is redundant with ls().)
rm(list = ls(all.names = TRUE))
#dev.off()
library(xtable)
library(survival)
########################################################################
## Load header files and source functions
########################################################################
#source('~/Courses/Stat641_Fall2015/HeaderFile_Nandi.R')
#source('~/RScripts/HeaderFile_Nandi.R')
RScriptPath <- '~/Courses/Stat641_Fall2015/FinalExam/'
source('~/Courses/Stat641_Fall2015/fn_Library_Stat641.R')
Filename <- paste0(RScriptPath, 'dataFinal2.csv')
Data <- read.csv(Filename)
## Provides bounds() and drift() (Lan-DeMets spending-function methods).
source('ldbounds.R')
########################################################################
## Part a: log-rank chi-square and Z statistic for each of the five
## analysis periods (time1/dead1 ... time5/dead5).
########################################################################
mod1 <- survdiff(Surv(time1, dead1) ~ z, data = Data)
mod1
mod2 <- survdiff(Surv(time2, dead2) ~ z, data = Data)
mod2
mod3 <- survdiff(Surv(time3, dead3) ~ z, data = Data)
mod3
mod4 <- survdiff(Surv(time4, dead4) ~ z, data = Data)
mod4
mod5 <- survdiff(Surv(time5, dead5) ~ z, data = Data)
mod5
## Collect the five fits once instead of repeating sum()/round() by hand.
mods <- list(mod1, mod2, mod3, mod4, mod5)
chisqs <- vapply(mods, function(m) m$chisq, numeric(1))
Table_a <- cbind(Period = 1:5,
                 Chi = round(chisqs, 4),
                 Z = round(sqrt(chisqs), 4))
xtable(Table_a, digits = c(0, 0, 4, 4))
########################################################################
## Part b: information fractions relative to the planned total number of
## events N2, and the corresponding error-spending boundary.
########################################################################
N2 <- 1272
## Observed event counts at each interim analysis.
obsEvents <- vapply(mods, function(m) sum(m$obs), numeric(1))
IRatio1 <- c(obsEvents[1:4], N2) / N2
Bound1 <- bounds(IRatio1, iuse = 3, alpha = 0.025, phi = 2)
xtable(summary(Bound1)$bounds, digits = c(0, 4, 4, 4, 4, 4, 4))
########################################################################
## Part c: information fractions relative to the events actually
## observed at the final (fifth) analysis.
########################################################################
obsEvents[5]
IRatio2 <- obsEvents / obsEvents[5]
round(IRatio2, 4)
Bound2 <- bounds(IRatio2, iuse = 3, alpha = 0.025, phi = 2)
xtable(summary(Bound2)$bounds, digits = c(0, 4, 4, 4, 4, 4, 4))
########################################################################
## Part e: exit probabilities/power under drift = 0 for the mixed
## boundary (interim bounds from Bound1, final bound from Bound2).
########################################################################
## BUG FIX: the original passed t = IRatio, but no object named IRatio is
## ever defined in this script. IRatio2 matches the information fractions
## used with the identical zb vector in lr5 below.
DriftObj_e <- drift(
  t = IRatio2,
  zb = c(Bound1$upper[1:4], Bound2$upper[5]),
  ## zb = c(Bound2$upper),
  za = rep(-10, 5),
  drft = 0
)
DriftObj_e$power
## Cumulative exit probabilities analysis-by-analysis. Kept as explicit
## calls because lr1 intentionally uses Bound1$upper[1] rather than the
## Bound1[1:(k-1)] + Bound2[5] pattern of lr2..lr5.
lr1 <- drift(zb = c(Bound1$upper[1]), za = rep(-10, 1), t = IRatio2[1], drft = 0)
lr2 <- drift(zb = c(Bound1$upper[1], Bound2$upper[5]), za = rep(-10, 2), t = IRatio2[1:2], drft = 0)
lr3 <- drift(zb = c(Bound1$upper[1:2], Bound2$upper[5]), za = rep(-10, 3), t = IRatio2[1:3], drft = 0)
lr4 <- drift(zb = c(Bound1$upper[1:3], Bound2$upper[5]), za = rep(-10, 4), t = IRatio2[1:4], drft = 0)
lr5 <- drift(zb = c(Bound1$upper[1:4], Bound2$upper[5]), za = rep(-10, 5), t = IRatio2[1:5], drft = 0)
lr1$exit
lr2$exit
lr3$exit
lr4$exit
lr5$exit
########################################################################
## Part f: conditional power of crossing the final bound bK given the
## interim B-values, under drift Theta.
########################################################################
bK <- Bound2$upper.bound[5]
## Renamed from 'T' (which masks the logical constant TRUE) to tFrac.
tFrac <- IRatio2
Z <- Table_a[, 'Z']
## Flip the sign of the first Z (direction of the observed effect).
Z[1] <- -Z[1]
B <- sqrt(tFrac) * Z   # B-values: B(t) = sqrt(t) * Z(t)
Theta <- -log(0.8) * sqrt(1140 / 4)
1 - pnorm((bK - B[1] - (1 - tFrac[1]) * Theta) / (sqrt(1 - tFrac[1])))
1 - pnorm((bK - B[2] - (1 - tFrac[2]) * Theta) / (sqrt(1 - tFrac[2])))
1 - pnorm((bK - B[3] - (1 - tFrac[3]) * Theta) / (sqrt(1 - tFrac[3])))
1 - pnorm((bK - B[4] - (1 - tFrac[4]) * Theta) / (sqrt(1 - tFrac[4])))
|
7899bb27a5a978c8d94b847bc70ba918c5195ddd
|
5d4a3cd2b852da7134631c2540b4f500bb1baa13
|
/electricity/electricity.R
|
ee10ea6dfc18625d566b256b9701dcae22af270f
|
[] |
no_license
|
NTomasetti/UVB
|
23b46b1f659ef54f7b5925ce5f9822ec032b79bf
|
0bcfaa41f2b2468aa601ae2df214afb9100c570b
|
refs/heads/master
| 2020-03-09T16:13:49.763912
| 2018-10-05T01:54:09
| 2018-10-05T01:54:09
| 128,879,286
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,276
|
r
|
electricity.R
|
library(tidyverse)
library(lubridate)
library(Rcpp)
library(RcppArmadillo)
library(RcppEigen)
library(rstan)
source('RFuns.R')
source('electricity/elecAux.R')
sourceCpp('electricity/electricityT.cpp')
## Number of households to model.
N <- 10
y <- readRDS('electricity/elecY.RDS')[, 1:N]
## Log-transform with a small offset so zero meter readings stay finite.
y <- log(y + 0.01)
x <- readRDS('electricity/elecX.RDS')[1:nrow(y), ]
## Cumulative half-hourly observation counts at each analysis time:
## end of day 31, end of day 92, then daily through day 458.
## (Vectorised: the original grew these with c() inside loops; the
## resulting values are identical.)
Tseq <- c(48 * 31, 48 * 92, 48 * (93:(93 + 365)))
## Daily analysis times across the training window (days 31..92).
TseqTrain <- 48 * (31:92)
K <- c(1, 2)
## Autoregressive lag structure (half-hour, daily, weekly seasonals).
## NOTE(review): this masks base::order for the rest of the script.
order <- c(1, 2, 3, 48, 96, 144, 336)
## Parameter count per mixture component.
## NOTE(review): 'dim' masks base::dim from here on.
dim <- 2 + ncol(x) + length(order)
samples <- 100
switch <- 1     # 1 = fit the regime-switching variant (masks base::switch)
k <- 2          # index into K: run with K[k] mixture components
var <- 0        # 1 = per-half-hour (time-varying) parameterisation (masks stats::var)
batchSize <- 20
results <- list()
counter <- 1
## Per-household component / switching-state probabilities at each analysis.
pkFull <- array(0, dim = c(N, K[k], length(Tseq)))
psFull <- array(0, dim = c(N, K[k], length(Tseq)))
lambda <- NULL
## Add K[k] many sets of switching params
if (switch) {
  lambda <- rep(c(0, 0, 0, 0.1, 0, 0.1, 0, 0, 0.1), 2 * K[k])
}
## Repeat for each dynamic model: log variance, mean, sarima parameters,
## temperature coefficient. Fresh random draws per component so the
## mixture components start from distinct means.
for (ki in 1:K[k]) {
  lambda <- c(lambda, c(rnorm(dim, 0, 0.1), diag(0.1, dim)))
}
priorPS <- matrix(0.1, K[k], N)
if (switch) {
  ## Switching model: separate priors for the switching block and the
  ## per-component dynamic parameters (inverse-Cholesky precision form).
  priorMean <- array(0, dim = c(dim, K[k], 2))
  priorMean[3, , 1] <- log(0.01)
  priorLinv <- array(0, dim = c(dim, dim, 2 * K[k]))
  for (ki in 1:K[k]) {
    priorLinv[1:3, 1:3, ki] <- solve(chol(diag(1, 3)))
    priorLinv[4:5, 1:2, ki] <- solve(chol(diag(1, 2)))
    priorLinv[, , K[k] + ki] <- solve(chol(diag(1, dim)))
  }
} else {
  priorMean <- array(0, dim = c(dim, K[k], 1))
  priorLinv <- array(0, dim = c(dim, dim, K[k]))
  for (ki in 1:K[k]) {
    priorLinv[, , ki] <- solve(chol(diag(1, dim)))
  }
}
## Uniform prior over components for every household.
priorProbK <- matrix(1 / K[k], nrow = K[k], ncol = N)
pkFull[, , 1] <- t(priorProbK)
psFull[, , 1] <- t(priorPS)
fitMat <- matrix(0, length(lambda), length(Tseq))
# Main sequential-updating loop: at each analysis time Tseq[t], (re)fit the
# variational approximation on data up to Tseq[t], then form one-day-ahead
# forecast densities for each household and update component/state
# probabilities for the next analysis.
for(t in 2:(length(Tseq)-1)){
print(paste0('t: ', t, ', switch : ', switch, ', k: ', k, ', time: ', Sys.time()))
# Fit model, either via standard VB (t = 2 --> First fit) or updated VB
# Switching and non switching need different inputs so are split with if(switch)
if(t == 2){
if(var){
# Time-varying (var) setup: first run the constant-parameter model over the
# daily training grid to get starting values, then expand to 48 half-hours.
for(t1 in 2:(length(TseqTrain) - 1)){
print(paste(t1, Sys.time()))
if(t1 == 2){
# First training fit from the cold-start lambda and uniform priors.
fit <- fitVB(data = y[1:TseqTrain[2],],
lambda = lambda,
model = elecModel,
dimTheta = (4*switch + dim) * K[k],
S = 50,
maxIter = 2000,
threshold = 0.05 * N,
priorMean = priorMean,
priorLinv = priorLinv,
probK = priorProbK,
ps1 = priorPS,
order = order,
Tn = TseqTrain[2] - TseqTrain[1],
x = x[1:TseqTrain[2],],
uniformRho = TRUE,
var = FALSE,
switching = switch,
batch = batchSize)
} else {
# Subsequent training fits: turn the previous posterior into the new prior
# and Monte-Carlo average the component/state probabilities over theta draws.
updatePriors <- updatePrior(fit$lambda, FALSE, switch, K[k], dim)
theta <- sampleTheta(updatePriors, samples, FALSE, switch, K[k], dim, 11:13)
updateProbK <- matrix(0, K[k], N)
updateProbS1 <- matrix(0, K[k], N)
for(j in 1:N){
for(i in 1:samples){
if(switch){
for(ki in 1:K[k]){
probs <- probSKHF(y = y[1:TseqTrain[t1-1], j],
x = x[1:TseqTrain[t1-1], ],
thetaD = matrix(theta$tDynFull[,,ki,i], ncol = 1),
thetaC = theta$tConsFull[,ki,i],
order = order,
pS1 = priorPS[ki, j],
prior = priorProbK[ki, j],
T = TseqTrain[t1-1] - TseqTrain[1],
var = FALSE)
updateProbK[ki,j] <- updateProbK[ki,j] + probs[1] / samples
# NOTE(review): the RHS reads updateProbS1[j] (single subscript) while the
# LHS writes updateProbS1[ki, j]; this looks like it should be
# updateProbS1[ki, j] on both sides -- confirm against the intended update.
updateProbS1[ki, j] <- updateProbS1[j] + probs[2] / samples
}
} else {
for(ki in 1:K[k]){
probs <- probK(y = y[1:TseqTrain[t1-1], j],
x = x[1:TseqTrain[t1-1], ],
theta = matrix(theta$tDynFull[,,ki,i], ncol = 1),
order = order,
prior = priorProbK[ki, j],
T = TseqTrain[t1-1] - TseqTrain[1],
var = FALSE)
updateProbK[ki,j] <- updateProbK[ki,j] + probs / samples
}
}
}
}
# Normalise log-scale component weights column-wise (softmax with max-shift
# for numerical stability).
updateProbK <- apply(updateProbK, 2, function(x){
y = x - max(x);
exp(y) / sum(exp(y))
})
fit <- fitVB(data = y[1:TseqTrain[t1],],
lambda = c(fit$lambda),
model = elecModel,
dimTheta = (4*switch + dim) * K[k],
S = 50,
maxIter = 2000,
threshold = 0.05 * N,
priorMean = updatePriors$updateMean,
priorLinv = updatePriors$updateLinv,
probK = updateProbK,
ps1 = updateProbS1,
order = order,
Tn = TseqTrain[t1] - TseqTrain[t1-1],
x = x[1:TseqTrain[t1],],
uniformRho = FALSE,
var = FALSE,
switching = switch,
batch = batchSize)
}
}
saveRDS(fit, 'varSetup.RDS')
# Expand the constant-parameter fit into a per-half-hour (48x) lambda:
# repeat each mean 48 times, and convert the Cholesky factor to per-
# coordinate log standard deviations.
lambdaVar <- NULL
if(switch){
lambdaVar <- fit$lambda[1: (10 * K[k])]
}
paramPerMod <- dim * (dim + 1)
for(ki in 1:K[k]){
# Attach Means, repeat over 48 halfhours
lambdaVar <- c(lambdaVar, rep(fit$lambda[10 * K[k] * switch + (ki - 1) * paramPerMod + 1:dim], rep(48, dim)))
U <- matrix(fit$lambda[10 * K[k] * switch + (ki - 1) * paramPerMod + dim + 1:(dim^2)], dim)
Sigma <- t(U) %*% U
logsd <- log(sqrt(diag(Sigma)))
# Attach log(sd)
lambdaVar <- c(lambdaVar, rep(logsd, rep(48, dim)))
}
priorMean <- array(0, dim = c(dim, 48, switch + K[k]))
priorLinv <- array(0, dim = c(48, 48, (dim + switch) * K[k]))
if(switch){
priorMean[2, , 1] <- log(0.01)
for(ki in 1:K[k]){
priorLinv[1:2, 1:2, ki] <- priorLinv[3:4, 1:2, ki] <- solve(chol(diag(10, 2)))
}
}
# Random-walk style prior across the 48 half-hours: covariance
# Sigma[i, j] = min(varSeq[i], varSeq[j]) with growing variance.
initialVar <- 1
updateVar <- 0.25
varSeq <- cumsum(c(initialVar, rep(updateVar, 47)))
Sigma <- matrix(0, 48, 48)
for(i in 1:48){
for(j in 1:48){
Sigma[i, j] <- min(varSeq[i], varSeq[j])
}
}
SigmaLinv <- solve(t(chol(Sigma)))
for(ki in 1:K[k]){
for(i in 1:dim){
priorLinv[,,K[k] * switch + (ki - 1) * dim + i] <- SigmaLinv
}
}
fit <- fitVB(data = y[1:Tseq[2],],
lambda = lambdaVar,
model = elecModel,
dimTheta = (4*switch + dim * 48) * K[k],
S = 50,
maxIter = 2000,
threshold = 0.05 * N,
priorMean = priorMean,
priorLinv = priorLinv,
probK = priorProbK,
ps1 = priorPS,
order = order,
Tn = Tseq[2] - Tseq[1],
x = x[1:Tseq[2],],
uniformRho = TRUE,
var = TRUE,
switching = switch,
batch = batchSize)
saveRDS(fit, 'varInitialFit.RDS')
fitMat <- matrix(0, length(lambdaVar), length(Tseq))
} else {
# Constant-parameter first fit (no time-varying expansion).
fit <- fitVB(data = y[1:Tseq[2],],
lambda = lambda,
model = elecModel,
dimTheta = (5*switch + dim) * K[k],
S = 10,
maxIter = 2000,
threshold = 0.05 * N,
priorMean = priorMean,
priorLinv = priorLinv,
probK = priorProbK,
ps1 = priorPS,
order = order,
Tn = Tseq[2] - Tseq[1],
x = x[1:Tseq[2],],
uniformRho = TRUE,
var = FALSE,
switching = switch)
# batch = batchSize)
}
} else {
# Updated VB for t > 2: previous posterior becomes the prior; component and
# state probabilities come from the forecast step of the previous iteration.
fit <- fitVB(data = y[1:Tseq[t],],
lambda = c(fit$lambda),
model = elecModel,
dimTheta = (4*switch + ifelse(var, 48, 1) * dim) * K[k],
S = 50,
maxIter = 2000,
threshold = 0.05 * N,
priorMean = updatePriors$updateMean,
priorLinv = updatePriors$updateLinv,
probK = updateProbK,
ps1 = updateProbS1,
order = order,
Tn = Tseq[t] - Tseq[t-1],
x = x[1:Tseq[t],],
uniformRho = FALSE,
var = var,
switching = switch,
batch = batchSize)
}
fitMat[,t] <- fit$lambda
updatePriors <- updatePrior(fit$lambda, var, switch, K[k], dim)
theta <- sampleTheta(updatePriors, samples, var, switch, K[k], dim, 11:13)
# For each household:
# 1) Calculate p(k = j) / p(S_T = 1)
# 2) Create forecast densities per k and combine with above probabilities
# 3) Calculate updated priors for p(k) and p(s) to be pushed into the next update
# 4) Evaluate forecast densities
# Steps 1) and 2) are handled in c++
support <- seq(log(0.009), max(y[Tseq[t-1]:Tseq[t],]) + sd(y[Tseq[t-1]:Tseq[t],]), length.out = 5000)
updateProbK <- matrix(0, K[k], N)
updateProbS1 <- matrix(0, K[k], N)
for(j in 1:N){
density <- matrix(0, 5000, 48)
for(i in 1:samples){
if(switch){
newPK <- rep(-Inf, K[k])
newPS <- rep(0, K[k])
for(ki in 1:K[k]){
# Skip components whose posterior probability has already collapsed to 0.
if(pkFull[j, ki, t-1] > 0){
probs <- probSKHF(y = y[1:Tseq[t], j],
x = x[1:Tseq[t],],
thetaD = theta$tDynFull[,,ki, i],
thetaC = theta$tConsFull[,ki, i],
order = order,
pS1 = psFull[j, ki, t-1],
prior = pkFull[j, ki, t-1],
T = Tseq[t] - Tseq[t-1],
var = var)
newPK[ki] <- probs[1]
newPS[ki] <- probs[2]
}
}
# Softmax the log component weights.
newPK <- newPK - max(newPK)
newPK <- exp(newPK) / sum(exp(newPK))
fcDensity <- forecastHF(y = y[1:Tseq[t+1], j],
x = x[1:Tseq[t+1],],
thetaC = theta$tConsFull[,,i],
thetaD = array(theta$tDynFull[,,,i], dim = c(dim(theta$tDynFull)[1:3])),
fcVar = array(theta$fcVar[,,,i], dim = c(dim(theta$fcVar)[1:3])),
order = order,
pS1prior = newPS,
pkNorm = newPK,
support = support,
var = var)
density <- density + fcDensity / samples
updateProbK[,j] <- updateProbK[,j] + newPK / samples
# NOTE(review): updateProbS1 is a K x N matrix but is indexed with a single
# subscript here; with K[k] > 1 this assigns the length-K newPS to one
# element (recycling warning, only the first value kept). Probably meant
# updateProbS1[, j] -- confirm.
updateProbS1[j] <- updateProbS1[j] + newPS / samples
} else {
newDataLL <- rep(0, K[k])
for(ki in 1:K[k]){
if(pkFull[j, ki, t-1] > 0){
newDataLL[ki] <- sum(arLikelihood(y[1:Tseq[t], j], x[1:Tseq[t],], matrix(theta$tDynFull[,,ki,i], nrow = dim),
order, Tseq[t] - Tseq[t-1], TRUE, var))
}
}
# Posterior component weights via Bayes rule on the log scale.
newDataLL <- newDataLL + log(pkFull[j,,t-1])
newDataLL[is.na(newDataLL)] <- -Inf
newDataLL <- newDataLL - max(newDataLL)
newPK <- exp(newDataLL) / sum(exp(newDataLL))
updateProbK[,j] <- updateProbK[,j] + newPK / samples
fcDensity <- forecastStandard(y = y[1:Tseq[t+1], j],
x = x[1:Tseq[t+1],],
theta = array(theta$tDynFull[,,,i], dim = c(dim(theta$tDynFull)[1:3])),
order = order,
pkNorm = newPK,
fcVar = array(theta$fcVar[,,,i], dim = c(dim(theta$fcVar)[1:3])),
support = support,
var = var)
density <- density + fcDensity / samples
}
}
# Evaluate the mixed forecast density at the realised observation for each
# forecast horizon h via linear interpolation on the support grid.
for(h in 1:48){
lower <- max(which(support < y[Tseq[t] + h, j]))
upper <- min(which(support > y[Tseq[t] + h, j]))
dens <- linearInterpolate(support[lower], support[upper], density[lower, h], density[upper, h], y[Tseq[t] + h, j])
map <- support[which.max(density[,h])]
results[[counter]] <- data.frame(ls = log(dens),
map = map,
actual = y[Tseq[t] + h, j],
id = j,
h = h,
t = Tseq[t],
k = K[k],
var = var,
switch = switch)
counter <- counter + 1
}
}
# Sanity check: per-household component weights must sum to one.
# NOTE(review): the print() is unreachable -- break exits the loop before it
# runs. The two statements were probably meant in the opposite order.
if(any(round(colSums(updateProbK), 5) != 1)){
break
print('check pk')
}
pkFull[,,t] <- t(updateProbK)
psFull[,,t] <- t(updateProbS1)
#print(qplot(rowSums(pkFull[,,t])))
# Checkpoint everything needed to resume/inspect the run.
saveRDS(list(fitMat, pkFull, updateProbS1, results, t), 'elecFitSw.RDS')
}
## Post-processing: load saved forecast results and plot parameter means
## and forecast accuracy.
## NOTE(review): the estimation loop above saves 'elecFitSw.RDS' but this
## reads 'elecFit.RDS' -- presumably the output of a different run
## configuration; confirm the intended file.
results <- readRDS('elecFit.RDS')
results <- bind_rows(results)
write.csv(results, paste0('elec_k', K[k], 's', switch, '.csv'), row.names = FALSE)

## Posterior means of the per-half-hour parameters from a separate K = 3
## time-varying (VAR) fit.
varInit <- readRDS('fit_VAR_K3_3.RDS')
varFit <- updatePrior(varInit$lambda, TRUE, FALSE, 3, dim)
varMean <- varFit$updateMean
## The variance parameter is stored on the log scale; convert to the
## log-normal mean exp(mu + sd^2 / 2).
varMean[1,,] <- exp(varMean[1,,] + 0.5 * varFit$updateSd[1,,]^2)
## BUG FIX: the original used rep(1:K[k], rep(48 * 17, 3)), which errors
## when K[k] != 3 ('times' has length 3 but x has length K[k]). This fit
## has 3 groups (K = 3 was passed to updatePrior above), so use 1:3.
varMean <- data.frame(mean = c(varMean),
                      group = rep(1:3, rep(48 * 17, 3)),
                      halfhour = rep(1:48, rep(17, 48)),
                      var = c('sigma^{2}', 'intercept', 'temp', 'day1', 'day2', 'day3', 'day4', 'day5', 'day6', 'publicHol',
                              'phi[1]', 'phi[2]', 'phi[3]', 'phi[48]', 'phi[96]', 'phi[144]', 'phi[336]'))
## Convert day-of-week offsets to absolute levels and order the facets.
## (gather/spread retained as in the rest of the file.)
varMean %>%
  spread(var, mean) %>%
  mutate(day1 = intercept + day1,
         day2 = intercept + day2,
         day3 = intercept + day3,
         day4 = intercept + day4,
         day5 = intercept + day5,
         day6 = intercept + day6) %>%
  ## BUG FIX: 'Wedesday' (typo) did not match the 'Wednesday' factor level
  ## declared below, so that column became NA in the facet ordering.
  rename(Sunday = intercept, Monday = day1, Tuesday = day2, Wednesday = day3, Thursday = day4, Friday = day5, Saturday = day6) %>%
  gather(var, mean, -group, -halfhour) %>%
  mutate(var = factor(var, levels = c('Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'publicHol', 'temp',
                                      'sigma^{2}', 'phi[1]', 'phi[2]', 'phi[3]', 'phi[48]', 'phi[96]', 'phi[144]', 'phi[336]'))) -> varFit
ggplot(varFit) + geom_line(aes(halfhour, mean, colour = factor(group))) +
  facet_wrap(~var, scales = 'free', labeller = label_parsed, ncol = 5) +
  theme_bw()

## Aggregate MAPE of the MAP point forecasts by horizon and week
## (back-transforming from the log(y + 0.01) scale first).
results %>%
  filter(h %in% c(1, 2, 6, 12, 24, 48)) %>%
  mutate(mapExp = exp(map) - 0.01,
         mapActual = exp(actual) - 0.01,
         week = floor(t / 336)) %>%
  group_by(t, h) %>%
  summarise(map = sum(mapExp),
            actual = sum(mapActual),
            week = head(week, 1)) %>%
  mutate(ape = abs(map - actual) / actual) %>%
  ungroup() %>%
  group_by(h, week) %>%
  summarise(mape = mean(ape),
            t = min(t)) %>%
  ggplot() + geom_line(aes(t, mape)) + facet_wrap(~h)

## Mean log-score over time by forecast horizon.
results %>%
  filter(h %in% c(1, 2, 6, 12, 24, 48)) %>%
  group_by(t, h) %>%
  summarise(ls = mean(ls, na.rm = TRUE)) %>%
  ggplot() + geom_line(aes(t, ls)) + facet_wrap(~h)
|
9a6a521fd5b6e14cfdf1bfb6acf9bc13a2d3a1d5
|
9e7c75c97ab27056531f68ec0990e448d1740eea
|
/R/is.leapyear.R
|
492dd622e04ee171636b0600038e2471aee5d780
|
[] |
no_license
|
AkselA/R-ymse
|
e3a235eca7b6b8368c6775af33295254d0ecb093
|
1d66631cc7e0cd36b38abcaee5d8111573723588
|
refs/heads/master
| 2020-03-28T12:42:24.854132
| 2020-03-19T11:25:13
| 2020-03-19T11:25:13
| 148,326,253
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 552
|
r
|
is.leapyear.R
|
#' Is a year (or date) in a leap year?
#'
#' Generic with methods for plain year numbers and the common date/time
#' classes. Returns a logical vector the same length as the input.
is.leapyear <- function(x) {
  UseMethod("is.leapyear")
}

# Gregorian rule: every fourth year is a leap year, except century years,
# which must also be divisible by 400.
is.leapyear.default <- function(x) {
  every_fourth <- x %% 4 == 0
  century_ok <- (x %% 100 != 0) | (x %% 400 == 0)
  every_fourth & century_ok
}

# Date/time methods all delegate to the default after extracting the
# calendar year (POSIXlt stores years as an offset from 1900).
is.leapyear.Date <- function(x) {
  yr <- as.POSIXlt(x)$year + 1900
  is.leapyear.default(yr)
}

is.leapyear.POSIXct <- function(x) {
  yr <- as.POSIXlt(x)$year + 1900
  is.leapyear.default(yr)
}

is.leapyear.POSIXlt <- function(x) {
  is.leapyear.default(x$year + 1900)
}

# dts <- as.Date(c("2000-2-2", "2004-2-2", "1900-2-2", "1901-2-2", "1904-2-2"))
# is.leapyear(dts)
# yrs <- c(2000, 2004, 1900, 1901, 1904)
# is.leapyear(yrs)
|
94d41704b58785f0941405b64e55520dd2d95931
|
ccd6d2d69e7c1e9680b954b96e96c3bb52c865ec
|
/R/phi.R
|
dd9531cb216cec94d34895659bdd972dfa853faf
|
[] |
no_license
|
jmgirard/sjstats
|
3578aa71921d94f9173b93ca108279e353f2a965
|
d951c125e8d35cc2ff339472a47b1e56aeb72b9a
|
refs/heads/master
| 2020-05-24T04:39:15.173761
| 2019-04-29T14:17:09
| 2019-04-29T14:17:09
| 187,097,228
| 1
| 0
| null | 2019-05-16T20:30:57
| 2019-05-16T20:30:56
| null |
UTF-8
|
R
| false
| false
| 433
|
r
|
phi.R
|
#' @rdname xtab_statistics
#' @export
phi <- function(tab) {
  # Flatten to a two-way layout so loglm sees margins 1 and 2.
  if (!inherits(tab, "ftable")) {
    tab <- stats::ftable(tab)
  }
  # summary(loglm)$tests reports LR (row 1) and Pearson (row 2) statistics
  # for the independence model; phi = sqrt(Pearson X^2 / n).
  tests <- summary(MASS::loglm(~1 + 2, tab))$tests
  chisq <- tests[2, 1]
  sqrt(chisq / sum(tab))
}
#' @rdname xtab_statistics
#' @export
cramer <- function(tab) {
  # Flatten to a two-way layout before measuring association.
  if (!inherits(tab, "ftable")) {
    tab <- stats::ftable(tab)
  }
  # Cramer's V rescales phi by the smaller of (rows - 1, cols - 1).
  denom <- min(dim(tab) - 1)
  sqrt(phi(tab)^2 / denom)
}
|
de748e8bef505e69978043764c1f42adf679cdb3
|
d05c1cf07cb9cd4e946b2e6fc1e3c16a49ca3589
|
/scripts/Class-22.R
|
6fcfed9f8e343ac07c544868b980683c515134d2
|
[] |
no_license
|
mcolvin/WFA8433-Natural-Resource-Decision-Making
|
c26312bcfa27529114f41e39817fd3bb8f5c35a9
|
ee9b7b6516261c005a0d78cebac9900d748be94d
|
refs/heads/master
| 2021-01-11T03:42:49.128837
| 2019-05-06T15:38:43
| 2019-05-06T15:38:43
| 71,399,621
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,899
|
r
|
Class-22.R
|
## ----unnamed-chunk-1---- ##
## Build a standard 52-card deck as all suit x rank combinations.
deck <- expand.grid(
  suit = c("Diamond", "Club", "Heart", "Spade"),
  card = c("Ace", "Deuce", "Three", "Four", "Five",
           "Six", "Seven", "Eight", "Nine", "Ten",
           "Jack", "Queen", "King"))
deck$id <- 1:nrow(deck) # for sampling later
ncards <- nrow(deck)
prX <- nrow(deck[deck$suit == "Heart", ]) / ncards # p(x = Heart)
prY <- nrow(deck[deck$suit == "Spade", ]) / ncards # p(x = Spade)
## ----unnamed-chunk-2---- ##
prX
prY
## ----unnamed-chunk-3---- ##
## Monte Carlo draw of the first card (with replacement across replicates).
reps <- 50000
indx <- sample(nrow(deck), reps, replace = TRUE) # index for card selected
out <- data.frame(firstCardSuit = deck[indx, ]$suit)
prop.table(table(out)) # all close to 0.25
## ----unnamed-chunk-4---- ##
## BUG FIX: the original preallocated out$secondCardSuite (trailing 'e')
## while the loop filled out$secondCardSuit, leaving a dead all-NA column.
out$secondCardSuit <- NA
# SIMULATE THE PROCESS
for (i in seq_len(reps)) {
  # SAMPLE ANOTHER CARD (from the remaining 51) AND GET ITS SUIT
  id <- sample(deck$id[-indx[i]], 1)
  out$secondCardSuit[i] <- as.character(deck$suit[id])
}
## ----unnamed-chunk-5---- ##
## Empirical joint distribution of (first suit, second suit).
out$tmp <- 1
outcomes <- aggregate(tmp ~ firstCardSuit + secondCardSuit, out, FUN = sum)
outcomes$p <- outcomes$tmp / reps
## ----unnamed-chunk-6---- ##
outcomes
## ----unnamed-chunk-7---- ##
## p(Heart then Spade) and p(Spade then Heart); both estimate 13/204.
nrow(out[out$firstCardSuit == "Heart" & out$secondCardSuit == "Spade", ]) / reps
nrow(out[out$firstCardSuit == "Spade" & out$secondCardSuit == "Heart", ]) / reps
## ----unnamed-chunk-8---- ##
# Likelihood of observing 140 under the third hypothesis (mean 125, sd 10).
dnorm(140,125,10) #p(x|H_i), 3rd model
## ----unnamed-chunk-9---- ##
# Likelihoods under four hypothesised means with a very diffuse sd.
# NOTE(review): this demo uses mean 130 for the 4th model, while the
# analysis below (chunk 10) uses 135 -- confirm which was intended.
dnorm(140,c(100,150,125,130),100)
## ----unnamed-chunk-10---- ##
# Discrete Bayes update over four competing models of a single observation.
priors<-rep(.25,4)# prior weights
observed<-140
predicted<-c(100,150,125,135)
# NOTE(review): 'sd' here masks stats::sd for the rest of the script.
sd<-10
like<-dnorm(observed,predicted,sd) # p(x|Hi)
post<-like*priors/sum(like*priors) # P(Hi): priors, p(x): sum(like*post)
summ<-cbind(priors,predicted, like,post)
models<-data.frame(priors=priors, pred=predicted,like=like,post=post)
## ----unnamed-chunk-11---- ##
models
## ----unnamed-chunk-12---- ##
models$sd<- 10
## ----unnamed-chunk-13---- ##
# Repeat the update assuming more observation uncertainty (sd = 25).
sd<-25
like<-dnorm(observed,predicted,sd)
post<-like*priors/sum(like*priors)
summ<-cbind(priors,predicted, like,post)
app<-data.frame(priors=priors, pred=predicted,like=like,post=post,sd=sd)
models<-rbind(models,app)
## ----unnamed-chunk-14---- ##
# And with sd = 100: the likelihood barely discriminates between models.
sd<-100
like<-dnorm(observed,predicted,sd)
post<-like*priors/sum(like*priors)
summ<-cbind(priors,predicted, like,post)
app<-data.frame(priors=priors, pred=predicted,like=like,post=post,sd=sd)
models<-rbind(models,app)
## ----unnamed-chunk-15---- ##
models
## ----unnamed-chunk-16---- ##
# Arrange posteriors as a 3 (sd scenarios) x 4 (hypotheses) matrix for plotting.
weights<- matrix(models$post,
ncol=4,nrow=3,
dimnames=list(c("10","25","100"),c("H1","H2","H3","H4")),
byrow=TRUE)
## ----unnamed-chunk-17---- ##
# Grouped barplot of posterior weights, with the flat prior as a reference line.
barplot(weights,beside=TRUE,ylim=c(0,0.5),
las=1,
xlab="Hypothesis",
ylab="Posterior probability",
col=c("grey10","grey40","grey80"))
legend("topleft",
legend=c("SD=10","SD=25","SD=100"),
fill=c("grey10","grey40","grey80"))
abline(h=0.25, col="red",lty=2)
text(x=0.75,y=0.265,
labels="Prior probability",
pos=4)
box()
## ----unnamed-chunk-18---- ##
# Sequential Bayesian updating over five years of observations: each year's
# posterior becomes the next year's prior. Uses 'predicted' from the
# previous section (c(100, 150, 125, 135)).
models<-data.frame()
observed<- 140
sd<-100
like<-dnorm(observed,predicted,sd)
priors<-rep(.25,4)# prior weights
post<-like*priors/sum(like*priors)
summ<-cbind(priors,predicted, like,post)
app<-data.frame(hypothesis=c(1:4),priors=priors,
pred=predicted,like=like,post=post,year=1)
models<-rbind(models,app)
## ----unnamed-chunk-19---- ##
models
## ----unnamed-chunk-20---- ##
observed<- 139
sd<-100
like<-dnorm(observed,predicted,sd)
priors<-post # make posterior for year 1 as priors for year 2
post<-like*priors/sum(like*priors)
summ<-cbind(priors,predicted, like,post)
app<-data.frame(hypothesis=c(1:4),priors=priors,
pred=predicted,like=like,post=post,year=2)
models<-rbind(models,app)
## ----unnamed-chunk-21---- ##
observed<- 143
sd<-100
like<-dnorm(observed,predicted,sd)
priors<-post # make posterior for year 2 as priors for year 3
post<-like*priors/sum(like*priors)
summ<-cbind(priors,predicted, like,post)
app<-data.frame(hypothesis=c(1:4),priors=priors,
pred=predicted,like=like,post=post,year=3)
models<-rbind(models,app)
## ----unnamed-chunk-22---- ##
observed<- 125
sd<-100
like<-dnorm(observed,predicted,sd)
priors<-post # make posterior for year 3 as priors for year 4
post<-like*priors/sum(like*priors)
summ<-cbind(priors,predicted, like,post)
app<-data.frame(hypothesis=c(1:4),priors=priors,
pred=predicted,like=like,post=post,year=4)
models<-rbind(models,app)
## ----unnamed-chunk-23---- ##
observed<- 138
sd<-100
like<-dnorm(observed,predicted,sd)
priors<-post # make posterior for year 4 as priors for year 5
post<-like*priors/sum(like*priors)
summ<-cbind(priors,predicted, like,post)
app<-data.frame(hypothesis=c(1:4),priors=priors,
pred=predicted,like=like,post=post,year=5)
models<-rbind(models,app)
## ----unnamed-chunk-24---- ##
models
## ----unnamed-chunk-25---- ##
# Trace of each hypothesis's posterior probability across the five years.
plot(post~year,
data=models,
xlab="Year",
ylab="Posterior probability",
type='n')
points(post~year,
data=models,
subset=hypothesis==1,
type='b',
col="black")
points(post~year,
data=models,
subset=hypothesis==2,
type='b',
col="red")
points(post~year,
data=models,
subset=hypothesis==3,
type='b',
col="green")
points(post~year,
data=models,
subset=hypothesis==4,
type='b',
col="blue")
legend("bottomleft",
legend=c("H1","H2","H3","H4"),
lty=1,
pch=1,
col=c("black","red","green","blue"))
## ----unnamed-chunk-26---- ##
# Adaptive management demo: two competing models (Flow vs Predation) each
# predict the outcome of two actions. Each year: pick the action with the
# higher model-averaged prediction, observe a count, update model weights
# with a Poisson likelihood, and repeat.
# Predictions under the "flow" model for each action.
est_flow_decreasePower <- 30
est_flow_controlFlatheads <- 15
## ----unnamed-chunk-27---- ##
# Predictions under the "predation" model for each action.
est_predation_decreasePower <- 15
est_predation_controlFlatheads <- 25
## ----unnamed-chunk-28---- ##
Flow <- 0.5 # PRIOR PROBABILITY FOR FLOW MODEL
Predation <- 0.5 # PRIOR PROBABILITY FOR PREDATION MODEL
## ----unnamed-chunk-29---- ##
# Model-averaged prediction for each action (weights = model probabilities).
decreasePowerGeneration<- Flow*est_flow_decreasePower +
Predation*est_predation_decreasePower
## ----unnamed-chunk-30---- ##
controlFlatheads <- Flow*est_flow_controlFlatheads +
Predation*est_predation_controlFlatheads
## ----unnamed-chunk-31---- ##
decreasePowerGeneration
controlFlatheads
## ----unnamed-chunk-32---- ##
# Year 1 decision: decrease power generation (higher averaged prediction).
decision<-"Decrease Power Generation"
## ----unnamed-chunk-33---- ##
obs<- 21
## ----unnamed-chunk-34---- ##
## conditional Likelihoods for Decrease power generation action
flow_like<-dpois(obs,est_flow_decreasePower)
predation_like<-dpois(obs,est_predation_decreasePower)
## ----unnamed-chunk-35---- ##
flow_like
predation_like
## ----unnamed-chunk-36---- ##
# Bayes update of the two model weights.
flow_post<-flow_like*Flow/(flow_like*Flow + predation_like*Predation)
predation_post<- predation_like*Predation/(flow_like*Flow + predation_like*Predation)
# NOTE(review): 'flow_wghts' vs 'predation_weights' -- inconsistent naming,
# kept as-is since both are referenced throughout.
flow_wghts<-flow_post
predation_weights<- predation_post
## ----unnamed-chunk-37---- ##
flow_post
predation_post
## ----unnamed-chunk-38---- ##
# Posterior becomes the prior for the next year.
Flow <- flow_post
Predation <- predation_post
## ----unnamed-chunk-39---- ##
decreasePowerGeneration<- Flow*est_flow_decreasePower +
Predation*est_predation_decreasePower
## ----unnamed-chunk-40---- ##
controlFlatheads <- Flow*est_flow_controlFlatheads +
Predation*est_predation_controlFlatheads
## ----unnamed-chunk-41---- ##
decreasePowerGeneration
controlFlatheads
## ----unnamed-chunk-42---- ##
# Year 2 decision.
decision<-c(decision , "Control Flathead Catfish")
## ----unnamed-chunk-43---- ##
obs<- 18
## ----unnamed-chunk-44---- ##
# Likelihoods use the predictions for the action actually taken (control).
flow_like<-dpois(obs,est_flow_controlFlatheads)
predation_like<-dpois(obs,est_predation_controlFlatheads)
## ----unnamed-chunk-45---- ##
flow_post<-flow_like*Flow/(flow_like*Flow + predation_like*Predation)
predation_post<- predation_like*Predation/(flow_like*Flow + predation_like*Predation)
flow_wghts<-c(flow_wghts,flow_post) # keep track of to look at later
predation_weights<- c(predation_weights,predation_post) # keep track of to look at later
## ----unnamed-chunk-46---- ##
# New model weights
flow_post
predation_post
## ----unnamed-chunk-47---- ##
Flow <- flow_post
Predation <- predation_post
## ----unnamed-chunk-48---- ##
# Year 3: re-evaluate actions and decide.
decreasePowerGeneration<- Flow*est_flow_decreasePower +
Predation*est_predation_decreasePower
controlFlatheads <- Flow*est_flow_controlFlatheads +
Predation*est_predation_controlFlatheads
decreasePowerGeneration
controlFlatheads
decision<- c(decision, "Decrease Power Generation")
## ----unnamed-chunk-49---- ##
obs<- 22
flow_like<-dpois(obs,est_flow_decreasePower)
predation_like<-dpois(obs,est_predation_decreasePower)
flow_post<-flow_like*Flow/(flow_like*Flow + predation_like*Predation)
predation_post<- predation_like*Predation/(flow_like*Flow + predation_like*Predation)
## ----unnamed-chunk-50---- ##
flow_wghts<-c(flow_wghts,flow_post)
predation_weights<- c(predation_weights,predation_post)
## ----unnamed-chunk-51---- ##
Flow <- flow_post
Predation <- predation_post
## ----unnamed-chunk-52---- ##
# Year 4: re-evaluate and decide.
decreasePowerGeneration<- Flow*est_flow_decreasePower +
Predation*est_predation_decreasePower
controlFlatheads <- Flow*est_flow_controlFlatheads +
Predation*est_predation_controlFlatheads
decreasePowerGeneration
controlFlatheads
decision<- c(decision, "Decrease Power Generation")
## ----unnamed-chunk-53---- ##
obs<- 17
## conditional Likelihoods for Decrease.power.generation action
flow_like<-dpois(obs,est_flow_decreasePower)
predation_like<-dpois(obs,est_predation_decreasePower)
flow_post<-flow_like*Flow/(flow_like*Flow + predation_like*Predation)
predation_post<- predation_like*Predation/(flow_like*Flow + predation_like*Predation)
## ----unnamed-chunk-54---- ##
flow_wghts<-c(flow_wghts,flow_post)
predation_weights<- c(predation_weights,predation_post)
## ----unnamed-chunk-55---- ##
Flow <- flow_post
Predation <- predation_post
## ----unnamed-chunk-56---- ##
# Year 5: re-evaluate and decide.
decreasePowerGeneration<- Flow*est_flow_decreasePower +
Predation*est_predation_decreasePower
controlFlatheads <- Flow*est_flow_controlFlatheads +
Predation*est_predation_controlFlatheads
decreasePowerGeneration
controlFlatheads
decision<- c(decision, "Control Flathead Catfish")
## ----unnamed-chunk-57---- ##
obs<- 15
## conditional Likelihoods
flow_like<-dpois(obs,est_flow_controlFlatheads)
predation_like<-dpois(obs,est_predation_controlFlatheads)
flow_post<-flow_like*Flow/(flow_like*Flow + predation_like*Predation)
predation_post<- predation_like*Predation/(flow_like*Flow + predation_like*Predation)
## ----unnamed-chunk-58---- ##
flow_wghts<-c(flow_wghts,flow_post)
predation_weights<- c(predation_weights,predation_post)
## ----unnamed-chunk-59---- ##
Flow <- flow_post
Predation <- predation_post
## ----unnamed-chunk-60---- ##
# Year 6: final re-evaluation and decision.
decreasePowerGeneration<- Flow*est_flow_decreasePower +
Predation*est_predation_decreasePower
controlFlatheads <- Flow*est_flow_controlFlatheads +
Predation*est_predation_controlFlatheads
decreasePowerGeneration
controlFlatheads
decision<- c(decision, "Control Flathead Catfish")
## ----unnamed-chunk-61---- ##
obs<- 13
flow_like<-dpois(obs,est_flow_controlFlatheads)
predation_like<-dpois(obs,est_predation_controlFlatheads)
flow_post<-flow_like*Flow/(flow_like*Flow + predation_like*Predation)
predation_post<- predation_like*Predation/(flow_like*Flow + predation_like*Predation)
## ----unnamed-chunk-62---- ##
flow_wghts<-c(flow_wghts,flow_post)
predation_weights<- c(predation_weights,predation_post)
## ----unnamed-chunk-63---- ##
decision
flow_wghts
predation_weights
## ----unnamed-chunk-64---- ##
# Evolution of the two model weights over the six updates.
plot(flow_wghts,
xlab="Year",
ylab="Probability",
ylim=c(0,1),
type='b',
col='blue',
las=1)
points(predation_weights,
type='b',
col='green')
legend("topleft",
legend=c("Flow variability","Predation"),
lty=1,
col=c("blue","green"),
lwd=2)
|
c391a4353fc056d2276fef138a4ef33012a04b57
|
9ef6d9e20c665f66a0338677d30c68d8822b38a2
|
/R/pre.R
|
fde0beacc03fda77b46717f108157f6007cb191b
|
[] |
no_license
|
RishiSadhir/pre
|
fcb8e16771e836dc67471ea40e309c839df389dd
|
c61646c32f0cd5cbae330711259f464a2a5e44b0
|
refs/heads/master
| 2021-01-19T11:17:58.944328
| 2017-04-01T11:41:37
| 2017-04-01T11:41:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 73,316
|
r
|
pre.R
|
# Declare the foreach infix operator `%dopar%` as a known global so that
# static code checks (R CMD check) do not flag it as an undefined symbol;
# it is provided by the foreach package at run time.
utils::globalVariables("%dopar%")
#' Derive a prediction rule ensemble
#'
#' \code{pre} derives a sparse ensemble of rules and/or linear functions for
#' prediction of a continuous or binary outcome.
#'
#' @param formula a symbolic description of the model to be fit of the form
#' \code{y ~ x1 + x2 + ...+ xn}. If the output variable (left-hand side of the
#' formala) is a factor, an ensemble for binary classification is created.
#' Otherwise, an ensemble for prediction of a continuous variable is created.
#' Note that input variables may not have 'rule' as (part of) their name, and
#' the formula may not exclude the intercept (that is \code{+ 0} or \code{- 1}
#' may not be used in the right-hand side of the formula).
#' @param data matrix or data.frame containing the variables in the model. When a
#' matrix is specified, it must be of class \code{"numeric"} (the input and output
#' variable must be continuous; the input variables may be 0-1 coded variables).
#' When a data.frame is specified, the output variable must be of
#' class \code{"numeric"} and must be a continuous variable; the input variables
#' must be of class \code{"numeric"} (for continuous input variables),
#' \code{"logical"} (for binary variables), \code{"factor"} (for nominal input
#' variables with 2 or more levels), or \code{"ordered" "factor"} (for
#' ordered input variables).
#' @param type character. Type of base learners to be included in ensemble.
#' Defaults to "both" (initial ensemble includes both rules and linear functions).
#' Other option may be "rules" (for prediction rules only) or "linear" (for
#' linear functions only).
#' @param weights an optional vector of observation weights to be used for
#' deriving the ensemble.
#' @param sampfrac numeric value greater than 0, and smaller than or equal to 1.
#' Fraction of randomly selected training observations used to produce each tree.
#' Setting this to values < 1 will result in subsamples being drawn without
#' replacement (i.e., subsampling). Setting this equal to 1 will result in
#' bootstrap sampling.
#' @param maxdepth numeric. Maximal number of conditions in rules.
#' @param learnrate numeric. Learning rate for sequentially induced trees. If
#' \code{NULL} (default), the learnrate is set to .01 for regression and to 0
#' for classification. Setting the learning rate to values > 0 for classification
#' dramatically increases computation time.
#' @param removeduplicates logical. Remove rules from the ensemble which have
#' the exact same support in training data?
#' @param mtry numeric. Number of randomly selected predictor variables for
#' creating each split in each tree. Ignored for nominal output variables if
#' \code{learnrate} > 0.
#' @param thres numeric. Threshold for convergence.
#' @param standardize logical. Standardize rules and linear terms before
#' estimating the regression model? As this will also standardize dummy coded
#' factors, users are advised to use the default: \code{standardize = FALSE}.
#' @param winsfrac numeric. Quantiles of data distribution to be used for
#' winsorizing linear terms. If set to 0, no winsorizing is performed. Note
#' that ordinal variables are included as linear terms in estimating the
#' regression model, and will also be winsorized.
#' @param normalize logical. Normalize linear variables before estimating the
#' regression model? Normalizing gives linear terms the same a priori influence
#' as a typical rule.
#' @param nfolds numeric. Number of folds to be used in performing cross
#' validation for determining penalty parameter.
#' @param mod.sel.crit character. Model selection criterion to be used for
#' deriving the final ensemble. The default is \code{"deviance"}, which uses
#' squared-error for gaussian models (a.k.a. \code{"mse"}) and binomial deviance
#' for logistic regression. \code{"class"} would give misclassification error,
#' \code{"auc"} would give area under the ROC curve. Further, \code{"mse"} or
#' \code{"mae"} (mean squared and mean absolute error) would measure the deviation
#' from the fitted mean to the binary or continuous response.
#' @param verbose logical. Should information on the initial and final ensemble
#' be printed to the command line?
#' @param par.init logical. Should parallel foreach be used to generate initial
#' ensemble? Only used when \verb{learnrate == 0}. Must register parallel
#' beforehand, such as doMC or others.
#' @param par.final logical. Should parallel foreach be used to perform cross
#' validation for selecting the final ensemble? Must register parallel beforehand,
#' such as doMC or others.
#' @param ntrees numeric. Number of trees to generate for the initial ensemble.
#' @param ... Additional arguments to be passed to
#' \code{\link[glmnet]{cv.glmnet}}.
#' @note The code for deriving rules from the nodes of trees was taken from an
#' internal function of the \code{partykit} package of Achim Zeileis and Torsten
#' Hothorn.
#' @return an object of class \code{pre}, which is a list with many elements
#' @details Inputs can be continuous, ordered or factor variables. Continuous
#' variables
#' @examples \donttest{
#' airq.ens <- pre(Ozone ~ ., data = airquality[complete.cases(airquality),], verbose = TRUE)}
#' @import glmnet partykit datasets
#' @export
pre <- function(formula, data, type = "both", weights = rep(1, times = nrow(data)),
                sampfrac = .5, maxdepth = 3L, learnrate = NULL,
                removeduplicates = TRUE, mtry = Inf, ntrees = 500,
                thres = 1e-07, standardize = FALSE, winsfrac = .025,
                normalize = TRUE, nfolds = 10L, mod.sel.crit = "deviance",
                verbose = FALSE, par.init = FALSE, par.final = FALSE, ...)
{ ###################
  ## Preliminaries ##
  ###################
  # Scalar flag check: use short-circuiting `||` (not elementwise `|`).
  if (par.init || par.final) {
    if (!("foreach" %in% installed.packages()[,1])) {
      warning("Parallel computation requires package foreach, which is not installed. Argument parallel will be set to FALSE.
            To run in parallel, download and install package foreach from CRAN, and run again.")
      par.init <- par.final <- FALSE
    }
  }
  if (!is.data.frame(data)) {
    stop("data should be a data frame.")
  }
  if (length(sampfrac) != 1 || sampfrac < 0.01 || sampfrac > 1) {
    stop("Bad value for 'sampfrac'")
  }
  # Membership test is clearer (and safer) than chained inequality checks:
  if (length(type) != 1 || !(type %in% c("both", "rules", "linear"))) {
    stop("Argument type should equal 'both', 'rules' or 'linear'")
  }
  if (length(winsfrac) != 1 || winsfrac < 0 || winsfrac > 0.5) {
    stop("Bad value for 'winsfrac'.")
  }
  if (!is.logical(verbose)) {
    stop("Bad value for 'verbose'.")
  }
  orig_data <- data
  data <- model.frame(formula, data, na.action = NULL)
  x_names <- attr(attr(data, "terms"), "term.labels")
  y_name <- names(data)[attr(attr(data, "terms"), "response")]
  formula <- formula(data)
  n <- nrow(data)
  # A factor response implies classification; default learnrate differs by task:
  if (is.factor(data[,y_name])) {
    classify <- TRUE
    if (is.null(learnrate)) {
      learnrate <- 0
    }
  } else {
    classify <- FALSE
    if (is.null(learnrate)) {
      learnrate <- .01
    }
  }
  if (!(is.numeric(data[,y_name]) || is.factor(data[,y_name]))) {
    stop("Response variable should be continuous (class numeric) or binary (class
         factor)")
  }
  if (nlevels(data[,y_name]) > 2) {
    stop("No support for multinomial output variables yet.")
  }
  if (any(sapply(data[,x_names], is.character))) {
    stop("Variables specified in formula and data argument are of class character.
         Please coerce to class 'numeric', 'factor' or 'ordered' 'factor':",
         x_names[sapply(data[,x_names], is.character)])
  }
  if (classify && learnrate != 0 && !is.infinite(mtry)) {
    warning("Value specified for mtry will not be used when the outcome variable
            is binary and learnrate > 0", immediate. = TRUE)
  }
  if (any(is.na(data))) {
    data <- data[complete.cases(data),]
    n <- nrow(data)
    warning("Some observations have missing values and will be removed.
            New sample size is ", n, ".\n", immediate. = TRUE)
  }
  if (verbose) {
    if (classify) {
      cat("A rule ensemble for prediction of a categorical output variable will be
          created.\n")
    } else {
      cat("A rule ensemble for prediction of a continuous output variable will
          be created.\n")
    }
  }
  # Initialize so downstream length()/result checks are always safe, even
  # when no rules are derived (type == "linear") or when duplicates are kept
  # (removeduplicates == FALSE previously left duplicates.removed undefined,
  # which crashed when building the result list):
  rules <- character(0)
  duplicates.removed <- NULL
  #############################
  ## Derive prediction rules ##
  #############################
  if (type != "linear") {
    if (learnrate == 0) { # always use ctree()
      if(par.init) {
        rules <- foreach::foreach(i = 1:ntrees, .combine = "c", .packages = "partykit") %dopar% {
          # Take subsample of dataset
          if (sampfrac == 1) { # then bootstrap
            subsample <- sample(1:n, size = n, replace = TRUE)
          } else { # else subsample
            subsample <- sample(1:n, size = round(sampfrac * n), replace = FALSE)
          }
          subsampledata <- data[subsample,]
          # Make sure ctree() can find object specified by weights argument:
          environment(formula) <- environment()
          # Grow ctree on subsample:
          tree <- ctree(formula, data = subsampledata, weights = weights[subsample],
                        maxdepth = maxdepth, mtry = mtry)
          # Collect rules from tree:
          unlist(list.rules(tree))
        }
      } else {
        for(i in 1:ntrees) {
          # Take subsample of dataset
          if (sampfrac == 1) { # then bootstrap
            subsample <- sample(1:n, size = n, replace = TRUE)
          } else { # else subsample
            subsample <- sample(1:n, size = round(sampfrac * n), replace = FALSE)
          }
          subsampledata <- data[subsample,]
          # Make sure ctree() can find object specified by weights argument:
          environment(formula) <- environment()
          # Grow tree on subsample:
          tree <- ctree(formula, data = subsampledata, weights = weights[subsample],
                        maxdepth = maxdepth, mtry = mtry)
          # Collect rules from tree:
          rules <- append(rules, unlist(list.rules(tree)))
        }
      }
    }
    if (learnrate > 0) {
      if (!classify) {
        # Gradient-boosting-like scheme: each tree is grown on the residual
        # of the (shrunken) predictions of the previous trees.
        y_learn <- data[,y_name]
        for(i in 1:ntrees) {
          # Take subsample of dataset
          if (sampfrac == 1) { # then bootstrap
            subsample <- sample(1:n, size = n, replace = TRUE)
          } else { # else subsample
            subsample <- sample(1:n, size = round(sampfrac * n), replace = FALSE)
          }
          subsampledata <- data[subsample,]
          subsampledata[,y_name] <- y_learn[subsample]
          # Make sure ctree() can find object specified by weights argument:
          environment(formula) <- environment()
          # Grow tree on subsample:
          tree <- ctree(formula, data = subsampledata, weights = weights[subsample],
                        maxdepth = maxdepth, mtry = mtry)
          # Collect rules from tree:
          rules <- append(rules, unlist(list.rules(tree)))
          # Substract predictions from current y:
          y_learn <- y_learn - learnrate * predict(tree, newdata = data)
        }
      }
      if (classify) {
        # For classification, boosting operates on the link scale through
        # an offset that accumulates the shrunken tree predictions.
        data2 <- data.frame(data, offset = 0)
        glmtreeformula <- formula(paste(paste(y_name, " ~ 1 |"),
                                        paste(x_names, collapse = "+")))
        for(i in 1:ntrees) {
          # Take subsample of dataset:
          if (sampfrac == 1) { # then bootstrap:
            subsample <- sample(1:n, size = n, replace = TRUE)
          } else { # else subsample:
            subsample <- sample(1:n, size = round(sampfrac * n), replace = FALSE)
          }
          subsampledata <- data2[subsample,]
          # Make sure glmtree() can find object specified by weights argument:
          environment(formula) <- environment()
          # Grow tree on subsample:
          tree <- glmtree(glmtreeformula, data = subsampledata, family = "binomial",
                          weights = weights[subsample], maxdepth = maxdepth + 1,
                          offset = offset)
          # Collect rules from tree:
          rules <- append(rules, unlist(list.rules(tree)))
          # Update offset:
          data2$offset <- data2$offset + learnrate * predict(
            tree, newdata = data2, type = "link")
        }
      }
    }
    nrules <- length(rules)
    if (verbose){
      cat("\nA total of", ntrees, "trees and ", nrules, "rules were
          generated initially.")
    }
    # Keep unique, non-empty rules only:
    rules <- unique(rules[!rules==""])
    if (verbose) {
      cat("\n\nA total of", nrules - length(rules), "rules were empty
          and removed from the initial ensemble.")
    }
    # Create dataframe with 0-1 coded rules:
    if (length(rules) > 0) {
      rulevars <- data.frame(
        rule1 = as.numeric(with(data, eval(parse(text = rules[[1]])))))
      # Guard against the single-rule case: 2:1 would index out of bounds.
      if (length(rules) > 1) {
        for(i in 2:length(rules)) {
          rulevars[,paste("rule", i, sep="")] <- as.numeric(
            with(data, eval(parse(text = rules[[i]]))))
        }
      }
      if (removeduplicates) {
        # Remove rules with identical support:
        duplicates <- duplicated(t(rulevars))
        duplicates.removed <- data.frame(name = colnames(rulevars)[duplicates],
                                         description = rules[duplicates])
        rulevars <- rulevars[,!duplicates]
        rules <- rules[!duplicates]
        if (verbose) {
          cat("\n\nA total of", sum(duplicates), "generated rules had
              support identical to earlier rules and were removed from the initial
              ensemble ($duplicates.removed shows which, if any).")
        }
      }
      if (verbose) {
        cat("\n\nAn initial ensemble consisting of", ncol(rulevars), "rules was
            succesfully created.")
      }
    } else {
      warning("No prediction rules could be derived from dataset.", immediate. = TRUE)
    }
  }
  ######################################################
  ## Prepare rules, linear terms and outcome variable ##
  ######################################################
  x <- data[,x_names]
  # Convert ordered categorical predictor variables to linear terms.
  # Done column-wise via lapply: the previous single-call conversion broke
  # when more than one ordered factor was present.
  ord_cols <- sapply(x, is.ordered)
  x[ord_cols] <- lapply(x[ord_cols], function(v) as.numeric(as.character(v)))
  if (type == "rules" && length(rules) > 0) {
    x <- rulevars
    x_scales <- NULL
  } else { # if type is not rules, linear terms should be prepared:
    # Winsorize numeric variables (section 5 of F&P(2008)):
    if (winsfrac > 0) {
      wins_points <- data.frame(varname = names(x), value = NA)
      for(i in seq_len(ncol(x))) {
        if (is.numeric(x[,i])) {
          lim <- quantile(x[,i], probs = c(winsfrac, 1 - winsfrac))
          x[x[,i] < lim[1], i] <- lim[1]
          x[x[,i] > lim[2], i] <- lim[2]
          wins_points$value[i] <- paste(lim[1], "<=", names(x)[i], "<=", lim[2])
        }
      }
    } else {
      wins_points <- NULL
    }
    # normalize numeric variables:
    if (normalize) {
      # Normalize linear terms (section 5 of F&P08), if there are any:
      if (sum(sapply(x, is.numeric)) > 0) {
        x_scales <- sapply(x[sapply(x, is.numeric)], sd, na.rm = TRUE) / 0.4
        x[,sapply(x, is.numeric)] <- scale(x[,sapply(x, is.numeric)],
                                           center = FALSE, scale = x_scales)
      } else {
        x_scales <- NULL
      }
    } else {
      x_scales <- NULL
    }
    # If both rules and linear terms are in ensemble, combine both:
    if (type == "both" && length(rules) > 0) {
      x <- data.frame(x, rulevars)
    }
  }
  modmat_formula <- formula(
    paste(" ~ -1 +", paste(colnames(x), collapse = "+")))
  x <- model.matrix(modmat_formula, data = x)
  y <- data[,y_name]
  ##################################################
  ## Perform penalized regression on the ensemble ##
  ##################################################
  if (classify) {
    family <- "binomial"
  } else {
    family <- "gaussian"
  }
  glmnet.fit <- cv.glmnet(x, y, nfolds = nfolds, standardize = standardize,
                          type.measure = mod.sel.crit, thres = thres,
                          weights = weights, family = family, parallel = par.final,
                          ...)
  ####################
  ## Return results ##
  ####################
  lmin_ind <- which(glmnet.fit$lambda == glmnet.fit$lambda.min)
  l1se_ind <- which(glmnet.fit$lambda == glmnet.fit$lambda.1se)
  if (verbose) {
    cat("\n\nFinal ensemble with minimum cv error: \n  lambda = ",
        glmnet.fit$lambda[lmin_ind], "\n  number of terms = ",
        glmnet.fit$nzero[lmin_ind], "\n  mean cv error (se) = ",
        glmnet.fit$cvm[lmin_ind], " (", glmnet.fit$cvsd[lmin_ind], ")",
        "\n\nFinal ensemble with cv error within 1se of minimum: \n  lambda = ",
        glmnet.fit$lambda[l1se_ind], "\n  number of terms = ",
        glmnet.fit$nzero[l1se_ind], "\n  mean cv error (se) = ",
        glmnet.fit$cvm[l1se_ind], " (", glmnet.fit$cvsd[l1se_ind], ")\n", sep="")
  }
  result <- list(glmnet.fit = glmnet.fit, call = match.call(), weights = weights,
                 data = data, normalize = normalize, x_scales = x_scales,
                 type = type, x_names = x_names, y_name = y_name,
                 modmat = x, modmat_formula = modmat_formula,
                 wins_points = wins_points,
                 classify = classify, formula = formula, orig_data = orig_data)
  # Short-circuit `&&` is required here: with type == "linear" the `rules`
  # object is empty and no rulevars exist.
  if (type != "linear" && length(rules) > 0) {
    result$duplicates.removed <- duplicates.removed
    result$rules <- data.frame(rule = names(rulevars), description = rules)
    result$rulevars <- rulevars
  } else {
    result$duplicates.removed <- NULL
    result$rules <- NULL
    result$rulevars <- NULL
  }
  class(result) <- "pre"
  return(result)
}
# Internal function for transforming tree into a set of rules:
# Taken and modified from package partykit, written by Achim Zeileis and
# Torsten Hothorn
# Extract the rule (conjunction of split conditions) leading to each terminal
# node of a partykit tree. Returns a character vector of rules, one per
# terminal node, in R-evaluable syntax (e.g. 'x1 > 3 & x2 %in% c("a", "b")').
# NOTE(review): adapted from an internal partykit function; behavior depends
# on partykit's party object accessors.
list.rules <- function (x, i = NULL, ...)
{
  # Default: extract rules for all terminal nodes.
  if (is.null(i))
    i <- partykit::nodeids(x, terminal = TRUE)
  # More than one node requested: recurse over each node id.
  if (length(i) > 1) {
    ret <- sapply(i, list.rules, x = x)
    names(ret) <- if (is.character(i))
      i
    else names(x)[i]
    return(ret)
  }
  # A single node may be given by name; map it to its numeric id.
  if (is.character(i) && !is.null(names(x)))
    i <- which(names(x) %in% i)
  stopifnot(length(i) == 1 & is.numeric(i))
  stopifnot(i <= length(x) & i >= 1)
  i <- as.integer(i)
  # Data associated with node i; strip fitted-value columns if present so
  # that `dat` contains only the predictor variables used in splits.
  dat <- partykit::data_party(x, i)
  if (!is.null(x$fitted)) {
    findx <- which("(fitted)" == names(dat))[1]
    fit <- dat[, findx:ncol(dat), drop = FALSE]
    dat <- dat[, -(findx:ncol(dat)), drop = FALSE]
    if (ncol(dat) == 0)
      dat <- x$data
  }
  else {
    fit <- NULL
    dat <- x$data
  }
  rule <- c()
  # Walk from the root toward node i, collecting the split condition taken
  # at each internal node along the path.
  recFun <- function(node) {
    # Reached the target node: stop recursing.
    if (partykit::id_node(node) == i) {
      return(NULL)
    }
    # Determine which child lies on the path to node i:
    kid <- sapply(partykit::kids_node(node), partykit::id_node)
    whichkid <- max(which(kid <= i))
    split <- partykit::split_node(node)
    ivar <- partykit::varid_split(split)
    svar <- names(dat)[ivar]
    index <- partykit::index_split(split)
    if (is.factor(dat[, svar])) {
      # Factor split: express the condition as set membership over the
      # levels routed to the chosen child.
      if (is.null(index))
        index <- ((1:nlevels(dat[, svar])) > partykit::breaks_split(split)) +
          1
      slevels <- levels(dat[, svar])[index == whichkid]
      srule <- paste(svar, " %in% c(\"", paste(slevels,
                                               collapse = "\", \"", sep = ""), "\")", sep = "")
    }
    else {
      # Numeric split: build interval bounds around the break point(s);
      # `right` determines whether the split is <= / > or < / >=.
      if (is.null(index)) {
        index <- 1:length(kid)
      }
      breaks <- cbind(c(-Inf, partykit::breaks_split(split)), c(partykit::breaks_split(split),
                                                                Inf))
      sbreak <- breaks[index == whichkid, ]
      right <- partykit::right_split(split)
      srule <- c()
      if (is.finite(sbreak[1])) {
        srule <- c(srule, paste(svar, ifelse(right, ">",
                                             ">="), sbreak[1]))
      }
      if (is.finite(sbreak[2])) {
        srule <- c(srule, paste(svar, ifelse(right, "<=",
                                             "<"), sbreak[2]))
      }
      srule <- paste(srule, collapse = " & ")
    }
    # Accumulate this condition in the enclosing environment and descend:
    rule <<- c(rule, srule)
    return(recFun(node[[whichkid]]))
  }
  node <- recFun(partykit::node_party(x))
  # Conjunction of all conditions along the root-to-node path:
  paste(rule, collapse = " & ")
}
#' Print method for objects of class pre
#'
#' \code{print.pre} prints information about the generated prediction rule
#' ensemble to the command line
#'
#' @param x An object of class \code{\link{pre}}.
#' @param penalty.par.val character. Information for which final prediction rule
#' ensemble(s) should be printed? The ensemble with penalty parameter criterion
#' yielding minimum cv error (\code{"lambda.min"}) or penalty parameter
#' yielding error within 1 standard error of minimum cv error ("\code{lambda.1se}")?
#' @param ... Additional arguments, currently not used.
#' @return Prints information about the generated prediction rule ensembles,
#' @examples \donttest{
#' set.seed(42)
#' airq.ens <- pre(Ozone ~ ., data=airquality[complete.cases(airquality),])
#' coefs <- print(airq.ens)}
#' @export
#' @method print pre
print.pre <- function(x, penalty.par.val = "lambda.1se", ...) {
  # Print summary statistics for the requested final ensemble, then return
  # (visibly) the data frame of nonzero coefficients.
  if (penalty.par.val == "lambda.1se") {
    l1se_ind <- which(x$glmnet.fit$lambda == x$glmnet.fit$lambda.1se)
    cat("\nFinal ensemble with cv error within 1se of minimum: \n  lambda = ",
        x$glmnet.fit$lambda[l1se_ind], "\n  number of terms = ",
        x$glmnet.fit$nzero[l1se_ind], "\n  mean cv error (se) = ",
        x$glmnet.fit$cvm[l1se_ind],
        " (", x$glmnet.fit$cvsd[l1se_ind], ") \n\n", sep="")
    tmp <- coef(x, penalty.par.val = "lambda.1se")
    return(tmp[tmp$coefficient != 0, ])
  }
  if (penalty.par.val == "lambda.min") {
    lmin_ind <- which(x$glmnet.fit$lambda == x$glmnet.fit$lambda.min)
    cat("Final ensemble with minimum cv error: \n\n  lambda = ",
        x$glmnet.fit$lambda[lmin_ind], "\n  number of terms = ",
        x$glmnet.fit$nzero[lmin_ind], "\n  mean cv error (se) = ",
        x$glmnet.fit$cvm[lmin_ind],
        " (", x$glmnet.fit$cvsd[lmin_ind], ") \n\n", sep = "")
    tmp <- coef(x, penalty.par.val = "lambda.min")
    return(tmp[tmp$coefficient != 0, ])
  }
  # Previously an unrecognized value silently returned NULL; fail loudly:
  stop("penalty.par.val should be 'lambda.1se' or 'lambda.min'.",
       call. = FALSE)
}
#' Full k-fold cross validation of a pre
#'
#' \code{cvpre} performs k-fold cross validation on the dataset used to create
#' the ensemble, providing an estimate of predictive accuracy on future observations.
#'
#' @param object An object of class \code{\link{pre}}.
#' @param k integer. The number of cross validation folds to be used.
#' @param verbose logical. Should progress of the cross validation be printed
#' to the command line?
#' @param pclass numeric. Only used for classification. Cut-off value between
#' 0 and 1 to be used for classifying to second class.
#' @param penalty.par.val character. Calculate cross-validated error for ensembles
#' with penalty parameter criterion giving minimum cv error (\code{"lambda.min"})
#' or giving cv error that is within 1 standard error of minimum cv error
#' ("\code{lambda.1se}")?
#' @param parallel logical. Should parallel foreach be used? Must register parallel
#' beforehand, such as doMC or others.
#' @return A list with three elements: \code{$cvpreds} (a vector with cross-validated
#' predicted y values), \code{$fold_indicators} (a vector indicating the cross-validation
#' fold each training observation was assigned to) and \code{$accuracy}. For continuous
#' outputs, accuracy is a list with elements \code{$MSE} (mean squared error on test
#' observations), \code{$MAE} (mean absolute error on test observations). For
#' classification, accuracy is a list with elements
#' \code{$SEL} (mean squared error on predicted probabilities), \code{$AEL} (mean absolute
#' error on predicted probabilities), \code{$MCR} (average misclassification error rate)
#' and \code{$table} (table with proportions of (in)correctly classified observations
#' per class).
#' @examples \donttest{
#' set.seed(42)
#' airq.ens <- pre(Ozone ~ ., data = airquality[complete.cases(airquality),])
#' airq.cv <- cvpre(airq.ens)}
cvpre <- function(object, k = 10, verbose = FALSE, pclass = .5,
                  penalty.par.val = "lambda.1se", parallel = FALSE) {
  # Randomly assign every training observation to one of k folds:
  folds <- sample(rep(1:k, length.out = nrow(object$orig_data)),
                  size = nrow(object$orig_data), replace = FALSE)
  if (parallel) {
    cvpreds_unsorted <- foreach::foreach(i = 1:k, .combine = "rbind") %dopar% {
      # Refit the ensemble on all folds except fold i by re-evaluating the
      # original call with the training subset substituted in:
      cl <- object$call
      cl$verbose <- FALSE
      cl$data <- object$orig_data[folds != i,]
      cvobject <- eval(cl)
      # NOTE(review): the test-fold size is computed as total minus the rows
      # the refit retained, which assumes the refit dropped no extra rows
      # (e.g., rows with missing values) — confirm for data with NAs.
      if (object$classify) {
        data.frame(fold = rep(i, times = length(folds) - nrow(cvobject$orig_data)),
                   preds = predict.pre(cvobject, type = "response",
                                       newdata = object$orig_data[folds == i,],
                                       penalty.par.val = penalty.par.val))
      } else {
        data.frame(fold = rep(i, times = length(folds) - nrow(cvobject$orig_data)),
                   preds = predict.pre(cvobject, penalty.par.val = penalty.par.val,
                                       newdata = object$orig_data[folds == i,]))
      }
    }
    # Re-sort the rbind-ed per-fold predictions into original row order:
    cvpreds <- rep(NA, times = nrow(object$orig_data))
    for (i in 1:k) {
      cvpreds[folds == i] <- cvpreds_unsorted[cvpreds_unsorted$fold ==i, "preds"]
    }
  } else {
    if (verbose) {
      cat("Running cross validation in fold ")
    }
    cvpreds <- rep(NA, times = nrow(object$orig_data))
    for (i in 1:k) {
      if (verbose) {
        cat(i, " of ", k, ", ", sep = "")
      }
      cl <- object$call
      cl$verbose <- FALSE
      cl$data <- object$orig_data[folds != i,]
      cvobject <- eval(cl)
      if (object$classify) {
        cvpreds[folds == i] <- predict.pre(
          cvobject, newdata = object$orig_data[folds == i,], type = "response",
          penalty.par.val = penalty.par.val)
      } else {
        cvpreds[folds == i] <- predict.pre(
          cvobject, newdata = object$orig_data[folds == i,],
          penalty.par.val = penalty.par.val)
      }
      if (verbose & i == k) {
        cat("done!\n")
      }
    }
  }
  # Accuracy estimates based on the out-of-fold predictions:
  accuracy <- list()
  if (object$classify) {
    accuracy$SEL<- mean((as.numeric(object$data[,object$y_name]) - 1 - cvpreds)^2)
    accuracy$AEL <- mean(abs(as.numeric(object$data[,object$y_name]) - 1 - cvpreds))
    # FIX: classify using the user-supplied cut-off `pclass` (the previous
    # version hard-coded .5, silently ignoring the documented argument):
    cvpreds_d <- as.numeric(cvpreds > pclass)
    accuracy$MCR <- 1 - sum(diag(prop.table(table(cvpreds_d,
                                                  object$data[,object$y_name]))))
    accuracy$table <- prop.table(table(cvpreds_d, object$data[,object$y_name]))
  }
  else {
    accuracy$MSE <- mean((object$data[,object$y_name] - cvpreds)^2)
    accuracy$MAE <- mean(abs(object$data[,object$y_name] - cvpreds))
  }
  result <- list(cvpreds = cvpreds, fold_indicators = folds, accuracy = accuracy)
  return(result)
}
#' Coefficients for the final prediction rule ensemble
#'
#' \code{coef.pre} returns coefficients for prediction rules and linear terms in
#' the final ensemble
#'
#' @param object object of class \code{\link{pre}}
#' @param penalty.par.val character. Penalty parameter criterion to be used for
#' selecting final model: lambda giving minimum cv error (\code{"lambda.min"}) or
#' lambda giving cv error that is within 1 standard error of minimum cv error
#' ("\code{lambda.1se}").
#' @param ... additional arguments to be passed to \code{\link[glmnet]{coef.glmnet}}.
#' @return returns a dataframe with 3 columns: coefficient, rule (rule or
#' variable name) and description (\code{NA} for linear terms, conditions for
#' rules).
#' @examples \donttest{
#' set.seed(42)
#' airq.ens <- pre(Ozone ~ ., data=airquality[complete.cases(airquality),])
#' coefs <- coef(airq.ens)}
#' @export
#' @method coef pre
coef.pre <- function(object, penalty.par.val = "lambda.1se", ...)
{
  # Extract the (sparse) coefficient vector for the chosen lambda as a
  # plain one-column matrix with term names as rownames.
  coefs <- as(coef.glmnet(object$glmnet.fit, s = penalty.par.val, ...),
              Class = "matrix")
  # coefficients for normalized variables should be unnormalized:
  if (object$normalize & !is.null(object$x_scales) & object$type != "rules") {
    coefs[names(object$x_scales),] <- coefs[names(object$x_scales),] /
      object$x_scales
  }
  coefs <- data.frame(coefficient = coefs[,1], rule = rownames(coefs))
  if (object$type != "linear" & !is.null(object$rules)) {
    # Attach the human-readable rule conditions; linear terms get NA here.
    coefs <- merge(coefs, object$rules, all.x = TRUE)
    coefs$description <- as.character(coefs$description)
  } else {
    # Linear-only ensemble: no rule descriptions exist.
    # (coefs[,1] is the `coefficient` column of the data frame built above.)
    coefs <- data.frame(rule = coefs$rule,
                        description = rep(NA, times = nrow(coefs)),
                        coefficient = coefs[,1])
  }
  if(!is.null(object$wins_points)) { # include winsorizing points in the
    # description if they were used in generating the ensemble:
    # (rows are matched by sorting both sides on variable name)
    wp <- object$wins_points[!is.na(object$wins_points$value), ]
    coefs[coefs$rule %in% wp$varname, ][
      order(coefs[coefs$rule %in% wp$varname,]$rule), ]$description <-
      wp[order(wp$varname), ]$value
  }
  # Largest absolute coefficients first:
  return(coefs[order(abs(coefs$coefficient), decreasing = TRUE),])
}
#' Predicted values based on final unbiased prediction rule ensemble
#'
#' \code{predict.pre} generates predictions based on the final prediction rule
#' ensemble, for training or new (test) observations
#'
#' @param object object of class \code{\link{pre}}.
#' @param newdata optional dataframe of new (test) observations, including all
#' predictor variables used for deriving the prediction rule ensemble.
#' @param penalty.par.val character. Penalty parameter criterion to be used for
#' selecting final model: lambda giving minimum cv error ("lambda.min") or lambda
#' giving cv error that is within 1 standard error of minimum cv error
#' ("lambda.1se").
#' @param type character string. The type of prediction required; the default
#' \code{type = "link"} is on the scale of the linear predictors. Alternatively,
#' for nominal outputs, \code{type = "response"} gives the fitted probabilities
#' and \code{type = "class"} gives the predicted class membership.
#' @param ... currently not used.
#' @details When newdata is not provided, training data included in the specified
#' object is used.
#' @examples \donttest{
#' set.seed(1)
#' train <- sample(1:length(complete.cases(airquality)), size = 120)
#' set.seed(42)
#' airq.ens <- pre(Ozone ~ ., data = airquality[complete.cases(airquality),][train,])
#' predict(airq.ens)
#' predict(airq.ens, newdata = airquality[complete.cases(airquality),][-train,])}
#' @import Matrix
#' @export
#' @method predict pre
predict.pre <- function(object, newdata = NULL, type = "link",
                        penalty.par.val = "lambda.1se", ...)
{
  # With no newdata, predict for the training observations using the model
  # matrix stored in the pre object.
  if (is.null(newdata)) {
    newdata <- object$modmat
  } else {
    if (!is.data.frame(newdata)) {
      stop("newdata should be a data frame.")
    }
    newdata <- model.frame(object$call$formula, newdata, na.action = NULL)
    # check if newdata has the same columns as object$orig_data:
    if (!all(names(object$data) %in% c(names(newdata), object$y_name))) {
      stop("newdata does not contain all predictor variables from the ensemble")
    } else {
      # take all input variables:
      newdata <- newdata[,names(newdata) %in% object$x_names]
      # add temporary y variable to create model.frame:
      newdata[,object$y_name] <- object$orig_data[,object$y_name][1]
      newdata <- model.frame(object$formula, newdata)
      # check if all variables have the same levels:
      if (!all(unlist(sapply(object$data, levels)) ==
               unlist(sapply(newdata[names(object$data)], levels)))) {
        stop("At least one variable in newdata has different levels than the
             variables used to create the ensemble")
      }
    }
    # Coefficients at the requested lambda: used to decide which rules need
    # evaluating (only those with nonzero coefficients matter for predictions).
    coefs <- as(coef.glmnet(object$glmnet.fit, s = penalty.par.val),
                Class = "matrix")
    # if there are rules in the ensemble, they should be evaluated:
    if (object$type != "linear") {
      # get names of rules with nonzero and zero coefficients:
      nonzerorulenames <- names(coefs[coefs!=0,])[grep("rule", names(coefs[coefs!=0,]))]
      zerorulenames <- names(coefs[coefs==0,])[grep("rule", names(coefs[coefs==0,]))]
      if (length(nonzerorulenames) > 0) {
        # Evaluate each nonzero rule's condition on newdata to build the
        # 0-1 rule columns. The conditions are stored as R expressions, so
        # they are evaluated via eval(parse(...)) in the newdata frame.
        nonzerorules <- as.character(
          object$rules$description[object$rules$rule %in% nonzerorulenames])
        newrulevars <- data.frame(r1 = as.numeric(with(newdata, eval(parse(
          text = nonzerorules[1])))))
        names(newrulevars) <- nonzerorulenames[1]
        if (length(nonzerorulenames) > 1) {
          for(i in 2:length(nonzerorules)) {
            newrulevars[,nonzerorulenames[i]] <- as.numeric(
              with(newdata, eval(parse(text = nonzerorules[i]))))
          }
        }
        # set all rules with zero coefficients to 0:
        # (their values cannot affect predictions, so evaluation is skipped)
        if (length(zerorulenames) > 0) {
          for(i in zerorulenames) {
            newrulevars[,i] <- 0
          }
        }
      } else { # only check and assess rules with non-zero coefficients
        if (length(zerorulenames) > 0) {
          newrulevars <- data.frame(r1 = rep(0, times = nrow(newdata)))
          names(newrulevars) <- zerorulenames[1]
          for(i in zerorulenames[-1]) {
            newrulevars[,i] <- 0
          }
        }
      }
    }
    # convert ordered categorical variables to numeric variables:
    # NOTE(review): as.numeric(as.character(...)) on a multi-column selection
    # looks unreliable when more than one ordered factor is present — verify.
    newdata[,sapply(newdata, is.ordered)] <- as.numeric(as.character(
      newdata[,sapply(newdata, is.ordered)]))
    # linear terms normalized before application of glmnet should also be
    # normalized before applying predict.glmnet:
    if (object$normalize & object$type != "rules") {
      newdata[,names(object$x_scales)] <- scale(
        newdata[,names(object$x_scales)], center = FALSE, scale = object$x_scales)
    }
    if (object$type != "linear") {
      newdata <- data.frame(newdata, newrulevars)
    }
    # Build a sparse model matrix matching the columns glmnet was fit on:
    newdata <- MatrixModels::model.Matrix(object$modmat_formula, data = newdata,
                                          sparse = TRUE)
  }
  # Get predictions:
  preds <- predict.cv.glmnet(object$glmnet.fit, newx = newdata, s = penalty.par.val,
                             type = type)[,1]
  return(preds)
}
#' Create partial dependence plot for a single variable
#'
#' \code{singleplot} creates a partial dependence plot, which shows the effect of
#' a predictor variable on the ensemble's predictions
#'
#' @param object an object of class \code{\link{pre}}
#' @param varname character vector of length one, specifying the variable for
#' which the partial dependence plot should be created.
#' @param penalty.par.val character. Penalty parameter criterion to be used for
#' selecting final model: lambda giving minimum cv error (\code{"lambda.min"}) or
#' lambda giving cv error that is within 1 standard error of minimum cv error
#' ("\code{lambda.1se}").
#' @param nvals optional numeric vector of length one. For how many values of x
#' should the partial dependence plot be created?
#' @param type character string. Type of prediction to be plotted on y-axis.
#' \code{type = "response"} gives fitted values for continuous outputs and
#' fitted probabilities for nominal outputs. \code{type = "link"} gives fitted
#' values for continuous outputs and linear predictor values for nominal outputs.
#' @details By default, a partial dependence plot will be created for each unique
#' observed value of the specified predictor variable. When the number of unique
#' observed values is large, this may take a long time to compute. In that case,
#' specifying the nvals argument can substantially reduce computing time. When the
#' nvals argument is supplied, values for the minimum, maximum, and (nvals - 2)
#' intermediate values of the predictor variable will be plotted. Note that nvals
#' can be specified only for numeric and ordered input variables. If the plot is
#' requested for a nominal input variable, the \code{nvals} argument will be
#' ignored and a warning is printed.
#' @examples \donttest{
#' set.seed(42)
#' airq.ens <- pre(Ozone ~ ., data = airquality[complete.cases(airquality),])
#' singleplot(airq.ens, "Temp")}
#' @export
singleplot <- function(object, varname, penalty.par.val = "lambda.1se",
                       nvals = NULL, type = "response")
{
  ## Check arguments:
  if (length(varname) != 1) {
    stop("A partial dependence plot should be requested for 1 variable")
  }
  if (!is.character(varname)) {
    stop("Specified varname should be of mode character")
  }
  # nvals only makes sense for numeric/ordered predictors; for factors every
  # observed level is plotted:
  if (is.factor(object$orig_data[,varname]) && !is.null(nvals)) {
    warning("Plot is requested for variable of class factor. Value specified for
            nvals will be ignored.", immediate. = TRUE)
    nvals <- NULL
  }
  ## Generate expanded dataset: every plotting value of varname is crossed with
  ## every training observation, so partial dependence can be obtained by
  ## averaging predictions over the training data:
  if (is.null(nvals)) {
    newx <- unique(object$orig_data[,varname])
  } else {
    newx <- seq(
      min(object$orig_data[,varname]), max(object$orig_data[,varname]), length = nvals)
  }
  exp_dataset <- object$orig_data[rep(row.names(object$orig_data), times = length(newx)),]
  exp_dataset[,varname] <- rep(newx, each = nrow(object$orig_data))
  ## Get predictions for the expanded dataset:
  exp_dataset$predy <- predict.pre(object, newdata = exp_dataset, type = type,
                                   penalty.par.val = penalty.par.val)
  ## Average predictions per plotting value and plot the resulting curve
  ## (note: this is aggregate's default method, which has no data argument):
  plot(aggregate(
    exp_dataset$predy, by = exp_dataset[varname], FUN = mean),
    type = "l", ylab = "predicted y", xlab = varname, main =
      paste("partial dependence on", varname))
  # To be implemented:
  # qntl = trimming factor for plotting numeric variables. Plots are shown for variable values in the range [quantile (qntl) - quantile(1-qntl)]. (Ignored for categorical variables (factors).)
  # nval = maximum number of abscissa evaluation points for numeric variables. (Ignored for categorical variables (factors).)
  # nav = maximum number of observations used for averaging calculations. (larger values provide higher accuracy with a diminishing return; computation grows linearly with nav)
  # catvals = vector of names for values (levels) of categorical variable (factor). (Ignored for numeric variables or length(vars) > 1)
  # samescale = plot vertical scaling flag .
  # samescale = TRUE / FALSE => do/don't require same vertical scale for all plots.
  # horiz = plot orientation flag for categorical variable barplots
  # horiz = T/F => do/don't plot bars horizontally
  # las = label orientation flag for categorical variable plots (horiz = F, only)
  # las = 1 => horizontal orientation of value (level) names stored in catvals (if present)
  # las = 2 => vertical orientation of value (level) names stored in catvals (if present)
  # cex.names = expansion factor for axis names (bar labels) for categorical variable barplots
  # col = color of barplot for categorical variables
  # denqnt = quantile for data density tick marks along upper plot boundary for numeric variables ( < 1)
  # denqnt <= 0 => no data density tick marks displayed
}
#' Create partial dependence plot for a pair of predictor variables
#'
#' \code{pairplot} creates a partial dependence plot to assess the effects of a
#' pair of predictor variables on the predictions of the ensemble
#'
#' @param object an object of class \code{\link{pre}}
#' @param varnames character vector of length two. Currently, pairplots can only
#' be requested for non-nominal variables. If varnames specifies the name(s) of
#' variables of class \code{"factor"}, an error will be printed.
#' @param penalty.par.val character. Should model be selected with lambda giving
#' minimum cv error ("lambda.min"), or lambda giving cv error that is within 1
#' standard error of minimum cv error ("lambda.1se")?
#' @param phi numeric. See \code{persp()} documentation.
#' @param theta numeric. See \code{persp()} documentation.
#' @param col character. Optional color to be used for surface in 3D plot.
#' @param nvals optional numeric vector of length 2. For how many values of
#' x1 and x2 should partial dependence be plotted? If \code{NULL}, all observed
#' values for the two predictor variables specified will be used (see details).
#' @param ticktype character string. If \code{"simple"} draws an arrow parallel
#' to the axes to indicate direction of increase; \code{"detailed"} draws ticks
#' on the axes as in 2D plots.
#' @param nticks the (approximate) number of tick marks to draw on the axes. Has
#' no effect if \code{ticktype = "simple"}.
#' @param type character string. Type of prediction to be plotted on z-axis.
#' \code{type = "response"} gives fitted values for continuous outputs and
#' fitted probabilities for nominal outputs. \code{type = "link"} gives fitted
#' values for continuous outputs and linear predictor values for nominal outputs.
#' @param ... Additional arguments to be passed to \code{\link[graphics]{persp}}.
#' @details By default, partial dependence will be plotted for each combination
#' of 20 values of the specified predictor variables. When \code{nvals = NULL} is
#' specified a dependence plot will be created for every combination of the unique
#' observed values of the two predictor variables specified. Therefore, using
#' \code{nvals = NULL} will often result in long computation times, and / or
#' memory allocation errors. Also, \code{\link{pre}} ensembles derived
#' from training datasets that are very wide or long may result in long
#' computation times and / or memory allocation errors. In such cases, reducing
#' the values supplied to \code{nvals} will reduce computation time and / or
#' memory allocation errors. When the nvals argument is supplied, values for the
#' minimum, maximum, and nvals - 2 intermediate values of the predictor variable
#' will be plotted. Furthermore, if none of the variables specified appears in
#' the final prediction rule ensemble, an error will occur.
#' @note The \code{pairplot} function uses the akima package to construct
#' interpolated surfaces and has an ACM license that restricts applications
#' to non-commercial usage, see
#' \url{https://www.acm.org/publications/policies/software-copyright-notice}
#' The \code{pairplot} function prints a note referring to this ACM licence.
#' @examples \donttest{
#' set.seed(42)
#' airq.ens <- pre(Ozone ~ ., data = airquality[complete.cases(airquality),])
#' pairplot(airq.ens, c("Temp", "Wind"))}
#' @export
#' @import graphics
pairplot <- function(object, varnames, penalty.par.val = "lambda.1se", phi = 45,
                     theta = 315, col = "cyan", nvals = c(20, 20), ticktype = "detailed",
                     nticks = max(nvals), type = "response", ...)
{
  ## Check arguments (akima is a suggested package; requireNamespace is the
  ## recommended way to test availability and loads its namespace for ::):
  if (!requireNamespace("akima", quietly = TRUE)) {
    stop("Function pairplot requires package akima. Download and install package
         akima from CRAN, and run again.")
  }
  if (length(varnames) != 2) {
    stop("Partial dependence should be requested for 2 variables.")
  }
  if (!is.character(varnames)) {
    stop("Specified varname should be of mode character.")
  }
  if (any(sapply(object$orig_data[,varnames], is.factor))) {
    stop("3D partial dependence plots are currently not supported for factors.")
  }
  ## Generate expanded dataset: every combination of the plotting values of the
  ## two predictors, each crossed with all training observations:
  if (is.null(nvals)){
    newx1 <- unique(object$orig_data[,varnames[1]])
    newx2 <- unique(object$orig_data[,varnames[2]])
  } else {
    newx1 <- seq(min(object$orig_data[,varnames[1]]), max(object$orig_data[,varnames[1]]),
                 length = nvals[1])
    newx2 <- seq(min(object$orig_data[,varnames[2]]), max(object$orig_data[,varnames[2]]),
                 length = nvals[2])
  }
  nobs1 <- length(newx1)
  nobs2 <- length(newx2)
  nobs <- nobs1*nobs2
  exp_dataset <- object$orig_data[rep(row.names(object$orig_data), times = nobs),]
  exp_dataset[,varnames[1]] <- rep(newx1, each = nrow(object$orig_data)*nobs2)
  exp_dataset[,varnames[2]] <- rep(rep(newx2, each = nrow(object$orig_data)),
                                   times = nobs1)
  ## Get predictions for the expanded dataset:
  pred_vals <- predict.pre(object, newdata = exp_dataset, type = type,
                           penalty.par.val = penalty.par.val)
  ## Create plot. The default for nticks (max(nvals)) is evaluated lazily, at
  ## first use inside persp() below, so nvals must get a numeric value here in
  ## case NULL was supplied:
  if (is.null(nvals)) nvals <- 3
  xyz <- akima::interp(exp_dataset[,varnames[1]], exp_dataset[,varnames[2]],
                       pred_vals, duplicate = "mean")
  persp(xyz, xlab = varnames[1], ylab = varnames[2], zlab = "predicted y",
        phi = phi, theta = theta, col = col, ticktype = ticktype,
        nticks = nticks, ...)
  # akima carries a restrictive ACM license; notify the user:
  cat("NOTE: function pairplot uses package 'akima', which has an ACM license.
      See also https://www.acm.org/publications/policies/software-copyright-notice.")
}
#' Calculate importances of base learners (rules and linear terms) and input
#' variables
#'
#' \code{importance} calculates importances for rules, linear terms and input
#' variables in the ensemble, and provides a bar plot of variable importances.
#'
#' @param object an object of class \code{\link{pre}}
#' @param plot logical. Should variable importances be plotted?
#' @param ylab character string. Plotting label for y-axis. Only used when
#' \code{plot = TRUE}.
#' @param main character string. Main title of the plot. Only used when
#' \code{plot = TRUE}.
#' @param global logical. Should global importances be calculated? If FALSE,
#' local importances are calculated, given the quantiles of the predictions F(x)
#' in \code{quantprobs}.
#' @param quantprobs optional numeric vector of length two. Only used when
#' \code{global = FALSE} (in which case specification of this argument is still
#' optional). Probabilities for calculating sample quantiles of the range of F(X),
#' over which local importances are calculated. The default provides variable
#' importances calculated over the 25\% highest values of F(X).
#' @param col character string. Plotting color to be used for bars in barplot.
#' @param round integer. Number of decimal places to round numeric results to.
#' If NA (default), no rounding is performed.
#' @param penalty.par.val character. Should model be selected with lambda giving
#' minimum cv error ("lambda.min"), or lambda giving cv error that is within 1
#' standard error of minimum cv error ("lambda.1se")?
#' @param ... further arguments to be passed to \code{barplot} (only used
#' when \code{plot = TRUE}).
#' @return A list with two dataframes: $baseimps, giving the importances for
#' baselearners in the ensemble, and $varimps, giving the importances for
#' variables that appear and do not appear in the ensemble.
#' @examples \donttest{
#' set.seed(42)
#' airq.ens <- pre(Ozone ~ ., data=airquality[complete.cases(airquality),])
#' # calculate global importances:
#' importance(airq.ens)
#' # calculate local importances (default: over 25% highest predicted values):
#' importance(airq.ens, global = FALSE)
#' # calculate local importances (custom: over 25% highest predicted values):
#' importance(airq.ens, global = FALSE, quantprobs = c(0, .25))}
#' @export
importance <- function(object, plot = TRUE, ylab = "Importance",
                       main = "Variable importances", global = TRUE,
                       penalty.par.val = "lambda.1se",
                       quantprobs = c(.75, 1), col = "grey", round = NA, ...)
{
  # NOTE: parameter `round` shadows base::round; the calls to round() below
  # still work because R skips non-function bindings when resolving a call.
  ## Step 1: Calculate the importances of the base learners:
  ## (importance = |coefficient| * sd of the base learner; F&P 2008, section 6)
  # get base learner coefficients:
  coefs <- coef.pre(object, print = FALSE, penalty.par.val = penalty.par.val)
  # give factors a description (their description field is NA; use the rule
  # name with a trailing space, matching the format of rule descriptions):
  coefs$description[is.na(coefs$description)] <-
    paste(as.character(coefs$rule)[is.na(coefs$description)], " ", sep = "")
  coefs <- coefs[order(coefs$rule),]
  # Get sds for every baselearner (the intercept is prepended with sd 0, so it
  # always gets importance 0):
  if (global) {
    sds <- c(0, apply(object$modmat, 2, sd, na.rm = TRUE))
  } else {
    # local importances: use only the observations whose predictions fall in
    # the requested quantile range of F(X):
    preds <- predict.pre(object, newdata = object$orig_data, type = "response")
    local_modmat <- object$modmat[preds >= quantile(preds, probs = quantprobs[1]) &
      preds <= quantile(preds, probs = quantprobs[2]),]
    if (nrow(local_modmat) < 2) {stop("Requested range contains less than 2
      observations, importances cannot be calculated")}
    sds <- c(0, apply(local_modmat, 2, sd, na.rm = TRUE))
  }
  names(sds)[1] <- "(Intercept)"
  sds <- sds[order(names(sds))]
  # Sanity check: coefficient and sd vectors must line up one-to-one.
  # NOTE(review): all(names(sds) != coefs$rule) is only TRUE when *every*
  # name mismatches; a partial misalignment passes silently. Possibly
  # any(...) was intended -- confirm before changing.
  if (all(names(sds) != coefs$rule)) {
    stop("There seems to be a problem with the ordering or size of the
      coefficient and sd vectors. Importances cannot be calculated.")
  }
  # baselearner importance is given by abs(coef*st.dev), see F&P section 6):
  baseimps <- data.frame(coefs, sd = sds, imp = abs(coefs$coefficient)*sds)
  ## Step 2: Calculate variable importances:
  # For factors, importances for each level should be added together.
  # first get indicators for assignments in modmat which are not rules:
  inds <- attr(object$modmat, "assign")[-grep("rule", colnames(object$modmat))]
  # add names in modelframe and modelmatrix to baselearner importances
  # (maps each model-matrix column, e.g. a factor dummy, back to the
  # original variable it was derived from):
  frame.mat.conv <- data.frame(
    modmatname = colnames(object$modmat)[-grep("rule", colnames(object$modmat))],
    modframename = attr(attr(object$data, "terms"), "term.labels")[inds])
  baseimps <- merge(frame.mat.conv, baseimps, by.x = "modmatname", by.y = "rule",
                    all.x = TRUE, all.y = TRUE)
  # For rules, calculate the number of terms in each rule:
  baseimps$nterms <- NA
  for(i in 1:nrow(baseimps)) {
    # If there is no "&" in rule description, there is only 1 term/variable in
    # the base learner:
    if (gregexpr("&", baseimps$description)[[i]][1] == -1) {
      baseimps$nterms[i] <- 1
    } else { # otherwise, the number of terms = the number of &-signs + 1
      baseimps$nterms[i] <- length(gregexpr("&", baseimps$description)[[i]]) + 1
    }
  }
  # Calculate variable importances (start every variable at 0; variables not
  # appearing in any base learner keep importance 0):
  varimps <- data.frame(varname = object$x_names, imp = 0)
  # Get importances for rules:
  for(i in 1:nrow(varimps)) { # for every variable:
    # For every baselearner:
    for(j in 1:nrow(baseimps)) {
      # if the variable name appears in the rule
      # (matched with a trailing space so e.g. "x1" does not match "x10"):
      if (gregexpr(paste(varimps$varname[i], " ", sep = ""),
                   baseimps$description[j])[[1]][1] != -1) {
        # then count the number of times it appears in the rule:
        n_occ <- length(
          gregexpr(paste(varimps$varname[i], " ", sep = ""),
                   baseimps$description[j])[[1]]
        )
        # and add it to the importance of the variable
        # (each occurrence contributes the rule importance divided by the
        # number of terms in the rule):
        varimps$imp[i] <- varimps$imp[i] + (n_occ * baseimps$imp[j] /
                                              baseimps$nterms[j])
      }
    }
  }
  # Get importances for factor variables:
  # if the variable appears several times in modframename (one row per dummy
  # column), add those importances to the variable's importance:
  for(i in object$x_names) {
    if (sum(i == baseimps$modframename, na.rm = TRUE) > 1) {
      varimps$imp[varimps$varname == i] <- sum(varimps$imp[varimps$varname == i],
          baseimps$imp[i == baseimps$modframename], na.rm = TRUE)
    }
  }
  ## Step 3: return (and plot) importances:
  # drop zero-importance entries and sort both tables descending:
  baseimps <- baseimps[baseimps$imp != 0,]
  baseimps <- baseimps[order(baseimps$imp, decreasing = TRUE),]
  varimps <- varimps[order(varimps$imp, decreasing = TRUE),]
  varimps <- varimps[varimps$imp != 0,]
  if (plot == TRUE & nrow(varimps) > 0) {
    barplot(height = varimps$imp, names.arg = varimps$varname, ylab = ylab,
            main = main, col = col)
  }
  if (!is.na(round)) {
    varimps[,"imp"] <- round(varimps[,"imp"], digits = round)
    baseimps[,c("imp", "coefficient", "sd")] <- round(
      baseimps[,c("imp", "coefficient", "sd")], digits = round)
  }
  # the intercept row is excluded from the returned base-learner importances:
  return(list(varimps = varimps, baseimps = data.frame(rule = baseimps$modmatname,
    baseimps[baseimps$description != "(Intercept) ", c("description", "imp", "coefficient", "sd")])))
}
#' Compute bootstrapped null interaction models
#'
#' \code{bsnullinteract} generates bootstrapped null interaction models,
#' which can be used to derive a reference distribution of the test statistic
#' calculated with \code{\link{interact}}.
#'
#' @param object object of class \code{\link{pre}}.
#' @param nsamp numeric. Number of bootstrapped null interaction models to be
#' derived.
#' @param penalty.par.val character. Which value of the penalty parameter
#' criterion should be used? The value yielding minimum cv error
#' (\code{"lambda.min"}) or penalty parameter yielding error within 1 standard
#' error of minimum cv error ("\code{lambda.1se}")?
#' @param parallel logical. Should parallel foreach be used to generate initial
#' ensemble? Must register parallel beforehand, such as doMC or others.
#' @param verbose logical. should progress be printed to the command line?
#' @return A list of length \code{nsamp} with null interaction datasets, to be
#' used as input for \code{\link{interact}}.
#' @examples \donttest{
#' set.seed(42)
#' airq.ens <- pre(Ozone ~ ., data=airquality[complete.cases(airquality),])
#' nullmods <- bsnullinteract(airq.ens)}
#' @details Computationally intensive. Progress info is printed to command line.
#' @export
bsnullinteract <- function(object, nsamp = 10, parallel = FALSE,
                           penalty.par.val = "lambda.1se", verbose = FALSE)
{
  # Preliminaries: parallel mode needs the foreach package.
  # NOTE(review): the warning message below contains a typo ("computating");
  # fix in a separate change, since this is a runtime string.
  if(parallel) {
    if (!("foreach" %in% installed.packages()[,1])) {
      warning("Parallel computating of function bsnullinteract() requires package foreach,
              which is currently not installed. Argument parallel will be set to FALSE.
              To run in parallel, download and install package foreach from CRAN, and run again.")
      parallel <- FALSE
    }
  }
  # create call for generating bootstrapped null models:
  # maxdepth = 1 restricts rules to single conditions (stumps), which cannot
  # represent interactions -- hence a "null interaction" model.
  bsnullmodcall <- object$call
  bsnullmodcall$maxdepth <- 1
  # create call for model allowing for interactions, grown on bootstrapped
  # datasets without interactions (same settings as the original fit):
  bsintmodcall <- object$call
  bsintmodcall$verbose <- FALSE
  # compute boostrapped null datasets (i.e., datasets with no interactions).
  # The parallel and serial branches below perform the identical 6-step
  # procedure of F&P 2008, formula 47:
  if (parallel) {
    if (verbose) cat("This may take a while.")
    # NOTE(review): %dopar% is used unqualified; it must be available on the
    # search path (imported from foreach in the package NAMESPACE) -- confirm.
    bs.ens <- foreach::foreach(i = 1:nsamp) %dopar% {
      # step 1: Take bootstrap sample {x_p, y_p}:
      bsdataset <- object$orig_data[sample(1:nrow(object$orig_data),
                                           nrow(object$orig_data), replace = TRUE),]
      # step 2: Build F_A, a null interaction model involving main effects only using {x_p, y_p}:
      bsnullmodcall$data <- bsdataset
      bs.ens.null <- eval(bsnullmodcall)
      # step 3: first part of formula 47 of F&P2008:
      # Calculate predictions F_A(x) for original x, using the null interaction model F_A:
      F_a_of_x <- predict.pre(bs.ens.null, newdata = object$orig_data)
      # step 4: third part of formula 47 of F&P2008:
      # Calculate predictions F_A(x_p):
      F_A_of_x_p <- predict.pre(bs.ens.null, newdata = bsdataset,
                                penalty.par.val = penalty.par.val)
      # step 5: Calculate ytilde of formula 47 of F&P2008:
      ytilde <- F_a_of_x + bsdataset[,object$y_name] - F_A_of_x_p
      # step 6: Build a model using (x,ytilde), using the same procedure as was
      # originally applied to (x,y):
      bsintmodcall$data <- object$orig_data
      bsintmodcall$data[,object$y_name] <- ytilde
      eval(match.call(pre, call = bsintmodcall))
    }
  } else {
    bs.ens <- list()
    if (verbose) cat("This may take a while. Computing null model ")
    for(i in 1:nsamp) {
      if (verbose) {cat(i, "of", nsamp, ", ")}
      # step 1: Take bootstrap sample {x_p, y_p}:
      bsdataset <- object$orig_data[sample(1:nrow(object$orig_data),
                                           nrow(object$orig_data), replace = TRUE),]
      # step 2: Build F_A, a null interaction model involving main effects only using {x_p, y_p}:
      bsnullmodcall$data <- bsdataset
      bs.ens.null <- eval(bsnullmodcall)
      # step 3: first part of formula 47 of F&P2008:
      # Calculate predictions F_A(x) for original x, using the null interaction model F_A:
      F_a_of_x <- predict.pre(bs.ens.null, newdata = object$orig_data)
      # step 4: third part of formula 47 of F&P2008:
      # Calculate predictions F_A(x_p):
      F_A_of_x_p <- predict.pre(bs.ens.null, newdata = bsdataset,
                                penalty.par.val = penalty.par.val)
      # step 5: Calculate ytilde of formula 47 of F&P2008:
      ytilde <- F_a_of_x + bsdataset[,object$y_name] - F_A_of_x_p
      # step 6: Build a model using (x,ytilde), using the same procedure as was
      # originally applied to (x,y):
      bsintmodcall$data <- object$orig_data
      bsintmodcall$data[,object$y_name] <- ytilde
      bs.ens[[i]] <- eval(match.call(pre, call = bsintmodcall))
    }
    if (verbose) cat("done!\n")
  }
  return(bs.ens)
}
# Internal workhorse for the H-squared interaction statistic of Friedman &
# Popescu (2008, section 8.1, equation 45): the proportion of the variance of
# the full-model predictions F(x) that is not captured by the sum of the
# partial dependence on x_j and the partial dependence on x_/j.
Hsquaredj <- function(object, varname, k = 10, penalty.par.val = NULL, verbose = FALSE) {
  n <- nrow(object$orig_data)
  # Full-model predictions F(x) for every training observation:
  F_x <- predict.pre(object, newdata = object$orig_data,
                     penalty.par.val = penalty.par.val)
  # Cross every observed value of varname with every training observation, so
  # both partial dependence functions can be obtained by averaging predictions:
  grid_data <- object$orig_data[rep(row.names(object$orig_data), times = n),]
  grid_data[,varname] <- rep(object$orig_data[,varname], each = n)
  # Predicting for the full n*n grid at once may exhaust memory, so the
  # predictions are computed in k randomly assigned chunks:
  grid_data$ids <- sample(1:k, nrow(grid_data), replace = TRUE)
  for(chunk in 1:k) {
    if (verbose) cat(".")
    in_chunk <- grid_data$ids == chunk
    grid_data[in_chunk, "yhat"] <- predict.pre(
      object, newdata = grid_data[in_chunk,],
      penalty.par.val = penalty.par.val)
  }
  # Partial dependence F_j(x_j): expectation over all observed x_/j:
  grid_data$i_xj <- rep(1:n, each = n)
  F_xj <- aggregate(yhat ~ i_xj, data = grid_data, FUN = mean)$yhat
  # Partial dependence F_/j(x_/j): expectation over all observed x_j:
  grid_data$i_xnotj <- rep(1:n, times = n)
  F_xnotj <- aggregate(yhat ~ i_xnotj, data = grid_data, FUN = mean)$yhat
  # H-squared is defined on centered functions:
  F_x <- scale(F_x, center = TRUE, scale = FALSE)
  F_xj <- scale(F_xj, center = TRUE, scale = FALSE)
  F_xnotj <- scale(F_xnotj, center = TRUE, scale = FALSE)
  denom <- sum(F_x^2)
  if (denom > 0) {
    return(sum((F_x - F_xj - F_xnotj)^2) / denom)
  }
  if (denom == 0) {
    # Degenerate case (all predictions identical): substitute a tiny
    # denominator so the statistic is still computable.
    return(sum((F_x - F_xj - F_xnotj)^2) / 1e-10)
  }
}
#' Calculate interaction statistics for user-specified variables
#'
#' \code{interact} calculates test statistics for assessing the strength of
#' interactions between the input variable(s) specified, and all other input
#' variables.
#'
#' @param object an object of class \code{\link{pre}}.
#' @param varnames character vector. Names of variables for which interaction
#' statistics should be calculated. If \code{NULL}, interaction statistics for
#' all predictor variables with non-zero coefficients will be calculated (which
#' may take a long time).
#' @param k integer. Calculating interaction test statistics is a computationally
#' intensive, so calculations are split up in several parts to prevent memory
#' allocation errors. If a memory allocation error still occurs, increase k.
#' @param nullmods object with bootstrapped null interaction models, resulting
#' from application of \code{bsnullinteract}.
#' @param penalty.par.val character. Which value of the penalty parameter
#' criterion should be used? The value yielding minimum cv error
#' (\code{"lambda.min"}) or penalty parameter yielding error within 1 standard
#' error of minimum cv error ("\code{lambda.1se}")?
#' @param parallel logical. Should parallel foreach be used? Must register
#' parallel beforehand, such as doMC or others.
#' @param plot logical. Should interaction statistics be plotted?
#' @param col character vector of length two. Color for plotting bars used. Only
#' used when \code{plot = TRUE}. Only first element of vector is used if
#' \code{nullmods = NULL}.
#' @param ylab character string. Label to be used for plotting y-axis.
#' @param main character. Main title for the bar plot.
#' @param legend logical. Should a legend be plotted in the top right corner of the
#' barplot?
#' @param verbose logical. Should progress information be printed to the
#' command line?
#' @param ... Additional arguments to be passed to \code{barplot}.
#' @examples
#' \donttest{
#' set.seed(42)
#' airq.ens <- pre(Ozone ~ ., data=airquality[complete.cases(airquality),])
#' interact(airq.ens, c("Temp", "Wind", "Solar.R"))}
#' @details Can be computationally intensive, especially when nullmods is specified,
#' in which case setting \verb{parallel = TRUE} may improve speed.
#' @return If nullmods is not specified, the function returns the interaction
#' test statistic. If nullmods is specified, the function returns a list,
#' with elements \code{$H}, which is the test statistic of the interaction
#' strength, and \code{$nullH}, which is a vector of test statistics of the
#' interaction in each of the bootstrapped null interaction models. In the barplot,
#' yellow is used for plotting the interaction test statistic. When applicable,
#' blue is used for the mean in the bootstrapped null models.
#' @export
interact <- function(object, varnames = NULL, nullmods = NULL, k = 10, plot = TRUE,
                     penalty.par.val = "lambda.1se", col = c("yellow", "blue"),
                     ylab = "Interaction strength", parallel = FALSE,
                     main = "Interaction test statistics", legend = TRUE,
                     verbose = FALSE, ...)
{
  # Preliminaries: parallel mode needs the foreach package.
  if(parallel) {
    if (!requireNamespace("foreach", quietly = TRUE)) {
      warning("Parallel computation of function interact() requires package foreach,
              which is currently not installed. Argument parallel will be set to FALSE.
              To run in parallel, download and install package foreach from CRAN, and run again.")
      parallel <- FALSE
    }
  }
  if (is.null(varnames)) {
    # default: only variables with non-zero importances in the final ensemble:
    varnames <- as.character(importance(object, plot = FALSE,
                                        penalty.par.val = penalty.par.val)$varimps$varname)
  } else if (!all(varnames %in% object$x_names)) {
    stop("Interaction statistics requested for one or more unknown input variables")
  }
  if (verbose) {
    # each call to Hsquaredj prints k dots; one call per variable per model:
    cat("This will take a while (",
        k * (length(nullmods) + 1) * length(varnames), "dots ). ")
  }
  if (parallel) {
    H <- foreach::foreach(i = 1:length(varnames), .combine = "c") %dopar% {
      # Calculate H_j for the original dataset:
      Hsquaredj(object = object, varname = varnames[i], k = k,
                penalty.par.val = penalty.par.val, verbose = verbose)
    }
    names(H) <- varnames
    if (!is.null(nullmods)) {
      nullH <- foreach::foreach(i = 1:length(varnames), .combine = "cbind") %dopar% {
        # Calculate H_j for the bootstrapped null models:
        nullH <- c()
        for(j in 1:length(nullmods)) {
          nullH[j] <- Hsquaredj(object = nullmods[[j]], varname = varnames[i],
                                k = k, penalty.par.val = penalty.par.val,
                                verbose = verbose)
        }
        nullH
      }
      nullH <- data.frame(nullH)
      names(H) <- colnames(nullH) <- varnames
    }
  } else { # if not parallel computation:
    H <- c()
    if (is.null(nullmods)) {
      for(i in 1:length(varnames)) {
        H[i] <- Hsquaredj(object = object, varname = varnames[i], k = k,
                          penalty.par.val = penalty.par.val, verbose = verbose)
      }
    } else { # Calculate H values for the training data and bootstrapped null models:
      nullH <- data.frame()
      for(i in 1:length(varnames)) {
        H[i] <- Hsquaredj(object = object, varname = varnames[i], k = k,
                          penalty.par.val = penalty.par.val, verbose = verbose)
        for(j in 1:length(nullmods)) {
          nullH[j,i] <- Hsquaredj(object = nullmods[[j]], varname = varnames[i],
                                  k = k, penalty.par.val = penalty.par.val,
                                  verbose = verbose)
        }
      }
      colnames(nullH) <- varnames
    }
    names(H) <- varnames
  }
  if (verbose) cat("\n")
  if (plot) {
    if (is.null(nullmods)) {
      barplot(H, col = col[1], main = main, ...)
    } else {
      # plot observed statistic and bootstrapped-null mean side by side:
      nullmeans <- vector()
      for(i in 1:length(varnames)) {
        nullmeans[i] <- mean(nullH[,i])
      }
      H2s <- as.vector(rbind(H, nullmeans))
      barplot(H2s, col = col, ylab = ylab, main = main,
              space = rep_len(1:0, length(H2s)), beside = TRUE,
              names.arg = rep(varnames, each = 2), ...)
      if (legend) {
        legend("topright", c("observed", "bs null mod mean"), bty = "n",
               col = col, pch = 15)
      }
    }
  }
  if(is.null(nullmods)) {
    return(H)
  } else {
    return(list(trainingH2 = H, nullH2 = nullH))
  }
}
#' Plot method for class pre
#'
#' \code{plot.pre} creates one or more plots depicting the rules in the final
#' ensemble as simple decision trees.
#'
#' @param x an object of class \code{\link{pre}}.
#' @param penalty.par.val character. Which value of the penalty parameter
#' criterion should be used? The value yielding minimum cv error
#' (\code{"lambda.min"}) or penalty parameter yielding error within 1 standard
#' error of minimum cv error ("\code{lambda.1se}")?
#' @param plot.dim numeric vector of length two, number of rows and columns of
#' the plotting window in which the rules should be plotted.
#' @param ... Currently not used.
#' @examples
#' \donttest{
#' set.seed(42)
#' airq.ens <- pre(Ozone ~ ., data=airquality[complete.cases(airquality),])
#' plot(airq.ens)}
#' @export
#' @method plot pre
plot.pre <- function(x, penalty.par.val = "lambda.1se", plot.dim = NULL, ...) {
# Preliminaries:
if (!("grid" %in% installed.packages()[,1])) {
stop("Function plot.pre requires package grid. Download and install package
grid from CRAN, and run again.")
}
# Get rules in final ensemble:
coefs <- importance(x, plot = FALSE, global = TRUE)$baseimps
coefs <- coefs[coefs$coefficient != 0,]
nonzerorules <- coefs[grep("rule", coefs$rule),]
if(is.null(plot.dim)) {
plot.dim <- rep(ceiling(sqrt(nrow(nonzerorules))), times = 2)
}
conditions <- list()
for(i in 1:nrow(nonzerorules)) { # i is a counter for rules
if (length(grep("&", nonzerorules$description[i], )) > 0) {
conditions[[i]] <- unlist(strsplit(nonzerorules$description[i], split = " & "))
} else {
conditions[[i]] <- nonzerorules$description[i]
}
}
# Create plotting regions:
grid::grid.newpage()
grid::pushViewport(grid::viewport(layout = grid::grid.layout(plot.dim[1], plot.dim[2])))
# Generate a tree for every rule:
for(i in 1:nrow(nonzerorules)) {
# Create lists of arguments and operators for every condition:
tmp <- list()
    # check whether the operator is " > ", " <= " or " %in% "
# split the string using the operator, into the variable name and splitting value, which is used to define split = partysplit(id, value)
# make it a list:
for (j in 1:length(conditions[[i]])) {
condition_j <- conditions[[i]][[j]]
tmp[[j]] <- character()
if (length(grep(" > ", condition_j)) > 0) {
tmp[[j]][1] <- unlist(strsplit(condition_j, " > "))[1]
tmp[[j]][2] <- " > "
tmp[[j]][3] <- unlist(strsplit(condition_j, " > "))[2]
}
if (length(grep(" <= ", condition_j)) > 0) {
tmp[[j]][1] <- unlist(strsplit(condition_j, " <= "))[1]
tmp[[j]][2] <- " <= "
tmp[[j]][3] <- unlist(strsplit(condition_j, " <= "))[2]
}
if (length(grep(" %in% ", condition_j)) > 0) {
tmp[[j]][1] <- unlist(strsplit(condition_j, " %in% "))[1]
tmp[[j]][2] <- " %in% "
tmp[[j]][3] <- unlist(strsplit(condition_j, " %in% "))[2]
}
}
ncond <- length(tmp)
# generate empty datasets for all the variables appearing in the rules:
treeplotdata <- data.frame(matrix(ncol = ncond))
for (j in 1:ncond) {
names(treeplotdata)[j] <- tmp[[j]][1]
if (tmp[[j]][2] == " %in% ") {
treeplotdata[,j] <- factor(treeplotdata[,j])
faclevels <- substring(tmp[[j]][3], first = 2)
faclevels <- gsub(pattern = "\"", replacement = "", x = faclevels, fixed = TRUE)
faclevels <- gsub(pattern = "(", replacement = "", x = faclevels, fixed = TRUE)
faclevels <- gsub(pattern = ")", replacement = "", x = faclevels, fixed = TRUE)
faclevels <- unlist(strsplit(faclevels, ", ",))
levels(treeplotdata[,j]) <- c(
levels(x$data[,tmp[[j]][1]])[levels(x$data[,tmp[[j]][1]]) %in% faclevels],
levels(x$data[,tmp[[j]][1]])[!(levels(x$data[,tmp[[j]][1]]) %in% faclevels)])
tmp[[j]][3] <- length(faclevels)
}
}
# generate partynode objects for plotting:
nodes <- list()
# Construct level 0 of tree (the two terminal nodes), conditional on operator of last condition:
if (tmp[[ncond]][2] == " > ") { # If condition involves " > ", the tree continues right:
nodes[[2]] <- list(id = 1L, split = NULL, kids = NULL, surrogates = NULL,
info = "exit")
nodes[[1]] <- list(id = 2L, split = NULL, kids = NULL, surrogates = NULL,
info = round(nonzerorules$coefficient[i], digits = 3))
} else { # If condition involves " <= " or " %in% " the tree continues left:
nodes[[2]] <- list(id = 1L, split = NULL, kids = NULL, surrogates = NULL,
info = round(nonzerorules$coefficient[i], digits = 3))
nodes[[1]] <- list(id = 2L, split = NULL, kids = NULL, surrogates = NULL,
info = "exit")
}
class(nodes[[1]]) <- class(nodes[[2]]) <- "partynode"
# if there are > 1 conditions in rule, loop for (nconditions - 1) times:
if (ncond > 1) {
for (lev in 1L:(ncond - 1)) { # lev is a counter for the level in the tree
if (tmp[[lev + 1]][2] == " > ") { # If condition involves " > ", the tree continues right:
nodes[[lev * 2 + 1]] <- list(id = as.integer(lev * 2 + 1),
split = partysplit(
as.integer(lev), breaks = as.numeric(tmp[[lev]][3])),
kids = list(nodes[[lev * 2 - 1]], nodes[[lev * 2]]),
surrogates = NULL, info = NULL)
nodes[[lev * 2 + 2]] <- list(id = as.integer(lev * 2 + 2), split = NULL,
kids = NULL, surrogates = NULL, info = "exit")
} else { # If condition involves " <= " or " %in% " the tree continues left:
nodes[[lev * 2 + 1]] <- list(id = as.integer(lev * 2 + 1), split = NULL,
kids = NULL, surrogates = NULL, info = "exit")
nodes[[lev * 2 + 2]] <- list(id = as.integer(lev * 2 + 2),
split = partysplit(
as.integer(lev), breaks = as.numeric(tmp[[lev]][3])),
kids = list(nodes[[lev * 2 - 1]], nodes[[lev * 2]]),
surrogates = NULL, info = NULL)
}
class(nodes[[lev * 2 + 1]]) <- class(nodes[[lev * 2 + 2]]) <- "partynode"
}
}
# Construct root node:
lev <- ncond
nodes[[lev * 2 + 1]] <- list(id = as.integer(lev * 2 + 2),
split = partysplit(as.integer(lev), breaks = as.numeric(tmp[[lev]][3])),
kids = list(nodes[[lev * 2]], nodes[[lev * 2 - 1]]),
surrogates = NULL, info = NULL)
class(nodes[[lev * 2 + 1]]) <- "partynode"
# Plot the rule:
grid::pushViewport(grid::viewport(layout.pos.col = rep(1:plot.dim[2], times = i)[i],
layout.pos.row = ceiling(i/plot.dim[1])))
fftree <- party(nodes[[lev * 2 + 1]], data = treeplotdata)
plot(fftree, newpage = FALSE,
main = paste(nonzerorules$rule[i], ": Importance", round(nonzerorules$imp[i], digits = 3), sep = ""),
inner_panel = node_inner(fftree, id = FALSE),
terminal_panel = node_terminal(fftree, id = FALSE))
grid::popViewport()
}
}
|
0214daf6bda20351c3795c54013c057851e09a14
|
04efe01489384e0babe71e1a0548e9d589c32166
|
/Heping/project1-v2.R
|
9b02829eb94b06ed1ecf189bab84745606ab1f4f
|
[] |
no_license
|
hpzheng/sys6021_codes
|
5749645527373bd6c86b05f1bddfdcb791f04cc3
|
13f87e0bec8e5f198f8cdee157b65d6e61d2d753
|
refs/heads/master
| 2021-01-10T10:07:40.921116
| 2015-10-14T17:59:38
| 2015-10-14T17:59:38
| 44,264,256
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,135
|
r
|
project1-v2.R
|
# Drop two outlier rows identified earlier in the analysis.
xdmgnd <- xdmgnd[-c(3226, 3588), ]

# Build one 0/1 dummy variable per METHOD letter (MethodA .. MethodP):
# MethodX is 1 for rows whose METHOD string contains the letter X.
# Computing the indicator vector directly replaces the fragile
# eval(parse(text = ...)) construction used previously.
for (i in c("A", "B", "C", "D", "E", "F", "G", "H",
            "I", "J", "K", "L", "M", "N", "O", "P")) {
  assign(paste0("Method", i), as.numeric(grepl(i, xdmgnd$METHOD)))
}
# First-order model of accident damage on the Method dummies.
# NOTE(review): MethodP is omitted here but included in the second-order
# model below -- confirm whether that is intentional (reference level?).
xdmgnd.lm10<-lm(ACCDMG~MethodA+MethodB+MethodC+MethodD+MethodE+MethodF+MethodG+MethodH+MethodI+MethodJ+MethodK+MethodL+MethodM+MethodN+MethodO,data=xdmgnd)
summary(xdmgnd.lm10)
# Standard 2x2 diagnostic plots (residuals, QQ, scale-location, leverage).
par(mfrow = c(2,2))
plot(xdmgnd.lm10)
par(mfrow = c(1,1))
# Second-order model: all main effects plus pairwise interactions.
xdmgnd.lm20<-lm(ACCDMG~(MethodA+MethodB+MethodC+MethodD+MethodE+MethodF+MethodG+MethodH+MethodI+MethodJ+MethodK+MethodL+MethodM+MethodN+MethodO+MethodP)^2,data=xdmgnd)
summary(xdmgnd.lm20)
par(mfrow = c(2,2))
plot(xdmgnd.lm20)
par(mfrow = c(1,1))
# Inverse-square-root-transformed response regressed on speed difference.
xdmgnd.lm30<-lm(ACCDMG^-0.5~SPDDIFF,data=xdmgnd)
summary(xdmgnd.lm30)
par(mfrow = c(2,2))
plot(xdmgnd.lm30)
par(mfrow = c(1,1))
# Log-transformed response: first-order model in all Method dummies (A-P).
xdmgnd.lm11 <- lm(log(ACCDMG)~MethodA+MethodB+MethodC+MethodD+MethodE+MethodF+MethodG+MethodH+MethodI+MethodJ+MethodK+MethodL+MethodM+MethodN+MethodO+MethodP, data=xdmgnd)
summary(xdmgnd.lm11)
# Check how close log(ACCDMG) is to normal.
plot(density(log(xdmgnd$ACCDMG)))
par(mfrow = c(2,2))
plot(xdmgnd.lm11)
# Fixed: this line was the garbled call par(mxdmgnd.c(1,1)), which errors.
par(mfrow = c(1,1))
library(MASS)
# Box-Cox analysis of the first-order model to choose a power transform.
boxcox(xdmgnd.lm10) # box-cox plot
boxcox(xdmgnd.lm10, plotit = TRUE, lambda = seq(-2, 2, by = 0.5))
boxcox(xdmgnd.lm10, plotit = FALSE)
# Maximum of the profile log-likelihood, and the lambda achieving it:
max(boxcox(xdmgnd.lm10, plotit = FALSE)$y)
boxcox(xdmgnd.lm10, plotit = FALSE)$x[which.max(boxcox(xdmgnd.lm10, plotit = FALSE)$y)]
# ACCDMG, first order model: refit with the Box-Cox optimal power L.
L<-boxcox(xdmgnd.lm10, plotit = F)$x[which.max(boxcox(xdmgnd.lm10, plotit = F)$y)]
xdmgnd.lm12<-lm(ACCDMG^L~MethodA+MethodB+MethodC+MethodD+MethodE+MethodF+MethodG+MethodH+MethodI+MethodJ+MethodK+MethodL+MethodM+MethodN+MethodO,data=xdmgnd)
# Display regression results for boxcox model
summary(xdmgnd.lm12)
par(mfrow = c(2,2))
plot(xdmgnd.lm12)
par(mfrow =c(1,1))
# ACCDMG, second order model
# NOTE(review): L is overwritten here with the second-order model's lambda;
# any later use of L (e.g. the PMSE loop below) sees this value, not lm10's.
L<-boxcox(xdmgnd.lm20, plotit = F)$x[which.max(boxcox(xdmgnd.lm20, plotit = F)$y)]
xdmgnd.lm22<-lm(ACCDMG^L~(MethodA+MethodB+MethodC+MethodD+MethodE+MethodF+MethodG+MethodH+MethodI+MethodJ+MethodK+MethodL+MethodM+MethodN+MethodO+MethodP)^2,data=xdmgnd)
# Display regression results for boxcox model
summary(xdmgnd.lm22)
par(mfrow = c(2,2))
plot(xdmgnd.lm22)
par(mfrow =c(1,1))
# Inspect the components available on the fit and its summary.
names(summary(xdmgnd.lm12))
names(xdmgnd.lm12)
# Residual sum of squares.
# NOTE(review): $res relies on partial matching to $residuals -- fragile.
sum(xdmgnd.lm12$res^2)
summary(xdmgnd.lm10)$adj.r.squared
# Information criteria for the transformed first-order model.
AIC(xdmgnd.lm12)
BIC(xdmgnd.lm12)
# Loads test.set() and mse() used in the cross-validation loop below.
source("../../Rcode/TestSet.R")
# create vectors to store PMSE
pmse10.result<-NULL; #Two empty vectors to record PMSEs
pmse12.result<-NULL;
# 20 random train/test splits; record prediction MSE for each model.
for (i in c(1:20)){
  #set test sets size:
  test.size<-1/3
  # generate training sets and test sets from original data:
  xdmgnd.data<-test.set(xdmgnd,test.size)
  # Build model with train set:
  # NOTE(review): L here is the lambda computed for the second-order model
  # above (it was overwritten) -- confirm that is the intended transform.
  lm10.train<-lm(ACCDMG ~ MethodA+MethodB+MethodC+MethodD+MethodE+MethodF+MethodG+MethodH+MethodI+MethodJ+MethodK+MethodL+MethodM+MethodN+MethodO+MethodP, data=xdmgnd.data$train)
  lm12.train<-lm(ACCDMG^L ~ MethodA+MethodB+MethodC+MethodD+MethodE+MethodF+MethodG+MethodH+MethodI+MethodJ+MethodK+MethodL+MethodM+MethodN+MethodO+MethodP, data=xdmgnd.data$train)
  # First, how to predict with lm models:
  lm10.pred<-predict(lm10.train,newdata=xdmgnd.data$test)
  lm12.pred<-predict(lm12.train,newdata=xdmgnd.data$test)
  # Next, compute PMSE:
  # NOTE(review): lm12.pred is on the transformed (ACCDMG^L) scale but is
  # compared against raw ACCDMG -- the two PMSEs are not on the same scale.
  # Back-transform (lm12.pred^(1/L)) before comparing -- TODO confirm.
  pmse.lm10<-mse(lm10.pred,xdmgnd.data$test$ACCDMG)
  pmse.lm12<-mse(lm12.pred,xdmgnd.data$test$ACCDMG)
  # Add the PMSE for this run into your vector to store PMSE
  pmse10.result<-c(pmse10.result,pmse.lm10)
  pmse12.result<-c(pmse12.result,pmse.lm12)
}
# Compare the two models over the 20 PMSE runs.
# Fixed: the original referenced undefined pmse1.result / pmse2.result;
# the vectors actually built above are pmse10.result and pmse12.result.
# (A stray leftover debug call, plot(1, 2, xlab = "Test"), was removed.)
plot(pmse10.result, type = 'b', col = 'blue', xlab = "Index", ylab = "PMSE")
lines(pmse12.result, type = 'b', col = 'red')
title(main = "Model Comparison Based on PMSE")
# 1. Which model is better from visual inspection of graph?
# The blue one
# We can also use statistical tests to compare our models.
# Paired t test:
t.test(pmse10.result, pmse12.result, paired = TRUE)
# Wilcoxon Test:
wilcox.test(pmse10.result, pmse12.result, paired = TRUE)
|
3d290817fe006c8efc4399f41d83806fef080003
|
7c1f200930e5051c3f1dd25ab405467e3d894d85
|
/R/anscombe_tidy.R
|
5e22c03943ac1237db90f8437fce2508eb784265
|
[
"MIT"
] |
permissive
|
rstudio4edu/datarrraw
|
3ce9d3e242f5aa79234b586e8812079c0eff6522
|
b79c3b5c4b086fb71f3feaaa3334ec8b4808e89e
|
refs/heads/master
| 2020-06-08T03:02:16.322535
| 2019-06-21T21:16:50
| 2019-06-21T21:16:50
| 193,143,222
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 440
|
r
|
anscombe_tidy.R
|
#' Anscombe's quartet, tidied
#'
#' A long-format version of the `anscombe` data from the datasets package:
#' one row per (set, observation) pair instead of one column per series.
#'
#' @return a [tibble][tibble::tibble-package]
#' @format A data frame with 44 rows and 4 variables:
#' \describe{
#'   \item{observation}{observation number within each set}
#'   \item{set}{which quartet member: I, II, III, or IV (roman numerals)}
#'   \item{x}{the x values}
#'   \item{y}{the y values}
#' }
#' @source \url{https://stat.ethz.ch/R-manual/R-devel/RHOME/library/datasets/html/anscombe.html}
"anscombe_tidy"
|
2c7f6f919a08fdad50d78d1dd9bf107c74cf29b5
|
8896fc3a3bc71872e0948690417a40b35363704a
|
/R/make_matrix.R
|
c4b4e3ff118db832e943ce2d1a615d184ef7117e
|
[] |
no_license
|
1014violin/MetabFUN
|
ef1050e4bc53dd58f38a610e5f445ce236598f43
|
608f15d823877ad78526e4acaf21cf7d64598f79
|
refs/heads/master
| 2022-12-20T09:21:45.122121
| 2020-10-03T21:04:10
| 2020-10-03T21:04:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,273
|
r
|
make_matrix.R
|
#' Produce a matrix for hierarchical clustering or PCA
#'
#' Reshapes MS data from long to wide format, keeping only metabolites that
#' pass the ANOVA cutoff. Missing values are set to 0 and all-zero rows and
#' columns are dropped before returning.
#'
#' @author Daniel Braas
#' @param MS_data The data frame to be used for the matrix. Can be relative
#'   amounts, fractional contribution or percent labeled.
#' @param Type Which type of data is used. Right now this is not a used
#'   variable, but will become important if isotopologue support is added.
#' @param anova A cutoff value: usually a p-value of some sort.
#' @return A matrix that can be used as input for hierarchical clustering or PCA.
#' @export
make_matrix <- function(MS_data, Type, anova = 0.05){
  if (sum(grepl('MID', names(MS_data))) >= 1) {
    # Isotopologue (MID) data: one row per metabolite/isotopologue pair.
    data9 <- MS_data %>%
      filter(ANOVA <= anova) %>%
      select(Name, Condition, Iso, contains('MID')) %>%
      gather(Exp, Value, -Name, -Condition, -Iso) %>%
      arrange(Name, Condition) %>%
      unite(Condition_Exp, c(Condition, Exp), sep='_') %>%
      mutate(Condition_Exp=factor(Condition_Exp, levels=unique(Condition_Exp))) %>%
      unite(Name_Iso, c(Name, Iso), sep='_') %>%
      spread(Condition_Exp, Value)
    # Drop isotopologues with no signal in any sample.
    data9 <- data9[!(rowSums(data9[2:length(data9)], na.rm = TRUE) == 0), ]
  } else {
    # Pool / relative-amount data: one row per metabolite.
    data9 <- MS_data %>%
      filter(ANOVA <= anova) %>%
      select(Name, Condition, grep('Exp|FC|Labeled', names(MS_data))) %>%
      gather(Exp, Value, -Name, -Condition) %>%
      arrange(Name, Condition) %>%
      group_by(Name) %>%
      mutate(Nr.NA = sum(is.na(Value)),
             Nr.Samples = n()) %>%
      # Keep metabolites with at least two non-missing observations.
      filter(Nr.NA < Nr.Samples - 1) %>%
      ungroup() %>%
      select(-Nr.NA, -Nr.Samples) %>%
      unite(Condition_Exp, c(Condition, Exp), sep='_') %>%
      mutate(Condition_Exp=factor(Condition_Exp, levels=unique(Condition_Exp))) %>%
      spread(Condition_Exp, Value)
  }
  # Remaining NAs become 0 so distance/PCA computations work.
  data9[is.na(data9)] <- 0
  data5 <- as.matrix(data9[2:length(data9)])
  # Fixed: row names come from the first (identifier) column explicitly.
  # The previous data9$Name only worked in the MID branch via `$` partial
  # matching against the Name_Iso column.
  rownames(data5) <- data9[[1]]
  # Remove rows (metabolites) and columns (samples) with no signal at all.
  data5 <- data5[!(rowSums(data5))==0,]
  data5 <- data5[,!(colSums(data5))==0]
  return(data5)
}
|
06d52d7b4c70136c65a2c9d18d937967ab3d52f7
|
e9dc3f38def8ce76d19b7554702bee93df3b74b1
|
/plot5.R
|
42ad93443a89176f76170a6619691fa7f9357f40
|
[] |
no_license
|
emiliehwolf/EDA_Wk4Assignment
|
ddccd73c5a779cbacfb9628251e497f2f43e5a3c
|
c03fa61015552334aa08ce6983a8241fb63bc866
|
refs/heads/master
| 2021-01-22T05:32:59.821541
| 2017-06-02T02:27:09
| 2017-06-02T02:27:09
| 92,476,072
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,130
|
r
|
plot5.R
|
########################################################################
## plot5.R
## Author: Emilie H. Wolf
## Date: Sunday, May 28, 2017
## Description: Course Project 2 for Coursera's Exploratory Data
## Analysis class -- total PM2.5 emissions from motor vehicle
## (on-road) sources in Baltimore City, plotted per year.
##
## The .rds data files must sit in the same folder as this script,
## with the working directory set to that folder.
########################################################################

library(ggplot2)
theme_set(theme_bw(base_size = 14))

## Load the NEI summary data.
NEI <- readRDS("summarySCC_PM25.rds")

## Restrict to Baltimore City (fips 24510) records from on-road sources.
baltimore_onroad <- NEI[NEI$fips == "24510" & NEI$type == "ON-ROAD", ]

## Bar chart summing emissions within each year.
plot5 <- ggplot(baltimore_onroad, aes(as.factor(year), Emissions)) +
  geom_bar(stat = "summary", fun.y = "sum", fill = "#cc3300") +
  labs(y = "PM2.5 Emissions in Tons", x = "Year") +
  labs(title = "PM2.5 Motor Vehicle Emissions in Baltimore City, MD")

## Write the figure to disk.
ggsave(file = "plot5.png", plot5, width = 6.4, height = 4.8, units = "in")
|
967a13ac18f6d3b8b46b540d5778f6a127f94e56
|
21463e380ec7981b2946371ea3896f0f2cb0fa37
|
/Intro to DS/Lab Session 3 and 4/W2D1480191.R
|
5e16f8ba40e47bb68c5114312407ec8c0e97cb42
|
[] |
no_license
|
kiaorahao/DataScienceCourse
|
336e8934b59ce229a45d3f5a29405cbb757f9a92
|
80c9aa9e3e2bf49a6b5383892f7cc27b19b2f38f
|
refs/heads/master
| 2021-01-02T22:46:40.991966
| 2017-11-20T05:48:15
| 2017-11-20T05:48:15
| 99,386,183
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,528
|
r
|
W2D1480191.R
|
# Lab: fit linear and polynomial regression models of power output (KW)
# on generator Size, and compare their in-sample accuracy.
library(readr)
library(plotly)  # provides plot_ly(), add_trace(), subplot() and %>%

# 1. Create a scatter plot for the two variables.
table1 <- read_csv("~/Documents/OneDrive/PGD/Data Science/Lab Session 3 and 4/Datasets/Data Set 5.csv")
p <- plot_ly(data = table1, type = 'scatter', mode = 'markers', x = ~Size, y = ~KW, name = 'Scatter')

# 2. Calculate a linear regression model.
fit <- lm(data = table1, KW ~ Size)
p_l <- plot_ly(data = table1, type = 'scatter', mode = 'markers', x = ~Size, y = ~KW, name = 'Scatter') %>%
  add_trace(data = table1, type = 'scatter', mode = 'lines', x = ~Size, y = fitted(fit), name = 'Linear Model')

# 3. Calculate polynomial regression models of order 2 and 3.
model_ploy2 <- lm(table1$KW ~ poly(table1$Size, 2))
model_ploy3 <- lm(table1$KW ~ poly(table1$Size, 3))

# 4. Plot the regression models.
p_2 <- plot_ly(data = table1, type = 'scatter', mode = 'markers', x = ~Size, y = ~KW, name = 'Scatter') %>%
  add_trace(data = table1, type = 'scatter', mode = 'lines', x = ~Size, y = fitted(model_ploy2), name = 'polynomial 2')
p_3 <- plot_ly(data = table1, type = 'scatter', mode = 'markers', x = ~Size, y = ~KW, name = 'Scatter') %>%
  add_trace(data = table1, type = 'scatter', mode = 'lines', x = ~Size, y = fitted(model_ploy3), name = 'polynomial 3')
p_all <- subplot(p_l, p_2, p_3)

# 5. Compare the accuracy of the three models via in-sample MSE.
# Fixed: the original referenced undefined objects fit_2 / fit_3 (the models
# are named model_ploy2 / model_ploy3) and relied on partial matching with
# fit$fitted.value; fitted() is used instead.
error_l <- fitted(fit) - table1$KW
MSE_l <- mean(error_l^2)
error_2 <- fitted(model_ploy2) - table1$KW
MSE_2 <- mean(error_2^2)
error_3 <- fitted(model_ploy3) - table1$KW
MSE_3 <- mean(error_3^2)
|
a915337a70593e0428b2922cd735be3e7d84a123
|
4dfae026a7c16a91e0eee543fbc1404009246db2
|
/R/alignment.R
|
ec2b46877449ec9019b6e229583f1c40348006bb
|
[
"MIT"
] |
permissive
|
cole-trapnell-lab/monocle3
|
2d32dddb777ba384470f3842b0fd7d27b857cd5b
|
2b17745d949db1243e95e69e39d2b4b1aa716c09
|
refs/heads/master
| 2023-09-03T07:06:43.428228
| 2023-08-18T22:50:49
| 2023-08-18T22:50:49
| 167,440,342
| 280
| 110
|
NOASSERTION
| 2023-01-24T21:25:37
| 2019-01-24T21:26:18
|
R
|
UTF-8
|
R
| false
| false
| 9,128
|
r
|
alignment.R
|
#' Align cells from different groups within a cds
#'
#' @description Data sets that contain cells from different groups often
#' benefit from alignment to subtract differences between them. Alignment
#' can be used to remove batch effects, subtract the effects of treatments,
#' or even potentially compare across species.
#' \code{align_cds} executes alignment and stores these adjusted coordinates.
#'
#' This function can be used to subtract both continuous and discrete batch
#' effects. For continuous effects, \code{align_cds} fits a linear model to the
#' cells' PCA or LSI coordinates and subtracts them using Limma. For discrete
#' effects, you must provide a grouping of the cells, and then these groups are
#' aligned using Batchelor, a "mutual nearest neighbor" algorithm described in:
#'
#' Haghverdi L, Lun ATL, Morgan MD, Marioni JC (2018). "Batch effects in
#' single-cell RNA-sequencing data are corrected by matching mutual nearest
#' neighbors." Nat. Biotechnol., 36(5), 421-427. doi: 10.1038/nbt.4091
#'
#' @param cds the cell_data_set upon which to perform this operation
#' @param preprocess_method a string specifying the low-dimensional space
#' in which to perform alignment, currently either PCA or LSI. Default is
#' "PCA".
#' @param residual_model_formula_str NULL or a string model formula specifying
#' any effects to subtract from the data before dimensionality reduction.
#' Uses a linear model to subtract effects. For non-linear effects, use
#' alignment_group. Default is NULL.
#' @param alignment_group String specifying a column of colData to use for
#' aligning groups of cells. The column specified must be a factor.
#' Alignment can be used to subtract batch effects in a non-linear way.
#' For correcting continuous effects, use residual_model_formula_str.
#' Default is NULL.
#' @param alignment_k The value of k used in mutual nearest neighbor alignment
#' @param verbose Whether to emit verbose output during dimensionality
#' reduction
#' @param build_nn_index logical When this argument is set to TRUE,
#' align_cds builds the nearest neighbor index from the
#' aligned reduced matrix for later use. Default is FALSE.
#' @param nn_control An optional list of parameters used to make the nearest
#' neighbor index. See the set_nn_control help for detailed information.
#' @param ... additional arguments to pass to limma::lmFit if
#' residual_model_formula is not NULL
#' @return an updated cell_data_set object
#' @examples
#' \donttest{
#' cell_metadata <- readRDS(system.file('extdata',
#' 'worm_embryo/worm_embryo_coldata.rds',
#' package='monocle3'))
#' gene_metadata <- readRDS(system.file('extdata',
#' 'worm_embryo/worm_embryo_rowdata.rds',
#' package='monocle3'))
#' expression_matrix <- readRDS(system.file('extdata',
#' 'worm_embryo/worm_embryo_expression_matrix.rds',
#' package='monocle3'))
#'
#' cds <- new_cell_data_set(expression_data=expression_matrix,
#' cell_metadata=cell_metadata,
#' gene_metadata=gene_metadata)
#'
#' cds <- preprocess_cds(cds)
#' cds <- align_cds(cds, alignment_group =
#' "batch", residual_model_formula_str = "~ bg.300.loading +
#' bg.400.loading + bg.500.1.loading + bg.500.2.loading +
#' bg.r17.loading + bg.b01.loading + bg.b02.loading")
#' }
#' @export
align_cds <- function(cds,
                      preprocess_method = c("PCA", "LSI"),
                      alignment_group=NULL,
                      alignment_k=20,
                      residual_model_formula_str=NULL,
                      verbose=FALSE,
                      build_nn_index=FALSE,
                      nn_control=list(),
                      ...){
  # Validate preprocess_method; match.arg() errors inside the tryCatch when
  # the value is not one of "PCA"/"LSI", which turns into FALSE here.
  assertthat::assert_that(
    tryCatch(expr = ifelse(match.arg(preprocess_method) == "",TRUE, TRUE),
             error = function(e) FALSE),
    msg = "preprocess_method must be one of 'PCA' or 'LSI'")
  preprocess_method <- match.arg(preprocess_method)
  # The low-dimensional coordinates produced by preprocess_cds(); alignment
  # operates on this matrix, never on the raw expression data.
  preproc_res <- SingleCellExperiment::reducedDims(cds)[[preprocess_method]]
  assertthat::assert_that(!is.null(preproc_res),
                          msg = paste0("Preprocessing for '",
                                       preprocess_method, "' does not exist. ",
                                       "Please make sure you have run ",
                                       "preprocess_cds with",
                                       "preprocess_method = '",
                                       preprocess_method,
                                       "' before calling align_cds."))
  # Resolve nearest-neighbor index parameters up front, only if an index
  # will actually be built at the end.
  if(build_nn_index) {
    nn_control <- set_nn_control(mode=1,
                                 nn_control=nn_control,
                                 nn_control_default=get_global_variable('nn_control_annoy_cosine'),
                                 nn_index=NULL,
                                 k=NULL,
                                 verbose=verbose)
  }
  # Fixed seed for reproducibility of the stochastic steps below.
  set.seed(2016)
  # Reset any previous "Aligned" results/bookkeeping before recomputing.
  cds <- initialize_reduce_dim_metadata(cds, 'Aligned')
  cds <- initialize_reduce_dim_model_identity(cds, 'Aligned')
  # Continuous effects: fit a linear model of the reduced coordinates on the
  # covariates in residual_model_formula_str (via limma) and subtract the
  # fitted effects. The intercept column is excluded from the subtraction.
  if (!is.null(residual_model_formula_str)) {
    if (verbose) message("Removing residual effects")
    X.model_mat <- Matrix::sparse.model.matrix(
      stats::as.formula(residual_model_formula_str),
      data = colData(cds),
      drop.unused.levels = TRUE)
    fit <- limma::lmFit(Matrix::t(preproc_res), X.model_mat, ...)
    beta <- fit$coefficients[, -1, drop = FALSE]
    # Coefficients that could not be estimated contribute nothing.
    beta[is.na(beta)] <- 0
    preproc_res <- Matrix::t(as.matrix(Matrix::t(preproc_res)) -
                               beta %*% Matrix::t(X.model_mat[, -1]))
    # Keep the coefficients so the same correction can be re-applied later.
    cds@reduce_dim_aux[['Aligned']][['model']][['beta']] <- beta
  }
  # Discrete effects: mutual-nearest-neighbor correction across the groups
  # named by alignment_group (batchelor::reducedMNN).
  if(!is.null(alignment_group)) {
    message("Aligning cells from different batches using Batchelor.\n",
            "Please remember to cite:\n\t Haghverdi L, Lun ATL, ",
            "Morgan MD, Marioni JC (2018). 'Batch effects in ",
            "single-cell RNA-sequencing data are corrected by matching ",
            "mutual nearest neighbors.' Nat. Biotechnol., 36(5), ",
            "421-427. doi: 10.1038/nbt.4091")
    corrected_PCA = batchelor::reducedMNN(as.matrix(preproc_res),
                                          batch=colData(cds)[,alignment_group],
                                          k=alignment_k)
    preproc_res = corrected_PCA$corrected
    cds <- add_citation(cds, "MNN_correct")
  }
  # Store the adjusted coordinates under the "Aligned" reduction.
  SingleCellExperiment::reducedDims(cds)[["Aligned"]] <- as.matrix(preproc_res)
  #
  # Notes:
  #   o  the functions save_transform_models/load_transform_models
  #      expect that the reduce_dim_aux slot consists of a S4Vectors::SimpleList
  #      that stores information about methods with the elements
  #        reduce_dim_aux[[method]][['model']] for the transform elements
  #        reduce_dim_aux[[method]][[nn_method]] for the annoy index
  #      and depends on the elements within model and nn_method.
  #
  # Record the parameters used so the transform can be audited/re-applied.
  cds@reduce_dim_aux[['Aligned']][['model']][['preprocess_method']] <- preprocess_method
  cds@reduce_dim_aux[['Aligned']][['model']][['alignment_group']] <- alignment_group
  cds@reduce_dim_aux[['Aligned']][['model']][['alignment_k']] <- alignment_k
  cds@reduce_dim_aux[['Aligned']][['model']][['residual_model_formula_str']] <- residual_model_formula_str
  # Provenance bookkeeping: tie the Aligned matrix/model to the identity of
  # the upstream preprocessing they were derived from.
  matrix_id <- get_unique_id(SingleCellExperiment::reducedDims(cds)[["Aligned"]])
  reduce_dim_matrix_identity <- get_reduce_dim_matrix_identity(cds, preprocess_method)
  cds <- set_reduce_dim_matrix_identity(cds, 'Aligned',
                                        'matrix:Aligned',
                                        matrix_id,
                                        reduce_dim_matrix_identity[['matrix_type']],
                                        reduce_dim_matrix_identity[['matrix_id']],
                                        'matrix:Aligned',
                                        matrix_id)
  reduce_dim_model_identity <- get_reduce_dim_model_identity(cds, preprocess_method)
  cds <- set_reduce_dim_model_identity(cds, 'Aligned',
                                       'matrix:Aligned',
                                       matrix_id,
                                       reduce_dim_model_identity[['model_type']],
                                       reduce_dim_model_identity[['model_id']])
  # Either build a fresh nearest-neighbor index on the aligned coordinates,
  # or drop any stale index left over from a previous run.
  if( build_nn_index ) {
    nn_index <- make_nn_index(subject_matrix=SingleCellExperiment::reducedDims(cds)[['Aligned']],
                              nn_control=nn_control,
                              verbose=verbose)
    cds <- set_cds_nn_index(cds=cds,
                            reduction_method='Aligned',
                            nn_index,
                            verbose=verbose)
  }
  else
    cds <- clear_cds_nn_index(cds=cds, reduction_method='Aligned', 'all')
  cds
}
|
20fafecfa95cc82d8a39c0258e8b097d31a4bc0b
|
d6302bdd07645e0da8ad4430a261d3ebe2149435
|
/man/visualizeSampleClustering.Rd
|
9d68a24f39542a0d44c9b21a621ebed8c8caa34e
|
[] |
no_license
|
cran/RclusTool
|
3a8fec24edeaedee42ef0f255f32dfc4be107dfe
|
7ed428f6c896889a9b291a279e1e82f8f6d9cd3b
|
refs/heads/master
| 2022-09-04T15:47:33.547991
| 2022-08-29T07:40:08
| 2022-08-29T07:40:08
| 236,879,246
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,190
|
rd
|
visualizeSampleClustering.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sampleView.R
\name{visualizeSampleClustering}
\alias{visualizeSampleClustering}
\title{Interactive figure with 2D scatter-plot}
\usage{
visualizeSampleClustering(
data.sample,
label = NULL,
clustering.name = "proposed clustering",
cluster.summary = NULL,
RclusTool.env = initParameters(),
prototypes = NULL,
profile.mode = "none",
selection.mode = "none",
compare.mode = "off",
pairs = NULL,
features.mode = "initial",
wait.close = FALSE,
fontsize = 9
)
}
\arguments{
\item{data.sample}{list containing features, profiles and clustering results.}
\item{label}{vector of labels.}
\item{clustering.name}{character vector specifying the clustering method used to get labels.}
\item{cluster.summary}{data.frame containing the clusters summaries (as returned by 'clusterSummary').}
\item{RclusTool.env}{environment in which all global parameters, raw data and results are stored.}
\item{prototypes}{list containing vectors of prototypes indices.}
\item{profile.mode}{character vector specifying the plot mode of profiles. Must be 'none' (default), 'whole sample', 'cluster i' or 'constrained pairs'.}
\item{selection.mode}{character vector specifying the selection mode of profiles. Must be 'none' (default), 'prototypes' or 'pairs'.}
\item{compare.mode}{character vector specifying the mode of comparison between two clusterings results. Must be 'off' (default) or 'on'.}
\item{pairs}{list of constrained pairs (must-link and cannot-link).}
\item{features.mode}{character vector specifying the plot mode of features (projection in a specific space). Must be 'initial' (default), 'preprocessed', 'pca', 'pca_full' or 'spectral', or prefixed versions ('sampled', 'scaled') of those space names.}
\item{wait.close}{boolean: if FALSE (default), the following steps of the analysis calculations are computed even if the window is not closed.}
\item{fontsize}{size of font (default is 9)}
}
\value{
prototypes in \code{selection.mode} = "prototypes" mode, pairs in \code{selection.mode} = "pairs" mode.
}
\description{
Open an interactive figure with a 2D scatter-plot of all particles, with a choice of axes. Grey color (label=0) marks data to be cleaned or removed in the classification process.
}
\details{
visualizeSampleClustering opens an interactive figure with 2D scatter-plot of all particles with axis choice
}
\examples{
dat <- rbind(matrix(rnorm(100, mean = 2, sd = 0.3), ncol = 2),
matrix(rnorm(100, mean = 4, sd = 0.3), ncol = 2),
matrix(rnorm(100, mean = 6, sd = 0.3), ncol = 2))
colnames(dat) <- c("x","y")
tf1 <- tempfile()
write.table(dat, tf1, sep=",", dec=".")
sig <- data.frame(ID=rep(1:150, each=30), SIGNAL=rep(dnorm(seq(-2,2,length=30)),150))
tf2 <- tempfile()
write.table(sig, tf2, sep=",", dec=".")
x <- importSample(file.features=tf1, file.profiles=tf2)
res <- KmeansQuick(x$features$initial$x, K=3)
new.labels <- formatLabelSample(res$cluster, x)
visualizeSampleClustering(x, label = new.labels, clustering.name="K-means",
profile.mode="whole sample")
}
\seealso{
\code{\link{plotProfile}}, \code{\link{plotSampleFeatures}}
}
|
9aabee23b06c5f6c832dc7fa22a4841ebd159002
|
e8f80d9a61d542bcbf3386aa91c8ccc037d048c2
|
/scripts/sero_estim_2.R
|
9c4b830cbc240dd06ce94ff170bbbfbdaa03435c
|
[] |
no_license
|
kendavidn/yaounde_serocovpop_shared
|
f38b088e4ffc6d77d23c1a608b2296cbf6e63646
|
cd1b05599dd8ddc2fdd3e8c3091714087ca35b7e
|
refs/heads/master
| 2023-04-16T08:41:54.477126
| 2021-08-18T22:42:07
| 2021-08-18T22:42:07
| 356,265,195
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,811
|
r
|
sero_estim_2.R
|
source(here::here("scripts/infection_regn_2.R"))
## Sensitivity validation data from a panel of formerly hospitalized COVID-19 patients.
## <https://www.sciencedirect.com/science/article/pii/S1386653220303875#bib0060>
positives <- 82
true_positives <- 75
sensit <- true_positives/positives
sensit_conf_int <- binom.test(x = true_positives, n = positives)$conf.int
## Specificity validation data from own work (Projet EPICO)
negatives <- 246
true_negatives <- 230
specif <- true_negatives/negatives
specif_conf_int <- binom.test(x = true_negatives, n = negatives)$conf.int
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~ Seroprevalence calculate ----
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## build list over which to iterate
## for each grouping, filter the table
## then calculate crude seroprevalence, population-weighted seroprevalence,
## and population-weighted, test-adjusted seroprevalence
## Build a two-column lookup (varname, labels) listing every distinct value
## of each requested column of `df`. For factor columns the factor levels
## are used, preserving their defined order; other columns contribute their
## unique values in first-occurrence order.
##
## df       data frame to scan
## varnames character vector of column names in `df`
## returns  data frame with columns `varname` and `labels`
unique_vals_to_df <- function(df, varnames) {
  ## Preallocate one piece per variable instead of growing a data frame
  ## inside the loop, then combine once at the end.
  pieces <- vector("list", length(varnames))
  for (k in seq_along(varnames)) {
    varname <- varnames[k]
    column <- df[[varname]]
    ## use levels() to keep factors arranged as they came in
    labels <- if (is.factor(column)) levels(column) else unique(column)
    pieces[[k]] <- data.frame(varname, labels)
  }
  bind_rows(pieces)
}
## arrange factor levels to match the ordering used in the risk factor analysis
yao_arranged <-
  yao %>%
  ## age categories in ascending order
  mutate(cat_age = factor(cat_age, levels = c("5 - 14", "15 - 29",
                                              "30 - 44", "45 - 64", "65 +"))) %>%
  ## BMI: recode NA to an explicit "Missing BMI" level, then order levels.
  ## The "\\<" / "\\>" escapes are deliberate (labels are later rendered
  ## through LaTeX/markdown) -- do not "fix" them.
  mutate(cat_BMI = as.character(cat_BMI)) %>%
  mutate(cat_BMI = replace_na(cat_BMI, "Missing BMI")) %>%
  mutate(cat_BMI = factor(cat_BMI, levels = c("\\< 18.5 (Underweight)",
                                              "18.5 - 24.9",
                                              "25 - 30 (Overweight)",
                                              " \\> 30 (Obese)",
                                              "Missing BMI"))) %>%
  mutate(cat_n_hhld_indiv = factor(cat_n_hhld_indiv, levels = c("1 - 2", "3 - 5", "\\> 5")))

## (variable, level) pairs to iterate over; commented entries are groupings
## that are currently excluded from the seroprevalence table
seroprev_iterator <-
  unique_vals_to_df(yao_arranged,
                    varnames = c(
                      "all_respondents",
                      "cat_sex",
                      "cat_age",
                      #"cat_BMI",
                      #"has_contact_traveler",
                      "loc_hhld_area"
                      #,
                      #"cat_n_hhld_indiv"
                    ))
## initialize accumulator; one formatted row per group is appended below
seroprev_table <- data.frame()
for (i in 1:nrow(seroprev_iterator)){
  ## current (variable, level) pair, e.g. ("cat_sex", "Female")
  curr_group <- seroprev_iterator[i, ]
  yao_subset <- yao_arranged[yao_arranged[[curr_group[[1]]]] == as.character(curr_group[[2]]), ]
  ## create svy objects within loop because they can't be filtered for some reason
  yao_unweighted <-
    survey::svydesign(ids = ~id_hhld, ## sampling unit
                      strata = NULL,
                      weights = ~1,
                      data = yao_subset)
  yao_weighted <-
    survey::svydesign(ids = ~id_hhld, ## sampling unit
                      strata = NULL,
                      weights = ~weight_per_individual,
                      data = yao_subset)
  # ~~~~ crude ----
  ## unweighted seroprevalence with logit-scale 95% CI
  seroprev_crude <-
    survey::svyciprop(formula = ~cat_igg_result_num,
                      design = yao_unweighted,
                      method = "logit")
  seroprev_crude_ci <- attr(seroprev_crude, "ci")
  # ~~~~ population weighted ----
  ## same estimator, but weighted by weight_per_individual
  seroprev_pop_weighted <-
    survey::svyciprop(formula = ~ cat_igg_result_num,
                      design = yao_weighted,
                      method = "logit")
  seroprev_pop_weighted_ci <- attr(seroprev_pop_weighted, "ci")
  # ~~~~ population weighted, test_adjusted ----
  ## adjust the weighted estimate for imperfect test sensitivity/specificity
  ## via bootstrap (bootComb)
  seroprev_pop_weighted_test_adj <-
    bootstrap_results <-
    bootComb::adjPrevSensSpecCI(
      prev = seroprev_pop_weighted[[1]], ## observed prevalence
      sens = sensit, ## observed sensitivity from validation study
      spec = specif, ## observed specificity from validation study
      prevCI = seroprev_pop_weighted_ci[1:2], ## prevalence 95% confidence interval
      sensCI = sensit_conf_int[1:2], ## sensitivity 95% confidence interval
      specCI = specif_conf_int[1:2], ## specificity 95% confidence interval
      N = 1e5, ## number of bootstrap samples
      method = "quantile", ## use quantiles to compute seroprev 95% CI
      alpha = 0.05,
      doPlot = FALSE ## no plot
    )
  seroprev_pop_weighted_test_adj_ci <- seroprev_pop_weighted_test_adj$conf.int
  # ~~~~ combine into single row of table ----
  ## format each estimate as "x.x% (lo - hi)"
  seroprev_crude_print <-
    paste0(formatC(100*seroprev_crude[[1]], digits = 1, format = "f"),"%",
           " (",
           formatC(100*seroprev_crude_ci[[1]], digits = 1, format = "f"),
           " - ",
           formatC(100*seroprev_crude_ci[[2]], digits = 1, format = "f"),
           ")")
  seroprev_pop_weighted_print <-
    paste0(formatC(100*seroprev_pop_weighted, digits = 1, format = "f"),"%",
           " (",
           formatC(100*seroprev_pop_weighted_ci[[1]], digits = 1, format = "f"),
           " - ",
           formatC(100*seroprev_pop_weighted_ci[[2]], digits = 1, format = "f"),
           ")")
  seroprev_pop_weighted_test_adj_print <-
    paste0(formatC(100*seroprev_pop_weighted_test_adj[[1]], digits = 1, format = "f"),"%",
           " (",
           formatC(100*seroprev_pop_weighted_test_adj_ci[[1]], digits = 1, format = "f"),
           " - ",
           formatC(100*seroprev_pop_weighted_test_adj_ci[[2]], digits = 1, format = "f"),
           ")")
  total_sample <- nrow(yao_subset)
  num_positive <- nrow(filter(yao_subset, cat_igg_result == "Positive"))
  ## the overall group is labeled "Total" rather than its level name
  if (curr_group[[1]] == "all_respondents") {labels <- "Total"} else {labels <- curr_group[[2]]}
  table_row <-
    data.frame(grouping = curr_group[[1]],
               labels = labels,
               total_sample,
               num_positive,
               seroprev_crude_print,
               seroprev_pop_weighted_print,
               seroprev_pop_weighted_test_adj_print)
  ## NOTE(review): rbind-in-a-loop grows quadratically; fine at this size,
  ## but bind_rows(lapply(...)) would scale better.
  seroprev_table <- rbind(seroprev_table, table_row)
}
## one-row heading frames inserted between sections of the printed table
sex_label <- data.frame(labels = "Sex")
age_group_label <- data.frame(labels = "Age group")
neighborhood_label <- data.frame(labels = "Neighborhood")
## assemble the printable huxtable
## NOTE(review): the row slices (1, 2:3, 4:8, 9:17) are hard-coded to the
## current ordering and level counts in seroprev_iterator -- revisit if the
## groupings or their levels change.
igg_pos_table_print <-
  bind_rows(seroprev_table[1,],
            sex_label,
            seroprev_table[2:3,],
            age_group_label,
            seroprev_table[4:8,],
            neighborhood_label,
            seroprev_table[9:17,]
            ) %>%
  select(-"grouping") %>%
  rename(Crude = seroprev_crude_print,
         `Age-sex-weighted` = seroprev_pop_weighted_print,
         `Age-sex-weighted,\ntest-adjusted` = seroprev_pop_weighted_test_adj_print) %>%
  huxtable() %>%
  ## blank the first three header cells, then add a spanning header row
  ## above and merge its last three cells into one banner
  set_contents(1, 1:3, c("", "", "") ) %>%
  insert_row("", "n", "Seropos.", "Seroprevalence (95% confidence interval)", "", "", after = 0) %>%
  merge_cells(1, 4:6) %>%
  set_bold(1, col = everywhere) %>%
  theme_basic() %>%
  set_latex_float("h!") %>%
  set_all_padding(0.5)
# ~~~~ combine into single row of table ----
ggplot_sens_spec_prev <-
ggplot_adjPrevSensSpecCI(
prev = seroprev_pop_weighted[[1]], ## observed prevalence
sens = sensit, ## observed sensitivity from validation study
spec = specif, ## observed specificity from validation study
prevCI = seroprev_pop_weighted_ci[1:2], ## prevalence 95% confidence interval
sensCI = sensit_conf_int[1:2], ## sensitivity 95% confidence interval
specCI = specif_conf_int[1:2], ## specificity 95% confidence interval
N = 1e5, ## number of bootstrap samples
method = "quantile", ## use quantiles to compute seroprev 95% CI
alpha = 0.05,
doPlot = FALSE ## no plot
)
|
ccbbad5a55d81d68f17f8a5f0885ed60b3eda72a
|
5c5e7c68421b9d37b855db680c06a9ebb0e64847
|
/R/extract_posterior_summaries.R
|
47af212bf793e68f8d3907a7bb48352cf3d2b37d
|
[
"BSD-3-Clause"
] |
permissive
|
hrnasif/571-group8-paramedic
|
2ed03a0fa479f1288abe01c0718824ed3e79b116
|
4ee7879253fb18211714a15a8bb5d3131754f943
|
refs/heads/main
| 2023-03-25T02:59:50.605937
| 2021-03-22T02:06:37
| 2021-03-22T02:06:37
| 344,750,402
| 0
| 0
|
NOASSERTION
| 2021-03-22T02:06:38
| 2021-03-05T08:54:12
|
R
|
UTF-8
|
R
| false
| false
| 4,883
|
r
|
extract_posterior_summaries.R
|
#' Extract posterior summaries from a hierarchical model fit
#'
#' Return point estimates and credible intervals for the true concentration, and point estimates and prediction intervals for estimated qPCR obtained through a Stan fit.
#'
#' @param stan_mod the model summary object from Stan.
#' @param stan_samps the list of MCMC samples from Stan.
#' @param taxa_of_interest the indices of the taxa for which point estimates and posterior summaries are desired.
#' @param mult_num the number to multiply the resulting estimates and standard deviations by (defaults to 1).
#' @param level the \code{alpha} level for prediction intervals (defaults to 0.95, for a nominal 95\% prediction interval).
#' @param interval_type the type of prediction interval desired (defaults to "wald", but "quantile" is also acceptable).
#'
#' @return An object of class \code{paramedic}. See Details for more information
#'
#' @details A \code{paramedic} object is a list containing the following elements:
#' \itemize{
#' \item{estimates}{ - the point estimates of qPCR (a matrix with dimension sample size by number of taxa).}
#' \item{pred_intervals}{ - prediction intervals for qPCR (an array with dimension sample size by 2 by number of taxa).}
#' \item{est_efficiency}{ - point estimates for estimated varying efficiency, if varying efficiency was modeled (a vector of length number of taxa); otherwise, NA.}
#' \item{efficiency_intervals}{ - posterior level \code{level}\eqn{\times}100\% confidence intervals for the true efficiency, if efficiency was modeled (a matrix of dimension number of taxa by 2); otherwise, NA.}
#' }
#'
#' @examples
#' # load the package, read in example data
#' library("paramedic")
#' data(example_16S_data)
#' data(example_qPCR_data)
#'
#' # run paramedic (with an extremely small number of iterations, for illustration only)
#' # on only the first 10 taxa
#' mod <- run_paramedic(W = example_16S_data[, 1:10], V = example_qPCR_data,
#' n_iter = 30, n_burnin = 25,
#' n_chains = 1, stan_seed = 4747)
#'
#' # get summary, samples
#' mod_summ <- rstan::summary(mod$stan_fit, probs = c(0.025, 0.975))$summary
#' mod_samps <- rstan::extract(mod$stan_fit)
#'
#' # extract relevant summaries
#' summs <- extract_posterior_summaries(stan_mod = mod_summ, stan_samps = mod_samps,
#' taxa_of_interest = 1:3,
#' mult_num = 1, level = 0.95, interval_type = "wald")
#'
#' @export
extract_posterior_summaries <- function(stan_mod, stan_samps, taxa_of_interest, mult_num = 1, level = 0.95, interval_type = "wald") {
    # Check that *every* taxon of interest appears in the sampling output.
    # BUG FIX: the previous `!any(...)` check only errored when *no* taxon was
    # found, contradicting the "one or more" error message below.
    check_taxa <- vapply(
        taxa_of_interest,
        function(x) any(grepl(paste0(",", x, "]"), rownames(stan_mod), fixed = TRUE)),
        logical(1)
    )
    if (!all(check_taxa)) stop("One or more of your taxa of interest are not present in the sampling output. Please specify only taxa for which you have samples.")
    # Posterior summaries of mu for each requested taxon. Columns 1, 3, 4, 5 of
    # the summary matrix are used (point estimate, spread, and the two interval
    # bounds requested via `probs` in rstan::summary).
    # `drop = FALSE` keeps a one-row selection a matrix so the `[, j]`
    # extractions below do not fail when only a single sample/row matches.
    mu_summ_lst <- lapply(as.list(taxa_of_interest), function(x) stan_mod[grepl("mu", rownames(stan_mod)) & !grepl("log", rownames(stan_mod)) & grepl(paste0(",", x, "]"), rownames(stan_mod), fixed = TRUE), c(1, 3, 4, 5), drop = FALSE]*mult_num)
    est_lst <- lapply(mu_summ_lst, function(x) x[, 1])
    sd_lst <- lapply(mu_summ_lst, function(x) x[, 2])
    estimates <- do.call(cbind, est_lst)
    sd <- do.call(cbind, sd_lst)
    rownames(estimates) <- as.character(seq_len(nrow(estimates)))
    rownames(sd) <- as.character(seq_len(nrow(sd)))
    # Credible intervals: sample-by-2-by-taxon array
    credible_intervals <- sapply(seq_along(mu_summ_lst), function(x) mu_summ_lst[[x]][, c(3, 4), drop = FALSE], simplify = "array")
    rownames(credible_intervals) <- as.character(seq_len(nrow(estimates)))
    # Prediction intervals, per the requested interval type
    if (interval_type == "wald") {
        intervals <- sapply(seq_along(taxa_of_interest), function(x) gen_wald_interval(estimates[, x], sd[, x], alpha = 1 - level), simplify = "array")
    } else if (interval_type == "quantile") {
        intervals <- sapply(seq_along(taxa_of_interest), function(x) gen_quantile_interval(mu_quantiles = credible_intervals[, , x], mu_samps = stan_samps$mu[, , taxa_of_interest[x]], div_num = mult_num, alpha = 1 - level, type = "credible_quantiles"), simplify = "array")
    } else {
        stop("Unsupported prediction interval type. Please enter one of 'quantile' or 'wald'.")
    }
    # Extract summaries of varying efficiency, if they exist.
    # NOTE(review): matching rows on the bare letter "e" (minus "beta") is
    # fragile -- any other parameter name containing "e" would be picked up;
    # verify against the Stan model's parameter names.
    if (any(grepl("e", rownames(stan_mod)) & !grepl("beta", rownames(stan_mod)))) {
        e <- stan_mod[grepl("e", rownames(stan_mod)) & !grepl("beta", rownames(stan_mod)), 1]
        e_intervals <- stan_mod[grepl("e", rownames(stan_mod)) & !grepl("beta", rownames(stan_mod)), c(4, 5)]
    } else {
        e <- NA
        e_intervals <- NA
    }
    return(list(estimates = estimates, pred_intervals = intervals, cred_intervals = credible_intervals, est_efficiency = e, efficiency_intervals = e_intervals, sd = sd))
}
|
61913f045a7430091136fa7aaf0c761790edb7e9
|
c3c9324afc6873b0de45a3cffcdfcdb884163288
|
/language/grammars/semr-ccg.r
|
4a81485c4e476f86fc79148dfcafd9be56f4dad1
|
[] |
no_license
|
lefft/boosh
|
273c44b04f65f64937fc319d5f4542a0b79daf4a
|
e4c0665ab1db9c7b47ce4085bf070fed9d8f37ea
|
refs/heads/master
| 2020-03-28T00:13:11.754076
| 2018-01-04T21:16:31
| 2018-01-04T21:16:31
| 64,482,444
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,385
|
r
|
semr-ccg.r
|
# need a more concrete plan before starting to hack!!!
# need a more concrete plan before starting to hack!!!
# need a more concrete plan before starting to hack!!!
### NOTES TO SELF ON BUILDING A LIL GRAMMAR -----------------------------------
# an ex bigram trans prob 'grammar' could be like
# a >> dog=.4,cat=.3,mouse=.2,guy=.1
#
# dog >> barked=.4,saw=.2,bit=.2,chased=.1,saw=.1
#
# <S> >> a=.3,I=.3,the=.3,yesterday=.1
#
# barked >> </S>=.5,and=.3,yesterday=.2
#
# ...
# corpus
# the dog barked
# the cat fell
# the dog chased the cat
# the cat scratched the dog
# the cat chased the mouse
# the mouse ate the cheese
# i liked the dog
# the dog liked the mouse
# the mouse liked the cheese
# the mouse scurried
# the dog was nice
# the cat was mean
# the mouse was funny
# the cheese was yellow
# i was drunk
# i was high
# lexical cats
# AP := nice, mean, funny, yellow, drunk, high
# N := dog, cat, mouse, cheese (or: (NP/N)\NP)
# NP := i, you
#
# NP/N := the, a
# (NP\S)/NP := liked, chased, scratched
# NP\S := barked, fell, scurried
# (NP\S)/AP := was
# (S\SC)/S := and, or
# combination modes (CCG slash application) -- both are unimplemented stubs:
# slash_left is only a placeholder value, and slash_right has an empty body,
# so it returns NULL invisibly for any input.
slash_left <- ""
slash_right <- function(Cleft, Cright){
}
# D N --> NP (because D = NP/N)
# df of rules: each row is one rewrite rule, lhs <- rhs (rhs is a
# space-separated sequence of category labels)
rules <- data.frame(
lhs = c("S", "NP", "NP", "VP", "VP", "VP"),
rhs = c("NP VP", "D N", "NP", "Vt NP", "Vi", "Vc A"),
stringsAsFactors=FALSE
)
# Lexicon: category label -> vector of words in that category.
# Used by wcat() below; assumes each word belongs to exactly one category.
lex <- list(
AP = c("nice","mean","funny","yellow","drunk","high"),
N = c("dog","cat","mouse","cheese"),
NP = c("i","you","fido","felix"),
D = c("the","a"),
Vt = c("liked","chased","scratched"),
Vi = c("barked","fell","scurried"),
Vc = c("was"),
C = c("and","or")
)
# lCats <- names(lex)
# make a couple of sentences:
# Look up the lexical category of a word in the global `lex` table.
# Assumes no ambiguity (each word belongs to at most one category); returns
# the first matching category name, or NULL invisibly if the word is unknown.
wcat <- function(word){
  for (category in names(lex)) {
    if (word %in% lex[[category]]) {
      return(category)
    }
  }
}
wcat("liked")
# Combine two adjacent words into a constituent.
# Looks up each word's category, finds the rule whose RHS matches
# "cat1 cat2" (assumes at most one such rule), and returns the concatenated
# words as a character scalar named by the rule's LHS category.
comb <- function(w1, w2){
  rhs_key <- paste(wcat(w1), wcat(w2))
  lhs <- rules$lhs[rules$rhs == rhs_key]
  setNames(paste(w1, w2), nm = lhs)
}
comb("liked", "felix")
comb("the", "dog")
### NOTES TO SELF ON USING COMPSEM EXERCISES ----------------------------------
### BELOW HERE IS THE OTHER FILE, HERE JUST FOR REFERENCE ---------------------
if (FALSE){
# === === === === === === ===
# [notes in: readme.md] ===
# === === === === === === ===
### 0. load functions + dependencies ##########################################
# === === === === === === === === === === === === === === === === === ===
source("functions.r")
### 1. bare-bones extensional semantics #######################################
# === === === === === === === === === === === === === === === === === ===
### __semantics for constants #########
# === === === === === ===
#
# noun and name meanings
dog <- make_cnoun(DOG)
cat <- make_cnoun(CAT)
john <- make_pname(JOHN)
mary <- make_pname(MARY)
# verb meanings
laughed <- make_itv(LAUGHED)
barked <- make_itv(BARKED)
purred <- make_itv(PURRED)
ran <- make_itv(RAN)
chased <- make_tv(CHASED)
bit <- make_tv(BIT)
gave <- make_itv(GAVE)
# logical connective meanings
not <- function(p) !p
and <- function(q) function(p) p & q
or <- function(q) function(p) p | q
ifthen <- function(q) function(p) !p | q
# determiner meanings [this is kinda bastardized]
every <- make_det(condition=all)
a <- make_det(condition=any)
two <- make_det(condition=function(x)sum(x)>1)
the <- function(P){
if (length(P)!=1){return(NULL); message("psfail!")}
return(paste0("the_", P))
}
#
### end lexical semantics
# === === === === === ===
### __toy model specification #########
# === === === === === ===
#
# noun extensions
DOG <- c("fido","spot","sparky")
CAT <- c("felix","garfield")
PERSON <- c("john","mary")
THING <- c(DOG, CAT, PERSON)
# name extensions
JOHN <- "john"
MARY <- "mary"
# domain of discourse
D <- c(DOG,CAT,PERSON)
# intransitive verb extensions
LAUGHED <- c("john","fido","sparky","garfield")
BARKED <- c("fido","sparky","spot")
PURRED <- c("felix","garfield")
RAN <- c("mary","sparky")
# transitive verb extensions
CHASED <- c(ff=c("fido","felix"),syf=c("sparky","felix"),stf=c("spot","felix"))
BIT <- c(mj=c("mary","john"),fs=c("sparky","felix"),gj=c("garfield","john"))
ATE <- c(ff=c("fido","felix"),gm=c("garfield","mary"))
# ditransitive verb extensions
GAVE <- c(jfm=c("john","felix","mary"),msj=c("mary","sparky","john"))
#
### end model specification
# === === === === === ===
### testing phase 1:
#
# should be true that 'every dog chased a cat' on one reading; not on other
every(DOG)(BARKED)
for (x in DOG){print(c(x, barked(x)))}
every(CAT)(BARKED)
sapply(CAT, function(x) print(c(x, barked(x))))
# missing vectorization in some lexical entry :/
#
# no --> Q arg to every needs to be a vector not a func like P?
Vectorize((function(x){chased("felix")(x)}))(DOG)
# want to make things work so that this means what it should
Vectorize((function(x){chased(DOG)(x)}))("felix")
every(DOG)(Vectorize(function(x){chased("felix")(x)})(DOG))
# every(DOG)((function(x)Vectorize(chased)(x)("felix"))(DOG))
every(DOG)(THING) # so it can be every(vec)(vec)
every(THING)(DOG)
chased("fido")("felix")
chased("spot")("felix")
chased("sparky")("felix")
for (x in DOG){print(barked(x))}
# order of args wrong! x shd be direct obj!
for (x in DOG){print(chased(x)("felix"))}
### 2. basic model theory #####################################################
# === === === === === === === === === === === === === === === === === ===
# - need to introduce variables
# - shd be based on gallin ty2 (flexibly!)
# - keep simple where possible
# - just use built-ins for booleans (TRUE, FALSE) and NULL
# - use ent, bool as primitive types + generate functional types recursively
# - complex types interpreted as function space
# - an interp is a pair M = (D, V) for D a domain and V a valuation
# - truth defined relative to an interp plus an assignment (a "model")
### 3. simple cfg-based syntax ################################################
# === === === === === === === === === === === === === === === === === ===
}
|
f88f3530376dd6eb7a6515b4fe34a78857228424
|
266d82098b7c537a60ed5fc94bcf4a15c018b64e
|
/ARIMA.R
|
8b4c6ea97f6b9ebfc1d2d71636ce92637e45e6ee
|
[] |
no_license
|
tefeasel/R-Programs
|
00d023c5f0d47bae34614fad3ea0f8d5e11991a6
|
1ee68f712f39e58c4684b22783b61901da96c807
|
refs/heads/master
| 2020-05-23T03:10:36.161841
| 2019-02-05T19:14:57
| 2019-02-05T19:14:57
| 53,174,576
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 560
|
r
|
ARIMA.R
|
# Monthly electricity-usage ARIMA model (template script: the file paths and
# the jurisdiction filter value are intentionally blank and must be filled in
# before this will run).
library(lubridate)
library(dplyr)
library(haven)
library(forecast)
# NOTE(review): setwd("") in a script is fragile and non-portable -- prefer
# absolute paths or a project-relative root.
setwd("")
data <- read_sas("")
rvr <- read_sas("")
# Keep only the APV jurisdiction, revenue class 1
rvr <- subset(rvr, JURIS == "APV" & REVCLS == 1)
# Per-customer usage for the selected jurisdiction/class from 2011 onward
data <- data %>%
filter(JURIS == "" & revcls == 1 & YEAR >= 2011) %>%
select(YEAR, MONTH, bcdd65, bhdd55, KWH, CUST, DAYS) %>%
mutate(use = KWH/CUST)
# External regressor: billing days in each month
features <- data[,c("DAYS")]
# Automatic ARIMA order selection with DAYS as xreg
auto <- auto.arima(data$use, xreg = features )
auto
# NOTE(review): `predict` shadows stats::predict here; also, forecast() is
# given the same in-sample xreg, so the "forecast" horizon equals the data
# length -- supply future regressor values to forecast ahead; confirm intent.
predict <- forecast(auto, xreg = features)
predicted <- predict$mean
predicted <- data.frame(predicted)
|
290ca0b89ec65705537d7a595cd84b3dfb4982e1
|
a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3
|
/output/sources/authors/3111/cgwtools/lsclass.R
|
7c8ff1e47217e54252802103528b23649a9b2cfa
|
[] |
no_license
|
Irbis3/crantasticScrapper
|
6b6d7596344115343cfd934d3902b85fbfdd7295
|
7ec91721565ae7c9e2d0e098598ed86e29375567
|
refs/heads/master
| 2020-03-09T04:03:51.955742
| 2018-04-16T09:41:39
| 2018-04-16T09:41:39
| 128,578,890
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 231
|
r
|
lsclass.R
|
#' List global objects by class
#'
#' Returns the names of objects in the global environment whose class vector
#' contains \code{type}.
#'
#' @param type a class name to match, e.g. "numeric" or "data.frame".
#' @return a character vector of matching object names (possibly empty).
lsclass <-
function(type='numeric'){
inlist <- ls(.GlobalEnv)
# class() can return more than one class for an object, so keep the full
# class vector per object (lapply, not the type-unstable sapply) and test
# membership with %in%. Using seq-free lapply also handles an empty global
# environment, where the previous 1:length(inlist) produced c(1, 0) and
# failed.
classlist <- lapply(inlist, function(nm) class(get(nm, envir = .GlobalEnv)))
keep <- vapply(classlist, function(cl) type %in% cl, logical(1))
return(inlist[keep])
}
|
470f7d70b1d7dac8a34b4a4b6daa43fc86723df8
|
0385bac47efa324cfa3b572593ab2cf25bd1c0fa
|
/R/dataframe_com.R
|
5d985bded255e706048004408943aea80ec20b7c
|
[] |
no_license
|
Pralhad-Analyst/keymat
|
3917453c6f0765d28350970bcb9fb1317d464fc3
|
883284061c72e6140888b69218540a4e5868d79b
|
refs/heads/master
| 2022-12-19T09:33:28.887652
| 2020-09-24T06:12:51
| 2020-09-24T06:12:51
| 298,185,162
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 349
|
r
|
dataframe_com.R
|
#' Filter a key-materials dataset to a single commodity
#'
#' Subsets \code{dataset} to the rows whose \code{MaterialIndex} column
#' matches the requested commodity. Relies on \code{dplyr::filter()} and the
#' magrittr pipe being available.
#'
#' @param dataset a data frame of key materials; must contain a
#'   \code{MaterialIndex} column.
#' @param commodityname the name of a commodity such as "Aluminum".
#'
#' @return the rows of \code{dataset} whose \code{MaterialIndex} equals
#'   \code{commodityname}, with the same columns as the input.
#' @export
#'
#' @examples
dataframe_com <- function(dataset, commodityname) {
dataset %>%
filter(MaterialIndex == commodityname)
}
|
c2587644bc7a7629724018e4598352f85a59a14e
|
6a8f18cf122fe9adc0d02924f81188cb7d04eb47
|
/data_smoother_workingfile.R
|
e6085543b3fcdc21369ca311de130a4ce10e56e9
|
[] |
no_license
|
chryschang/data_smoother
|
fa636309fe8748b7a5bcbdd1fbbff930a3a6aebf
|
71e9fbc767177d0cb4ee429fe7a5de8077b5c02c
|
refs/heads/master
| 2021-06-16T13:42:22.555348
| 2017-04-03T20:25:04
| 2017-04-03T20:25:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,761
|
r
|
data_smoother_workingfile.R
|
#Plot Actot, Fluortot and Tair from SCOPE output.
library(readr) #import and read csv files
library(ggplot2) #generate plots
library(dplyr) #data manipulation: filter, summarize, mutate
library(zoo) #used for rolling average calculations
fluxes <- read_csv("path/to/fluxes.csv")
surftemp <- read_csv("path/to/surftemp.csv")
#identify variables
DOY <- as.numeric(fluxes$t) #use as.numeric to import as float
A <- as.numeric(fluxes$Actot)
Fluo <- as.numeric(fluxes$fluortot)
Tair <- as.numeric(surftemp$Ta)
#create new table and populate
new_table <- cbind()
new_table$t <- as.numeric(paste(DOY)) #t is the fractional Julian date
new_table$DOY <- as.numeric(paste(floor(DOY))) #DOY is the rounded integer Julian date for use later
new_table$A <- as.numeric(paste(A))
new_table$Tair <- as.numeric(paste(Tair))
new_table$Fluo <- as.numeric(paste(Fluo))
#convert new_table to dataframe
df = as.data.frame(new_table)
df = df[-1,] #remove first row of NAs
doy.means <- df %>% group_by(DOY) %>% summarize(meanT = mean(Tair, na.rm=TRUE), meanA = mean(A, na.rm=TRUE), meanF = mean(Fluo, na.rm=TRUE)) #using pipes to simplify above steps and save to variable
#individual variable plots
qplot(doy.means[1],doy.means[2], xlab="Julian Day", ylab="Mean air temperature (degC)", main="Daily Mean Air Temperature")
qplot(doy.means[1],doy.means[3], xlab="Julian Day", ylab="Mean Actot (umol CO2 m-2 s-1)", main="Daily Mean Assimilation")
qplot(doy.means[1],doy.means[4], xlab="Julian Day", ylab="Mean Fluorescence (W m-2)", main="Daily Mean Fluorescence")
#overlaid plots with 3 described axes
par(mar=c(5, 12, 4, 4) + 0.1) #create a left margin for the graph.
plot(doy.means$DOY, doy.means$meanT, axes=F, ylim=c(-10,40), xlab="", ylab="") #first variable. silence the x and y labels for manual insertion later
points(doy.means$DOY, doy.means$meanT,pch=20,col="red") #Tair points
axis(2,col="red",lwd=2) #Tair axis
mtext(2,text="Tair (degC)", line=2) #Tair axis header. line indicates the spacing
par(new=T)
plot(doy.means$DOY, doy.means$meanA, axes=F, ylim=c(0,20), xlab="", ylab="")
points(doy.means$DOY, doy.means$meanA,pch=20,col="green") #A points
axis(2, col="green",lwd=2, line=3.5) #A axis
mtext(2,text="Assimilation (umol CO2 m-2 s-1)", line=5.5) #A axis header
par(new=T)
plot(doy.means$DOY, doy.means$meanF, axes=F, ylim=c(0,1), xlab="", ylab="")
points(doy.means$DOY, doy.means$meanF,pch=20,col="blue") #Fluo points
axis(2, col="blue",lwd=2, line=7) #Fluo axis
mtext(2,text="Fluorescence (W m-2)", line=9) #Fluo axis header
axis(1,pretty(range(doy.means$DOY),10)) #x axis
mtext("Julian Day",side=1,col="black",line=2) #x axis header
legend(x=0,y=1,legend=c("Tair","Assimilation","Fluorescence"),pch=20, col=c("red","green","blue")) #legend
title("Mean Daily Values - MOz 2005")
#and now for the smoothing part...
temp.doy <- doy.means[[1]]
temp.t <- doy.means[[2]]
temp.a <- doy.means[[3]]
temp.f <- doy.means[[4]]
temp.zt <- zoo(temp.t, temp.doy)
temp.za <- zoo(temp.a, temp.doy)
temp.zf <- zoo(temp.f, temp.doy)
# 7-day centered rolling averages; fill = list(NA, NULL, NA) pads only the
# left and right edges with NA so each result keeps the original length.
mt <- rollmean(temp.zt, 7, fill=list(NA, NULL, NA)) #7 day rolling average
ma <- rollmean(temp.za, 7, fill=list(NA, NULL, NA))
mf <- rollmean(temp.zf, 7, fill=list(NA, NULL, NA))
# Insert the smoothed series back into the daily-means data frame
# (coredata() strips the zoo index; paste()/as.numeric() coerces to numeric).
doy.means$mt <- as.numeric(paste(coredata(mt))) #insert to dataframe
doy.means$ma <- as.numeric(paste(coredata(ma)))
doy.means$mf <- as.numeric(paste(coredata(mf)))
#single plots
# FIX: titles previously said "5 day avg", but rollmean() above uses a
# 7-point window (and the overlay plot below is labeled "7-day avg").
qplot(doy.means[1],doy.means[5], xlab="Julian Day", ylab="Mean air temperature (degC)", main="Air temperature, 7 day avg")
qplot(doy.means[1],doy.means[6], xlab="Julian Day", ylab="Mean assimilation (umol CO2 m-2 s-1)", main="Assimilation, 7 day avg")
qplot(doy.means[1],doy.means[7], xlab="Julian Day", ylab="Mean fluorescence (W m-2)", main="Fluorescence, 7 day avg")
#simple quick overlaid plots
ggplot(data = doy.means, aes(DOY)) + geom_line(aes(y=mt, colour="meanT")) + geom_line(aes(y=ma, colour="meanA")) + geom_line(aes(y=mf, colour="meanF"))
#overlaid plots with 3 described axes
par(mar=c(5, 12, 4, 4) + 0.1) #create a left margin for the graph.
plot(doy.means$DOY, doy.means$mt, axes=F, ylim=c(-10,40), xlab="", ylab="") #first variable. silence the x and y labels for manual insertion later
points(doy.means$DOY, doy.means$mt,pch=20,col="red") #Tair points
axis(2,col="red",lwd=2) #Tair axis
mtext(2,text="Tair (7-day avg; degC)", line=2) #Tair axis header. line indicates the spacing
par(new=T)
plot(doy.means$DOY, doy.means$ma, axes=F, ylim=c(0,20), xlab="", ylab="")
points(doy.means$DOY, doy.means$ma,pch=20,col="green") #A points
axis(2, col="green",lwd=2, line=3.5) #A axis
mtext(2,text="Assimilation (7-day avg; umol CO2 m-2 s-1)", line=5.5) #A axis header
par(new=T)
plot(doy.means$DOY, doy.means$mf, axes=F, ylim=c(0,1), xlab="", ylab="")
points(doy.means$DOY, doy.means$mf,pch=20,col="blue") #Fluo points
axis(2, col="blue",lwd=2, line=7) #Fluo axis
mtext(2,text="Fluorescence (7-day avg; W m-2)", line=9) #Fluo axis header
axis(1,pretty(range(doy.means$DOY),10)) #x axis
mtext("Julian Day",side=1,col="black",line=2) #x axis header
legend(x=0,y=1,legend=c("Tair","Assimilation","Fluorescence"),pch=20, col=c("red","green","blue")) #legend
title("7-day Running Averages - MOz 2005")
|
c2e1b05373444938353922928021288380e67914
|
37db197f8f46abfeeb14aca2ef04239f601c751c
|
/R/function_utils.R
|
ef79386c17d8a8f4dc06261712131fdaa895f0a7
|
[] |
no_license
|
statech/ggutils
|
2c0d685e5a11d455b7a10db80b96b067613be0da
|
42fe986a36d2ede85d5868ee9974a3ad7c426a20
|
refs/heads/master
| 2021-09-15T20:30:23.779127
| 2018-06-10T15:57:40
| 2018-06-10T15:57:40
| 103,665,915
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,404
|
r
|
function_utils.R
|
#' @title Test If An Object Is Empty
#'
#' @description Returns \code{TRUE} when \code{obj} carries no usable
#' content: \code{NULL}, zero length, all \code{NA}, all empty strings
#' (when \code{empty_str_triggers} is \code{TRUE}), or all \code{FALSE}
#' (when \code{false_triggers} is \code{TRUE}). Functions are never
#' considered empty.
#' @param obj Object: an object to be tested
#' @param empty_str_triggers Logical: \code{TRUE} (default) treats the empty
#' string (\code{''}) as empty, \code{FALSE} otherwise
#' @param false_triggers Logical: \code{TRUE} treats the logical value
#' \code{FALSE} as empty, \code{FALSE} (default) otherwise
#' @return A logical value indicating whether the tested object is empty
#' @examples
#' is_blank(NA)
#' is_blank('')
#' @export
is_blank <- function(obj, empty_str_triggers = TRUE, false_triggers = FALSE) {
    # Functions are never blank
    if (is.function(obj)) {
        return(FALSE)
    }
    # Evaluation order matters: the NULL/length checks short-circuit before
    # the vectorized tests run on the object's elements.
    structurally_empty <- is.null(obj) || length(obj) == 0 || all(is.na(obj))
    structurally_empty ||
        (empty_str_triggers && all(obj == '')) ||
        (false_triggers && all(!obj))
}
#' @title Test If A Column Is Found in A Data Frame
#'
#' @description Checks that \code{column} names one of \code{df}'s columns
#' and raises an error if it does not; returns \code{NULL} invisibly when
#' the column exists.
#' @param df List/Data frame
#' @param column Character: a column name
#' @param error_message Character: optional user-defined error message
#' @param call_ Logical: should the call become part of the error message?
#' See \code{call.} in \code{\link[base]{stop}}
#' @return Error message if test fails
#' @examples
#' test_df <- data.frame(a = 1:10, b = 11:20)
#' column_in_dataframe(test_df, 'a')
#' \dontrun{
#' column_in_dataframe(test_df, 'c')
#' }
#' @export
column_in_dataframe <- function(df, column, error_message = NULL, call_ = F) {
    # Guard clause: nothing to do when the column is present
    if (column %in% names(df)) {
        return(invisible(NULL))
    }
    df_name <- deparse(substitute(df))
    column_name <- deparse(substitute(column))
    msg <- error_message
    if (is.null(msg)) {
        msg <- paste0(
            df_name, " doesn't have a column named '", column_name, "'"
        )
    }
    stop(msg, call. = call_)
}
#' @title Argument Verification
#'
#' @description Checks that every element of \code{arg} is drawn from the
#' table of candidate values \code{choices}, raising an error otherwise.
#' @param arg Character: passed-in argument value(s)
#' @param choices Character vector: a character vector of candidate values
#' @param error_message Character: optional user-defined error message
#' @param call_ Logical: should the call become part of the error message?
#' See \code{call.} in \code{\link[base]{stop}}
#' @return Error message if test fails
#' @examples
#' alignment <- 'left'
#' arg_in_choices(alignment, c('left', 'center', 'right'))
#' @export
arg_in_choices <- function(arg, choices, error_message = NULL, call_ = F) {
    # Guard clause: all values are valid choices
    if (all(arg %in% choices)) {
        return(invisible(NULL))
    }
    arg_name <- deparse(substitute(arg))
    msg <- error_message
    if (is.null(msg)) {
        msg <- paste0(
            arg_name, ' must be one of the following:\n',
            paste(choices, collapse = '\n')
        )
    }
    stop(msg, call. = call_)
}
#' @title Variable Class Test
#'
#' @description Tests whether a variable passes a class-test function and
#' raises an error if not.
#' @param var Object: an object for class test
#' @param class_method Function: class test function, e.g.,
#' \code{\link[base]{is.numeric}}, \code{\link[base]{is.logical}} and etc.
#' @param class_name Character: name of the class (used in the error message)
#' @param error_message Character: user-defined error message
#' @param call_ Logical: should the call become part of the error message?
#' See \code{call.} in \code{\link[base]{stop}}. Defaults to \code{FALSE},
#' matching the sibling helpers in this file.
#' @return Error message if test fails
#' @examples
#' obj <- 1:10
#' check_var_class(obj, is.numeric, 'numeric')
#' @export
check_var_class <- function(var, class_method,
                            class_name, error_message = NULL,
                            call_ = FALSE) {
    class_method_name <- deparse(substitute(class_method))
    if(!is.function(class_method)) {
        stop(paste0(class_method_name, ' must be a callable function'))
    }
    if(!class_method(var)) {
        var_name <- deparse(substitute(var))
        if(is.null(error_message)) {
            error_message <- paste0(
                var_name, " must be of class '", class_name, "'"
            )
        }
        # BUG FIX: `call_` was referenced here but was not a parameter of this
        # function (unlike column_in_dataframe/arg_in_choices), so every
        # failing check died with "object 'call_' not found" instead of the
        # intended message. Added `call_` with a backward-compatible default.
        stop(error_message, call. = call_)
    }
}
|
fac8e0eee324d7a79bb8a03889b956b2296a962b
|
18a02f8fe73acbcea5745ff164882c7cb33a1404
|
/man/scluminex.Rd
|
60d45a59204a3e22287fb2b3dccf1316d095d074
|
[] |
no_license
|
cran/drLumi
|
e0998f5b1560563caee8dd644ef518806fc55f9e
|
0cb23d50a71683760fbe6b989c096525fd4ba602
|
refs/heads/master
| 2016-08-11T15:20:52.013721
| 2015-09-24T01:27:27
| 2015-09-24T01:27:27
| 48,079,249
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,631
|
rd
|
scluminex.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/scluminex.R
\name{scluminex}
\alias{scluminex}
\title{Estimates different models for each analyte.}
\usage{
scluminex(plateid, standard, background, lfct,
bkg = c("ignore","subtract", "include", "constraint"),
neill.method = "finest", fmfi = "median", fec = "ec",
fanalyte = "analyte", fwell = "well", fflag = "flag",
verbose = TRUE, ...)
}
\arguments{
\item{plateid}{character to identify the plate}
\item{standard}{a \code{data.frame} with standard values information}
\item{background}{a \code{data.frame} with the value of the blank controls.}
\item{lfct}{a character vector of SelfStarting models for background method.
They will be used in order if no convergence is achieved, ie: the first
\code{lfct} first, if no convergence the second function, etc. Options
are \code{SSl5}, \code{SSl4} and \code{SSexp}.}
\item{bkg}{character vector specifying how the background values are
treated. Options are 'ignore', 'subtract', 'include' or 'constraint'.}
\item{neill.method}{character specifying the grouping method for the
Neill test. Default 'finest'. Other options 'c-finest', 'percentiles'
or the name of the grouping variable.}
\item{fmfi}{name of the column with MFI values}
\item{fec}{name of the column with the concentration}
\item{fanalyte}{name of the column with the name of the analyte}
\item{fwell}{name of the variable with the well information}
\item{fflag}{name of the variable with the flag to not include a
record in the standard curve estimation}
\item{verbose}{logical whether show the process of estimation.}
\item{...}{other parameters for the model}
}
\value{
A list with the following components model, convergence,
coef, data, rsquare
\itemize{
\item{\code{model}}{, the nls model}
\item{\code{convergence}}{, convergence of the model}
\item{\code{coef}}{, coefficients values for the \code{nls} model}
\item{\code{data}}{, data of the model}
\item{\code{rsquare}}{, R^2 values for the performed models}
}
}
\description{
Given a dilutions points and background \code{data.frame} estimates
a model (in a recursive way is possible)
for a background method.
}
\details{
The models are fitted by the \code{nlsLM} function from the
\code{minpack.lm} package. The background data can be ignore, or use to
subtract the values of all MFI or be included as a point in the
standard curve with a value half of the lower value of the standard points.
If two or more blank controls are specified the geometric mean of the MFI
is used. The names on the two datasets need to be the same and are
specified by the fmfi, fec and fanalyte arguments of the function. The
routine should receive the values of the MFI from the luminex fluorescence
data. Analysis is performed in logarithm scale (base 10) both for the MFI
and the concentrations.
The grouping variable for the \code{neill.method} can be specified if there
are replicate doses in the assay. If there are no replicates,
one of the three 'grouping' methods can be selected.
}
\examples{
# Load data
data(ecdata)
data(mfidata)
dat <- mfidata[mfidata$plate=="plate_1" & mfidata$analyte=="FGF",]
sdf <- data_selection(dat, ecdata)$plate_1
# Fit model and summary object
igmodels <- scluminex("plate_1",sdf$standard, sdf$background,
lfct=c("SSl4", "SSl5"),
bkg="ignore",
fmfi="mfi",
verbose=FALSE)
ss <- summary(igmodels)
# Information
names(igmodels)
names(igmodels$FGF)
# Summary data
ss
as.data.frame(ss)
as.data.frame(igmodels)
# Plot the standard curve
plot(igmodels,"sc")
}
|
fa1bc89ed3266ba1614f19284fd44ec56d5e1587
|
9484b93da2ee67dd0989745745b0ab3ce4567324
|
/code/old/test2.R
|
56a4bd1eb686c16b1ce3983dbb8ab3e77e8c1189
|
[
"MIT"
] |
permissive
|
SchlossLab/Sze_FollowUps_Microbiome_2017
|
0d8d2abefbec52914d0fce24c3f977d266583913
|
8ef69b722a0610d27ef898fa9ea0ab1385f6807b
|
refs/heads/master
| 2021-01-18T23:39:58.668847
| 2017-09-16T20:37:43
| 2017-09-16T20:37:43
| 46,723,430
| 1
| 6
| null | 2017-09-16T20:37:44
| 2015-11-23T13:53:29
|
HTML
|
UTF-8
|
R
| false
| false
| 5,761
|
r
|
test2.R
|
## Run Previous Analysis align follow up data
## Code used and modified from Niel .Rmd file
## Marc Sze
###Load needed Libraries and functions
source('code/functions.R')
source('code/graphFunctions.R')
loadLibs(c("pROC","randomForest","AUCRF", "Boruta", "dplyr", "tidyr", "ggplot2", "reshape2",
"gridExtra", "scales", "wesanderson", "foreach"))
### Read in necessary data
tax <- read.delim(
'data/process/old/followUps.final.an.unique_list.0.03.cons.taxonomy',
sep='\t', header=T, row.names=1)
shared <- read.delim(
'data/process/old/followUps.final.an.unique_list.0.03.subsample.0.03.filter.shared',
header=T, sep='\t')
### Organize tables for train and test sets (first focus is to look at non-cancer versus cancer)
metaF <- read.delim('data/process/old/followUps_metadata.txt', header=T, sep='\t') %>% mutate(lesion = factor(NA, levels=c(0,1)))
metaF$cancer[metaF$dx =='normal' | metaF$dx =='adenoma'] <- 0
metaF$cancer[metaF$dx =='cancer'] <- 1
metaF$cancer <- factor(metaF$cancer)
metaI <- read.delim('data/process/old/initials_metadata.tsv', header=T, sep='\t') %>% mutate(lesion = factor(NA, levels=c(0,1)))
metaI$cancer[metaI$dx == 'normal' | metaI$dx == 'adenoma'] <- 0
metaI$cancer[metaI$dx == 'cancer'] <- 1
metaI$cancer <- factor(metaI$cancer)
### Run Second organization to sort lesion vs. non-lesion
metaF$lesion[metaF$dx =='normal'] <- 0
metaF$lesion[metaF$dx =='adenoma' | metaF$dx == 'cancer'] <- 1
metaF$lesion <- factor(metaF$lesion)
metaI$lesion[metaI$dx == 'normal'] <- 0
metaI$lesion[metaI$dx == 'adenoma' | metaI$dx == 'cancer'] <- 1
metaI$lesion <- factor(metaI$lesion)
### Run third organization to sort SRN (Adv Adenoma) to cancer group and Adenoma to non-cancer group
metaF$SRNlesion[metaF$Dx_Bin =='normal' | metaF$Dx_Bin == 'Adenoma'] <- 0
metaF$SRNlesion[metaF$Dx_Bin =='adv Adenoma' | metaF$Dx_Bin == 'Cancer'] <- 1
metaF$SRNlesion <- factor(metaF$SRNlesion)
metaI$SRNlesion[metaI$Dx_Bin =='High Risk Normal' | metaI$Dx_Bin == 'Normal' | metaI$Dx_Bin == 'Adenoma'] <- 0
metaI$SRNlesion[metaI$Dx_Bin =='adv Adenoma' | metaI$Dx_Bin == 'Cancer'] <- 1
metaI$SRNlesion <- factor(metaI$SRNlesion)
### Run fourth organization to sort into three separate groups: normal, adenoma, cancer
metaF$threeway[metaF$dx == 'normal'] <- 0
metaF$threeway[metaF$dx == 'adenoma'] <- 1
metaF$threeway[metaF$dx == 'cancer'] <- 2
metaF$threeway <- factor(metaF$threeway)
metaI$threeway[metaI$dx == 'normal'] <- 0
metaI$threeway[metaI$dx == 'adenoma'] <- 1
metaI$threeway[metaI$dx == 'cancer'] <- 2
metaI$threeway <- factor(metaI$threeway)
write.csv(metaI, "data/process/old/metaI_modified.csv")
write.csv(metaF, "data/process/old/metaF_modified.csv")
### Need to amend and separate Adenoma and CRC
good_metaf <- read.csv('data/process/old/followUp_outcome_data.csv', header = T,
stringsAsFactors = F) %>% inner_join(metaF, by="EDRN")
metaFConly <- filter(good_metaf, Diagnosis == "adenocarcinoma" | Diagnosis == "N/D")
metaFAonly <- filter(good_metaf, Diagnosis == "adenoma")
###Pull out the thetayc distance between the initial and followup sample within the
###same person for all and split by adenoma or cancer
thetaCompTotal <- dissplit(
'data/process/old/followUps.final.an.unique_list.thetayc.0.03.lt.ave.dist', metaF)
intra_ade <- as.vector(unlist(thetaCompTotal['intra_ade']))
intra_canc <- as.vector(unlist(thetaCompTotal['intra_canc']))
inter <- as.vector(unlist(thetaCompTotal['inter']))
rm(thetaCompTotal)
###Pull out the share OTUs between the initial and followup sample within the
###same person for all and split by adenoma or cancer
sobsCompTotal <- dissplit(
'data/process/old/followUps.final.an.unique_list.thetayc.0.03.lt.ave.dist', metaF)
sobs_intra_ade <- as.vector(unlist(sobsCompTotal['intra_ade']))
sobs_intra_canc <- as.vector(unlist(sobsCompTotal['intra_canc']))
sobs_inter <- as.vector(unlist(sobsCompTotal['inter']))
rm(sobsCompTotal)
###Training and creating the models for prediction
# Create a list with all the data
testList <- list(
cancer = train <- inner_join(metaI, shared, by = c("sample" = "Group")) %>%
filter(!sample %in% good_metaf$initial) %>%
select(cancer, fit_result, contains("Otu0")) %>% na.omit(),
lesion = train <- inner_join(metaI, shared, by = c("sample" = "Group")) %>%
filter(!sample %in% good_metaf$initial) %>%
select(lesion, fit_result, contains("Otu0")) %>% na.omit(),
SRNlesion = train <- inner_join(metaI, shared, by = c("sample" = "Group")) %>%
filter(!sample %in% good_metaf$initial) %>%
select(SRNlesion, fit_result, contains("Otu0")) %>% na.omit(),
threeway = train <- inner_join(metaI, shared, by = c("sample" = "Group")) %>%
filter(!sample %in% good_metaf$initial) %>%
select(threeway, fit_result, contains("Otu0")) %>% na.omit())
# Using the seperation as Cancer (ALL)
modelsToTest <- c("cancer", "lesion", "SRNlesion", "threeway")
# Set up parallelization
#doParallel::registerDoParallel(cores=5)
#finalDataResults <- foreach(i=1:length(modelsToTest)) %dopar% {
# testdata(dataList, modelsToTest[i])
#}
### This doesnt work for the AUCRF function based on how it was written
### Can't seem to override the set functionality to look at global
### enivronment only.
# Sequentially
cancer_AUC_data <- testdata(testList, modelsToTest[1])
lesion_AUC_data <- testdata(testList, modelsToTest[2])
SRNlesion_AUC_data <- testdata(testList, modelsToTest[3])
threeway_AUC_data <- testdata(testList, modelsToTest[4])
# Save the output for later
save.image("data/process/old/test2.RData")
|
837fbe768f01b8ecfad958c8b8e7bef463d343aa
|
a822aa856b5b017ec310328f1e8a60994bbd2969
|
/02-get-data/02d-read-excel.R
|
d526a916e8902fbbbb879ccdd562c1b85a097de7
|
[
"CC0-1.0"
] |
permissive
|
Zanetik/r-minicourse
|
ac6404875137c560910e101b8eaedb81d51bbe42
|
ab7048b56e8be7be25d0a7c842ed7af9bbd91211
|
refs/heads/master
| 2020-12-17T11:42:10.958948
| 2017-11-14T15:35:00
| 2017-11-14T15:35:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 115
|
r
|
02d-read-excel.R
|
install.packages("xlsx")
library(xlsx)
dt<-read.xlsx(file = "../00-fitbit-demo/fitbit-data.xlsx",sheetIndex = 1)
|
c9ec79957a71f90fec641a6677f66e173b46a61f
|
90de591978470e10d14abe5daefeb0f23390089c
|
/man/compute_auc_components.Rd
|
d8468bc920dd245045cfd3b06ff770b9c7429b8f
|
[
"MIT"
] |
permissive
|
tidymodels/survivalauc
|
01d95c8e88ce0c8ee59fc8ab8fcc4870c7b63ca1
|
936dbe5660f81feb365dd7d725eafba68eb3299a
|
refs/heads/main
| 2023-08-25T04:53:44.394185
| 2021-10-28T19:51:54
| 2021-10-28T19:51:54
| 392,399,325
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,744
|
rd
|
compute_auc_components.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/api.R
\name{compute_auc_components}
\alias{compute_auc_components}
\title{Compute Survival AUC}
\usage{
compute_auc_components(time, status, score, threshold)
}
\arguments{
\item{time}{Numeric, survival/censoring time.}
\item{status}{Numeric, censoring indicator taking value 1 if survival time is
observed and 0 otherwise.}
\item{score}{Numeric, risk score obtained from a risk prediction model.}
\item{threshold}{Numeric, thresholds used in estimating sensitivity and
specificity.}
}
\value{
A list with 4 elements:
\describe{
\item{unique_times}{distinct observed failure times in ascending order.}
\item{sensitivity}{\code{m} by \code{k} matrix, the \verb{(i,j)}th element corresponds
to the estimated sensitivity at time point
\code{unique_times[i]} with threshold \code{thresold[j]}}
\item{specificity}{\code{m} by \code{k} matrix, the \verb{(i,j)}th element corresponds
to the estimated specificity at time point
\code{unique_times[i]} with threshold \code{thresold[j]}}
\item{auc}{the estimated AUC at time point \code{unique_times}}
}
}
\description{
Implementing the alternative estimators of the time-dependent area
under the ROC curve, time-dependent sensitivity and specificity,
and time-dependent ROC curves.
}
\details{
\code{time}, \code{status}, and \code{score} must have the same length and be numeric.
}
\examples{
compute_auc_components(
time = sample_data$time,
status = sample_data$status,
score = sample_data$score,
threshold = sample_data$threshold
)
}
\references{
Chambless, L. E. and G. Diao (2006). Estimation of time-dependent area under
the ROC curve for long-term risk prediction. Statistics in Medicine 25,
3474--3486.
}
|
157343593c1b0b17d4528b011e01931556245f66
|
7fedcc709364af02dc16baefabf0950e54bebb6c
|
/man/reorder_class_hetero.Rd
|
398c3f7461d87802fc54ecab1a278242b740c8f4
|
[] |
no_license
|
robertyoung3/MSanalyzeNOM
|
34c406c453a3ac8eda7a1a8456161dd9159062f1
|
604299a54f55948d41ceb5f2769f2242935d2976
|
refs/heads/master
| 2023-05-07T06:42:44.693193
| 2021-06-01T17:55:37
| 2021-06-01T17:55:37
| 268,903,669
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 869
|
rd
|
reorder_class_hetero.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reorder_class_hetero.R
\name{reorder_class_hetero}
\alias{reorder_class_hetero}
\title{reorder_class_hetero}
\usage{
reorder_class_hetero(df)
}
\arguments{
\item{df}{a tibble containing a table of the assigned elements with a column name
for each element (e.g., "C", "H", "N", "O" and "S")}
}
\value{
a vector the heteroatom classes as ordered factors
}
\description{
This function orders heteroatom classes from O1 to Omax through CHNO_S1 to
CHNOS_max for use in summarizing and visualizing heteroatom class data. It
orders the factors based on a computed value, "level", which is discarded
after reordering. The levels increase in a nested fashion by number of O,
then number of N, and then number of S. The function is currently limited to
the default class of elements: C, H, N, O, S.
}
|
f99ecb4aa3c1f110f336938f1bccf9bee29e2414
|
02781e746b7a4688cb05c56b1b2e039cf7516e3f
|
/calculate_overall_performance.R
|
aadf4d42791bf15d7a7f8c9aa9fd57a515b1b970
|
[] |
no_license
|
PaoloFrac/Daily-activities-classification
|
27f9526c742947c98c71c0cf634bb6d5b8c94d28
|
a698f0d8f81617c4b0378f555ddcca13f13cbe66
|
refs/heads/master
| 2020-03-18T23:39:24.639657
| 2018-08-13T13:01:22
| 2018-08-13T13:01:22
| 135,416,900
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,460
|
r
|
calculate_overall_performance.R
|
timeThreshold <- c(10:5)*60
distance_threshold <- 100
analysis_type <- c("time_based", "density_based", "combined")
analysis_type <- c("combined")
for(i in 1:length(analysis_type)){
overall_performance <- NULL
for(j in 1:length(timeThreshold)){
overall_performance <- read.csv(file = paste("~/Data/Projects/Club M/Healthy volunteers study/Analysis/",
analysis_type,
"/performance_OSM",
timeThreshold[j],
"s_",
distance_threshold,
".csv",
sep = "")) %>%
select(-X, -patient) %>%
slice(nrow(.)) %>%
mutate(analysis = paste(analysis_type[i],
timeThreshold[j],
sep = "_")) %>%
bind_rows(overall_performance)
}
write.csv(overall_performance, paste("~/Data/Projects/Club M/Healthy volunteers study/Analysis/",
analysis_type[i],
"/performance_OSM_overall_",
distance_threshold,
".csv", sep = ""))
}
|
7de09f47703e19db8b1ff03e6f64f212c0936ad4
|
a6abcf45a3c6e9bca1fa205e5e5ccb3b09c38d58
|
/Labs/Lab04/Montano_Lab04.R
|
504326c7dbaa844fb2d898878985beef6b966ac4
|
[] |
no_license
|
montanol/CompBioLabsAndHomework
|
779a7f67bbcd29b98b21f1d575382a579389549b
|
c9563b613a3d840eb654ca274eae6153cd0450c2
|
refs/heads/master
| 2020-04-18T15:01:35.746514
| 2019-05-03T20:36:01
| 2019-05-03T20:36:01
| 167,603,219
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,770
|
r
|
Montano_Lab04.R
|
# Lab04
# Part I
# Step 1: write a for loop that prints the word "hi" ten times
for (i in 1:10) {
print("hi")
}
#Step 2: Create a for loop for Tim's total amount of money for the next 8 weeks
# assign parameters
initial <- 10
change <- 5 - (2 * 1.34)
total <- rep(0,8)
for (i in 1:8) {
total[i] <- initial + change
initial <- total[i]
}
#Step 3: calculate population size for 7 years
# assign parameters
initial_pop <- 2000
growth_rate <- .05
total_pop <- rep(0,7)
for (i in 1:7) {
total_pop[i] <- initial_pop - (initial_pop * growth_rate)
initial_pop <- total_pop[i]
}
#Step 4: predict the value of n[12] based on a logistic growth equation using a for loop
# when t = 1,
k <- 10000
r <- 0.8
abundances <- rep(0,12)
abundances[1] <- 2500 #n[1]
for (i in 2:12) {
abundances[i] <- abundances[i-1] + (r * abundances[i-1] * (k - abundances[i-1]) / k)
}
#the value of n[12]
abundances[12]
#Part II
#Step 5a: use rep command to make a vector of 18 zeros
rep(0, 18)
#Step 5b: create a for loop that will store 3
#times the ith value of 'i' in the 'ith' spot of the vector above
vec <- rep(0, 18)
for (i in seq(1,18)) {
vec[i] <- (3 * i)
}
#Step 5c: make a new vector of zeros and make the first entry the value of 1
vec2 <- rep(0,18)
vec2[1] <- 1
#Step 5d: create a loop, starting at 2nd entry of vec2, position is = to 1+(2*previous entry)
for (i in 2:18) {
vec2[i] <- 1 + (2 * vec2[i-1])
}
#Step 6: create a loop that makes vector of 1st 20 Fibonacci numbers
lengOfFibNum <- 20
fibnum <- numeric(lengOfFibNum)
fibnum[1] <- 0
fibnum[2] <- 1
for (i in 3:lengOfFibNum) {
fibnum[i] <- fibnum[i-1]+fibnum[i-2]
}
#Step 7: Make a plot of the data from step 4
time <- seq(1,12)
#already stored the abundances in step 4
plot(time, abundances)
|
b0e36707fd19a8c486e3a8c45a4b37ac7a7c004e
|
7eb11c644f03b7f112fcf2ce461718b96586d3d3
|
/ensemble/LRBaggingEnsemble.R
|
5df0e64b36226092b24a8db03d883cf1e148bf9f
|
[] |
no_license
|
nutphi/Data-Mining-Project-6180
|
35a06b70a126cfd3d52cfd5f6634f476614c72e5
|
615ce216b0e142a31e12930a78c2d4c420900b62
|
refs/heads/master
| 2020-03-22T10:39:41.797328
| 2018-07-06T03:31:30
| 2018-07-06T03:31:30
| 139,918,069
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,475
|
r
|
LRBaggingEnsemble.R
|
library(caret)# used for createDataPartition
library(readr) # Reading data file
source("~/INSE6180_Project/base/LogisticRegression.R")
source("~/INSE6180_Project/preprocessing/Preprocessing.R")
dat <- read_csv("~/INSE6180_Project/data/training10k.csv")
dat <-preprocessing(dat)
# Split data into two sets - Training and Testing
set.seed(107)
inTrain <- createDataPartition(y = dat$click, p = .7, list = FALSE)
training <- dat[ inTrain,]
testing <- dat[-inTrain,]
set.seed(200)
inTrain <- createDataPartition(y = training$click, p = .9, list = FALSE)
training1 <- dat[ inTrain,]
set.seed(300)
inTrain <- createDataPartition(y = training$click, p = .9, list = FALSE)
training2 <- dat[ inTrain,]
set.seed(400)
inTrain <- createDataPartition(y = training$click, p = .9, list = FALSE)
training3 <- dat[ inTrain,]
set.seed(500)
inTrain <- createDataPartition(y = training$click, p = .9, list = FALSE)
training4 <- dat[ inTrain,]
set.seed(600)
inTrain <- createDataPartition(y = training$click, p = .9, list = FALSE)
training5 <- dat[ inTrain,]
rm(inTrain)
lr1.model<-logisticRegression(training1)
lr2.model<-logisticRegression(training2)
lr3.model<-logisticRegression(training3)
lr4.model<-logisticRegression(training4)
lr5.model<-logisticRegression(training5)
lr1.pred <- predict(lr1.model,testing, type = 'response')
for(i in 1:length(lr1.pred)){
lr1.pred[i]<-ifelse(lr1.pred[i]>0.5,"Yes","No")
}
lr2.pred <- predict(lr2.model,testing, type = 'response')
for(i in 1:length(lr2.pred)){
lr2.pred[i]<-ifelse(lr2.pred[i]>0.5,"Yes","No")
}
lr3.pred <- predict(lr3.model,testing, type = 'response')
for(i in 1:length(lr3.pred)){
lr3.pred[i]<-ifelse(lr3.pred[i]>0.5,"Yes","No")
}
lr4.pred <- predict(lr4.model,testing, type = 'response')
for(i in 1:length(lr4.pred)){
lr4.pred[i]<-ifelse(lr4.pred[i]>0.5,"Yes","No")
}
lr5.pred <- predict(lr5.model,testing, type = 'response')
for(i in 1:length(lr5.pred)){
lr5.pred[i]<-ifelse(lr5.pred[i]>0.5,"Yes","No")
}
confusionMatrix(lr1.pred, testing$click)
confusionMatrix(lr2.pred, testing$click)
confusionMatrix(lr3.pred, testing$click)
confusionMatrix(lr4.pred, testing$click)
confusionMatrix(lr5.pred, testing$click)
lrBagging<-NULL
lrBagging <- as.factor(lrBagging)
levels(lrBagging) = list(No=c("1"),Yes=c("2"))
for(i in 1:length(lr1.pred)){
lrBagging[i]<-ifelse(which.max(table(c(lr1.pred[i],lr2.pred[i],lr3.pred[i],lr4.pred[i],lr5.pred[i])))==1,"No","Yes")
}
lrBagging
confusionMatrix(lrBagging,testing$click)
|
1b92a8d0accf9b447fda625c94bf42c3cb9e3477
|
58715ffa0601d5453a820eb420a9662e94c8a41f
|
/plot3.R
|
cabde4543580ff15ff4cfa072267be377c98cee2
|
[] |
no_license
|
kmaclare/ExData_Plotting1
|
f57c682483e7b304159a2f356778fe31c9121482
|
a70fd0ab2cc89c2139683ce68052fe3d215a2d5d
|
refs/heads/master
| 2021-01-23T23:19:37.096811
| 2015-03-05T04:22:53
| 2015-03-05T04:22:53
| 31,689,594
| 0
| 0
| null | 2015-03-05T01:34:58
| 2015-03-05T01:34:58
| null |
UTF-8
|
R
| false
| false
| 1,230
|
r
|
plot3.R
|
# read in data from text file
df <- read.table("household_power_consumption.txt", sep=";", header=TRUE)
# merge date/time data into new column "date2" to do math/comparisons
df$date2 = strptime(paste(as.Date(df$Date, format = "%d/%m/%Y"), df$Time), format = "%Y-%m-%d %H:%M:%S")
# subset data into new data frame
df2 <- df[which((df$date2 >= "2007-02-01 00:00:00") & (df$date2 < "2007-02-03")),]
png(filename="plot3.png", width=480, height=480)
plot (df2$date2,df2$Sub_metering_1,type="n", ylim=c(0,39), # sets the x and y axes scales
xlab="",ylab="Energy sub metering") # adds titles to the axes
lines(df2$date2,df2$Sub_metering_1,col="black",lwd=2.5) # adds a line for sub metering 1
lines(df2$date2,df2$Sub_metering_2,col="red",lwd=2.5) # adds a line for sub metering 2
lines(df2$date2,df2$Sub_metering_3,col="blue",lwd=2.5) # adds a line for sub metering 3
legend("topright", # places a legend at the appropriate place
c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"), # puts text in the legend
lty=c(1,1), # gives the legend appropriate symbols (lines)
lwd=c(2.5,2.5),col=c("black", "red", "blue")) # gives the legend lines the correct color and width
dev.off()
|
cea4c9a43403d4901de28258d8aed2ee8bbe7ec6
|
c0c564a4c9e2aa4f847c36e24f89aa74c8706f29
|
/Exam/Exam-scriptC.R
|
87d084a0f06b158172be1384cae8f9e874724972
|
[] |
no_license
|
MglMX/EDDA-VU
|
d81afc64eea68c2fe2003b969eec4d24cf4e6a5d
|
4a7b0d7adb45fa3d9353ac8c0d6788fcd0dd180b
|
refs/heads/master
| 2021-04-30T07:20:04.422697
| 2018-03-23T15:19:19
| 2018-03-23T15:19:19
| 121,392,893
| 4
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,611
|
r
|
Exam-scriptC.R
|
#EXAM!!!!!!
gala_data = read.table("gala.txt", header = TRUE)
gala_data
attach(gala_data)
gala_glm = glm(Species~Area+Elevation+Nearest+Scruz+Adjacent, family= poisson, data = gala_data)
summary(gala_glm)
# As we can see, all the p-values are smaller than 0.05 now
# That means all the variables are significant for our model.
# The resulting model of the step-down method is:
Species = 3.15 - 5.80e-04 * Area + 3.54e-03 * Elevation + 8.83e-03 * Nearest - 5.71e-03 * Scruz - 6.63e-04 * Adjacent + error
######################## END #########################################
## For lm
# Nearest p = 0.9932 > 0.05
# Omit it
# Step Down
gala_glm_sd = glm(Species~Area+Elevation+Scruz+Adjacent, family= poisson, data = gala_data)
summary(gala_glm_sd)
# Area p = 0.27555 > 0.05
# Omit it
gala_glm_sd = glm(Species~Elevation+Scruz+Adjacent, family= poisson, data = gala_data)
summary(gala_glm_sd)
# Scruz p = 0.19632 > 0.05
# Omit it
gala_glm_sd = glm(Species~Elevation+Adjacent, family= poisson, data = gala_data)
summary(gala_glm_sd)
# As we can see, all the p-values are smaller than 0.05 now
# That means all the variables are significant for our model.
# The resulting model of the step-down method is:
Species = 1.4329 + 0.2766 * Elevation - 0.0689 * Adjacent + error
##TASK 2
###Task 2
galapagos=read.table("gala.txt", header=TRUE)
#With this method, we start with all of the possible variables in our model. Then, we choose the one that gives the highest p-value. If this p-value is bigger than 0.05, we will discard the variable and repeat the process without it.
galaglm=glm(sqrt(Species)~Area+Elevation+Nearest+Scruz+Adjacent,family=poisson,data=galapagos)
summary(galaglm)
#The variable Nearest has the highest p-value with a p-value of 0.411. Since this p-value is bigger than 0.05, we discard it for our model and continue to the next iteration.
galaglm=glm(sqrt(Species)~Area+Elevation+Scruz+Adjacent,family=poisson,data=galapagos)
summary(galaglm)
#The variable Scruz has the highest p-value with a p-value of 0.2466. Since this p-value is bigger than 0.05, we discard it for our model and continue to the next iteration.
galaglm=glm(sqrt(Species)~Area+Elevation+Adjacent,family=poisson,data=galapagos)
summary(galaglm)
#As we can see, all the p-values are smaller than 0.05, thus meaning that all the variables are significant for our model.
#The resulting model of the step-down method is:
#sqrt(Species) = 1.314e+00 + -3.262e-04\*Area + 2.018e-03\*Elevation -3.987e-04\*Adjacent + error
|
50919f4a5664ef8dea49902684a63e2ba6d1c276
|
d73240742f7e42aae849ad48c12a444dc34349f3
|
/tests/testthat/test-colour_values.R
|
da44b8a35ffc6a8c3f286d14a0788bacd0d15cc2
|
[] |
no_license
|
techisdead/RcppViridis
|
d0b61cfeae16677bddbace6f832cae7c5d8aa09f
|
add5d90bc336afef212ade8b398e3809d361f59a
|
refs/heads/master
| 2020-03-28T15:40:59.229437
| 2018-09-13T00:25:50
| 2018-09-13T00:25:50
| 148,615,920
| 0
| 0
| null | 2018-09-13T09:37:14
| 2018-09-13T09:37:14
| null |
UTF-8
|
R
| false
| false
| 8,550
|
r
|
test-colour_values.R
|
context("colourvalues")
test_that("numeric values mapped to colours", {
expect_true(colour_values(1) == "#440154FF")
expect_true(all(colour_values(1:2) == c("#440154FF","#FDE725FF")))
## NAs
expect_true(colour_values(NA) == "#808080FF")
expect_true("#808080FF" %in% colour_values(c(1,2,NA,4)))
expect_true(sum("#808080FF" == colour_values(c(1, NA)))==1)
expect_true(sum("#808080FF" == colour_values(c(1,NA,NaN,Inf,-Inf,1))) == 6)
expect_true("#000000FF" == colour_values(NA, na_colour = "#000000FF"))
})
test_that("character values mapped to colours", {
expect_true(all(colour_values(c("a","b")) == colour_values(1:2)))
expect_true(all(colour_values(letters) == colour_values(1:26)))
## NAs
expect_true(colour_values(NA_character_) == "#808080FF")
expect_true(sum("#808080FF" == colour_values(c(1,"a",NA)) ) == 1)
expect_true(sum("#808080FF" == colour_values(c("a",NA, "d","f",NA))) == 2)
})
test_that("factor values mapped to colours", {
f <- as.factor(letters)
expect_true(all(colour_values(f) == colour_values(1:26)))
## NAs
f <- as.factor(c(NA, letters, NA))
expect_true(all(colour_values(f) == c("#808080FF", colour_values(1:26), "#808080FF")))
})
test_that("logical values mapped to colours", {
expect_true(all(colour_values(c(F,T)) == colour_values(1:2)))
## NAs
expect_true(all(colour_values(c(F,T,NA)) == colour_values(c(1:2, NA))))
})
test_that("date values mapped to colours", {
d <- as.Date(c("2018-01-01","2018-01-01","2018-01-02"))
expect_true(all(colour_values(d) == colour_values(c(1,1,2))))
## NAs
expect_true(all(colour_values(c(NA,d,NA)) == colour_values(c(NA,1,1,2,NA))))
})
test_that("posix values mapped to colours", {
d <- as.POSIXct(c("2018-01-01","2018-01-01","2018-01-02"))
expect_true(all(colour_values(d) == colour_values(c(1,1,2))))
## NAs
expect_true(all(colour_values(c(NA,d,NA)) == colour_values(c(NA,1,1,2,NA))))
})
test_that("matrix palette accepted", {
##
m <- grDevices::colorRamp(c("red","green","blue"))(0:4/4)
expect_true(all(colour_values(1:5, palette = m) == c("#FF0000FF", "#808000FF", "#00FF00FF", "#008080FF", "#0000FFFF")))
## This doesn't exactly equal
#grDevices::colorRampPalette(c("red","green","blue"))(5)
## I 'think' because of boost's interpolation
expect_error(
colour_values(1:5, palette = m[,1:2])
#, "Matrix palette needs either 3 (R, G, B) or 4 (R, G, B, A) columns"
)
alpha <- c(0, 100, 150, 200, 255)
m <- cbind( grDevices::colorRamp(c("red","green","blue"))(0:4/4), alpha )
expect_true(all(colour_values(1:5, palette = m) == c("#FF000000", "#80800064", "#00FF0096", "#008080C8", "#0000FFFF")))
## string data
expect_true( all( colour_values(letters[1:5], palette = m) == colour_values(1:5, palette = m) ) )
})
test_that("alpha values applied", {
expect_true(all(substr(colour_values(1:5),8,9) == "FF"))
expect_true(all(substr(colour_values(1:5, alpha = 0.0),8,9) == "00"))
expect_true(all(substr(colour_values(1:5, alpha = 128),8,9) == "80"))
expect_true(all(substr(colour_values(1:5, alpha = 64),8,9) == "40"))
expect_true(all(substr(colour_values(1:5, alpha = 192),8,9) == "C0"))
expect_true(all(substr(colour_values(1:5, alpha = 0),8,9) == "00"))
#expect_error(colour_values(1:5, alpha = c(100,200)),"alpha must either be a single value, or the same length as x")
expect_error(colour_values(1:5, alpha = c(100,200)),"Unknown alpha definition")
expect_true(all(colour_values(letters) == colour_values(letters, alpha = 255)))
expect_true(all(substr( colour_values(letters, alpha = 0),8,9) == "00"))
## individual values for each value
expect_true(all(substr(colour_values(1:5, alpha = c(0, 128, 64, 192, 255) ),8,9) == c("00","80","40","C0","FF")))
## alpha scaled according to numeric variable
colour_values(1:5, alpha = 256:260)
})
test_that("rgb matrix returned", {
m <- colour_values_rgb(1:5)
expect_true(all(m[,1] == c(68,59,33,93,253)))
expect_true(all(m[,2] == c(1,82,144,201,231)))
expect_true(all(m[,3] == c(84,139,140,99,37)))
expect_true(all(m[,4] == 255))
m <- colour_values_rgb(letters[1:5])
expect_true(all(m[,1] == c(68,59,33,93,253)))
expect_true(all(m[,2] == c(1,82,144,201,231)))
expect_true(all(m[,3] == c(84,139,140,99,37)))
expect_true(all(m[,4] == 255))
m <- colour_values_rgb(1:5, palette = "inferno")
expect_true(all(m[,1] == c(0,87,187,249,252)))
expect_true(all(m[,2] == c(0,16,55,141,255)))
expect_true(all(m[,3] == c(4,109,85,10,164)))
expect_true(all(m[,4] == 255))
m <- colour_values_rgb(letters[1:5], palette = "inferno")
expect_true(all(m[,1] == c(0,87,187,249,252)))
expect_true(all(m[,2] == c(0,16,55,141,255)))
expect_true(all(m[,3] == c(4,109,85,10,164)))
expect_true(all(m[,4] == 255))
m <- colour_values_rgb(1:5, palette = "inferno", alpha = 100)
expect_true(all(m[,1] == c(0,87,187,249,252)))
expect_true(all(m[,2] == c(0,16,55,141,255)))
expect_true(all(m[,3] == c(4,109,85,10,164)))
expect_true(all(m[,4] == 100))
m <- colour_values_rgb(letters[1:5], palette = "inferno", alpha = 100)
expect_true(all(m[,1] == c(0,87,187,249,252)))
expect_true(all(m[,2] == c(0,16,55,141,255)))
expect_true(all(m[,3] == c(4,109,85,10,164)))
expect_true(all(m[,4] == 100))
})
test_that("rgb to hex to rgb works", {
alpha <- c(0, 100, 150, 200, 255)
m <- cbind( grDevices::colorRamp(c("red","green","blue"))(0:9/9), alpha )
h <- colour_values(1:10, palette = m)
mh <- t(grDevices::col2rgb(h))
m2 <- colour_values_rgb(1:10, palette = m)
expect_true(sum(abs(m[,1] - m2[,1])) <= nrow(m))
expect_true(sum(abs(m[,2] - m2[,2])) <= nrow(m))
expect_true(sum(abs(m[,3] - m2[,3])) <= nrow(m))
expect_true(sum(abs(m[,4] - m2[,4])) <= nrow(m))
expect_true(sum(abs(m[,1] - mh[,1])) <= nrow(m))
expect_true(sum(abs(m[,2] - mh[,2])) <= nrow(m))
expect_true(sum(abs(m[,3] - mh[,3])) <= nrow(m))
})
test_that("different sizes of variables and palettes work", {
## - very few variables - large palette
df <- data.frame(x = 1:2)
m <- grDevices::colorRamp(c("red","green","blue","yellow"))(0:500/500)
df$col <- colour_values(df$x, palette = m)
expect_true(all(df$col == c("#FF0000FF","#FFFF00FF"))) ## shoudl be extremeties of palette
# df$a <- 10
# barplot(df$a, col = df$col)
## - lots of variables - small palette
df <- data.frame(x = 1:10000)
m <- grDevices::colorRamp(c("red"))(0:4/4)
df$col <- colour_values(df$x, palette = m)
expect_true(unique(df$col == "#FF0000FF"))
## - few variables - small palette
df <- data.frame(x = 1:2)
m <- grDevices::colorRamp(c("red"))(0:4/4)
df$col <- colour_values(df$x, palette = m)
expect_true(unique(df$col == "#FF0000FF"))
## - lots of variables - large palette
df <- data.frame(x = rnorm(n = 1e6))
m <- grDevices::colorRamp(c("red","green","blue","yellow"))(0:1000/1000)
expect_silent(df$col <- colour_values(df$x))
})
test_that("small range of values give distinct palette", {
expect_true(all(colour_values(c(0.00001, 0.00002)) == c("#440154FF","#FDE725FF")))
})
test_that("256 variables produce 'unique' palette", {
## because of 'splining' and rounding I think it's OK it's not exactly 256 colours
expect_true(abs(256 - length(unique(colour_values(1:256)))) <= 2)
})
test_that("NA handled in RGB return", {
expect_true(all(colour_values_rgb(NA) == c(rep(128,3), 255))) ## default "#808080FF
expect_true(all(colour_values_rgb(NA, na_colour = "#FF000000") == c(255, 0, 0, 0)))
})
test_that("alpha can be excluded from results", {
expect_true(all(substr(colour_values(1:5),1,7) == colour_values(1:5, include_alpha = F)))
expect_true(all(substr(colour_values(letters[1:5]),1,7) == colour_values(letters[1:5], include_alpha = F)))
m <- matrix(rep(255,4 * 5),ncol = 4)
expect_true(all(substr(colour_values(1:5, palette = m),1,7) == colour_values(1:5, palette = m, include_alpha = F)))
expect_true(all(substr(colour_values(letters[1:5], palette = m),1,7) == colour_values(letters[1:5], palette = m, include_alpha = F)))
expect_true(all(colour_values_rgb(1:5)[,1:3] == colour_values_rgb(1:5, include_alpha = F)))
expect_true(all(colour_values_rgb(letters[1:5])[,1:3] == colour_values_rgb(letters[1:5], include_alpha = F)))
m <- matrix(rep(255,4 * 5),ncol = 4)
expect_true(all(colour_values_rgb(1:5, palette = m)[,1:3] == colour_values_rgb(1:5, palette = m, include_alpha = F)))
expect_true(all(colour_values_rgb(letters[1:5], palette = m)[,1:3] == colour_values_rgb(letters[1:5], palette = m, include_alpha = F)))
})
|
1952618b14d4bc90dd11477af7df7223229e9135
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/APML0/man/APML0-package.Rd
|
95c1cf6c640ed5260554ff712b3c69ccfd86eb7d
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,177
|
rd
|
APML0-package.Rd
|
\name{APML0-package}
\alias{APML0-package}
\docType{package}
\title{
Augmented and Penalized Minimization Method L0
}
\description{
Fit linear, logistic and Cox models regularized with L0, lasso (L1), elastic-net (L1 and L2), or net (L1 and Laplacian) penalty, and their adaptive forms, such as adaptive lasso / elastic-net and net adjusting for signs of linked coefficients. It solves L0 penalty problem by simultaneously selecting regularization parameters and performing hard-thresholding (or selecting number of non-zeros). This augmented and penalized minimization method provides an approximation solution to the L0 penalty problem, but runs as fast as L1 regularization problem.
The package uses one-step coordinate descent algorithm and runs extremely fast by taking into account the sparsity structure of coefficients. It could deal with very high dimensional data.
}
\details{
\tabular{ll}{
Package: \tab APML0\cr
Type: \tab Package\cr
Version: \tab 0.10\cr
Date: \tab 2020-1-19\cr
License: \tab GPL (>= 2)\cr
}
Functions:
\code{\link{APML0}}, \code{\link{print.APML0}}\cr
}
\author{
Xiang Li, Shanghong Xie, Donglin Zeng and Yuanjia Wang\cr
Maintainer: Xiang Li <spiritcoke@gmail.com>
}
\references{Li, X., Xie, S., Zeng, D., Wang, Y. (2018).
\emph{Efficient l0-norm feature selection based on augmented and penalized minimization. Statistics in medicine, 37(3), 473-486.}\cr
\url{https://onlinelibrary.wiley.com/doi/full/10.1002/sim.7526}\cr
Boyd, S., Parikh, N., Chu, E., Peleato, B., Eckstein, J. (2011).
\emph{Distributed optimization and statistical learning via the alternating direction method of multipliers. Foundations and Trends in Machine Learning, 3(1), 1-122.}\cr
\url{http://dl.acm.org/citation.cfm?id=2185816}\cr
Friedman, J., Hastie, T., Tibshirani, R. (2010).
\emph{Regularization paths for generalized linear models via coordinate descent, Journal of Statistical Software, Vol. 33(1), 1.}\cr
\url{http://www.jstatsoft.org/v33/i01/}\cr
}
\keyword{False positive control}
\keyword{Number of non-zeros}
\keyword{Regularization}
\keyword{Package}
\examples{
### Linear model ###
set.seed(1213)
N=100;p=30;p1=5
x=matrix(rnorm(N*p),N,p)
beta=rnorm(p1)
xb=x[,1:p1]\%*\%beta
y=rnorm(N,xb)
fiti=APML0(x,y,penalty="Lasso",nlambda=10) # Lasso
fiti2=APML0(x,y,penalty="Lasso",nlambda=10,nfolds=10) # Lasso
# attributes(fiti)
### Logistic model ###
set.seed(1213)
N=100;p=30;p1=5
x=matrix(rnorm(N*p),N,p)
beta=rnorm(p1)
xb=x[,1:p1]\%*\%beta
y=rbinom(n=N, size=1, prob=1.0/(1.0+exp(-xb)))
fiti=APML0(x,y,family="binomial",penalty="Lasso",nlambda=10) # Lasso
fiti2=APML0(x,y,family="binomial",penalty="Lasso",nlambda=10,nfolds=10) # Lasso
# attributes(fiti)
### Cox model ###
set.seed(1213)
N=100;p=30;p1=5
x=matrix(rnorm(N*p),N,p)
beta=rnorm(p1)
xb=x[,1:p1]\%*\%beta
ty=rexp(N, exp(xb))
td=rexp(N, 0.05)
tcens=ifelse(td<ty,1,0) # censoring indicator
y=cbind(time=ty,status=1-tcens)
fiti=APML0(x,y,family="cox",penalty="Lasso",nlambda=10) # Lasso
fiti2=APML0(x,y,family="cox",penalty="Lasso",nlambda=10,nfolds=10) # Lasso
# attributes(fiti)
}
|
dba8275c63ad2be6289f5f4de2475e08d532735d
|
f8d29f169631789521641e0471ca7a7907985062
|
/enhancer_id/shared_code/vienna_tiles.r
|
eaeba9fe295459e46e8e324b71e7d96685c16302
|
[] |
no_license
|
roshanvaid/koenecke_enhancer_papers_2016
|
abd122041c4a9a17139dc785bf757ea27fd25807
|
ce7d317639da16270bfb684e033551753bf3274d
|
refs/heads/master
| 2021-06-01T14:59:18.120055
| 2016-09-02T16:17:13
| 2016-09-02T16:17:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,707
|
r
|
vienna_tiles.r
|
library(GenomicRanges)
library(dplyr)
source("shared_code/stat_tests.r")
vt.df <- readRDS(data_path("stark_enhancers_2014/tile_annotations.df.rds"))
vt.gr <- makeGRangesFromDataFrame(vt.df, seqnames.field="chr", keep.extra=TRUE)
all_tiles.df <- readRDS(data_path("stark_enhancers_2014/all_tiles.df.rds"))
all_tiles.gr <- makeGRangesFromDataFrame(all_tiles.df, seqnames.field="chr", keep.extra=TRUE)
stage_enrichments_for_tiles <- function(tile_ids, group_name, annotations.df, tile_universe) {
all_stages <- sort(unique(annotations.df$stage))
results.df <- all_stages %>%
lapply(function(s) {
tiles_in_stage <- subset(annotations.df, stage == s)$VTID %>% unique()
test.df <- fisher_test_2x2(tile_ids, tiles_in_stage, tile_universe)
test.df$stage <- s
test.df$group_name <- group_name
test.df
}) %>%
bind_rows()
}
term_enrichments_for_tiles <- function(tile_ids, group_name, annotations.df, tile_universe) {
all_terms <- sort(unique(annotations.df$annotation))
results.df <- all_terms %>%
mclapply(function(term) {
tiles_with_term <- subset(annotations.df, annotation == term)$VTID %>% unique()
test.df <- fisher_test_2x2(tile_ids, tiles_with_term, tile_universe)
test.df$term <- term
test.df$group_name <- group_name
test.df
}, mc.cores=6, mc.preschedule=FALSE) %>%
bind_rows()
}
overlapping_tile_ids <- function(gr, tiles.gr) {
tiles.gr[countOverlaps(tiles.gr, gr, ignore.strand=TRUE) > 0]$VTID %>% unique()
}
|
7ee52f890ec7d0491ceaae94cec9896702776b5c
|
2a1e93d5ce1fd21bddbf5667f8da12315234fded
|
/xgbcv.R
|
c6ac1b9303e1482c4a8693226ad720ca8e2bce32
|
[] |
no_license
|
tsuresh83/KaggleSpringLeafMarketingChallenge
|
7d3f7b3eaf383061ba6b1439f653f0476a6fda88
|
6aa2e439c19e705bf8e8afc7691b79b30dc15ad5
|
refs/heads/master
| 2021-01-13T10:05:11.409882
| 2017-08-17T21:23:38
| 2017-08-17T21:23:38
| 72,159,508
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,074
|
r
|
xgbcv.R
|
# Cross-validation driver for an xgboost model on the Springleaf Marketing
# Response data. Loads a previously prepared workspace (expected to provide
# `dtrain`), runs xgb.cv, and saves the CV history to disk.
# NOTE(review): `rm(list=ls())` wipes the caller's workspace — an anti-pattern
# for reusable code; acceptable only as the first line of a standalone script.
rm(list=ls())
library(doMC)
library(xgboost)
library(plyr)
library(caret)
library(pROC)
library(dummy)
library(h2o)
library(h2oEnsemble)
library(SuperLearner)
library(cvAUC)
# Fixed seed for reproducible CV folds.
set.seed(13)
# Loads the prepared xgb.DMatrix `dtrain` (and possibly other objects) from a
# hard-coded absolute path — TODO: confirm the workspace contents on this host.
load("/media/3TB/kag/springleafmarketingresponse/result/XGBoostAllNumericAndFactorsEQThirdAndUnifCombined_2015-10-0904:52:59_suresh-le_6_WS.rdata")
# NOTE(review): nround.cv is set but the live xgb.cv call below uses nround=2;
# only the commented-out call referenced nround.cv.
nround.cv = 10
# ( bst.cv <- xgb.cv(param=param, data=dtrain,
# nfold=10, nrounds=nround.cv, prediction=TRUE, verbose=T) )
# Binary-classification CV parameters; commented entries are earlier
# experiments kept for reference.
paramCV <- list( objective = "binary:logistic",
 #booster = "gblinear",
 #gamma =10,
 eta = 0.01,
 max_depth = 10,
 subsample = 0.7,
 colsample_bytree = 0.9,
 eval_metric = "auc"
 # alpha = 0.0001,
 # lambda = 1
)
# Run 5-fold CV for 2 boosting rounds and persist the AUC history.
history <-xgb.cv(params = paramCV,data = dtrain, nround=2, nfold = 5)
save(history,file="/media/3TB/kag/springleafmarketingresponse/result/XgbCVResult.rdata")
|
98bf6908ff50381e801ff06910ee9606036b0536
|
13c6a1a108ffd9edc98bbbd0287c503b484db2f3
|
/man/p13.16.Rd
|
a0f447365e4d1271d47dc313673e188e8ec06610
|
[] |
no_license
|
cran/MPV
|
81f894f2ecca63c391037ea8dc6033dbc7b57d98
|
b757e5149d3b29c0a8f20cc4315acec17a27693b
|
refs/heads/master
| 2023-03-03T14:14:08.549709
| 2023-02-26T02:40:02
| 2023-02-26T02:40:02
| 17,680,727
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 608
|
rd
|
p13.16.Rd
|
\name{p13.16}
\alias{p13.16}
\title{Data Set for Problem 13-16}
\usage{data(p13.16)}
\description{
The \code{p13.16} data frame has 16 rows and 5 columns.
}
\format{
This data frame contains the following columns:
\describe{
\item{X1}{a numeric vector}
\item{X2}{a numeric vector}
\item{X3}{a numeric vector}
\item{X4}{a numeric vector}
\item{Y}{a numeric vector}
}
}
\source{
Montgomery, D.C., Peck, E.A., and Vining, C.G. (2001)
Introduction to Linear Regression Analysis. 3rd Edition, John Wiley and Sons.
}
\examples{
data(p13.16)
}
\keyword{datasets}
|
7668e966843c1f97b77e845de8c3af465851f7d0
|
2019eb6672eb249c6334aef416f43216273c4121
|
/tests/testthat/test-merge_vim.R
|
9c09124349636c0373e38aedbc966c147dbb9f1f
|
[
"MIT"
] |
permissive
|
jjfeng/vimp
|
16584dcf86b9aba1bf53f8961dffa7c510eb6358
|
f57fce8061eedddee233a828b5d9fc8cafdbaa96
|
refs/heads/master
| 2022-12-12T11:01:00.959766
| 2020-09-15T00:12:08
| 2020-09-15T00:12:08
| 295,716,651
| 0
| 0
|
NOASSERTION
| 2020-09-15T12:11:59
| 2020-09-15T12:11:58
| null |
UTF-8
|
R
| false
| false
| 2,641
|
r
|
test-merge_vim.R
|
## Tests for vimp::merge_vim — verifies that variable-importance estimates
## from separate vim()/cv_vim() calls can be merged and that the merged
## estimates match the known truth for this simulated data-generating process.
## load required functions and packages
library("testthat")
library("SuperLearner")
library("vimp")
library("xgboost")
## generate the data
# Fixed seed so the simulated truth (and tolerances below) are reproducible.
set.seed(4747)
p <- 2
n <- 10000
x <- data.frame(replicate(p, stats::runif(n, -5, 5)))
## apply the function to the x's
# True conditional mean: nonlinear in X1, quadratic in X2, unit-variance noise.
y <- (x[,1]/5)^2*(x[,1]+7)/5 + (x[,2]/3)^2 + rnorm(n, 0, 1)
# Two-fold sample split: fold 1 fits the full regression, fold 2 the reduced ones.
folds <- sample(rep(seq_len(2), length = length(y)))
## set up a library for SuperLearner
# Thin xgboost wrapper pinning the regression objective and shallow trees.
SL.xgboost1 <- function(..., max_depth = 1, ntree = 500, shrinkage = 0.1){
  SL.xgboost(..., objective = 'reg:squarederror', max_depth = max_depth, ntree = ntree, shrinkage = shrinkage)
}
learners <- c("SL.glm.interaction", "SL.xgboost1", "SL.mean")
V <- 2
## fit the data with all covariates
full_fit <- SuperLearner(Y = y[folds == 1], X = x[folds == 1, ], SL.library = learners, cvControl = list(V = V))
full_fitted <- predict(full_fit)$pred
## fit the data with only X1
# Reduced fits regress the full-model predictions on the remaining covariate.
reduced_fit_1 <- SuperLearner(Y = full_fitted, X = x[folds == 2, -2, drop = FALSE], SL.library = learners, cvControl = list(V = V))
reduced_fitted_1 <- predict(reduced_fit_1)$pred
## fit the data with only X2
reduced_fit_2 <- SuperLearner(Y = full_fitted, X = x[folds == 2, -1, drop = FALSE], SL.library = learners, cvControl = list(V = V))
reduced_fitted_2 <- predict(reduced_fit_2)$pred
test_that("Merging variable importance estimates works", {
  # Importance of each covariate estimated separately, then merged; expected
  # values are the analytic R^2-based truths for this DGP (loose tolerances
  # because the fits are stochastic).
  est_1 <- vim(Y = y, f1 = full_fitted, f2 = reduced_fitted_1, run_regression = FALSE, indx = 2, type = "r_squared", folds = folds)
  est_2 <- vim(Y = y, f1 = full_fitted, f2 = reduced_fitted_2, run_regression = FALSE, indx = 1, type = "r_squared", folds = folds)
  merged_ests <- merge_vim(est_1, est_2)
  expect_equal(merged_ests$est[1], (500/729)/(1 + 2497/7875 + 500/729), tolerance = 0.2, scale = 1)
  expect_equal(merged_ests$est[2], (2497/7875)/(1 + 2497/7875 + 500/729), tolerance = 0.4, scale = 1)
  expect_output(print(merged_ests), "Estimate", fixed = TRUE)
})
test_that("Merging cross-validated variable importance estimates works", {
  # Same check for the cross-validated estimator, which runs its own
  # regressions internally (run_regression = TRUE).
  est_1 <- cv_vim(Y = y, X = x, run_regression = TRUE, indx = 2, V = V, cvControl = list(V = V), SL.library = learners,
  env = environment(), na.rm = TRUE)
  est_2 <- cv_vim(Y = y, X = x, run_regression = TRUE, indx = 1, V = V, cvControl = list(V = V), SL.library = learners,
  env = environment(), na.rm = TRUE)
  merged_ests <- merge_vim(est_1, est_2)
  expect_equal(merged_ests$est[1], (500/729)/(1 + 2497/7875 + 500/729), tolerance = 0.1, scale = 1)
  expect_equal(merged_ests$est[2], (2497/7875)/(1 + 2497/7875 + 500/729), tolerance = 0.1, scale = 1)
  expect_output(print(merged_ests), "Estimate", fixed = TRUE)
})
|
fc514a975a715d875e12befc04efc8007738d352
|
93fef68695ec291350e728b928c608f6cb9e09eb
|
/Archive/009_multivariate_survival_correlations_Figure1b.R
|
375e819817f5883607d41d1460211a714c19baf9
|
[] |
no_license
|
HongyuanWu/lncRNAs_TCGA
|
ae4fa9202704545fc59a9dae19dabeeda2b7cb34
|
cbfe2356f8d65b20672dcc378fe7de309eec3dba
|
refs/heads/master
| 2023-07-28T00:11:29.152750
| 2021-09-09T13:33:01
| 2021-09-09T13:33:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 22,083
|
r
|
009_multivariate_survival_correlations_Figure1b.R
|
###---------------------------------------------------------------
###009_multivariate_survival_correlations_Figure1b.R
###---------------------------------------------------------------
#what?
#Systematic analysis of how prognostic lncRNAs are across cancer types
#After finding prognostic lncRNAs --> look at how co-expressed these pairs of lncRNAs are
#This script also generates supplementary tables 1 and 2
#load all required data
source("universal_LASSO_survival_script.R")
#load libraries
require(caTools)
library(survAUC)
library(glmnet)
library(survcomp)
library(caret)
library(stringr)
library(EnvStats)
library(patchwork)
library(ggpubr)
library(ggrepel)
library(viridis)
library(patchwork)
library(caret)
library(Rtsne)
library(data.table)
date = Sys.Date()
#------DATA---------------------------------------------------------
#UCSC gene info
ucsc <- fread("UCSC_hg19_gene_annotations_downlJuly27byKI.txt", data.table=F)
#z <- which(ucsc$hg19.ensemblSource.source %in% c("antisense", "lincRNA", "protein_coding"))
#ucsc <- ucsc[z,]
z <- which(duplicated(ucsc[,8]))
ucsc <- ucsc[-z,]
#fantom
fantom <- fread("lncs_wENSGids.txt", data.table=F) #6088 lncRNAs
# Strip the version suffix from an Ensembl gene ID, e.g.
# "ENSG00000251562.7" -> "ENSG00000251562". Only the first element of `row`
# (the ID column) is used; any additional elements are ignored.
extract3 <- function(row) {
  gene_id <- as.character(row[[1]])
  gsub("\\..*", "", gene_id)
}
fantom[,1] <- apply(fantom[,1:2], 1, extract3)
#remove duplicate gene names (gene names with multiple ensembl ids)
z <- which(duplicated(fantom$CAT_geneName))
rm <- fantom$CAT_geneName[z]
z <- which(fantom$CAT_geneName %in% rm)
fantom <- fantom[-z,]
#summarize lncRNAs that we studied
lncs = colnames(rna)[which(str_detect(colnames(rna), "ENSG"))]
z = which(fantom$CAT_geneID %in% lncs)
fantom = fantom[z,]
write.csv(fantom, file="5785_lncRNAs_used_in_study_table2.csv", quote=F, row.names=F)
#summarize patients
pats = unique(rna[,c("type", "Cancer")])
tt = as.data.table(table(rna$type))
colnames(tt) = c("type", "num_patients")
tt = merge(tt, pats, by="type")
tt = tt[order(num_patients)]
write.csv(tt, file="TCGA_cancer_types_used_in_study_table1.csv", quote=F, row.names=F)
all_genes = as.data.frame(unique(c(colnames(rna), colnames(pcg))))
z = which(str_detect(all_genes[,1], "ENSG"))
all_genes = all_genes[z,]
all_genes = as.data.frame(all_genes)
colnames(all_genes)[1] = "gene"
all_genes$type = ""
z = which(all_genes$gene %in% colnames(rna))
all_genes$type[z] = "lncRNA"
z = which(all_genes$gene %in% colnames(pcg))
all_genes$type[z] = "pcg"
saveRDS(all_genes, file="all_genes_used_in_TCGA_april17.rds")
###---------------------------------------------------------------
#function that tests each lncRNA's survival
#1. list of cancers to apply function to
cancers = as.list(unique(rna$Cancer))
#remove cancer types with less than 50 patients
pats_num = as.data.table(table(rna$Cancer))
pats_num = filter(pats_num, N <50)
canc_rm = pats_num$V1
#remove those ones
cancers = cancers[which(!(cancers %in% canc_rm))]
#2. function that splits data into cancers
# Subset the global `rna` expression table to the rows of one cancer type.
get_canc <- function(canc) {
  canc_rows <- which(rna$Cancer == canc)
  rna[canc_rows, ]
}
canc_datas = llply(cancers, get_canc)
#3. function that calculates survival for each gene
# For one cancer's patient-by-gene data frame, run a median-split Cox
# proportional-hazards test for every expressed lncRNA (ENSG column) and
# return an FDR-adjusted results table sorted by FDR.
# Assumes `dato` carries clinical columns (OS, OS.time, age, gender, race,
# stage, grade) alongside ENSG expression columns — TODO confirm upstream.
canc_survival_genes = function(dato){
  #look at all lncRNAs that are expressed in at least some patients
  # Drop ENSG columns whose expression sums to zero across all patients.
  z = which(str_detect(colnames(dato), "ENSG"))
  sums = apply(dato[,z], 2, sum)
  rm = names(sums[which(sums == 0)])
  if(!(length(rm)==0)){
  z = which(colnames(dato) %in% rm)
  dato = dato[,-z]
  }
  print(dato$type[1])
  z = which(str_detect(colnames(dato), "ENSG"))
  genes = unique(colnames(dato)[z])
  #TEST------------------------------------------------------------------------------------------
  #genes = genes[1:100]
  canc_data_genes_analyze = dato
  # Per-gene worker: median-split the cohort on this gene's expression and
  # fit a clinically adjusted Cox model; returns NULL (implicitly) when the
  # split yields fewer than 10 patients in either group.
  get_survival = function(gene){
  print(gene)
  results_cox <- as.data.frame(matrix(ncol=8)) ; colnames(results_cox) <- c("gene", "coef", "HR", "pval", "low95", "upper95", "risk_size", "num_patients")
  #keep gene expresion as well as clinical columns
  cols_keep = c(gene, "patient", "age_at_initial_pathologic_diagnosis", "gender", "race", "clinical_stage", "histological_grade", "OS", "OS.time")
  dat = canc_data_genes_analyze[,cols_keep]
  dat$age_at_initial_pathologic_diagnosis = as.numeric(dat$age_at_initial_pathologic_diagnosis)
  #remove columns with less than 2 contrasts
  # Single-level covariates would break the Cox model's contrasts.
  check_contrasts = function(col){
  check = dim(table(col))
  if(check >1){
  return("keep")
  }
  }
  keep = unlist(apply(dat, 2, check_contrasts))
  z = which(colnames(dat) %in% names(keep))
  dat = dat[,z]
  dat$OS = as.numeric(as.character(dat$OS))
  dat$OS.time = as.numeric(as.character(dat$OS.time))
  z = which(colnames(dat) %in% gene)
  colnames(dat)[z] = "gene"
  dat$gene = as.numeric(dat$gene)
  rownames(dat) = dat$patient
  dat$patient = NULL
  #split patients
  med = median(dat$gene)
  #remove NAs
  # Patients without a survival time cannot enter the model.
  z = which(is.na(dat$OS.time))
  if(!(length(z) ==0)){
  dat = dat[-z,]}
  med_gene = median(dat$gene)
  dat$med = ""
  if(med_gene ==0){
  #if median = 0 then anyone greater than zero is 1
  l1 = which(dat$gene > 0)
  l2 = which(dat$gene ==0)
  dat$med[l1] = 1
  dat$med[l2] = 0
  }
  if(!(med_gene ==0)){
  # Standard median split: >= median is the "high" group (1).
  l1 = which(dat$gene >= med_gene)
  l2 = which(dat$gene < med_gene)
  dat$med[l1] = 1
  dat$med[l2] = 0
  }
  # Require at least 10 patients on each side of the split.
  check1 = table(dat$med)[1] >= 10
  check2 = table(dat$med)[2] >= 10
  if(check1 & check2){
  if(dim(table(dat$med)) ==2){
  dat$gene = NULL
  dat = dat[,c("med", colnames(dat)[2:ncol(dat)-1])] #rearrange
  res.cox <- coxph(Surv(OS.time, OS) ~ ., data = dat)
  hr = summary(res.cox)$coefficients[1,c(2)]
  num_pat = nrow(dat)
  # "risk group" = whichever split level carries the hazard (HR direction).
  if(hr > 1){
  risk = length(which(dat$med ==1))
  }
  if(hr <1){
  risk = length(which(dat$med ==0))
  }
  row <- c(gene, summary(res.cox)$coefficients[1,c(1,2,5)], summary(res.cox)$conf.int[1,c(3,4)], risk, num_pat)
  names(row) <- names(results_cox)
  return(row)
  }}} #end get_survival function
  genes_survival = llply(genes, get_survival, .progress="text")
  genes_survival_res = ldply(genes_survival, rbind)
  #fdr
  # Multiple-testing correction across all genes tested in this cancer.
  colnames(genes_survival_res) = c("gene", "coef", "HR", "pval", "low95", "upper95", "risk_size", "num_patients")
  genes_survival_res$fdr = p.adjust(as.numeric(genes_survival_res$pval), method="fdr")
  genes_survival_res$canc = dato$Cancer[1]
  genes_survival_res = as.data.table(genes_survival_res)
  genes_survival_res = genes_survival_res[order(fdr)]
  return(genes_survival_res)
}
#DO NOT RUN
#all_cancers_genes_surv = llply(canc_datas, canc_survival_genes, .progress="text")
#all_cancers_genes_surv_comb = ldply(all_cancers_genes_surv, data.frame)
#file = paste(date, "009_lncRNAs_prognosis_all_cancers.rds", sep="_")
#saveRDS(all_cancers_genes_surv_comb, file=file) #<---- important file
##############RUN-----------------------------------------------------------------------------------
all_cancers_genes_surv_comb = readRDS("2019-04-16_009_lncRNAs_prognosis_all_cancers.rds") #load most recent file
colnames(canc_conv)[2] = "canc"
all_cancers_genes_surv_comb = merge(all_cancers_genes_surv_comb, canc_conv, by="canc")
all_cancers_genes_surv_comb[,c(3:10)] = apply(all_cancers_genes_surv_comb[,c(3:10)], 2, function(x){as.numeric(x)})
all_cancers_genes_surv_comb = as.data.table(all_cancers_genes_surv_comb)
###-------------------------------------------------------------------------------------------------
all_cancers_genes_surv_comb$pval = -log10(as.numeric(all_cancers_genes_surv_comb$pval))
all_cancers_genes_surv_comb$fdr = -log10(all_cancers_genes_surv_comb$fdr)
all_cancers_genes_surv_comb$HR = as.numeric(all_cancers_genes_surv_comb$HR)
z = which(is.na(all_cancers_genes_surv_comb$pval))
if(!(length(z)==0)){all_cancers_genes_surv_comb = all_cancers_genes_surv_comb[-z,]}
z1 = which(all_cancers_genes_surv_comb$fdr == "Inf")
z2 = which(all_cancers_genes_surv_comb$upper95 == "Inf")
all_cancers_genes_surv_comb = all_cancers_genes_surv_comb[-c(z1,z2),]
z = which(all_cancers_genes_surv_comb$HR > 20)
all_cancers_genes_surv_comb = all_cancers_genes_surv_comb[-z,]
lineval = -log10(0.1)
all_cancers_genes_surv_comb$fdrsig = ""
all_cancers_genes_surv_comb$fdrsig[all_cancers_genes_surv_comb$fdr < lineval] = "FDRnotSig"
all_cancers_genes_surv_comb$fdrsig[all_cancers_genes_surv_comb$pval > lineval] = "Significant"
all_cancers_genes_surv_comb$fdrsig[all_cancers_genes_surv_comb$pval < lineval] = "NotSignificant"
all_cancers_genes_surv_comb$fdrsig[all_cancers_genes_surv_comb$fdr >= lineval] = "FDRSignificant"
#check overlap of sig prognostic lncRNAs bewteen cancer types
all_cancers_genes_surv_comb$risk_perc = as.numeric(all_cancers_genes_surv_comb$risk_size)/as.numeric(all_cancers_genes_surv_comb$num_patients)
all_cancers_genes_surv_comb$risk_perc_tag[(all_cancers_genes_surv_comb$risk_perc > 0.48) | (all_cancers_genes_surv_comb$risk_perc < 0.52)] = "75%_more_risk_group"
all_cancers_genes_surv_comb$risk_perc_tag[all_cancers_genes_surv_comb$risk_perc > 0.75] = "75%_more_risk_group"
sig_lncs = as.data.table(all_cancers_genes_surv_comb)
#--------------------------------------------------------------------------------------------------
#ONLY FDR < 0.1 SIGNIFICNAT#
#--------------------------------------------------------------------------------------------------
sig_lncs = as.data.table(filter(all_cancers_genes_surv_comb, fdr >= -log10(0.1)))
saveRDS(sig_lncs, file="3671_prognostic_lncRNAs_fdr0.1.rds")
all_cancers_genes_surv_comb = sig_lncs
#summary freq
sig_lncs = as.data.table(table(sig_lncs$canc, sig_lncs$gene))
sig_lncs = as.data.table(filter(sig_lncs, N > 0))
colnames(sig_lncs)[1] = "canc"
sig_lncs = merge(sig_lncs, canc_conv, by="canc")
#calculate how many lncRNAs overlap between cancer types
cancs = unique(sig_lncs$type) #note these are FDR significant
get_pairs = function(canc){
canc_lncs = sig_lncs$V2[sig_lncs$type == canc]
z = which(sig_lncs$V2 %in% canc_lncs)
canc_pairs = sig_lncs[z,]
cp = as.data.table(table(canc_pairs$type))
cp$canc1 = canc
colnames(cp)[1] = "canc2"
return(cp)
}
all_canc_pairs = llply(cancs, get_pairs)
all_canc_pairs = ldply(all_canc_pairs)
all_canc_pairs = as.data.table(all_canc_pairs)
all_canc_pairs = all_canc_pairs[order(-N)]
all_canc_pairs$canc1 = factor(all_canc_pairs$canc1, levels=unique(all_canc_pairs$canc1))
all_canc_pairs$canc2 = factor(all_canc_pairs$canc2, levels=unique(all_canc_pairs$canc1))
#SUMMARY OF HOW MANY PROGNOSTIC LNCRNAS IN COMMON BETWEEN CANCER TYPES
pdf("overlap_ALL_sig_lncRNA_cands_bw_cancers_aug28.pdf", width=8, height=5)
g = ggplot(all_canc_pairs, aes(canc1, canc2)) +
geom_tile(aes(fill=N)) +
geom_text(aes(label = N), size=1.5) +
scale_fill_gradient(low = "grey", high = "orange", na.value = 'transparent') +
xlab("Cancer 1") + ylab("Cancer 2") + theme_bw()
ggpar(g,
font.tickslab = c(8,"plain", "black"),
xtickslab.rt = 45, legend.title="# lncRNAs \noverlap")
dev.off()
#order by most significant to least significant
order = as.data.table(table(all_cancers_genes_surv_comb$canc, all_cancers_genes_surv_comb$fdrsig))
order = order[order(-V1,N)]
z = order[order(order$V1, -order$N),]
# Remove duplicates
z1 = z[!duplicated(z$V1),]
#order again
z1 = z1[order(-V2,N)]
order = z1$V1
all_cancers_genes_surv_comb$canc <- factor(all_cancers_genes_surv_comb$canc, levels = order)
all_cancers_genes_surv_comb$canc # notice the changed order of factor levels
all_cancers_genes_surv_comb$risk[all_cancers_genes_surv_comb$HR >= 1] = "Unfavourable"
all_cancers_genes_surv_comb$risk[all_cancers_genes_surv_comb$HR < 1] = "Favourable"
#Variation 2 of survival overview plot
head(all_cancers_genes_surv_comb)
#all_cancers_genes_surv_comb$HR = log2(all_cancers_genes_surv_comb$HR)
#summarize number favourable and unfabourable lcnRNAs by fdr significance per cancer type
all_cancers_genes_surv_comb = as.data.table(filter(all_cancers_genes_surv_comb, fdr >= -log10(0.1)))
#get order of cancer types by total number of lncRNAs
order = as.data.table(table(all_cancers_genes_surv_comb$type, all_cancers_genes_surv_comb$fdrsig))
order = as.data.table(dplyr::filter(order, N >0))
order = order[order(V2, -N)]
order = unique(order$V1)
summ = as.data.table(table(all_cancers_genes_surv_comb$type, all_cancers_genes_surv_comb$fdrsig,
all_cancers_genes_surv_comb$risk))
colnames(summ) = c("Cancer", "Sig", "Risk", "N")
summ = as.data.table(dplyr::filter(summ, N > 0))
#barplot----summary
#only include significant ones
#how many significant favourable vs unfavourable
summ$Cancer = factor(summ$Cancer, levels = order)
summ$Risk = factor(summ$Risk, levels = c("Unfavourable", "Favourable"))
#how many unique lncrnas per cancer type
unique = as.data.table(table(all_cancers_genes_surv_comb$canc, all_cancers_genes_surv_comb$gene))
unique = unique[order(N)]
unique = as.data.table(filter(unique, N >0))
unique_sum = as.data.table(table(unique$V2))
unique_sum = unique_sum[order(N)]
filter(unique_sum, N ==1)
unique_only_one = as.data.table(filter(unique_sum, N ==1))
colnames(unique_only_one)[1] = "gene"
colnames(unique)[2] = "gene"
unique_sum = merge(unique_only_one, unique, by="gene")
table(unique_sum[,3])
######################################
#FIGURE 1B PART 1---------------------
######################################
pdf("final_figure_1B.pdf", height=6, width=6)
g = ggbarplot(summ, "Cancer", "N",
fill = "Risk", color = "Risk",
palette = "npg")
ggpar(g,
font.xtickslab = c(9,"plain", "black"),
xtickslab.rt = 45) + labs(x="Cancer type", y="Number of prognostic lncRNAs") +
ggtitle("Number of Univariate Significant lncRNAs, adjusted CoxPH p-val < 0.1")+
scale_y_continuous(breaks=seq(0,2250,250))
dev.off()
#---------------------------------------------------------------------------------
### Figure 1 part 2 - correlations between prognostic lncRNAs in each cancer type
#---------------------------------------------------------------------------------
### Get corrplot of survival candidates within each cancer type for significant lncRNAs
head(all_cancers_genes_surv_comb)
all_cancers_genes_surv_comb = as.data.table(all_cancers_genes_surv_comb)
cancers = (unique(all_cancers_genes_surv_comb$canc))
#function apply to each cancer type and plot corrplot for significant lncRNAs (sig & fdrsig)
library(corrplot)
library(Hmisc)
# ++++++++++++++++++++++++++++
# flattenCorrMatrix
# ++++++++++++++++++++++++++++
# cormat : matrix of the correlation coefficients
# pmat : matrix of the correlation p-values
# Flatten a square correlation matrix and its matching p-value matrix into a
# long data frame with one row per upper-triangle pair.
#   cormat: matrix of correlation coefficients (with row names)
#   pmat:   matrix of correlation p-values, same dimensions as cormat
# Returns a data.frame with columns row, column, cor, p.
flattenCorrMatrix <- function(cormat, pmat) {
  idx <- which(upper.tri(cormat), arr.ind = TRUE)
  data.frame(
    row = rownames(cormat)[idx[, "row"]],
    column = rownames(cormat)[idx[, "col"]],
    cor = cormat[idx],
    p = pmat[idx]
  )
}
# For one cancer type: compute all pairwise Spearman correlations among its
# FDR-significant prognostic lncRNAs (from global `all_cancers_genes_surv_comb`
# and expression table `rna`), and summarize how many pairs are FDR-significant.
# Returns c(cancer, total_pairs, sig_pairs, fraction_sig) as a character
# vector, or NULL (implicitly) when the cancer has fewer than 2 such genes.
get_summary = function(cancer){
  print(cancer)
  #get sig genes
  canc_genes = as.data.table(dplyr::filter(all_cancers_genes_surv_comb, canc == cancer))
  # Need at least two genes to form a correlation pair.
  if((dim(canc_genes)[1] >=2)){
  genes = unique(canc_genes$gene)
  canc_exp = subset(rna, Cancer == cancer)
  rownames(canc_exp) = canc_exp$patient
  canc_exp = canc_exp[,which(colnames(canc_exp) %in% c(genes))]
  # rcorr (Hmisc) returns $r (rho) and $P (p-value) matrices.
  res2 = rcorr(as.matrix(canc_exp), type="spearman")
  res2 = flattenCorrMatrix(res2$r, res2$P)
  res2$fdr = p.adjust(res2$p, method="fdr")
  res2 = as.data.table(res2)
  res2 = res2[order(fdr)]
  #total pairs
  tot_pairs = nrow(res2)
  res2 = as.data.table(dplyr::filter(res2, fdr <= 0.05))
  sig_pairs = nrow(res2)
  #%
  perc = sig_pairs/tot_pairs
  # c() coerces everything to character here — callers must re-convert.
  row = c(as.character(cancer), tot_pairs, sig_pairs, perc)
  return(row)
  }
}
#get correlations
# For one cancer type: compute pairwise Spearman correlations among its
# prognostic lncRNAs and annotate each pair with whether the two genes'
# hazard-ratio directions agree ("U" both unfavourable, "F" both favourable,
# "D" discordant). Reads globals `all_cancers_genes_surv_comb` and `rna`.
# Returns the per-pair data.table, or NULL (implicitly) if < 2 genes.
get_pairs_results = function(cancer){
  print(cancer)
  #get sig genes
  canc_genes = as.data.table(dplyr::filter(all_cancers_genes_surv_comb, canc == cancer))
  if((dim(canc_genes)[1] >=2)){
  genes = unique(canc_genes$gene)
  canc_exp = subset(rna, Cancer == cancer)
  rownames(canc_exp) = canc_exp$patient
  canc_exp = canc_exp[,which(colnames(canc_exp) %in% c(genes))]
  res2 = rcorr(as.matrix(canc_exp), type="spearman")
  res2 = flattenCorrMatrix(res2$r, res2$P)
  res2$fdr = p.adjust(res2$p, method="fdr")
  res2 = as.data.table(res2)
  res2 = res2[order(fdr)]
  tot_pairs = nrow(res2)
  #res2 = as.data.table(dplyr::filter(res2, fdr <= 0.05))
  # NOTE(review): the fdr filter above is commented out, so sig_pairs here
  # equals tot_pairs — confirm this was intentional.
  sig_pairs = nrow(res2)
  #check if lncRNA-lncRNA correlations match HRs
  # Classify a gene pair by whether their Cox hazard ratios point the same way.
  check_dir = function(lnc1, lnc2){
  hr_lnc1 = canc_genes$HR[canc_genes$gene == lnc1]
  hr_lnc2 = canc_genes$HR[canc_genes$gene == lnc2]
  check1 = ((hr_lnc1 > 1) & (hr_lnc2 > 1))
  check2 = ((hr_lnc1 < 1) & (hr_lnc2 < 1))
  if(check1){match = "U"
  }else if(check2){
  match = "F"
  }else{match = "D"}
  return(match)
  }
  res2$match = mapply(check_dir, res2$row, res2$column)
  #ordered by strongest correlations to weakest correlations
  res2 = res2[order(match, cor)]
  res2$cor_sum[res2$cor > 0] = "Pos"
  res2$cor_sum[res2$cor < 0] = "Neg"
  #summarize how many of each kind
  # `t` is built but not returned; only the per-pair table `res2` leaves the
  # function — the summary below appears to be dead code. TODO confirm.
  t = table(res2$match, res2$cor_sum)
  t = as.data.table(tidy(t))
  t = t[order(n)]
  t$total_sig_pairs = sig_pairs
  t$total_pairs = tot_pairs
  t$perc = t$n/sig_pairs
  t$cancer = cancer
  res2$cancer = cancer
  return(res2)
  }
}
canc_results_pairs_types = llply(cancers, get_pairs_results, .progress = "text")
#save
saveRDS(canc_results_pairs_types, file="correlation_lnc_lnc_results_april10_res2.rds")
#remove null
canc_results_pairs_types2 = Filter(Negate(is.null), canc_results_pairs_types)
canc_results_pairs_types2 = ldply(canc_results_pairs_types2)
canc_results_pairs_types2 = as.data.table(canc_results_pairs_types2)
colnames(canc_conv)[2] = "cancer"
canc_results_pairs_types2 = merge(canc_results_pairs_types2, canc_conv, by="cancer")
canc_results_pairs_types2$HR_pair = ""
canc_results_pairs_types2$HR_pair[canc_results_pairs_types2$match == "F"] = "Both \nFavourable"
canc_results_pairs_types2$HR_pair[canc_results_pairs_types2$match == "U"] = "Both \nUnfavourable"
canc_results_pairs_types2$HR_pair[canc_results_pairs_types2$match == "D"] = "Opposite \nHRs"
#keep only fdr significant ones
canc_results_pairs_types2 = as.data.table(filter(canc_results_pairs_types2, fdr < 0.05, abs(cor)))
#cancer order keep same as first plot
canc_results_pairs_types2$type <- factor(canc_results_pairs_types2$type, levels = rev(order))
canc_results_pairs_types2$column_name = paste(canc_results_pairs_types2$HR_pair, canc_results_pairs_types2$Exp_pair)
saveRDS(canc_results_pairs_types2, file="correlation_lnc_lnc_results_april10_res2.rds")
canc_results_pairs_types2 = readRDS("correlation_lnc_lnc_results_april10_res2.rds")
######################################
#FIGURE 1B PART 2---------------------
######################################
pdf("final_figure_1B_parttwo.pdf", width=4, height=4)
canc_results_pairs_types2$HR_pair = factor(canc_results_pairs_types2$HR_pair, levels = c("Both \nUnfavourable", "Opposite \nHRs", "Both \nFavourable"))
# Change density plot fill colors by groups
g = ggplot(canc_results_pairs_types2, aes(x=cor, fill=HR_pair), color="black") +
geom_density(alpha=0.4, aes(x=cor, y=..density..)) + xlab("Spearman Correlation") + scale_fill_brewer(palette="Set1") +
theme(legend.position="bottom")
ggpar(g,
font.tickslab = c(9,"plain", "black"), font.legend=c(8, "plain", "black"))
dev.off()
######################################
#FIGURE 1C PART ---------------------
######################################
gtex_res_risk = readRDS("lncRNAs_risk_groups_correlation_ranks.rds")
gtex_res_risk = ldply(gtex_res_risk)
gtex_res_risk$V2 = as.numeric(gtex_res_risk$V2)
summary(gtex_res_risk$V2)
gtex_res_risk = as.data.table(gtex_res_risk)
colnames(gtex_res_risk)[8:9] = c("Spearman_rho", "Spearman_p")
gtex_res_risk$Spearman_fdr = p.adjust(gtex_res_risk$Spearman_p, method="fdr")
gtex_res_risk$wilcox_p = as.numeric(gtex_res_risk$wilcox_p)
gtex_res_risk$wilcox_p = p.adjust(gtex_res_risk$wilcox_p, method="fdr")
gtex_res_risk = as.data.table(filter(gtex_res_risk, wilcox_p < 0.05))
#gtex_res = readRDS("significant_GTEX_comparisons_april10.rds")
#z = which(gtex_res$canc %in% c("Glioblastoma multiforme", "Brain Lower Grade Glioma"))
#brain = gtex_res[z,]
#gtex_res = gtex_res[-z,]
#z = which(brain$tis %in% c("Brain - Cerebellum", "Brain - Spinal cord (cervical c-1)"))
#brain = brain[z,]
#gtex_res = rbind(gtex_res, brain)
#get lncRNAs that are prognosic with gtex data
gtex_res = gtex_res_risk
head(gtex_res)
gtex_res$combo2 = paste(gtex_res$lnc, gtex_res$canc, sep="_")
head(all_cancers_genes_surv_comb)
all_cancers_genes_surv_comb$combo2 = paste(all_cancers_genes_surv_comb$gene, all_cancers_genes_surv_comb$type, sep="_")
gtex_res = merge(gtex_res, all_cancers_genes_surv_comb, by="combo2")
gtex_res$median_diff = as.numeric(gtex_res$median_diff)
gtex_res = as.data.table(filter(gtex_res, abs(median_diff) >= 0.2))
###Data
#gtex = readRDS("allGTEX_lncRNAs_scored_Feb2619.rds")
#tcga = readRDS("TCGA_all_lncRNAs_cancers_scored_byindexMay23.rds")
#gtex = gtex[,c(1:4, 6, 5)]
gtex_res$Hazard = gtex_res$risk.y
gtex_res$Hazard = factor(gtex_res$Hazard, levels = c("Unfavourable", "Favourable"))
gtex_res$Spearman_rho = as.numeric(gtex_res$Spearman_rho)
gtex_res$med[gtex_res$median_diff > 0] = "upreg"
gtex_res$med[gtex_res$median_diff < 0] = "downreg"
table(gtex_res$med, gtex_res$Hazard)
pdf("final_figure_1C.pdf", height=5, width=6)
#unfav
m <- ggplot(gtex_res, aes(x = median_diff, y = HR)) +
geom_point(aes(colour = Hazard)) + scale_color_npg()
m + geom_density_2d(colour="black") + geom_hline(yintercept=0, linetype="dashed", color = "black")+
xlab("Median(High risk rank - GTEx rank)") + ylab("Hazard Ratio")+
geom_vline(xintercept=0, linetype="dashed", color = "black") #+ scale_colour_gradient(low = "blue", high = "red")
dev.off()
|
50e5cd7772c68ab3ef6b72d8f830153fa5f27c52
|
52845fa2ba9ad01cbf89590a006856e47db30b0e
|
/man/conv.Rd
|
7549d83005bb90dbe85bd47fd2a382d590ddeaf0
|
[] |
no_license
|
USEPA/CoralBCG
|
fa775a0303850503a759066aeb6ca78a5df7efab
|
594b6c3168f889d1d20ee27bc5e4309b526e051e
|
refs/heads/master
| 2020-12-30T16:46:02.907594
| 2017-06-26T21:32:49
| 2017-06-26T21:32:49
| 91,025,999
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 497
|
rd
|
conv.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/conv.R
\docType{data}
\name{conv}
\alias{conv}
\title{Data frame of coral size conversion factors}
\format{A data frame:
\describe{
\item{\code{spec}}{chr string of species name}
\item{\code{conv}}{numeric for conversion factor}
}}
\usage{
conv
}
\description{
Morphological conversion factors are based on Table J-1 in EPA/600/R-13/350 (same as Table 3-2 in EPA/600/R-12/029) and the equation
}
\keyword{datasets}
|
37da42edf97b7f35d4cd1678eb64ac8a3c6ef233
|
9bc5e4203c6fa2831222de6aa6eca118d009214e
|
/Functions/Complex network.R
|
919f684bcbd64ed0bf9a9b3deb825113bb060e5f
|
[] |
no_license
|
klwilson23/SpatialWatershed
|
8117e17c682889257e6156f423cfacab29928d9e
|
f9e2d2983d476740cfd8b0144d6fcea9e22304e7
|
refs/heads/master
| 2023-06-07T22:12:49.509202
| 2023-06-01T02:09:36
| 2023-06-01T02:09:36
| 154,884,373
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,488
|
r
|
Complex network.R
|
# Author: Kyle L Wilson
# Institution: University of Calgary
# Date: July 26, 2017
# Create simulation social-ecological model based on Hunt et al. 2011
# Date: April 10, 2018
# create 'makeLandscape' function that generates spatial features of the landscape SES model
# Builds a 5x5 lattice of populations around one central metapopulation node
# ("1") as an igraph graph, with edge weights = 1/distance, then plots it.
library(igraph)
library(boot)
library(timeSeries)
library(shape)
source("Figures/some functions.R")
NmetaPatch <- 1 # how many metapopulation patches?
NlocalPops <- 24 # how many populations in metapopulation?
# extend() (from the sourced helpers) makes letter labels beyond Z available.
MORELETTERS <- extend(LETTERS)
# 5x5 patch grid: 12 lettered patches, the central "1" node, then 12 more.
patches <- matrix(c(MORELETTERS(1:(NlocalPops/2)),"1",MORELETTERS((NlocalPops/2+1):NlocalPops)),nrow=5,ncol=5,byrow=T)
diameter <- 15
colfunc <- colorRampPalette(c("royalblue4","dodgerblue","lightblue","darkorange1","firebrick"))
# declare coordinates of patches for: level (1) branches, level (2) branhes, level (3) branches, and level (4) branches
# add more levels if desired.
# Build the edge list by connecting each patch to its Moore neighborhood
# (all adjacent cells, including diagonals), excluding itself.
edge1 <- c()
for(i in 1:nrow(patches))
{
  for(j in 1:ncol(patches))
  {
    edgeList <- as.vector(t(patches[max(1,(i-1)):min(nrow(patches),(i+1)),max(1,(j-1)):min(ncol(patches),(j+1))]))[-which(as.vector(t(patches[max(1,(i-1)):min(nrow(patches),(i+1)),max(1,(j-1)):min(ncol(patches),(j+1))]))==patches[i,j])]
    edge1 <- c(edge1,as.vector(sapply(edgeList,function(x){c(patches[i,j],x)})))
  }
}
edgesTot <- edge1
complexLandscape <- graph(edges=as.character(edgesTot),directed=F)
# set distance between patches for: (1) main branches, (2) secondary branhes, (3) tertiary branches
# add more levels if desired.
dist2 <- rep(diameter/2,length(edgesTot))
# Edge weight is inverse distance; edges touching the central node "1" get
# the full-diameter distance instead.
E(complexLandscape)$weight <- 1/dist2
E(complexLandscape)$weight[grep("1",attr(E(complexLandscape),"vnames"))] <- 1/diameter
# Central node drawn larger than the local populations.
node.size<-setNames(c(rep(0.8,NlocalPops/2),2,rep(0.8,NlocalPops/2)),V(complexLandscape)$names)
# Color vertices: grey for the central node, a ramp color per lettered patch.
V(complexLandscape)$color[c(which(patches=="1"),as.vector(t(sapply(patches[-which(patches=="1")],function(x){match(x,V(complexLandscape)$name)}))))] <- c("grey50",as.vector(t(sapply(MORELETTERS(1:NlocalPops),function(x){rep(colfunc(NlocalPops)[which(x==MORELETTERS(1:NlocalPops))],length(match(x,V(complexLandscape)$name)))}))))
#layout(1)
#par(mar=c(5,4,1,1))
#tiff("watershed.tiff",compression="lzw",units="in",height=8,width=8,res=800)
plot(complexLandscape,col="dodgerblue",layout=layout.auto(complexLandscape),vertex.size=node.size*15)
#dev.off()
# Graph-distance diagnostics and the full pairwise distance matrix.
distance_table(complexLandscape)
mean_distance(complexLandscape)
dist_matrix <- distances(complexLandscape,v=V(complexLandscape),to=V(complexLandscape))
|
d197d27d979ac562e8d81f634d095d9edb2f8bf5
|
87b11807666177a625e1fd6537ae704e1ad97d39
|
/plot1.R
|
aba7424bac009c64ae551e9f3f8c6d48e1083187
|
[] |
no_license
|
jollylamb/ExData_Project2
|
eb27ca96f728aff270c7558f248cd05c798d7d94
|
abe145ff214e2f751dc47a39c3f9b98437be38ea
|
refs/heads/master
| 2016-09-16T16:14:18.717186
| 2015-07-26T19:00:53
| 2015-07-26T19:00:53
| 39,597,820
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 437
|
r
|
plot1.R
|
library(dplyr)
# Plot total PM2.5 emissions per year from the NEI summary file and write the
# figure to plot1.png. Side effects: reads "summarySCC_PM25.rds" from the
# working directory and writes "plot1.png".
drawPlot1 <- function() {
  nei <- readRDS("summarySCC_PM25.rds")
  # Sum emissions within each year (NA values dropped).
  totals <- summarize(group_by(nei, year),
                      total = sum(Emissions, na.rm = T))
  png(file = "plot1.png")
  plot(totals$year, totals$total, type = "l",
       xlab = "Year", ylab = "Total Emissions")
  dev.off()
}
|
a2a58ff81ae3c13c8ade9a003cb51c4997f68374
|
11716f4411208b2a45c7098587777a41b1a76c73
|
/RealTime_plot_FigS2_map.R
|
0b703004f1d7da2521ff3ce5c71d7c1f8d2798a9
|
[
"MIT"
] |
permissive
|
bbarres/Realtime
|
9ae5cd780e5987d2155e2f7e5537120bd1792381
|
c4501a6da58f8d64cf5bb02f8c3abc6d6859c6e6
|
refs/heads/master
| 2023-06-26T20:21:17.782659
| 2023-06-23T21:41:35
| 2023-06-23T21:41:35
| 33,980,368
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,759
|
r
|
RealTime_plot_FigS2_map.R
|
##############################################################################/
##############################################################################/
#Plot the map of the RealTime experiment
##############################################################################/
##############################################################################/
# Loads the RealTime experiment data (provides `dispo`, etc.).
source("RealTime_load.R")
##############################################################################/
#map of the experimental set up####
##############################################################################/
#how many individuals in each treatment for each family
table(dispo$family_simp,dispo$trait)
# Family palette: 15 brewer colors with white/black placeholders inserted at
# the positions of unused family levels — TODO confirm level ordering.
colovec<-c(brewer.pal(12,"Paired"),brewer.pal(8,"Dark2")[c(1,4,8)])
colovec<-c(colovec[1:3],"#ffffff",colovec[4:15],"#ffffff","#000000",
           "#ffffff","#ffffff")
pdf(file="output/Figure_S2_map.pdf",width=20,height=12)
op<-par(mar=c(0,0,15,0))
# One point per tree, symbol and fill keyed to its family.
plot(dispo$coord_X,dispo$coord_Y,bty="n",ann=FALSE,axes=FALSE,
     pch=c(rep(21,17),24,22)[as.numeric(dispo$family_simp)],
     bg=colovec[as.numeric(dispo$family_simp)])
# Label the nine experimental blocks (A1-C3) with their treatment.
text(x=c(20,620,1220,20,620,1220,20,620,1220),
     y=c(760,760,760,455,455,455,145,145,145),
     labels=c("A1 / natural","A2 / protected","A3 / natural",
              "B1 / natural","B2 / natural","B3 / protected",
              "C1 / protected","C2 / natural","C3 / natural"),
     xpd=TRUE,cex=2,font=2,adj=c(0,0))
# Family legend, skipping the placeholder levels (4 and 17).
legend(x=1430,y=1020,horiz=FALSE,x.intersp=1,xpd=TRUE,pt.cex=2,
       text.width=50,bty="n",ncol=4,
       legend=as.character(levels(dispo$family_simp))[c(1:3,5:16,18,19)],
       pch=c(rep(21,15),24,22),y.intersp=1.8,
       pt.bg=colovec[-c(4,17)],title="Families",title.cex=2)
title(main="Map of the experimental setup",cex.main=4,line=10)
par(op)
#export to .pdf 20 x 12 inches
dev.off()
##############################################################################/
#map of the dead tree####
##############################################################################/
colovec<-c(brewer.pal(12,"Paired")[c(rep(6,9),9,4)])
op<-par(mar=c(0,0,15,0))
plot(dispo$coord_X,dispo$coord_Y,bty="n",ann=FALSE,axes=FALSE,
pch=c(rep(21,17),24,22)[as.numeric(dispo$family_simp)],
bg=colovec[as.numeric(as.factor(dispo$an_mort))])
text(x=c(20,620,1220,20,620,1220,20,620,1220),
y=c(760,760,760,455,455,455,145,145,145),
labels=c("A1 / natural","A2 / protected","A3 / natural",
"B1 / natural","B2 / natural","B3 / protected",
"C1 / protected","C2 / natural","C3 / natural"),
xpd=TRUE,cex=2,font=2,adj=c(0,0))
legend(x=230,y=950,horiz=FALSE,x.intersp=0.5,y.intersp=0.8,
xpd=TRUE,pt.cex=3,bty="n",text.font=2,
legend=c("Dead","Acorn","Alive"),
pch=c(rep(15,3)),
col=colovec[c(1,10,11)])
legend(x=60,y=950,horiz=FALSE,x.intersp=0.5,y.intersp=0.8,
xpd=TRUE,pt.cex=3,bty="n",text.font=2,
legend=c("Experiment","CC","hd"),
pch=c(21,24,22))
title(main="Dead or alive map",cex.main=4,line=10)
par(op)
#export to .pdf 20 x 12 inches
##############################################################################/
#map of the tree height####
##############################################################################/
#defining a vector to chose the columns with tree height information
temp<-c("Hfin09","Hfin10","Hfin11","Hfin12",
"Hdeb14","Hdeb15","Hdeb16","Hdeb17")
min(dispo[,temp],na.rm=TRUE)
cut(as.numeric(as.matrix(dispo[,temp])),49,na.rm=TRUE,levels=FALSE)
colovec<-viridis(50)
op<-par(mar=c(0,0,15,0))
plot(dispo$coord_X,dispo$coord_Y,bty="n",ann=FALSE,axes=FALSE,
pch=c(rep(21,17),24,22)[as.numeric(dispo$family_simp)],
bg=colovec[as.numeric(cut(as.numeric(as.matrix(dispo[,temp[8]])),
49,na.rm=TRUE,levels=FALSE))])
text(x=c(20,620,1220,20,620,1220,20,620,1220),
y=c(760,760,760,455,455,455,145,145,145),
labels=c("A1 / natural","A2 / protected","A3 / natural",
"B1 / natural","B2 / natural","B3 / protected",
"C1 / protected","C2 / natural","C3 / natural"),
xpd=TRUE,cex=2,font=2,adj=c(0,0))
legend(x=300,y=950,horiz=FALSE,x.intersp=1,y.intersp=0.5,
xpd=TRUE,pt.cex=3,bty="n",text.font=2,
legend=c("Dead","Acorn","Alive"),
pch=c(rep(15,3)),
col=colovec[c(1,10,11)])
legend(x=60,y=950,horiz=FALSE,x.intersp=1,y.intersp=0.5,
xpd=TRUE,pt.cex=3,bty="n",text.font=2,
legend=c("Experiment","CC","hd"),
pch=c(21,24,22))
title(main=paste("Height in ",temp[8],sep=""),cex.main=4,line=10)
par(op)
#export to .pdf 20 x 12 inches
##############################################################################/
#END
##############################################################################/
|
da91671643410c8879c5eb04f5de40b57e4ca4b6
|
acc054f5a920d158aaedcdccde90134d2e078f27
|
/homebrew.R
|
779fe05081d4b289d311c29788518fd2d9dfb8d0
|
[] |
no_license
|
osairisali/IOpack
|
cdf6bc4923f159cc523e8d62c25bf2754fe68506
|
5be3cb4382d3952998a3ce6538c98cb0950eaa2a
|
refs/heads/master
| 2021-01-20T03:56:29.243562
| 2017-08-25T07:45:10
| 2017-08-25T07:45:10
| 101,375,986
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,394
|
r
|
homebrew.R
|
# BUILD IO AGGREGATION MATCHING THE SECTOR GROUPS OF THE JATIM GRDP TABLE
# Aggregate a square matrix according to an aggregation index.
# `index` is a two-column (group, sector) position matrix: an indicator
# matrix S is built with S[index] <- 1 and the result is S %*% data %*% t(S),
# i.e. both rows and columns are summed into their groups.
# NOTE(review): this definition is overwritten by the more general `agr`
# defined a few lines below in this file.
agr <- function(data, index) {
  n_groups <- length(unique(index[, 1]))
  indicator <- matrix(0, n_groups, ncol(data))
  indicator[index] <- 1
  indicator %*% data %*% t(indicator)
}
# Aggregate the rows of a (possibly non-square) matrix by an aggregation
# index. `index` is a two-column (group, row) position matrix; an indicator
# matrix S is built with S[index] <- 1 and S %*% data returns the group-wise
# row sums of `data`.
agr2 <- function(data, index) {
  grouper <- matrix(0, length(unique(index[, 1])), nrow(data))
  grouper[index] <- 1
  grouper %*% data
}
# More compact, general version of `agr` (this one wins, overwriting the
# definition above): builds the indicator matrix S from the (group, row)
# `index`, then aggregates a square matrix on both margins
# (S %*% data %*% t(S)) or a non-square matrix on rows only (S %*% data).
# The original abused ifelse() for scalar control flow, relying on the side
# effect of evaluating the branch blocks; replaced with a plain if/else.
agr <- function(data, index) {
  nol <- matrix(0, length(unique(index[, 1])), nrow(data))
  nol[index] <- 1
  if (nrow(data) == ncol(data)) {
    # square matrix: aggregate rows and columns
    nol %*% data %*% t(nol)
  } else {
    # non-square matrix: aggregate rows only
    nol %*% data
  }
}
#1. AGGREGATION OF THE 47x47-SECTOR IO TABLES--------------------------------
#read the 2010 IO table
IO2010 <- read.csv("D:/Dropbox/R Works/Input-Output Package/DATASET IO 2010.CSV",
header=T,
row.names=1)
#read the aggregation index matching the 2010 GRDP classification
ind2010 <- data.matrix(read.csv("D:/Dropbox/R Works/Input-Output Package/INDEX AGREGASI PDRB 2010.csv",
header=F))
#helper `def` (disabled) was meant to define the IO table structure
#def <- function(data,
# Am,
# Fm) {
# a1 <- data.matrix(data[Am])
# f1 <- data.matrix(data[Fm])
#}
#define the 2010 IO data: matrix A, final demand, value added and output
A2010 <- data.matrix(IO2010[1:110,1:110]) #110x110 matrix
#tesA2010 <- def(IO2010, Am =c(1:110, 1:110), Fm = c(1:110, 112:117))
F2010 <- data.matrix(IO2010[1:110, 112:117]) #110x6 matrix
#read the 2006 IO table
IO2006 <- read.csv("D:/Dropbox/R Works/Input-Output Package/DATASET IO 2006.csv",
header=T,
row.names=1)
#read the aggregation index matching the 2006 GRDP classification
ind2006 <- data.matrix(read.csv("D:/Dropbox/R Works/Input-Output Package/INDEX AGREGASI PDRB 2006.csv",
header=F))
#define the 2006 IO data: matrix A, final demand, value added and output
A2006 <- data.matrix(IO2006[1:110,1:110]) #110x110 matrix
F2006 <- data.matrix(IO2006[1:110, 112:117]) #110x6 matrix
#Perform the aggregation
#data and variables use the definitions from the top of this script
#load the initial 2010 aggregation index (47x47 sectors)
ind2010c <- data.matrix(read.csv("D:/Dropbox/R Works/Input-Output Package/INDEX agregasi 2010.csv",
header=F))
#load the initial 2006 aggregation index (47x47 sectors)
ind2006c <- data.matrix(read.csv("D:/Dropbox/R Works/Input-Output Package/INDEX agregasi 2006.csv",
header=F))
#load the reordering index (sorts all IO sectors by their group)
indurut <- data.matrix(read.csv("D:/Dropbox/R Works/Input-Output Package/INDEX agregasi pengurutan.csv",
header=F))
#aggregate matrix A
gA2010c <- agr(A2010, ind2010c)
#NOTE(review): `agr3` is not defined anywhere in this file -- these calls
#will fail unless it is supplied elsewhere; confirm or remove.
tes1 <- agr3(A2010, ind2010c)
gF2010c <- agr2(F2010, ind2010c)
tes2 <- agr3(F2010, ind2010c)
#the aggregated result is then reordered with the reordering index matrix
ugA2010c <- agr(gA2010c, indurut)
tesgA2010c <- agr3(gA2010c, indurut)
#NOTE(review): agr2() takes two arguments (data, index) but is called here
#with three -- this errors at run time; likely meant agr2(gF2010c, indurut).
gF2010c <- agr2(gA2010c, gF2010c, indurut)
#AGGREGATION OF THE 2006 IO TABLE (47x47 SECTORS)
#aggregate matrix A
gA2006c <- agr(A2006, ind2006c)#intermediate input
gF2006c <- agr2(F2006, ind2006c)#final demand
#the aggregated result is then reordered with the reordering index matrix
gA2006c <- agr(gA2006c, indurut)
gF2006c <- agr2(gF2006c, indurut)
#CONSTANT-PRICE CALCULATION, INFLATION METHOD--------------------------------
#Deflator function for the IO components: matrix A, final demand and output.
#matA: inter-industry transaction matrix; findem: final-demand matrix;
#O: output vector; kVA: value added at constant prices; VA: value added at
#current prices. Returns cbind(kA, kF, kO), the constant-price components.
deflator <- function(matA, findem, O, kVA, VA){
#compute the deflator phi
sumA <- matrix(1, 1, ncol(matA)) #1-row summation matrix: sumA %*% matA gives column sums of matA
phi <- kVA/as.numeric(O-(sumA %*% matA)) #phi = constant-price VA / current-price VA per sector
#compute constant prices
#NOTE(review): `matA * phi` recycles phi column-major, i.e. it scales the
#ROWS of matA -- confirm this is the intended margin for the deflation.
kA <- matA * phi #constant-price matrix A
kO <- O * phi #constant-price output
#before computing kF, the ratio of final demand to total VA is needed
rF <- findem * (1/(sum(VA)))
kF <- rF * kVA #constant-price final demand
cbind(kA, kF, kO)
}
# Inflator to East-Java IO-2010 price levels: rescales a value, vector or
# matrix elementwise by the multiplier `x` and returns the result (same
# shape as `data`).
inflator <- function(data, x) {
  x * data
}
#CONSTANT-PRICE CALCULATION FOR THE INITIALLY AGGREGATED 47x47 IO TABLES
#NOTE(review): this header originally said base year 2011, but the inflation
#indices below run to 2013 and later comments say 2013 -- confirm base year.
#COMPUTE THE 2000 IO TABLE AT CONSTANT PRICES
#load the year-2000 IO data
IO2000 <- data.matrix(read.csv("D:/Dropbox/R Works/Input-Output Package/AGREGASI AWAL PENELITIAN IO 2000 NO VA.csv",
header=T,
row.names=1))
VA2000 <- data.matrix(read.csv("D:/Dropbox/R Works/Input-Output Package/AGREGASI AWAL PENELITIAN VA 2000.csv",
header=T,
row.names=1))
VA2006b <- data.matrix(read.csv("D:/Dropbox/R Works/Input-Output Package/AGREGASI AWAL PENELITIAN VA 2006.csv",
header=T,
row.names=1))
IO2006b <- data.matrix(read.csv("D:/Dropbox/R Works/Input-Output Package/AGREGASI AWAL PENELITIAN IO 2006 NO VA.csv",
header=T,
row.names=1))
#Compute constant prices with base year 2013
#annual inflation indices for 2000 through 2013 (as growth factors)
infl <- (c(10.34, 14.13, 9.15, 4.23, 5.92, 15.19, 6.76, 6.48, 9.66, 3.62, 6.96, 4.09, 4.5, 7.59) / 100) + 1
#cumulative inflation from 2000 to 2013
infl2 <- prod(infl)
#cumulative inflation from 2006 to 2013
infl3 <- prod(infl[7:length(infl)])
#cumulative inflation from 2010 to 2013
infl4 <- prod(infl[11:length(infl)])
#convert the 2000 IO table to 2013 prices
ZF2000k <- IO2000[1:47, 1:53] * infl2
#NOTE(review): `gM2010` is not defined anywhere in this file -- confirm it
#is loaded elsewhere before this line runs.
M2000k <- rowSums(gM2010 * infl2)
X2000k <- rowSums(ZF2000k)
IO2000k <- cbind(ZF2000k, X2000k)
#convert the 2000 VA to constant prices (output minus intermediate inputs)
VA2000k <- t(X2000k) - colSums(ZF2000k[1:47, 1:47])
#convert the 2006 IO table to constant prices
ZF2006k <- IO2006b[1:47, 1:53] * infl3
X2006k <- rowSums(ZF2006k)
IO2006k <- cbind(ZF2006k, X2006k)
#convert the 2006 VA to constant prices
VA2006k <- t(X2006k) - colSums(ZF2006k[1:47, 1:47])
#convert the 2010 IO table to 2013 prices
#NOTE(review): `res2010c` is not defined in this file either -- verify.
ZF2010k <- res2010c[,1:53] * infl4 #perubahan ke harga 2013 hanya butuh elemen ke-12 vector infl
X2010k <- rowSums(ZF2010k)
IO2010k <- cbind(ZF2010k, X2010k)
#compute the 2010 VA at constant prices
VA2010k <- t(X2010k) - colSums(ZF2010k[,1:47])
# FINAL-DEMAND DECOMPOSITION -------------------------------------------------
# Leontief inverse: given the inter-industry transaction matrix `matZ` and
# the gross output vector `vecO`, forms the technical-coefficient matrix
# A = Z %*% diag(1/O) and returns (I - A)^-1.
Leontief <- function(matZ, vecO) {
  coef_A <- matZ %*% solve(diag(vecO))
  identity_I <- diag(1, nrow(matZ), ncol(matZ))
  solve(identity_I - coef_A)
}
#FINAL-DEMAND DECOMPOSITION FUNCTION
#Splits the change in output between two periods into level, mix and
#distribution effects of final demand. matfindem1/matfindem2 are the
#final-demand matrices (sectors x categories) of the two periods and
#matL1/matL2 the corresponding Leontief inverses. Returns a matrix with
#columns "Level-Effect", "Mix-Effect", "Distribution-Effect".
dfindem <- function(matfindem1, matfindem2, matL1, matL2){
Sr1 <- matrix(1, ncol(matfindem1), 1) #summation matrix S for row sums
ft1 <- matfindem1 %*% Sr1 #row sums: total final demand per sector
Sc1 <- matrix(1, 1, nrow(ft1)) #summation matrix S for column sums
skl1 <- as.numeric(Sc1 %*% ft1) #grand total, as numeric so it can divide
yt1 <- t(Sc1 %*% matfindem1) #Yt: total final demand per category
dt1 <- yt1/skl1 #dt: category shares of total final demand
#build matrix Bt (sector distribution within each category)
diayt1 <- diag(as.numeric(yt1), ncol(matfindem1), ncol(matfindem1)) #diagonal matrix of Yt
Bt1 <- matfindem1 %*% solve(diayt1) #Bt: columns normalised to sum to 1
#same calculation for matfindem2
Sr2 <- matrix(1, ncol(matfindem2), 1) #summation matrix S for row sums
ft2 <- matfindem2 %*% Sr2 #row sums
Sc2 <- matrix(1, 1, nrow(ft2)) #summation matrix S for column sums
skl2 <- as.numeric(Sc2 %*% ft2) #grand total, as numeric so it can divide
yt2 <- t(Sc2 %*% matfindem2) #Yt matrix
dt2 <- yt2/skl2 #dt matrix
#build matrix Bt
diayt2 <- diag(as.numeric(yt2), ncol(matfindem2), ncol(matfindem2)) #diagonal matrix of Yt
Bt2 <- matfindem2 %*% solve(diayt2) #Bt matrix
#decompose the change between matfindem1 and matfindem2
#final-demand level effect (change in the overall level)
matlev <- (1/2 * (skl2 - skl1)) * ((Bt1 %*% dt1) + (Bt2 %*% dt2))
#final-demand mix effect (change in the sector mix within categories)
matmix <- 1/2 * (((skl1 * (Bt2 - Bt1)) %*% dt2) + ((skl2 * (Bt2 - Bt1) %*% dt1)))
#final-demand distribution effect (change in category shares)
matdis <- 1/2 * ((skl1 * Bt1) + (skl2 * Bt2)) %*% (dt2 - dt1)
#final step: translate each effect into output via the average Leontief inverse
matfin <- (1/2 * (matL2 + matL1))
matlevf <- matfin %*% matlev
matmixf <- matfin %*% matmix
matdisf <- matfin %*% matdis
#combine the decomposition results into one matrix
matgab <- cbind(matlevf, matmixf, matdisf)
colnames(matgab) <- c("Level-Effect", "Mix-Effect", "Distribution-Effect")
matgab
}
#COMPUTE THE IO FINAL-DEMAND DECOMPOSITION
#COMPUTE THE LEONTIEF INVERSES AT 2010 CONSTANT PRICES (variables come from
#the constant-price section above)
#Leontief inverse for the 2010 IO table
L2010 <- Leontief(IO2010k[1:47, 1:47], rowSums(IO2010k[,1:53]))
#Leontief inverse for the 2006 IO table
L2006 <- Leontief(IO2006k[1:47, 1:47], IO2006k[1:47, 54])
#Leontief inverse for the 2000 IO table
L2000 <- Leontief(IO2000k[1:47,1:47], IO2000k[1:47, 54])
#decomposition analysis for IO 2010 vs 2000 (1)
res1 <- round(dfindem(IO2000k[1:47, 48:53], IO2010k[1:47, 48:53], L2000, L2010), digits=3)
#decomposition analysis for IO 2010 vs 2006 (2)
res2 <- round(dfindem(IO2006k[1:47, 48:53], IO2010k[1:47, 48:53], L2006, L2010), digits=3)
#decomposition analysis for IO 2006 vs 2000 (3)
res3 <- round(dfindem(IO2000k[1:47, 48:53], IO2006k[1:47, 48:53], L2000, L2006), digits=3)
#output change between 2010 and 2000
dX1 <- round(rowSums(IO2010k[,1:53]) - X2000k, digits=3)
#output change between 2010 and 2006
dX2 <- round(rowSums(IO2010k[,1:53]) - X2006k, digits=3)
#output change between 2006 and 2000
dX3 <- round(X2006k - X2000k, digits=3)
#prepend the dX columns to the res matrices
res1 <- cbind(dX1, res1); colnames(res1) <- c("Perubahan Output", "Level-Effect", "Mix-Effect", "Distribution-Effect")
res2 <- cbind(dX2, res2); colnames(res2) <- c("Perubahan Output", "Level-Effect", "Mix-Effect", "Distribution-Effect")
res3 <- cbind(dX3, res3); colnames(res3) <- c("Perubahan Output", "Level-Effect", "Mix-Effect", "Distribution-Effect")
#write the results to csv files
write.csv(res1, file="Dekomposisi Final Demand IO 2010-2000.csv", row.names=T)
write.csv(res2, file="Dekomposisi Final Demand IO 2010-2006.csv", row.names=T)
write.csv(res3, file="Dekomposisi Final Demand IO 2006-2000.csv", row.names=T)
#Technology-matrix decomposition--------------------------------------------
#Variables correspond to those defined earlier in this script.
# For each column j of the coefficient-difference matrix `difA`, isolate
# that column (all other columns zeroed) and compute the row sums of
#   1/2 * (L2 %*% difA_j %*% L1) %*% (f1 + f2),
# i.e. the output effect attributable to coefficient changes in sector j.
# L1/L2 are the two periods' Leontief inverses, f1/f2 their final-demand
# matrices. Returns a matrix whose j-th column holds that effect vector.
demat <- function(difA, L1, L2, f1, f2 ){
  # effect of the single column `indklm` of difA
  mkmat <- function(difA, indklm, L1, L2, f1, f2){
    matA <- matrix(difA[, indklm])
    res <- matrix(0, nrow(difA), ncol(difA))
    res[, indklm] <- matA
    rowSums(1/2 * ((L2 %*% res %*% L1) %*% (f1 + f2)))
  }
  mat <- matrix(0, nrow(difA), ncol(difA))
  # seq_len() is safe for a zero-column input, where the original
  # 1:ncol(difA) would have iterated over c(1, 0)
  for (i in seq_len(ncol(difA))) {
    mat[, i] <- mkmat(difA, i, L1, L2, f1, f2)
  }
  mat
}
#Technology decomposition for IO 2010 and 2000
#Define the technical-coefficient matrix A for every IO data set
#(A = Z %*% diag(1/X), using column 54 = total output)
A2000 <- IO2000k[1:47, 1:47] %*% solve(diag(IO2000k[,54]))
A2006 <- IO2006k[1:47, 1:47] %*% solve(diag(IO2006k[,54]))
A2010 <- IO2010k[1:47, 1:47] %*% solve(diag(IO2010k[,54]))
#difference of the A matrices, 2010 vs 2000
difA1 <- A2010 - A2000
#difference of the A matrices, 2010 vs 2006
difA2 <- A2010 - A2006
#difference of the A matrices, 2006 vs 2000
difA3 <- A2006 - A2000
#technology decomposition for IO 2010 vs 2000
lres1 <- round(demat(difA1, L2000, L2010, IO2000k[,48:53], IO2010k[,48:53]), digits=3)
#technology decomposition for IO 2010 vs 2006
lres2 <- round(demat(difA2, L2006, L2010, IO2006k[,48:53], IO2010k[,48:53]), digits=3)
#technology decomposition for IO 2006 vs 2000
lres3 <- round(demat(difA3, L2000, L2006, IO2000k[,48:53], IO2006k[,48:53]), digits=3)
#write the results to csv files
write.csv(lres1, file="Dekomposisi Teknologi IO 2010-2000.csv", row.names=T)
write.csv(lres2, file="Dekomposisi Teknologi IO 2010-2006.csv", row.names=T)
write.csv(lres3, file="Dekomposisi Teknologi IO 2006-2000.csv", row.names=T)
#ASSEMBLE ALL DATA FOR ANALYSIS----------------------------------------------
#BUILD THE DECOMPOSITION-RESULTS TABLE
#Total technology-change effect
#total technology decomposition 2010-2000
tot1 <- rowSums(lres1)
#total technology decomposition 2010-2006
tot2 <- rowSums(lres2)
#total technology decomposition 2006-2000
tot3 <- rowSums(lres3)
#Total final-demand-change effect
#total final-demand effect 2010-2000
tf1 <- round(rowSums(res1[,2:ncol(res1)]), digits=3)
#total final-demand effect 2010-2006
tf2 <- round(rowSums(res2[,2:ncol(res2)]), digits=3)
#total final-demand effect 2006-2000
tf3 <- round(rowSums(res3[,2:ncol(res3)]), digits=3)
#Total effect of the decomposition (technology + final demand)
td1 <- round(tot1 + tf1, digits=3)
td2 <- round(tot2 + tf2, digits=3)
td3 <- round(tot3 + tf3, digits=3)
#difference between total output change and total decomposition effect
#(should be ~0 if the decomposition is exhaustive)
se1 <- round(res1[,1] - td1, digits=3)
se2 <- round(res2[,1] - td2, digits=3)
se3 <- round(res3[,1] - td3, digits=3)
#assemble the IO decomposition data
namakol <- c("Perubahan Output", "Level-Effect", "Mix-Effect", "Distribution-Effect", "Total Efek Final Demand", "Total Efek Perubahan Teknologi", "Total Dekomposisi", "Selisih Output & Dekomposisi")
#decomposition 2010-2000
fin1 <- cbind(res1, tf1, tot1, td1, se1); colnames(fin1) <- namakol
#decomposition 2010-2006
fin2 <- cbind(res2, tf2, tot2, td2, se2); colnames(fin2) <- namakol
#decomposition 2006-2000
fin3 <- cbind(res3, tf3, tot3, td3, se3); colnames(fin3) <- namakol
#write the data to files
write.csv(fin1, file="hasil analisa dekomposisi 2010-2000.csv", row.names=T)
write.csv(fin2, file="hasil analisa dekomposisi 2010-2006.csv", row.names=T)
write.csv(fin3, file="hasil analisa dekomposisi 2006-2000.csv", row.names=T)
#BUILD THE IO DATA-ANALYSIS TABLE
#total intermediate demand
per1 <- rowSums(IO2010k[,1:47]) #IO 2010 at constant prices
per2 <- rowSums(IO2006k[,1:47]) #IO 2006 at constant prices
per3 <- rowSums(IO2000k[,1:47]) #IO 2000 at constant prices
#domestic final demand (columns 48-51)
dom1 <- rowSums(IO2010k[,48:51]) #IO 2010
dom2 <- rowSums(IO2006k[,48:51])
dom3 <- rowSums(IO2000k[,48:51])
#export final demand (columns 52-53)
eks1 <- rowSums(IO2010k[,52:53])
eks2 <- rowSums(IO2006k[,52:53])
eks3 <- rowSums(IO2000k[,52:53])
#total demand
jum1 <- per1+dom1+eks1
jum2 <- per2+dom2+eks2
jum3 <- per3+dom3+eks3
#domestic imports (from outside the province)
#NOTE(review): gM2010/gM2006/gM2000 are not defined in this file -- confirm
#they are created elsewhere before this section runs.
M1 <- gM2010[,2] * infl[12]
M2 <- gM2006[,2] * infl3
M3 <- gM2000[,2] * infl2
#imports from abroad
m1 <- gM2010[,1] * infl[12]
m2 <- gM2006[,1] * infl3
m3 <- gM2000[,1] * infl2
#value-added component
V1 <- t(VA2010k)
V2 <- t(VA2006k)
V3 <- t(VA2000k)
#TODO: a list of sector names still needs to be prepared!
#build the column names for the table
nama <- c("Permintaan Antara", "Permintaan Akhir(Domestik)", "Permintaan Akhir(Ekspor)", "Output Domestik",
"Import Antar Provinsi", "Import Luar Negeri", "Nilai Tambah Bruto (PDRB)")
#assemble the tables
tbl1 <- cbind(per1, dom1, eks1, jum1, M1, m1, V1); colnames(tbl1) <- nama
tbl2 <- cbind(per2, dom2, eks2, jum2, M2, m2, V2); colnames(tbl2) <- nama
tbl3 <- cbind(per3, dom3, eks3, jum3, M3, m3, V3); colnames(tbl3) <- nama
|
3e3a84faf03577ab958df21d35f11b3c4dd1db08
|
d4a19fdbcf046b82a79491f5b11e83d0c1c0b0ce
|
/R/lsos.R
|
04a2ff76becc2ec71571a8e1f0dfc9e77c7a07b2
|
[] |
no_license
|
Laurigit/libSE
|
60289577beb67ddad199b331c8226aa18aa79330
|
11e500e741fe0e614eac6d5b5928c35e33ad1564
|
refs/heads/main
| 2023-06-01T01:58:14.486995
| 2021-06-09T12:38:03
| 2021-06-09T12:38:03
| 375,009,105
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 197
|
r
|
lsos.R
|
# Shorthand: list the `n` largest objects in the workspace, sorted by size,
# as a data.table with an `obj` column holding the object names.
# NOTE(review): depends on `ls_objects()` (presumably the well-known
# .ls.objects memory-listing helper) and on data.table() -- neither is
# defined or loaded in this file; confirm they are available in the session.
lsos <- function(..., n = 1000) {
  query <- ls_objects(..., order.by = "Size", decreasing = TRUE,
                      head = TRUE, n = n)
  # Return the table visibly (the original ended with an assignment,
  # which returns its value invisibly).
  data.table(obj = row.names(query), query)
}
#res <- lsos(20)
|
b41fc69c56e0342e9c9802cd4e6f34f414f26c34
|
ee0689132c92cf0ea3e82c65b20f85a2d6127bb8
|
/Unsorted/density2.R
|
d7c85b59b581c6a18371781038d81f5945ae010a
|
[] |
no_license
|
DUanalytics/rAnalytics
|
f98d34d324e1611c8c0924fbd499a5fdac0e0911
|
07242250a702631c0d6a31d3ad8568daf9256099
|
refs/heads/master
| 2023-08-08T14:48:13.210501
| 2023-07-30T12:27:26
| 2023-07-30T12:27:26
| 201,704,509
| 203
| 29
| null | null | null | null |
UTF-8
|
R
| false
| false
| 373
|
r
|
density2.R
|
# Density demo script: simulate rounded normal data, then explore histogram
# and kernel-density views of it. Runs for its plots/side effects only.
set.seed(1234)
# 100 draws from N(50, 15), rounded up to integers
x = ceiling(rnorm(100,50,15))
summary(x)
head(x)
hist(x)
# NOTE(review): write() fails unless the ./file/ directory already exists.
write(x, file='./file/sampledata.txt', ncolumns = 1)
# write to a file to copy to execl
#
# density-scaled histogram with a kernel-density overlay
hist(x, freq=F)
lines(density(x), lwd=2)
plot(density(x))
rug(x) # jitters at points
abline(v=x) # vertical lines
#another method: plot the density's x/y coordinates directly
plot(density(x)$x, density(x)$y)
rug(density(x)$x)
density(x)$x
|
fd90a15903c6d9f968137cb8911f1b5cf402f634
|
811596219062d2bf43473b342f51cb6c75e9d9bb
|
/TPP_functions.R
|
3ed12c54b64047d9a5246901359e2cce40d199a5
|
[
"MIT"
] |
permissive
|
jwinget/ShinyTPP
|
812b4416103d67d90ac4f041e0a7713f87fcb1cd
|
5b11d6a229742c5ce47287ece05382d53e841fd0
|
refs/heads/master
| 2020-06-27T21:11:13.668927
| 2019-08-01T12:55:08
| 2019-08-01T12:55:08
| 200,050,444
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,700
|
r
|
TPP_functions.R
|
# Functions to run the Trans-proteomic pipeline
library(tidyverse)
library(xml2)
#library(MSGFplus) # MSGF+ currently has some bugs
print("init")
# Convert Thermo .raw files to mzML with ProteoWizard's msconvert.
# For each input file a shell command is built that centroids MS2 peaks
# ("peakPicking true 2"), keeps only MS2 spectra ("msLevel 2") and
# zlib-compresses the output, writing next to the source file.
# Side effects only (system() calls); returns nothing useful.
convertRaw <- function(raw.files) {
  template <- 'msconvert.exe MYSAMPLE.raw -o DIRNAME --filter "peakPicking true 2" --filter "msLevel 2" --zlib'
  for (rf in raw.files) {
    out.dir <- paste0(dirname(rf), '/')
    # choose.files() returns backslash-separated Windows paths; normalise
    rf <- str_replace_all(rf, '\\\\', '/')
    cmd <- template %>%
      str_replace('MYSAMPLE.raw', rf) %>%
      str_replace('DIRNAME', out.dir)
    system(cmd)
  }
}
# Write a comet.params search-parameter file for the Comet search engine.
# Reads the template "comet.params.template" from the working directory,
# substitutes the {placeholder} tokens and writes <project.dir>comet.params
# (project.dir is therefore expected to end with a '/').
#
# db               path to the FASTA search database
# tol              precursor tolerance (ppm)
# termini          required enzymatic termini (1 = semi-tryptic)
# missed.cleavages maximum missed cleavages
# frag.bin.tol     fragment bin tolerance (1.0005 for low-res MS2)
# frag.bin.offset  fragment bin offset (0.4 for low-res MS2)
# decoy.prefix     accession prefix marking decoy database entries
genCometParams <- function(project.dir, db, tol='20', termini=1, missed.cleavages=2,
                           frag.bin.tol=0.02, frag.bin.offset=0, decoy.prefix="DECOY_"){
  comet.template <- "comet.params.template"
  comet.params <- paste0(project.dir, 'comet.params')
  print(paste('Writing to', comet.params))
  cp <- readLines(comet.template)
  print(cp[1])
  # gsub()'s second argument is `replacement`; the original relied on
  # partial matching of the abbreviated `replace=` -- spelled out here.
  cp <- gsub(pattern='\\{database\\}', replacement=db, x=cp)
  cp <- gsub(pattern='\\{tolerance\\}', replacement=tol, x=cp)
  cp <- gsub(pattern='\\{termini\\}', replacement=termini, x=cp)
  cp <- gsub(pattern='\\{mc\\}', replacement=missed.cleavages, x=cp)
  cp <- gsub(pattern='\\{frag_bin_tol\\}', replacement=frag.bin.tol, x=cp)
  cp <- gsub(pattern='\\{frag_bin_offset\\}', replacement=frag.bin.offset, x=cp)
  cp <- gsub(pattern='\\{decoy_prefix\\}', replacement=decoy.prefix, x=cp)
  writeLines(cp, con=comet.params)
  print('Written')
}
# Run the Comet search engine over the given mzML files, using the
# comet.params file located in `project.dir` (expected to end with '/').
# Side effect only: shells out via system().
run.Comet <- function(project.dir, mzML.files){
  file.args <- paste(mzML.files, collapse = ' ')
  system(paste0("comet.exe -P", project.dir, 'comet.params ', file.args))
}
# These functions "work" to run MSGF+, but there is currently a bug in the way
# MSGF+ writes out its results that breaks label-free quantification.
# A subroutine correcting the data file locations in the pepXML output is
# still needed.

# Build an MSGF+ parameter object via msgfPar().
# NOTE(review): msgfPar() comes from the MSGFplus package, whose library()
# call is commented out at the top of this file -- confirm it is loaded.
# Returns the parameter object explicitly (the original ended with an
# assignment, so the value was only returned invisibly).
genMSGFParams <- function(db, tol='20 ppm', instrument=3, termini=1){
  msgf.params <- msgfPar(
    database = db,
    tolerance = tol, # Parent mass tolerance
    instrument = instrument, # See instrument info below
    chargeRange = c(2,6),
    lengthRange = c(6,25),
    enzyme = 'Trypsin', # See enzyme info below
    fragmentation = 0, # 0: Auto-detect, 1: CID, 2: ETD, 3: HCD
    ntt = termini # Number of tolerable termini. 2 = fully tryptic
  )
  # INSTRUMENT
  # 0: low-precision MS2 (e.g. Thermo LTQ) or hybrid high-res MS1/low-res MS2
  # 2: high-precision MS2 (e.g. tolerance = 10ppm) and TOF instruments
  # 3: Q-Exactive HCD spectra; 1: other HCD spectra
  # ENZYME
  # 0: unspecific cleavage, 1: Trypsin (default), 2: Chymotrypsin, 3: Lys-C,
  # 4: Lys-N, 5: glutamyl endopeptidase (Glu-C), 6: Arg-C, 7: Asp-N,
  # 8: alphaLP, 9: no cleavage
  msgf.params
}
# Run the MSGF+ search engine, then convert each resulting .mzid file to
# pepXML with ProteoWizard's idconvert and rename it to *.pep.xml.
# NOTE(review): runMSGF() comes from the MSGFplus package, whose library()
# call is commented out at the top of this file -- confirm it is loaded.
run.MSGF <- function(mzML.files, msgf.params){
# Run the MSGF+ search engine
# !!! Always needs a vector of mzML files, even if you only search one !!!
# Do not import the results in to R
res <- runMSGF(msgf.params, mzML.files, import=FALSE)
# Convert mzID files to pepXML
for (mzML.file in mzML.files) {
bn <- tools::file_path_sans_ext(mzML.file) # Base name of the mzML file without extension
mzid.dir <- dirname(mzML.file) # Directory for output
mzid.file <- paste0(bn, '.mzid') # The name of the mzid file
idconvert_cmd <- paste0('idconvert.exe ',mzid.file,' --pepXML -o ',mzid.dir)
system(idconvert_cmd)
unlink(mzid.file) # Delete the mzid file. We don't use it for anything else
# idconvert writes *.pepXML; the TPP tools expect the *.pep.xml suffix
file.rename(paste0(bn,'.pepXML'), paste0(bn,'.pep.xml'))
}
}
# Run the TPP (Trans-Proteomic Pipeline) on pepXML file(s) and perform
# label-free quantification with StPeter.
#
# pepXML.files  character vector of *.pep.xml search results
# database      FASTA database path (passed to RefreshParser)
# combine       TRUE  -> merge every pepXML into one combined analysis
#               FALSE -> process each pepXML independently
# output.name   base name of the combined output (ignored when combine is
#               FALSE, where each file's own root name is used)
#
# Side effects only: writes .interact.pep.xml / .iproph.pep.xml /
# .iproph.prot.xml next to the inputs by shelling out to the TPP tools.
# The two branches of the original duplicated the whole pipeline verbatim;
# it is factored into run_tpp_pipeline() below.
runTPP <- function(pepXML.files, database, combine, output.name) {
  if (combine == TRUE) {
    # one combined run over all input files
    out.base <- paste0(dirname(pepXML.files[1]), '/', output.name)
    run_tpp_pipeline(pepXML.files, database, out.base)
  } else {
    # one independent run per input file
    for (pepXML.file in pepXML.files) {
      # root name of the file, without directory or .pep.xml extension
      root.name <- str_replace_all(pepXML.file, '\\\\', '/') %>%
        str_replace(paste0(dirname(pepXML.file), '/'), '') %>%
        str_replace('.pep.xml', '')
      out.base <- paste0(dirname(pepXML.file), '/', root.name)
      run_tpp_pipeline(pepXML.file, database, out.base)
    }
  }
}

# Helper: run the full InteractParser -> RefreshParser -> PeptideProphet ->
# iProphet -> ProteinProphet -> StPeter chain for one analysis.
# `input.files` are the pepXML input(s); `output.name` is the directory-
# qualified base name used in every generated file name.
run_tpp_pipeline <- function(input.files, database, output.name) {
  # Parameterized command strings for the TPP modules
  Interact_cmd <- "InteractParser.exe MYSAMPLE.interact.pep.xml"
  Refresh_cmd <- "RefreshParser.exe MYSAMPLE.interact.pep.xml"
  Peptide_cmd <- "PeptideProphetParser.exe MYSAMPLE.interact.pep.xml ACCMASS PPM MINPROB=0 DECOYPROBS DECOY=DECOY NONPARAM EXPECTSCORE"
  iProphet_cmd <- "InterProphetParser.exe MYSAMPLE.interact.pep.xml MYSAMPLE.interact.iproph.pep.xml"
  Protein_cmd <- "ProteinProphet.exe MYSAMPLE.interact.iproph.pep.xml MYSAMPLE.interact.iproph.prot.xml NORMPROTLEN IPROPHET MINPROB=0"
  StPeter_cmd <- "StPeter.exe -d MYSAMPLE.interact.iproph.prot.xml"
  pepxml.dir <- dirname(output.name)
  interact_pepXML <- paste0(output.name, '.interact.pep.xml')
  # Run InteractParser over the input pepXML file(s)
  system(paste(str_replace_all(Interact_cmd, 'MYSAMPLE', output.name),
               paste(input.files, collapse = ' ')))
  # Run RefreshParser against the search database
  system(paste(str_replace(Refresh_cmd, 'MYSAMPLE', output.name), database))
  # Run PeptideProphet
  system(str_replace(Peptide_cmd, 'MYSAMPLE', output.name))
  # PeptideProphet persistently rewrites the data paths, which interferes
  # with StPeter; patch the hard-coded install path back to the data dir.
  tx <- readLines(interact_pepXML, -1)
  tx <- gsub(pattern = 'C:/TPP/ShinyTPP', replacement = pepxml.dir, x = tx)
  writeLines(tx, con = interact_pepXML)
  # Run iProphet
  system(str_replace_all(iProphet_cmd, 'MYSAMPLE', output.name))
  # Run ProteinProphet
  system(str_replace_all(Protein_cmd, 'MYSAMPLE', output.name))
  # Run StPeter (label-free quantification)
  system(str_replace(StPeter_cmd, 'MYSAMPLE', output.name))
}
# Parse a set of protXML files to pull out relevant results in "tidy" format:
# one row per protein group, with identifier, nextProt link (when available),
# name, % coverage, number of distinct peptides and the StPeter dSIn value.
# Also writes result_table.csv next to the first input file and returns the
# table. Requires xml2 and the tidyverse (loaded at the top of this file).
extract.results <- function(protxml.files){
# accumulator; rows are appended with bind_rows in the loops below
out.data <- tribble(~File,~ID,~Link,~Protein.name,~Pct.coverage,~Unique.peps,~dSIN)
print('Processing')
for(protxml.file in protxml.files){
fname <- basename(protxml.file)
x <- read_xml(protxml.file) # Read in the results
# rename the default namespace so XPath queries can reference it
ns <- xml_ns_rename(xml_ns(x), d1="protXML")
protein_groups <- xml_find_all(x, "//protXML:protein_group", ns)
for (pg.node in protein_groups) {
# first <protein> of the group carries the identification
protein.node <- xml_find_first(pg.node, 'protXML:protein', ns)
ident = xml_attr(protein.node, "protein_name")
annot.node <- xml_find_first(protein.node, "protXML:annotation", ns)
desc <- xml_attr(annot.node, 'protein_description')
if(str_detect(ident, 'nxp:NX_')){
# Need to clean up ugly nextProt description
begin <- str_split(desc, ' \\\\Gname=')[[1]][1]
protein.name <- str_split(begin, '\\\\Pname=')[[1]][2]
# And generate nextprot URL
url <- paste0('https://www.nextprot.org/entry/',str_replace(ident, 'nxp:', ''),'/')
link_html <- paste0('<a href="',url,'">',ident,'</a>')
} else {
protein.name <- desc
link_html <- ''
}
coverage = as.numeric(xml_attr(protein.node, "percent_coverage"))
distinct.peps = as.numeric(xml_attr(protein.node, "total_number_distinct_peptides"))
# StPeter quantification result (dSIn); NA when StPeter was not run
stpeter.node <- xml_find_first(protein.node, "protXML:analysis_result/protXML:StPeterQuant", ns)
dSIn <- as.numeric(xml_attr(stpeter.node, "dSIn"))
out.row <- tribble(~File, ~ID,~Link,~Protein.name,~Pct.coverage,~Unique.peps,~dSIN,
fname, ident, link_html, protein.name, coverage, distinct.peps, dSIn)
out.data <- bind_rows(out.data, out.row)
}
}
# keep only quantified proteins, sorted by descending dSIn
# NOTE(review): right-assignment `->` is discouraged style; also
# write_csv()'s `path=` argument is deprecated in favour of `file=`.
out.data %>% filter(!is.na(dSIN)) %>% arrange(-dSIN) -> out.data
outfile <- paste0(dirname(protxml.files[1]),'/result_table.csv')
write_csv(out.data, path = outfile)
print(paste('Results written to', outfile))
return(out.data)
}
## Example command-line workflow
# 1. Get all raw files in a directory and convert them
# Set this to the directory where your raw files are:
#> project.dir <- "E:/2016-08_StPeter_Validation/"
# Don't edit these lines:
#> raw.files <- list.files(project.dir, pattern = 'raw', full.names=T)
#> convertRaw(raw.files)
# 2. Generate comet parameters
# Set this to your desired search database
#> database <- "E:/dbase/nextprot_all_DECOY.fasta"
#> genCometParams(project.dir, database, frag.bin.tol=1.0005, frag.bin.offset=0.4)
# Note: if you are using high-res MS2 (e.g. HCD in QEHF),
# remove the frag.bin.tol and frag.bin.offset arguments,
# e.g. genCometParams(project.dir, database)
# 3. Search all mzML files in the project directory with Comet
# You should not need to edit these lines
#> mzML.files <- list.files(project.dir, pattern = 'mzML', full.names=T)
#> run.Comet(project.dir, mzML.files)
# 4a. Run the TPP, COMBINING ALL SEARCH RESULTS INTO ONE OUTPUT
# This is useful for, e.g., gel band data where you need to combine many files
#> pepxml.files <- list.files(project.dir, pattern = 'pep.xml', full.names=T)
#> runTPP(pepxml.files, database, 'combined')
# Note: 'combined' can be changed to whatever name you like,
# just make sure it's in quotes. The output will be
# 'combined.interact.iproph.prot.xml'
# 4b. Run the TPP but process each search result individually
#> pepxml.files <- list.files(project.dir, pattern = 'pep.xml', full.names=T)
#> for(pepxml.file in pepxml.files){
#> bn <- tools::file_path_sans_ext(mzML.file)
#> runTPP(c(pepxml.file), database, bn)
#>}
|
6ce2e3785031723064992424f363f7244814cede
|
c35fbc233528e1920a9ec2ba13232828fae401b9
|
/R/caPred.R
|
f14e23e0f0593d8d8995644c5d35077b3e638f32
|
[] |
no_license
|
cran/mixtox
|
03d9af3b74dfc1286008352119971aed14ea79ab
|
996fec422d5a36a42cbf9fa6483111c93d773cb9
|
refs/heads/master
| 2022-07-05T18:41:34.137823
| 2022-06-20T15:40:02
| 2022-06-20T15:40:02
| 36,813,233
| 6
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,589
|
r
|
caPred.R
|
caPred <- function(model, param, mixType = "eecr", effv, effPoints, sav = FALSE){
  ## Concentration addition (CA) prediction for mixture toxicity.
  ##
  ## model     : vector of fitted dose-response model names, one per component.
  ## param     : matrix of fitted model parameters, one row per component
  ##             (both consumed by ECx(), defined elsewhere in this package).
  ## mixType   : mixture design -- 'eecr' (equal effect concentration ratio),
  ##             'acr' (arbitrary concentration ratio), or 'udcr' (uniform
  ##             design concentration ratio).
  ## effv      : effect levels (eecr/udcr) or concentration weights (acr).
  ## effPoints : effect levels at which CA effect concentrations are predicted;
  ##             a default grid is used when missing.
  ## sav       : FALSE (no file), TRUE (auto-named .txt file), or a file path.
  ## Returns   : list(ca, e, pct, unitab).
  ##source('ECx.R')
  concAdd <- function(pctEcx, effPoints){
    ## Classic CA formula: ECmix = 1 / sum_i(p_i / ECx_i) at each effect point.
    ecPoints <- ECx(model, param, effPoints)
    ca <- 1 / (t(pctEcx) %*% (1 / ecPoints))
    return(ca)
  }
  ## BUG FIX: mixType has a default ('eecr'), but missing(mixType) is TRUE
  ## whenever the caller omits it, so the old check rejected perfectly valid
  ## calls that relied on the default. Only truly required args are checked.
  if (missing(model) || missing(param) || missing(effv)) stop('argument missing')
  if (length(model) >= 2){
    ## at these effect points the effect concentrations will be predicted
    if(missing(effPoints)){
      effPoints <- c(.025, .03, .05, .10, .15, .20, .25, .30, .35, .40, .45, .47, .50, .52, .55, .60, .65, .70, .75, .80, .85, .90)
    }
    if (mixType == 'eecr'){
      ## equal effect concentration ratio: components mixed in proportion to
      ## their individual effect concentrations at each level in effv.
      ecx <- ECx(model, param, effv)
      num <- nrow(ecx)
      mixEcx <- colSums(ecx)
      if (length(effv) > 1) pctEcx <- ecx / t(replicate(num, mixEcx)) else pctEcx <- ecx / mixEcx
      rownames(pctEcx) <- rownames(ecx)
      ca <- concAdd(pctEcx, effPoints)
      rowName <- paste('ca.EE', effv * 100, sep = '')
      rownames(ca) <- rowName
      designTable <- NULL
    }else if (mixType == 'acr'){
      ## arbitrary concentration ratio: effv gives relative weights, one
      ## per component, normalised to fractions of the mixture.
      if(length(model) != length(effv)) stop('no consistence')
      pctEcx <- t(t(effv / sum(effv)))
      ca <- concAdd(pctEcx, effPoints)
      rownames(ca) <- 'ca.acr'
      designTable <- NULL
    }else if(mixType == 'udcr'){
      ## uniform design concentration ratio
      ## source('unidTab.R')
      fac <- length(model)   # number of components (design factors)
      lev <- length(effv)    # number of design levels
      tab <- unidTab(lev, fac)$T
      if (length(dim(tab)) == 3)
        ## use the first uniform table if many
        uniTable <- tab[, , 1]
      if(length(dim(tab)) == 2)
        uniTable <- tab
      ecx <- ECx(model, param, effv)
      ecxMix <- matrix(0, fac, lev)
      ## uniform mixture construction: each component's EC is chosen
      ## according to the level assigned by the uniform design table.
      for (i in seq(fac)){
        for (j in seq(lev)){
          k <- uniTable[j, i]
          ecxMix[i, j] <- ecx[i, k]
        }
      }
      mixEcx <- colSums(ecxMix)
      pctEcx <- ecxMix / t(replicate(fac, mixEcx))
      ca <- concAdd(pctEcx, effPoints)
      rowName <- paste('ca.U', seq(lev), sep = '')
      rownames(ca) <- rowName
      rownames(pctEcx) <- rownames(ecx)
      colnames(pctEcx) <- rowName
      designTable <- uniTable
    }
  }else {
    stop('needs more than one component')
  }
  Results <- list(ca = ca, e = effPoints, pct = t(pctEcx), unitab = designTable)
  ## Optionally dump the full result list to a text file via sink().
  if (sav != FALSE){
    if(sav == TRUE) {
      sav = paste("caPred_", mixType, "_", Sys.Date(), ".txt", sep = "")
    }
    sink(sav)
    print(Results)
    sink()
  }
  return(Results)
}
|
d08f1a048e77c5f101cfd53fa7b8d5f352599934
|
f05a2f7175f9422ac9908181c97aaab3c375fd34
|
/man/update_tntpr.Rd
|
2c1fb1531ce5f7f7a6bffcdc9ba7bdaa05ef3d42
|
[
"GPL-2.0-only",
"MIT",
"CC-BY-4.0"
] |
permissive
|
tntp/tntpr
|
19c6b39cf9252f2854a1faf7bd1062b582d8e5ab
|
fa9d7442cb42c667539213805192a9c845cf2913
|
refs/heads/master
| 2023-08-08T23:05:28.893304
| 2023-07-09T15:11:15
| 2023-07-09T15:11:15
| 158,586,843
| 4
| 0
|
CC-BY-4.0
| 2023-08-15T17:43:50
| 2018-11-21T17:54:23
|
R
|
UTF-8
|
R
| false
| true
| 267
|
rd
|
update_tntpr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/update_tntpr.R
\name{update_tntpr}
\alias{update_tntpr}
\title{Re-install the tntpr package from GitHub.}
\usage{
update_tntpr()
}
\description{
Re-install the tntpr package from GitHub.
}
|
7a7835dc3603b67f1017866272069065d565e856
|
f5199fc56c1a4e0f2a28c8eceb8f8f8955101e87
|
/gibbsSampling_MH.R
|
89e82d101537ecf6ef430febb0117e0d9daf706b
|
[] |
no_license
|
mshasan/BayesianDataAnalysis
|
4940a507d77e5b79ce67259b9678e2d1a8ad0cfe
|
e6e320b78076f94000c516b6fef63bfd99978ccb
|
refs/heads/master
| 2021-01-20T08:40:27.025570
| 2017-05-03T17:49:53
| 2017-05-03T17:49:53
| 90,175,198
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,733
|
r
|
gibbsSampling_MH.R
|
#Problem 3
#Part a)
#open the library "coda"
library(coda)
#introduce the data
data=c(65,156,100,134,16,108,121,4,39,143,56,26,1,1,5,65)
#alpha has prior Gamma(a,b), lambda has prior Gamma(c,d)
#Choose a,b,c,d and niter, number of iterations
a=1;
b=3;
c=5;
d=20;
n<-length(data);
niter=40000;
#Calculate the log of joint posterior density
# Log of the (unnormalised) joint posterior density of (alpha, lambda).
# Reads the globals defined above: data, n = length(data), and the
# hyperparameters a, b (alpha prior) and c, d (lambda prior).
logpostfct <- function(alp, lam) {
  shape.part <- (n + a - 1) * log(alp) + (n + c - 1) * log(lam)
  rate.part  <- alp / b + lam * (1 / d + sum(data^alp))
  shape.part - rate.part + (alp - 1) * sum(log(data))
}
# This function builds the MCMC chain; it outputs two vectors
#One vector contains the alphas one the lambdas
#start is the vector of starting values, sig1 and sig2 are the scale
#factors that update alpha and lambda, respectively
#nsim is the number of simulations(length of the chain)
# Random-walk Metropolis-Hastings sampler for (alpha, lambda).
#
# Arguments:
#   start - length-2 vector of starting values c(alpha0, lambda0)
#   sig1  - proposal scale (standard deviation) for alpha
#   sig2  - proposal scale (standard deviation) for lambda
#   nsim  - number of iterations (length of the returned chains)
#
# Returns a list with components "alphavalues" and "lambdavalues",
# each a numeric vector of length nsim.
#
# NOTE(review): the target is logpostfct(), defined earlier in this script,
# which in turn reads the globals data, n, a, b, c, d -- those must be set
# before calling this function.
metrhast<-function(start, sig1, sig2, nsim)
{
  alphav=rep(start[1], nsim);  #initialize the vectors containing the
  lambdav=rep(start[2], nsim); #alpha and lambda values
  for(i in 2:nsim)
  {
    # Gaussian random-walk proposal around the current state.
    alphav[i]<-alphav[i-1]+sig1*rnorm(1); #update the parameters
    lambdav[i]<-lambdav[i-1]+sig2*rnorm(1);
    # Re-propose until both are strictly positive (posterior support is
    # alpha > 0, lambda > 0).
    # NOTE(review): rejecting negative proposals truncates the proposal
    # distribution, making it slightly asymmetric near the boundary; no
    # Hastings correction is applied for that here.
    while((alphav[i]<=0) | (lambdav[i]<=0))
    { alphav[i]<-alphav[i-1]+sig1*rnorm(1);
      lambdav[i]<-lambdav[i-1]+sig2*rnorm(1);
    }
    # Log acceptance ratio (symmetric Gaussian proposal assumed).
    loglik_ratio=logpostfct(alphav[i], lambdav[i])-
      logpostfct(alphav[i-1], lambdav[i-1]);
    u<-runif(1);
    # Reject: keep the previous state when u exceeds min(1, ratio).
    if(u>min(1, exp(loglik_ratio)))
    {alphav[i]<-alphav[i-1];
      lambdav[i]<-lambdav[i-1];
    }
  }
  newlist<-list("alphavalues"=alphav, "lambdavalues"=lambdav);
  return(newlist);
}
#We will use these values for part e)
sigma1<-1.0;
sigma2<-0.5;
start1=c(1.0, 1.0);
start2=c(5.0, 3.0);
start3=c(0.5, 0.1);
#Part b)
#Find loglikelihood function of posterior of alpha given lambda and data
# Log full conditional (up to an additive constant) of alpha given lambda
# and the data. Reads the globals data, n, and hyperparameters a, b.
log.lik <- function(alp, lam) {
  (n + a - 1) * log(alp) +
    (alp - 1) * sum(log(data)) -
    alp / b -
    lam * sum(data^alp)
}
#This function builds the chains
# startv is the vector of starting values of the chain
#sig is the scale factor used to update alpha
#nsimul is the number of iterations
#datav is the empirical data
## Gibbs sampler for (alpha, lambda) with a Metropolis-Hastings step for alpha
## and an exact Gamma draw for lambda.
##
##   startv - length-2 vector of starting values c(alpha0, lambda0)
##   sig    - random-walk proposal scale for alpha
##   nsimul - number of iterations (chain length)
##   datav  - the observed data, used in the Gamma full conditional of lambda
##
## Returns list(alphas = ..., lambdas = ...), each of length nsimul.
## NOTE(review): log.lik() (the full conditional of alpha) still reads the
## GLOBAL `data`, and the Gamma shape/rate use the globals n, c, d; `datav`
## must equal the global `data` for the two update steps to be consistent.
Gibbsampling <- function(startv, sig, nsimul, datav)
{
  alphavect  <- rep(startv[1], nsimul)  # chain of alpha values
  lambdavect <- rep(startv[2], nsimul)  # chain of lambda values
  # Do MH inside Gibbs sampling
  for (i in 2:nsimul)   # BUG FIX: was `2:niter`, silently reading a global
  {
    # MH update for alpha: Gaussian random walk, re-proposed until positive.
    alphavect[i] <- alphavect[i-1] + sig*rnorm(1)
    while (alphavect[i] <= 0)
    { alphavect[i] <- alphavect[i-1] + sig*rnorm(1) }
    loglik_ratio = log.lik(alphavect[i], lambdavect[i-1]) -
      log.lik(alphavect[i-1], lambdavect[i-1])
    u <- runif(1)
    if (u > min(1, exp(loglik_ratio)))
    { alphavect[i] <- alphavect[i-1] }   # reject: keep previous alpha
    # Exact Gibbs draw for lambda from its Gamma full conditional.
    # BUG FIX: was `sum(data^...)`, ignoring the datav argument entirely.
    lambdavect[i] <- rgamma(1, shape = n+c, rate = 1/d + sum(datav^alphavect[i]))
  }
  newliste <- list("alphas" = alphavect, "lambdas" = lambdavect)
  return(newliste)
}
#We will use this sigma value in part e)
sigma=0.5;
#Part e)
#Run the algorithms from part a) and b) 3 times
#Plot the trace of plots and autocorrelation functions
sigma1<-1.0;
sigma2<-0.5;
sigma<-0.5;
start1=c(1.0, 1.0);
start2=c(5.0, 3.0);
start3=c(0.5, 0.1);
res1<-metrhast(start1, sigma1, sigma2, niter);
matr1<-cbind(res1$alphavalues, res1$lambdavalues);
colnames(matr1)<-c("alpha", "lambda");
m1<-as.mcmc(matr1);
plot(m1, trace=TRUE, density=TRUE);
autocorr.plot(m1, auto.layout=FALSE)
xyplot(m1, col="blue");
res2<-metrhast(start2, sigma1, sigma2, niter);
matr2<-cbind(res2$alphavalues, res2$lambdavalues);
colnames(matr2)<-c("alpha", "lambda");
m2<-as.mcmc(matr2);
xyplot(m2, col="red");
autocorr.plot(m2, auto.layout=FALSE);
thinr2<-window(m2, 15001, 40000, 25);
autocorr.plot(thinr2, auto.layout=FALSE);
## Third MH chain (starting values start3): convert to an mcmc object and
## inspect mixing via trace and autocorrelation plots.
res3<-metrhast(start3, sigma1, sigma2, niter);
matr3<-cbind(res3$alphavalues, res3$lambdavalues);
colnames(matr3)<-c("alpha", "lambda");
m3<-as.mcmc(matr3);                  # BUG FIX: was `m1<-...`, which clobbered chain 1's object and left m3 undefined
xyplot(m3, col="green");             # BUG FIX: was `xyplot(m2, ...)`, plotting the wrong chain
autocorr.plot(m3, auto.layout=FALSE);
gres1<-Gibbsampling(start1, 0.5, 40000, data)
gmatr1<-cbind(gres1$alphas, gres1$lambdas);
colnames(gmatr1)<-c("alpha", "lambda");
gm1<-as.mcmc(gmatr1);
autocorr.plot(gm1, auto.layout=FALSE)
xyplot(gm1, col="green");
thing1<-window(gm1, 15001, 40000, 25);
autocorr.plot(thing1, auto.layout=FALSE)
gres2<-Gibbsampling(start2, 0.5, 40000, data)
gmatr2<-cbind(gres2$alphas, gres2$lambdas);
colnames(gmatr2)<-c("alpha", "lambda");
gm2<-as.mcmc(gmatr2);
autocorr.plot(gm2, auto.layout=FALSE)
xyplot(gm2, col="green");
thing2<-window(gm2, 15001, 40000, 25);
autocorr.plot(thing2, auto.layout=FALSE)
gres3<-Gibbsampling(start3, 0.5, 40000, data)
gmatr3<-cbind(gres3$alphas, gres3$lambdas);
colnames(gmatr3)<-c("alpha", "lambda");
gm3<-as.mcmc(gmatr3);
autocorr.plot(gm3, auto.layout=FALSE)
xyplot(gm3, col="green");
thing3<-window(gm3, 15001, 40000, 25);
autocorr.plot(thing3, auto.layout=FALSE)
|
c21914aef341b9a2816e611a9d3ca2ca0fd4c2ee
|
01ff051e865f75f50694f6a8427a0477bbb32115
|
/Scripts/importBiopaxMod.r
|
3b3c5fd284091afb01b1320497c25c5d91bd7a05
|
[] |
no_license
|
nirupamaBenis/PathwayLevelDataIntegration
|
f2ccca81fab21be17afeef5dd63860cdf4f8d95a
|
f714fd38e47e63cd40fc30232e83ad97d1566601
|
refs/heads/master
| 2020-07-31T16:50:56.889396
| 2019-09-29T11:05:21
| 2019-09-29T11:05:21
| 210,681,585
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,079
|
r
|
importBiopaxMod.r
|
"
@author Nirupama Benis
@version 0.2 Modified from import_biopax.r
@since 02-11-2015
<p>
This script can convert a biopax object (from rBiopaxParser) to a pathway.catalogue object
<p>
"
library(rBiopaxParser)
library(hash)
library(graph)
library(igraph)
ImportBiopaxMod = function (data, removePathways = NULL) {
  # Convert a biopax object (rBiopaxParser) into a pathway.catalogue object.
  #
  # == param
  # -data            A biopax object from readBiopax(), or input that
  #                  readBiopax() can parse into one.
  # -removePathways  Any pathway names that you would like to remove
  #                  (eg. Reactome categories)
  #
  # == details
  # The output from this function can be used in the cepa.all function to
  # calculate pathway significance.
  # The rBiopaxParser function pathway2RegulatoryGraph can be used instead of
  # the modified function Pathway2GraphAllTypes.
  # The difference is that, while building the pathway graph, the latter uses
  # all the subclasses of biopax level 3 class Interaction,
  # while the former starts from only the Control interactions.
  #
  # == return
  # A ``pathway.catalogue`` class object
  # Parse from raw input if needed; otherwise use the biopax object as given.
  if(!inherits(data, "biopax")) {
    message("importing biopax")
    suppressMessages(biopax <- readBiopax(data, verbose = FALSE))
  } else {
    biopax = data
  }
  removePathways <- removePathways  # NOTE(review): self-assignment is a no-op
  #	if(biopax$biopaxlevel != 2) {
  #		stop("Only Biopax level 2 is supported.")
  #	}
  # Enumerate pathways and drop any the caller asked to exclude (by name,
  # second column of listPathways()).
  pathway_df = listPathways(biopax)
  pathway_df <- pathway_df[!(pathway_df[,2] %in% removePathways),]
  # For each pathway id, build a regulatory graph and flatten it into a
  # data.frame of directed edges (interaction.id, input, output), or NULL
  # when the pathway yields no graph / no edges.
  # NOTE(review): sapply() returns a list here because every element is a
  # data.frame or NULL; lapply() would make that intent explicit.
  pathway_list = sapply(pathway_df[[1]], function(pid) {
    # changed to accommodate changes in recent biopax level 3 files
    suppressWarnings(graph <- Pathway2GraphAll(biopax, pid, expandSubpathways = TRUE, splitComplexMolecules = FALSE, useIDasNodenames = TRUE, verbose = FALSE, withSubGraphs = T))
    if(!is.null(graph)) {
      if(length(graph::edges(graph)) == 0) {
        graph = NULL  # an edgeless pathway carries no interactions
      } else {
        edge = graph::edges(graph)
        # One row per edge: repeat each source node once per outgoing edge.
        input = rep(names(edge), sapply(edge, length))
        output = unlist(edge)
        interaction_id = paste(pid, seq_along(output), sep = "_")
        graph = data.frame(interaction.id = interaction_id, input = input, output = output, stringsAsFactors = FALSE)
      }
    }
    return(graph)
  })
  names(pathway_list) <- gsub(" ", ".", pathway_df[,2]) # added to make it look like the PID.db data
  # Drop pathways that produced no graph at all.
  pathway_list = pathway_list[!sapply(pathway_list, is.null)]
  # NOTE(review): pathway[[1]] extracts only the interaction.id column of each
  # pathway's edge table -- confirm that is the intended pathList content.
  pathList = lapply(pathway_list, function(pathway) pathway[[1]])
  interactionList = do.call("rbind", pathway_list)
  # nodes in pathway_list are complex ids
  all_nodes = c(interactionList[[2]], interactionList[[3]])
  all_nodes = unique(all_nodes)
  # mapping from nodes to molecules: complexes are split into their member
  # molecules, non-complex nodes map to themselves.
  bp2 = selectInstances(biopax, id = all_nodes)
  l = isOfClass(bp2, "Complex")
  complex_nodes = unique(bp2[l]$id)
  non_complex_nodes = all_nodes[! all_nodes %in% complex_nodes]
  nl = c(lapply(complex_nodes, function(nid) {
      splitComplex(biopax, nid, returnIDonly = TRUE)
    }),
    lapply(non_complex_nodes, function(nid) {
      nid
    }))
  names(nl) = c(complex_nodes, non_complex_nodes)
  # Long form: one row per (node, member molecule) pair.
  node_list = data.frame(node.id = rep(names(nl), sapply(nl, length)), molecule.id = unlist(nl), stringsAsFactors = FALSE)
  # changed NAME to displayName
  mapping = data.frame(node.id = node_list$node.id,
    name = sapply(node_list$molecule.id, function(nid) getInstanceProperty(biopax, nid, property = "displayName", includeAllNames = F)),
    class = sapply(node_list$molecule.id, function(nid) getInstanceClass(biopax, nid)),
    stringsAsFactors = FALSE)
  mapping = mapping[mapping$class == "Protein", 1:2] # change this to only the ones you are able to map using ncbi2r
  # Per-node BioPAX class and display name, for downstream annotation.
  node.type = sapply(all_nodes, function(nid) getInstanceClass(biopax, nid))
  node.name = sapply(all_nodes, function(nid) getInstanceProperty(biopax, nid, property = "displayName"))
  res = list(pathList = pathList,
    interactionList = interactionList,
    mapping = mapping,
    node.type = node.type,
    node.name = node.name)
  class(res) = "pathway.catalogue"
  return(res)
}
|
af4e391a0c42bae407e3b4d4d93c85fc169aa323
|
c0e66d1ebacdbf10375a818ec6b24c35e5c9073e
|
/man/veri_args.Rd
|
06a1252010b43b93e7bdf7b3e7ba89624763dc0a
|
[] |
no_license
|
Climandes/ClimIndVis
|
b1045a5cdce425e6dbf42d3032bb9ac2a0933786
|
47627ea250c1bb1d7416341645ffe1a7f5ab5b48
|
refs/heads/master
| 2021-11-07T05:24:44.418529
| 2021-10-22T09:17:22
| 2021-10-22T09:17:22
| 140,861,804
| 9
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 962
|
rd
|
veri_args.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_documentation.R
\name{veri_args}
\alias{veri_args}
\title{veri_args}
\arguments{
\item{nmin}{Number of forecasts that are not-missing for forecast to be evaluated, otherwise NA is returned see \code{\link{easyVerification}{veriApply}}. Default=20.}
\item{na_rm}{Logical, should incomplete forecasts-observation pairs be used? see \code{\link{easyVerification}{veriApply}}. Default=TRUE}
\item{ncat}{Number of equidistant categories used to convert continuous forecasts into category forecasts (only for category skill scores). Default=3}
\item{prob}{Optional parameters passed to \code{\link{easyVerification}{veriApply}} for user-defined probability thresholds for converting continuous forecasts to category forecasts (only for category skill scores). Default=NULL (in this case prob is set to (1:(ncat-1))/ncat).}
}
\description{
optional arguments for verification function
}
|
9cbc2f995fb942f483c46e4cf3a6a49947fcc649
|
da928003b54ed1c4e016eef8ff663069554bf925
|
/R/_draft/backup/k_slye_id_finder.R
|
2bd5c1d05717e60311b1e64d923bf8cbee7c8b2f
|
[] |
no_license
|
GiulSposito/DudesFantasyFootball
|
c3c412a697f073c48aec61ad637d0e9a79ea3007
|
eccdd274cac718de2c7c099a6637b8bf8cdb2804
|
refs/heads/master
| 2021-09-21T14:03:25.540083
| 2021-09-08T16:14:52
| 2021-09-08T16:14:52
| 153,347,513
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 905
|
r
|
k_slye_id_finder.R
|
# Exploratory scratch script: trace kicker Joey Slye's missing ID mapping
# between the scraped projections, the ffanalytics player-ID table and the
# fantasy matchup data (week 2), then hard-patch nfl_id for scrape id 14600.
library(tidyverse)
# Week-2 scraped projections (list keyed by position, e.g. "K" = kickers).
scraps <- readRDS("./data/week2_scrap.rds")
# SCRAP
# Inspect NumberFire kicker rows for Carolina to locate the player of interest.
scraps[["K"]] %>%
  filter(team=="CAR", data_src=="NumberFire") %>%
  mutate(id=as.integer(id)) %>%
  glimpse()
# Look up the specific scrape id found above.
scraps[["K"]] %>%
  filter(id==14600) %>%
  glimpse()
# PLAYER ID
load("../ffanalytics/R/sysdata.rda") # <<- Players IDs !!!
# Coerce id columns to integer and patch the missing nfl_id for scrape id
# 14600 (Joey Slye -> NFL id 2563132).
player_ids <- player_ids %>%
  mutate(id=as.integer(id),
         nfl_id=as.integer(nfl_id)) %>%
  mutate(nfl_id = ifelse(id==14600,2563132, nfl_id)) %>%
  as_tibble()
# Sanity checks: a kicker with a complete mapping vs. the patched one.
player_ids %>%
  filter(numfire_id=="robbie-gould") %>%
  glimpse()
player_ids %>%
  filter(numfire_id=="joey-slye") %>%
  glimpse()
# FANTASY
matchups <- readRDS("./data/week2_matchups_json.rds")
x <- matchups %>%
  map(~as_tibble(.x[[1]]))
# Kickers on the away team of the first matchup (fantasy-side ids).
x[[1]]$matchup$awayTeam$players[[1]] %>%
  filter(position=="K") %>%
  glimpse()
### Keys Sequence ###
#
# Scrap (id) -> (id) Players_id (nfl_id) -> Fantasy(id)
|
8375b6f7dbe413fae7b5b0b02125f52ce316d67d
|
620be3848dbefbe162d700c001fe3b06e6f14826
|
/code/GT-Scan2.final.R
|
a9d47d2005e8157eb797d4fcb25b206664a6e491
|
[] |
no_license
|
BauerLab/GT-Scan2
|
db6ca6247fe0aa55c289d94e06ef6ce08e1347cf
|
8aaa84c8ee38af903f7bf0f8087592b79c65923b
|
refs/heads/master
| 2021-01-09T20:26:34.057748
| 2017-08-22T00:44:09
| 2017-08-22T00:44:09
| 64,459,202
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,766
|
r
|
GT-Scan2.final.R
|
library(randomForest)
library(ggplot2)
library(ROCR)
library(gplots)
library(reshape)
library(MASS)
library(caTools)
set.seed(18) # Seed is set so that the results are always reproducible
setwd('~/Desktop/GTscan_repository/')
#------------------------------------
# Functions
#------------------------------------
### Construct color vectors for heatmaps
### Colored based on class
## Map a binary class label to a heatmap side-bar colour:
## class "1" -> red, anything else -> blue.
color_map = function(classification) {
  if (classification == "1") {
    return("#FF0000")  # red for the positive class
  }
  "#0000FF"            # blue otherwise
}
### Construct color vectors for heatmaps
### Colored based on classifcaiont (TP, FP etc)
## Map a confusion-matrix label (TP/FN/TN/other) to a heatmap colour.
color_map2 = function(classification) {
  if (classification == "TP") return("#FF0000")  # red
  if (classification == "FN") return("#FFB90F")  # orange
  if (classification == "TN") return("#0000FF")  # blue
  "#6495ED"                                      # light blue (e.g. FP)
}
### Calc TPR (precision), TNR, Recall, F-score ###
## Confusion-matrix statistics for a regression-style model at threshold thr.
## predict(model, dat) scores are binarised at thr[1]; dat$Class holds the
## 0/1 ground truth. Returns list(Precision, Recall, NPV, F-score, TNR,
## Accuracy).
glm_calcStats = function(model, thr, dat) {
  scores <- predict(model, dat)
  predicted <- as.numeric(scores >= thr[1])
  actual <- dat$Class
  # Vectorised confusion-matrix counts (replaces the per-row loops).
  tp <- sum(actual == 1 & predicted == 1)
  fp <- sum(actual == 0 & predicted == 1)
  fn <- sum(actual == 1 & predicted == 0)
  tn <- sum(actual == 0 & predicted == 0)
  prec   <- tp / (tp + fp)
  rec    <- tp / (tp + fn)
  npv    <- tn / (tn + fn)
  fscore <- 2 * ((prec * rec) / (prec + rec))
  tnr    <- tn / (tn + fp)
  acc    <- (tp + tn) / (tp + tn + fp + fn)
  stats <- list(prec, rec, npv, fscore, tnr, acc)
  names(stats) <- c('Precision','Recall','NPV','F-score','TNR','Accuracy')
  return(stats)
}
### Calc TPR (precision), TNR, Recall, F-score ###
## Confusion-matrix statistics for a classification forest at threshold thr.
## The second column of predict(..., type = 'prob') (class-1 probability) is
## binarised at thr[1]; dat$Class holds the 0/1 ground truth. Returns
## list(Precision, Recall, NPV, F-score, TNR, Accuracy).
rf_calcStats = function(model, thr, dat) {
  prob <- predict(model, dat, type='prob')[, 2]
  predicted <- as.numeric(prob >= thr[1])
  actual <- dat$Class
  # Vectorised confusion-matrix counts (replaces the per-row loops).
  tp <- sum(actual == 1 & predicted == 1)
  fp <- sum(actual == 0 & predicted == 1)
  fn <- sum(actual == 1 & predicted == 0)
  tn <- sum(actual == 0 & predicted == 0)
  prec   <- tp / (tp + fp)
  rec    <- tp / (tp + fn)
  npv    <- tn / (tn + fn)
  fscore <- 2 * ((prec * rec) / (prec + rec))
  tnr    <- tn / (tn + fp)
  acc    <- (tp + tn) / (tp + tn + fp + fn)
  stats <- list(prec, rec, npv, fscore, tnr, acc)
  names(stats) <- c('Precision','Recall','NPV','F-score','TNR','Accuracy')
  return(stats)
}
### Find optimal cut-off threshold ###
### Find optimal cut-off threshold ###
## Given a ROCR performance object (as built in rf_cvTest: y.values = TPR,
## x.values = TNR, alpha.values = cut-offs), pick the cut-off maximising
## TPR + TNR and return c(threshold, TPR, TNR), each rounded to 3 d.p.
findThr = function(list) {
  thrRes = list
  ## Hoisted: the original recomputed the same which.max() three times.
  best <- which.max(thrRes@x.values[[1]] + thrRes@y.values[[1]])
  thr = round(thrRes@alpha.values[[1]][best], 3)
  tpr = round(thrRes@y.values[[1]][best], 3)
  tnr = round(thrRes@x.values[[1]][best], 3)
  return(c(thr, tpr, tnr))
}
### Calculate combined score
### Calculate combined score
## Combined = mean(sgRNAscorer.Hg, WU.CRISPR) when both are present;
## when WU.CRISPR is NA, Combined = sgRNAscorer.Hg minus the penalty `pen`.
## Vectorised (replaces the per-row loop, which also errored on a 0-row
## data.frame because 1:nrow(df) counts down from 1).
calcCombined = function(df, pen) {
  df$Combined <- ifelse(is.na(df$WU.CRISPR),
                        df$sgRNAscorer.Hg - pen,
                        (df$sgRNAscorer.Hg + df$WU.CRISPR) / 2)
  return(df)
}
### Build sequential RFs to find best signature
### Build sequential RFs to find best signature
# Fit a random forest on the top-k most important features for each
# k = 1 .. nrow(imp.ord) and record the average OOB error per k.
#
# Arguments:
#   train   - training data.frame; must contain a Class column
#   imp.ord - importance table with a Rank column (1 = most important) and
#             feature names as row names
#   reps    - number of forests averaged per feature count
#
# Returns a data.frame with columns Variables (k) and OOB (mean OOB error
# at ntree = 1001). The print() calls are progress output.
#
# NOTE(review): c(2:nrow(imp)) counts DOWN (2, 1) when nrow(imp) < 2, so a
# one-feature importance table would misbehave -- confirm inputs always
# contain at least two features.
seq_rf = function(train, imp.ord, reps) {
  dat.t = train
  imp = imp.ord
  # k = 1 handled separately: a single column needs as.data.frame() plus
  # explicit col/row names to stay a labelled data.frame.
  genes = row.names(subset(imp, Rank<=1))
  dat.sub = as.data.frame(dat.t[,c(genes)])
  colnames(dat.sub) = genes
  row.names(dat.sub) = row.names(dat.t)
  dat.sub$Class = dat.t$Class
  oob = 0
  print(1)
  for (j in c(1:reps)) {
    rf = randomForest(data=dat.sub, Class~., ntree=1001, type='classification', mtry=1)
    # Accumulate the final-tree OOB error; averaged below over reps.
    oob = oob + as.numeric(rf$err.rate[1001,1])
  }
  oob = oob/reps
  error = data.frame(Variables=1, OOB=oob)
  # k = 2 .. nrow(imp): grow the feature set in importance order.
  for (i in c(2:nrow(imp))) {
    print(i)
    genes = row.names(subset(imp, Rank<=i))
    dat.sub = dat.t[,c(genes)]
    dat.sub$Class = dat.t$Class
    oob = 0
    for (j in c(1:reps)) {
      # NOTE(review): mtry = i uses ALL selected features at every split.
      rf = randomForest(data=dat.sub, Class~., ntree=1001, type='classification', mtry=i)
      oob = oob + as.numeric(rf$err.rate[1001,1])
    }
    oob = oob/reps
    error = rbind(error, c(i,oob))
  }
  return(error)
}
#Construct ROC for public models
## Build ROC and precision/recall performance objects (ROCR) for prediction
## scores `pred` against true labels `class`. Returns a named list:
## AUC (a deparse()d string rounded to 3 d.p., matching this file's
## convention), ROC (tpr vs fpr) and PrecRec (prec vs rec).
constructROC = function(pred, class) {
  rocr.pred <- prediction(pred, as.factor(class))
  auc.perf  <- performance(rocr.pred, measure = 'auc', x.measure = 'cutoff')
  list(
    AUC     = deparse(round(as.numeric(auc.perf@y.values), 3)),
    ROC     = performance(rocr.pred, 'tpr', 'fpr'),
    PrecRec = performance(rocr.pred, 'prec', 'rec')
  )
}
### CV testing on RF models
### CV testing on RF models
# Cross-validated evaluation of a random forest classifier.
#
# Arguments:
#   df   - data.frame with a Class column (0/1 factor or labels)
#   fold - number of CV iterations; each draws nrow(df)/fold test rows
#   m    - NOTE(review): unused in the body; presumably kept for interface
#          compatibility with callers -- confirm before removing.
#
# Returns a list: AUC (deparse()d string), plus ROCR performance objects
# ROC (tpr/fpr), PrecRec (prec/rec) and SensSpe (tpr/tnr).
#
# NOTE(review): each iteration samples its test set from the FULL data, so
# "folds" can overlap across iterations -- this is repeated random
# subsampling, not strict k-fold CV. Confirm that is intended.
rf_cvTest = function(df, fold, m) {
  dat = df
  nTest = round(nrow(dat)/fold,0)
  # Accumulators seeded with a dummy row/value, removed after the loop.
  pred = c(0,0)
  class = c(0)
  for (k in (c(1:fold))) {
    dat.test = dat[sample(nrow(dat), nTest),]
    class = c(class, as.numeric(as.character(dat.test$Class)))
    # Train on everything not drawn for this iteration's test set.
    dat.train = dat[!(row.names(dat) %in% row.names(dat.test)),]
    cv.rf = randomForest(Class~., data=dat.train, ntree=10001, type='prob')
    predi = predict(cv.rf, dat.test, type='prob')
    pred = rbind(pred, predi)
  }
  # Drop the dummy seed row/value.
  pred = pred[-1,]
  class = class[-1]
  # Score with the class-1 probability column.
  pred.to.roc = pred[,2]
  pred.rocr = prediction(pred.to.roc, as.factor(class))
  perf.rocr = performance(pred.rocr, measure='auc', x.measure='cutoff')
  AUC = deparse(round(as.numeric(perf.rocr@y.values),3))
  perf.tpr.rocr = performance(pred.rocr,'tpr','fpr')
  perf.precrec.rocr = performance(pred.rocr,'prec','rec')
  perf.sens.rocr = performance(pred.rocr,'tpr','tnr')
  res = list()
  res[[1]] = AUC
  res[[2]] = perf.tpr.rocr
  res[[3]] = perf.precrec.rocr
  res[[4]] = perf.sens.rocr
  names(res) = c('AUC','ROC','PrecRec','SensSpe')
  return(res)
}
### Calculate area under Prec/Rec curve
## Area under the precision/recall curve via trapezoidal integration
## (caTools::trapz). `list` is a result list whose third element is a ROCR
## prec/rec performance object; NA precisions (undefined at recall 0) are
## treated as 0, matching the original behaviour.
calcAUPRC = function(list) {
  pr <- data.frame(Recall = list[[3]]@x.values[[1]],
                   Precision = list[[3]]@y.values[[1]])
  pr[is.na(pr)] <- 0
  round(trapz(pr$Recall, pr$Precision), 3)
}
### Test RF model on validation set
### Test RF model on validation set
## Evaluate a fitted random forest on validation data `df` (must contain a
## Class column). Returns list(AUC, ROC, PrecRec) as in constructROC.
## One vectorised predict() call replaces the old per-row loop; besides being
## O(1) predict calls instead of O(n), the old dummy-row removal pred[-1,]
## collapsed the matrix to a vector when nrow(df) == 1, breaking pred[,2].
rf_valTest = function(df, model) {
  pred <- predict(model, df, type = 'prob')
  pred.to.roc <- pred[, 2]
  pred.rocr <- prediction(pred.to.roc, as.factor(df$Class))
  perf.rocr <- performance(pred.rocr, measure = 'auc', x.measure = 'cutoff')
  res <- list(
    AUC     = deparse(round(as.numeric(perf.rocr@y.values), 3)),
    ROC     = performance(pred.rocr, 'tpr', 'fpr'),
    PrecRec = performance(pred.rocr, 'prec', 'rec')
  )
  return(res)
}
### Function to convert FPKM to binary
### Function to convert FPKM to binary
## Binarise the FPKM column of `df`: values below `cutoff` become 0, all
## others become 1. Vectorised (replaces the per-row loop, which also
## errored on a 0-row data.frame because 1:nrow(df) counts down from 1).
formatExpression = function(df, cutoff) {
  dat = df
  dat$FPKM <- as.numeric(dat$FPKM >= cutoff)
  return(dat)
}
#------------------------------------
# Workflow
#------------------------------------
### Read in datasets
c.dat = read.delim(sep='\t', header=T, row.names=1, file='data/Chari.featureTable.txt') # Training dataset
h.dat = read.delim(sep='\t', header=T, row.names=1, file='data/Horlbeck.featureTable.txt') # Validation set 1
### Read in scores from other models
c.sco = read.delim(sep='\t', header=T, row.names=1, file='data/Chari.scores.txt')
h.sco = read.delim(sep='\t', header=T, row.names=1, file='data/Horlbeck.scores.txt')
### Format datasets
c.dat$Class = as.factor(c.dat$Class)
h.dat$Class = as.factor(h.dat$Class)
### Perform feature selection
rf.rf = randomForest(Class ~ ., data=c.dat, importance=T, ntree=10001) # Full model
ch.rf = randomForest(Class ~ . - Combined - FPKM, data=c.dat, importance=T, ntree=10001) # Chromatin only model
rf.imp = as.data.frame(varImpPlot(rf.rf)) # Collect feature importance
ch.imp = as.data.frame(varImpPlot(ch.rf))
rf.imp = rf.imp[order(-rf.imp$MeanDecreaseGini),]
ch.imp = ch.imp[order(-ch.imp$MeanDecreaseGini),]
rf.imp$Rank = c(1:nrow(rf.imp))
ch.imp$Rank = c(1:nrow(ch.imp))
rf.error = seq_rf(c.dat, rf.imp, 3) # Build sequential models, in order of feature importance
ch.error = seq_rf(c.dat, ch.imp, 3)
### Plot results of sequential error
rf.errorPlot = ggplot(rf.error, aes(x=Variables, y=OOB)) +
geom_point() +
labs(x='Number of features', y='Average OOB error')
rf.errorPlot
rf.min = as.numeric(row.names(subset(rf.error, OOB<=min(rf.error$OOB))))[1] # Find minimum number of features for best model
rf.vars = row.names(subset(rf.imp, Rank<=rf.min))
ch.min = as.numeric(row.names(subset(ch.error, OOB<=min(ch.error$OOB))))[1]
ch.vars = row.names(subset(ch.imp, Rank<=ch.min))
### Perform 10-fold CV
rf.cvRes = rf_cvTest(subset(c.dat, select=c(rf.vars,'Class')),10,rf.min)
### Construct ROC curves for other models for comparison
ch.cvRes = rf_cvTest(subset(c.dat, select=c(ch.vars,'Class')),10,ch.min)
wu.cvRes = constructROC(c.sco$WU.CRISPR, c.sco$Class) # sgRNA only model
tr.cvRes = constructROC(c.dat$FPKM, c.dat$Class) # Transcription only model
### Plot ROC results
plot(rf.cvRes[[2]], col='blue', lwd=3)
plot(ch.cvRes[[2]], col='red', lwd=3, add=TRUE)
plot(wu.cvRes[[2]], col='orange', lwd=3, add=TRUE)
plot(tr.cvRes[[2]], col='black', lwd=3, add=TRUE)
### Construct final model
rf = randomForest(Class ~ ., data=c.dat[,c(rf.vars,'Class')], ntree=10001)
### Validate on Horlbeck dataset
rf.h.valRes = rf_valTest(h.dat, rf)
sg.h.valRes = constructROC(h.sco$sgRNAscorer.Hg, h.sco$Class)
wu.h.valRes = constructROC(h.sco$WU.CRISPR, h.sco$Class)
az.h.valRes = constructROC(h.sco$Azimuth, h.sco$Class)
### Plot Precision/Recall results
plot(rf.h.valRes[[3]], col='blue', lwd=3, ylim=c(0.5,1), xlim=c(0,0.5))
plot(sg.h.valRes[[3]], col='darkgreen', lwd=3, add=TRUE)
plot(wu.h.valRes[[3]], col='orange', lwd=3, add=TRUE)
plot(az.h.valRes[[3]], col='purple', lwd=3, add=TRUE)
|
295c8670620f1848afc853131ee9833746f07106
|
867bc76d8c0d3194a435821e60fcc20c22ed4ff1
|
/man/tessituraR.Rd
|
77d8f1edeb17a8c9a12eb7e1632d8117ea458d75
|
[] |
no_license
|
gdgkirkley/tessituraR
|
9455f649ae1d4502cd2d8eeebac97bcf2cfffc6d
|
c7b873d2bb3ed08ac03da10eff3dccbc85547884
|
refs/heads/master
| 2022-02-17T19:16:34.943637
| 2022-01-30T20:59:23
| 2022-01-30T20:59:23
| 199,735,570
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 472
|
rd
|
tessituraR.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tessituraR-package.R
\docType{package}
\name{tessituraR}
\alias{tessituraR}
\title{tessituraR: Interacting With the Tessitura API}
\description{
A package to help you work with the Tessitura API. At the moment,
this is just a baby package to help with API calls, but the goal is to
create a package that will help to do all kinds of data analysis with
knowledge of Tessitura data structures.
}
|
5597837a7286fdfc0d7b940040691381eac28703
|
59ffd5c658d5c0d076c6c34c0d0ea55183ea83f9
|
/cachematrix.R
|
84290bec829229da036f9f29401dc760d78881be
|
[] |
no_license
|
astutibhagat/ProgrammingAssignment2
|
4528c12a60a902c386a315beb3b7cd119fd66fc1
|
00dd313b02f3d0b1e554f1ca14a15e506e9b7a8c
|
refs/heads/master
| 2022-06-06T04:01:32.539117
| 2020-05-04T16:47:55
| 2020-05-04T16:47:55
| 260,979,592
| 0
| 0
| null | 2020-05-03T17:05:19
| 2020-05-03T17:05:18
| null |
UTF-8
|
R
| false
| false
| 1,202
|
r
|
cachematrix.R
|
## Creates a special matrix object that can cache its inverse
## Create a special "matrix" wrapper that can cache its inverse.
## Returns a list of closures sharing state: set/get for the matrix itself,
## setInverse/getInverse for the cached inverse. set() replaces the matrix
## and invalidates any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL until setInverse() is called
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # new matrix, so the old inverse is stale
    },
    get = function() x,
    setInverse = function(solveMatrix) inv <<- solveMatrix,
    getInverse = function() inv
  )
}
## Compute the inverse of the special matrix returned by "makeCacheMatrix" above.
## If the inverse has already been calculated (and the matrix has not changed),
## then the "cachesolve" should retrieve the inverse from the cache.
## Return the inverse of the special "matrix" created by makeCacheMatrix,
## computing it at most once: a previously cached inverse is returned
## immediately (with a message), otherwise the inverse is computed with
## solve() and stored back on the object for future calls.
cacheSolve <- function(x, ...) {
  ## Return the inverse only if already set
  inv <- x$getInverse()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  ## Get the matrix from our object and invert it.
  data <- x$get()
  inv <- solve(data, ...)  # FIX: forward ... (e.g. tol=) to solve(); it was silently dropped
  x$setInverse(inv)
  ## Return the matrix
  inv
}
|
585827ca6916c6ed7b4e83e43a09677309e7f3c3
|
bfb553d2e7b63698354d1b4d6f515161d1550c6f
|
/meanFunctions/value_to_format.R
|
b0cc7bce3fc4220fa390298df614311e95173003
|
[] |
no_license
|
tkappen/recodePDFtable
|
c1653b38d46d31eb1983d6d0bfe4cb17cfe1fb05
|
3d8fe187b2554564a89c0f9cf223ace34205ce8e
|
refs/heads/master
| 2021-01-21T13:08:28.509295
| 2016-05-31T23:46:40
| 2016-05-31T23:46:40
| 48,926,658
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,533
|
r
|
value_to_format.R
|
# All these functions assume the use of
# baseExpr(), baseExprSub(), cellTypes(), valueTypes()
# Get first column of each table
# or return set if only one column
# Return the first column of a matrix/data.frame, or `x` itself when it has
# no dim attribute (i.e. it is already a plain vector / single column).
firstCol <- function(x) {
  if (is.null(dim(x))) {
    return(x)
  }
  x[, 1]
}
# Get all the default types and formats
# for a single table
tableDefaultTypes <- function(x, tables = NULL, name = "") {
# Get all groups linked to their types
# Types in rownames for selection
grps <- baseExpr()$group
names(grps) <- baseExpr()$type
# Get groups for dataset
cellgrps <- grps[firstCol(x)]
# Get celltypes for dataset
celltypes <- cellTypes()
celltype <- celltypes[celltypes$ranking==1,]
if (!is.null(tables)) {
if (any(cellgrps == "x_")) {
cat(name)
cat("\n")
print(tables, quote = FALSE)
fp <- readinteger("\nIs this a frequency (press 1), mean (2) or a percentage (3)?\n")
celltype[celltype$group == "x_",] <-
with(celltypes,(celltypes[group == "x_" & ranking == fp,]))
}
}
row.names(celltype) <- celltype$group
celltype[cellgrps,]
}
# Get all the default types and formats
# for a list of tables according to a tableList object (read.multi())
getDefaultTypes <- function(x, checkFormats = FALSE) {
if(class(x)!="tableList") {
stop('x is not a tableList object')
}
y <- grepTableSet(x, rowtype=TRUE)
if (checkFormats == TRUE) {
name <- names(y)
dT <- mapply(tableDefaultTypes, y, tables = x[1,],
name = name, SIMPLIFY=FALSE)
} else {
dT <- lapply(y, tableDefaultTypes)
}
class(dT) <- "TypeList"
return(dT)
}
|
fd9d3f7d2877e004402caaa71d778b79eb773f45
|
28744c7a392f341b23db2662621fd0fd5754b21e
|
/man/RAM.plotting.Rd
|
a80920ef45849efd05c15d89542df99a8d9e76f8
|
[] |
no_license
|
randomeffect/RAM
|
3b2a9053cc996bd62cd8f259a34bcd706427c7d4
|
7b869e69182c0ad303891bf07fc0471936db13aa
|
refs/heads/master
| 2020-03-23T15:58:55.970950
| 2018-05-15T14:38:34
| 2018-05-15T14:38:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,248
|
rd
|
RAM.plotting.Rd
|
\name{RAM.plotting}
\alias{RAM.plotting}
\title{
Creating Plots with RAM
}
\description{
This help page details the standards for RAM plotting functions.
}
\section{Overview}{
The RAM package contains many functions to produce plots and
visualizations for metagenomic data. Currently, the plotting
functions are grouped into 'casual' and 'publication' categories.
The 'casual' plotting functions only accept a \code{file}
argument and produce a \code{.tiff} file automatically. They
are meant to quickly highlight certain aspects of the data, but
are not meant to be published. The 'publication' quality plots
accept many more graphing parameters, and should be of suitable
quality for future publication. All 'publication' plots should
accept the following parameters, in addition to those required
to produce the plot:
\itemize{
\item \code{"file"} the file name for the plot.
\item \code{"ext"} the file type for the plot (see below).
\item \code{"height"} the height of the plot, in inches.
\item \code{"width"} the width of the plot, in inches.
}
Additionally, the following parameters are accepted by some
functions:
\itemize{
\item \code{"bw"} should the plot be in black and white?
}
For 'casual' plots, if \code{file} is not provided, the plot is
displayed to the default graphics device (usually a new window),
otherwise a \code{.tiff} file is created.
For 'publication' plots, if neither \code{file} nor \code{ext}
are provided, the plot is displayed to the default graphics
device (usually a new window). If both \code{file} and
\code{ext} are provided, a file of type \code{ext} is created
at \code{file}.
If only one of \code{file} or \code{ext} is given, an error is
raised.
In either case, the file extension will automatically be
appended to the end of \code{file}, if \code{file} does not
already end in the appropriate extension.
For example, if \code{file = ~/my/path.tiff} and
\code{ext = png}, the file will be called
\code{~/my/path.tiff.png}; but if \code{file = ~/my/path.png},
the file will just be called \code{~/my/path.png}.
Finally, if \code{file = ~/my/path}, the file will be called
\code{~/my/path.png}.
}
\section{ggplot2}{
Furthermore, some of the 'publication' quality plots allow for
a \code{ggplot2} argument. If \code{ggplot2} is \code{TRUE},
then the plot will be produced using the \code{ggplot2} package,
and a \code{ggplot} object will be returned. This allows for
additional, personal customization of the plot for those who
have used the package before. If \code{ggplot2} is \code{FALSE},
then the plot will be created using the base plotting functions.
}
\section{File Types}{
For 'publication' quality plots, the following file types are
supported (use any of the following values for the \code{ext}
argument): \code{"pdf"}, \code{"png"}, \code{"tiff"},
\code{"svg"}, \code{"bmp"}, \code{"jpeg"}.
}
\section{Note}{
If \code{file} is given without a directory
(e.g. \code{file} = my_fancy_file), then that file will be
created in the current working directory (see \code{?getwd} and
\code{?setwd} for more information).
The current default resolution is 1000 dpi for all plots.
}
\section{See Also}{
\code{\link{ggplot}}
}
\section{Author(s)}{
Wen Chen and Joshua Simpson.
}
|
a6fb3a8fdd88de5af8251eacbcbe0520e411c18d
|
0d5a4bbceeda2fb1b0beb559099ae68e518082cc
|
/server.R
|
88ec05037891f4928ac7d63299bce010515b948a
|
[] |
no_license
|
sdlee087/Group-Ranking-Lasso
|
6fc1a27dc1ef2dfd757fe12ae9d143d3ceb67e6a
|
ec79c687cb0e465ccee0d91cdc1c8a5d2134443d
|
refs/heads/master
| 2020-05-23T12:26:46.064742
| 2019-05-26T06:16:08
| 2019-05-26T06:16:08
| 186,757,500
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,016
|
r
|
server.R
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#

library(shiny)
library(tidyverse)

## Preliminaries: load the precomputed Group-Ranking-Lasso estimates,
## keeping the group column as a factor for a discrete colour scale.
GRL_tib <- read_csv("GRL_tib.csv", col_types = cols(group = col_factor()))

shinyServer(function(input, output) {

  # Solution paths of u over lambda1, holding lambda2 at the
  # user-selected value.
  output$Plot1 <- renderPlot({
    slice_l2 <- filter(GRL_tib, lambda2 == input$lambda2)
    base_plot <- ggplot(slice_l2,
                        aes(x = lambda1, y = u, group = team, colour = group))
    base_plot +
      geom_line(alpha = 0.6) +
      labs(title = paste0("Estimated u at lambda2 = ", input$lambda2)) +
      coord_cartesian(ylim = c(0, 3)) +
      scale_color_brewer(palette = "Dark2")
  })

  # Solution paths of u over lambda2, holding lambda1 at the
  # user-selected value.
  output$Plot2 <- renderPlot({
    slice_l1 <- filter(GRL_tib, lambda1 == input$lambda1)
    base_plot <- ggplot(slice_l1,
                        aes(x = lambda2, y = u, group = team, colour = group))
    base_plot +
      geom_line(alpha = 0.6) +
      labs(title = paste0("Estimated u at lambda1 = ", input$lambda1)) +
      coord_cartesian(ylim = c(0, 3)) +
      scale_color_brewer(palette = "Dark2")
  })
})
|
e6692d6b7857109725f8e86eae23edf405ecbc2a
|
b092ca5a112dd92ea209b9e4c028437d88a905b8
|
/man-roxygen/core_template.R
|
3153818f65c716729b103335b1ee0b5882c3801a
|
[] |
no_license
|
kaneplusplus/bigmemory
|
33d7d853dfdd36110d4837d65f95eb357e70e7e9
|
177c761821a2f5d8c48cb88a9dc344ed47dc8697
|
refs/heads/master
| 2022-09-21T11:55:01.223373
| 2022-09-19T14:14:18
| 2022-09-19T14:14:18
| 27,275,919
| 129
| 30
| null | 2021-06-14T15:48:01
| 2014-11-28T17:14:24
|
C++
|
UTF-8
|
R
| false
| false
| 8,383
|
r
|
core_template.R
|
#' @rdname big.matrix
#' @title The core "big.matrix" operations.
#' @description Create a \code{big.matrix} (or check to see if an object
#' is a \code{big.matrix}, or create a \code{big.matrix} from a
#' \code{\link{matrix}}, and so on). The \code{big.matrix} may be file-backed.
#' @param x a \code{matrix}, \code{vector}, or \code{data.frame} for
#' \code{as.big.matrix}; if a vector, a one-column\cr \code{big.matrix} is
#' created by \code{as.big.matrix}; if a \code{data.frame}, see details.
#' For the \code{is.*} functions, \code{x} is likely a \code{big.matrix}.
#' @param nrow number of rows.
#' @param ncol number of columns.
#' @param type the type of the atomic element
#' (\code{options()$bigmemory.default.type} by default -- \code{"double"} --
#' but can be changed by the user to \code{"integer"}, \code{"short"}, or
#' \code{"char"}).
#' @param init a scalar value for initializing the matrix (\code{NULL} by
#' default to avoid unnecessary time spent doing the initializing).
#' @param dimnames a list of the row and column names; use with caution
#' for large objects.
#' @param separated use separated column organization of the data;
#' see details.
#' @param backingfile the root name for the file(s) for the cache of \code{x}.
#' @param backingpath the path to the directory containing the file
#' backing cache.
#' @param descriptorfile the name of the file to hold the backingfile
#' description, for subsequent use with \code{\link{attach.big.matrix}};
#' if \code{NULL}, the \code{backingfile} is used as the root part of the
#' descriptor file name. The descriptor file is placed in the same directory
#' as the backing files.
#' @param binarydescriptor the flag to specify if the binary RDS format
#' should be used for the backingfile description, for subsequent use with
#' \code{\link{attach.big.matrix}}; if \code{NULL} of \code{FALSE}, the
#' \code{dput()} file format is used.
#' @param shared \code{TRUE} by default, and always \code{TRUE} if the
#' \code{big.matrix} is file-backed. For a non-filebacked \code{big.matrix},
#' \code{shared=FALSE} uses non-shared memory, which can be more stable for
#' large (say, >50% of RAM) objects. Shared memory allocation can sometimes
#' fail in such cases due to exhausted shared-memory resources in the system.
#' @param address an \code{externalptr}, so \code{is.nil(x@@address)} might
#' be a sensible thing to want to check, but it's pretty obscure.
#' @details A \code{big.matrix} consists of an object in \R that does nothing
#' more than point to the data structure implemented in \acronym{C++}. The
#' object acts much like a traditional \R matrix, but helps protect the user
#' from many inadvertent memory-consuming pitfalls of traditional \R matrices
#' and data frames.
#'
#' There are two \code{big.matrix} types which manage
#' data in different ways. A standard, shared \code{big.matrix} is constrained
#' to available \acronym{RAM}, and may be shared across separate \R processes.
#' A file-backed \code{big.matrix} may exceed available \acronym{RAM} by
#' using hard drive space, and may also be shared across processes. The
#' atomic types of these matrices may be \code{double}, \code{integer},
#' \code{short}, or \code{char} (8, 4, 2, and 1 bytes, respectively).
#'
#' If \code{x} is a \code{big.matrix}, then \code{x[1:5,]} is returned as an R
#' \code{matrix} containing the first five rows of \code{x}. If \code{x} is of
#' type \code{double}, then the result will be \code{numeric}; otherwise, the
#' result will be an \code{integer} \R matrix. The expression \code{x} alone
#' will display information about the \R object (e.g. the external pointer)
#' rather than evaluating the matrix itself (the user should try \code{x[,]}
#' with extreme caution, recognizing that a huge \R \code{matrix} will
#' be created).
#'
#' If \code{x} has a huge number of rows and/or columns, then the use of
#' \code{rownames} and/or \code{colnames} will be extremely memory-intensive
#' and should be avoided. If \code{x} has a huge number of columns and
#' \code{separated=TRUE} is used (this isn't typically recommended),
#' the user might want to store the transpose as there is overhead of a
#' pointer for each column in the matrix. If \code{separated} is \code{TRUE},
#' then the memory is allocated into separate vectors for each column.
#' Use this option with caution if you have a large number of columns, as
#' shared-memory segments are limited by OS and hardware combinations. If
#' \code{separated} is \code{FALSE}, the matrix is stored in traditional
#' column-major format. The function \code{is.separated()} returns the
#' separation type of the \code{big.matrix}.
#'
#' When a \code{big.matrix}, \code{x}, is passed as an argument
#' to a function, it is essentially providing call-by-reference rather than
#' call-by-value behavior. If the function modifies any of the values of
#' \code{x}, the changes are not limited in scope to a local copy within the
#' function. This introduces the possibility of side-effects, in contrast to
#' standard \R behavior.
#'
#' A file-backed \code{big.matrix} may exceed available \acronym{RAM} in size
#' by using a file cache (or possibly multiple file caches, if
#' \code{separated=TRUE}). This can incur a substantial performance penalty for
#' such large matrices, but less of a penalty than most other approaches for
#' handling such large objects. A side-effect of creating a file-backed object
#' is not only the file-backing(s), but a descriptor file (in the same
#' directory) that is needed for subsequent attachments (see
#' \code{\link{attach.big.matrix}}).
#'
#' Note that we do not allow setting or changing the \code{dimnames} attributes
#' by default; such changes would not be reflected in the descriptor objects or
#' in shared memory. To override this, set
#' \code{options(bigmemory.allow.dimnames=TRUE)}.
#'
#' It should also be noted that a user can create an ``anonymous'' file-backed
#' \code{big.matrix} by specifying "" as the \code{backingfile} argument.
#' In this case, the backing resides in the temporary directory and a
#' descriptor file is not created. These should be used with caution since
#' even anonymous backings use disk space which could eventually fill the
#' hard drive. Anonymous backings are removed either manually, by a
#' user, or automatically, when the operating system deems it appropriate.
#'
#' Finally, note that \code{as.big.matrix} can coerce data frames. It does
#' this by making any character columns into factors, and then making all
#' factors numeric before forming the \code{big.matrix}. Level labels are
#' not preserved and must be managed by the user if desired.
#' @return A \code{big.matrix} is returned (for \code{big.matrix} and
#' \code{filebacked.big.matrix}, and\cr \code{as.big.matrix}),
#' and \code{TRUE} or \code{FALSE} for \code{is.big.matrix} and the
#' other functions.
#' @author John W. Emerson and Michael J. Kane
#' \email{bigmemoryauthors@gmail.com}
#' @references The Bigmemory Project: \url{http://www.bigmemory.org/}.
#' @seealso \code{\link{bigmemory}}, and perhaps the class documentation of
#' \code{\linkS4class{big.matrix}}; \code{\link{attach.big.matrix}} and
#' \code{\link{describe}}. Sister packages \pkg{biganalytics}, \pkg{bigtabulate},
#' \pkg{synchronicity}, and \pkg{bigalgebra} provide advanced functionality.
#' @examples
#' x <- big.matrix(10, 2, type='integer', init=-5)
#' options(bigmemory.allow.dimnames=TRUE)
#' colnames(x) <- c("alpha", "beta")
#' is.big.matrix(x)
#' dim(x)
#' colnames(x)
#' rownames(x)
#' x[,]
#' x[1:8,1] <- 11:18
#' colnames(x) <- NULL
#' x[,]
#'
#' # The following shared memory example is quite silly, as you wouldn't
#' # likely do this in a single R session. But if zdescription were
#' # passed to another R session via SNOW, foreach, or even by a
#' # simple file read/write, then the attach.big.matrix() within the
#' # second R process would give access to the same object in memory.
#' # Please see the package vignette for real examples.
#'
#' z <- big.matrix(3, 3, type='integer', init=3)
#' z[,]
#' dim(z)
#' z[1,1] <- 2
#' z[,]
#' zdescription <- describe(z)
#' zdescription
#' y <- attach.big.matrix(zdescription)
#' y[,]
#' y
#' z
#' y[1,1] <- -100
#' y[,]
#' z[,]
#' @keywords classes methods
|
ebc4b1eb52494fa6ff508b6d6442dde2e2c4f5cd
|
421bfc4f8d25598823e93ea75c6ab6cef2cb0c50
|
/man/MUS.Rd
|
af975bb3f9245d085485d131cc7bf4575a9bc0dd
|
[] |
no_license
|
LeoEgidi/pivmet
|
f65c46fdf78d479585105984ba41c16988ae2c7e
|
1ff4ebaf8a2f42f4319660fd2a2c26ca80e9260b
|
refs/heads/master
| 2023-03-05T12:54:12.568863
| 2023-02-22T14:00:14
| 2023-02-22T14:00:14
| 139,981,969
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,707
|
rd
|
MUS.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mus.R
\name{MUS}
\alias{MUS}
\title{MUS algorithm}
\usage{
MUS(C, clusters, prec_par = 10)
}
\arguments{
\item{C}{\eqn{N \times N} matrix with a non-negligible number of zeros.
For instance, a similarity matrix estimated from a \eqn{N \times D} data matrix whose rows
are statistical units, or a co-association matrix resulting from clustering
ensembles.}
\item{clusters}{A vector of integers from \eqn{1:k}
indicating the cluster to which each point is allocated (it requires \eqn{k < 5}, see Details).}
\item{prec_par}{Optional argument. The maximum number of candidate pivots for each group.
Default is 10.}
}
\value{
\item{\code{pivots}}{ A vector of integers in 1:N denoting the indices of the \code{k} selected pivotal units.}
\item{\code{prec_par}}{The effective number of alternative pivots considered for each group. See Details.}
}
\description{
Perform Maxima Units Search (MUS) algorithm on a large and sparse matrix in
order to find a set of pivotal units through a sequential search
in the given matrix.
}
\details{
Consider \eqn{H} distinct partitions of a set of \eqn{N} \eqn{d}-dimensional
statistical units into \eqn{k}
groups determined by some
clustering technique. A \eqn{N \times N} co-association matrix
\eqn{C} with generic element \eqn{c_{i,j}=n_{i,j}/H} can be constructed,
where \eqn{n_{i,j}} is the number of times the \eqn{i}-th and the \eqn{j}-th unit
are assigned to the same cluster with respect to the clustering ensemble.
Units which are very distant
from each other are likely to have zero co-occurrences; as a consequence,
\eqn{C} is
a square symmetric matrix expected to contain a non-negligible number of zeros.
The main task of the MUS algorithm is to detect submatrices of small
rank from the co-association matrix
and extract those units---pivots---such
that the \eqn{k \times k} submatrix of \eqn{C},
determined by only the pivotal rows
and columns indexes, is identical or nearly identical.
Practically, the resulting units
have the desirable property to be representative of
the group they belong to.
With the argument \code{prec_par} the user may increase
the power of the underlying MUS algorithm (see Egidi et al. 2018 for details).
Given the default value 10, the function internally computes an
effective \code{prec_par} as \eqn{\min( 10, \min n_j )},
where \eqn{n_j} is the number of units belonging to the group
\eqn{j, \ j=1,\ldots,k}.
}
\examples{
# Data generated from a mixture of three bivariate Gaussian distributions
\dontrun{
N <- 620
centers <- 3
n1 <- 20
n2 <- 100
n3 <- 500
x <- matrix(NA, N,2)
truegroup <- c( rep(1,n1), rep(2, n2), rep(3, n3))
x[1:n1,]=rmvnorm(n1, c(1,5), sigma=diag(2))
x[(n1+1):(n1+n2),]=rmvnorm(n2, c(4,0), sigma=diag(2))
x[(n1+n2+1):(n1+n2+n3),]=rmvnorm(n3, c(6,6), sigma=diag(2))
# Build a similarity matrix from clustering ensembles
H <- 1000
a <- matrix(NA, H, N)
for (h in 1:H){
a[h,] <- kmeans(x,centers)$cluster
}
sim_matr <- matrix(NA, N,N)
for (i in 1:(N-1)){
for (j in (i+1):N){
sim_matr[i,j] <- sum(a[,i]==a[,j])/H
sim_matr[j,i] <- sim_matr[i,j]
}
}
# Obtain a clustering solution via kmeans with multiple random seeds
cl <- KMeans(x, centers)$cluster
# Find three pivots
mus_alg <- MUS(C = sim_matr, clusters = cl)
}
}
\references{
Egidi, L., Pappadà, R., Pauli, F., Torelli, N. (2018).
Maxima Units Search(MUS) algorithm:
methodology and applications. In: Perna, C. , Pratesi, M., Ruiz-Gazen A. (eds.) Studies in
Theoretical and Applied Statistics,
Springer Proceedings in Mathematics and Statistics 227, pp. 71–81.
}
\author{
Leonardo Egidi \email{legidi@units.it}, Roberta Pappadà
}
|
f9ef6f4d2b4142ab9173afc5dd12d2beae64ed86
|
03feb9c4bd7ab71a580a05857133b9a1196bdc92
|
/factors.R
|
39c11ace927b9da2e24d2ba6f9948d391eb4c818
|
[] |
no_license
|
Pratijna/analytics1
|
abeb046cc4af650ab80b7fd65b712f313eecdd5e
|
28225e3f48ac919cde4a5391efb9ad9b00b96183
|
refs/heads/master
| 2020-04-02T16:33:24.394246
| 2018-10-28T09:22:20
| 2018-10-28T09:22:20
| 154,617,694
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 509
|
r
|
factors.R
|
# factors: creating, summarising, ordering, and plotting factor variables.

# Simulate 20 gender labels. summary() on the raw character vector only
# reports length/class; converting to a factor gives per-level counts.
gender <- sample(c("M", "F"), size = 20, replace = TRUE)
summary(gender)
genderF <- factor(gender)
summary(genderF)
genderF

# Likert-type scale with four categories.
likscale <- sample(c("Ex", "Good", "Sat", "Poor"), size = 20, replace = TRUE)
summary(likscale)
class(likscale)
likscaleF <- factor(likscale)
summary(likscaleF)
class(likscaleF)
# Ordered factor: levels run from worst to best, so comparisons and
# plotting respect the scale's natural order.
likscaleOF <- factor(likscale, ordered = TRUE, levels = c("Poor", "Sat", "Good", "Ex"))
summary(likscaleOF)
likscaleOF
# Bar charts of the (unordered vs ordered) level counts.
barplot(table(likscaleF), col = 1:4)
barplot(table(likscaleOF), col = 1:4)
names(mtcars)
# Fixed: the original ended with the bare expression `cyl, gear, am, vs`,
# which is a syntax error and prevented the whole file from parsing.
# These mtcars columns are the natural factor candidates:
# cyl, gear, am, vs
|
dc035cd5a80e7915b58e9e804c6d3bae1a40df76
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/CENFA/R/covij.R
|
5997e4946fd7ef4bfcc48826efd7c5dee78725c2
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,110
|
r
|
covij.R
|
#' @keywords internal
# @importFrom raster cellStats filename extension
# Weighted covariance between two raster layers x and y.
#   w:      optional weights layer (NULL for unweighted).
#   sample: denominator correction — 1 for sample covariance (n - 1),
#           0 for population covariance.
# NOTE(review): the default `sample = sample` is self-referential and
# only works because callers always supply the argument — confirm.
.covij <- function(x, y, w, sample = sample){
  # Small rasters are pulled into memory and handled with plain vector
  # arithmetic; large ones stay on disk via raster algebra.
  sm <- canProcessInMemory(x)
  if (sm) {
    # Center both layers around their means (NAs ignored).
    x <- values(x)
    x <- x - mean(x, na.rm = TRUE)
    y <- values(y)
    y <- y - mean(y, na.rm = TRUE)
    if (!is.null(w)) {
      w <- values(w)
      sumw <- sum(w, na.rm = TRUE)
      #if(sumw > 1) w <- w / (sumw)
      r <- na.omit(w * x * y)
      v <- sum(r, na.rm = TRUE) / (sumw - sample)
    } else {
      r <- na.omit(x * y)
      nn <- length(r)
      v <- sum(r, na.rm = TRUE) / (nn - sample)
    }
  } else {
    # Disk-backed path: scale(..., scale = FALSE) centers each layer
    # cell-wise without standardising.
    x <- scale(x, scale = FALSE)
    y <- scale(y, scale = FALSE)
    if (!is.null(w)) {
      sumw <- cellStats(w, sum, na.rm = TRUE)
      #if(sumw > 1) w <- w / (sumw)
      r <- w * x * y
      v <- cellStats(r, stat = 'sum', na.rm = TRUE) / (sumw - sample)
    } else {
      r <- x * y
      nn <- length(r[!is.na(r)])
      v <- cellStats(r, stat = 'sum', na.rm = TRUE) / (nn - sample)
    }
    # Remove the temporary files backing the intermediate raster r.
    f <- filename(r)
    file.remove(c(f, extension(f, '.gri')))
  }
  return(v)
}
|
701a50ca56e4ba01aed4cda590ef9fe358f16df8
|
dbcc234a0d665e90bc8d4c0003bc478bb9bf78e0
|
/vis.R
|
a34f7f9d09b088fc84e25aee88097b83dca0fc9b
|
[] |
no_license
|
fluo-2/sensor-vis
|
21dda8ccf2cc47a58dd86178dde09d1e8e71a78e
|
7cd60b6e7c90e7d54a02da84e418736eead0e879
|
refs/heads/master
| 2021-04-23T21:12:32.953485
| 2020-04-06T13:49:33
| 2020-04-06T13:49:33
| 250,005,847
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,457
|
r
|
vis.R
|
#!/usr/bin/env Rscript
# -*- coding: utf-8 -*-

# vis.R: draw sensor spectral bands (centre wavelength +/- FWHM/2) as
# red rectangles, one facet per sensor, rendered through tikzDevice to
# standalone LaTeX and compiled to PDF in ./img.

library(tools)
library(gdata)
library(ggplot2)
library(tikzDevice)
library(reshape2)
library(ggspectra)
# Project-local helpers; provides extract_categorical_data() and
# roundUp() — NOTE(review): confirm both live in handle_data.R.
source("./handle_data.R")

# read main data source for sheet 1
# NOTE(review): assumes columns wl_center, fwhm and Sensor — verify.
clean_data <- extract_categorical_data("WL_FWHM")

# create dummy dataframe to initialize wavelength range
wave <- data.frame(wl_center = 100:15000)

# proceed to plot data
# ---- Plot 1: full wavelength range (100-15000 nm) ----
tikz("basic_spectral.tex", width=20, height=12, standAlone = TRUE)
g <- ggplot(wave,aes(x=wl_center)) +
  ## wl_guide(alpha=0.8) +
  # One rectangle per band: x spans the FWHM around the band centre.
  geom_rect(data=clean_data,aes(xmin=wl_center-(fwhm/2),xmax=wl_center+(fwhm/2),
            ymin=0, ymax=0.49),color="black",fill="red",alpha=0.5,size=1.1) +
  ## geom_vline(xintercept = 685, linetype="dashed", size=1.2) +
  ylab("") +
  xlab("\n Wavelength $\\lambda$ [nm]") +
  ylim(c(0,0.5)) +
  theme_bw() +
  # Hide the y axis entirely — only the band positions matter.
  theme(text = element_text(size=30),
        legend.position = "none",
        plot.title = element_text(hjust=0.5),
        axis.ticks.length = unit(0.2, "cm"),
        axis.ticks.y=element_blank(),
        axis.text.y=element_blank(),
        plot.margin = margin(10, 50, 10, 10)) +
  scale_x_continuous(breaks = round(seq(roundUp(min(wave$wl_center),500),
                                        roundUp(max(wave$wl_center),500),
                                        by = 1000),1),
                     expand=expand_scale(mult=c(0.03,0.01))) +
  facet_wrap(Sensor~.,ncol=1)
print(g)
dev.off()
# Compile the standalone TikZ source and move the PDF into ./img.
texi2pdf("basic_spectral.tex",clean=TRUE)
file.remove("basic_spectral.tex")
file.rename("basic_spectral.pdf",
            "./img/basic_spectral.pdf")

# additional truncated plot
# ---- Plot 2: truncated range (100-1200 nm) with the visible-spectrum
# colour guide (wl_guide) underlaid ----
wave <- data.frame(wl_center = 100:1200)
# proceed to plot data
tikz("basic_spectral_truncated.tex", width=20, height=12, standAlone = TRUE)
g <- ggplot(wave,aes(x=wl_center)) +
  wl_guide(alpha=0.8) +
  geom_rect(data=clean_data,aes(xmin=wl_center-(fwhm/2),xmax=wl_center+(fwhm/2),
            ymin=0, ymax=0.49),color="black",fill="red",alpha=0.5,size=1.1) +
  ## geom_vline(xintercept = 685, linetype="dashed", size=1.2) +
  ylab("") +
  xlab("\n Wavelength $\\lambda$ [nm]") +
  ## xlim(c(min(wave$wl_center),max(wave$wl_center))) +
  ylim(c(0,0.5)) +
  theme_bw() +
  theme(text = element_text(size=30),
        legend.position = "none",
        plot.title = element_text(hjust=0.5),
        axis.ticks.length = unit(0.2, "cm"),
        axis.ticks.y=element_blank(),
        axis.text.y=element_blank(),
        plot.margin = margin(10, 50, 10, 10)) +
  # Ticks every 100 nm and no padding, so the spectrum guide fills
  # the panel edge to edge.
  scale_x_continuous(limits=c(min(wave$wl_center),max(wave$wl_center)),
                     breaks = round(seq(roundUp(min(wave$wl_center),100),
                                        roundUp(max(wave$wl_center),100),
                                        by =100),1),
                     expand=expand_scale(mult=c(0,0))) +
  facet_wrap(Sensor~.,ncol=1)
print(g)
dev.off()
texi2pdf("basic_spectral_truncated.tex",clean=TRUE)
file.remove("basic_spectral_truncated.tex")
file.rename("basic_spectral_truncated.pdf",
            "./img/basic_spectral_truncated.pdf")

# without spectrum
# ---- Plot 3: same truncated range, but without the spectrum guide ----
tikz("basic_spectral_truncated_nos.tex", width=20, height=12, standAlone = TRUE)
g <- ggplot(wave,aes(x=wl_center)) +
  ## wl_guide(alpha=0.8) +
  geom_rect(data=clean_data,aes(xmin=wl_center-(fwhm/2),xmax=wl_center+(fwhm/2),
            ymin=0, ymax=0.49),color="black",fill="red",alpha=0.5,size=1.1) +
  ## geom_vline(xintercept = 685, linetype="dashed", size=1.2) +
  ylab("") +
  xlab("\n Wavelength $\\lambda$ [nm]") +
  ## xlim(c(min(wave$wl_center),max(wave$wl_center))) +
  ylim(c(0,0.5)) +
  theme_bw() +
  theme(text = element_text(size=30),
        legend.position = "none",
        plot.title = element_text(hjust=0.5),
        axis.ticks.length = unit(0.2, "cm"),
        axis.ticks.y=element_blank(),
        axis.text.y=element_blank(),
        plot.margin = margin(10, 50, 10, 10)) +
  scale_x_continuous(limits=c(min(wave$wl_center),max(wave$wl_center)),
                     breaks = round(seq(roundUp(min(wave$wl_center),100),
                                        roundUp(max(wave$wl_center),100),
                                        by =100),1),
                     expand=expand_scale(mult=c(0.01,0.01))) +
  facet_wrap(Sensor~.,ncol=1)
print(g)
dev.off()
texi2pdf("basic_spectral_truncated_nos.tex",clean=TRUE)
file.remove("basic_spectral_truncated_nos.tex")
file.rename("basic_spectral_truncated_nos.pdf",
            "./img/basic_spectral_truncated_nos.pdf")
|
850e589b82aa8c2073880cdaab21be58f7ee368f
|
0e8b4d2ed772603e3fcd7ee6505f8ba7b91184f5
|
/scripts/persiann_conversion.R
|
a476abbc7bdbbf9e32021adde00c386f1cf2d679
|
[] |
no_license
|
frydolin/r-sat-rain
|
1a19d0d8dae182ae06d02fb64592f8402a3ecb32
|
6ac52eaad55a4b5ba82c73dc3cd399e77c1fe69f
|
refs/heads/master
| 2016-09-06T09:10:32.514731
| 2014-04-26T17:05:49
| 2014-04-26T17:05:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,408
|
r
|
persiann_conversion.R
|
###### SPATIO-TEMPORAL RAINFALL PATTERNS IN KAPUAS BASIN ######
### ASSESSING USEFULNESS OF SATELLITE PRECIPITATION ESTIMATES ###

## persiann_conversion.R
## Load daily PERSIANN data in ESRI ASCII format and convert it to one
## single NetCDF file.

#### SET UP ####
# call libraries
library("sp")
library("raster")
library("SDMTools")
## set up time locale to get English time format
Sys.setlocale("LC_TIME", "en_US.UTF-8")
###

#### PERSIANN DATA CONVERSION ####
# Reads in daily PERSIANN data, converts each day to a raster object,
# stacks them into a brick, and exports the brick to NetCDF.
# PERSIANN data is in ESRI ASCII format, but correctly subsetted;
# therefore *.asc files are read and then converted to raster objects.
# Rainfall is in mm/day; 2001-2012, start date: 2001-01-01.
fpath <- "input/PERSIANN" # needs adjustment
files <- list.files(path = fpath, pattern = "*.asc",
                    recursive = TRUE, include.dirs = TRUE, full.names = TRUE) # recursive=TRUE!!

# Preallocate the list instead of growing it inside the loop.
persiann.rlist <- vector("list", length(files))
for (i in seq_along(files)) {
  asc.tmp <- read.asc(file = files[i]) # read file
  # Fixed: use [[i]] so the raster (S4) object itself becomes the i-th
  # list element; single-bracket assignment of a non-list value into a
  # list is invalid for S4 objects.
  persiann.rlist[[i]] <- raster(asc.tmp) # convert to raster format
}
persiann <- brick(persiann.rlist) # make brick out of list
writeRaster(persiann, "persiann.nc") # write to NetCDF file

# cleanup
rm(i, asc.tmp, persiann.rlist)
rm(fpath, files)
###

###### END persiann_conversion.R ######
|
f5f5252f4c730a6e609dcbf97c4a5004dfbb6624
|
d8fe5763a832081b8034e6df7d3c21ec58f43ff0
|
/plot3.R
|
f1257932825df719f34d5ba8fe4ada9730d9419f
|
[] |
no_license
|
alfonsogd/RPlots
|
20e553530874bb6352538aa452fd808448ba7076
|
74f4f20af32e41d81f7c8e388521b5ce8564130c
|
refs/heads/master
| 2020-04-15T02:38:02.462867
| 2014-12-06T17:05:43
| 2014-12-06T17:05:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 876
|
r
|
plot3.R
|
##########
# Plot 3: energy sub-metering over the two-day window 2007-02-01/02.

# Load the raw household power data ("?" marks missing readings).
power_raw <- read.csv("~/Rall/household_power_consumption.txt", sep=";", na.strings="?", stringsAsFactors=FALSE)

# R Libraries
library(plyr)
library(lubridate)

# Parse the Date column into Date class.
power_raw <- mutate(power_raw, Date = as.Date(dmy(Date)))

# Restrict to 2007-02-01 through 2007-02-02 and build a combined
# date-time column for the x axis.
in_window <- power_raw$Date >= as.Date("2007/02/01") & power_raw$Date <= as.Date("2007/02/02")
power_sub <- power_raw[in_window, ]
power_sub <- mutate(power_sub, DateTime = ymd_hms(paste(power_sub$Date, power_sub$Time)))

# Third plot: the three sub-metering series on one set of axes.
par(mar = c(3, 4, 2, 2))
plot(power_sub$DateTime, power_sub$Sub_metering_1, type = "n", ylab = "Energy Sub metering")
points(power_sub$DateTime, power_sub$Sub_metering_1, type = "l")
points(power_sub$DateTime, power_sub$Sub_metering_2, type = "l", col = "red")
points(power_sub$DateTime, power_sub$Sub_metering_3, type = "l", col = "blue")
legend("topright", pch = c("-", "-", "-"), col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# END
|
d2ffa4cfb92e250e981646cab2e7e9909b262ed3
|
fa05945647038b74b196b9cc8027576576d413e5
|
/0204.R
|
cbced39f1899f1102064e0d32065c522254d0142
|
[] |
no_license
|
saeheeeom/RforKoreanJsonData
|
15ccacb35b93da0f25cb7319933dbb52b103cd44
|
02e1043ce7b990712b0d04bdb8bc8e72ebc0c67a
|
refs/heads/main
| 2023-06-06T05:55:53.322141
| 2021-06-24T11:12:59
| 2021-06-24T11:12:59
| 379,896,353
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 143
|
r
|
0204.R
|
library(tidyverse)

# Print the starwars dataset to the console.
starwars

# Mass by homeworld, coloured by gender; flipped coordinates make the
# many homeworld labels readable along the vertical axis.
ggplot(starwars, aes(x = homeworld, y = mass, color = gender)) +
  geom_point() +
  coord_flip()
|
df702515276626cf615c8ecc2383de661ad07a1c
|
40b0dedbe4a9952b628144e131ec2531a3d1c24d
|
/run_analysis.R
|
fb2ca4433f101954ed623bd6beb94ea24d88e3e7
|
[] |
no_license
|
mauiii39/Getting-and-Cleaning-Data-Course-Project
|
1e8234664fe61e5c06ef5028a02f9435d50b4efb
|
a6ee723ef48a7e33188d1f3fc000d8ee064b46dc
|
refs/heads/main
| 2023-04-09T21:50:16.552958
| 2021-04-27T18:35:13
| 2021-04-27T18:35:13
| 361,946,792
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,491
|
r
|
run_analysis.R
|
# run_analysis.R — Getting and Cleaning Data course project.
# Downloads the UCI HAR dataset, merges train/test sets, keeps the
# mean/std measurements, attaches descriptive activity names, and
# writes a tidy per-subject, per-activity summary.
library(dplyr)

#Download
# NOTE(review): setwd() hard-codes a personal path; prefer running the
# script from the project root with relative paths.
setwd("C:/Users/Mau/Google Drive/Programming/Coursera/Getting and Cleaning Data/Week 4")
if(!file.exists("./data")){dir.create("./data")}
fileurl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileurl, destfile = "./data/data.zip")
# Unzip
unzip(zipfile = "./data/data.zip", exdir = "./data")
# 1. Merging the training and the test sets to create one data set.
x_train <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
x_test <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
features <- read.table("./data/UCI HAR Dataset/features.txt")
activity_Labels = read.table("./data/UCI HAR Dataset/activity_labels.txt")
# Feature names (second column of features.txt) become the measurement
# column names; the id columns get descriptive names.
colnames(x_train) <- features[,2]
colnames(x_test) <- features[,2]
colnames(y_train) <- "activity"
colnames(y_test) <- "activity"
colnames(subject_train) <- "subject"
colnames(subject_test) <- "subject"
colnames(activity_Labels) <- c("activity", "description")
# Bind ids + measurements column-wise, then stack train on top of test.
training <- cbind(y_train, subject_train, x_train)
test <- cbind(y_test, subject_test, x_test)
all <- rbind(training, test)
# 2. Extract only the measurements on the mean and standard deviation for each measurement.
column_names <- colnames(all)
# Keep id columns plus any feature whose name matches mean.. or std...
# NOTE(review): '.' in these patterns matches any character, so e.g.
# meanFreq() columns are also kept — confirm this is intended.
column_names_subset <- (grepl("activity", column_names) |
                          grepl("subject", column_names) |
                          grepl("mean..", column_names) |
                          grepl("std...", column_names))
all_subset <- all[ , column_names_subset]
# 3. Uses descriptive activity names to name the activities in the data set
# Left join on the activity id keeps every observation.
merge_activity <- merge(all_subset, activity_Labels,
                        by = "activity",
                        all.x = TRUE)
# 4. Appropriately label the data set with descriptive variable names.
# The columns were already labeled in the steps above.
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
tidy_data_set <- tbl_df(merge_activity)
tidy_data_set <- group_by(tidy_data_set,subject,activity)
tidy_data_set_summary <- summarise_all(tidy_data_set,.funs = c(mean="mean"))
write.table(tidy_data_set_summary, "tidy_data_set.txt")
|
9c908ff4ec77dac2e79c5327ea94dfae1870d93b
|
7f0e751376a2bbdd29de919ad96d8392046cb9ae
|
/air_quality_hw/pollutantmean.R
|
12ac41f2ec3e97ccc854a5da680f35dc748c54c4
|
[] |
no_license
|
The-Wayvy/Data_Science_Coursera
|
a986bf8c5987734ffb1a7bbc674128e329b07ffc
|
1dd9ecbbbb010fcdb9d582aa1d2bd76fffbffd57
|
refs/heads/master
| 2023-06-07T04:06:55.875808
| 2016-11-06T19:03:53
| 2016-11-06T19:03:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,430
|
r
|
pollutantmean.R
|
get_string_id <- function(id){
  # Zero-pad a monitor id to (at least) three characters, e.g.
  # 7 -> "007", 42 -> "042", 123 -> "123".
  # sprintf() replaces the original three-branch if/else ladder; it
  # also vectorizes over id, which the original did not.
  sprintf("%03d", id)
}
get_table <- function(directory, id_string){
  # Read one monitor's CSV file from `directory`, given its zero-padded
  # id string (e.g. "001" -> "<directory>/001.csv"), and return it as a
  # data frame.
  data_file <- file.path(directory, paste0(id_string, ".csv"))
  read.csv(data_file)
}
get_pollution_vec <- function(a_table, pollutant){
  # Extract one pollutant column from a monitor data frame as a plain
  # numeric vector with all NA readings removed.
  #
  # Args:
  #   a_table:   data frame holding one monitor's readings.
  #   pollutant: name of the column to extract (e.g. "sulfate").
  # Returns:
  #   Numeric vector of the non-missing values in that column.
  #
  # The original indexed with a single bracket, which returns a
  # one-column data frame and only collapses to a vector via matrix-style
  # NA subsetting; `[[` extracts the column vector directly.
  column_values <- a_table[[pollutant]]
  column_values[!is.na(column_values)]
}
pollutantmean <- function(directory, pollutant, id = 1:332){
  # Mean of one pollutant across a set of monitors, ignoring NA readings.
  #
  # Args:
  #   directory: folder containing the monitor CSV files ("001.csv", ...).
  #   pollutant: name of the column to average (e.g. "sulfate").
  #   id:        monitor ids to include (defaults to all 332 monitors).
  # Returns:
  #   The mean of all non-missing values of `pollutant`, pooled across
  #   the chosen monitors before averaging.
  #
  # Collect each monitor's non-NA readings in a list and flatten once at
  # the end; the original grew a vector with c() inside the loop, which
  # copies the whole accumulator on every iteration (O(n^2)).
  per_monitor <- lapply(id, function(id_number) {
    id_string <- get_string_id(id_number)
    one_table <- get_table(directory, id_string)
    get_pollution_vec(one_table, pollutant)
  })
  mean(unlist(per_monitor))
}
|
a8e474d3c2d84203a3d9c85f268f573959fccfcd
|
d56ed82f814dd7a299ee6caf8a52ac381bc8bd93
|
/R/packages/mops/man/mcs_sample.Rd
|
377a7da8b699a45840e77939af1df2625ae69d40
|
[
"MIT"
] |
permissive
|
echse/echse_tools
|
db69845e5e9d26f4a8f68a3ae7a9622b28fbce88
|
4e236a426d261f3750a2f31bb7fdab61335ed676
|
refs/heads/master
| 2021-06-03T10:05:55.907513
| 2017-11-08T13:28:41
| 2017-11-08T13:28:41
| 39,069,970
| 1
| 2
| null | 2015-11-09T12:04:53
| 2015-07-14T11:03:07
|
R
|
UTF-8
|
R
| false
| false
| 2,015
|
rd
|
mcs_sample.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/mcs.r
\name{mcs_sample}
\alias{mcs_sample}
\title{Sample generation for Monte-Carlo simulation}
\usage{
mcs_sample(ranges_table, n = 100)
}
\arguments{
\item{ranges_table}{Data frame with 3 columns 'parameter', 'min', 'max'. Each
record defines the name and the sampling range for a particular parameter.}
\item{n}{The number of parameter sets to be generated.}
}
\value{
A data frame with \code{n} rows and as many columns as there are
records in \code{ranges_table}. The column are named after the parameters.
}
\description{
The function generates a number of random parameter sets using a latin
hypercube algorithm from package 'lhs'. The underlying distribution is
uniform.
}
\note{
If the generated random sets are to be used by a simulation model, the
sampling ranges must be chosen carefully.
In models with many parameters, a particular
value of one parameter may be incompatible with a particular value of
another parameter. For example, if the sampling range
for a parameter 'minimumCapacity' was 2...10, it would not make sense to
use a sampling range 5...20 for a related parameter 'maximumCapacity'. If
the ranges were defined as above, it is likely that 'minimumCapacity' >
'maximumCapacity' in some sets. Those cases typically lead to
implausible model outputs or a crash of the model due to numeric errors.
In addition, one should be aware of the fact that the initial value(s) of
a model's state variable(s) must be compatible with all possible random
parameter sets. For example, an intial value of 0 for a state variable
'temperature' would be critical if the sampling range for a related
parameter 'minimumTemperature' was 5...50.
}
\examples{
mcs_sample(data.frame(parameter=c("a","b"),min=c(1,5),max=c(2,10)), n=10)
}
\author{
David Kneis \email{david.kneis@uni-potsdam.de}
}
\seealso{
Use \code{\link{mcs_run}} to run a Monte-Carlo simulation using
the generated sets.
}
|
0f4aac5e7dc73433a1f465f94810d914417219e2
|
0fdc85c0cced444a2210e65523a0dd0b339319db
|
/inst/unitTests/runTests.R
|
d62319ae33ffdb4cbd857973c5927dec30cd32c3
|
[] |
no_license
|
jaroyle/oSCR
|
769730d56085d856437ddbb4d7b870509805bd41
|
250323186d9cef173be929667b618bbb79dc96c7
|
refs/heads/master
| 2023-04-14T00:24:00.143601
| 2023-04-06T11:40:40
| 2023-04-06T11:40:40
| 42,339,391
| 9
| 11
| null | 2020-10-30T14:19:41
| 2015-09-12T01:10:02
|
R
|
UTF-8
|
R
| false
| false
| 3,114
|
r
|
runTests.R
|
## Adapted this from Rcpp package
##
## RUnit test harness for the package:
##   * interactive session  -> only builds `testSuite` and prints how to
##     run it by hand;
##   * Rscript / R CMD check -> runs the suite, writes a text report (and
##     an HTML report except on macOS) and stop()s if any test failed so
##     R CMD check errors out.
##
## FIX: the original script unconditionally re-ran
## `runTestSuite(testSuite)` AFTER the require() guard below, which both
## executed the whole suite a second time and errored with
## "object 'testSuite' not found" whenever RUnit was not installed.
## Those trailing lines have been removed.
pkg <- "oSCR"

if (require("RUnit", quietly = TRUE)) {

    # TRUE when the caller passed --local (via littler's argv or Rscript's
    # commandArgs), meaning: look for the tests in the current directory.
    is_local <- function(){
        if( exists( "argv", globalenv() ) && "--local" %in% argv ) return(TRUE)
        if( "--local" %in% commandArgs(TRUE) ) return(TRUE)
        FALSE
    }
    if( is_local() ) path <- getwd()

    library(package=pkg, character.only = TRUE)
    # Default to the installed unitTests directory when no local path set.
    if(!(exists("path") && file.exists(path)))
        path <- system.file("unitTests", package = pkg)

    ## --- Testing ---

    ## Define tests with a fixed RNG kind so runs are reproducible.
    testSuite <- defineTestSuite(name=paste(pkg, "unit testing"),
                                 dirs = path,
                                 rngKind="Mersenne-Twister",
                                 rngNormalKind="Inversion")

    if(interactive()) {
        # Interactive: hand the suite to the user instead of running it.
        cat("Now have RUnit Test Suite 'testSuite' for package '", pkg,
            "' :\n", sep='')
        str(testSuite)
        cat('', "Consider doing",
            "\t tests <- runTestSuite(testSuite)", "\nand later",
            "\t printTextProtocol(tests)", '', sep="\n")
    } else { ## run from shell / Rscript / R CMD Batch / ...
        ## Run
        tests <- runTestSuite(testSuite)

        output <- NULL

        # Extract the value of an optional --output=<dir> argument, or
        # NULL when absent.
        process_args <- function(argv){
            if( !is.null(argv) && length(argv) > 0 ){
                rx <- "^--output=(.*)$"
                g <- grep( rx, argv, value = TRUE )
                if( length(g) ){
                    sub( rx, "\\1", g[1L] )
                }
            }
        }

        # give a chance to the user to customize where he/she wants
        # the unit tests results to be stored with the --output= command
        # line argument
        if( exists( "argv", globalenv() ) ){
            # littler
            output <- process_args(argv)
        } else {
            # Rscript
            output <- process_args(commandArgs(TRUE))
        }

        # if it did not work, try to use /tmp
        if( is.null(output) ){
            if( file.exists( "/tmp" ) ){
                output <- "/tmp"
            } else{
                output <- getwd()
            }
        }

        ## Print results
        output.txt <- file.path( output, sprintf("%s-unitTests.txt", pkg))
        output.html <- file.path( output, sprintf("%s-unitTests.html", pkg))

        printTextProtocol(tests, fileName=output.txt)
        message( sprintf( "saving txt unit test report to '%s'", output.txt ) )

        ## Print HTML version to a file
        ## printHTMLProtocol has problems on Mac OS X
        if (Sys.info()["sysname"] != "Darwin"){
            message( sprintf( "saving html unit test report to '%s'", output.html ) )
            printHTMLProtocol(tests, fileName=output.html)
        }

        ## stop() if there are any failures i.e. FALSE to unit test.
        ## This will cause R CMD check to return error and stop
        if(getErrors(tests)$nFail > 0) {
            stop("one of the unit tests failed")
        }
    }
} else {
    cat("R package 'RUnit' cannot be loaded -- no unit tests run\n",
        "for package", pkg,"\n")
}
|
f6f322cf5d39034833715a444c21998c33fe4d1d
|
3f4df74b9f42d1f600e52d7346d23bca2040f03d
|
/wizualizacja_zmiennych.R
|
38cda931b1ac372fd8c3782ec8e84207a3d1e43e
|
[] |
no_license
|
michalcisek/titanic
|
1fdec974380ab51aaca7e69c624b0b3743cef6b5
|
31aee4455b37ecfe3cc11655a5bb6873dab6cbcb
|
refs/heads/master
| 2021-01-09T05:54:35.156088
| 2017-03-08T14:44:34
| 2017-03-08T14:44:34
| 80,863,260
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 502
|
r
|
wizualizacja_zmiennych.R
|
# Exploratory visualisation of the Titanic training variables.
# NOTE: the original began with rm(list=ls()), which silently wipes the
# user's entire workspace as a side effect; removed per standard R
# scripting practice (the script's own output is unaffected).
# Requires `train` to be created by dane.R (sourced below).
library(ggplot2)
library(GGally)

# Scatter panel for ggpairs(): raw points plus a loess (red) and a linear
# (blue) smoother, so curvature vs. a straight-line trend can be compared.
my_fn <- function(data, mapping, ...){
  p <- ggplot(data = data, mapping = mapping) +
    geom_point() +
    geom_smooth(method=loess, fill="red", color="red", ...) +
    geom_smooth(method=lm, fill="blue", color="blue", ...)
  p
}

source("dane.R")

# Survival against passenger class, jittered to expose overplotting.
ggplot(train, aes(x=Pclass,y=factor(Survived)))+
  geom_jitter()

spineplot(train$Pclass,factor(train$Survived))

# Pairwise panels for the selected columns, with my_fn in the lower
# triangle for continuous pairs.  (`<-` replaces the original `=`.)
g <- ggpairs(train,columns = c(2,3,5:8,10,12,13), lower = list(continuous = my_fn))
g
|
56d943aaefca2f832076a54453bfda315a4018ce
|
c3b6d2d42d100227163a8a3ddeaa2c7e1044b919
|
/R/bower.R
|
7b6876c2587de20029635c3c8b09d3f2d1b43a80
|
[] |
no_license
|
clatworthylab/bowerbird
|
a0ba8930552c487d6fffc75b27f46df4f693843c
|
3a784b34d862d3ae2a2c4950e9ee3c30cc040544
|
refs/heads/master
| 2023-04-15T06:23:40.896545
| 2021-11-22T09:48:53
| 2021-11-22T09:48:53
| 374,603,723
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,805
|
r
|
bower.R
|
#' @title bower
#' @name bower
#' @rdname bower
#' @description
#' A flexible function to initiate a BOWER class from a geneset file or a list of genesets.
#' @include utilities.R
#' @examples
#' gmt_file <- system.file("extdata", "h.all.v7.4.symbols.gmt", package = "bowerbird")
#' bwr <- bower(gmt_file)
#' bwr
#' @export
#'
bower <- function(genesets, graph=NULL, clusters = NULL, ...){
	requireNamespace('S4Vectors')
	# Temporarily disable S4 validity checking while the object is built,
	# restoring the previous setting when this function exits.
	old <- S4Vectors:::disableValidity()
	if (!isTRUE(old)) {
		S4Vectors:::disableValidity(TRUE)
		on.exit(S4Vectors:::disableValidity(old))
	}
	# FIX: the original used the vectorized `&` inside a scalar `if`;
	# `&&` is the correct short-circuiting scalar operator here.
	if (length(genesets) == 0 && length(graph) == 0) {
		# Nothing to build from: return an empty BOWER object.
		out <- .emptyBOWER()
	} else {
		# Normalise each optional component once, instead of enumerating
		# every genesets/graph/clusters combination in nested branches
		# (the original's eight near-identical new() calls).
		gs <- if (length(genesets) > 0) .sanitize(read_geneset(genesets, ...)) else list()
		gr <- if (length(graph) > 0) graph else NULL
		cl <- if (length(clusters) > 0) clusters else NULL
		out <- new('BOWER', genesets = gs, graph = gr, clusters = cl, ...)
	}
	# Cache the tabular form of the graph when one is present.
	if (length(out@graph) > 0) {
		out@.graph_data <- .graph_to_data(out@graph)
	}
	return(out)
}
|
c397c72ca5bd9558c25849d2086207b7939c8b4f
|
210683b5347b6f584b258f26c7d48ab51a518fe3
|
/R/MakeHierFormula.R
|
32945a13c4869eb13de4c4164f72081715a475b5
|
[
"MIT"
] |
permissive
|
statisticsnorway/SSBtools
|
6b95eab7f46c1096cd7d6ee3f61d3898150d49d0
|
aa2728571e0840e1965f3e7ed0f1984c818ca7a1
|
refs/heads/master
| 2023-06-24T02:48:17.178606
| 2023-06-23T08:05:58
| 2023-06-23T08:05:58
| 137,074,899
| 5
| 0
|
Apache-2.0
| 2023-06-23T08:06:00
| 2018-06-12T13:21:36
|
R
|
UTF-8
|
R
| false
| false
| 2,012
|
r
|
MakeHierFormula.R
|
#' Make model formula from data taking into account hierarchical variables
#'
#' @encoding UTF8
#'
#' @param data data frame
#' @param hGroups Output from HierarchicalGroups2()
#' @param n Interaction level or 0 (all levels)
#' @param sim Include "~" when TRUE
#'
#' @return Formula as character string
#' @export
#' @author Øyvind Langsrud
#'
#' @examples
#' x <- SSBtoolsData("sprt_emp_withEU")[, -4]
#' MakeHierFormula(x)
#' MakeHierFormula(x, n = 2)
#' MakeHierFormula(x, n = 0)
MakeHierFormula <- function(data = NULL, hGroups = HierarchicalGroups2(data), n = length(hGroups), sim = TRUE) {
  # n == 0 means "all interaction levels": combine variables with ":"
  # (pure interaction) instead of "*" (main effects + interactions).
  if (n == 0) 
    sepS <- ":" else sepS <- "*"
  n <- min(n, length(hGroups))
  # Each row of m picks, per hierarchical group, the index of one variable
  # (0 = group not part of this term).  lengths() replaces sapply(..., length).
  m <- AllNCombinations(lengths(hGroups), n)
  terms <- vapply(seq_len(NROW(m)), function(i) {
    mi <- m[i, ]
    # Map each non-zero index to its variable name within the group ...
    x <- mapply(function(grp, idx) if (idx) grp[idx] else "", hGroups, mi)
    # ... and join the selected names into one model term ("a*b" / "a:b").
    # paste(collapse=) replaces the original element-by-element paste loop.
    paste(x[mi != 0], collapse = sepS)
  }, character(1))
  if (!sim) 
    return(paste(terms, collapse = " + "))
  paste("~", paste(terms, collapse = " + "), sep = " ")
}
AllCombinations <- function(x = c(3, 1, 2), with0 = TRUE, m = matrix(0, 1, 0)) {
  # Recursively enumerate, as rows of a matrix, every combination of
  # indices for the group sizes in `x`.  With `with0 = TRUE` the value 0
  # ("group absent") is included, so column j runs over 0..x[j]; earlier
  # columns vary fastest.  `m` accumulates the columns already processed.
  if (length(x) == 0) {
    return(m)
  }
  n_rows <- NROW(m)
  n_choices <- x[1] + with0
  # Repeat the existing rows once per choice, then append a column where
  # each choice value fills one contiguous block of rows.
  grown <- m[rep(seq_len(n_rows), n_choices), ]
  new_col <- sort(rep(seq_len(n_choices), n_rows)) - with0
  # deparse.level = 0 keeps the new column unnamed, as in the original.
  AllCombinations(x[-1], with0, cbind(grown, new_col, deparse.level = 0))
}
AllNCombinations <- function(x = c(3, 1, 2), n = 0, returnSorted = TRUE, returnList = FALSE) {
  # Enumerate all index combinations via AllCombinations() and then
  # filter/arrange the rows by their "activation count": the number of
  # non-zero entries (i.e. how many groups take part in the term).
  combos <- AllCombinations(x)
  n_active <- rowSums(combos > 0)
  # A specific interaction level requested: keep rows with exactly n
  # non-zero entries.
  if (n) {
    return(combos[n_active == n, , drop = FALSE])
  }
  # List mode: element i collects the rows with exactly i non-zero entries.
  if (returnList) {
    a <- vector("list", max(n_active))
    for (i in seq_len(max(n_active))) {
      a[[i]] <- combos[n_active == i, , drop = FALSE]
    }
    return(a)
  }
  # Drop the all-zero row, then (optionally) order by activation count.
  keep <- n_active != 0
  combos <- combos[keep, , drop = FALSE]
  n_active <- n_active[keep]
  if (returnSorted) {
    return(combos[order(n_active), , drop = FALSE])
  }
  combos
}
|
0ca4f3309aacc1666f9c4f52e4131d88057a7b5e
|
a7cbc06b6139047c24ff1de36a375f794affc784
|
/Practice N3/R/Assignment N3 Pb1.R
|
18f01ac999d2083d217d40382633b86ec652daa2
|
[] |
no_license
|
MarcoYou/Econometrics
|
10c2a5a35ccae35afdba001c22348154dba36938
|
e8a7ceb78f20e2a4c235758d82c057706284387e
|
refs/heads/master
| 2023-02-12T03:26:44.798488
| 2021-01-03T03:22:58
| 2021-01-03T03:22:58
| 296,728,301
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,772
|
r
|
Assignment N3 Pb1.R
|
library(stargazer)
# FIX: mvrnorm() below comes from the MASS package, which was never
# loaded; the script failed with "could not find function 'mvrnorm'".
library(MASS)

# Problem 1: classical measurement error in the regressor.
# Simulate (X, Y) from a bivariate normal, observe X with additive noise,
# show the attenuation bias of OLS, then run a small Monte Carlo study of
# the biased slope estimator's mean (bias) and spread (consistency).

# 1. Generate artificial data
## set seed for a reproducible main sample
set.seed(123)
## marginal normal distribution parameters
mu1 <- 50; mu2 <- 100
sd1 <- sqrt(10); sd2 <- sqrt(10)
cov12 <- 5
cor12 <- (cov12/(sd1*sd2))
## bivariate normal distribution parameters (mean vector, covariance matrix)
mu <- c(50,100)
sigma <- matrix(c(sd1^2, sd1*sd2*cor12, sd2*sd1*cor12, sd2^2),2)
## random sample generation
N <- 1000
XY <- mvrnorm(N,mu,sigma)
X <- XY[, 1]
Y <- XY[, 2]
## measurement error: X is observed as Xt = X + w
w <- rnorm(N, mean = 0, sd = 10)
Xt <- X + w

# regression with the correctly measured sample
regress1 <- lm(formula = Y ~ X)
stargazer(regress1, type="latex") # to generate the summary table in LaTeX format
beta.01 <- coef(regress1) # intercept and slope of regress1 stored in beta.01
beta.0 <- as.numeric(beta.01["(Intercept)"])
beta.1 <- as.numeric(beta.01["X"])

# plot regress1 (upper-left panel of a 2x2 layout)
par(mfrow=c(2,2))
plot(X, Y, col = "red", xlab = "X", main = "True Observation", cex=.5)
abline(beta.01, lwd = 2)

# regression with the error-ridden regressor (attenuation bias expected)
regress2 <- lm(formula = Y ~ Xt)
stargazer(regress2, type="latex")
beta.hat.01 <- coef(regress2)
beta.hat.0 <- as.numeric(beta.hat.01["(Intercept)"])
beta.hat.1 <- as.numeric(beta.hat.01["Xt"])

# plot regress2
plot(Xt, Y, col = "red", xlab = "X tilda", main = "Measurement Error", cex=.5)
abline(beta.hat.01, lwd = 2)

# ANOVA test
anova(regress1)
anova(regress2)

# beta.hat probability convergence check: undo the attenuation factor
# var(X)/(var(X)+var(w)) using the sample variances
check1 <- ((var(X) + var(w))/var(X)) * beta.hat.1
beta.1 - check1

# beta corrected for attenuation
beta.co.1 <- ((var(X) + var(w))/var(X)) * beta.hat.1
# if var(w) -> 0 then beta.1 = beta.hat.1
# if var(w) -> oo then (var(X)/(var(X)+var(w))) -> 0 hence beta.hat.1 -> 0

## Monte Carlo simulation
# bias: fix the sample size, vary the replication count; the mean of the
# biased estimator should NOT approach beta.1 as iterations grow.
set.seed(NULL)
fixed.samp <- 100
iter <- c(25,100,500,1000)
B.bias <- rep(0,length(iter))
for(i in 1:length(iter)){
  Bb <- rep(0,iter[i])
  for(j in 1:iter[i]){
    xy <- mvrnorm(fixed.samp,mu,sigma)
    x <- xy[, 1]
    y <- xy[, 2]
    wt <- rnorm(fixed.samp, mean = 0, sd = 10)
    xt <- x + wt
    reg.monte <- lm(formula = y ~ xt)
    beta.01.monte <- coef(reg.monte)
    beta.1.monte <- as.numeric(beta.01.monte["xt"])
    Bb[j] <- beta.1.monte
  }
  mean.Bb <- mean(Bb)
  B.bias[i] <- mean.Bb
}
# accuracy of mean does not improve (the estimator is biased)

# consistency: fix the replications, vary the sample size; the spread of
# the estimator should shrink as samples get larger.
fixed.iter <- 100
samp <- c(25,100,500,1000)
B.cons <- rep(0,length(samp))
for(i in 1:length(samp)){
  Bc <- rep(0,fixed.iter)
  for(j in 1:fixed.iter){
    xy <- mvrnorm(samp[i],mu,sigma)
    x <- xy[, 1]
    y <- xy[, 2]
    wt <- rnorm(samp[i], mean = 0, sd = 10)
    xt <- x + wt
    reg.monte <- lm(formula = y ~ xt)
    beta.01.monte <- coef(reg.monte)
    beta.1.monte <- as.numeric(beta.01.monte["xt"])
    Bc[j] <- beta.1.monte
  }
  sd.Bc <- sd(Bc)
  B.cons[i] <- sd.Bc
}
# variance decreases
|
8a411f42768cf0b7296bc6360f66c387aaaae7c1
|
b94dd703bc872656f020682956efa65b0f132bb6
|
/Module - 5/Assignment 11.R
|
aa805c3b9f30cc810592d541d7f71f25d6e13bbe
|
[] |
no_license
|
nitinsingh27/DataScience-With-R
|
5ba07d613d79f34b833dbf63327bca8c4ffc53a0
|
8d3f02a424aa1a34ffbfcf640c5bced3870c4c6d
|
refs/heads/main
| 2023-06-11T19:21:08.895755
| 2021-07-12T14:46:30
| 2021-07-12T14:46:30
| 385,279,389
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,586
|
r
|
Assignment 11.R
|
# Assignment 11: simple linear regressions with train/test splits and
# residual diagnostics on three data sets.
# NOTE: the absolute setwd() path is machine-specific; it is kept from the
# original so the relative read.csv() calls below still resolve there.
setwd("C:/Users/enerc/OneDrive/Desktop/data science/sessions/r_training")
getwd()

customer <- read.csv("Customer_churn.csv",stringsAsFactors = T)
placement <- read.csv("Placement.csv",stringsAsFactors = T)
pharma <- read.csv("pharma.csv",stringsAsFactors = T)
city <- read.csv("city_temperature.csv",stringsAsFactors = T)

library(caTools)
library(ggplot2)

#1. Analyze the features from the temperature dataset.
ggplot(data = city, aes(x = Country, y = Month)) + geom_point() + geom_smooth(method = 'lm')
sample.split(city,SplitRatio = 0.005) -> split_city
subset(city,split_city==T) -> train
subset(city,split_city==F) -> test
lm(Month~AvgTemperature, data = train) ->model
summary(model)
predict(model,newdata = test) -> city_predicted
# FIX: the original built result1 with cbind(), which returns a matrix,
# so the later result1$Actual / result1$Error accesses failed
# ("$ operator is invalid for atomic vectors").  Build a data frame, as
# sections 2 and 3 already do.
data.frame(Actual = test$Month, Predicted = city_predicted) -> result1
result1$Actual - result1$Predicted -> result1$Error
View(result1)
ggplot(data = result1, aes(x = Predicted, y = Error)) + geom_point()
qqnorm(result1$Error)
qqline(result1$Error)

#2. Analyze the features from the Pharma dataset.
ggplot(data = pharma, aes(x = DrugID , y = Age)) + geom_point() + geom_smooth(method = "lm")
sample.split(pharma,SplitRatio = 0.60) -> split_pharma
subset(pharma,split_pharma==T) -> train_pharma
subset(pharma,split_pharma==F) -> test_pharma
lm(Age~DrugID, data = train_pharma) -> model_pharma
summary(model_pharma)
predict(model_pharma, newdata = test_pharma) -> pharma_predicted
data.frame(Actual = test_pharma$Age, Predicted = pharma_predicted) -> result2
result2$Actual - result2$Predicted -> result2$Error
View(result2)
ggplot(data = result2, aes(x = Predicted, y = Error)) + geom_point()
qqnorm(result2$Error)
qqline(result2$Error)

#3. Analyze the features from the Placement dataset.
# NOTE(review): the scatter plot uses status vs etest_p but the model
# regresses etest_p on mba_p -- confirm which relationship was intended.
ggplot(data = placement, aes(x = status, y = etest_p)) + geom_point() + geom_smooth(method = 'lm')
sample.split(placement,SplitRatio = 0.70) -> split_placement
subset(placement,split_placement==T) -> train_P
subset(placement,split_placement==F) -> test_P
lm(etest_p~mba_p, data = train_P) ->model_p
summary(model_p)
predict(model_p,newdata = test_P) -> placement_predicted
data.frame(Actual = test_P$etest_p, Predicted = placement_predicted) -> result3
result3$Actual - result3$Predicted -> result3$Error
View(result3)
ggplot(data = result3, aes(x = Predicted, y = Error)) + geom_point()
qqnorm(result3$Error)
qqline(result3$Error)
|
32e0e8fb1531887afb3fac0587fc5afd4ac3ba27
|
9fd0ec3a1bcbf4178ca2769fee4e09b37141439b
|
/Script1.R
|
7283f13e880df0e7573284fa19f08e4b2b5bbc8e
|
[] |
no_license
|
J0S3UL1S3S/RepData_PeerAssessment1
|
5fa696f62787e83aed52583661a5323383dfce41
|
d828cfbbbd45378699785393c00acdac424b49a7
|
refs/heads/master
| 2022-11-26T12:53:14.578441
| 2020-07-27T15:54:44
| 2020-07-27T15:54:44
| 282,536,997
| 0
| 0
| null | 2020-07-25T22:44:44
| 2020-07-25T22:44:43
| null |
UTF-8
|
R
| false
| false
| 558
|
r
|
Script1.R
|
## Loading and preprocessing the data
# Read the raw activity-monitor data (steps counted per 5-minute interval).
# NOTE(review): assumes "activity/activity.csv" exists relative to the
# current working directory -- confirm before running.
activity <- read.csv("activity/activity.csv", header = TRUE, sep = ",")
View(activity)
colnames(activity)
# Rows with any NA removed; df1 is not used again below -- possibly a
# leftover from an earlier draft.
df1 <- na.omit(activity)
## What is mean total number of steps taken per day?
# Per-day MEAN of steps, NAs ignored.  NOTE(review): the assignment text
# below asks for the TOTAL steps per day; FUN = sum may have been
# intended here -- verify against the assignment.
df2 <- aggregate(steps~date, data=activity, FUN=mean, na.rm=TRUE)
hist(df2$steps)
## ignore the missing values in the dataset.
# Calculate the total number of steps taken per day
# Make a histogram of the total number of steps taken each day
# Calculate and report the mean and median of the total number of steps taken per day
|
827cb4f736a88738407ef7f16c5e10e660902d63
|
dbe14666492f3a9927cac0638d09700edd04ac85
|
/man/plot_top_etiol_by_age.Rd
|
f51307b131535dffb8a2cf9a87e53c18205b97e8
|
[
"Apache-2.0"
] |
permissive
|
EGHI-CHAMPS/champs-L2-statistics
|
5c19735820180f7d957b6bbeffeaecb2a90fb7c3
|
4e180553ea38958ceacd1e4b09cdf47b9d5de832
|
refs/heads/master
| 2023-01-29T03:47:50.419589
| 2020-10-13T21:59:59
| 2020-10-13T21:59:59
| 295,535,862
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,053
|
rd
|
plot_top_etiol_by_age.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{plot_top_etiol_by_age}
\alias{plot_top_etiol_by_age}
\title{Plot the etiologies of a computed CHAMPS table}
\usage{
plot_top_etiol_by_age(x, etiologies = NULL, top = 6, bar_width = 0.5)
}
\arguments{
\item{x}{A data object obtained from \code{\link{calc_top_etiol_by_age}}.}
\item{etiologies}{The etiologies to include in the graphic, with their
shortened names as a named vector. Defaults to NULL. If NULL, then the top
variable is used and the names are shortened automatically.}
\item{top}{The number of etiologies to include in the graphic}
\item{bar_width}{Defines the width of the bars in the chart. Defaults to 0.5.}
}
\description{
Plot the etiologies of a computed CHAMPS table
}
\examples{
mock_top_etiol_by_age <- calc_top_etiol_by_age(mock,
age_groups = c(
"Death in the first 24 hours",
"Early Neonate (24-72 hours)",
"Early Neonate (72+hrs to 6 days)",
"Late Neonate (7 to 27 days)"))
plot_top_etiol_by_age(mock_top_etiol_by_age)
}
|
24dc9fa95fd7bd8e72f29b2545d7f97d25032369
|
41e94163ad063f7b43541210b26dc1f622b1c050
|
/Indicator_ALL.R
|
5628916b0caf4a916092fdcf7baeaccea77bff69
|
[] |
no_license
|
BlockchainTradeAnalysis/Robot_Portfolio
|
e4de788b3fa214b24522d5791ea71365587b08c1
|
da3417d7b1e91de5bb6bd29d17fc9f98f7419034
|
refs/heads/master
| 2020-04-14T07:27:24.207074
| 2019-01-09T10:37:10
| 2019-01-09T10:37:10
| 163,712,944
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,266
|
r
|
Indicator_ALL.R
|
library(quantmod)
library(TTR)
library(xts)

# Compute a table of technical indicators (KD stochastic, EMAs, MACD,
# Williams %R, momentum) for 4-hour ETH/USDT candles and save it.
# Loads `total_data` from the .rda file below.
load(file="C:\\RData\\ETHUSDT_4HR_2017_8_16_2019_1_5.rda")
head(total_data)

STK <- total_data
# Column layout used below: 2 = open, 3 = high, 4 = low, 5 = close,
# 6 = volume, 13 = date -- TODO confirm against the code that builds
# total_data.
Cl_STK <- as.numeric(STK[,5])
Op_STK <- as.numeric(STK[,2])
High_STK <- as.numeric(STK[,3])
Low_STK <- as.numeric(STK[,4])
Vol_STK <- as.numeric(STK[,6])
Date_STK <- STK[,13]

n_bars <- nrow(STK)

# ---- Stochastic oscillator (KD) ----
# Lookback window for the raw stochastic value (RSV).
KD_N = 9
# numeric(n) preallocates zero-filled vectors, replacing the original
# "fill 1:(KD_N-1) with 0" loop.
Rsv_STK <- numeric(n_bars)
K_STK <- numeric(n_bars)
D_STK <- numeric(n_bars)
# Seed K and D at 50 on the bar just before the first full window.
K_STK[KD_N-1] <- 50
D_STK[KD_N-1] <- 50
for(i in KD_N:n_bars){
  # Raw stochastic; the small constant guards against a zero high-low range.
  Rsv_STK[i] <- (Cl_STK[i]-min(Low_STK[(i-KD_N+1):i]))/(max(High_STK[(i-KD_N+1):i])-min(Low_STK[(i-KD_N+1):i])+0.0001)*100
  # Exponential smoothing: K = 2/3 previous K + 1/3 RSV; D smooths K.
  K_STK[i] <- 2/3*K_STK[i-1]+1/3*Rsv_STK[i]
  D_STK[i] <- 2/3*D_STK[i-1]+1/3*K_STK[i]
}

# ---- Exponential moving average of the close ----
EMA_N = 5
EMA_N_STK <- EMA(Cl_STK,EMA_N)

# ---- MACD ----
EMA1 = 9   # signal-line smoothing period
EMA2 = 12  # fast EMA period
EMA3 = 26  # slow EMA period
EMA12_STK <- EMA(Cl_STK,EMA2)
EMA26_STK <- EMA(Cl_STK,EMA3)
DIF_STK <- EMA12_STK-EMA26_STK
MACD_STK <- EMA(DIF_STK,EMA1)
DIF_MACD_STK <- DIF_STK-MACD_STK

# ---- Williams %R over 11/33/89-bar windows ----
William_11_STK <- numeric(n_bars)
William_33_STK <- numeric(n_bars)
William_89_STK <- numeric(n_bars)
for(i in 11:n_bars){
  William_11_STK[i] = 100-(max(High_STK[(i-11+1):i])-Cl_STK[i])/(max(High_STK[(i-11+1):i])-min(Low_STK[(i-11+1):i]))*100
  if(i>=33){
    William_33_STK[i] = 100-(max(High_STK[(i-33+1):i])-Cl_STK[i])/(max(High_STK[(i-33+1):i])-min(Low_STK[(i-33+1):i]))*100
  }
  if(i>=89){
    William_89_STK[i] = 100-(max(High_STK[(i-89+1):i])-Cl_STK[i])/(max(High_STK[(i-89+1):i])-min(Low_STK[(i-89+1):i]))*100
  }
}

# ---- Momentum (MTM), its EMA-normalised slope, and acceleration ----
MTM_EMA_N = 10
MTM_N = 10
MTM_EMA_N_STK <- EMA(Cl_STK,MTM_EMA_N)
MTM_STK <- numeric(n_bars)
m_MTM_STK <- numeric(n_bars)  # slope (momentum normalised by the EMA)
a_MTM_STK <- numeric(n_bars)  # acceleration (first difference of slope)
for(i in MTM_N:n_bars){
  MTM_STK[i] <- Cl_STK[i]-Cl_STK[i-MTM_N+1]
  m_MTM_STK[i] <- MTM_STK[i]/MTM_EMA_N_STK[i]
}
# FIX: the original loop header `MTM_N+1:(nrow(STK))` parsed as
# `MTM_N + (1:nrow(STK))` because `:` binds tighter than `+`, so the loop
# indexed past the end of the series and the na.omit() assignment then
# failed with a zero-length replacement.  The intended range is
# (MTM_N + 1):n_bars, within which both slope values are defined.
for(i in (MTM_N + 1):n_bars){
  a_MTM_STK[i] <- m_MTM_STK[i] - m_MTM_STK[i-1]
}

# Combine everything into one indicator table aligned on the candle dates.
result_STK <- data.frame(Date=Date_STK,
                         CL=Cl_STK,
                         OP=Op_STK,
                         HI=High_STK,
                         LO=Low_STK,
                         VO=Vol_STK,
                         RSV=Rsv_STK,
                         K=K_STK,
                         D=D_STK,
                         EMA5=EMA_N_STK,
                         EMA12=EMA12_STK,
                         EMA26=EMA26_STK,
                         DIF=DIF_STK,
                         MACD=MACD_STK,
                         DIF_MACD=DIF_MACD_STK,
                         William_11=William_11_STK,
                         William_33=William_33_STK,
                         William_89=William_89_STK,
                         MTM_EMA_10=MTM_EMA_N_STK,
                         MTM = MTM_STK,
                         m_MTM = m_MTM_STK,
                         a_MTM = a_MTM_STK
)

# Write the indicator table for downstream strategy code.
save(result_STK, file="C:\\RData\\result_ETHUSDT_4HR_2017_8_16_2019_1_5.rda")
|
5fbb9bba42f7ddfdef744c70f697ad30c9ff3746
|
4cca07f2956a13ddc2d6c9c0501b2d786df098bf
|
/WordPredict/ui.R
|
9e98748de0918ba931990845b019f23ab7626515
|
[] |
no_license
|
cepwin/capstone
|
84f632e2db2519b0fa365498f35b9a14290b12da
|
1e84bec35fe60d39d367e022d24380f91adcb65d
|
refs/heads/master
| 2021-01-21T15:48:35.383557
| 2017-06-13T22:58:37
| 2017-06-13T22:58:37
| 91,858,137
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 678
|
r
|
ui.R
|
#
# User-interface definition of the word-prediction Shiny application.
# Run the application by clicking 'Run App' above.
#
# More on building applications with Shiny:
#
#    http://shiny.rstudio.com/
#

library(shiny)

# Input side: a free-text phrase box plus a submit button, so reactive
# updates are deferred until the button is pressed.
phrase_input <- sidebarPanel(
  textInput("phrase", "Enter Phase:"),
  submitButton("Submit")
)

# Output side: the predicted next word rendered under a heading.
prediction_output <- mainPanel(
  h3("Next Word"),
  textOutput("nextWord")
)

# Assemble the page: title on top, input sidebar beside the prediction.
shinyUI(fluidPage(
  titlePanel("Text Prediction"),
  sidebarLayout(phrase_input, prediction_output)
))
|
ab99a5e4fbccc50beb91fcd21ffe69f83ddd9d07
|
3e06b4c4c56f314811c9f2678a63a71f81a2b05c
|
/plot4.R
|
af764ca00f19352ba2a0601d077ee47988a8dadd
|
[] |
no_license
|
PKljajic/ExData_Plotting1
|
0847c8a44faaf9ec35ea1b0aba2f73f8a2952fb8
|
7b516a9a663e7754122847a0666c2a830bef4bc8
|
refs/heads/master
| 2020-12-25T21:23:30.396393
| 2015-10-08T09:48:49
| 2015-10-08T09:48:49
| 43,768,990
| 0
| 0
| null | 2015-10-06T18:08:17
| 2015-10-06T18:08:17
| null |
UTF-8
|
R
| false
| false
| 1,742
|
r
|
plot4.R
|
# plot4.R -- draw a 2x2 panel of household power-consumption plots for
# 2007-02-01 and 2007-02-02 and save it to plot4.png.

#elcon is a table - electrical power consumption for dates 1/2/2007 and 2/2/2007
# NOTE(review): skip = 66637 / nrows = 2880 hard-code where those two
# days sit inside household_power_consumption.txt -- verify against the
# data file before reuse.
elcon <- read.table("household_power_consumption.txt", stringsAsFactors = F,
                    na.strings = "?", sep = ";", skip = 66637, nrows = 2880)
#assigning header to table elcon
namesOf <- c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage",
             "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
names(elcon) <- namesOf
#prepairing screen for 4 graphs
par(mfcol = c(2, 2))
#plotting plot1 (upper left)
# x is the observation index (one reading per minute over the two days);
# tick marks at start/middle/end are labelled Thu, Fri, Sat.
x <- 1:length(elcon$Global_active_power)
plot(x, elcon$Global_active_power, type = "l", xaxt = "n", xlab = "",
     ylab = "Global Active Power (kilowatts)");
at <- seq(1, max(x) + 1, max(x)/2)
axis(side = 1, at = at, labels = c("Thu", "Fri", "Sat"))
#plotting plot2 (lower left)
# Empty frame first (type = "n"), then one line per sub-meter.
plot(x, elcon$Sub_metering_1, type = "n", xaxt = "n", xlab = "", ylab = "Energy sub metering")
axis(side = 1, at = at, labels = c("Thu", "Fri", "Sat"))
lines(x, elcon$Sub_metering_1)
lines(x, elcon$Sub_metering_2, col = "red")
lines(x, elcon$Sub_metering_3, col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lwd = 2)
#plot datetime vs voltage (upper right)
plot(x, elcon$Voltage, xlab = "datetime", ylab = "Voltage", xaxt = "n", type = "l")
axis(side = 1, at = at, labels = c("Thu", "Fri", "Sat"))
#plot datetime vs global reactive power (lower right)
plot(x, elcon$Global_reactive_power, xlab = "datetime", ylab = "Global Reactive Power", xaxt = "n", type = "l")
axis(side = 1, at = at, labels = c("Thu", "Fri", "Sat"))
#exporting plot to png file and closing graphics device
# NOTE(review): dev.copy() re-renders at png's default 480x480 device
# size, which can differ slightly from the on-screen layout.
dev.copy(png, "plot4.png")
dev.off()
|
687a36abeead7e7576192a0fd2c00627a603c984
|
86347e19447a2ee4a2b65cb3e1d68e5ac26d82c1
|
/R/PredictEnv.R
|
c02ac7481932aef0f6aa56845d9a74464111497a
|
[
"MIT"
] |
permissive
|
brshipley/megaSDM
|
a9e8cd4de0affaba17fdcda5c3489bc2ab7312dd
|
5d285705e295d168ba26435bc453b5242b0ac2dd
|
refs/heads/master
| 2023-06-22T14:46:43.898117
| 2023-06-09T13:36:19
| 2023-06-09T13:36:19
| 206,993,880
| 18
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,024
|
r
|
PredictEnv.R
|
#' Project, clip, and store forecasted/hindcasted environmental rasters for SDM prediction
#'
#' This function takes lists of SpatRasters that correspond to future or past time periods
#' of a single climate model (e.g., RCP4.5, CCSM3), ensures that the environmental variables
#' are the same as those that the model will be developed on, and projects, clips, and resamples
#' these layers to the characteristics of a given study region. If more than one climate scenario
#' is required, run this function multiple times for each climate scenario.
#'
#' @param studylayers A single SpatRaster or list of raster files that constitute all environmental
#' variables and parameters (e.g., extent, resolution) used for projecting the modelled relationship
#' (see \code{$study} in \code{megaSDM::TrainStudyEnv}).
#' @param futurelayers A list of SpatRasters or vectors of file-names corresponding to the environmental
#' variables at the different time periods the model will be forecasted/hindcasted to.
#' @param time_periods a vector of the time periods the models will be projected to, with the first element
#' as the year the model will be trained on (usually the current data). The projected time periods should be
#' given in the same order as the list given in \code{futurelayers}. If no precise years are available
#' (e.g., using data from the Last Glacial Maximum), order the \code{futurelayers} from current to least
#' current (farthest into the future/past) and give character strings for the years (e.g., "LGM"). If
#' running dispersal analyses, \code{time_periods} must be numeric (e.g., -21000 instead of "LGM").
#' @param output If the rasters are to be written to the computer, the full path of the directory where
#' they will be written out to as .grd files. If there are multiple climate scenarios wanted in this
#' SDM analysis, give the output directory the name of the climate scenario (e.g., ".../output/RCP4.5).
#' If set to \code{NA} (the default), the rasters will not be written out and will be returned as the value
#' of this function.
#' @param scenario_name (optional) If the rasters are to be written to the disk, a character string with the
#' name of the climate model/scenario. A sub-directory will be created within \code{output} and files will
#' be placed in there. If only one distinct climate scenario is needed, still give it a name for reference
#' in other functions within \code{megaSDM}.
#' @param maxentproj TRUE/FALSE: Will the MaxEntProj step be run on these data? If so, the NA value of the raster
#' will be set to the maximum raster value + 0.01 instead of the non-numeric NaN when written out.
#'
#' @export
#' @return Returns a list of multi-layer SpatRasters, with each SpatRaster corresponding to a time period.
PredictEnv <- function(studylayers, futurelayers,
                       time_periods, output = NA,
                       scenario_name = NA,
                       maxentproj = TRUE) {
  # --- Current-climate layers ------------------------------------------------
  # If file paths were given (rather than a SpatRaster), verify that every
  # layer shares one projection and one extent before stacking them.
  if (!inherits(studylayers, "SpatRaster")) {
    projstudy <- rep(NA_character_, length(studylayers))
    extstudy <- rep(NA_character_, length(studylayers))
    for (i in seq_along(studylayers)) {
      projstudy[i] <- as.character(terra::crs(terra::rast(studylayers[[i]])))
      extstudy[i] <- as.character(terra::ext(terra::rast(studylayers[[i]])))
    }
    if (length(unique(projstudy)) > 1) {
      stop("Not all of the current study area environmental rasters are in the same projection")
    } else if (length(unique(extstudy)) > 1) {
      stop("Not all of the current study area environmental rasters have the same extent")
    }
    studylayers <- terra::rast(studylayers)
  }
  if (is.na(terra::crs(studylayers))) {
    stop("study area raster crs = NA: Ensure all raster layers have a defined coordinate projection")
  }

  # The first element of time_periods is the current (training) period; the
  # remaining elements must correspond one-to-one with `futurelayers`.
  time_periods <- time_periods[2:length(time_periods)]
  if (length(time_periods) != length(futurelayers)) {
    stop("The number of time_periods given does not match the number of environmental layer sets")
  }

  # --- Future/past layers: one multi-layer SpatRaster per time period -------
  PredictEnv <- vector("list", length(time_periods))
  for (j in seq_along(time_periods)) {
    focuslayers <- futurelayers[[j]]
    if (!inherits(focuslayers, "SpatRaster")) {
      # Same projection/extent consistency checks as for the current layers.
      # (The error messages previously said "current study area" here by
      # copy-paste mistake; they refer to the future/past rasters.)
      projfocus <- rep(NA_character_, length(focuslayers))
      extfocus <- rep(NA_character_, length(focuslayers))
      for (i in seq_along(focuslayers)) {
        projfocus[i] <- as.character(terra::crs(terra::rast(focuslayers[[i]])))
        extfocus[i] <- as.character(terra::ext(terra::rast(focuslayers[[i]])))
      }
      if (length(unique(projfocus)) > 1) {
        stop("Not all of the future/past environmental rasters are in the same projection")
      } else if (length(unique(extfocus)) > 1) {
        stop("Not all of the future/past environmental rasters have the same extent")
      }
      focuslayers <- terra::rast(focuslayers)
    }
    if (is.na(terra::crs(focuslayers))) {
      stop("future/past raster crs = NA: Ensure all raster layers have a defined coordinate projection")
    }
    if (!setequal(names(focuslayers), names(studylayers))) {
      message("Warning: the environmental layer names do not match between current and future/past raster data")
    }

    # Reproject/resample the future layers onto the current-study grid so
    # every time period shares projection, extent, and resolution.
    if (as.character(terra::crs(focuslayers)) != as.character(terra::crs(studylayers))) {
      focuslayers <- terra::project(focuslayers, terra::crs(studylayers))
      if (terra::res(focuslayers)[1] > terra::res(studylayers)[1]) {
        message("Warning: the future/past raster data have coarser resolution than the current raster data")
        message(paste0("Changing the resolution of the current raster data to ", terra::res(focuslayers)[1], "is recommended"))
      }
      focuslayers_res <- terra::rast(extent = terra::ext(studylayers), resolution = terra::res(studylayers), crs = terra::crs(studylayers))
      focuslayers <- terra::resample(focuslayers, focuslayers_res, method = "bilinear")
    } else if (terra::res(focuslayers)[1] > terra::res(studylayers)[1]) {
      message("Warning: the future/past raster data have coarser resolution than the current raster data")
      message(paste0("Changing the resolution of the current raster data to ", terra::res(focuslayers)[1], "is recommended"))
    }
    if (terra::ext(focuslayers) != terra::ext(studylayers)) {
      focuslayers <- terra::crop(focuslayers, terra::ext(studylayers))
    }
    # Float-safe resolution comparison (was setequal on numeric vectors).
    if (!isTRUE(all.equal(terra::res(focuslayers), terra::res(studylayers)))) {
      focuslayers <- terra::resample(focuslayers, studylayers, method = "bilinear")
    }
    PredictEnv[[j]] <- focuslayers
  }
  names(PredictEnv) <- time_periods

  # --- Optionally write the rasters to disk ----------------------------------
  if (!is.na(output)) {
    for (i in seq_along(PredictEnv)) {
      if (!is.na(scenario_name)) {
        if (!dir.exists(paste0(output, "/", scenario_name))) {
          dir.create(paste0(output, "/", scenario_name))
        }
      }
      time <- names(PredictEnv)[i]
      if (!dir.exists(paste0(output, "/", scenario_name, "/", time))) {
        dir.create(paste0(output, "/", scenario_name, "/", time))
      }
      if (maxentproj) {
        # MaxEnt cannot read a non-numeric NoData value, so replace NA with
        # (max value + 0.01) and declare that as the NAflag when writing.
        for (e in seq_len(terra::nlyr(PredictEnv[[i]]))) {
          FocusRast <- PredictEnv[[i]][[e]]
          MaxValue <- max(terra::values(FocusRast), na.rm = TRUE)
          FocusRast[which(is.na(terra::values(FocusRast)))] <- as.numeric(MaxValue + 0.01)
          terra::writeRaster(FocusRast,
                             filename = paste0(output, "/", scenario_name, "/", time, "/", names(FocusRast), ".grd"),
                             overwrite = TRUE, NAflag = as.numeric(MaxValue + 0.01))
        }
      } else {
        # BUG FIX: this branch previously referenced `names(FocusRast)`, a
        # variable only defined inside the maxentproj branch (undefined or
        # stale here). Write each layer to its own file, mirroring the
        # one-file-per-layer layout of the maxentproj branch.
        for (e in seq_len(terra::nlyr(PredictEnv[[i]]))) {
          FocusRast <- PredictEnv[[i]][[e]]
          terra::writeRaster(FocusRast,
                             filename = paste0(output, "/", scenario_name, "/", time, "/", names(FocusRast), ".grd"),
                             overwrite = TRUE)
        }
      }
    }
  }
  return(PredictEnv)
}
|
f0dba2156124aa62a14eec48ea017a28c02556b1
|
1a3576049a5b97f1192b472023d5010d3bd849fa
|
/RAO/R/server.R
|
b319f55fb5f52b3cbd68e86e8b3ea405cc44d199
|
[
"MIT"
] |
permissive
|
thecomeonman/RAO
|
0ab6a6a5352a87068d2d9bb580da2eedc79b6b3c
|
80670220f4b49fbbf197d0ae027dd953f24a92b8
|
refs/heads/master
| 2023-06-02T02:08:49.755885
| 2020-10-16T02:35:26
| 2020-10-16T02:35:26
| 380,129,415
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 80,303
|
r
|
server.R
|
#' @import shiny
#' @import shinydashboard
#' @import ggplot2
#' @import gridExtra
#' @import ggmap
#' @import data.table
#' @import plyr
#' @import bit64
#' @import DT
#' @import GA
#' @import snow
#' @import snowfall
#' @import rlecuyer
#' @import igraph
fDashboardServer = function(input, output, session) {
cat(
file = stderr(),
paste(
Sys.time(),
'Launching app'
)
)
theme_set(theme_bw(12))
# preventing scientific notation when printing numbers
options(scipen=50)
# allows users to upload files of up to 100MB
# the optimisation result could reach that size if it's a large city with
# lots of locations
options(shiny.maxRequestSize=100*1024^2)
# Directory locations for app, etc.
# ------------------------------------------------------------------------------
cRootDirectory = getShinyOption("cRootDirectory", Sys.getenv('AtherGitRepo'))
cAtherDataLocation = getShinyOption("cAtherDataLocation", Sys.getenv('AtherDataLocation'))
cRootDirectory = if ( cRootDirectory == '' ) {
'/mnt/disks/vi-data/AtherGit'
} else {
cRootDirectory
}
cAtherDataLocation = if ( cAtherDataLocation == '' ) {
'/mnt/disks/vi-data/Data'
} else {
cAtherDataLocation
}
# Other input parametrs
# ------------------------------------------------------------------------------
nEarthCircumference_m = getShinyOption('nEarthCircumference_m', 40000000)
nMaxRadiusUserWillUse_m = getShinyOption('nMaxRadiusUserWillUse_m', 6000)
# initialising reactive values list
lReactiveValues = reactiveValues(
iTableRenderNumber = 0,
ggplotMapWidth = 100,
ggplotMapHeight = 100
)
plotCityBlank = ggplot() +
geom_text(aes(x=0,y=0,label='No city data plotted yet')) +
theme(
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
line = element_blank(),
title = element_blank(),
text = element_blank()
)
# --------------------------------------------------------------------------
# Dropdown listing every city with a processed data directory under
# <data location>/Processed/Pikachu; rendered server-side so the list
# reflects whatever is on disk.
output$selectizeInputCities = renderUI({
  vcAvailableCities = list.files(
    paste0(
      cAtherDataLocation,
      '/Processed/Pikachu'
    )
  )
  selectizeInput(
    inputId = 'selectizeInputCities',
    label = 'Pick City',
    choices = vcAvailableCities,
    selected = NULL,
    multiple = F
  )
})
# Reactive path to the selected city's processed data directory;
# re-evaluates whenever the city dropdown changes.
cDataDirectory = reactive({
  cCityDirectory = paste0(
    cAtherDataLocation,
    '/Processed/Pikachu/',
    input$selectizeInputCities
  )
  cat(
    file = stderr(),
    paste(
      Sys.time(),
      'Changing working directory to ',
      cCityDirectory,
      'as per city choice\n'
    )
  )
  cCityDirectory
})
# uploading the locations file
observeEvent(
input$actionButtonUploadLocations,
{
cat(
file = stderr(),
paste(
Sys.time(),
'Uploading location data\n'
)
)
progress = Progress$new(session)
on.exit(progress$close())
progress$set(
message = 'Loading location data'
)
# reading in the file and some basic processing
inFile <- input$fileInputUploadLocations
if (is.null(inFile))
return(NULL)
tryCatch(
{
dtLocations = fread(inFile$datapath, stringsAsFactors = F)
setkey(dtLocations, LocationID)
dtLocations[, LocationID := as.character(LocationID)]
dtLocations[, Longitude_deg := as.numeric(as.character(Longitude_deg))]
dtLocations[, Latitude_deg := as.numeric(as.character(Latitude_deg))]
dtLocations[, LocationName := as.character(LocationName)]
dtLocations[, Score := as.numeric(Score)]
},
error = function(e) {
showModal(modalDialog(
title = "Error! The app expects a file with 5 + 2 columns: LocationID, a character column for LocationName, numeric columns for Score, Latitude_deg, and Longitude_deg, and optionally columns for Status and Chosen.",
easyClose = TRUE,
footer = NULL
))
return ( NULL )
}
)
if ( dtLocations[,length(unique(LocationID))] != nrow(dtLocations) ) {
vcDuplicates = dtLocations[,.N,LocationID][N>1][, LocationID]
showModal(modalDialog(
title = paste("Error! Duplicate locations IDs:", paste(vcDuplicates, collapse = ', ')),
easyClose = TRUE,
footer = NULL
))
}
# Fixing status column
if ( !'Status' %in% colnames(dtLocations) ) {
dtLocations[, Status := '?']
} else {
dtLocations[, Status := as.character(Status)]
dtLocations[is.na(Status), Status := '?']
}
# Fixing chosen column
if ( !'Chosen' %in% colnames(dtLocations) ) {
dtLocations[, Chosen := 'F']
} else {
dtLocations[, Chosen := as.character(Chosen)]
dtLocations[is.na(Chosen), Chosen := 'F']
dtLocations[Chosen == 'TRUE', Chosen := 'T']
dtLocations[Chosen == 'FALSE', Chosen := 'F']
}
# Sanity check for location IDs, etc.
if ( !all(complete.cases(dtLocations)) ) {
showModal(modalDialog(
title = paste(
"Error! Some of the locations have missing values for the ID, name, or coordinates (",
paste(
dtLocations[!complete.cases(dtLocations), paste(LocationID,':', LocationName)],
collapse = ', '
# collapse <br/> didn't work and \n didn't either
),
') Please remove / rectify. For now, these locations shall be excluded.'
),
easyClose = TRUE,
footer = NULL
))
}
dtLocations = dtLocations[complete.cases(dtLocations)]
# Since data.tables are by reference, we need to trace updates to
# location with posixctLastLocationUpload
lReactiveValues$dtLocations = dtLocations
lReactiveValues$posixctLastLocationUpload = Sys.time()
# adding locations to the actionable locations selection box
vcLocations = unlist(dtLocations[, LocationID])
names(vcLocations) = dtLocations[, LocationName]
selectizeInputToActionLocations = input$selectizeInputToActionLocations
updateSelectizeInput(
session = session,
inputId = 'selectizeInputToActionLocations',
choices = vcLocations,
selected = intersect(vcLocations, selectizeInputToActionLocations)
)
# Getting coverage data for some other processing
dtPolygonCoveredEdges = lReactiveValues$dtPolygonCoveredEdges
if ( !is.null(dtPolygonCoveredEdges) ) {
dtWays = lReactiveValues$dtWays
# Checking for locations not in coverage data
progress$set(
message = 'Checking for locations not in coverage data'
)
vcNewLocations = setdiff(
dtLocations[, LocationID],
dtPolygonCoveredEdges[, unique(LocationID)]
)
if ( length(vcNewLocations) > 0 ) {
names(vcNewLocations) = dtLocations[
LocationID %in% vcNewLocations,
LocationName
]
}
updateSelectizeInput(
session = session,
inputId = 'selectizeInputNewLocations',
choices = vcNewLocations,
selected = vcNewLocations
)
# Calculating location coverage details
progress$set(
message = 'Calculating location coverage details'
)
lReactiveValues$dtCoverageSummary = fGenerateCoverageSummary(
dtLocations = dtLocations,
dtWays = dtWays,
dtPolygonCoveredEdges = dtPolygonCoveredEdges,
nMaxRadiusUserWillUse_m
)
}
print(head(dtLocations))
}
)
# creating a datatable with checkboxes, etc. for the user to manipulate
output$datatableLocations = DT::renderDataTable(
{
cat(
file = stderr(),
paste(
Sys.time(),
'Creating the datatable of locations\n'
)
)
progress = Progress$new(session)
# on.exit(progress$close())
progress$set(
message = 'Preparing table'
)
iTableRenderNumber = isolate(lReactiveValues$iTableRenderNumber) + 1
lReactiveValues$posixctLastLocationUpload
# creatinga a copy becase we don't want to modify the original one
dtLocations = copy(isolate(lReactiveValues$dtLocations))
if ( is.null(dtLocations) ) {
return ( NULL )
}
setkey(dtLocations, LocationID)
# removing the previous radio button elements
# http://stackoverflow.com/questions/40020600/r-shiny-selectedinput-inside-renderdatatable-cells/40027726#40027726
session$sendCustomMessage('unbind-DT', 'datatableLocations')
# creating the radio button options
dtLocations[,
Status := fActOnShinyInput(
cFunctionName = 'radioButtons',
id = paste0('radioStatus', iTableRenderNumber),
vcSuffixes = LocationID,
choices = c('T','F','?'),
selected = Status,
inline = T,
label = NULL
),
Status
]
dtLocations[,
Chosen := fActOnShinyInput(
cFunctionName = 'radioButtons',
id = paste0('radioChosen', iTableRenderNumber),
vcSuffixes = LocationID,
choices = c('T','F'),
selected = Chosen,
inline = T,
label = NULL
),
Chosen
]
# creating the radio button options
dtLocations[,
X := fActOnShinyInput(
cFunctionName = 'checkboxInput',
id = paste0('checkBoxActive', iTableRenderNumber),
vcSuffixes = LocationID,
value = F,
label = NULL
),
Chosen
]
cat(
file = stderr(),
paste(
Sys.time(),
'dtLocations done\n'
)
)
# Adding overall coverage summary details
dtCoverageSummary = lReactiveValues$dtCoverageSummary
if ( !is.null(dtCoverageSummary) ) {
dtLocations = merge(
dtLocations[, list(X, LocationID, LocationName, Score = round(Score,2), Status, Chosen)] ,
dtCoverageSummary,
'LocationID',
all.x = T
)
}
cat(
file = stderr(),
paste(
Sys.time(),
'dtCoverageSummary done\n'
)
)
cat(
file = stderr(),
paste(
Sys.time(),
'Performance fixes\n'
)
)
# Adding optimisation coverage summary details
dtOptimisationCoverageSummmary = lReactiveValues$lOptimisationResult$lEditedResults$dtOptimisationCoverageSummmary
if ( !is.null(dtOptimisationCoverageSummmary) ) {
dtLocations = merge(
dtLocations,
dtOptimisationCoverageSummmary,
'LocationID',
all.x = T
)
setkey(
dtLocations,
'C/1'
)
dtLocations = dtLocations[.N:1]
} else {
dtLocations[, c('V1','V2') := NA]
setnames(
dtLocations,
c('V1','V2'),
c('C','C/1')
)
}
cat(
file = stderr(),
paste(
'dtOptimisationCoverageSummmary done\n'
)
)
dtLocations[, LocationID := NULL]
dtLocations[, Score := NULL]
lReactiveValues$iTableRenderNumber = iTableRenderNumber
progress$set(
message = 'The table has been created. It may take a few seconds
to load in the browser though. This pop up won\t disappear
automatically so once the table is loaded, please close it
yourself.'
)
return ( dtLocations )
},
server = TRUE,
escape = FALSE,
selection = 'none',
filter = 'top',
# extensions = c('FixedColumns',"FixedHeader"),
# extensions = c('FixedHeader'),
# rownames = FALSE,
options = list(
scrollY = '400px',
autoWidth = TRUE,
columnDefs = list(
list(
width = '40px',
targets = c(1, 5, 6, 7, 8, 9)
),
list(
width = '80px',
targets = 4
),
list(
width = '120px',
targets = 3
)
),
# scrollX = F,
paging = FALSE,
preDrawCallback = JS('function() { Shiny.unbindAll(this.api().table().node()); }'),
drawCallback = JS('function() { Shiny.bindAll(this.api().table().node()); } ')
# fixedHeader =TRUE
# fixedColumns = list(leftColumns = 3, rightColumns = 0),
)
)
# Debug-only output: dumps the current state of the per-row checkbox inputs.
# Not used anywhere except when debugging is needed.
output$debugging = renderPrint({
  cat(
    file = stderr(),
    paste(
      Sys.time(),
      'Debugging chunk\n'
    )
  )
  dtLocations = (lReactiveValues$dtLocations)
  # BUG FIX: `iTableRenderNumber` was referenced below as a free variable
  # that is never defined in this renderPrint's scope; read it from the
  # reactive values list, as every other observer in this file does.
  iTableRenderNumber = lReactiveValues$iTableRenderNumber
  print(Sys.time())
  # qwe1 = (fRetrieveShinyValue(paste0('radioStatus', iTableRenderNumber), dtLocations[, LocationID]))
  # qwe2 = fRetrieveShinyValue(paste0('radioChosen', iTableRenderNumber), unlist(dtLocations[, LocationID]))
  qwe3 = fRetrieveShinyValue(
    paste0('checkBoxActive', iTableRenderNumber),
    unlist(dtLocations[, LocationID]),
    input
  )
  # print(str(qwe1))
  # print(str(qwe2))
  print(str(qwe3))
})
# loading the map, etc. data about the city
observeEvent(
input$actionButtonLoadCityData,
{
cat(
file = stderr(),
paste(
Sys.time(),
'Uploading city data, etc.\n'
)
)
progress = Progress$new(session)
on.exit(progress$close())
# OSM data about ways and nodes
progress$set(
message = 'Loading map data'
)
load(
paste0(
cDataDirectory(),
'/OSMServiceable.Rdata'
)
)
setDT(dtNodes)
setDT(dtWays)
dtNodes = dtNodes[
NodeID %in% dtWays[, SourceID] |
NodeID %in% dtWays[, DestinationID]
]
lReactiveValues$dtNodes = dtNodes
lReactiveValues$dtWays = dtWays
# the image tiles for the map plot
progress$set(
message = 'Loading map image'
)
load(
paste0(
cDataDirectory(),
'/ggMapTiles.Rdata'
)
)
lReactiveValues$ggMapTiles = ggMapTiles
# data for the coverage of ways by each nodes
progress$set(
message = 'Loading coverage data'
)
load(
paste0(
cDataDirectory(),
'/dtPolygonCoveredEdgesServiceable.Rdata'
)
)
setDT(dtPolygonCoveredEdges)
dtPolygonCoveredEdges = dtPolygonCoveredEdges[
DistanceFromDestination_m <= nMaxRadiusUserWillUse_m,
list(LocationID, WayID, DistanceFromDestination_m)
]
# dtPolygonCoveredEdges[, setdiff(colnames(dtPolygonCoveredEdges), c('LocationID','WayID', 'DistanceFromDestination_m')) := NULL]
# save(list = 'dtPolygonCoveredEdges', file = 'dtPolygonCoveredEdgesServiceable.Rdata')
gc()
setkey(dtPolygonCoveredEdges, LocationID)
dtPolygonCoveredEdges[, LocationID := as.character(LocationID)]
setkey(dtPolygonCoveredEdges, LocationID)
lReactiveValues$dtPolygonCoveredEdges = dtPolygonCoveredEdges
# checking if all loctions are present in the coverage data. Else need to pre-process
dtLocations = isolate(lReactiveValues$dtLocations)
if ( !is.null(dtLocations) ) {
progress$set(
message = 'Checking for locations not in coverage data'
)
vcNewLocations = setdiff(
dtLocations[, LocationID],
dtPolygonCoveredEdges[, LocationID]
)
if ( length(vcNewLocations) > 0 ) {
names(vcNewLocations) = dtLocations[
LocationID %in% vcNewLocations,
LocationName
]
}
updateSelectizeInput(
session = session,
inputId = 'selectizeInputNewLocations',
choices = vcNewLocations,
selected = vcNewLocations
)
# Generating coverage summary
progress$set(
message = 'Generating location details'
)
dtPolygonCoveredEdges = lReactiveValues$dtPolygonCoveredEdges
if ( is.null(dtPolygonCoveredEdges) ) {
dtLocations = dtLocations[, list(X, LocationName, Status, Chosen)]
return ( dtLocations )
}
lReactiveValues$dtCoverageSummary = fGenerateCoverageSummary(
dtLocations = dtLocations,
dtWays = dtWays,
dtPolygonCoveredEdges = dtPolygonCoveredEdges,
nMaxRadiusUserWillUse_m = nMaxRadiusUserWillUse_m
)
}
})
# map plotting with the locations, etc.
# Renders the city map. The pixel dimensions are derived from the width
# slider and the aspect ratio of the currently displayed lat/long window,
# so the map is never stretched.
output[['ggplotMap']] = renderPlot({
  cat(
    file = stderr(),
    paste(
      Sys.time(),
      'Plotting map\n'
    )
  )
  nRequestedWidth = input[['sliderPlotWidth']]
  vnLatRange_deg = lReactiveValues$ggplotMapLatitudeRange_deg
  if ( is.null(vnLatRange_deg) ) {
    # No map extents known yet - fall back to a small placeholder height
    nPlotHeight = 100
    nPlotWidth = nRequestedWidth
  } else {
    vnLonRange_deg = lReactiveValues$ggplotMapLongitudeRange_deg
    # aspect = lat span / long span; scale the shorter dimension down
    nAspect = diff(vnLatRange_deg) / diff(vnLonRange_deg)
    if ( nAspect < 1 ) {
      nPlotWidth = nRequestedWidth
      nPlotHeight = nRequestedWidth * nAspect
    } else {
      nPlotWidth = nRequestedWidth / nAspect
      nPlotHeight = nRequestedWidth
    }
  }
  lReactiveValues[['ggplotMapWidth']] = nPlotWidth
  lReactiveValues[['ggplotMapHeight']] = nPlotHeight
  lReactiveValues[['ggplotMap']]
},
width = function () { lReactiveValues[['ggplotMapWidth']] },
height = function () { lReactiveValues[['ggplotMapHeight']] }
)
# Rebuild the city plot when the user presses the update-plot button.
observeEvent(
  input$actionButtonUpdatePlot,
  {
    cat(
      file = stderr(),
      paste(
        Sys.time(),
        'Updating plot\n'
      )
    )
    # Gather every input the plotting routine needs
    selectizeInputPlotScenario = input$selectizeInputPlotScenario
    sliderLocationSize = input$sliderLocationSize
    sliderCoverageSize = input$sliderCoverageSize
    dtLocations = isolate( lReactiveValues$dtLocations )
    dtWays = lReactiveValues$dtWays
    ggMapTiles = lReactiveValues$ggMapTiles
    lEditedResults = lReactiveValues$lOptimisationResult$lEditedResults
    dtOptimisedLocations = lEditedResults$dtOptimisedLocations
    dtCoveredEdges = lEditedResults$dtCoveredEdges
    dtOptimisationCoverageSummmary = lEditedResults$dtOptimisationCoverageSummmary
    # <<- keeps the last plot reachable in the enclosing scope for debugging
    ggplotCity <<- fGetCityPlot(
      ggMapTiles = ggMapTiles,
      selectizeInputPlotScenario = selectizeInputPlotScenario,
      dtOptimisedLocations = dtOptimisedLocations,
      dtLocations = dtLocations,
      dtWays = dtWays,
      dtOptimisationCoverageSummmary = dtOptimisationCoverageSummmary,
      dtCoveredEdges = dtCoveredEdges,
      sliderLocationSize = sliderLocationSize,
      sliderCoverageSize = sliderCoverageSize
    )
    print('plot done. <<-')
    lReactiveValues[['ggplotMap']] = ggplotCity
    plot(ggplotCity)
    # Remember the full lat/long window so zoom resets work later
    if ( !is.null(dtLocations) ) {
      lReactiveValues[['ggplotMapLongitudeRange_deg']] = dtLocations[, range(Longitude_deg)]
      lReactiveValues[['ggplotMapLatitudeRange_deg']] = dtLocations[, range(Latitude_deg)]
    }
  }
)
# Zoom: when the user drags a brush on the map, clamp the plot (and the
# stored lat/long window) to the brushed rectangle.
observe({
  cat(
    file = stderr(),
    paste(
      Sys.time(),
      'Implementing brush\n'
    )
  )
  lBrush = input$ggplotMapBrush
  if ( is.null(lBrush) ) {
    return ( NULL )
  }
  ggplotCurrentMap = isolate(lReactiveValues[['ggplotMap']])
  if ( is.null(ggplotCurrentMap) ) {
    return ( NULL )
  }
  vnXLim = c(lBrush$xmin, lBrush$xmax)
  vnYLim = c(lBrush$ymin, lBrush$ymax)
  lReactiveValues[['ggplotMap']] = ggplotCurrentMap + coord_cartesian(
    xlim = vnXLim,
    ylim = vnYLim
  )
  lReactiveValues[['ggplotMapLongitudeRange_deg']] = vnXLim
  lReactiveValues[['ggplotMapLatitudeRange_deg']] = vnYLim
})
# Double-click resets the zoom to show all uploaded locations (padded to a
# square-ish window), or the default coordinate system if none exist.
observe({
  cat(
    file = stderr(),
    paste(
      Sys.time(),
      'Implementing double click\n'
    )
  )
  if ( is.null(input$ggplotMapDblClick) ) {
    return ( NULL )
  }
  ggplotCurrentMap = isolate( lReactiveValues[['ggplotMap']] )
  if ( is.null(ggplotCurrentMap) ) {
    return ( NULL )
  }
  dtLocations = isolate( lReactiveValues$dtLocations )
  # coord_fixed caused problems upstream, so coord_cartesian is used and the
  # shorter of the two spans is padded to make the window roughly square.
  # (The original author noted this probably has little effect since the
  # rendered image itself isn't square.)
  if ( !is.null(dtLocations) ) {
    vnLonRange_deg = range(dtLocations[, Longitude_deg], na.rm = T)
    vnLatRange_deg = range(dtLocations[, Latitude_deg], na.rm = T)
    nBiggestSpan_deg = pmax(diff(vnLatRange_deg), diff(vnLonRange_deg))
    if ( diff(vnLonRange_deg) < nBiggestSpan_deg ) {
      nPad_deg = (nBiggestSpan_deg - diff(vnLonRange_deg)) / 2
      vnLonRange_deg = vnLonRange_deg + c(-nPad_deg, nPad_deg)
    } else {
      nPad_deg = (nBiggestSpan_deg - diff(vnLatRange_deg)) / 2
      vnLatRange_deg = vnLatRange_deg + c(-nPad_deg, nPad_deg)
    }
    ggplotCurrentMap = ggplotCurrentMap +
      coord_cartesian(
        xlim = vnLonRange_deg + c(-0.01, 0.01),
        ylim = vnLatRange_deg + c(-0.01, 0.01)
      )
  } else {
    ggplotCurrentMap = ggplotCurrentMap + coord_cartesian()
  }
  lReactiveValues[['ggplotMap']] = ggplotCurrentMap
  lReactiveValues[['ggplotMapLongitudeRange_deg']] = dtLocations[, range(Longitude_deg)]
  lReactiveValues[['ggplotMapLatitudeRange_deg']] = dtLocations[, range(Latitude_deg)]
})
# Keep the per-row checkboxes in the datatable in sync with the
# 'locations to action' selectize box: tick selected IDs, untick the rest.
observe({
  cat(
    file = stderr(),
    paste(
      Sys.time(),
      'Updating check boxes based on selectize box of action locations\n'
    )
  )
  vcSelectedIDs = input$selectizeInputToActionLocations
  iTableRenderNumber = isolate(lReactiveValues$iTableRenderNumber)
  # Reactive dependency: re-run whenever a new locations file is uploaded
  lReactiveValues$posixctLastLocationUpload
  dtLocations = isolate(lReactiveValues$dtLocations)
  if ( is.null(dtLocations) ) {
    return ( NULL )
  }
  isolate({
    vcAllIDs = dtLocations[, LocationID]
    for ( cSuffix in intersect(vcAllIDs, vcSelectedIDs) ) {
      updateCheckboxInput(
        inputId = paste0('checkBoxActive', iTableRenderNumber, cSuffix),
        value = T,
        session = session
      )
    }
    for ( cSuffix in setdiff(vcAllIDs, vcSelectedIDs) ) {
      updateCheckboxInput(
        inputId = paste0('checkBoxActive', iTableRenderNumber, cSuffix),
        value = F,
        session = session
      )
    }
  })
})
# Reverse sync: read the datatable's per-row checkboxes and reflect the
# ticked rows into the 'locations to action' selectize box.
observe({
  cat(
    file = stderr(),
    paste(
      Sys.time(),
      'Adding checked items to selectize action locations \n'
    )
  )
  # This read cannot be isolated: on dashboard launch the observer fires
  # once with NULL (nothing happens), and afterwards reactivity would ignore
  # the checkbox inputs because they had not registered yet.
  lReactiveValues$posixctLastLocationUpload
  dtLocations = isolate(lReactiveValues$dtLocations)
  iTableRenderNumber = isolate(lReactiveValues$iTableRenderNumber)
  if ( is.null(dtLocations) ) {
    return ( NULL )
  }
  vbTicked = fRetrieveShinyValue(
    paste0('checkBoxActive', iTableRenderNumber),
    dtLocations[, LocationID],
    input
  )
  # Bail out while the checkbox inputs have not registered yet
  if ( any(is.null(vbTicked) ) | any(is.na(vbTicked) ) ) {
    return ( NULL )
  }
  dtTickedLocations = dtLocations[vbTicked]
  vcSelected = dtTickedLocations[, LocationID]
  names(vcSelected) = dtTickedLocations[, LocationName]
  isolate({updateSelectizeInput(
    session = session,
    inputId = 'selectizeInputToActionLocations',
    selected = vcSelected
  )})
})
# Clicking on the map toggles the nearest location (within 1 km) in the
# 'locations to action' selectize box, and mirrors the change into the
# corresponding datatable checkbox.
observe({
  cat(
    file = stderr(),
    paste(
      Sys.time(),
      'Adding location on click\n'
    )
  )
  ggplotMapClick = input$ggplotMapClick
  iTableRenderNumber = isolate(lReactiveValues$iTableRenderNumber)
  if ( is.null(ggplotMapClick)) {
    return ( NULL )
  }
  dtLocations = copy(isolate(lReactiveValues$dtLocations))
  vcActioned = isolate(input$selectizeInputToActionLocations)
  if ( is.null(dtLocations) ) {
    return ( NULL )
  }
  # Nearest location to the click, within a 1000 m radius
  dtClickedLocation = dtLocations[
    fNearestLocation(
      nLongitude_deg = ggplotMapClick$x,
      nLatitude_deg = ggplotMapClick$y,
      nRadius_m = 1000,
      vnLongitude_deg = dtLocations$Longitude_deg,
      vnLatitude_deg = dtLocations$Latitude_deg
    )
  ]
  if ( nrow(dtClickedLocation) == 0 ) {
    return ( data.table() )
  }
  cCheckboxPrefix = paste0('checkBoxActive', iTableRenderNumber)
  # Three cases: nothing selected yet, clicked location already selected
  # (remove it), or clicked location not yet selected (append it).
  if ( is.null(vcActioned) ) {
    vcActioned = c(
      dtClickedLocation[, LocationID]
    )
    names(vcActioned) = dtClickedLocation[, LocationName]
    isolate({updateCheckboxInput(
      session = session,
      inputId = paste0(cCheckboxPrefix, unlist(dtClickedLocation[, LocationID])),
      value = T
    )})
  } else if ( dtClickedLocation[, LocationID] %in% vcActioned ) {
    vcActioned = setdiff(
      vcActioned,
      dtClickedLocation[, LocationID]
    )
    isolate({updateCheckboxInput(
      session = session,
      inputId = paste0(cCheckboxPrefix, unlist(dtClickedLocation[, LocationID])),
      value = F
    )})
  } else {
    vcActioned = c(
      vcActioned,
      dtClickedLocation[, LocationID]
    )
    names(vcActioned) = c(
      names(vcActioned),
      dtClickedLocation[, LocationName]
    )
    isolate({updateCheckboxInput(
      session = session,
      inputId = paste0(cCheckboxPrefix, dtClickedLocation[, LocationID]),
      value = T
    )})
  }
  isolate({updateSelectizeInput(
    session = session,
    inputId = 'selectizeInputToActionLocations',
    selected = vcActioned
  )})
})
# Optimisation
observeEvent(
input$actionButtonOptimise,
{
progress = Progress$new(session)
on.exit(progress$close())
# OSM data about ways and nodes
progress$set(
message = 'Optimising'
)
cat(
file = stderr(),
paste(
Sys.time(),
'Optimising\n'
)
)
radioOptimisationType = input$radioOptimisationType
checkboxOptimiseMore = input$checkboxOptimiseMore
sliderCoverageRadius = input$sliderCoverageRadius
sliderCoverageScoreTradeoff = input$sliderCoverageScoreTradeoff
sliderMaxStations = input$sliderMaxStations
sliderMinScore = input$sliderMinScore
sliderMinCoverage = input$sliderMinCoverage
sliderMaxIterations = input$sliderMaxIterations
sliderPopulation = input$sliderPopulation
dtLocations = copy(lReactiveValues$dtLocations)
setkey(dtLocations, LocationID)
if ( is.null(dtLocations) ) {
showModal(modalDialog(
title = "Error!",
"No locations data uploaded!",
easyClose = TRUE,
footer = NULL
))
return ( NULL )
}
dtWays = lReactiveValues$dtWays
dtPolygonCoveredEdges = lReactiveValues$dtPolygonCoveredEdges[DistanceFromDestination_m < sliderCoverageRadius]
if ( is.null(dtPolygonCoveredEdges) ) {
showModal(modalDialog(
title = "Error!",
"City data not loaded!",
easyClose = TRUE,
footer = NULL
))
return ( NULL )
}
dtLocationsToIgnore = dtLocations[ !LocationID %in% dtPolygonCoveredEdges[, LocationID] ]
# Checking for invalid max stations constraint
if ( nrow(dtLocationsToIgnore) > 0 ) {
showModal(modalDialog(
title = "Warning!",
paste(
"Coverage for some stations hasn't been calculated, forcing them as status = F:",
dtLocationsToIgnore[, paste0(LocationName, collapse = ', ' )]
),
easyClose = TRUE,
footer = NULL
))
}
dtLocations[ LocationID %in% dtLocationsToIgnore[, LocationID], Status := 'F' ]
rm(dtLocationsToIgnore)
# The stations that need to be decided
iToDecide = dtLocations[, sum(Status == '?')]
# The stations that are already chosen
iChosen = dtLocations[, sum(Status == 'T')]
# Trying to parallelise
# sfInit(parallel = T, cpus = 1)
# sfLibrary(data.table)
# sfLibrary(GA)
# sfExport('radioOptimisationType')
# sfExport('numericTrials')
# sfExport('sliderPopulation')
# sfExport('iToDecide')
# sfExport('sliderMaxStations')
# sfExport('iChosen')
# sfExport('sliderCoverageRadius')
# sfExport('dtLocations')
# sfExport('dtWays')
# sfExport('dtPolygonCoveredEdges')
# sfExport('sliderMaxIterations')
# sfExport('sliderMinCoverage')
# sfExport('fCIFitnessStations')
# sfExport('fCIFitnessCoverageScore')
# Creating empty objects to fill iterations in
lGAResults = list()
dtGASummary = data.frame()
posixctStartTime = Sys.time()
setkey(
dtLocations,
LocationID
)
# If the user wishes to resume from last run then load last
# result as initial solution
if ( !is.null(checkboxOptimiseMore) ) {
if ( checkboxOptimiseMore == T ) {
lOptimisationResult = lReactiveValues$lOptimisationResult
gaResult = lOptimisationResult$lGAResults$Result
dtGASummary = lOptimisationResult$lGAResults$dtGASummary
dtOptimisedLocations = lOptimisationResult$lGAResults$dtOptimisedLocations
lOptimisationResultStatistics = lOptimisationResult$lOptimisationResultStatistics
rm(lOptimisationResult)
setkey(
dtOptimisedLocations,
LocationID
)
}
}
if ( !exists('lOptimisationResultStatistics')) {
lOptimisationResultStatistics = list()
}
# How many generation should the GA 'pause' to update the
# progress message
iUpdateAfterEvery = 5
# So many iterations of iUpdateAfterEvery generations need to be run
for ( i in 0:floor(sliderMaxIterations/iUpdateAfterEvery)) {
# Poorly named variable. This is actually the number of
# generations that the GA should run for
iIterations = sliderMaxIterations - (i * iUpdateAfterEvery)
if ( iIterations <= 0 ) {
break
}
# GA can't propogate for only one generation so if remaining
# generations after this iteration is only one then just
# include that in this one
if ( iIterations != (iUpdateAfterEvery+1) ) {
iIterations = pmin(
iIterations,
iUpdateAfterEvery
)
}
if ( radioOptimisationType == 'Maximise coverage' ) {
# Checking for invalid max stations constraint
if ( sliderMaxStations - iChosen < 0 ) {
showModal(modalDialog(
title = "Error!",
"Max stations allowed < stations already selected in status column",
easyClose = TRUE,
footer = NULL
))
return ( NULL )
}
# Preparing initial solution for GA to start from
if ( exists('gaResult') ) {
# Solution from previous iteration / previous run
if ( !is.null(gaResult@population) ) {
# todo - updating status from T to F or vv and resuming
# from previous optimisation causes it to crash.
mSuggestions = rbind(
gaResult@solution[1,],
gaResult@population[
sample(
nrow(gaResult@population),
min(nrow(gaResult@population), sliderPopulation) - 1
),
]
)
if ( exists('dtOptimisedLocations') ) {
mSuggestions = fUpdateSuggestionsForStatusChange(
dtLocations,
dtOptimisedLocations,
mSuggestions
)
}
}
} else {
# randomly select combinations of the highest number of stations
mSuggestions = Reduce(
rbind,
lapply(
1:sliderPopulation,
function(x) {
viSuggestion = rep(0, iToDecide)
viFlip = sample(iToDecide, min(floor(runif(1) * (sliderMaxStations - iChosen)), iToDecide))
viSuggestion[viFlip] = 1
viSuggestion
}
)
)
}
gaResult <- ga(
type = "binary",
fitness = function(x) fCIFitnessCoverageScore(
viSelected = x,
nRadiusUpperLimit_m = sliderCoverageRadius,
iMaxStations = sliderMaxStations,
dtLocations = dtLocations,
dtWays = dtWays,
dtPolygonCoveredEdges = dtPolygonCoveredEdges
),
suggestions = mSuggestions,
nBits = iToDecide,
popSize = sliderPopulation,
maxiter = iIterations,
mutation = function(x,y) fCustomMutation(
object = x,
parent = y,
dtLocations = dtLocations,
dtWays = dtWays,
dtPolygonCoveredEdges = dtPolygonCoveredEdges
),
pmutation = function(x, y) fCustomPMutation(
object = x,
iUpdateAfterEvery = iUpdateAfterEvery,
i = i,
iMaxIterations = sliderMaxIterations
)
)
progress$set(
message = paste(
Sys.time(),
'After',
(iUpdateAfterEvery*i) + iIterations,
'iterations, coveragescore achieved = ',
pmax(0, round(gaResult@fitnessValue,2)),
'. Optimisation continues. Approximately',
ceiling((
(
sliderMaxIterations - (
iIterations + (i * iUpdateAfterEvery)
)
) *
as.numeric(Sys.time() - posixctStartTime) / (iIterations + (i * iUpdateAfterEvery)))/60
),
'minute(s) left'
)
)
dtGASummaryNew = data.frame(gaResult@summary)
} else if ( radioOptimisationType == 'Minimise stations' ) {
# Preparing initial solution for GA to start from
if ( exists('gaResult') ) {
# Solution from previous iteration / previous run
if ( !is.null(gaResult@population) ) {
mSuggestions = rbind(
gaResult@solution[1,],
gaResult@population[
sample(
nrow(gaResult@population),
min(nrow(gaResult@population), sliderPopulation) - 1
),
]
)
if ( exists('dtOptimisedLocations') ) {
mSuggestions = fUpdateSuggestionsForStatusChange(
dtLocations,
dtOptimisedLocations,
mSuggestions
)
}
}
} else {
# Taking random combinations of stations in proportion to coverage area constraint
mSuggestions = Reduce(
rbind,
lapply(
1:sliderPopulation,
function(x) {
viSuggestion = rep(0, iToDecide)
viFlip = sample(iToDecide, floor(iToDecide * min(1, (sliderMinCoverage/100) + runif(1))))
viSuggestion[viFlip] = 1
viSuggestion
}
)
)
}
# mSuggestions = matrix(rep(1, iToDecide), nrow = 1, ncol = iToDecide)
gaResult <- ga(
type = "binary",
fitness = function(x) fCIFitnessStations(
viSelected = x,
nRadiusUpperLimit_m = sliderCoverageRadius,
nMinCoverage_pct = sliderMinCoverage,
dtLocations = dtLocations,
dtWays = dtWays,
dtPolygonCoveredEdges = dtPolygonCoveredEdges
),
suggestions = mSuggestions,
nBits = iToDecide,
popSize = sliderPopulation,
maxiter = iIterations,
mutation = function(x,y) fCustomMutation(
object = x,
parent = y,
dtLocations = dtLocations,
dtWays = dtWays,
dtPolygonCoveredEdges = dtPolygonCoveredEdges
),
keepBest = T,
pmutation = function(x, y) fCustomPMutation(
object = x,
iUpdateAfterEvery = iUpdateAfterEvery,
i = i,
iMaxIterations = sliderMaxIterations
)
)
# cat(sliderMaxIterations)
# cat(iIterations)
# cat(i)
# cat(iUpdateAfterEvery)
# cat(posixctStartTime)
# cat(Sys.time())
progress$set(
message = paste(
Sys.time(),
'After',
(iUpdateAfterEvery*i) + iIterations,
'iterations, number of stations selected = ',
pmax(0, round(1/gaResult@fitnessValue, 0)),
'. Optimisation continues. Approximately',
ceiling((
(
sliderMaxIterations - (
iIterations + (i * iUpdateAfterEvery)
)
) *
as.numeric(Sys.time() - posixctStartTime) / (iIterations + (i * iUpdateAfterEvery)))/60
),
'minutes left.'
)
)
dtGASummaryNew = data.frame(1/gaResult@summary)
}
# This thing will work but look weird if the sort
# of optimisation is changed between runs
dtGASummary = rbind.fill(
dtGASummary,
dtGASummaryNew
)
if ( length(lOptimisationResultStatistics) > 0 ) {
vbStoredDetailElements = sapply(
lOptimisationResultStatistics,
function ( lOptimisationResultStatisticsIncremental ) {
is.null(lOptimisationResultStatisticsIncremental$dtCoveredEdges)
}
)
if ( sum(vbStoredDetailElements) > 1 ) {
for ( iElement in which(vbStoredDetailElements)[-1] ) {
lOptimisationResultStatistics[[iElement]]$dtCoveredEdges = NULL
lOptimisationResultStatistics[[iElement]]$dtOptimisationCoverageSummmary = NULL
}
}
}
lOptimisationResultStatistics = append(
lOptimisationResultStatistics,
lapply(
seq(length(gaResult@bestSol)),
function ( iSolutionIndex ) {
vcRawSolution = gaResult@bestSol[[iSolutionIndex]]
lOptimisationResultStatisticsIncremental = fGetOptimisationResultStatistics(
dtPolygonCoveredEdges,
vcRawSolution,
dtLocations,
dtWays,
sliderCoverageRadius
)
if ( iSolutionIndex < length(gaResult@bestSol) ) {
lOptimisationResultStatisticsIncremental$dtCoveredEdges = NULL
lOptimisationResultStatisticsIncremental$dtOptimisationCoverageSummmary = NULL
}
return ( lOptimisationResultStatisticsIncremental )
}
)
)
}
setDT(dtGASummary)
dtGASummary[, iter := .I]
# gaResult = lGAResults[[which.max(sapply(lGAResults, function(x) x@fitness[1]))]]
rm(lGAResults)
if (
gaResult@fitness[1] < 0 ) {
showModal(modalDialog(
title = "Error!",
"FYI: the best solution achieved wasn't a valid solution. Try relaxing some of the constraints. If you like, you can restore the last solution from the button below.",
easyClose = TRUE,
footer = NULL
))
}
# needs the [1,] because sometime multiple solutions are there
# means discarding the other solutions. Shouldn't matter.
vcSolution = rep('F', length(gaResult@solution[1,]))
vcSolution[gaResult@solution[1,] == 1] = 'T'
# Updating locations table with the solution
dtLocations[Status != '?', Chosen := Status]
dtLocations[Status == '?', Chosen := vcSolution]
dtLocations[, Chosen := as.character(Chosen)]
# A lot of the other updating doesn't happen here but happens
# when computing cResultSummary because that chunk will also
# respond to recalculations whereas this chunk won't
# Big bulky object with all optimisation results
# The lEditedResults is where the recalculated versions
# will be stored. The optimisation solution is epxensive
# to compute which is why we don't overwrite it.
lOptimisationResult = list(
lGAResults = list(
cOptimisationType = radioOptimisationType,
Result = gaResult,
nRadiusUpperLimit_m = sliderCoverageRadius,
iMaxStations = sliderMaxStations,
nMinCoverage_pct = sliderMinCoverage,
dtOptimisedLocations = dtLocations,
dtGASummary = dtGASummary
),
lOptimisationResultStatistics = lOptimisationResultStatistics,
lEditedResults = list(
nRadiusUpperLimit_m = sliderCoverageRadius,
dtOptimisedLocations = dtLocations
)
)
lReactiveValues$lPreviousOptimisationResult = lReactiveValues$lOptimisationResult
lReactiveValues$lOptimisationResult = lOptimisationResult
}
)
    # Renders the HTML results summary and performs the post-optimisation
    # bookkeeping. Almost all of the updates that follow an optimisation live
    # here rather than in the optimisation chunk, because this chunk also
    # responds to plain recalculations which the optimisation chunk does not.
    # Side effects: copies the optimised locations into the shared reactive
    # state, syncs the per-row "Chosen" radio buttons in the datatable,
    # flattens the latest solution's statistics into lEditedResults, and
    # blanks the cached main map so it gets redrawn.
    output$cResultSummary = renderUI({
        cat(
            file = stderr(),
            paste(
                Sys.time(),
                'Updating optimisation result to dtLocations\n'
            )
        )
        lOptimisationResult = lReactiveValues$lOptimisationResult
        lPreviousOptimisationResult = lReactiveValues$lPreviousOptimisationResult
        # NOTE(review): dtWays is read here but not used below; presumably the
        # read exists only to take a reactive dependency on it — confirm.
        dtWays = lReactiveValues$dtWays
        # Nothing to summarise until at least one optimisation / recalculation
        if ( is.null(lOptimisationResult) ) {
            return ( NULL )
        }
        dtOptimisedLocations = lOptimisationResult$lEditedResults$dtOptimisedLocations
        # isolate(): coverage-data changes must not re-trigger this chunk
        dtPolygonCoveredEdges = isolate(lReactiveValues$dtPolygonCoveredEdges)
        # copy() so later edits to dtLocations don't mutate the stored result
        lReactiveValues$dtLocations = copy(dtOptimisedLocations)
        iTableRenderNumber = isolate(lReactiveValues$iTableRenderNumber)
        if ( is.null(dtPolygonCoveredEdges) ) {
            return ( NULL )
        }
        # Updating the datatable's radio buttons based on results; input IDs
        # are namespaced by the current table render number plus LocationID
        isolate({
            for ( cSuffix in dtOptimisedLocations[Chosen == 'F', LocationID] ) {
                updateRadioButtons(
                    inputId = paste0('radioChosen', iTableRenderNumber, cSuffix),
                    selected = F,
                    session = session
                )
            }
            for ( cSuffix in dtOptimisedLocations[Chosen == 'T', LocationID] ) {
                updateRadioButtons(
                    inputId = paste0('radioChosen', iTableRenderNumber, cSuffix),
                    selected = T,
                    session = session
                )
            }
        })
        # save(
        #     list = 'lOptimisationResultStatistics',
        #     file = '/tmp/lOptimisationResultStatistics.Rdata'
        # )
        # Headline numbers come from the statistics of the *last* stored
        # GA solution (the most recent one)
        cReturn = paste(
            '<br/>Results of latest optimisation / calculation: Objective value of <b>',
            round(lOptimisationResult$lOptimisationResultStatistics[[length(lOptimisationResult$lOptimisationResultStatistics)]]$nCoverage_pct, 4),
            '</b> with <b>',
            lOptimisationResult$lOptimisationResultStatistics[[length(lOptimisationResult$lOptimisationResultStatistics)]]$iStationsUsed,
            'stations</b>',
            # ' of average score <b>',
            # round(nMeanScore, 4),
            # '</b>',
            ''
        )
        # One row per stored GA solution -> data for the convergence plot
        dtIterationSummary = rbindlist(
            lapply(
                lOptimisationResult$lOptimisationResultStatistics,
                function(x) {
                    data.table(StationsUsed = x$iStationsUsed, Coverage_pct = x$nCoverage_pct)
                }
            )
        )
        dtIterationSummary[, Iteration := .I]
        # Flatten the latest solution's statistics into lEditedResults; the
        # isolate() wrappers avoid creating extra reactive dependencies
        isolate({lReactiveValues$lOptimisationResult$lEditedResults$dtOptimisationCoverageSummmary = lOptimisationResult$lOptimisationResultStatistics[[length(lOptimisationResult$lOptimisationResultStatistics)]]$dtOptimisationCoverageSummmary})
        isolate({lReactiveValues$lOptimisationResult$lEditedResults$dtCoveredEdges = lOptimisationResult$lOptimisationResultStatistics[[length(lOptimisationResult$lOptimisationResultStatistics)]]$dtCoveredEdges})
        isolate({lReactiveValues$lOptimisationResult$lEditedResults$iStationsUsed = lOptimisationResult$lOptimisationResultStatistics[[length(lOptimisationResult$lOptimisationResultStatistics)]]$iStationsUsed})
        isolate({lReactiveValues$lOptimisationResult$lEditedResults$nCoverage_pct = lOptimisationResult$lOptimisationResultStatistics[[length(lOptimisationResult$lOptimisationResultStatistics)]]$nCoverage_pct})
        isolate({lReactiveValues$lOptimisationResult$lEditedResults$nMeanScore = lOptimisationResult$lOptimisationResultStatistics[[length(lOptimisationResult$lOptimisationResultStatistics)]]$nMeanScore})
        isolate({lReactiveValues$lOptimisationResult$lEditedResults$dtIterationSummary = dtIterationSummary})
        # Appending results of previous optimisation if there exists one
        if ( !is.null(lPreviousOptimisationResult) ) {
            cReturn = paste(
                cReturn,
                '<br/>Results of previous optimisation / calculation: Objective value of <b>',
                round(lPreviousOptimisationResult$lEditedResults$nCoverage_pct, 4),
                '</b> with <b>',
                lPreviousOptimisationResult$lEditedResults$iStationsUsed,
                # 'stations</b> of average score <b>',
                # round(lPreviousOptimisationResult$lEditedResults$nMeanScore, 4),
                # '</b>',
                ''
            )
        }
        # Reset the cached main map and its zoom extents so the map is redrawn
        lReactiveValues[['ggplotMap']] = plotCityBlank
        lReactiveValues[['ggplotMapLatitudeRange_deg']] = NULL
        lReactiveValues[['ggplotMapLongitudeRange_deg']] = NULL
        return ( HTML(cReturn) )
    })
# Discards current optimisation and loads the previous one
observeEvent(
input$actionButtonLoadPreviousOptimisaton,
{
lReactiveValues$lOptimisationResult = lReactiveValues$lPreviousOptimisationResult
lReactiveValues$lPreviousOptimisationResult = NULL
lReactiveValues[['ggplotMap']] = plotCityBlank
lReactiveValues[['ggplotMapLatitudeRange_deg']] = NULL
lReactiveValues[['ggplotMapLongitudeRange_deg']] = NULL
}
)
# Checks for the existence of an optimisation solution and if so
# then allows user the option to continue optimisation from that solution
output[['uiOptimiseMore']] = renderUI({
cat(
file = stderr(),
paste(
Sys.time(),
'Checkbox to resume from previous optimisation\n'
)
)
# The other element, lEditedResults, gets updated on a recalc also
# So then the object won't show up as null
if ( is.null(lReactiveValues$lOptimisationResult$lGAResults)) {
return (
h6('')
)
} else {
checkboxOptimiseMore = input$checkboxOptimiseMore
if ( is.null(checkboxOptimiseMore) ) {
checkboxInput(
inputId = 'checkboxOptimiseMore',
label = 'Resume from latest run',
value = F
)
} else {
checkboxInput(
inputId = 'checkboxOptimiseMore',
label = 'Resume from latest run',
value = checkboxOptimiseMore
)
}
}
})
# Depending on the existence of a previous optimisation result,
# gives controls to the user to load it instead of current result
output[['uiLoadPreviousOptimisationResult']] = renderUI({
cat(
file = stderr(),
paste(
Sys.time(),
'Adding more optimisation option \n'
)
)
if ( is.null(lReactiveValues$lPreviousOptimisationResult)) {
return (
h6('')
)
} else {
actionButton(
inputId = 'actionButtonLoadPreviousOptimisaton',
label = 'Load prev result ( discard current )'
)
}
})
    # Letting the user save edits made via the "chosen" and "status" radio
    # buttons in the locations datatable back into the shared dtLocations
    # table, then showing a confirmation modal listing what changed.
    observeEvent(
        input$actionButtonSave,
        {
            # status radio button being updated back to the dataset
            # should I just dynamically retrieve this when the dataset is needed?
            cat(
                file = stderr(),
                paste(
                    Sys.time(),
                    'Saving changes \n'
                )
            )
            cat(
                file = stderr(),
                paste(
                    'Updating status radio button change to dataset \n'
                )
            )
            # this can't be in isolate because the launch of the dashboard triggers this once;
            # dtLocations is null then and nothing happens, and afterwards the reactivity
            # ignores it since the checkbox inputs didn't register with the reactivity.
            dtLocations = isolate(lReactiveValues$dtLocations)
            # radio input IDs are namespaced by the current table render number
            iTableRenderNumber = isolate(lReactiveValues$iTableRenderNumber)
            if ( is.null(dtLocations) ) {
                return ( NULL )
            }
            # One status value per location, read from the rendered radio inputs
            vbStatus = fRetrieveShinyValue(
                paste0('radioStatus', iTableRenderNumber),
                unlist(dtLocations[, LocationID]),
                input
            )
            # Human-readable "<name> <new value>" strings for rows that changed
            vcStatusFlip = paste(
                dtLocations[Status != vbStatus, LocationName],
                vbStatus[dtLocations[,Status] != vbStatus]
            )
            # := mutates dtLocations in place (data.table reference semantics)
            dtLocations[, Status := as.character(vbStatus)]
            # chosen radio button being updated back to the dataset
            # should I just dynamically retrieve this when the dataset is needed?
            cat(
                file = stderr(),
                paste(
                    'Updating chosen radio button change to dataset\n'
                )
            )
            # this can't be in isolate because the launch of the dashboard triggers this once;
            # dtLocations is null then and nothing happens, and afterwards the reactivity
            # ignores it since the checkbox inputs didn't register with the reactivity.
            vbChosen = fRetrieveShinyValue(
                paste0('radioChosen', iTableRenderNumber),
                unlist(dtLocations[, LocationID]),
                input
            )
            vcChosenFlip = paste(
                dtLocations[Chosen != vbChosen, LocationName],
                vbChosen[dtLocations[,Chosen] != vbChosen]
            )
            dtLocations[, Chosen := as.character(vbChosen)]
            isolate({lReactiveValues$dtLocations = dtLocations})
            # Some acknowledgement messages summarising what was flipped
            cModalString = paste(
                paste(
                    'Status changed for:',
                    paste(
                        vcStatusFlip,
                        collapse = ', '
                    )
                ),
                '<br/>',
                paste(
                    'Chosen changed for:',
                    paste(
                        vcChosenFlip,
                        collapse = ', '
                    )
                ),
                '<br/>',
                'For debugging, table number:',
                lReactiveValues$iTableRenderNumber - 1
            )
            showModal(
                modalDialog(
                    title = "Confirmation!",
                    HTML(cModalString),
                    easyClose = TRUE,
                    footer = NULL
                )
            )
        }
    )
# Recalculate coverage, etc based on the latest available status and chosen
observeEvent(
input$actionButtonRecalculate,
{
cat(
file = stderr(),
paste(
Sys.time(),
'Recalculating\n'
)
)
sliderEditedCoverageRadius = input$sliderEditedCoverageRadius
dtLocations = lReactiveValues$dtLocations
# This should trigger the cResultSummary chunk which will do
# all the coverage, etc. calculations
lReactiveValues$lOptimisationResult = list(
lGAResults = lReactiveValues$lOptimisationResult$lGAResults,
lEditedResults = list(
nRadiusUpperLimit_m = sliderEditedCoverageRadius,
dtOptimisedLocations = dtLocations
)
)
}
)
    # Downloading a scenario - analogous to the locations.csv file.
    # Filename encodes city, coverage radius and a timestamp; content is the
    # latest optimised/edited locations table as CSV.
    output$downloadScenario = downloadHandler(
        filename = function() {
            lOptimisationResult = lReactiveValues$lOptimisationResult
            selectizeInputCities = input$selectizeInputCities
            paste0(
                "Scenario-",
                selectizeInputCities,'-',
                lOptimisationResult$lEditedResults$nRadiusUpperLimit_m,'-',
                # input$selectizeInputCities,
                # lReactiveValues$lOptimisationResult$lEditedResults$sliderEditedCoverageRadius,
                strftime(Sys.time(), '%Y%m%d%H%M'),
                ".csv"
            )
        },
        content = function(file) {
            dtOptimisedLocations = lReactiveValues$lOptimisationResult$lEditedResults$dtOptimisedLocations
            if ( !is.null(dtOptimisedLocations) ) {
                write.csv(
                    dtOptimisedLocations,
                    file,
                    row.names = F,
                    quote = T,
                    na = ''
                )
            } else {
                # No result to export yet; tell the user instead of writing
                # an empty/broken file
                showModal(
                    modalDialog(
                        title = "Error!",
                        "At least one optimisation / recalculation should have happened to save anything.",
                        easyClose = TRUE,
                        footer = NULL
                    )
                )
            }
        }
    )
    # Downloading an optimisation result - analogous to the lOptimisationResult
    # object. Serialised as an .Rdata file so it can be re-uploaded later to
    # resume/restore a session.
    output$downloadResult = downloadHandler(
        filename = function() {
            lOptimisationResult = lReactiveValues$lOptimisationResult
            selectizeInputCities = input$selectizeInputCities
            paste0(
                "Result-",
                selectizeInputCities,'-',
                lOptimisationResult$lEditedResults$nRadiusUpperLimit_m,'-',
                # input$selectizeInputCities,
                # lReactiveValues$lOptimisationResult$lEditedResults$sliderEditedCoverageRadius,
                strftime(Sys.time(), '%Y%m%d%H%M'),
                ".Rdata"
            )
        },
        content = function(file) {
            lOptimisationResult = lReactiveValues$lOptimisationResult
            if ( !is.null(lOptimisationResult) ) {
                # save() writes the object under the name 'lOptimisationResult',
                # which the upload handler expects after load()
                save(
                    list = 'lOptimisationResult',
                    file = file
                )
            } else {
                showModal(
                    modalDialog(
                        title = "Error!",
                        "At least one optimisation / recalculation should have happened to save",
                        easyClose = TRUE,
                        footer = NULL
                    )
                )
            }
        }
    )
    # Uploading the result of an optimisation previously saved via
    # downloadResult. Restores the locations table and result object into the
    # reactive state, refreshes selection widgets, and (if coverage data is
    # loaded) flags locations missing from the coverage data for processing.
    observeEvent(
        input$actionButtonUploadScenario,
        {
            cat(
                file = stderr(),
                paste(
                    Sys.time(),
                    'Uploading scenario data\n'
                )
            )
            progress = Progress$new(session)
            on.exit(progress$close())
            progress$set(
                message = 'Loading scenario data'
            )
            # reading in the file in
            inFile <- input$fileInputUploadResult
            if (is.null(inFile)) {
                return(NULL)
            }
            # load() brings 'lOptimisationResult' into this scope (the name
            # used by the corresponding save() in downloadResult)
            load(inFile$datapath)
            # Some basic preprocessing
            setkey(lOptimisationResult$lEditedResults$dtOptimisedLocations, LocationID)
            dtLocations = lOptimisationResult$lEditedResults$dtOptimisedLocations
            lReactiveValues$dtLocations = dtLocations
            lReactiveValues$posixctLastLocationUpload = Sys.time()
            lReactiveValues$lOptimisationResult = lOptimisationResult
            rm(lOptimisationResult)
            # adding locations to the actionable locations selection box;
            # named vector so the UI shows names while values are IDs
            vcLocations = unlist(dtLocations[, LocationID])
            names(vcLocations) = dtLocations[, LocationName]
            selectizeInputToActionLocations = input$selectizeInputToActionLocations
            updateSelectizeInput(
                session = session,
                inputId = 'selectizeInputToActionLocations',
                choices = vcLocations,
                selected = intersect(vcLocations, selectizeInputToActionLocations)
            )
            # checking if all locations are present in the coverage data. Else need to pre-process
            dtPolygonCoveredEdges = lReactiveValues$dtPolygonCoveredEdges
            if ( !is.null(dtPolygonCoveredEdges) ) {
                progress$set(
                    message = 'Checking for locations not in coverage data'
                )
                vcNewLocations = setdiff(
                    dtLocations[, LocationID],
                    dtPolygonCoveredEdges[, LocationID]
                )
                if ( length(vcNewLocations) > 0 ) {
                    names(vcNewLocations) = dtLocations[
                        LocationID %in% vcNewLocations,
                        LocationName
                    ]
                }
                updateSelectizeInput(
                    session = session,
                    inputId = 'selectizeInputNewLocations',
                    choices = vcNewLocations,
                    selected = vcNewLocations
                )
                progress$set(
                    message = 'Calculating location coverage details'
                )
                lReactiveValues$dtCoverageSummary = fGenerateCoverageSummary(
                    dtLocations = dtLocations,
                    dtWays = lReactiveValues$dtWays,
                    dtPolygonCoveredEdges = dtPolygonCoveredEdges,
                    nMaxRadiusUserWillUse_m = nMaxRadiusUserWillUse_m
                )
            }
        })
    # Calculating the hover tooltip on the main ggplot map: converts the hover
    # event's plot coordinates into pixel offsets, finds the nearest location
    # within 1 km, and renders its name in an absolutely-positioned wellPanel.
    output[['htmlHoverText']] = renderUI({
        cat(
            file = stderr(),
            paste(
                Sys.time(),
                'Implementing hover text\n'
            )
        )
        # getting the hover action
        ggplotMapHover <- input[['ggplotMapHover']]
        # isolate(): map redraws should not retrigger the tooltip on their own
        ggplotMapLatitudeRange_deg = isolate(lReactiveValues$ggplotMapLatitudeRange_deg)
        # if there is no hover then return a null
        if ( is.null(ggplotMapHover) ) {
            return (NULL)
        }
        # if there is only a blank plot (no extents recorded yet)
        if ( any(is.null(ggplotMapLatitudeRange_deg)) ) {
            return (NULL)
        # else if there is hover then calculate the display
        }
        dtLocations = lReactiveValues$dtLocations
        if ( is.null(dtLocations) ) {
            return ( NULL )
        }
        # calculate point position INSIDE the image as percent of total dimensions
        # from left (horizontal) and from top (vertical)
        left_pct <- (ggplotMapHover$x - ggplotMapHover$domain$left) / (ggplotMapHover$domain$right - ggplotMapHover$domain$left)
        top_pct <- (ggplotMapHover$domain$top - ggplotMapHover$y) / (ggplotMapHover$domain$top - ggplotMapHover$domain$bottom)
        # calculate distance from left and bottom side of the picture in pixels
        left_px <- ggplotMapHover$range$left + left_pct * (ggplotMapHover$range$right - ggplotMapHover$range$left)
        top_px <- ggplotMapHover$range$top + top_pct * (ggplotMapHover$range$bottom - ggplotMapHover$range$top)
        # create style property for tooltip
        # background color is set so tooltip is a bit transparent
        # z-index is set so we are sure our tooltip will be on top
        style <- paste0(
            "position:absolute; z-index:100; background-color: rgba(245, 245, 245, 0.85); ",
            "left:",
            left_px + 2,
            "px; top:",
            top_px + 2,
            "px;"
        )
        # Nearest location to the hover point, searching within 1000 m
        dtLocationsClick = dtLocations[
            fNearestLocation(
                nLongitude_deg = ggplotMapHover$x,
                nLatitude_deg = ggplotMapHover$y,
                nRadius_m = 1000,
                vnLongitude_deg = dtLocations$Longitude_deg,
                vnLatitude_deg = dtLocations$Latitude_deg
            )
        ]
        # Nothing within range: no tooltip
        if ( nrow(dtLocationsClick) == 0 ) {
            return (NULL)
        }
        # Sending this to reactive values for it to be reused in the output object
        return (
            wellPanel(
                style = style,
                p(HTML(
                    dtLocationsClick[, LocationName]
                ))
            )
        )
    })
    # If there are new locations in the locations file which haven't been
    # processed into the coverage data yet, then process them here.
    # The logic is very similar to what's in the initialising city script.
    observeEvent(
        input$actionButtonProcessNewLocations,
        {
            progress = Progress$new(session)
            on.exit(progress$close())
            progress$set(
                message = 'Processing new locations'
            )
            selectizeInputNewLocations = input[['selectizeInputNewLocations']]
            dtLocations = lReactiveValues$dtLocations
            dtNodes = lReactiveValues$dtNodes
            dtWays = lReactiveValues$dtWays
            # Coverage cache file for the currently selected city
            cCoverageFileName = paste0(
                cDataDirectory(),
                '/Analysis/OSMServiceable.Rdata'
            )
            # Getting locations data only for new location
            dtLocationsToBeProcessed = dtLocations[LocationID %in% selectizeInputNewLocations]
            progress$set(
                message = 'Calculating nearest map nodes to locations'
            )
            # NOTE(review): bOverwriteExistingCoverageCalculation is not defined
            # in this file's visible scope; presumably a global set at startup —
            # confirm before relying on its value here.
            fUpdateCoverageData (
                igraphLinks = NULL,
                dtWays,
                cCoverageFileName,
                bOverwriteExistingCoverageCalculation,
                dtLocationsToBeProcessed
            )
            # load() brings the refreshed dtPolygonCoveredEdges into scope
            load(cCoverageFileName)
            setkey(dtPolygonCoveredEdges, LocationID)
            lReactiveValues$dtPolygonCoveredEdges = dtPolygonCoveredEdges
            progress$set(
                message = 'Generating location details'
            )
            # Updating the coverage summary
            lReactiveValues$dtCoverageSummary = fGenerateCoverageSummary(
                dtLocations = dtLocations,
                dtWays = dtWays,
                dtPolygonCoveredEdges = dtPolygonCoveredEdges,
                nMaxRadiusUserWillUse_m = nMaxRadiusUserWillUse_m
            )
        }
    )
    # Basic plot to show GA iteration details: coverage achieved and stations
    # used per stored solution, stacked vertically. Shows a placeholder text
    # plot until an optimisation has produced iteration data.
    output[['plotConvergence']] = renderPlot({
        cat(
            file = stderr(),
            paste(
                Sys.time(),
                'Convergence plot\n'
            )
        )
        # dtGASummary = lReactiveValues$lOptimisationResult$lGAResults$dtGASummary
        dtIterationSummary = lReactiveValues$lOptimisationResult$lEditedResults$dtIterationSummary
        if ( is.null(dtIterationSummary) ) {
            # Placeholder: an all-blank ggplot with only an explanatory label
            return (
                ggplot() +
                    geom_text(aes(x=0,y=0,label='No optimisations run yet')) +
                    theme(
                        panel.grid.minor = element_blank(),
                        panel.grid.major = element_blank(),
                        line = element_blank(),
                        title = element_blank(),
                        text = element_blank()
                    )
            )
        }
        cat(
            file = stderr(),
            paste(
                Sys.time(),
                'There is sometimes a very long delay after this chunk. Timestamp
                to indicate this chunk ended and some other unidentified thing
                is running.\n'
            )
        )
        # Two stacked line charts sharing the Iteration axis
        grid.arrange(
            ggplot(dtIterationSummary) + geom_line(aes(x = Iteration, y = Coverage_pct)),
            ggplot(dtIterationSummary) + geom_line(aes(x = Iteration, y = StationsUsed)),
            ncol = 1
        )
    })
    # Updating the maximum coverage achievable with the current radius: caps
    # the sliderMinCoverage control so the user cannot request more coverage
    # than the road network allows at that radius, and clamps its value.
    observe({
        cat(
            file = stderr(),
            paste(
                Sys.time(),
                'Calculating max possible coverage\n'
            )
        )
        progress = Progress$new(session)
        on.exit(progress$close())
        # OSM data about ways and nodes
        progress$set(
            message = 'Calculating max possible coverage'
        )
        sliderCoverageRadius = input$sliderCoverageRadius
        dtWays = lReactiveValues$dtWays
        dtPolygonCoveredEdges = lReactiveValues$dtPolygonCoveredEdges
        if ( is.null(sliderCoverageRadius) ) {
            return ( NULL )
        }
        if ( is.null(dtWays) ) {
            return ( NULL )
        }
        if ( is.null(dtPolygonCoveredEdges) ) {
            return ( NULL )
        }
        # Attach each covered edge's length, then keep only edges reachable
        # within the chosen coverage radius
        dtPolygonCoveredEdges = merge(
            dtPolygonCoveredEdges,
            dtWays[, list(WayID, Distance_m)],
            'WayID'
        )
        dtPolygonCoveredEdges = dtPolygonCoveredEdges[
            DistanceFromDestination_m < sliderCoverageRadius
        ]
        # Roads covered by the stations (grouping de-duplicates ways covered
        # by more than one station)
        dtCoverage = dtPolygonCoveredEdges[,
            list(
                1
            ),
            list(
                WayID,
                Distance_m
            )
        ]
        # Calculating coverage as covered road length over total road length
        nCoverage_pct = dtCoverage[, sum(Distance_m)] / dtWays[, sum(Distance_m)]
        # NOTE(review): nMaxValue_pct is a 0-1 fraction, while elsewhere
        # sliderMinCoverage appears to be divided by 100 (a 0-100 percent).
        # Confirm the slider's units match before trusting this cap.
        nMaxValue_pct = floor(
            100*nCoverage_pct
        ) / 100
        updateSliderInput(
            inputId = 'sliderMinCoverage',
            session = session,
            max = nMaxValue_pct,
            value = pmin(nMaxValue_pct, isolate(input$sliderMinCoverage))
        )
    })
    # Output priorities: higher priority runs first. This ensures the result
    # summary chunk (which writes dtLocations back to the reactive state)
    # executes before the locations datatable re-renders from that state.
    outputOptions(output, "cResultSummary", priority = 500)
    outputOptions(output, "datatableLocations", priority = 400)
output[['ggplotCorrectionsMap']] = renderPlot({
cat(
file = stderr(),
paste(
Sys.time(),
'Plotting corrections map\n'
)
)
lReactiveValues$ggplotCorrectionsMap
})
    # Zooms the corrections map to the user's brush selection by overriding
    # the cached plot's coordinate limits with the brushed extents.
    observe({
        cat(
            file = stderr(),
            paste(
                Sys.time(),
                'Implementing brush on corrections map\n'
            )
        )
        progress = Progress$new(session)
        on.exit(progress$close())
        progress$set(
            message = 'Zooming in on corrections map'
        )
        # isolate(): updating the cached plot below must not retrigger this
        ggplotCorrectionsMap = isolate(lReactiveValues$ggplotCorrectionsMap)
        ggplotCorrectionsMapBrush = input$ggplotCorrectionsMapBrush
        if ( is.null(ggplotCorrectionsMap) ) {
            return ( NULL )
        }
        if ( !is.null(ggplotCorrectionsMapBrush) ) {
            # coord_cartesian() zooms without dropping data outside the limits
            ggplotCorrectionsMap = ggplotCorrectionsMap +
                coord_cartesian(
                    xlim = c(
                        ggplotCorrectionsMapBrush$xmin,
                        ggplotCorrectionsMapBrush$xmax
                    ),
                    ylim = c(
                        ggplotCorrectionsMapBrush$ymin,
                        ggplotCorrectionsMapBrush$ymax
                    )
                )
        }
        lReactiveValues$ggplotCorrectionsMap = ggplotCorrectionsMap
    })
# resetting the zoom on double click
observe({
cat(
file = stderr(),
paste(
Sys.time(),
'Implementing double click on corrections map\n'
)
)
progress = Progress$new(session)
on.exit(progress$close())
progress$set(
message = 'Resetting corrections map'
)
ggplotCorrectionsMapDblClick = input$ggplotCorrectionsMapDblClick
if ( is.null(input$ggplotCorrectionsMapDblClick) ) {
return ( NULL )
}
ggplotCorrectionsMap = isolate(lReactiveValues$ggplotCorrectionsMap)
lReactiveValues$ggplotCorrectionsMap = ggplotCorrectionsMap +
coord_cartesian()
})
    # Builds the base corrections map: map tiles, way segments coloured by
    # one-way status, and green node markers. Re-runs when the underlying data
    # changes or when the reset button is pressed.
    observe({
        progress = Progress$new(session)
        on.exit(progress$close())
        progress$set(
            message = 'Making map for corrections tab'
        )
        cat(
            file = stderr(),
            paste(
                Sys.time(),
                'Creating corrections map\n'
            )
        )
        ggMapTiles = lReactiveValues$ggMapTiles
        dtWays = lReactiveValues$dtWays
        dtNodes = lReactiveValues$dtNodes
        # Bare read: taken only to create a reactive dependency so pressing
        # the reset button forces this map to be rebuilt
        input$actionButtonResetCorrectionsMap
        if ( is.null(ggMapTiles) ) {
            return ( NULL )
        }
        ggplotCorrectionsMap = ggmap(ggMapTiles) +
            geom_segment(
                data = dtWays,
                aes(
                    x = Longitude_deg1,
                    y = Latitude_deg1,
                    xend = Longitude_deg2,
                    yend = Latitude_deg2,
                    color = OneWay
                ),
                size = 1
            ) +
            geom_point(
                data = dtNodes,
                aes(
                    x = Longitude_deg,
                    y = Latitude_deg
                ),
                size = 1,
                shape = 21,
                fill = 'green',
                color = 'black'
            ) +
            coord_cartesian()
        lReactiveValues$ggplotCorrectionsMap = ggplotCorrectionsMap
    })
    # On "draw route": builds a directed graph from the ways table (two-way
    # ways contribute both directions), finds the shortest path between the
    # two user-entered node IDs, overlays the path on the corrections map, and
    # returns a text summary of the path length.
    # NOTE(review): assigning an eventReactive (rather than a render function)
    # to an output slot is unusual — confirm this renders as intended.
    output[['textPathDetails']] = eventReactive(
        input$actionButtonDrawRoute,
        {
            progress = Progress$new(session)
            on.exit(progress$close())
            progress$set(
                message = 'Getting path details'
            )
            # isolate() everywhere: this should run only on the button press
            ggplotCorrectionsMap = isolate(lReactiveValues$ggplotCorrectionsMap)
            dtWays = isolate(lReactiveValues$dtWays)
            dtNodes = isolate(lReactiveValues$dtNodes)
            textInputFromCorrectionMap = isolate(input$textInputFromCorrectionMap)
            textInputToCorrectionMap = isolate(input$textInputToCorrectionMap)
            # Directed graph: every way gives one edge; two-way ways ('no' in
            # OneWay) additionally give the reverse edge. Edge weight = length.
            igraphLinks = graph_from_data_frame(
                d = rbind(
                    dtWays[,
                        list(
                            from = SourceID,
                            to = DestinationID,
                            weight = Distance_m
                        )
                    ],
                    dtWays[
                        OneWay == 'no',
                        list(
                            from = DestinationID,
                            to = SourceID,
                            weight = Distance_m
                        )
                    ]
                ),
                directed = T
            )
            lPath = get.shortest.paths(
                graph = igraphLinks,
                # from = '423744834',
                # to = '736207519'
                from = textInputFromCorrectionMap,
                to = textInputToCorrectionMap
            )
            # Node IDs along the path, in order
            vcNodesOnPath = names(lPath$vpath[[1]])
            # Recover the way rows for consecutive node pairs; merging in both
            # orientations because the way table stores each edge only once
            dtCorrectionMapPath = rbind(
                merge(
                    data.table(SourceID = head(vcNodesOnPath, -1), DestinationID = tail(vcNodesOnPath, -1)),
                    dtWays,
                    c('SourceID','DestinationID')
                ),
                merge(
                    data.table(DestinationID = head(vcNodesOnPath, -1), SourceID = tail(vcNodesOnPath, -1)),
                    dtWays,
                    c('SourceID','DestinationID')
                )
            )
            setDT(dtCorrectionMapPath)
            # Overlay the route (orange-red) on top of the cached base map
            ggplotCorrectionsMap = ggplotCorrectionsMap +
                geom_segment(
                    data = dtCorrectionMapPath,
                    aes(
                        x = Longitude_deg1,
                        y = Latitude_deg1,
                        xend = Longitude_deg2,
                        yend = Latitude_deg2
                    ),
                    size = 2,
                    color = '#FF4500'
                ) +
                geom_point(
                    data = dtNodes[NodeID %in% vcNodesOnPath],
                    aes(
                        x = Longitude_deg,
                        y = Latitude_deg
                    ),
                    size = 2,
                    shape = 21,
                    fill = '#FF4500',
                    color = 'black'
                )
            lReactiveValues$ggplotCorrectionsMap = ggplotCorrectionsMap
            # print(head(dtCorrectionMapPath))
            # Returned value: human-readable distance summary in km
            paste(
                'Distance between',
                head(vcNodesOnPath, 1),
                'and',
                tail(vcNodesOnPath, 1),
                ':',
                dtCorrectionMapPath[, round(sum(Distance_m) / 1000, 2)],
                'km'
            )
        }
    )
output[['textClickedNode']] = renderText({
cat(
file = stderr(),
paste(
Sys.time(),
'Name of node which had click\n'
)
)
ggplotCorrectionsMapClick = input$ggplotCorrectionsMapClick
iTableRenderNumber = isolate(lReactiveValues$iTableRenderNumber)
dtNodes = lReactiveValues$dtNodes
if ( is.null(ggplotCorrectionsMapClick)) {
return ( NULL )
}
# Identifying the nearest location to the click
cCorrectionMapLocationsClick = dtNodes[
fNearestLocation(
nLongitude_deg = ggplotCorrectionsMapClick$x,
nLatitude_deg = ggplotCorrectionsMapClick$y,
nRadius_m = 1000,
vnLongitude_deg = dtNodes$Longitude_deg,
vnLatitude_deg = dtNodes$Latitude_deg
),
NodeID
]
lReactiveValues$cCorrectionMapLocationsClick = cCorrectionMapLocationsClick
paste(
'NodeID:',
cCorrectionMapLocationsClick
)
})
}
# config file will have city, radius,
|
d7690b370375610d6512ff9941615f303294655d
|
c746b5f40c118fb4f41a2d7cb88024738476d40f
|
/Data_Generation/Results/combine_elnet9_500.R
|
7f2cc520a5bfe7ee43f9f085e87ff44823450767
|
[] |
no_license
|
multach87/Dissertation
|
5548375dac9059d5d582a3775adf83b5bc6c0be7
|
d20b4c6d3087fd878a1af9bc6e8543d2b94925df
|
refs/heads/master
| 2023-06-25T20:09:25.902225
| 2021-07-23T18:51:07
| 2021-07-23T18:51:07
| 281,465,959
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,335
|
r
|
combine_elnet9_500.R
|
# load data
# NOTE(review): paths are hard-coded to one machine; parameterise if this
# script is ever run elsewhere.
half.data <- readRDS("/Users/Matt Multach/Desktop/Dissertation/Dissertation_Git/Data_Generation/Data_Storage/500_data_10052020.RData")
elnet9.final <- readRDS("/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/MainResults_Storage/elnet9_resultmain_500.RData")

# initialize results dataframe with one column per metric reported in each
# replication's 'info' table
elnet9.results <- data.frame(matrix(ncol = ncol(elnet9.final[[1]]$info)))
colnames(elnet9.results) <- colnames(elnet9.final[[1]]$info)

# initialize error vector recording indices of failed replications
elnet9.errors <- numeric()

# fill results row by row; replications that errored keep only their
# generating conditions (columns 1:7) and have their index logged.
# seq_along() instead of 1:length() guards against a zero-length list.
for (i in seq_along(elnet9.final)) {
        if (is.null(elnet9.final[[i]]$error)) {
                elnet9.results[i, ] <- elnet9.final[[i]]$info
        } else {
                cat("error at i = ", i, "\n")
                elnet9.results[i, 1:7] <- half.data[[i]]$conditions
                elnet9.errors <- c(elnet9.errors, i)
        }
}

# summary statistics across replications (TRUE spelled out, not T, since T is
# reassignable)
mean(elnet9.results[, "fpr"], na.rm = TRUE)
mean(elnet9.results[, "fnr"], na.rm = TRUE)
# mpe can be Inf for degenerate fits; exclude those before averaging
mean(elnet9.results[!is.infinite(elnet9.results[, "mpe"]), "mpe"], na.rm = TRUE)

# save results
saveRDS(elnet9.results, "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/MainResults_Storage/elnet9_resultDF_500.RData")
#saveRDS(elnet9.errors , "/Users/Matt Multach/Dropbox/USC_Grad2/Courses/Dissertation/Dissertation_Git/Data_Storage/Error_Storage/elnet9_errorindices_500.RData")
|
d26785825821554075ad42b06c80dea4a9771ccd
|
e58a041effe2ee474c329eb3327300363aa18588
|
/R/prediction.R
|
1710a2af6b167b473803fea9fc43a4f7f9068441
|
[] |
no_license
|
Andreii94Tarasenko/SEMrush_petFood_analysis
|
0cb70e496122e50c3377b22127c5f1cd513bac75
|
f7194bf763823b5a13398235c2ee2f3d57fe8ef9
|
refs/heads/master
| 2020-03-18T14:59:23.225949
| 2016-11-19T19:04:16
| 2016-11-19T19:04:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 440
|
r
|
prediction.R
|
## regression analysis
# Fit Position on every other column of the supplied data frame, then apply
# stepwise AIC selection to drop uninformative predictors. Returns the names
# of the coefficients that survive selection (including "(Intercept)").
key_parameters <- function(model_data) {
  full_model <- lm(Position ~ ., data = model_data)
  reduced_model <- step(full_model, trace = FALSE)
  names(coef(reduced_model))
}
#print(summary(whiskas.new.lm))
#print("Unimportant_parameters in scope of position in whiskas data:")## i.e. position does not depends on them
#print(unimportant_parameters_whiskas)
|
f1ea2cebbc41b696101050bc20bf42273d7e668a
|
e67681fe1830d76dc7079f136a466740e0f485f2
|
/docs/msc-phygeo-remote-sensing/worksheets/msc-phygeo-rs-ws-05-2.R
|
0f5a607ebd5fb7876a20279850edb5f6df50592d
|
[
"MIT"
] |
permissive
|
GeoMOER/gmoc
|
ceea97260e16ee6a03730718c28dcddf991cf0e3
|
2a2fe5ab4240c4a1534fd3dcc0fea917b26125d6
|
refs/heads/master
| 2021-09-28T02:47:08.096750
| 2018-11-13T15:32:16
| 2018-11-13T15:32:16
| 105,365,298
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,497
|
r
|
msc-phygeo-rs-ws-05-2.R
|
# rs-ws-05-2
# MOC - Data Analysis (T. Nauss, C. Reudenbach)
# Merging training areas
#
# Reads every per-user training-area shapefile from the land-cover
# classification folder, merges them into a single layer with unique feature
# IDs, recodes the class IDs, and writes the combined shapefile to disk.
# NOTE(review): relies on readOGR/writeOGR (rgdal), spChFIDs (sp) and
# recode() being provided by the sourced set_environment.R -- confirm that
# script attaches those packages and defines `path_muf_set1m_lcc_ta`.
# Set environment --------------------------------------------------------------
if(Sys.info()["sysname"] == "Windows"){
source("D:/active/moc/msc-ui/scripts/msc-phygeo-ei/src/functions/set_environment.R")
} else {
source("/media/permanent/active/moc/msc-ui/scripts/msc-phygeo-ei/src/functions/set_environment.R")
}
# Merge shape files ------------------------------------------------------------
# Read names of shape files
shp_names <- list.files(path_muf_set1m_lcc_ta,
pattern = glob2rx("*.shp"), full.names = TRUE)
# Drop any previously merged output ("muf*") so it is not merged into itself.
shp_names <- shp_names[-grep("muf", basename(shp_names))]
# Put shapes in a list and adjust geometry ids.
# `shift` offsets each file's feature IDs so IDs stay unique across files.
shift <- 0
shps <- list()
for(s in seq(length(shp_names))){
act_shps <- readOGR(shp_names[s], ogrListLayers(shp_names[s]))
shps[[s]] <- spChFIDs(act_shps, as.character(seq(nrow(act_shps)) + shift))
shift <- shift + nrow(act_shps)
}
# rownames(as(shps[[1]], "data.frame"))
# Remove non-standard columns (if necessary)
shps[[1]]@data$merge_id <- NULL
# Combine shapes (rbind works because all feature IDs are now unique).
shps_cmb <- do.call("rbind", shps)
# Recode values
ids_old <- unique(shps_cmb@data$ID)
# NOTE(review): `ids_new` is not defined anywhere in this script -- it must
# come from the environment (e.g. set_environment.R) or this line fails;
# confirm where the replacement IDs are created.
ids_repl <- paste(ids_old, ids_new, sep = "=", collapse = ";")
shps_cmb@data$ID <- recode(shps_cmb@data$ID, ids_repl)
# Write shape file
writeOGR(shps_cmb, paste0(path_muf_set1m_lcc_ta, "muf_lc_ta_poly_large"),
"muf_lc_ta_poly_large", driver = "ESRI Shapefile", overwrite = TRUE)
|
69354989f56075f6ed31f96763c98c12c931c9d6
|
9077699975ffc6239097170bcc851790a5967dc6
|
/man/align.Rd
|
921696f7c6137efd11dfa60ac33aa5dd0ebd5c69
|
[] |
no_license
|
cran/HFWutils
|
8eec52f548293d2057cbfdaad9bdaac39524ba20
|
fa8bdcc82482007e09cfa6207ba7d5bd4189853c
|
refs/heads/master
| 2021-01-01T15:51:28.662706
| 2008-05-17T00:00:00
| 2008-05-17T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,480
|
rd
|
align.Rd
|
\name{align}
\alias{align}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ align ordering of two vectors}
\description{
order one vector to match order of first vector
}
\usage{
align(names1, names2)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{names1}{ a vector giving the reference ordering }
\item{names2}{ a vector of the same length containing the same elements, to be aligned to \code{names1} }
}
\details{
inputs names1,names2 must have same lengths
}
\value{
An integer vector of indices, as returned by \code{match(names1, names2)}:
element \code{i} gives the position of \code{names1[i]} within
\code{names2}, so that \code{names2[align(names1, names2)]} is in the same
order as \code{names1}.
}
\references{ ~put references to the literature/web site here ~ }
\author{ Felix Wittmann \email{hfwittmann@gmail.com}}
\note{ ~~further notes~~ }
\seealso{ ~~objects to See Also as \code{\link{help}}, ~~~ }
\examples{
x1 <- 1:4
x2 <- c(1,3,2,4)
align(names1=x1, names2=x2)
# [1] 1 3 2 4
x1 <- LETTERS[1:4]
x2 <- c(1,3,2,4)
align(names1=x1, names2=x2)
# [1] 1 3 2 4
x1 <- LETTERS[1:4]
x2 <- LETTERS[c(1,3,2,4)]
align(names1=x1, names2=x2)
# [1] 1 3 2 4
x1 <- LETTERS[1:5]
x2 <- LETTERS[c(1,3,2,4)]
align(names1=x1, names2=x2)
# lengths don't match :
# names1 : A B C D E
# names2 : A C B D
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{manip}
|
7ff838cfa0b10220fc7977426903066c2821e226
|
d870f64e2749df0a1a2b5ce0eacdbd5fca2ea167
|
/man/segmat_to_list.Rd
|
965919d408593e9fd54c7e85dd55dc68e72b4d38
|
[
"MIT"
] |
permissive
|
bwrc/semigeom-r
|
729c86b764d7ec5ed98f1f725929b38992e09f3a
|
993580030b4cc8f6423b808a703a2cfc5bdf8c84
|
refs/heads/master
| 2020-12-25T04:55:03.170869
| 2016-09-17T18:20:15
| 2016-09-17T18:20:15
| 62,126,804
| 4
| 2
|
NOASSERTION
| 2022-12-10T14:03:39
| 2016-06-28T09:10:18
|
R
|
UTF-8
|
R
| false
| true
| 365
|
rd
|
segmat_to_list.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities.R
\name{segmat_to_list}
\alias{segmat_to_list}
\title{Convert a matrix with segments to a list}
\usage{
segmat_to_list(segmat)
}
\arguments{
\item{segmat}{A matrix with segments}
}
\value{
A list containing the tiles
}
\description{
Convert a matrix with segments to a list
}
|
37673b2dc439f95cab6ff7f87dad7dda67430d49
|
a86ac1523fdcf9b4477ac94a3831a9cc71695071
|
/R/dataSetup.R
|
4bef6c6158cabea0722cbab56a1acbe90b51c8a7
|
[] |
no_license
|
wolski/MLwithCaret_AS3
|
682e983c35821b5d2ca75c624713317cd246e1db
|
63e8dff51bda3519685783f5fa1706140c7f7acc
|
refs/heads/master
| 2020-04-26T17:42:19.263964
| 2019-03-04T16:10:06
| 2019-03-04T16:10:06
| 173,721,652
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,824
|
r
|
dataSetup.R
|
# dataSetup.R
# Prepare the wine-quality data for model training:
#   1. read winetrain.csv and binarize the quality/colour columns,
#   2. Box-Cox transform all numeric predictors (transform saved for reuse),
#   3. split into red/white subsets and 80/20 train/test partitions,
#   4. save all partitions as a single timestamped RDS file.
# NOTE(review): clears the whole workspace, expects winetrain.csv in the
# working directory, and createDataPartition is random with no set.seed(),
# so the splits differ from run to run.
rm(list=ls())
library(tidyverse)
library(readr)
library(factoextra)
library(GGally)
library(caret)
# Read training data; drop the row-index column X1.
data_RW <- read_csv("winetrain.csv") %>% dplyr::select(-X1)
# Recode outcomes: quality 0 -> "poor" else "good"; colour 0 -> "white" else "red".
data_RW$quality <- as.factor(ifelse(data_RW$quality==0,"poor","good"))
data_RW$colour <- as.factor(ifelse(data_RW$colour==0,"white","red"))
# Box-Cox transform the numeric predictors; persist the fitted transform so
# the identical preprocessing can be applied to new data at prediction time.
xx <- data_RW %>% select_if(is.numeric)
bb <- as.matrix(xx)
prProc <- preProcess( bb , method="BoxCox" )
saveRDS(prProc, file="preProcess_BoxCoxModel.rds")
gg <- predict( prProc , bb)
gg <- data.frame(gg)
# Reassemble the transformed predictors with the two factor columns.
data_RW_t <- data.frame(colour = data_RW$colour, quality = data_RW$quality, gg)
data_RW <- data_RW_t
data_RW <- as_tibble(data_RW)
# Colour-specific subsets (the colour column is dropped within each subset).
data_W <- data_RW %>% filter(colour=="white") %>% select(-colour)
data_R <- data_RW %>% filter(colour=="red") %>% select(-colour)
# Stratified 80/20 train/test splits on quality for combined, white, and red.
trainIndex_RW <- createDataPartition(data_RW$quality , p = .8,
list = FALSE,
times = 1)
trainIndex_W <- createDataPartition(data_W$quality , p = .8,
list = FALSE,
times = 1)
trainIndex_R <- createDataPartition(data_R$quality , p = .8,
list = FALSE,
times = 1)
data_RW_train <- data_RW[trainIndex_RW,]
data_RW_test <- data_RW[-trainIndex_RW,]
data_W_train <- data_W[trainIndex_W,]
data_W_test <- data_W[-trainIndex_W,]
data_R_train <- data_R[trainIndex_R,]
data_R_test <- data_R[-trainIndex_R,]
# Bundle every partition into one named list and save with a timestamped name.
reslist <- list(data_RW_train = data_RW_train, data_RW_test = data_RW_test,
data_W_train = data_W_train,
data_W_test = data_W_test,
data_R_train = data_R_train,
data_R_test = data_R_test)
saveRDS(reslist, file=paste0("AllData_",gsub("[ :]", "_",date()) , ".rds"))
|
38249be5382c1f1c628f2aa45de1ba0d1e5af768
|
24d84164000936adbfd268aebd825887347e4a21
|
/more_movies.R
|
96c00f67a8c54052ef8bffeead0bfa80e9f77e0b
|
[] |
no_license
|
maxrdavidson/FMLBot
|
57adf296af604c23b1289c1f5e81c68bb2764bf7
|
42c8410828fd28b06d0842379d2474846456a8fd
|
refs/heads/master
| 2020-03-22T10:50:10.827487
| 2018-08-22T21:07:34
| 2018-08-22T21:07:34
| 139,929,916
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,860
|
r
|
more_movies.R
|
# more_movies.R
# Pull daily box-office tables from Box Office Mojo and The Numbers, match
# records between the two sites on a composite key, and attach distributor
# information.
# NOTE(review): this script appears to be pasted from console history -- see
# the notes below; it does not run top-to-bottom as written.
library(boxoffice)
library(tidyverse)
# Single-day pulls from both sites (spot check of the two sources).
thing1 <- boxoffice(as.Date("2018-07-21"),site="mojo")
thing2 <- boxoffice(as.Date("2018-07-21"),site="numbers")
# Normalize curly apostrophes from The Numbers so titles match Mojo's.
thing2$movie <- gsub("’","'",thing2$movie)
# Full date-range scrape from both sites.
start_date <- as.Date("2016-03-04")
end_date <- as.Date("2018-07-20")
date_list <- seq.Date(start_date,end_date,by="days")
numbers <- boxoffice(date_list,site="numbers")
mojo <-boxoffice(date_list,site="mojo")
numbers$movie <- gsub("’","'",numbers$movie)
# Composite join key: title prefix + theater count + days in release + date.
mojo$id <- paste(substring(mojo$movie,1,10),mojo$theaters,mojo$days,mojo$date,sep="_")
numbers$id <- paste(substring(numbers$movie,1,10),numbers$theaters,numbers$days,numbers$date,sep="_")
numbers <- numbers[,c("id","distributor")]
all_movies <- merge(mojo,numbers,by="id")
# NOTE(review): `past_data` is used below but never created/loaded in this
# file -- confirm it comes from a previous session or another script.
past_data$movie <- gsub("’","'",past_data$movie)
past_data$id <- paste(substring(past_data$movie,1,10),past_data$theaters,past_data$days,past_data$date,sep="_")
n_m <- read.csv("corrected_movies.csv")
n_m$id <- paste(substring(n_m$movie,1,10),n_m$theaters,n_m$days,n_m$date,sep="_")
dist <- merge(n_m,past_data,by="id")
# NOTE(review): the id/merge steps below repeat the ones above; `numbers` was
# already reduced to id+distributor, so rebuilding its id here would fail.
numbers$id <- paste(substring(numbers$movie,1,10),numbers$theaters,numbers$days,numbers$date,sep="_")
mojo$id <- paste(substring(mojo$movie,1,10),mojo$theaters,mojo$days,mojo$date,sep="_")
numbers <- numbers[,c("id","distributor")]
all_movies <- merge(mojo,numbers,by="id")
# Keep the distributor column from The Numbers and drop Mojo's duplicate.
names(all_movies)[11] <- "distributor"
all_movies$distributor.x<- NULL
write.csv(all_movies, "corrected_movies.csv")
# NOTE(review): `new_movies` is modified here but only created at the bottom
# of the file -- statement order needs fixing before this can be sourced.
new_movies[3:7] <- NULL
new_movies[4] <- NULL
names(new_movies) <- c("movie","distributor","days","id","gross","percent_change","theaters","per_theater","total_gross","date", "distributor_Y")
new_movies <- new_movies[,c(1,2,5,6,7,8,9,3,10)]
write.csv(new_movies, "corrected_movies.csv")
new_movies <- inner_join(past_data,all_movies,by=c("movie","days"))
past_data
|
abe3cec00628a286921464ab102b460c720aaea8
|
650cebcd88de7cb9f6646b3b6435a68a3e8b2345
|
/06_MOATS_replication_verification/02_Scripts/00_Archive/Part01_of_03_LabviewLogs_to_df_dml_creation.R
|
026eba3e19095db6eab4edaacab2355fddca6402
|
[] |
no_license
|
KROVINSKI/NWFSC.MUK_KRL_SMR2019
|
b15cfec476863e6ac69b2984b24914ba0d236c5c
|
c731dbde4e69b77a7e23216580d72a3f34963a1b
|
refs/heads/master
| 2021-04-24T00:52:23.845084
| 2020-10-30T14:48:20
| 2020-10-30T14:48:20
| 250,047,214
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,608
|
r
|
Part01_of_03_LabviewLogs_to_df_dml_creation.R
|
##Hello World
#*********************************
## Version Check
#*********************************
R.version
## Author: OA Lab, NWFSC
## Title: Aquarium Temperature Investigation: Temperature SubSelectScript.R
## Date: April 2020
# R script below will subselect and plot temperature data for MOATs
# Overall goal is to determine if MOATs (per treatment) are true replicates
# Steps 1 thru 9+
#*********************************
##Libraries
#*********************************
library(shiny)
library(tidyverse)
library(stringr)
library(readxl)
library(readr)
library(tidyr)
library(data.table)
library (sparklyr)
library(xts)
library(TSstudio)
library(lubridate)
library(violinmplot)
library(vioplot)
library(yarrr)
library(datapasta)
library(reprex)
library(miniUI)
#*********************************
## Outline Current 2020.05.08
#*********************************
# 1.) Working Directory
# 2.) Spolling Data into one CSV
# *this includes insitu samples
# 3.) Creating the Dataframe "dml"
#*********************************
## 1.) Set working directory
#*********************************
# NOTE(review): hard-coded personal path; the directory must contain the
# per-MOATS CSVs exported by the moats graph app (see header notes above).
setwd("/Users/katherinerovinski/GIT/NWFSC.MUK_MOATs_SMR2019/LabViewLogs.AllMOATS")
#*********************************
## 2.) Spooling Data into one CSV
#*********************************
# Combine the per-MOATS CSV files into a single document. Original inputs are
# the LVM (LabVIEW) logs from each MOATS, processed by the moats graph (Shiny)
# app with a moving-average window of 4 observations; since LVM files log one
# observation every 6 seconds, the app output has observations 24 s apart.
## 2.1 Create a list of files |
# All files to be joined have ext. "csv" can use that pattern to join
files <- list.files(pattern = ".csv")
print(files)
## 2.2 Create a temporary place for files |
# fread each CSV into a list of data.tables.
temp <- lapply(files, fread, sep= ",")
print(temp)
## 2.3 Create a new vector for Moats data logs |
# "M01thruM13Moatslog_data" via rbind (stacks all MOATS into one table)
M01thruM13moatslog_data <- rbindlist(temp)
print(M01thruM13moatslog_data)
## 2.4 Write the new csv document |
# "M01thruM13moatslog"
write.csv(M01thruM13moatslog_data, file = "M01thruM13moatslog.csv", row.names = FALSE)
# Combined file also saved on the OA Google Drive:
# https://drive.google.com/open?id=15iBXct9b4EjKDq75vKnm5NobowBwK3G-
#*********************************
## 3.) Creating the Dataframe "dml"
#*********************************
## 3.1 Reading the CSV |
## ensuring column names and types
## Data Moats Log = dml
dml <- read.csv(file = "M01thruM13moatslog.csv", stringsAsFactors = FALSE)
dim(dml)
# * * * * * * * * * * * * * * * *
## 3.1a Sub sampling dataframe "dml"
# * * * * * * * * * * * * * * * *
## creating a sub sample of the data moats log dml dataframe to allow for quick graphs
# subsample every 17th row (a prime, to avoid aliasing with regular cycles)
# after ordering by MOATS and timestamp
dml <- dml %>% arrange(moats, dateTime) %>% filter(row_number() %% 17 == 0)
write.csv(dml, file = "M01thruM13moatslog_n17.csv", row.names = FALSE)
# * * * * * * * * * * * * * * * *
## 3.1b duplicates observed in "dml" - checkpoint
# * * * * * * * * * * * * * * * *
# 2020.05.08 Paul McElhany Patch
# Sanity check that the subsample contains no duplicate rows.
#if no dups, then dup2 has 0 rows
dup2 <- dml[duplicated(dml),]
#if no dups, dml2 has same number of rows as dml
dml2 <- dml %>% distinct()
#
# # 2020.05.07 earlier patch superseded by the check above
# # Duplicates observed in observations
# dim(dml)
# uniqueDml <- unique(dml)
# dim(uniqueDml)
# dml <- uniqueDml
#
#
dim(dml)
#**********E*N*D*****************#
#*********************************
## End of Document End of Script
#*********************************
#
# ┊┊┊┊┊╭╭╭╮╮╮┊┊┊┊
# ┊┊┊┊┊╰╰╲╱╯╯┊┊┊┊
# ┊┏╮╭┓╭━━━━━━╮┊┊
# ┊╰╮╭╯┃┈┈┈┈┈┈┃┊┊
# ┊┊┃╰━╯┈┈╰╯┈┈┃┊┊
# ┊┊┃┈┈┈┈┈┈┈╰━┫┊┊
# ╲╱╲╱╲╱╲╱╲╱╲╱╲╱╲
|
6186eebfe5a953e0c97f0a9fe3e622ec4b25472a
|
9653102c68d42bb1bae1284fb0840210cd28063f
|
/man/viewMiss.Rd
|
ea15a4c712f054ee1631de788d52cf2e2208cc61
|
[] |
no_license
|
RonGuymon/ronsFunctions
|
caf4fd6180a6e169ce8326bf4df5f45a3026b324
|
a16af4c8441f1f926068e91ce8a8237971158456
|
refs/heads/master
| 2021-01-23T05:45:31.438373
| 2018-11-30T16:49:45
| 2018-11-30T16:49:45
| 92,987,270
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 454
|
rd
|
viewMiss.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/viewMiss.R
\name{viewMiss}
\alias{viewMiss}
\title{View a simple histogram of missing data}
\usage{
viewMiss(df)
}
\arguments{
\item{df}{The name of the dataframe, not in quotes, whose columns will be checked for missing values.}
}
\value{
Plot
}
\description{
Returns a histogram with a bar for each column in the dataframe. The height is the percent missing.
}
|
f39a0a332d3bf28bd9c89c0bd2e6d0e3ed74cccd
|
1fbb0be81cda9cc4e559932f7797db699605883a
|
/man/setEpsilon-set.Rd
|
dd32170f91265304f873f8b0e3709a7d5ec0cc8f
|
[
"MIT"
] |
permissive
|
cran/diffpriv
|
07e6980e8ebcd9a374b87aeee2c07390860f0944
|
456af10fbeed00dc49960b59bf2f511d36752e9f
|
refs/heads/master
| 2021-01-01T15:19:37.441979
| 2017-07-18T10:42:21
| 2017-07-18T10:42:21
| 97,591,600
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 455
|
rd
|
setEpsilon-set.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/privacy_params.R
\name{setEpsilon<-}
\alias{setEpsilon<-}
\title{Setter for slot \code{epsilon}.}
\usage{
setEpsilon(object) <- value
}
\arguments{
\item{object}{the instance of \code{DPParamsEps}.}
\item{value}{positive numeric \eqn{\epsilon} value.}
}
\description{
Use this method instead of slot \code{epsilon}.
}
\seealso{
\code{\link{DPParamsEps}}.
}
|
36259a62abe5105145f066ad77eee3c56020fc92
|
ec8c3a7ee6af47a7a7294fae09e54e649214e8fc
|
/R/fundos_vedados.R
|
410ac403d26d779177097da65f477a10e517fcf3
|
[
"MIT"
] |
permissive
|
marcosfs2006/ADPrev
|
c47c599786b8275dbfb4f575d9ddfeeb80a55173
|
1e9918a6a1759b7a890772a4632a987a0e9a1005
|
refs/heads/master
| 2021-11-24T17:07:14.732712
| 2021-11-18T19:34:47
| 2021-11-18T19:34:47
| 330,948,351
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,973
|
r
|
fundos_vedados.R
|
#' List of Prohibited Investment Funds
#'
#' Dataset containing the list of investment funds in which RPPS (Brazilian
#' civil-servant pension schemes) are prohibited from investing.
#'
#' @name fundos_vedados
#' @format Data frame with 138 observations and 12 variables in the
#' 2018-12-21 version of the list
#' \describe{
#'   \item{cnpj}{CNPJ (tax ID) of the investment fund}
#'   \item{nm_fundo}{name of the investment fund}
#'   \item{classe}{category of the investment fund}
#'   \item{sub_classe}{type of the investment fund}
#'   \item{tx_adm}{management-fee percentage of the investment fund}
#'   \item{administrador}{name of the fund administrator}
#'   \item{gestor}{name of the fund manager}
#'   \item{carencia}{lock-up period before share redemption can be requested}
#'   \item{conv_cotas}{number of days until redeemed shares are converted to cash}
#'   \item{disp_resgate}{number of days until the proceeds of redeemed shares become available}
#'   \item{tx_saida}{exit-fee percentage charged on the redeemed amount when redemption occurs before the agreed term}
#'   \item{motivo}{reason why the investment fund is prohibited for RPPS}
#' }
#' @details
#' The data refer to the list of investment funds prohibited for RPPS
#' released by SPREV, dated 2018-12-21. It covers the funds that held RPPS
#' investments on that reference date.
#'
#' Other prohibited investment funds may exist that are not included in this
#' dataset.
#'
#' Usage examples for this dataset can be found in the tutorial
#' [Análise de Dados Previdenciários - ADPrev](https://marcosfs2006.github.io/ADPrevBook/)
#'
#' @source \url{https://www.gov.br/previdencia/pt-br/assuntos/previdencia-no-servico-publico/menu-investimentos/investimento-estatisticas-e-informacoes}
#' @md
"fundos_vedados"
|
f86dc3bafd45f49503a083218897502758dc8f06
|
05df63a03336e8d7c5738c797f5969ff16f8f8c1
|
/TP3/src/Fonctions_Utilities.R
|
9baa121a93a24040aaa1261018788f43dc79a9e8
|
[] |
no_license
|
raphaelhamonnais/UTC_SY09_DataMining
|
a1fead6eb84aa2719a65688c599d0f6a407eee3e
|
3816a16a32c7e5c9be9cc20e940bba65cb82cfa3
|
refs/heads/master
| 2021-03-22T04:20:04.637177
| 2017-06-21T15:46:47
| 2017-06-21T15:46:47
| 84,962,362
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 735
|
r
|
Fonctions_Utilities.R
|
# Return the N-th largest value of `x` (default: the second largest).
# If N exceeds length(x), a warning is issued and N is capped at length(x),
# so the overall minimum is returned in that case.
# Uses a partial sort so only the needed order statistic is fully placed.
maxN <- function(x, N = 2) {
  n_vals <- length(x)
  if (N > n_vals) {
    warning("N greater than length(x). Setting N=length(x)")
    N <- n_vals
  }
  target <- n_vals - N + 1
  sort(x, partial = target)[target]
}
# Return the N-th smallest value of `x` (default: the second smallest).
# If N exceeds length(x), a warning is issued and N is capped at length(x),
# so the overall maximum is returned in that case.
# Uses a partial sort so only the needed order statistic is fully placed
# (the original's partial = len - (len - N) simplifies to N exactly).
minN <- function(x, N = 2) {
  n_vals <- length(x)
  if (N > n_vals) {
    warning("N greater than length(x). Setting N=length(x)")
    N <- n_vals
  }
  sort(x, partial = N)[N]
}
# Compute the classification success rate (overall accuracy): the fraction
# of predictions that match the actual class labels.
#
# Args:
#   predictedClasses: vector/factor of predicted class labels.
#   actualClasses: vector/factor of true class labels, same length.
# Returns: a single numeric value in [0, 1], returned visibly (the original
# returned it invisibly via a trailing assignment).
compute.sucess.rate <- function(predictedClasses, actualClasses) {
  # Contingency table of predictions vs. truth; the diagonal holds the
  # correctly classified counts.
  confusion <- table(predictedClasses, actualClasses)
  sum(diag(confusion)) / sum(confusion)
}
|
af6c7745b8062988e265dc630bd0a8790f86b588
|
8ac8320676625ab0cfc91282084a221a8b101c46
|
/src/R/data_processing/mimic/8-generate_reference_data_mimic_2.R
|
e57985e8fc5dcbe01876891ab7d1db3282a90a41
|
[
"MIT"
] |
permissive
|
farhanadam/shockalert-documented
|
e8e01e698ee8cb8fe1b28b34b708c5b7b93967b2
|
9f8a0e78a62ebb235e2de9367a77f3b119dae4c5
|
refs/heads/master
| 2023-02-23T18:17:26.776118
| 2021-01-23T16:54:27
| 2021-01-23T16:54:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,162
|
r
|
8-generate_reference_data_mimic_2.R
|
# Build the MIMIC-III reference datasets for septic-shock early prediction:
# label ICU stays (infection / sepsis / shock), derive shock-onset times,
# generate sampling-rate-matched feature tables for each patient group in
# parallel, and save everything to a single .rdata file.
# NOTE(review): clears the workspace and expects to be run from the project
# root (relative src/ and data/ paths).
rm(list=ls())
library(pracma)
library(tictoc)
library(glmnet)
library(ROCR)
library(matrixStats)
library(parallel)
library(ggplot2)
library(lubridate)
library(xgboost)
# Project helper functions (feature evaluation and table generation).
source("src/R/functions/mimic/generate_sampling_rate_table.R")
source("src/R/functions/mimic/eval_carry_forward.R")
source("src/R/functions/mimic/eval_interval.R")
source("src/R/functions/mimic/eval_max_in_past_2.R")
source("src/R/functions/mimic/eval_sum_in_past.R")
source("src/R/functions/mimic/eval_early_prediction_timestamps_combined_rf.R")
source("src/R/functions/mimic/eval_table_with_sofa_4.R")
source("src/R/functions/mimic/generate_table_with_sofa_timestamps_2.R")
# Preprocessed inputs from the earlier pipeline steps.
sofa.scores = readRDS("data/mimic/sofa_scores.rds")
clinical.data = readRDS("data/mimic/clinical.data.mimic.rds")
infection.icustays = readRDS("data/mimic/icd9.infection.icustays.rds")
icustays = readRDS("data/mimic/icustays.rds")
# Convert timestamps to numeric
# Label derivation per ICU stay:
#  - has.infection: stay appears in the ICD9 infection list
#  - sepsis.labels: timestamps where rowSums of SOFA columns 2:7 >= 2
#  - shock.labels: sepsis plus elevated lactate plus vasopressor flags
#  - shock.onsets: earliest timestamp meeting the shock criteria
clinical.subjects = sapply(clinical.data, function(x) x$icustay.id)
clinical.hadm.ids = sapply(clinical.subjects, function(x) icustays$hadm_id[which(icustays$icustay_id==x)])
has.infection = is.element(clinical.subjects, infection.icustays)
lengths = sapply(sofa.scores, function(x) length(x$timestamps))
sepsis.labels = sapply(sofa.scores[lengths>0&has.infection], function(x) rowSums(x[2:7])>=2)
has.sepsis = sapply(sepsis.labels, any)
shock.labels = mapply(function(x,y) x&y$lactate&y$vasopressors, sepsis.labels, sofa.scores[lengths>0&has.infection])
has.shock = sapply(shock.labels, function(x) any(x,na.rm=T))
shock.onsets = as_datetime(mapply(function(x,y) min(x$timestamps[y],na.rm=T),sofa.scores[lengths>0&has.infection][has.shock],shock.labels[has.shock]),tz="GMT")
# Per-patient sampling-rate tables (sequential; timed with tictoc).
tic("Generating sampling rate data")
sampling.rate.data = lapply(clinical.data, generate.sampling.rate.table)
toc()
# First parallel pass: generate timestamp tables per patient group.
# Pre-shock windows are anchored 120 to 60 minutes before shock onset.
num.cores = detectCores()
cluster = makeCluster(num.cores)
clusterExport(cluster,c("eval.table.with.sofa","eval.carry.forward","eval.sum.in.past","eval.max.in.past","eval.interval","generate.table.with.sofa.timestamps","sofa.scores","sampling.rate.data","shock.onsets","lengths","has.sepsis","has.shock","has.infection"))
clusterEvalQ(cluster,library(lubridate))
tic("Generate data tables (parallel)")
noninfection.data.sampling.rate = parLapply(cluster, 1:sum(!has.infection), function(x) generate.table.with.sofa.timestamps(min(sofa.scores[lengths>0&!has.infection][[x]]$timestamps),max(sofa.scores[lengths>0&!has.infection][[x]]$timestamps),100,sampling.rate.data[lengths>0&!has.infection][[x]]))
nonsepsis.data.sampling.rate = parLapply(cluster, 1:sum(!has.sepsis), function(x) generate.table.with.sofa.timestamps(min(sofa.scores[lengths>0&has.infection][!has.sepsis][[x]]$timestamps),max(sofa.scores[lengths>0&has.infection][!has.sepsis][[x]]$timestamps),100,sampling.rate.data[lengths>0&has.infection][!has.sepsis][[x]]))
nonshock.data.sampling.rate = parLapply(cluster, 1:sum(has.sepsis&!has.shock), function(x) generate.table.with.sofa.timestamps(min(sofa.scores[lengths>0&has.infection][has.sepsis&!has.shock][[x]]$timestamps),max(sofa.scores[lengths>0&has.infection][has.sepsis&!has.shock][[x]]$timestamps),100,sampling.rate.data[lengths>0&has.infection][has.sepsis&!has.shock][[x]]))
preshock.data.sampling.rate = parLapply(cluster, 1:sum(has.shock), function(x) generate.table.with.sofa.timestamps(shock.onsets[x]-minutes(120),shock.onsets[x]-minutes(60),100,sampling.rate.data[lengths>0&has.infection][has.shock][[x]]))
toc()
stopCluster(cluster)
# Second parallel pass: evaluate the clinical feature tables at the
# timestamps produced above (a fresh cluster is made so the first pass's
# intermediate results can be exported to the workers).
num.cores = detectCores()
cluster = makeCluster(num.cores)
clusterExport(cluster,c("eval.table.with.sofa","eval.carry.forward","eval.sum.in.past","eval.max.in.past","eval.interval","generate.table.with.sofa.timestamps","sofa.scores","clinical.data","shock.onsets","lengths","has.sepsis","has.shock","noninfection.data.sampling.rate","nonsepsis.data.sampling.rate","nonshock.data.sampling.rate","preshock.data.sampling.rate","has.infection"))
clusterEvalQ(cluster,library(lubridate))
tic("Data tables (parallel)")
noninfection.data = parLapply(cluster, 1:sum(!has.infection), function(x) eval.table.with.sofa(noninfection.data.sampling.rate[[x]]$timestamps,clinical.data[lengths>0&!has.infection][[x]]))
nonsepsis.data = parLapply(cluster, 1:sum(!has.sepsis), function(x) eval.table.with.sofa(nonsepsis.data.sampling.rate[[x]]$timestamps,clinical.data[lengths>0&has.infection][!has.sepsis][[x]]))
nonshock.data = parLapply(cluster, 1:sum(has.sepsis&!has.shock), function(x) eval.table.with.sofa(nonshock.data.sampling.rate[[x]]$timestamps,clinical.data[lengths>0&has.infection][has.sepsis&!has.shock][[x]]))
preshock.data = parLapply(cluster, 1:sum(has.shock), function(x) eval.table.with.sofa(preshock.data.sampling.rate[[x]]$timestamps,clinical.data[lengths>0&has.infection][has.shock][[x]]))
toc()
stopCluster(cluster)
# Persist labels, sampling-rate tables, and all four group datasets together.
save(clinical.subjects,clinical.hadm.ids,has.infection,sepsis.labels,has.sepsis,shock.labels,has.shock,shock.onsets,sampling.rate.data,noninfection.data.sampling.rate,nonsepsis.data.sampling.rate,
nonshock.data.sampling.rate,preshock.data.sampling.rate,noninfection.data,nonsepsis.data,nonshock.data,preshock.data,
file="data/mimic/mimic3.reference.data2.rdata")
|
488010e9fb25ebfa6fd575f1c3db744078796f12
|
61d29d3ef402b7d47e527d054372e1d50e6a2e12
|
/R/pxd007959.R
|
ff3471a2c22494b9cd5542fbd32142d95bc0df8f
|
[] |
no_license
|
DavisLaboratory/msImpute
|
2cab3e32be84656b9120db00fb32083547665e63
|
538873e2d8f512bfdfba4f764457d194d961f26a
|
refs/heads/master
| 2023-08-10T03:45:05.054105
| 2023-07-31T08:25:53
| 2023-07-31T08:25:53
| 239,129,382
| 9
| 0
| null | 2022-10-13T11:02:07
| 2020-02-08T12:32:33
|
R
|
UTF-8
|
R
| false
| false
| 1,605
|
r
|
pxd007959.R
|
#' Processed peptide intensity matrix and experimental design table from PXD007959 study
#'
#' Characterizes the proteomic profile of extracellular vesicles isolated from the
#' descending colon of pediatric patients with inflammatory bowel disease and
#' control participants. This object contains data from the peptide.txt
#' table output by MaxQuant. Rows are Modified Peptide IDs. Charge state variations are treated as distinct peptide species.
#' Reverse complements and contaminant peptides are discarded. Peptides with more than 4 observed intensity values are retained.
#' Additionally, qualified peptides are required to map uniquely to proteins.
#' Two of the samples with missing group annotation were excluded.
#' The peptide.txt and experimentalDesignTemplate files can be downloaded as RDS objects from \url{https://github.com/soroorh/proteomicscasestudies}.
#' Code for data processing is provided in the package vignette.
#'
#' @format A list of two: samples (data frame of sample descriptions), and y (numeric matrix of peptide intensity values)
#' @references
#' Zhang X, Deeke SA, Ning Z, Starr AE, Butcher J, Li J, Mayne J, Cheng K, Liao B, Li L, Singleton R, Mack D, Stintzi A, Figeys D, Metaproteomics reveals associations between microbiome and intestinal extracellular vesicle proteins in pediatric inflammatory bowel disease. Nat Commun, 9(1):2873(2018)
#' @source \url{http://proteomecentral.proteomexchange.org/cgi/GetDataset?ID=PXD007959}
"pxd007959"
|
2137ecf8361c7ea3926e601147d9e392514f6b24
|
6774ff603669193087d624f4c890d012e1dbf808
|
/man/GenoGAMDataSet-class.Rd
|
f4630574b322c107510897a90db2931c565ebc7a
|
[] |
no_license
|
gstricker/GenoGAM
|
b1a6f73b925df91f5413766de037b7975a449ea2
|
0ac7b967b6926bc6810909bec3c6926a91128c2e
|
refs/heads/master
| 2021-05-01T05:24:42.051198
| 2019-07-14T14:09:57
| 2019-07-14T14:09:57
| 55,611,218
| 10
| 4
| null | 2016-04-06T14:07:54
| 2016-04-06T14:07:54
| null |
UTF-8
|
R
| false
| true
| 11,214
|
rd
|
GenoGAMDataSet-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GenoGAMDataSet-class.R
\docType{class}
\name{GenoGAMDataSet-class}
\alias{GenoGAMDataSet-class}
\alias{GenoGAMDataSet}
\alias{getIndex}
\alias{tileSettings}
\alias{dataRange}
\alias{getChromosomes}
\alias{getTileSize}
\alias{getChunkSize}
\alias{getOverhangSize}
\alias{getTileNumber}
\alias{getChunkSize<-}
\alias{getTileSize<-}
\alias{getOverhangSize<-}
\alias{getTileNumber<-}
\alias{getIndex,GenoGAMDataSet-method}
\alias{getCountMatrix,GenoGAMDataSet-method}
\alias{tileSettings,GenoGAMDataSet-method}
\alias{dataRange,GenoGAMDataSet-method}
\alias{getChromosomes,GenoGAMDataSet-method}
\alias{getTileSize,GenoGAMDataSet-method}
\alias{getChunkSize,GenoGAMDataSet-method}
\alias{getOverhangSize,GenoGAMDataSet-method}
\alias{getTileNumber,GenoGAMDataSet-method}
\alias{is.HDF5,GenoGAMDataSet-method}
\alias{design,GenoGAMDataSet-method}
\alias{design<-,GenoGAMDataSet,ANY-method}
\alias{sizeFactors,GenoGAMDataSet-method}
\alias{sizeFactors<-,GenoGAMDataSet,ANY-method}
\alias{getChunkSize<-,GenoGAMDataSet,numeric-method}
\alias{getTileSize<-,GenoGAMDataSet,numeric-method}
\alias{getOverhangSize<-,GenoGAMDataSet,numeric-method}
\alias{getTileNumber<-,GenoGAMDataSet,numeric-method}
\title{GenoGAMDataSet}
\usage{
GenoGAMDataSet(experimentDesign, design, chunkSize = NULL,
overhangSize = NULL, directory = ".", settings = NULL,
hdf5 = FALSE, split = hdf5, fromHDF5 = FALSE, ignoreM = FALSE,
...)
\S4method{getIndex}{GenoGAMDataSet}(object)
\S4method{getCountMatrix}{GenoGAMDataSet}(object)
\S4method{tileSettings}{GenoGAMDataSet}(object)
\S4method{dataRange}{GenoGAMDataSet}(object)
\S4method{getChromosomes}{GenoGAMDataSet}(object)
\S4method{getTileSize}{GenoGAMDataSet}(object)
\S4method{getChunkSize}{GenoGAMDataSet}(object)
\S4method{getOverhangSize}{GenoGAMDataSet}(object)
\S4method{getTileNumber}{GenoGAMDataSet}(object)
\S4method{is.HDF5}{GenoGAMDataSet}(object)
\S4method{design}{GenoGAMDataSet}(object)
\S4method{design}{GenoGAMDataSet,ANY}(object) <- value
\S4method{sizeFactors}{GenoGAMDataSet}(object)
\S4method{sizeFactors}{GenoGAMDataSet,ANY}(object) <- value
\S4method{getChunkSize}{GenoGAMDataSet,numeric}(object) <- value
\S4method{getTileSize}{GenoGAMDataSet,numeric}(object) <- value
\S4method{getOverhangSize}{GenoGAMDataSet,numeric}(object) <- value
\S4method{getTileNumber}{GenoGAMDataSet,numeric}(object) <- value
}
\arguments{
\item{experimentDesign}{Either a character object specifying the path to a
delimited text file (the delimiter will be determined automatically),
a data.frame specifying the experiment design or a RangedSummarizedExperiment
object with the GPos class being the rowRanges. See details for the structure
of the experimentDesign.}
\item{design}{A formula object. See details for its structure.}
\item{chunkSize}{An integer specifying the size of one chunk in bp.}
\item{overhangSize}{An integer specifying the size of the overhang in bp.
As the overhang is taken to be symmetrical, only the overhang of one side
should be provided.}
\item{directory}{The directory from which to read the data. By default
the current working directory is taken.}
\item{settings}{A GenoGAMSettings object. Not needed by default, but might
be of use if only specific regions should be read in.
See \code{\link{GenoGAMSettings}}.}
\item{hdf5}{Should the data be stored on HDD in HDF5 format? By default this
is disabled, as the Rle representation of count data already provides a
decent compression of the data. However in case of large organisms, a complex
experiment design or just limited memory, this might further decrease the
memory footprint. Note this only applies to the input count data, results are
usually stored in HDF5 format due to their space requirements for type double.
Exceptions are small organisms like yeast.}
\item{split}{A logical argument specifying if the data should be stored as
a list split by chromosome. This is useful and necessary for huge organisms like
human, as R does not support long integers.}
\item{fromHDF5}{A logical argument specifying if the data is already present in
form of HDF5 files and should be rather read in from there.}
\item{ignoreM}{A logical argument to ignore the Mitochondria DNA on data read in.
This is useful, if one is not interested in chrM, but it's size prevents the tiles
to be larger, as all tiles has to be of some size.}
\item{...}{Further parameters, mostly for arguments of custom processing
functions or to specify a different method for fragment size estimation.
See details for further information.}
\item{object}{For use of S4 methods. The GenoGAMDataSet object.}
\item{value}{For use of S4 methods. The value to be assigned to the slot.}
}
\value{
An object of class \code{\link{GenoGAMDataSet}} or the respective slot.
}
\description{
The GenoGAMDataSet class contains the pre-processed raw data and
additional slots that define the input and framework for the model.
It extends the RangedSummarizedExperiment class by adding an index
that defines ranges on the entire genome, mostly for purposes of
parallel evaluation. Furthermore adding a couple more slots to hold
information such as experiment design. It also contains the
\code{\link[GenoGAM]{GenoGAMSettings}} class that defines global
settings for the session. For information on the slots inherited from
SummarizedExperiment check the respective class.
GenoGAMDataSet is the constructor function for the GenoGAMDataSet-class.
}
\section{Methods (by generic)}{
\itemize{
\item \code{getIndex}: An accessor to the index slot
\item \code{getCountMatrix}: An accessor to the countMatrix slot
\item \code{tileSettings}: The accessor to the list of settings,
that were used to generate the tiles.
\item \code{dataRange}: The actual underlying GRanges showing
the range of the data.
\item \code{getChromosomes}: A GRanges object representing the chromosomes
or chromosome regions on which the model will be computed
\item \code{getTileSize}: The size of the tiles
\item \code{getChunkSize}: The size of the chunks
\item \code{getOverhangSize}: The size of the overhang (on one side)
\item \code{getTileNumber}: The total number of tiles
\item \code{is.HDF5}: A boolean function that is true if object uses HDF5 backend
\item \code{design}: Access to the design slot.
\item \code{design<-}: Replace method of the design slot.
\item \code{sizeFactors}: Access to the sizeFactors slot
\item \code{sizeFactors<-}: Replace method of the sizeFactors slot
\item \code{getChunkSize<-}: Replace method of the chunkSize parameter,
that triggers a new computation of the tiles based on the new chunk size.
\item \code{getTileSize<-}: Replace method of the tileSize parameter,
that triggers a new computation of the tiles based on the new tile size.
\item \code{getOverhangSize<-}: Replace method of the overhangSize parameter,
that triggers a new computation of the tiles based on the new overhang size.
\item \code{getTileNumber<-}: Replace method of the tileNumber parameter,
that triggers a new computation of the tiles based on the new number of tiles.
}}
\section{Slots}{
\describe{
\item{\code{settings}}{The global and local settings that will be used to compute the
model.}
\item{\code{design}}{The formula describing how to evaluate the data. See details.}
\item{\code{sizeFactors}}{The normalized values for each sample. A named numeric vector.}
\item{\code{index}}{A GRanges object representing an index of the ranges defined
on the genome. Mostly used to store tiles.}
\item{\code{hdf5}}{A logical slot indicating if the object should be stored as HDF5}
\item{\code{countMatrix}}{Either a matrix or HDF5Matrix to store the sums of counts of
the regions (could also be seen as bins) for later use especially by DESeq2}
}}
\section{Config}{
The config file/data.frame contains the actual experiment design. It must
contain at least three columns with fixed names: 'ID', 'file' and 'paired'.
The field 'ID' stores a unique identifier for each alignment file.
It is recommended to use short and easy to understand identifiers because
they are subsequently used for labelling data and plots.
The field 'file' stores the BAM file name.
The field 'paired', values TRUE for paired-end sequencing data, and FALSE for
single-end sequencing data.
All other columns are stored in the colData slot of the GenoGAMDataSet
object. Note that all columns which will be used for analysis must have at
most two conditions, which are for now restricted to 0 and 1. For example,
if the IP data should be corrected for input, then the input will be 0
and IP will be 1, since we are interested in the corrected IP. See examples.
}
\section{Design/Formula}{
Design must be a formula. At the moment only the following is
possible: Either ~ s(x) for a smooth fit over the entire data or
s(x, by = myColumn), where 'myColumn' is a column name
in the experimentDesign. Any combination of this is possible:
~ s(x) + s(x, by = myColumn) + s(x, by = ...) + ...
For example the formula for correcting IP for input would look like this:
~ s(x) + s(x, by = experiment)
where 'experiment' is a column with 0s and 1s, with the ip samples annotated
with 1 and input samples with 0.
}
\section{Further parameters}{
In case of single-end data it might be useful to specify a different
method for fragment size estimation. The argument 'shiftMethod' can be
supplied with the values 'coverage' (default), 'correlation' or 'SISSR'.
See ?chipseq::estimate.mean.fraglen for explanation.
}
\examples{
# Build from config file
config <- system.file("extdata/Set1", "experimentDesign.txt", package = "GenoGAM")
dir <- system.file("extdata/Set1/bam", package = "GenoGAM")
## For all data
ggd <- GenoGAMDataSet(config, chunkSize = 1000, overhangSize = 200,
design = ~ s(x) + s(x, by = genotype), directory = dir)
ggd
## Read data of a particular chromosome
settings <- GenoGAMSettings(chromosomeList = "chrXIV")
ggd <- GenoGAMDataSet(config, chunkSize = 1000, overhangSize = 200,
design = ~ s(x) + s(x, by = genotype), directory = dir,
settings = settings)
ggd
## Read data of particular range
region <- GenomicRanges::GRanges("chrI", IRanges(10000, 15000))
params <- Rsamtools::ScanBamParam(which = region)
settings <- GenoGAMSettings(bamParams = params)
ggd <- GenoGAMDataSet(config, chunkSize = 1000, overhangSize = 200,
design = ~ s(x) + s(x, by = genotype), directory = dir,
settings = settings)
ggd
# Build from data.frame config
df <- read.table(config, header = TRUE, sep = '\\t')
ggd <- GenoGAMDataSet(df, chunkSize = 1000, overhangSize = 200,
design = ~ s(x) + s(x, by = genotype), directory = dir,
settings = settings)
ggd
# Build from SummarizedExperiment
gr <- GenomicRanges::GPos(GRanges("chr1", IRanges(1, 10000)))
seqlengths(gr) <- 1e6
df <- S4Vectors::DataFrame(colA = 1:10000, colB = round(runif(10000)))
se <- SummarizedExperiment::SummarizedExperiment(rowRanges = gr, assays = list(df))
ggd <- GenoGAMDataSet(se, chunkSize = 2000, overhangSize = 250,
design = ~ s(x) + s(x, by = experiment))
ggd
}
\author{
Georg Stricker \email{georg.stricker@in.tum.de}
}
|
4ea3e9e6528b6bb00211ff60510fc7920b91cbfe
|
51d049a86d691e6d44a0a46364a15a9c8bb64c54
|
/analysisDV/countGenes.R
|
d439b183408f91204ec1b23603de494bb31ad4fd
|
[] |
no_license
|
cgpu/sbas-nf
|
5a0776a439099a740947edd2ca8e0e5e8753f1d2
|
78c22c1642d43c920c791610e5cf094b77edf42b
|
refs/heads/5b31e76-error-strategy
| 2023-02-26T12:32:23.986621
| 2020-08-26T12:05:49
| 2020-08-26T12:05:49
| 264,303,520
| 1
| 1
| null | 2020-08-26T11:56:23
| 2020-05-15T21:34:45
|
HTML
|
UTF-8
|
R
| false
| false
| 6,819
|
r
|
countGenes.R
|
library(dplyr)
library(ggplot2)

# Parse per-tissue rMATS significance files and build one long data frame
# (`gene_as`) with one row per significant alternative-splicing event:
#   Tissue     - tissue name, derived from the file name
#   ASE        - event type (A3SS, A5SS, MXE, RI, SE)
#   GeneSymbol - gene carrying the event
#   chr        - chromosome of the event
files <- list.files(path = "significant_events/", pattern = "*.txt")
as_types <- c("a3ss", "a5ss", "mxe", "ri", "se")

# Tissue name = file name minus the ".txt" suffix and the event-type suffix.
files_aux <- gsub(pattern = ".txt", replacement = "", x = files)
files_aux <- gsub(pattern = "a3ss$|a5ss$|mxe$|ri$|se$", replacement = "", files_aux)

# Event annotations (rMATS "fromGTF" files), keyed by event-type suffix so the
# five copy-pasted per-type branches of the original collapse into one lookup.
read_annot <- function(path) {
  read.table(file = path, sep = "\t", quote = "\"", header = TRUE,
             stringsAsFactors = FALSE)
}
annots <- list(
  a3ss = read_annot("fromGTF.A3SS.txt"),
  a5ss = read_annot("fromGTF.A5SS.txt"),
  mxe  = read_annot("fromGTF.MXE.txt"),
  ri   = read_annot("fromGTF.RI.txt"),
  se   = read_annot("fromGTF.SE.txt")
)

# Accumulate per-file results in a preallocated list and bind once at the end
# (avoids the O(n^2) rbind-inside-loop of the original). This also fixes a
# latent bug: in the original, a file matching none of the suffix patterns
# silently re-bound `res` from the previous iteration, duplicating rows.
res_list <- vector("list", length(files))
for (i in seq_along(files)) {
  lines <- readLines(paste0("./significant_events/", files[i]))
  if (length(lines) > 1) {  # file has significant events beyond the header
    events <- read.table(paste0("./significant_events/", files[i]),
                         sep = "\t", skip = 1)
    # Identify the event type from the trailing "<type>.txt" suffix.
    type_hit <- which(vapply(as_types,
                             function(tp) grepl(paste0(tp, ".txt$"), files[i]),
                             logical(1)))
    if (length(type_hit) > 0) {
      type <- as_types[type_hit[1]]
      annot <- annots[[type]]
      idx <- match(events$V1, annot$ID)  # map event IDs to annotation rows
      res_list[[i]] <- data.frame(Tissue = files_aux[i],
                                  ASE = toupper(type),
                                  GeneSymbol = annot$geneSymbol[idx],
                                  chr = annot$chr[idx])
    }
  }
}
gene_as <- do.call(rbind, res_list)
if (is.null(gene_as)) gene_as <- data.frame()  # no significant events anywhere
# Rank genes by their total number of sex-biased splicing events across all
# tissues and event types.
res <- gene_as %>%
  count(GeneSymbol) %>%
  arrange(desc(n)) %>%
  as.data.frame()
# Lock factor levels to the ranked order so downstream plots keep the sort.
res$GeneSymbol <- factor(res$GeneSymbol, levels = res$GeneSymbol)
length(res$GeneSymbol)  # number of distinct spliced genes

# Number of distinct tissues in which each gene has >= 1 significant event.
# The original preallocated with rep(NA, length(res)); length() of a
# data.frame is its COLUMN count, not its row count, so the vector was
# under-allocated (it only worked via R's silent auto-extension). Computed
# directly per gene instead of looping row by row.
tissue_counts <- gene_as %>%
  group_by(GeneSymbol) %>%
  summarise(Tissues = n_distinct(Tissue)) %>%
  ungroup()
res$Tissues <- tissue_counts$Tissues[match(res$GeneSymbol,
                                           tissue_counts$GeneSymbol)]
# Genes with more than 10 splicing events
ggplot(res[res$n > 10, ], aes(x = GeneSymbol, y = n)) +
geom_point(size = 4, aes(fill = Tissues, color = Tissues)) +
theme_bw() +
theme(axis.text.x = element_text(size=10, angle = 270, hjust = 0.0, vjust = 0.5),
axis.text.y = element_text(size=16),
axis.title.x = element_text(face="plain", colour="black",
size=14),
axis.title.y = element_text(face="plain", colour="black",
size=14),
legend.title=element_blank(),
legend.text = element_text(face="plain", colour="black",
size=12)) +
scale_fill_viridis_c(aesthetics = c("colour", "fill"),
option = "plasma",
limits = c(1, 30), breaks = c(10, 20, 30)) +
ylab(paste("Number of sex-biased splicing events")) +
xlab("Genes")
# Pie chart of gene recurrence across tissues:
#   tissue-specific : significant events in exactly 1 tissue
#   tissue group    : 2-5 tissues
#   recurrent       : > 5 tissues
# NOTE: the original binned the middle group with `Tissues > 1 & Tissues < 5`,
# silently dropping genes spliced in exactly 5 tissues even though the legend
# reads "2-5 tissues"; fixed to `<= 5` so the three bins are exhaustive.
counts <- c(res %>% filter(Tissues == 1) %>% count() %>% as.numeric(),
            res %>% filter(Tissues > 1 & Tissues <= 5) %>% count() %>% as.numeric(),
            res %>% filter(Tissues > 5) %>% count() %>% as.numeric())
# Grey-scale palette suitable for black & white print. (A dead 5-colour
# assignment that was immediately overwritten has been removed.)
colors <- c("grey90", "grey50", "black")
# Label each slice with its percentage and its absolute count.
counts_labels <- round(counts/sum(counts) * 100, 1)
counts_labels <- paste(counts_labels, "%", sep="")
counts_labels <- paste(counts_labels, counts, sep =" ")
# Draw the pie with custom colors and labels.
pie(counts, main="", col=colors, labels=counts_labels,
    cex=1.2)
# Legend placed to the right of the pie.
legend(1.5, 0.5, c("1 tissue","2-5 tissues","> 5 tissues"), cex=1.2,
       fill=colors)
# Number of splicing events per chromosome, normalized by exon content:
# Index = (events on chr / exon records on chr) * 1000, i.e. events per 1000
# annotated exons, correcting for chromosome size / gene density.
library(rtracklayer)
# NOTE(review): hard-coded local path to the Gencode v25 GTF -- adjust or
# parameterize for other machines.
gencode <- import.gff("/Users/dveiga/tools/Gencode/gencode.v25.annotation.gtf")
exons <- gencode[ gencode$type == "exon", ]
exons <- as.data.frame(exons)

# Exon records per chromosome, restricted to chromosomes that carry at least
# one significant splicing event (vapply replaces the original index loop).
all_chr <- as.character(unique(gene_as$chr))
chr_counts <- vapply(all_chr,
                     function(ch) sum(exons$seqnames == ch),
                     numeric(1))
exon_counts <- data.frame(chr = all_chr, counts = unname(chr_counts))

# Count events per chromosome and normalize by exon content.
res <- gene_as %>% count(chr) %>% arrange(desc(n)) %>% as.data.frame()
res$chr <- factor(res$chr, levels = res$chr)
res$ExonCounts <- exon_counts$counts[match(res$chr, exon_counts$chr)]
res$Index <- (res$n / res$ExonCounts) * 1000

# Re-sort by the normalized index for plotting.
res_sorted <- res %>% arrange(desc(Index))
res_sorted$chr <- factor(res_sorted$chr, levels = res_sorted$chr)

# Dot plot: normalized index per chromosome, point size = raw event count.
ggplot(res_sorted, aes(x = chr, y = Index, size = n)) +
  geom_point(color = "red") +
  theme_bw() +
  theme(axis.text.x = element_text(size=10, angle = 270, hjust = 0.0, vjust = 0.5),
        axis.text.y = element_text(size=16),
        axis.title.x = element_text(face="plain", colour="black", size=14),
        axis.title.y = element_text(face="plain", colour="black", size=14),
        legend.title=element_blank(),
        legend.text = element_text(face="plain", colour="black", size=12)) +
  scale_fill_viridis_c(aesthetics = c("colour", "fill"),
                       option = "plasma",
                       limits = c(1, 650)) +
  ylab(paste("Normalized Number of sex-biased ASE")) +
  xlab("Chromosomes") +
  guides(size = guide_legend(title = "Number of ASE"))

write.table(res_sorted, file = "SplicingIndex_chr.tsv", sep = "\t", quote = FALSE,
            row.names = FALSE)
|
a846876939c57ac745eb96a4e9df37e8c76e038f
|
77a913a0a20295998a6bfd052ada3f6fe8b7ac8f
|
/presentation.R
|
6164ffe82aee02e9448181e4f772a91b12b39c9f
|
[] |
no_license
|
joelgombin/SeminR
|
314904dfda26559399876b8f4e145846aba1a11b
|
e1b3c95e7b8d9cb954381505025af495fb3aff1d
|
refs/heads/master
| 2021-01-10T20:28:45.421938
| 2012-10-15T13:11:33
| 2012-10-15T13:11:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,312
|
r
|
presentation.R
|
## Teaching script for a knitr-driven presentation on spatial data in R.
## Each "## @knitr <label>" marker delimits a chunk referenced from the
## presentation source; the labels are load-bearing -- do not rename them.

## @knitr initialisation
library(rgrs)
library(sp)
library(maptools)
## @knitr shp
library(rgdal)
# Read the "picardie" shapefile (communes of the Picardie region).
picardie <- readOGR("./donnees", "picardie")
picardie$STATUT<-as.factor(iconv(picardie$STATUT, from="latin1", to="utf8")) ## fix encoding problems (latin1 -> utf8)
picardie$codeINSEE <- as.character(picardie$INSEE_COM)
## @knitr shp2
summary(picardie)
## @knitr shp4
plot(picardie)
## @knitr data
# Explore the attribute table attached to the polygons.
names(picardie@data)
summary(picardie@data$NOM_DEPT)
## @knitr data2
# Subset: keep only the Somme department (INSEE code "80").
somme <- picardie[picardie@data$CODE_DEPT == "80",]
## @knitr union
# Dissolve commune polygons into department polygons.
require(gpclib)
gpclibPermit()
require(maptools)
dpts <- unionSpatialPolygons(picardie, IDs=picardie@data$CODE_DEPT)
plot(dpts)
## @knitr load
# Load the attribute data and check whether commune codes already include
# the department prefix.
load("./donnees/mini_picardie.Rdata")
head(mini_picardie$CODE_COMMU)
# No, they don't -- so rebuild the full INSEE code:
mini_picardie$codeINSEE <- paste(as.character(mini_picardie$CODE_D_PAR), as.character(mini_picardie$CODE_COMMU), sep="")
## @knitr carteprop
# Choropleth map of abstention (quartile breaks) over the Somme communes.
somme <- picardie[picardie@data$CODE_DEPT == "80",]
carte.prop(somme, mini_picardie, varname="AbsIns", sp.key="codeINSEE", data.key="codeINSEE", at=quantile(mini_picardie$AbsIns, c(0,.25,.5,.75,1)))
## @knitr carteeff
# Proportional-symbol map of population counts.
carte.eff(somme, mini_picardie, varname="Population", sp.key="codeINSEE", data.key="codeINSEE")
## @knitr cartequal
# Qualitative map of the urban typology (ZAUER classification).
pal <- c("orange", "yellow", "light green", "red")
carte.qual(somme, mini_picardie, varname="type_urbain", sp.key="codeINSEE", data.key="codeINSEE", palette=pal, posleg="bottomleft", main="Le découpage en ZAUER en Picardie", sub="source : INSEE.")
## @knitr fusion
# Overlay arrondissement boundaries (dissolved from communes) on the map.
arrdts <- unionSpatialPolygons(somme, IDs=somme@data$CODE_ARR)
carte.qual(somme, mini_picardie, varname="type_urbain", sp.key="codeINSEE", data.key="codeINSEE", palette=pal, posleg="bottomleft", main="Le découpage en ZAUER en Picardie", sub="source : INSEE.")
plot(arrdts, lwd=5, border = "red", add = TRUE)
## @knitr coloration
# Continuous diverging palette (RdBu ramp) for mean fiscal income, one break
# per observed value; department outlines drawn on top.
library(RColorBrewer)
colors <- brewer.pal(6, "RdBu")
pal <- colorRampPalette(colors)
carte.prop(picardie, mini_picardie, "revenu.fiscal.moyen", sp.key="codeINSEE", data.key="codeINSEE", at=as.integer(levels(as.factor(mini_picardie$revenu.fiscal.moyen))), border="transparent", palette=pal(length(levels(as.factor(mini_picardie$revenu.fiscal.moyen)))), posleg="none", main="Le revenu fiscal moyen des ménages par communes")
plot(dpts, add=T)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.