blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6cffa1b59ec6ca5791b8981e30f25ed1ac9ddebc
|
a13cb3956b57ab3a4dfadcb0e5b79478318b2715
|
/scripts/compute_size.R
|
91a9421e55d32a6c48dae77ce6bafe1cf47698a2
|
[] |
no_license
|
lecrabe/emd_rdc
|
c1feae88a03febd3fe8a4e732fd50bcbc42752ee
|
4898750dbb21011934d8ec37f19009d50d33fbd0
|
refs/heads/master
| 2023-01-02T07:07:22.166213
| 2020-10-30T13:09:52
| 2020-10-30T13:09:52
| 290,019,564
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,377
|
r
|
compute_size.R
|
# Compute per-patch size and date statistics for the GLAD change mask.
# The whole block is skipped once the date statistics file exists, so the
# expensive clump/statistics steps run only on the first invocation.
# NOTE(review): relies on globals defined elsewhere in the pipeline
# (date_stt_file, glad_msk_file, glad_clump_tmp, glad_clump, glad_stt_file,
# glad_date_bb) and on the OpenForis Toolkit (oft-*) and GDAL command-line
# tools being on the PATH — confirm against the calling script.
if(!file.exists(date_stt_file)){
#############################################################
#################### CLUMP
#############################################################
# Label connected components of the mask: each contiguous patch of change
# pixels receives a unique id in the output raster.
system(sprintf("oft-clump %s %s ",
glad_msk_file,
glad_clump_tmp
))
#############################################################
#################### COMPRESS
#############################################################
# Re-write the clump raster with LZW compression to save disk space.
system(sprintf("gdal_translate -co COMPRESS=LZW %s %s",
glad_clump_tmp,
glad_clump
))
#############################################################
#################### COMPUTE PATCH SIZE
#############################################################
# Histogram of mask values per clump id ("-um" = use mask); gives the
# pixel count (patch size) for each labelled patch.
system(sprintf("oft-his -i %s -o %s -um %s -maxval 3 ",
glad_msk_file,
glad_stt_file,
glad_clump
))
#############################################################
#################### GET DATE STATS PER PATCH
#############################################################
# Summary statistics of the GLAD date band within each patch.
system(sprintf("oft-stat -i %s -o %s -um %s ",
glad_date_bb,
date_stt_file,
glad_clump
))
#############################################################
#################### CLEAN
#############################################################
# Remove the uncompressed intermediate clump raster.
system(sprintf("rm -f %s",glad_clump_tmp))
}
|
92606515cc4cc1d8fdd183cf081c3a9def5f9c4d
|
3b936bab003c7d8ed6c29ab1959f2ca7f592e364
|
/itemset.coxpath.R
|
389e8e2ff6d702d1acd41ba71a859b8b43aef837
|
[] |
no_license
|
david-duverle/regularisation-path-following
|
4da6181f5d8acf63b0e357c45dbd961cd1c6429f
|
d47e568724b5fda938c0a3fc2519947c858d5e56
|
refs/heads/master
| 2021-01-01T17:43:30.558309
| 2013-10-18T01:08:54
| 2013-10-18T01:08:54
| 10,049,135
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 49,320
|
r
|
itemset.coxpath.R
|
# Compute the next step length (in lambda) along the L1-regularised Cox
# solution path for itemset features.
#
# corrector       : result of corrector.cox() at the current path point
#                   (coefficients b, active set, eta, wsum, gradient a)
# depth           : maximum itemset size passed to the C miner
# lambda2         : ridge penalty stabilising the Gram matrix inverse
# exprs.all       : samples x items indicator matrix with column names
# d               : event indicator vector (1 = event)
# rslist, wlist   : incremental risk sets and tie weights per event time
# min.lambda      : smallest lambda on the path (lambda is measured above it)
# max.arclength, frac.arclength : caps on the step expressed in arclength
# add.newvars     : kept for signature compatibility; not used in this body
# backshoot       : TRUE when stepping back after an overshoot
# approx.Gram     : TRUE -> diagonal approximation of the weight matrix
# h0              : previous step length, consulted only when backshooting
#
# Returns list(h = signed step length (negative = decreasing lambda),
#              db = coefficient direction for the active set,
#              newa = name of the itemset about to enter (or NULL),
#              arclength = |step| in the L1 arclength metric).
step.length.cox <- function(corrector, depth, lambda2, exprs.all, d, rslist, wlist,
min.lambda, max.arclength, frac.arclength,
add.newvars, backshoot, approx.Gram, h0 = NULL,
eps = .Machine$double.eps)
{
active <- corrector$active
force.active <- NULL
# Work with lambda relative to min.lambda so the path ends at 0.
lambda <- corrector$lambda - min.lambda
k <- length(active)
Lambda2 <- rep(lambda2, length(active))
b <- corrector$b[active]
eta <- corrector$eta
wsum <- corrector$wsum
n <- length(d)
A <- rep(0, n)
if (!approx.Gram)
AA <- matrix(0, n, n)
rset <- w <- NULL
# Accumulate the second-derivative weights of the Cox partial likelihood
# over the nested risk sets (risk sets only grow as i advances).
for (i in 1:sum(d == 1)) {
if (!is.null(rslist[[i]])) {
rset0 <- rset
rset <- c(rset0, rslist[[i]])
}
w <- c(rep(1, length(rset0)), wlist[[i]]) * eta[rset]
w1 <- w / wsum[i]
A[rset] <- A[rset] + w1 - w1^2
if (!approx.Gram) {
k <- length(rset)
# NOTE(review): w1 is indexed by rset although it is already
# rset-aligned — looks suspicious; confirm against the coxpath paper.
AA[1:k, 1:k] <- AA[1:k, 1:k] - outer(w1[rset], w1[rset])
}
}
# d(score)/db times X for the active columns: either the cheap diagonal
# approximation or the full weight matrix AA.
if (approx.Gram)
dhdb.x <- t(get.active.values(exprs.all, active) * A)
else {
diag(AA) <- A
dhdb.x <- t(get.active.values(exprs.all, active)) %*% AA
}
# Current correlations of active columns with the gradient weights.
C.active = apply(get.active.values(exprs.all, active) * corrector$a, 2, sum)
# C = apply(exprs.all * corrector$a, 2, sum)
# Path direction: solve (X'WX + lambda2 I) db = sign(C).
if (length(active) == 1)
db <- sign(C.active) / (dhdb.x %*% get.active.values(exprs.all, active) + lambda2)
else {
db <- (solve(dhdb.x %*% get.active.values(exprs.all, active) + diag(Lambda2)) %*% sign(C.active))
}
newa <- NULL
if (!backshoot) {
# Forward step: find the smallest h at which either an active
# coefficient hits zero or an inactive itemset's correlation reaches
# lambda - h (searched by the compiled miner get_min_itemset).
w.1 = - corrector$a
w.2 = - t(db) %*% t(get.active.values(exprs.all, active)) %*% AA
hd <- -b / db
if(length(hd[hd > eps]) > 0)
curmin = min(hd[hd > eps])
else
curmin = lambda
active.array = gene.set.name.to.num.array(active, colnames(exprs.all), depth)
idxs = as.integer(rep(0, depth))
res = .C("get_min_itemset", as.integer(depth), as.integer(exprs.all), as.integer(nrow(exprs.all)), as.integer(ncol(exprs.all)), as.double(w.1), as.double(w.2), as.integer(t(active.array)), as.integer(nrow(active.array)), as.double(corrector$lambda), as.double(curmin), idxs, as.integer(global.trace))
# res[[10]] carries the minimising step, res[[11]] the item indices.
h = as.double(res[10])
min.idxs = as.integer(res[[11]])
if(min.idxs[1] > 0)
names(h) = make.gene.set.name(colnames(exprs.all)[min.idxs[min.idxs > 0]])
if(global.trace) {
cat("\rStep length (min): ", h, sep="");
if(length(names(h)))
cat(" (", names(h), ")", sep="");
cat("\n")
}
if(min.idxs[1] > 0 && h < lambda) {
# if(names(h) %in% active) {
# print("### Argmin is in active set already")
# browser()
# }
newa = names(h)
}
# Respect both the fractional step and the arclength cap.
h <- min(h * frac.arclength, max.arclength / sum(abs(db)))
}
else {
# Backshoot: step lambda back up just far enough to undo the overshoot
# (h0 is the length of the step being partially retracted).
hd <- b / db
ii <- hd > eps & hd < -h0
if (any(ii))
h <- -max(hd[ii])
else
h = 0
}
list(h = -h, db = db, newa = newa, arclength = h * sum(abs(db)))
}
# Predictor step of the path algorithm: move the coefficient vector `b`
# along the direction `step$db`, scaled by the signed step length `step$h`.
predictor.cox <- function(b, step) {
  delta <- step$db * step$h
  b - delta
}
# Build the design columns for a set of itemsets named "a*b*c".
# Each returned column is the row-wise conjunction (AND) of the member
# feature columns of `exprs.all`; an itemset referencing an unknown feature
# yields an all-FALSE column.
#
# exprs.all : samples x features indicator matrix with column names
# subsets   : character vector of itemset names (features joined by '*')
# Returns a matrix with one column per subset; colnames set to `subsets`.
get.active.values <- function(exprs.all, subsets) {
  # is.character() is the robust test here: comparing class(x) against a
  # single string breaks for objects carrying more than one class.
  if(!is.character(subsets)) {
    cat("Subsets provided is not a string:", subsets, "\n")
    browser()
  }
  vals = sapply(strsplit(subsets, "*", fixed = TRUE),
                function(feature) {
                  if(all(feature %in% colnames(exprs.all))) {
                    if(length(feature) > 1)
                      # conjunction over the itemset's member features
                      apply(exprs.all[, feature], 1, all)
                    else
                      exprs.all[, feature];
                  }
                  else
                    # unknown feature: the itemset is never active
                    rep(FALSE, nrow(exprs.all))
                })
  colnames(vals) = subsets
  return(vals)
}
# Canonical name for a gene set: member gene names sorted alphabetically and
# joined with '*'. When either argument is a vector of length > 1 the
# function maps over it, pairing each element with the other argument, and
# returns a character vector.
#
# obj1 : gene name(s)
# obj2 : optional second gene name(s) to combine with obj1
make.gene.set.name <- function(obj1, obj2 = NULL) {
  # `&&` is the correct scalar operator for an if() condition (the original
  # used the vectorised `&`, which errors on length > 1 operands in R >= 4.3).
  if(!is.null(obj2) && length(obj1) > 1)
    return(sapply(obj1, make.gene.set.name, USE.NAMES=FALSE, obj2))
  if(length(obj2) > 1)
    return(sapply(obj2, make.gene.set.name, USE.NAMES=FALSE, obj1))
  if(is.null(obj2))
    arr = obj1
  else
    arr = c(obj1, obj2)
  # alphabetical order makes the name independent of argument order
  arr = arr[order(arr)]
  return(paste(arr, sep="", collapse="*"))
}
# Convert itemset names ("g1*g3") into a numeric matrix of 1-based feature
# indices, one row per itemset, sorted ascending and zero-padded to `depth`
# columns.
#
# gene.sets  : character vector of itemset names
# gene.names : full vector of feature names to index into
# depth      : maximum itemset size (number of output columns)
gene.set.name.to.num.array <- function(gene.sets, gene.names, depth = 3) {
  # TRUE is spelled out (T is a reassignable variable, not a keyword)
  l = lapply(gene.sets, function(gene.set) {
    ids = sapply(strsplit(gene.set, "*", fixed = TRUE)[[1]], function(gene) {
      which(gene.names == gene)
    })
    # ascending indices, padded with zeros up to `depth`
    c(as.numeric(ids[order(ids)]), rep(0, depth - length(ids)))
  })
  matrix(unlist(l), ncol = depth, byrow = TRUE)
}
# Corrector step of the path algorithm: given the tentative active set
# `tmpa`, solve the penalised Cox partial likelihood exactly via the
# compiled 'solve_coxpath' routine, then search for itemsets whose
# correlation with the residual weights violates the KKT conditions at the
# current lambda, using the external LCM frequent-itemset miner (./lcm)
# through per-process temporary weight files.
#
# Returns a list with updated coefficients (b), gradient weights (a),
# active-set bookkeeping (active / newactive / newa / inactive), eta and
# wsum for the next step-length computation, log partial likelihood (lp),
# degrees of freedom (df), and `backshoot` = TRUE when a newly entered
# coefficient jumped past bshoot.threshold (overshoot to be retracted).
corrector.cox <- function(exprs.all, depth, d, rslist, wlist, rept, method, active, tmpa,
force.active, lambda, lambda2, b0.tmpa,
bshoot.threshold, relax.lambda,
eps = .Machine$double.eps)
{
nobs <- nrow(exprs.all)
p <- length(tmpa)
if (p > 0) {
# Split coefficients into positive/negative parts (the solver works on
# 2p non-negative variables).
b2 <- c(pmax(b0.tmpa, 0), -pmin(b0.tmpa, 0))
# xa <- x[, tmpa, drop = FALSE]
xa = get.active.values(exprs.all, tmpa)
penalty <- rep(1, p)
# Pack data, event info and work arrays into the flat buffers z / mz
# expected by the C solver.
z <- c(as.vector(xa), d, rept, penalty, rep(0, nobs), rep(0, nobs))
mz <- c(nobs, method, lambda, lambda2, 0)
sol <- .C('solve_coxpath',
as.integer(2 * p),
as.double(b2),
as.double(rep(0, 2 * p)),
as.double(rep(1e300, 2 * p)),
as.integer(0),
as.double(z),
as.double(mz))
if (sol[[5]] != 0) {
cat('Convergence warning\n')
}
# Recombine positive and negative halves into signed coefficients.
b0.tmpa <- sol[[2]][1:p] - sol[[2]][(p + 1):(2 * p)]
names(b0.tmpa) = tmpa
# Unpack solver outputs from the z buffer: eta (relative risks) and the
# risk-set weight sums at event times; lp is the log partial likelihood.
i <- (p + 2) * nobs + p
eta <- sol[[6]][i + c(1:nobs)]
i <- i + nobs
wsum <- sol[[6]][i + c(1:nobs)][d == 1]
lp <- sol[[7]][5]
}
else {
# Null model: unit risks, weight sums filled in below.
eta <- rep(1, nobs)
wsum <- rep(0, sum(d))
}
# Gradient weights a[i] = event indicator minus accumulated hazard share,
# built over the nested risk sets.
rset <- NULL
a <- d == 1
for (i in 1:sum(a)) {
if (!is.null(rslist[[i]])) {
rset0 <- rset
rset <- c(rset0, rslist[[i]])
}
w <- c(rep(1, length(rset0)), wlist[[i]]) * eta[rset]
if (wsum[i] == 0)
wsum[i] <- sum(w)
a[rset] <- a[rset] - w / wsum[i]
}
if (p == 0)
lp <- -sum(log(wsum))
# Write signed gradient weights to per-process temp files consumed by lcm.
filename.prefix = paste('.temp.pid', Sys.getpid(), '.', sep='')
write(a, paste(filename.prefix, "weights.dat", sep=''), sep="\n")
write(-a, paste(filename.prefix, "neg_weights.dat", sep=''), sep="\n")
if (p > 0) {
# Mine itemsets whose positive weighted support exceeds the (slightly
# relaxed) current lambda — these violate the KKT conditions.
cmd = paste("./lcm C -l 1 -u ", depth, " -w ", filename.prefix, "weights.dat ", filename.prefix, "input.dat ", lambda * (1 - relax.lambda), " ", filename.prefix, "output.dat", sep='')
output = system(cmd, intern = TRUE, ignore.stdout = TRUE, ignore.stderr = TRUE)
fc <- file(paste(filename.prefix, "output.dat", sep=''))
vals = strsplit(readLines(fc), " ")
close(fc)
vals = sapply(vals, function(item) { make.gene.set.name(colnames(exprs.all)[as.numeric(item)]) })
newa <- as.character(vals[! vals %in% tmpa])
newactive <- as.character(vals[! vals %in% active])
# Same search on the negated weights (itemsets with negative correlation;
# note the 'CK' mode flag).
cmd = paste("./lcm CK -l 1 -u ", depth, " -w ", filename.prefix, "neg_weights.dat ", filename.prefix, "input.dat ", lambda * (1 - relax.lambda), " ", filename.prefix, "output.dat", sep='')
output = system(cmd, intern = TRUE, ignore.stdout = TRUE, ignore.stderr = TRUE)
fc <- file(paste(filename.prefix, "output.dat", sep=''))
vals = strsplit(readLines(fc), " ")
close(fc)
vals = sapply(vals, function(item) { make.gene.set.name(colnames(exprs.all)[as.numeric(item)]) })
if(length(vals) > 0) {
if(class(newactive) == 'list') #DEBUG
browser()
newa <- unique(append(newa, as.character(vals[! vals %in% tmpa])))
newactive <- unique(append(newactive, as.character(vals[! vals %in% active])))
if(class(newactive) == 'list') #DEBUG
browser()
}
if(sum(newa %in% tmpa) > 0) {
cat("ERROR: non-unique elements")
cat(newa)
cat(tmpa)
browser()
}
# i <- which(abs(corr) >= lambda * (1 - relax.lambda))
# Drop active variables whose coefficient collapsed to (numerical) zero.
i <- which(abs(b0.tmpa[active]) < eps)
inactive <- active[i]
if(length(i) > 0) {
# b0.tmpa = b0.tmpa[-i]
active = active[!active %in% inactive]
b0.tmpa[i] = 0
}
# Enter the newly found itemsets with zero coefficients.
if(length(newactive) > 0) {
new.b = rep(0, length(newactive))
names(new.b) = newactive
b0.tmpa = append(b0.tmpa, new.b)
}
active <- append(active, newactive)
}
else {
# Empty model: find the single itemset with maximal |correlation| —
# this determines the starting lambda of the path. lcm is re-run with a
# growing result cap until the reported maximum is actually attained.
cmd = paste("./lcm C -K 1 -l 1 -u 3 -w ", filename.prefix, "weights.dat ", filename.prefix, "input.dat 0", sep='')
max.val = as.numeric(system(cmd, intern = TRUE, ignore.stdout = F, ignore.stderr = TRUE))
lambda = 0
how.many.results = 1
while(max.val > lambda + 0.001) { #HACK!
how.many.results = how.many.results*100
cmd = paste("./lcm Ff -# ", how.many.results, " -l 1 -u 3 -w ", filename.prefix, "weights.dat ", filename.prefix, "input.dat 0 ", filename.prefix, "output.dat", sep="") # fine-tune maximum num of results parameter (curr. 1000)
output = system(cmd, intern = TRUE, ignore.stdout = TRUE, ignore.stderr = TRUE)
fc <- file(paste(filename.prefix, "output.dat", sep=""))
vals = strsplit(readLines(fc), " ")
close(fc)
# The weighted support is appended in parentheses at the end of each
# output line; strip the brackets and parse.
lambdas = lapply(vals, function(val) { as.double(substr(val[[length(val)]], 2, nchar(val[[length(val)]])-1)) })
i = which.max(lambdas)
lambda = lambdas[[i]]
c1 = as.integer(vals[[i]][1:length(vals[[i]])-1])
}
# Repeat on negated weights; keep whichever side gives the larger lambda.
cmd = paste("./lcm C -K 1 -l 1 -u 3 -w ", filename.prefix, "neg_weights.dat ", filename.prefix, "input.dat 0", sep='')
max.val = as.numeric(system(cmd, intern = TRUE, ignore.stdout = F, ignore.stderr = TRUE))
how.many.results = 1
while(max.val > lambda + 0.001) { #HACK!
how.many.results = how.many.results*100
cmd = paste("./lcm Ff -# ", how.many.results, " -l 1 -u 1 -w ", filename.prefix, "neg_weights.dat ", filename.prefix, "input.dat ", lambda , " ", filename.prefix, "output.dat", sep='')
output = system(cmd, intern = TRUE, ignore.stdout = TRUE, ignore.stderr = TRUE)
fc <- file(paste(filename.prefix, "output.dat", sep=''))
vals = strsplit(readLines(fc), " ")
close(fc)
lambdas = lapply(vals, function(val) { as.double(substr(val[[length(val)]], 2, nchar(val[[length(val)]])-1)) })
i = which.max(lambdas)
if(length(i) == 1 && lambdas[[i]] > lambda) {
lambda = lambdas[[i]]
c1 = as.integer(vals[[i]][1:length(vals[[i]])-1])
}
}
newa <- newactive <- colnames(exprs.all)[c1]
inactive <- NULL
active <- append(active, newactive)
if(length(c1) > 0) {
new.b = rep(0, length(newactive))
names(new.b) = newactive
b0.tmpa = c(b0.tmpa, new.b)
}
}
df <- length(active) - length(newactive)
if(length(active) == 0) #DEBUG
browser()
if(class(active) == 'list') #DEBUG
browser()
if(length(newactive) == 0)
newactive = NULL
else if(class(newactive) == 'list')
browser()
# Flag an overshoot when a freshly entered coefficient is already large.
backshoot <- (any(abs(b0.tmpa[newactive]) > bshoot.threshold))
list(eta = eta, wsum = wsum, b = b0.tmpa, lp = lp, active = active,
force.active = force.active, newactive = newactive, newa = newa,
inactive = inactive, corr = NULL, lambda = lambda, df = df,
backshoot = backshoot, a = a)
}
# Binarise a gene-expression matrix into up-/down-regulation indicator
# features, optionally appending pre-built extra features.
#
# exprs               : samples x genes numeric matrix
# extra.features      : optional indicator matrix, same number of rows as exprs
# deviation.threshold : z-score cutoff (default 1.5), used when `quantile` is NULL
# quantile            : if given, flag the bottom n/quantile samples per gene
#                       as repressed and the top n/quantile as expressed,
#                       instead of thresholding z-scores
# Returns a logical matrix with columns named dn.<j> / up.<j> (columns that
# are never active are dropped; NA entries become FALSE), with any extra
# features appended on the right.
extract.features <- function(exprs, extra.features = NULL, deviation.threshold = NULL, quantile = NULL) {
  if(is.null(deviation.threshold))
    deviation.threshold = 1.5
  # extracting features sets:
  m <- ncol(exprs)
  if(is.null(quantile)) {
    # z-score each column (TRUE spelled out: T/F are reassignable variables)
    exprs.sd = apply(exprs, 2, sd, na.rm = TRUE)
    exprs.sd = do.call("rbind", replicate(nrow(exprs), exprs.sd, simplify = FALSE))
    exprs.mean = apply(exprs, 2, mean, na.rm = TRUE)
    exprs.mean = do.call("rbind", replicate(nrow(exprs), exprs.mean, simplify = FALSE))
    exprs.std = (exprs - exprs.mean)/exprs.sd
    exprs.expressed = (exprs.std >= deviation.threshold)
    exprs.repressed = (exprs.std <= -deviation.threshold)
  }
  else {
    # rank-based variant: flag the extreme n/quantile samples per gene
    n = nrow(exprs)
    exprs.order = apply(exprs, 2, order)
    exprs.expressed = matrix(FALSE, n, m)
    exprs.repressed = matrix(FALSE, n, m)
    for(i in 1:m) {
      exprs.repressed[exprs.order[1:(n/quantile), i], i] = TRUE;
      exprs.expressed[exprs.order[(n * (quantile-1)/quantile):n, i], i] = TRUE;
    }
  }
  exprs.all = cbind(exprs.repressed, exprs.expressed)
  colnames(exprs.all) = c(paste('dn.', seq(m), sep=''), paste('up.', seq(m), sep=''))
  # drop features that never fire; treat NA as inactive
  s = apply(exprs.all, 2, any)
  exprs.all = exprs.all[, s > 0 & !is.na(s)]
  exprs.all[is.na(exprs.all)] = FALSE
  m = ncol(exprs.all)
  if(!is.null(extra.features)) {
    if(nrow(extra.features) != nrow(exprs)) {
      stop("Wrong input: extra features need to have same number of row as gene expression matrix")
    }
    exprs.all = cbind(exprs.all, extra.features)
    colnames(exprs.all)[seq(m+1, m+ncol(extra.features))] = colnames(extra.features)
  }
  return(exprs.all)
}
# Fit the full L1-regularised Cox regression path over itemset features
# (predictor-corrector path following with itemset mining for variable
# entry). Requires the compiled shared objects solve_coxpath.so and
# explore_itemset.so in the working directory, plus the ./lcm binary.
#
# data          : list(x = expression matrix, time, status, [extra.features]),
#                 or list(data, gene.symbols) — detected below
# gene.symbols  : optional symbol table used for plotting labels
# depth         : maximum itemset size
# nopenalty.subset : variables forced into the model without penalty
# method        : tie handling ('breslow' or 'efron')
# lambda2       : ridge penalty stabilising the path direction
# min.steps/max.steps/max.norm/min.lambda/max.vars : stopping rules
#                 (note: max.steps/max.norm/min.lambda defaults reference n
#                 and m, which are assigned inside the body — they rely on
#                 R's lazy argument evaluation)
# max.arclength/frac.arclength/add.newvars/bshoot.threshold/relax.lambda/
# approx.Gram   : path-following tuning knobs passed to the step routines
# deviation.threshold, quantile : feature binarisation (see extract.features)
# max.time.per.step : wall-clock budget per step before the path is cut short
#
# Returns an object of class 'itemset.coxpath' with the lambda sequence,
# predictor/corrector coefficients per step, df, log-likelihood, AIC/BIC
# and the add/drop actions taken at each step.
itemset.coxpath <- function(data, gene.symbols = NULL, depth = 3, nopenalty.subset = NULL,
method = c('breslow', 'efron'), lambda2 = 1e-5,
min.steps = 10, max.steps = 10 * min(n, m), max.norm = 100 * m,
min.lambda = (if (m >= n) 1e-3 else 0), max.vars = Inf,
max.arclength = Inf, frac.arclength = 1, add.newvars = 1,
bshoot.threshold = 0.1, relax.lambda = 1e-7,
approx.Gram = FALSE, standardize = TRUE,
eps = .Machine$double.eps, trace = FALSE, deviation.threshold = NULL, quantile = NULL,
max.time.per.step = 30 * depth)
{
# Load the compiled solver and itemset-search routines; expose the trace
# flag globally (read by step.length.cox).
dyn.load("solve_coxpath.so")
dyn.load("explore_itemset.so")
assign("global.trace", trace, envir = .GlobalEnv)
filename.prefix = paste('.temp.pid', Sys.getpid(), '.', sep='')
# Accept data passed as list(data, gene.symbols).
if(is.null(gene.symbols) && length(data) == 2 && class(data) == 'list' && ncol(data[[1]]$x) == nrow(data[[2]])) {
gene.symbols = data[[2]]
data = data[[1]]
}
call <- match.call()
method <- match.arg(method)
mthd <- switch(method, breslow = 1, efron = 2)
exprs <- data$x
time <- data$time
status <- data$status
n <- length(time)
if(nrow(exprs) > ncol(exprs)) {
print("Sanity check failed: input data has wrong dimensions: more rows than columns.")
return(NULL);
}
# Binarise expression into up/down indicator features.
exprs.all = extract.features(exprs, data$extra.features, deviation.threshold, quantile)
if(sum(is.na(exprs.all)) > 0) {
print("ERROR: exprs.all contains NA values")
browser()
}
m = dim(exprs.all)[2]
# Sort samples by decreasing survival time (censored before events at ties)
# so risk sets are nested suffixes.
o <- order(status)
oo <- o[order(time[o], decreasing = TRUE)]
exprs.all <- exprs.all[oo, ]
time <- time[oo]
status <- status[oo]
complete <- which(status == 1)
nnc <- length(complete)
rept <- rep(0, n)
# Dump the transaction database (active feature ids per sample) for lcm.
exprs.sets = apply(exprs.all, 1, which)
unlink(paste(filename.prefix, "input.dat", sep=''))
lapply(exprs.sets, function(line) { write(line, paste(filename.prefix, "input.dat", sep=''), n=dim(exprs.all)[2], append=TRUE)} );
# rept[i]: number of tied events at/after sample i with the same time.
for (i in complete)
rept[i] <- sum(time[i:n] == time[i] & status[i:n] == 1)
# Build incremental risk sets (rslist) and tie weights (wlist) per event.
rslist <- wlist <- vector('list', length = nnc)
for (i in 1:nnc) {
if (i == 1) {
ii <- time >= time[complete[1]]
rslist[[1]] <- which(ii)
} else if (rept[complete[i]] >= rept[complete[i] - 1]) {
ii <- (time >= time[complete[i]]) & (time < time[complete[i - 1]])
rslist[[i]] <- which(ii)
}
# NOTE(review): when neither branch runs, `ii` is carried over from the
# previous iteration — presumably intentional for tied events; confirm.
wlist[[i]] <- rep(1, sum(ii))
if (mthd == 2) {
# Efron correction for tied event times.
if (rept[complete[i]] > 0) {
tie <- time[ii] == time[complete[i]] & status[ii] == 1
di <- max(rept[ii][tie])
wlist[[i]][tie] <- wlist[[i]][tie] - (di - rept[complete[i]]) / di
}
}
}
if (frac.arclength > 1 || frac.arclength <= 0) {
frac.arclength <- 1
cat('frac.arclength should be in (0,1]. frac.arclength is set to 1.\n')
}
if (max.arclength < Inf && frac.arclength < 1) {
frac.arclength <- 1
cat(paste('frac.arclength<1 can be used only if max.arclength=Inf.',
'frac.arclength is set to 1.\n'))
}
n.repeat <- n.repeat1 <- ceiling(1 / frac.arclength)
# Per-step storage (coefficients kept as lists of named vectors since the
# active set changes along the path).
lam.vec <- step.len <- rep(0, max.steps)
# bmat.pred <- bmat.corr <- cmat <- matrix(0, nrow = max.steps, ncol = ncol(exprs.all))
bmat.pred <- bmat.corr <- cmat <- list();
lp <- df <- new.df <- rep(0, max.steps)
new.A <- rep(FALSE, max.steps)
actions <- vector('list', length = max.steps)
backshoot <- FALSE
b.tmpa <- NULL
force.active <- NULL
# Initial corrector run with an empty model determines the starting lambda
# and the first variable to enter.
corrector <- corrector.cox(exprs.all, depth, status, rslist, wlist, rept, mthd,
nopenalty.subset, nopenalty.subset,
force.active, 0, lambda2, b.tmpa,
bshoot.threshold, relax.lambda)
k <- 1
b.tmpa = bmat.pred[[k]] = bmat.corr[[k]] = corrector$b
lam.vec[k] <- lambda <- corrector$lambda
new.df[k] <- df[k] <- corrector$df
lp[k] <- corrector$lp
new.A[k] <- TRUE
actions[[k]] <- active <- corrector$active
cox.print.debug('Lambda = ', lambda, '. Let the first factor in.\n#', k)
cox.print.debug('\t', active, ' added')
if (max.steps <= 1)
stop('Increase max.steps.')
if (max.norm <= sum(abs(b.tmpa)))
stop('Increase max.norm.')
if (lambda <= min.lambda)
stop('Decrease min.lambda')
if (max.vars <= 1)
stop('Increase max.vars.')
# Main predictor-corrector loop.
while(TRUE) {
if (!backshoot) {
arclength <- max.arclength
if (new.A[k])
frac.arclength1 <- frac.arclength
else {
frac.arclength1 <- 1
if (n.repeat1 > 1 && max.arclength == Inf)
arclength <- step$arclength
}
k <- k + 1
cox.print.debug('\n#', k, ' (lambda: ', lambda, ')')
gc();
# Predictor: compute step length/direction (timed against the budget).
elapsed = system.time(step <- step.length.cox(corrector, depth, lambda2, exprs.all, status, rslist, wlist, min.lambda, arclength, frac.arclength1, add.newvars, backshoot, approx.Gram))
if(elapsed[1] > max.time.per.step & k > min.steps) {
cat("Step took ", elapsed[1], "s (max.time.per.step: ", max.time.per.step, ").\nEnding algorithm.", sep='')
max.steps = k
}
b.tmpa[active] <- predictor.cox(b.tmpa[active], step)
bmat.pred[[k]] <- b.tmpa[active]
step.len[k - 1] <- h <- step$h
lam.vec[k] <- lambda <- lambda + h
# Append the entering variable(s) with zero coefficients.
tmpa <- append(active, step$newa)
new.b = rep(0, length(step$newa))
names(new.b) = step$newa
b.tmpa = append(b.tmpa, new.b)
b.tmpa = b.tmpa[tmpa]
a <- abs(b.tmpa)
}
else {
# Backshoot: retract part of the previous step after an overshoot.
cox.print.debug('\n#', k, ' (backshoot):')
step <- step.length.cox(corrector, depth, lambda2, exprs.all, status, rslist, wlist, min.lambda, Inf, 1, add.newvars, backshoot, approx.Gram, h)
step.len[k - 1] <- h + step$h
h <- step$h
lam.vec[k] <- lambda <- lambda + h
a <- abs(b.tmpa)
}
# Corrector: re-solve at the new lambda; repeat while the KKT search
# keeps turning up additional violating itemsets.
corrector <- corrector.cox(exprs.all, depth, status, rslist, wlist, rept, mthd, active, tmpa, force.active, lambda, lambda2, b.tmpa, bshoot.threshold, relax.lambda)
newa <- corrector$newa
while(length(newa) > 0) {
cox.print.debug('\nRepeating step ', k, ':')
tmpa <- append(tmpa, newa)
new.b = rep(0, length(newa))
names(new.b) = newa
b.tmpa = append(b.tmpa, new.b)
a <- abs(b.tmpa)
corrector <- corrector.cox(exprs.all, depth, status, rslist, wlist, rept, mthd,
active, tmpa, force.active, lambda, lambda2,
b.tmpa, bshoot.threshold, relax.lambda)
newa <- corrector$newa
b.tmpa = corrector$b
}
# Record additions and (as "-name") removals for this step.
newaction <- corrector$newactive
if(length(corrector$inactive) > 0)
newaction = append(newaction, paste("-", corrector$inactive, sep=""))
if(class(active) == 'list') #DEBUG
browser()
if (length(corrector$active) <= n) {
if (length(newaction) > 0) {
if (corrector$backshoot && !backshoot) {
cox.print.debug('\nOvershooting occurred: increasing lambda again')
backshoot <- TRUE
n.repeat1 <- 1
}
else {
active <- corrector$active
b.tmpa <- corrector$b
actions[[k]] <- newaction
new.df[k] <- corrector$df
new.A[k] <- TRUE
if (trace) {
cat("added/dropped:\n")
cat(paste("\t", newaction))
}
backshoot <- FALSE
n.repeat1 <- n.repeat
}
}
else {
active <- corrector$active
b.tmpa <- corrector$b
backshoot <- FALSE
n.repeat1 <- max(n.repeat1 - 1, 1)
}
}
# Commit the corrected step and test the stopping rules.
if (!backshoot) {
bmat.corr[[k]] <- b.tmpa[active]
cmat[k] <- corrector$corr
lp[k] <- corrector$lp
df[k] <- corrector$df
if (lambda <= min.lambda
|| k == max.steps
|| length(corrector$active) > min(n, max.vars)
|| sum(corrector$a) >= max.norm) {
if (length(corrector$active) > min(n, max.vars))
k <- k - 1
if (lambda <= min.lambda)
cox.print.debug('\nLambda = ', min.lambda, '\n')
else if (k == max.steps) {
cox.print.debug('\nMaximum steps (', max.steps, ') taken.\n')
}
else if (length(corrector$active) > min(n, max.vars))
cox.print.debug('\nNumber of active variables has reached its maximum.\n')
else
cox.print.debug('\n|beta| >= ', max.norm, '\n')
break
}
}
}
# Clean up the per-process temp files used by lcm.
unlink(paste(filename.prefix, "input.dat", sep=''))
unlink(paste(filename.prefix, "output.dat", sep=''))
unlink(paste(filename.prefix, "neg_weights.dat", sep=''))
unlink(paste(filename.prefix, "weights.dat", sep=''))
# Truncate per-step storage to the steps actually taken and assemble the
# result object.
bmat.pred <- bmat.pred[1:k]
bmat.corr <- bmat.corr[1:k]
cmat <- cmat[1:k]
df <- df[1:k]
lp <- lp[1:k]
aic <- -2 * lp + 2 * df
bic <- -2 * lp + log(n) * df
object <- list(call = call, lambda = lam.vec[1:k], lambda2 = lambda2,
step.length = abs(step.len[1:(k-1)]), corr = cmat,
new.df = new.df[1:k], df = df, loglik = lp, aic = aic,
bic = bic, b.predictor = bmat.pred, b.corrector = bmat.corr,
new.A = new.A[1:k], actions = actions[1:k],
sdx = rep(1, m), xnames = colnames(exprs.all), method = method,
nopenalty.subset = nopenalty.subset,
standardize = standardize, gene.symbols = gene.symbols, deviation.threshold = deviation.threshold, quantile = quantile)
class(object) <- 'itemset.coxpath'
object
}
# S3 plot method for 'itemset.coxpath' objects: coefficient profiles (or
# AIC/BIC curves) along the regularisation path.
#
# x               : fitted itemset.coxpath object
# xvar            : x-axis — L1 norm, lambda, step index, or 'stepcoeffs'
#                   (step index with entering/leaving variables labelled
#                   below the axis)
# type            : 'coefficients' profile, or 'aic'/'bic' curve
# plot.all.steps  : include steps where the active set did not change
# xlimit, plot.until.step : restrict the plotted range
# predictor       : also draw dashed predictor-step segments
# omit.zero, breaks, mar, main : display options (breaks draws grey vertical
#                   lines at steps where the active set changed)
# NOTE(review): uses line.to.gene.symbol() (defined elsewhere) for labels
# and TeachingDemos::spread.labs when available to de-overlap axis labels.
plot.itemset.coxpath <- function(x, xvar = c('norm', 'lambda', 'step', 'stepcoeffs'),
type = c('coefficients', 'aic', 'bic'),
plot.all.steps = FALSE, xlimit = NULL, plot.until.step = NULL,
predictor = FALSE, omit.zero = TRUE, breaks = TRUE,
mar = NULL, main = NULL, eps = .Machine$double.eps,
...)
{
object <- x
# Select the steps to display: by default only those where the active set
# changed (new.A), always including the last step.
ii <- object$new.A
if (plot.all.steps) {
ii[!ii] <- TRUE
} else {
ii[length(ii)] <- TRUE
}
if(!is.null(plot.until.step))
ii[plot.until.step:length(ii)] = FALSE
lam <- object$lambda[ii]
xvar <- match.arg(xvar)
type <- match.arg(type)
coef.pred <- object$b.predictor[ii]
coef.corr <- object$b.corrector[ii]
# NOTE(review): coef.pred is a list here, so ncol() yields NULL; m is only
# used when predictor=TRUE — confirm that path before relying on it.
m <- ncol(coef.pred)
k <- length(coef.corr)
# x-axis values per selected step.
s <- switch(xvar,
norm = sapply(coef.corr, function(item) { if(length(item) == 0) 0 else sum(abs(item)) }),
lambda = lam,
step = seq(k),
stepcoeffs = seq(k))
# Clip the axis range (lambda decreases along the path, hence the flip).
if (xvar != 'lambda') {
if (is.null(xlimit)) xlimit <- max(s)
else if (xlimit <= min(s)) stop('Increase xlimit.')
xi <- s <= xlimit
} else {
if (is.null(xlimit)) xlimit <- min(s)
else if (xlimit >= max(s)) stop('Decrease xlimit.')
xi <- s >= xlimit
}
# Expand the per-step named coefficient vectors into dense matrices with
# one column per variable ever active (zeros where a variable is absent).
coef.names = unique(names(unlist(coef.corr)))
coef.corr = t(sapply(coef.corr, function(item) {
new.item = rep(0, length(coef.names))
names(new.item) = coef.names
new.item[names(item)] = item
return(new.item)
}));
coef.pred = t(sapply(coef.pred, function(item) {
new.item = rep(0, length(coef.names))
names(new.item) = coef.names
new.item[names(item)] = item
return(new.item)
}))
colnames(coef.corr) = colnames(coef.pred) = coef.names = line.to.gene.symbol(coef.names, object$gene.symbols)
k <- max(which(xi))
xname <- switch(xvar, norm = '|beta|', lambda = 'lambda', step = 'step', stepcoeffs = '')
# Margins: widen the right side for coefficient labels, the bottom for
# the stepcoeffs variable labels.
if (!is.null(mar))
par(mar = mar)
else {
mar = c(2.8, 2.5, 2, 4)
if(type == "coefficients")
mar[4] = 10.5
if(xvar == "stepcoeffs")
mar[1] = 9
par(mar = mar)
}
if(xvar == 'stepcoeffs')
xaxt = 'n'
else
xaxt = NULL
if (type == 'aic') {
aic <- object$aic[ii][xi]
plot(s[xi], aic, xlab = xname, ylab = 'AIC', type = 'o', pch = 16,
cex = 0.3, xaxt = xaxt, xaxs = "i", mgp=c(1.5, 0.4, 0), ...)
} else if (type == 'bic') {
bic <- object$bic[ii][xi]
plot(s[xi], bic, xlab = xname, ylab = 'BIC', type = 'o', pch = 16, cex = 0.3, xaxt = xaxt, xaxs = "i", mgp=c(1.5, 0.4, 0), ...)
# if (is.null(main)) title('BIC', line = 2.5)
# else title(main, line = 2.5)
} else {
ylab <- ifelse(object$standardize, 'coefficients', 'Coefficients')
yvals = coef.corr[xi, ]
# Hide runs of consecutive zeros (variable not yet / no longer active)
# by turning them into NA, except at the step where the variable acted.
# yvals[c(2:dim(yvals)[1], dim(yvals)[1]),] == 0 &
yvals[yvals == 0 & yvals[c(1, 1:dim(yvals)[1]-1),] == 0] = NA
for(i in 1:dim(yvals)[2]) {
if(i > length(object$actions[ii]))
next #DEBUG: browser()
yvals[i, names(object$actions[ii][[i]])][is.na(yvals[i, names(object$actions[ii][[i]])])] = 0
}
matplot(s[xi], yvals, xlab = xname, type = 'o', pch = '*',
ylab = ylab, lty = 1, xaxt = xaxt, xaxs = "i", mgp=c(1.5, 0.4, 0), ...)
abline(h = 0, lty = 3)
# Label each trajectory at its final value on the right-hand axis,
# spreading labels apart when TeachingDemos is available.
at = coef.corr[k,]
at = at[!is.na(yvals[k,])]
coef.names = coef.names[!is.na(yvals[k,])]
axis(4, at = at, labels = FALSE, tick = TRUE)
if(require(TeachingDemos)) {
mindiff = 0.015 #1/(max(at) - min(at))
# browser()
at = spread.labs(at, mindiff=mindiff)
}
axis(4, at = at, labels = coef.names, cex = 0.5, adj = 0, las = 1, cex.axis=0.55, tick = FALSE)
if (predictor) {
# Dashed segments showing the predictor step before correction.
for (i in 1:m) {
segments(s[xi][-k], coef.corr[xi, ][-k,i], s[xi][-1],
coef.pred[xi, ][-1,i], lty = 2, col = i)
}
}
}
if (breaks) {
# Vertical guides at steps where the active set changed, annotated with
# the new degrees of freedom on the top axis.
new <- object$new.A[ii] & xi
axis(3, at = s[new], labels = object$new.df[ii][new], cex = 0.6)
abline(v = s[new], col = "grey", lwd=0.5)
}
if(xvar == 'stepcoeffs') {
# Bottom axis: names of variables added (black) and removed (red,
# prefixed '-' in actions) at each step.
added.labs = NULL
removed.labs = NULL
added.at = NULL
removed.at = NULL
for(i in seq(1, length(s[xi]))) {
if(i > length(object$actions[ii]) || is.null(object$actions[ii][[i]]))
next #DEBUG: browser()
to.add = line.to.gene.symbol(object$actions[ii][[i]][substr(object$actions[ii][[i]], 1, 1) != '-'], object$gene.symbols)
if((length(to.add) > 1) || to.add != "") {
added.labs = append(added.labs, to.add)
added.at = append(added.at, rep(i, length(to.add)))
}
to.remove = line.to.gene.symbol(object$actions[ii][[i]][substr(object$actions[ii][[i]], 1, 1) == '-'], object$gene.symbols)
if((length(to.remove) > 0) || to.remove != "") {
removed.labs = append(removed.labs, to.remove)
removed.at = append(removed.at, rep(i, length(to.remove)))
}
}
all.at = spread.labs(c(added.at, removed.at), mindiff=0.3)
at = all.at[1:length(added.at)]
axis(1, at = at, labels = added.labs, cex = 0.5, adj = 1, las = 3, cex.axis=0.55, tick = FALSE, col.axis=1, hadj = 0.9)
at = all.at[(length(added.at)+1):(length(added.at)+length(removed.at))]
axis(1, at = at, labels = removed.labs, cex = 0.5, adj = 1, las = 3, cex.axis=0.55, tick = FALSE, col.axis=2, hadj = 0.9)
}
}
# Predict from an "itemset.coxpath" fit at one or more points `s` along the
# regularization path.
#
# object - fitted itemset.coxpath object; uses $b.corrector (one named sparse
#          coefficient vector per path step), $lambda, $new.A, $method, $xnames.
# data   - list with $x, $extra.features, $time, $status; required for any
#          type other than 'coefficients'.
# s      - path position(s), interpreted according to `mode`.
# type   - 'coefficients' (interpolated betas), 'loglik' (partial
#          log-likelihood), 'lp'/'risk' (centered linear predictor / relative
#          risk), or 'coxph' (refit at the active set; flagged unfinished).
# mode   - how `s` indexes the path: raw step number, fraction of the final L1
#          norm, absolute L1 norm, fraction of the log-lambda range, or raw
#          lambda value.
# exprs.all, deviation.threshold, quantile - feature-extraction controls,
#          forwarded to extract.features() when exprs.all is not supplied.
predict.itemset.coxpath <- function(object, data, s,
                                    type = c('coefficients', 'loglik', 'lp', 'risk', 'coxph'),
                                    mode = c('step', 'norm.fraction', 'norm', 'lambda.fraction', 'lambda'),
                                    eps = .Machine$double.eps, exprs.all = NULL, deviation.threshold = NULL, quantile = NULL, ...) {
  mode <- match.arg(mode)
  type <- match.arg(type)
  # Every type other than plain coefficients needs data to evaluate on.
  if (missing(data) && type != 'coefficients') {
    warning('No data argument; type switched to coefficients')
    type <- 'coefficients'
  }
  if (!missing(s)) {
    if (length(s) > 1 && type == 'coxph') {
      warning('Length(s) > 1. Only the first element is used.')
      s <- s[1]
    }
  }
  b <- object$b.corrector
  if(is.null(b) || length(b) == 0) {
    cat("No corrector parameters provided. Cannot fit.\n")
    return(NULL)
  }
  # Union of all features that ever entered the model along the path.
  coef.names = unique(names(unlist(b)))
  if(is.null(exprs.all))
    exprs.all = extract.features(data$x, data$extra.features, deviation.threshold = deviation.threshold, quantile = quantile)
  x.used = get.active.values(exprs.all, coef.names)
  one <- rep(1, nrow(x.used))
  meanx.used <- drop(one %*% x.used) / nrow(x.used)
  # Expand each step's sparse named vector to the full feature set, yielding a
  # (steps x features) coefficient matrix.
  b = t(sapply(b, function(item) {
    new.item = rep(0, length(coef.names))
    names(new.item) = coef.names
    new.item[names(item)] = item
    return(new.item)
  }))
  k <- nrow(b)
  steps <- seq(k)
  if (missing(s)) {
    s <- steps[object$new.A]
    if (mode != 'step') {
      warning('no s argument; mode switched to step')
      mode <- 'step'
    }
  }
  # Map the path steps onto the chosen abscissa `sb`, validating that every
  # requested `s` lies inside its range.
  sb <- switch(mode, step = {
    if (any(s < 1) || any(s > k))
      stop('Argument s out of range')
    steps
  }, norm.fraction = {
    if (any(s > 1) || any(s < 0))
      stop('Argument s out of range')
    bnorm <- apply(abs(b), 1, sum)
    bnorm / bnorm[k]
  }, norm = {
    bnorm <- apply(abs(b), 1, sum)
    if (any(s > bnorm[k]) || any(s < bnorm[1]))
      stop('Argument s out of range')
    bnorm
  }, lambda.fraction = {
    # BUG FIX: this branch previously called step(...) — a typo for stop() —
    # which invoked stats::step on a character string instead of raising the
    # out-of-range error.
    if (any(s > 1) || any(s < 0))
      stop('Argument s out of range')
    lam <- object$lambda
    lam[lam < eps] <- eps
    lam <- log(lam)
    (lam - min(lam)) / (max(lam) - min(lam))
  }, lambda = {
    lam <- object$lambda
    if (any(s > lam[1]) || any(s < lam[k]))
      stop('Argument s out of range')
    lam
  })
  # Linearly interpolate coefficients between the two surrounding path steps.
  sfrac <- (s - sb[1]) / (sb[k] - sb[1])
  sb <- (sb - sb[1]) / (sb[k] - sb[1])
  usb <- unique(sb)
  useq <- match(usb, sb)
  sb <- sb[useq]
  b <- b[useq, ]
  coord <- approx(sb, seq(sb), sfrac)$y
  left <- floor(coord)
  right <- ceiling(coord)
  newb <- (((sb[right] - sfrac) * b[left, , drop = FALSE] +
            (sfrac - sb[left]) * b[right, , drop = FALSE]) /
           (sb[right] - sb[left]))
  # Exact hits need no interpolation (and would otherwise divide by zero).
  newb[left == right, ] <- b[left[left == right], ]
  coef <- newb
  if (type == 'coefficients') {
    fit <- coef
    dimnames(fit) <- list(s, object$xnames)
  } else if (type == 'loglik') {
    fit <- logplik(x.used, data$time, data$status, t(coef), object$method)
    names(fit) <- s
  } else if (type == 'lp' || type == 'risk') {
    # Center the linear predictor at the feature means.
    b0 <- coef %*% meanx.used
    fit <- scale(x.used %*% t(coef), b0, FALSE)
    if (type == 'risk') fit <- exp(fit)
    dimnames(fit) <- list(seq(nrow(x.used)), s)
  } else {
    # type == 'coxph': refit at the active set. Flagged unfinished upstream;
    # kept as-is (drops into the browser for interactive inspection).
    print("ERROR: not implemented yet")
    browser()
    coef <- drop(coef)
    active <- abs(coef) > eps
    coef <- coef[active]
    x <- x.used[, active, drop = FALSE]
    time <- data$time
    status <- data$status
    fit <- coxph(Surv(time, status) ~ x, method = object$method)
    junk <- logplik(x, time, status, coef, object$method, TRUE)
    w <- junk$w
    dmat <- junk$dmat
    oo <- junk$oo
    a <- sum(active)
    info <- matrix(0, a, a)
    # Accumulate the observed information over event times.
    for (i in 1:sum(status == 1)) {
      ind <- dmat[, i] > 0
      xr <- x[oo[ind], , drop = FALSE]
      wr <- w[ind, i]
      v1 <- xr * wr
      v2 <- apply(v1, 2, sum)
      info <- info + t(xr) %*% v1 - outer(v2, v2)
    }
    fit$coefficients <- coef
    fit$var <- solve(info)
    fit$loglik <- c(fit$loglik[1], junk$loglik)
    fit$iter <- fit$residuals <- NULL
    fit$linear.predictors <- junk$eta - sum(coef * meanx.used[active])
    fit$method <- object$method
    fit$assign <- seq(a)
    fit$wald.test <- sum(coef*(info %*% coef))
  }
  attr(fit, 's') <- s
  attr(fit, 'fraction') <- sfrac
  attr(fit, 'mode') <- mode
  return(fit)
}
# Cox partial log-likelihood for a matrix of coefficient vectors.
#
# x       - n x p model matrix.
# time    - follow-up times.
# status  - event indicator (1 = event observed).
# b       - p x k matrix; one log-likelihood is returned per column.
# method  - tie handling: 'breslow' (default) or 'efron'.
# return.all - if TRUE also return the risk-set weights, ordering and risk
#           matrix needed by predict.itemset.coxpath's 'coxph' branch.
#           NOTE(review): the returned `w` comes from the LAST column of `b`
#           only — fine for the single-column use upstream; confirm otherwise.
logplik <- function(x, time, status, b, method = c('breslow', 'efron'),
                    return.all = FALSE)
{
  method <- match.arg(method)
  n <- length(time)
  # Order by time, breaking ties so that events come first.
  o <- order(status, decreasing=T)
  oo <- o[order(time[o])]
  time <- time[oo]
  status <- status[oo]
  # rept[i]: number of events at position i or later tied with time[i]
  # (drives the Efron tie correction below).
  rept <- rep(0, n)
  for (i in seq_len(n))
    rept[i] <- sum(time[i:n] == time[i] & status[i:n] == 1)
  complete <- which(status == 1)
  nnc <- length(complete)
  if (nnc == 0) {
    # (An unreachable browser() call after this stop() was removed.)
    stop('No complete observation. Failed to compute partial likelihood.')
  }
  # dmat[, i]: risk-set membership for the i-th event, with Efron
  # down-weighting of tied events.
  dmat <- matrix(0, n, nnc)
  for (i in seq_len(nnc)) {
    dmat[time >= time[complete[i]], i] <- 1
    if (method == 'efron') {
      if (rept[complete[i]] > 0) {
        tie <- time == time[complete[i]] & status == 1
        di <- max(rept[tie])
        dmat[tie, i] <- dmat[tie, i] - (di - rept[complete[i]]) / di
      }
    }
  }
  eta <- x %*% b
  eeta <- exp(eta)
  k <- ncol(eta)
  loglik <- rep(0, k)
  for (i in seq_len(k)) {
    w <- dmat * eeta[oo, i]
    wsum <- apply(w, 2, sum)
    loglik[i] <- sum(eta[oo, i][status == 1]) - sum(log(wsum))
  }
  if (return.all) {
    return(list(loglik = loglik, w = scale(w, F, wsum), eta = eta,
                dmat = dmat, oo = oo))
  } else {
    return(loglik)
  }
}
# Print method for itemset.coxpath fits: shows the generating call, then one
# line per path step listing the itemset terms (translated to gene symbols)
# that entered or left the model at that step.
print.itemset.coxpath <- function(x, ...)
{
  cat('Call:\n')
  dput(x$call)
  actions <- line.to.gene.symbol(x$actions, x$gene.symbols)
  # seq_along (rather than 1:length) so an empty action list prints nothing
  # instead of failing on actions[[1]].
  for (i in seq_along(actions)) {
    if (length(actions[[i]]) > 0) {
      cat('Step', i, ':')
      cat(paste("\t", actions[[i]]))
      cat('\n')
    }
  }
}
# Summary method for itemset.coxpath fits: prints the generating call, then
# returns a table of model size and fit statistics (Df, log partial
# likelihood, AIC, BIC) at each step where the active set changed; the final
# step is always included.
summary.itemset.coxpath <- function(object, ...)
{
  cat('Call:\n')
  dput(object$call)
  keep <- object$new.A
  keep[length(keep)] <- TRUE
  stats <- data.frame(Df = object$df[keep],
                      Log.p.lik = object$loglik[keep],
                      AIC = object$aic[keep],
                      BIC = object$bic[keep])
  rownames(stats) <- paste('Step', which(keep))
  stats
}
# Translate internal itemset terms such as "up.12*dn.7" into readable
# gene-symbol form ("up.TP53*dn.BRCA1") using the gene.symbols table
# (columns Gene.Symbol and ID, one row per probe). Recurses over lists and
# vectors; `reorder` moves plain (non-probe) terms first within a product.
line.to.gene.symbol <- function(object, gene.symbols, reorder=F) {
  # Recurse into containers first. inherits() replaces the previous
  # `class(object) == "list"` test, which errors in `if` when the class
  # attribute has length > 1 (R >= 4.2).
  if(inherits(object, "list"))
    return(lapply(object, line.to.gene.symbol, gene.symbols))
  if(length(object) > 1)
    return(sapply(object, USE.NAMES=FALSE, line.to.gene.symbol, gene.symbols))
  if(is.null(object))
    return(NULL)
  if(length(object) == 0)
    return('')
  # Normalize the long direction prefix to its short form.
  object = gsub("down.", "dn.", object, fixed=T)
  # Split a product expression on '+' or '*' and translate each factor.
  group = sapply(strsplit(as.character(object), "[\\+\\*]", fixed = F)[[1]], function(item) {
    items = strsplit(item, ".", fixed=TRUE)[[1]]
    if(length(items) > 1) {
      # "up.<row>"/"dn.<row>": look the row number up in the symbol table.
      name = gsub(" +", "", gene.symbols$Gene.Symbol[as.numeric(items[2])])
      # Collapse multi-symbol annotations ("A /// B") to "A#".
      name = gsub("\\/\\/\\/.*", "#", name)
      if (is.na(name) || is.null(name) || name == '') {
        # No symbol available: fall back to the probe ID.
        name = paste('pb', gene.symbols$ID[as.numeric(items[2])], sep='')
      }
      return(paste(items[1], sep=".", name))
    }
    else
      return(item)
  })
  if(reorder)
    group = group[order(grepl(".", group, fixed=T))]
  paste(group, collapse="*")
}
cox.print.debug <- function(...) {
  # Emit one concatenated debug line, but only when the global flag
  # `global.trace` is set.
  if (global.trace) {
    cat(paste0(..., "\n"))
  }
}
range01 <- function(x){(x-min(x))/(max(x)-min(x))}
# Repeated random-subsampling cross-validation for itemset.coxpath.
#
# data               - list with $time, $x, ... (or list(data, gene.symbols),
#                      auto-detected below)
# gene.symbols       - probe-to-symbol table; taken from `data` when NULL
# prefix             - prepended to the checkpoint file name
# load.saved.file    - resume from an existing checkpoint file when TRUE
# training.set.ratio - fraction of samples used for training in each repeat
# num.repeats        - number of random train/test splits
# trace, min.lambda, depth, ... - forwarded to itemset.coxpath
# Returns a list of per-repeat list(fit, training.set) results; the list is
# checkpointed to disk after every repeat so long runs can be resumed.
run.cv <- function(data, gene.symbols = NULL, prefix = "", load.saved.file = T, training.set.ratio = 0.6, num.repeats = 20, trace = 3, min.lambda = 0.5, depth = 3, ...) {
  # Convenience: accept a two-element list(data, gene.symbols), detected when
  # the second element has one row per column of data$x.
  if(is.null(gene.symbols) && length(data) == 2 && class(data) == 'list' && ncol(data[[1]]$x) == nrow(data[[2]])) {
    gene.symbols = data[[2]]
    data = data[[1]]
  }
  n = length(data$time)
  fits = list()
  run.times = list()  # NOTE(review): never filled below — apparently unused
  start.i = 1
  depth.str = paste('.d', depth, sep = '')
  # Checkpoint file named after the caller's `data` argument expression.
  file.name = paste(prefix, "cv.", as.character(sys.call()[2]), ".subset-", training.set.ratio, depth.str, '.data', sep = '')
  print(file.name)
  # Resume: load previously saved `fits` and skip the completed repeats.
  if(load.saved.file && file.exists(file.name)) {
    load(file.name)
    start.i = length(fits)+1
    cat("###\nSkipping: ", length(fits), " repeats\n\n")
  }
  if(start.i <= num.repeats) {
    for(i in start.i:num.repeats) {
      cat('CV repeat: ', i, '/', num.repeats, "\n", sep = '')
      # Random subsample: the first `ratio` fraction of a shuffled index
      # becomes the training set.
      shuffled = sample(seq(n))
      training = (shuffled <= n*training.set.ratio)
      fit = itemset.coxpath(data[training,], gene.symbols, depth = depth, trace = trace, min.lambda = min.lambda, ...)
      fits[[i]] = list(fit = fit, training.set = training)
      # Checkpoint after every repeat.
      save(fits, file=file.name)
      gc();
    }
  }
  return(fits)
}
# Draw up to 12 (4x3) Kaplan-Meier panels, one per variable in `vars`,
# annotating each panel with that variable's coefficient at step `max.fit`.
# NOTE(review): `gene.symbols` is read from the enclosing/global environment,
# not passed as an argument — confirm it is defined at call time.
plot.all.kaplan.meier <- function(vars, data, fit, max.fit, group.ratios, title = "KM") {
  par(mfrow = c(4, 3), oma = c(0, 0, 3, 0))
  for(i in 1:prod(par('mfrow'))) {
    # Blank panel once the variables run out, then stop.
    if(i > length(vars)) {
      plot.new()
      break
    }
    plot.kaplan.meier(names(vars)[i], data, gene.symbols, group.ratios, fit$b.corrector[[max.fit]][names(vars)[i]])
  }
  mtext(paste(title, " - Step ", max.fit , "/", length(fit$aic), sep=""), outer = TRUE, cex=1.2)
}
# Inverse of line.to.gene.symbol: translate a gene-symbol expression such as
# "up.TP53*dn.BRCA1" back into the internal row-index form "up.12*dn.7".
# Recurses over lists and vectors; unmatched names fall back to the probe ID
# column and finally to the raw text.
gene.symbol.to.line <- function(object, gene.symbols) {
  # Recurse into containers first. inherits() replaces the previous
  # `class(object) == "list"` test, which errors in `if` when the class
  # attribute has length > 1 (R >= 4.2).
  if(inherits(object, "list"))
    return(lapply(object, gene.symbol.to.line, gene.symbols))
  if(length(object) > 1)
    return(sapply(object, USE.NAMES=FALSE, gene.symbol.to.line, gene.symbols))
  if(is.null(object))
    return(NULL)
  if(length(object) == 0)
    return('')
  # Mirror the cleanup applied when symbols were generated: strip whitespace
  # and collapse multi-symbol annotations ("A /// B") to "A#".
  filtered.symbols = gsub("\\/\\/\\/.*", "#", gsub(" +", "", gene.symbols$Gene.Symbol))
  object = gsub("\\s*", "", object) # remove whitespace
  # BUG FIX: normalize the long direction prefix; this was a no-op
  # gsub("dn.", "dn.", ...) — clearly a typo for "down." (the forward
  # translation in line.to.gene.symbol performs the matching substitution).
  object = gsub("down.", "dn.", object, fixed=T)
  # Split a product expression on '+' or '*' and translate each factor.
  paste(sapply(strsplit(as.character(object), "[\\+\\*]", fixed = F)[[1]], function(item) {
    items = strsplit(item, ".", fixed=TRUE)[[1]]
    if(length(items) > 1) {
      # Try the gene symbol first, then the probe ID, else keep the raw text.
      lines = which(filtered.symbols == paste(items[-1], collapse='.'))
      if(length(lines) == 0) {
        lines = which(gene.symbols$ID == paste(items[-1], collapse='.'))
        if(length(lines) == 0)
          line = items[2]
        else
          line = lines[1]
      }
      else
        line = lines[1]
      return(paste(items[1], sep=".", line))
    }
    else
      return(gsub('−', '-', item)) # replace the unicode minus sign by ascii '-'
  }), collapse="*")
}
# Wrapper around plot.interaction.kaplan.meier that accepts gene-symbol
# expressions instead of internal line identifiers. When a title is not given
# it defaults to the caller's literal argument expression, recovered by
# position from sys.call() — so this must be called directly, not via do.call
# or another wrapper.
plot.interaction.kaplan.meier.from.gene.symbols <- function(combi.genes, data.1, gene.symbols, data.2 = NULL, data.3 = NULL, data.1.title = NULL, data.2.title = NULL, data.3.title = NULL, ...) {
  return(plot.interaction.kaplan.meier(gene.symbol.to.line(combi.genes, gene.symbols), data.1 = data.1, gene.symbols, data.1.title = ifelse(is.null(data.1.title), as.character(sys.call()[3]), data.1.title),
                                       data.2 = data.2, data.2.title = ifelse(is.null(data.2.title), as.character(sys.call()[4]), data.2.title),
                                       data.3 = data.3, data.3.title = ifelse(is.null(data.3.title), as.character(sys.call()[5]), data.3.title), ...))
}
# Plot a grid of Kaplan-Meier curves for one or more '*'-combined feature
# expressions: for each expression, the combined KM plot (on data.1 and
# optionally data.2/data.3), followed by a KM plot of each individual factor.
# Returns TRUE when the combination is significant on data.1
# (p < pval.threshold) while at least one individual factor is not — i.e.
# the interaction adds information.
# NOTE(review): with several expressions, the returned pval.combi / pvals
# come from the LAST loop iteration only — confirm this is intended.
plot.interaction.kaplan.meier <- function(combi.var, data.1, gene.symbols, data.1.title = "data.1", data.2 = NULL, data.2.title = "data.2", data.3 = NULL, data.3.title = "data.3", var.coef = NULL, group.ratios = c(0.5, 0.5), main.title = NULL, pval.threshold = 0.05) {
  if(length(group.ratios) == 0 || group.ratios[1] == 0)
    stop("Error: group.ratios[1] == 0");
  if(length(combi.var) > 1)
    combi.vars = combi.var
  else
    combi.vars = c(combi.var)
  # Layout: one column per factor of the widest expression, plus one column
  # for the combined plot; one row per expression (3..6 rows).
  tot.per.line = max(sapply(combi.vars, function(x) { length(strsplit(x, '*', fixed=T)[[1]]) })) + 1
  tot.per.col = max(3, min(length(combi.var), 6))
  par(mfrow = c(tot.per.col, tot.per.line), oma = c(0, 0, 3, 0))
  for(i in 1:length(combi.vars)) {
    combi.var = combi.vars[i]
    # A leading '¬' marks a negated expression; strip the marker (and its
    # wrapping characters) for the per-factor plots.
    if(substr(combi.var, 1, 1) == '¬') {
      combi.var.clean = substr(combi.var, 3, nchar(combi.var)-2)
    }
    else {
      combi.var.clean = combi.var
    }
    # Combined expression, on each supplied dataset.
    pval.combi = plot.kaplan.meier(combi.var, data.1, gene.symbols, group.ratios, var.coef)
    if(!is.null(data.2)) {
      plot.kaplan.meier(combi.var, data.2, gene.symbols, group.ratios, var.coef)
      mtext(data.2.title, adj = 3/(2*tot.per.line), outer = TRUE, line = -.8, cex = 0.8)
    }
    if(!is.null(data.3)) {
      plot.kaplan.meier(combi.var, data.3, gene.symbols, group.ratios, var.coef)
      mtext(data.3.title, adj = 5/(2*tot.per.line), outer = TRUE, line = -.8, cex = 0.8)
    }
    # One panel per individual factor of the combination.
    split.genes = strsplit(combi.var.clean, '*', fixed=T)[[1]]
    pvals = sapply(split.genes, function(gene) {
      pval = plot.kaplan.meier(gene, data.1, gene.symbols, group.ratios)
      if(!is.null(data.2))
        plot.kaplan.meier(gene, data.2, gene.symbols, group.ratios)
      if(!is.null(data.3))
        plot.kaplan.meier(gene, data.3, gene.symbols, group.ratios)
      return(pval)
    })
    # Pad the row with blank panels so each expression starts a new row.
    blanks = tot.per.line - length(split.genes) - 1
    if(blanks > 0)
      for(i in 1:blanks)
        plot.new()
  }
  # Outer titles: single expression gets its symbol form; multiple
  # expressions share the dataset title.
  if(length(combi.vars) == 1) {
    title = line.to.gene.symbol(combi.var, gene.symbols)
    if(!is.null(main.title))
      title = paste(title, " (", main.title, ")", sep='')
    mtext(title, outer = TRUE, cex=1.2, line=1)
    mtext(data.1.title, adj = 1/(2*tot.per.line), outer = TRUE, line = -.8, cex = 0.8)
  }
  else {
    mtext(data.1.title, outer = TRUE, cex=1.2, line=1)
  }
  return((pval.combi < pval.threshold) & any(pvals > pval.threshold))
}
plot.synthetic.kaplan.meier.from.gene.symbols <- function(combi.genes, data.1, gene.symbols, ...) {
  # Convenience wrapper: translate gene symbols into internal line
  # identifiers, then delegate to plot.synthetic.kaplan.meier.
  combi.var <- gene.symbol.to.line(combi.genes, gene.symbols)
  plot.synthetic.kaplan.meier(combi.var, data.1 = data.1, gene.symbols, ...)
}
# Four-panel Kaplan-Meier decomposition of two-factor combinations on a
# median split: each panel contrasts a different pair of the four
# (factor1 x factor2) quadrants, colour-coded red/blue/purple/green.
# NOTE(review): assumes each expression has exactly two factors; the
# per-panel p-values are computed but discarded.
plot.synthetic.kaplan.meier <- function(combi.var, data.1, gene.symbols, data.1.title = "data.1") {
  group.ratios = c(0.5, 0.5)
  if(length(combi.var) == 1)
    combi.vars = c(combi.var)
  else
    combi.vars = combi.var
  # One row of four quadrant-pair panels per expression.
  tot.per.line = 4
  tot.per.col = max(3, min(length(combi.vars), 6))
  par(mfrow = c(tot.per.col, tot.per.line), oma = c(0, 0, 3, 0))
  for(i in 1:length(combi.vars)) {
    combi.var = combi.vars[i]
    pval.combi = plot.kaplan.meier(combi.var, data.1, gene.symbols, group.ratios, plot.only.colours = c("red", "blue"), title="")
    pval.combi = plot.kaplan.meier(combi.var, data.1, gene.symbols, group.ratios, plot.only.colours = c("green", "purple"), title="")
    pval.combi = plot.kaplan.meier(combi.var, data.1, gene.symbols, group.ratios, plot.only.colours = c("red", "purple"), title="")
    pval.combi = plot.kaplan.meier(combi.var, data.1, gene.symbols, group.ratios, plot.only.colours = c("green", "blue"), title="")
  }
  mtext(data.1.title, outer = TRUE, cex=1.2, line=1)
}
plot.kaplan.meier.from.gene.symbols <- function(var, data, gene.symbols, ...) {
  # Convenience wrapper: translate a gene-symbol expression into internal
  # line identifiers before calling plot.kaplan.meier.
  line.var <- gene.symbol.to.line(var, gene.symbols)
  plot.kaplan.meier(line.var, data, gene.symbols, ...)
}
# Kaplan-Meier plot contrasting the low vs high groups defined by a feature
# expression such as "up.12*dn.7" (factors combined with '*'), or — when
# plot.only.colours is given — a chosen pair of the four median-split
# quadrants of a two-factor expression.
#
# var               - feature expression; a leading '¬' marks negation (only
#                     affects the title prefix)
# data              - list with $x, $extra.features, $time, $status
# gene.symbols      - probe-to-symbol table for the title/legend
# group.ratios      - quantile thresholds defining the low/middle/high bands
# var.coef          - optional coefficient shown in the panel title
# plot.third.group  - also draw the middle ("everything else") group
# plot.only.colours - for 2-factor expressions: which of the four quadrant
#                     colours (red/blue/purple/green) to contrast
# Returns the log-rank p-value of the plotted contrast (0 when a four-group
# panel has fewer than two populated groups).
plot.kaplan.meier <- function(var, data, gene.symbols, group.ratios = c(0.5, 0.5), var.coef = NULL, plot.third.group = F, plot.only.colours = NULL, title = NULL, legend.size = 0.8) {
  if(length(group.ratios) == 0 || group.ratios[1] == 0)
    stop("Error: group.ratios[1] == 0")
  # Four-group (quadrant) plots require a plain median split.
  if(!is.null(plot.only.colours) && (group.ratios[[1]] != 0.5 || group.ratios[[2]] != 0.5)) {
    print("For four-group plots, group threshold needs to be 0.5")
    group.ratios = c(0.5, 0.5)
  }
  # Strip a leading '¬' marker (and its wrapping characters).
  if(substr(var, 1, 1) == '¬') {
    neg.correl = T
    var = substr(var, 3, nchar(var)-2)
  }
  else
    neg.correl = F
  # Per-factor group score: 0 = low, 1 = middle band, 2 = high; binary extra
  # features map directly to 0/2.
  groups = sapply(strsplit(var, '*', fixed=T)[[1]], function(gene) {
    gene = strsplit(gene, ".", fixed=TRUE)[[1]]
    if(length(gene) == 1)
      col = data$extra.features[, gene] * 2
    else {
      # 'up.<col>' uses the expression value, 'dn.<col>' its negation.
      if(gene[1] == 'up')
        col = data$x[, as.integer(gene[2])]
      else
        col = -data$x[, as.integer(gene[2])]
      col = (col > col[order(col)][floor(length(data$time)*group.ratios[1])]) + (col > col[order(col)][ceiling(length(data$time)*group.ratios[2])])
    }
    return(col)
  })
  if(!is.null(plot.only.colours)) {
    # Quadrant encoding for two factors: scores 0..3 map onto the fixed
    # colour order red/blue/purple/green.
    groups = groups[,1] + groups[,2]/2
    colours = c("red", "blue", "purple", "green")
    select.colours = colours %in% plot.only.colours
    colours = colours[select.colours]
    select.groups = groups %in% (0:3)[select.colours]
    # Nothing to contrast if fewer than two selected quadrants are populated.
    if(length(table(groups[select.groups])) < 2) {
      plot.new()
      return(0)
    }
    surv <- survfit(Surv(data$time[select.groups], data$status[select.groups] == 1)~groups[select.groups])
    logrank <- survdiff(Surv(data$time[select.groups], data$status[select.groups] == 1)~groups[select.groups])
    genes = strsplit(var, '*', fixed=T)[[1]]
    # Legend entries in the same order as the quadrant scores above.
    legend = c(
      paste(opposite.expression(line.to.gene.symbol(genes[1], gene.symbols)), "AND", opposite.expression(line.to.gene.symbol(genes[2], gene.symbols))),
      paste(opposite.expression(line.to.gene.symbol(genes[1], gene.symbols)), "AND", line.to.gene.symbol(genes[2], gene.symbols)),
      paste(line.to.gene.symbol(genes[1], gene.symbols), "AND", opposite.expression(line.to.gene.symbol(genes[2], gene.symbols))),
      paste(line.to.gene.symbol(genes[1], gene.symbols), "AND", line.to.gene.symbol(genes[2], gene.symbols)))
    legend = legend[select.colours]
    p.val = pchisq(logrank$chisq, 1, lower.tail = FALSE)
  }
  else {
    # Combine factors: 2 only when all factors are high, 0 when all are low;
    # mixed cases land in the middle group 1 and are excluded from the test.
    groups = apply(groups, 1, sum)/ncol(groups)
    groups = (groups >= 1) + (groups >= 2)
    surv <- survfit(Surv(data$time[groups != 1], data$status[groups != 1] == 1)~groups[groups != 1])
    logrank <- survdiff(Surv(data$time[groups != 1], data$status[groups != 1] == 1)~groups[groups != 1])
    # The reported p-value always contrasts low vs high, even when the middle
    # group is added to the plot below.
    p.val = pchisq(logrank$chisq, 1, lower.tail = FALSE)
    if(plot.third.group) {
      colours = c("blue", "grey", "red")
      surv <- survfit(Surv(data$time, data$status == 1)~groups)
      logrank <- survdiff(Surv(data$time, data$status == 1)~groups)
      # BUG FIX: the middle/high legend entries were previously passed as
      # gsub()'s ignore.case/perl arguments (which errors at runtime); build
      # the full legend vector first, then substitute on all entries.
      legend = gsub("*", " AND ", c(opposite.expression(line.to.gene.symbol(var, gene.symbols)), "Everything else", line.to.gene.symbol(var, gene.symbols)), fixed=T)
    }
    else {
      colours = c("blue", "red")
      legend = gsub("*", " AND ", c(opposite.expression(line.to.gene.symbol(var, gene.symbols)), line.to.gene.symbol(var, gene.symbols)), fixed=T)
    }
  }
  par(mar=c(2.5, 2.5, 2.8, 1))
  if(neg.correl)
    prefix = '¬ '
  else
    prefix = ''
  if(is.null(title)) {
    # Quadrant panels drop the up./dn. prefixes from the title for brevity.
    if(! is.null(plot.only.colours))
      title = paste(prefix, gsub('up.', '', gsub('dn.', '', line.to.gene.symbol(var, gene.symbols), fixed=T), fixed=T), ifelse(is.null(var.coef), "", paste(" (", round(var.coef, digits=3), ")", sep='')), sep="")
    else
      title = paste(prefix, line.to.gene.symbol(var, gene.symbols), ifelse(is.null(var.coef), "", paste(" (", round(var.coef, digits=3), ")", sep='')), sep="")
  }
  plot(surv, conf.int = F, col = colours, main= title, cex.main=0.9)
  if(p.val < 1e-5)
    mtext("p-val < 1e-5", cex = 0.65)
  else
    mtext(paste("p-val:", round(p.val, digits = 6)), cex = 0.65)
  legend(x = "bottomleft", legend = legend, fill = colours, cex=legend.size)
  return(p.val)
}
# Log-rank p-value for the extreme groups defined by a '*'-combined feature
# expression (same grouping logic as plot.kaplan.meier, without plotting).
# var          - expression such as "up.12*dn.7" or a single extra-feature name
# data         - list with $x, $extra.features, $time, $status
# group.ratios - quantile thresholds defining the low/middle/high bands
# Returns the chi-square p-value (1 df) comparing the low vs high groups;
# middle-band samples are excluded.
get.kaplan.meier.p.value <- function(var, data, group.ratios = c(0.5, 0.5)) {
  if(length(group.ratios) == 0 || group.ratios[1] == 0)
    stop("Error: group.ratios[1] == 0");
  # Strip a leading '¬' negation marker.
  # NOTE(review): neg.correl is computed but never used below.
  if(substr(var, 1, 1) == '¬') {
    neg.correl = T
    var = substr(var, 3, nchar(var)-2)
  }
  else
    neg.correl = F
  # Per-factor group score: 0 = low, 1 = middle band, 2 = high; binary extra
  # features map directly to 0/2.
  groups = sapply(strsplit(var, '*', fixed=T)[[1]], function(gene) {
    gene = strsplit(gene, ".", fixed=TRUE)[[1]]
    if(length(gene) == 1)
      col = data$extra.features[, gene] * 2
    else {
      # 'up.<col>' uses the expression value, 'dn.<col>' its negation.
      if(gene[1] == 'up')
        col = data$x[, as.integer(gene[2])]
      else
        col = -data$x[, as.integer(gene[2])]
      col = (col > col[order(col)][floor(length(data$time)*group.ratios[1])]) + (col > col[order(col)][ceiling(length(data$time)*group.ratios[2])])
    }
    return(col)
  })
  # Combine factors: 2 only when all factors are high, 0 when all are low;
  # mixed cases land in group 1 and are excluded from the test.
  groups = apply(groups, 1, sum)/ncol(groups)
  groups = (groups >= 1) + (groups >= 2)
  logrank <- survdiff(Surv(data$time[groups != 1], data$status[groups != 1] == 1)~groups[groups != 1]);
  p.val = pchisq(logrank$chisq, 1, lower.tail = FALSE);
  return(p.val)
}
opposite.expression <- function(var) {
  # Negate each factor of a '*'-joined expression: up.<i> <-> dn.<i>; a bare
  # feature name (no dot) is prefixed with the negation sign. Factors are
  # re-joined with " AND " for legend display.
  factors <- strsplit(var, '*', fixed = TRUE)[[1]]
  flipped <- sapply(factors, function(factor.term) {
    parts <- strsplit(factor.term, ".", fixed = TRUE)[[1]]
    if (length(parts) == 1) {
      paste("¬", parts)
    } else if (parts[1] == 'up') {
      paste('dn.', parts[2], sep = '')
    } else {
      paste('up.', parts[2], sep = '')
    }
  })
  paste(flipped, collapse = " AND ")
}
|
1870369f2a0e5e40f54cd559d65a7a390530cc44
|
5705a0566bdafcfd915c3d456ad684e21a7f2a44
|
/scripts/chart_2.R
|
3ad96414d714143a32d5b2e9b4dd5b6401e6417d
|
[] |
no_license
|
mltong/a7cool_beanz
|
86270dd16868527089f2da1ca84d8fe3ba5f4bcb
|
ccf950b4894d91d79caa7992a3da2f4db445e83c
|
refs/heads/master
| 2021-01-20T01:34:47.426566
| 2016-02-24T20:25:27
| 2016-02-24T20:25:27
| 52,050,551
| 0
| 0
| null | 2016-02-24T20:25:27
| 2016-02-19T00:53:05
|
HTML
|
UTF-8
|
R
| false
| false
| 985
|
r
|
chart_2.R
|
library(plotly)
library(dplyr)
# Chart Two
# The number of INFO498f students reporting each programming-experience level
# Chart Two: bar chart of how many INFO498f students reported each
# programming-experience level.
#
# data - optional data frame of survey responses (needs column
#        What.is.your.programming.experience.); when omitted or NULL the
#        survey is downloaded from the course GitHub repository.
chart_2 <- function(data) {
  # BUG FIX: the `data` argument was previously ignored and unconditionally
  # overwritten by a fresh download; only fetch when the caller did not
  # supply it.
  if (missing(data) || is.null(data)) {
    data <- read.csv("https://raw.githubusercontent.com/INFO-498F/a7-survey-data/master/intro_survey_data.csv")
  }
  # Count students per programming-experience level.
  program_experience_data <- data %>% select(What.is.your.programming.experience.)
  names(program_experience_data) <- c("program_experience")
  summarized_data <- program_experience_data %>% group_by(program_experience) %>%
    summarise(num_students = n())
  # NOTE(review): the bare column names below rely on plotly's legacy
  # non-standard evaluation; current plotly expects formulas
  # (x = ~program_experience). Confirm against the installed plotly version.
  plot_ly(summarized_data, x = program_experience,
          y = num_students,
          name = "Programming Experience",
          type = "bar"
  ) %>%
    layout(color = "orange",
           title = "INFO498f - Program Experience Level",
           xaxis = list(title = "Program Experience Level"),
           yaxis = list(title = "Number of Students"))
}
|
e11eba96c9a68f752cce238ad68b79305d5376d3
|
c2e8293b965bf3c844991066b0ffef22b68aedf8
|
/project/src/plot1.R
|
a48a2e46068c778cfb83dc81f0850d37a142ad55
|
[] |
no_license
|
pascal-p/PA01_EDA_Plotting
|
9b145713c66eb08bdde7ea7aee077fcdb394afa5
|
6097ce763b5c95eba5cee234ae2d6408b8dcdd1d
|
refs/heads/master
| 2020-03-21T23:45:50.827921
| 2018-07-01T11:17:55
| 2018-07-01T11:19:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 517
|
r
|
plot1.R
|
olist <- ls()  # snapshot of pre-existing global objects, restored at script end
mod <- source("shared.R")$value  # shared helper module returned as a list
# (helpers close over the global environment)
myPlotFun <- function(df) {
  # Plot 1: red histogram of household global active power (kilowatts).
  hist(df$Global_Active_Power,
       col = "red",
       main = "Global Active Power",
       xlab = "Global Active Power (kilowatts)")
}
main <- function() {
  # Load only the one column this plot needs; other columns are dropped by
  # passing "NULL" column names/classes to the shared reader.
  df <- mod$readFile(colN =c("NULL", "NULL", "Global_Active_Power"),
                     colC = c("NULL", "NULL", "numeric"))
  # Render the histogram into plot1.png via the shared PNG-device wrapper.
  mod$withpng(df, myPlotFun, "plot1.png") # plotfun from global env
  print("Done")
}
main()
# Drop everything this script created, keeping only pre-existing objects.
rm(list=setdiff(ls(), olist))
|
fca844497b36ec2c646adf16031950a38fbb5709
|
3413a4251da58e64b85b6f07055c7ee250fa853d
|
/MATCH/EMA/backend/match_ema_accelerometer.R
|
ad0b6bd5b5517e3022faca0cd0e22637a6473249
|
[] |
no_license
|
wangjingke/reach
|
48dd0da6901f8393f22c4db02fce7d5fc10f214c
|
1320fd5e9f76533ffe0e3d1e124ce8ed10673aa1
|
refs/heads/master
| 2020-05-21T14:59:02.926515
| 2018-04-23T03:43:29
| 2018-04-23T03:43:29
| 63,976,443
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,537
|
r
|
match_ema_accelerometer.R
|
# Attach accelerometer (MVPA) summaries to each answered EMA prompt, for both
# the subject ("mvpa" columns) and their study partner ("omvpa" columns).
# ema            - EMA data frame with subjectID, date, dayInStudy, comply,
#                  promptStart columns
# missing_output - CSV path for subjects with no matching accelerometer file
# accDir         - directory containing the accelerometer RDS files
# accList        - file name (inside accDir) of the CSV index of those files
ema.ACC = function(ema, missing_output, accDir, accList) {
  # Pair each subject with an accelerometer file: match on subject ID and a
  # recording date within 10 days of the subject's first EMA day.
  ema.emaAccMatch = function(ema, accList) {
    validIndiv=unique(ema[, c("subjectID", "date", "dayInStudy")]) # compliance should be 1, otherwise there is no start time for the prompt
    validIndiv$date = as.Date(validIndiv$date, format = "%Y-%m-%d", tz = "America/Los_Angeles")
    emaAccMatch = aggregate(date~subjectID, data = validIndiv, min)
    emaAccMatch$file=NA
    accList=read.csv(accList, header = TRUE, stringsAsFactors = FALSE)
    accList$date = as.Date(accList$date, format = "%Y-%m-%d", tz = "America/Los_Angeles")
    for (i in 1:nrow(emaAccMatch)) {
      # NOTE(review): if several files fall within the 10-day window, `pos`
      # has length > 1 and this assignment warns and keeps only the first.
      pos=which(accList$subjectID==emaAccMatch$subjectID[i] & abs(difftime(accList$date, emaAccMatch$date[i], units = "days"))<=10)
      if (length(pos)>0) {emaAccMatch$file[i] = accList$file[pos]}
      rm(pos)
    }
    return(emaAccMatch)
  }
  emaAccMatch=ema.emaAccMatch(ema=ema, accList = paste0(accDir, "/", accList))
  # Record subjects with no matching accelerometer file.
  accMissing=emaAccMatch[is.na(emaAccMatch$file),]
  write.csv(accMissing, missing_output, row.names = FALSE, quote = FALSE)
  for (i in 1:nrow(emaAccMatch)) {
    # Fill the subject's own MVPA variables around every answered prompt.
    if (!is.na(emaAccMatch$file[i])) {
      accX=readRDS(paste0(accDir, "/", emaAccMatch$file[i]))
      promptLine=which(ema$subjectID==emaAccMatch$subjectID[i] & !is.na(ema$comply))
      for (j in promptLine) {
        mvpaX=ema.extractACC(ema$promptStart[j], accX)
        ema[j, names(mvpaX)]=mvpaX
        rm(mvpaX)
      }
      rm(accX)
      rm(promptLine)
    }
    # Fill the partner's OMVPA ("o"-prefixed) variables: partner IDs differ
    # by +/-1000 around the 12000 boundary.
    targetID=ifelse(emaAccMatch$subjectID[i]>=12000, as.numeric(emaAccMatch$subjectID[i])-1000, as.numeric(emaAccMatch$subjectID[i])+1000)
    pos = which(emaAccMatch$subjectID==targetID)
    if (length(pos)>0 && !is.na(emaAccMatch$file[pos])) {
      accX=readRDS(paste0(accDir, "/", emaAccMatch$file[pos]))
      promptLine=which(ema$subjectID==emaAccMatch$subjectID[i] & !is.na(ema$comply))
      for (j in promptLine) {
        mvpaX=ema.extractACC(ema$promptStart[j], accX)
        ema[j, paste0("o", names(mvpaX))]=mvpaX
        rm(mvpaX)
      }
      rm(accX)
      rm(promptLine)
    }
    rm(targetID)
    rm(pos)
    # Progress indicator every 10 subjects.
    if (i%%10==0) {print(paste0(i, " ", round(i/nrow(emaAccMatch)*100, 2), "%"))}
  }
  return(ema)
}
# Integrating accelerometer data into the EMA data frame
ema.attachACC = function(ema, prefix = "") {
  # Append empty (all-NA) accelerometer summary columns to `ema`:
  # 7 metrics x {15,30,60,120} min x {before,after} the prompt, plus
  # 7 metrics x {30,60,120,240} min symmetric windows. `prefix` (e.g. "o")
  # is prepended to every generated column name.
  metrics <- c("valid", "nonvalid", "sed", "light", "mod", "vig", "mvpa")
  point.names <- c(outer(outer(metrics, c("_15", "_30", "_60", "_120"), paste0),
                         c("_before", "_after"), paste0))
  window.names <- paste0(outer(metrics, c("_30", "_60", "_120", "_240"), paste0), "_window")
  all.names <- paste0(prefix, c(point.names, window.names))
  placeholder <- data.frame(matrix(NA,
                                   nrow = nrow(ema),
                                   ncol = length(all.names),
                                   dimnames = list(c(), all.names)))
  cbind(ema, placeholder)
}
# Compute MVPA minute totals in windows of 15/30/60/120 minutes before and
# after a prompt time, plus the derived symmetric windows, from an
# accelerometer object (list with $data: columns stamp, MVPA, min).
# Returns a one-row data frame with the column layout of ema.attachACC;
# all values stay NA when the prompt falls outside the recording period.
ema.extractACC = function(timepoint, accX) {
  # Sum minutes per MVPA category between two timestamps (fork from common).
  ema.mvpa = function(accX, start, end) {
    skeleton = data.frame(MVPA = c("light", "moderate", "nonvalid", "sedentary", "vigorous"))
    accX.seq = accX$data[accX$data$stamp > start & accX$data$stamp <= end, ]
    mvpa = aggregate(min ~ MVPA, data = accX.seq, sum, na.rm = TRUE)
    # Left-join onto the category skeleton so absent categories count as 0.
    result = merge(skeleton, mvpa, by = "MVPA", all.x = TRUE, sort = FALSE)
    result[is.na(result)] = 0
    return(result)
  }
  # Lookup table: accelerometer category name -> output variable stem.
  MVPA=rbind(
    category=c("nonvalid", "sedentary", "light", "moderate", "vigorous"),
    var=c("nonvalid", "sed", "light", "mod", "vig")
  )
  # One empty (all-NA) output row with the full column layout.
  output=ema.attachACC(data.frame(1))
  output=output[2:ncol(output)]
  anchor1=strptime(timepoint, format = "%Y-%m-%d %H:%M:%S", tz="America/Los_Angeles")
  # Prompt outside the recording period: return the all-NA row.
  if (anchor1<min(accX$data$stamp) | anchor1>max(accX$data$stamp)) {
    return(output)
  } else {
    MinToSec=function(u) {return(u*60)}
    for (k in c(15, 30, 60, 120)) {
      anchor0=anchor1-MinToSec(k)
      anchor2=anchor1+MinToSec(k)
      # Minutes per category in the k minutes before the prompt.
      mvpaX=ema.mvpa(accX, start=anchor0, end=anchor1)
      for (m in 1:nrow(mvpaX)) {
        type=MVPA["var", grep(mvpaX$MVPA[m], MVPA["category",])]
        output[1, paste0(type, "_", k, "_before")]=mvpaX[m, "min"]
      }
      # Minutes per category in the k minutes after the prompt.
      mvpaX=ema.mvpa(accX, start=anchor1, end=anchor2)
      for (m in 1:nrow(mvpaX)) {
        type=MVPA["var", grep(mvpaX$MVPA[m], MVPA["category",])]
        output[1, paste0(type, "_", k, "_after")]=mvpaX[m, "min"]
      }
    }
    # Derived quantities: valid = window length minus nonvalid minutes;
    # mvpa = moderate + vigorous; *_window = before + after totals.
    for (k in c(15, 30, 60, 120)) {
      output[1, paste0("valid_", k, "_before")]=k-output[1, paste0("nonvalid_", k, "_before")]
      output[1, paste0("mvpa_", k, "_before")]=output[1, paste0("mod_", k, "_before")]+output[1, paste0("vig_", k, "_before")]
      output[1, paste0("valid_", k, "_after")]=k-output[1, paste0("nonvalid_", k, "_after")]
      output[1, paste0("mvpa_", k, "_after")]=output[1, paste0("mod_", k, "_after")]+output[1, paste0("vig_", k, "_after")]
      for (m in c("valid", "nonvalid", "sed", "light", "mod", "vig", "mvpa")) {
        output[1, paste0(m, "_", 2*k, "_window")]=output[1, paste0(m, "_", k, "_before")]+output[1, paste0(m, "_", k, "_after")]
      }
    }
    return(output)
  }
}
# merge daily physical activity
ema.dailyPA = function(ema, AccSummary) {
  # Left-join whole-day accelerometer totals (renamed with a "day_" prefix)
  # onto the EMA rows, matching on subjectID and date. AccSummary is the path
  # to a tab-separated summary file.
  accSum = read.table(AccSummary, header = TRUE, sep = "\t", stringsAsFactors = FALSE)
  rename.map = c(nonvalid = "day_nonvalid", valid = "day_valid",
                 sedentary = "day_sed", light = "day_light",
                 moderate = "day_mod", vigorous = "day_vig",
                 mvpa = "day_mvpa")
  hit = names(accSum) %in% names(rename.map)
  names(accSum)[hit] = rename.map[names(accSum)[hit]]
  day.cols = grep("day_", names(accSum), value = TRUE)
  merge(ema, accSum[c("subjectID", "date", day.cols)],
        by = c("subjectID", "date"), all.x = TRUE, sort = FALSE)
}
|
ed944d72a2839ac616db8774e1800794bb509c72
|
184180d341d2928ab7c5a626d94f2a9863726c65
|
/issuestests/RJafroc/man/datasetBinned123.Rd
|
6e75ce9f934d28486aea3f6d769cf8a9440b61a1
|
[] |
no_license
|
akhikolla/RcppDeepStateTest
|
f102ddf03a22b0fc05e02239d53405c8977cbc2b
|
97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5
|
refs/heads/master
| 2023-03-03T12:19:31.725234
| 2021-02-12T21:50:12
| 2021-02-12T21:50:12
| 254,214,504
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,932
|
rd
|
datasetBinned123.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{datasetBinned123}
\alias{datasetBinned123}
\title{Binned dataset suitable for checking \code{\link{FitCorCbmRoc}}; seed = 123}
\format{A list with 8 elements:
\itemize{
\item{\code{NL}}{ Ratings array [1, 1:2, 1:10000, 1], of non-lesion localizations, NLs}
\item{\code{LL}}{ Ratings array [1, 1:2, 1:5000, 1], of lesion localizations, LLs}
\item{\code{lesionVector}}{ array [1:5000], number of lesions per diseased case, all set to one}
\item{\code{lesionID}}{ array [1:5000, 1], lesions labels on diseased cases, all set to one}
\item{\code{lesionWeight}}{ array [1:5000, 1], weights, all set to one}
\item{\code{dataType}}{ "ROC", the data type}
\item{\code{modalityID}}{ "1", treatment label}
\item{\code{readerID}}{ [1:2] "1" "2", reader labels}
}}
\usage{
datasetBinned123
}
\description{
A binned dataset suitable for analysis by \code{\link{FitCorCbmRoc}}. It was generated by
\link{DfCreateCorCbmDataset} by setting the \code{seed} variable to 123. Note
the formatting of the data as a single treatment two reader dataset, even though
the actual pairing might be different, see \code{\link{FitCorCbmRoc}}. The dataset is
intentionally large so as to demonstrate the asymptotic convergence of ML estimates,
produced by \code{\link{FitCorCbmRoc}}, to the population values. The data was generated
by the following argument values to \code{\link{DfCreateCorCbmDataset}}: seed = 123,
K1 = 5000, K2 = 5000, desiredNumBins = 5, muX = 1.5, muY = 3, alphaX = 0.4,
alphaY = 0.7, rhoNor = 0.3, rhoAbn2 = 0.8.
}
\examples{
str(datasetBinned123)
}
\references{
Zhai X, Chakraborty DP (2017). A bivariate contaminated binormal model for robust
fitting of proper ROC curves to a pair of correlated, possibly degenerate,
ROC datasets. Medical Physics. 44(6):2207--2222.
}
\keyword{datasets}
|
4225100b45e5609abb5f1b09d271c2451c0f4cce
|
aa710f59f213bdcb4426b40b1b3b3b38dc113958
|
/desserts-match-gl.R
|
7fd42f73a7b0a7f1b3b10710137dc0f9571f4836
|
[] |
no_license
|
marierivers/favorite-dessert
|
4671fdb2dc3f0cc40816308dd045c22c72eeb279
|
dd1d331bea730d14d47edb353e803d470d5666a9
|
refs/heads/main
| 2023-07-17T11:30:34.305118
| 2021-08-25T22:14:14
| 2021-08-25T22:14:14
| 399,258,051
| 0
| 0
| null | 2021-08-24T16:30:23
| 2021-08-23T21:56:42
| null |
UTF-8
|
R
| false
| false
| 1,188
|
r
|
desserts-match-gl.R
|
library(tidyverse)
library(rvest)
# Scrape the list of iconic US desserts and report which user-entered
# favourite desserts appear on it.
# Fetch the article's HTML.
webpage <- read_html("https://www.eatthis.com/iconic-desserts-united-states/")
# Each dessert in the article is an <h2> heading.
dessert_elements<- html_elements(webpage, "h2")
dessert_listing <- dessert_elements %>%
  html_text2() %>% # extract the text of each heading
  as_tibble() %>% # make it a data frame
  rename(dessert = value) %>% # better name for the column
  head(.,-3) %>% # the 3 last headings are not desserts
  rowid_to_column("rank") %>% # add a rank column using the row number as proxy
  write_csv("data/iconic_desserts.csv") # save it as csv
# Lower-case both sides so the join is case-insensitive.
dessert_listing <- dessert_listing %>%
  mutate(dessert = tolower(dessert))
favorite_desserts <- read_csv("favorite_desserts.csv") %>%
  rename(dessert = Favorite_dessert) %>%
  mutate(dessert = tolower(dessert))
# Keep every iconic dessert, with the matching user names where present.
is_iconic <- right_join(favorite_desserts, dessert_listing, by = "dessert") #changed semi_join to right_join -- Grace
# Show the data frame of names and desserts that match the iconic list.
print("These are the deserts you entered that are iconic American desserts!") # added message -- GL
is_iconic
|
ba17efb6ebca1c57f837d7bc55604f5ae9d968d7
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/easyPubMed/examples/batch_pubmed_download.Rd.R
|
ca80ade45c49aece0b8ddc83ccb5352a2eb96d5f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 679
|
r
|
batch_pubmed_download.Rd.R
|
library(easyPubMed)
### Name: batch_pubmed_download
### Title: Download PubMed Records in XML or TXT Format
### Aliases: batch_pubmed_download
### ** Examples
## Not run:
##D # Example 01: retrieve data from PubMed and save as XML file
##D ml_query <- "Machine Learning[TI] AND 2016[PD]"
##D out1 <- batch_pubmed_download(pubmed_query_string = ml_query, batch_size = 180)
##D XML::xmlParse(out1[1])
##D #
##D # Example 02: retrieve data from PubMed and save as TXT file
##D ml_query <- "Machine Learning[TI] AND 2016[PD]"
##D out2 <- batch_pubmed_download(pubmed_query_string = ml_query, batch_size = 180, format = "medline")
##D readLines(out2[1])[1:30]
## End(Not run)
|
d372f8780fd4b95095e06d5ecd90fbb9e759ae3e
|
a693da8676743148657e3ddb7dbfdc47c50d53a1
|
/R/init.R
|
5de43d29838b982e3ac11fd40bc7d0eb96033cc7
|
[] |
no_license
|
Hafro/geo
|
4de0f08370973b75d8d46003fb8e9a9d536beaff
|
6deda168a3c3b2b5ed1237c9d3b2f8b79a3e4059
|
refs/heads/master
| 2022-11-23T15:06:47.987614
| 2022-11-16T22:26:55
| 2022-11-16T22:26:55
| 38,042,352
| 3
| 6
| null | 2022-11-16T22:26:56
| 2015-06-25T10:09:50
|
R
|
UTF-8
|
R
| false
| false
| 8,544
|
r
|
init.R
|
#' Initiate a geoplot (??)
#'
#' Initiate a geoplot (??).
#'
#'
#' @param lat,lon Latitude and longitude
#' @param type Plot method
#' @param pch Plot character
#' @param xlim,ylim Plot limits
#' @param b0 Base latitude
#' @param r Plot ratio, multiplier on \code{diff(x_or_ylim)}
#' @param xlab,ylab Labels for x- and y-axes, default \code{"Longitude",
#' "Latitude"}
#' @param option Method of determining plot extent, default \code{"cut"}
#' @param grid Should a grid be drawn, default \code{TRUE}
#' @param new Plot control, default \code{FALSE} adds plot to current plot
#' @param cont For use with contours: should space be reserved for contour
#' labels? Default \code{FALSE}
#' @param cex Character size expansion
#' @param col Color, default 1, usually black
#' @param lcont Contour lable space allocation, default c(0.13, 0.21)
#' @param plotit If FALSE plot is only initialized but not plotted. If used
#' other programs are used to fill the plot (geolines, geocontour, geopolygon
#' etc). Most often used in multiple plots.
#' @param reitur Should the grid be that of statistical rectangles?
#' @param smareitur Should the grid be that of statistical sub--rectangles?
#' @param reittext Should the rectangles be labelled?
#' @param axratio Parameter usually not changed by the user.
#' @param lwd Line width
#' @param axlabels If FALSE no numbers are plotted on the axes. Default value
#' is TRUE.
#' @param oldpar The old par--list, from the parent geoplot--call
#' @param projection Projection, default \code{Mercator}
#' @param b1 Second latitude for Lambert projection
#' @param dlat,dlon Defines the grid, to make a grid on the axes, 1 is a number
#' on axis and a line at every deg. Not usualy set by user.
#' @param command The parent \code{geoplot} command is included, although for
#' what purpose isn't quite clear??
#' @param jitter Random jitter to accentuate repeated values. Default no jitter
#' (\code{jitter = 0})
#' @param xaxdist,yaxdist Distance from plot to the labels on axes (dist or r
#' argument to geoaxis). Default values \code{0.2, 0.3} but higher values mean
#' that axlabels are further away from the plot. Further flexibility with axes
#' can be reached by calling geoplot with axlabels = FALSE and geoaxis
#' aferwards.
#' @return No value, side effect plotted.
#' @note Needs further elaboration, alternatively hide the function.
#' @seealso Called by \code{\link{geoplot}} calls \code{\link{Elimcomp}},
#' \code{\link{findline}}, \code{\link{geoaxis}}, \code{\link{gridaxes}},
#' \code{\link{gridaxes.Lambert}}, \code{\link{invProj}}, \code{\link{Proj}}.
#' @keywords hplot
#' @export init
init <-
  function(lat, lon = 0, type = "p", pch = "*", xlim = c(0, 0), ylim = c(0, 0),
           b0 = 65, r = 1.05, xlab = "Longitude", ylab =
           "Latitude", option = "cut", grid = TRUE, new = FALSE, cont = FALSE, cex = 0.7,
           col = 1, lcont = c(0.13, 0.21), plotit = TRUE, reitur = FALSE, smareitur = FALSE,
           reittext = FALSE, axratio = 1, lwd = 0, axlabels = TRUE, oldpar, projection =
           "Mercator", b1 = 65, dlat = 0, dlon = 0, command = 0, jitter = 0, xaxdist, yaxdist)
{
  # Accept either separate lat/lon vectors, or a single list given as `lat`
  # carrying $lat/$lon components ($x/$y when projection == "none").
  if(projection == "none") {
    if(length(lon) == 1) {
      lon <- lat$y
      lat <- lat$x
    }
  }
  else {
    if(length(lon) == 1) {
      lon <- lat$lon
      lat <- lat$lat
    }
  }
  nlat <- length(lat)
  # Optional uniform jitter to accentuate repeated coordinate values.
  lat <- lat + (runif(nlat) - 0.5) * jitter
  lon <- lon + (runif(nlat) - 0.5) * jitter
  # Central longitude: from the data unless an explicit xlim was given.
  if(xlim[1] == xlim[2])
    l1 <- mean(range(lon[!is.na(lon)]))
  else l1 <- mean(xlim)
  par(xpd = FALSE)
  scale <- "km"
  xgr <- Proj(lat, lon, scale, b0, b1, l1, projection)
  # size of text
  par(cex = cex)
  if(lwd != 0)
    par(lwd = lwd)
  if(!axlabels) {
    xlab <- ""
    ylab <- ""
  }
  # contourplot: without contours no margin space is reserved for labels
  if(!cont) {
    lcont[1] <- 0
    lcont[2] <- 0
  }
  if(cont)
    option <- "nocut"
  plt <- oldpar$plt
  contlab <- plt
  contlines <- plt
  contlines[1] <- plt[1] + lcont[2] * (plt[2] - plt[1])
  contlab[2] <- plt[1] + lcont[1] * (plt[2] - plt[1])
  par(plt = contlines)
  # Find borders, adjust them if given.
  xyratio <- par()$pin[1]/par()$pin[2]
  #*1.04777 ratio of axes.
  if(projection == "none") {
    ind <- seq_along(xgr$x)
    ind <- ind[!is.na(xgr$x)]
    # No NAs
    if(xlim[1] == xlim[2]) {
      xmin <- min(xgr$x[ind])
      xmax <- max(xgr$x[ind])
    }
    else {
      xmin <- xlim[1]
      xmax <- xlim[2]
      r <- 1
    }
    if(ylim[1] == ylim[2]) {
      ymin <- min(xgr$y[ind])
      ymax <- max(xgr$y[ind])
    }
    else {
      ymin <- ylim[1]
      ymax <- ylim[2]
      r <- 1
    }
  }
  else {
    ind <- seq_along(xgr$lon)
    ind <- ind[!is.na(xgr$lon)]
    # No NAs
    if(xlim[1] == xlim[2]) {
      xmin <- min(xgr$lon[ind])
      xmax <- max(xgr$lon[ind])
    }
    else {
      xmin <- xlim[1]
      xmax <- xlim[2]
      r <- 1
    }
    if(ylim[1] == ylim[2]) {
      ymin <- min(xgr$lat[ind])
      ymax <- max(xgr$lat[ind])
    }
    else {
      ymin <- ylim[1]
      ymax <- ylim[2]
      r <- 1
    }
  }
  # Corner points used to project the data limits into plot coordinates.
  if(projection == "Lambert") {
    xt1 <- c(l1, xmin, xmax, xmax)
    xt2 <- c(ymin, ymin, ymin, ymax)
  }
  else if(projection == "none") {
    xt2 <- c(xmin, xmax)
    xt1 <- c(ymin, ymax)
  }
  else {
    xt1 <- c(xmin, xmax)
    xt2 <- c(ymin, ymax)
  }
  xl <- Proj(xt2, xt1, scale, b0, b1, l1, projection)
  xmin <- min(xl$x)
  ymin <- min(xl$y)
  xmax <- max(xl$x)
  ymax <- max(xl$y)
  xymax <- max((ymax - ymin), (xmax - xmin)/xyratio)
  meanx <- (xmin + xmax)/2
  meany <- (ymin + ymax)/2
  r1 <- r + (r - 1)/2
  r1 <- r1 - 0.5
  if(option == "cut") {
    # cut figure and graph region
    limx <- c(meanx - r1 * (xmax - xmin), meanx + r1 * (xmax - xmin))
    limy <- c(meany - r1 * (ymax - ymin), meany + r1 * (ymax - ymin))
    xyr <- (ymax - ymin)/((xmax - xmin)/xyratio)
    pinpar <- c(1:2)
    if(xyr > 1) {
      pinpar[1] <- par()$pin[1]/xyr
      pinpar[2] <- par()$pin[2]
    }
    else {
      pinpar[1] <- par()$pin[1]
      pinpar[2] <- par()$pin[2] * xyr
    }
    par(pin = pinpar)
  }
  else {
    limx <- c(meanx - r1 * xymax * xyratio, meanx + r1 * xymax * xyratio)
    limy <- c(meany - r1 * xymax, meany + r1 * xymax)
  }
  if(type == "l") {
    # Lines are clipped against the plot border.
    gx <- limx
    gy <- limy
    border <- list(x = c(gx[1], gx[2], gx[2], gx[1], gx[1]), y = c(
      gy[1], gy[1], gy[2], gy[2], gy[1]))
    xx <- findline(xgr, border)
  }
  else {
    # Points are simply filtered to the plot limits.
    ind <- seq_along(xgr$x)
    ind <- ind[(xgr$x > limx[1]) & (xgr$x < limx[2]) & (xgr$y >
      limy[1]) & (xgr$y < limy[2])]
    xx <- list(x = xgr$x[ind], y = xgr$y[ind])
  }
  if(length(xx$x) == 0) {
    type <- "n"
    xx <- xgr
  }
  # to get rid of errors if no point in plot.
  par(new = new)
  if(plotit) {
    if(projection == "none") {
      plot(xx$x, xx$y, type = type, pch = pch, xlim = limx,
           ylim = limy, xlab = xlab, ylab = ylab, col = col)
    }
    else {
      plot(xx$x, xx$y, type = type, pch = pch, xlim = limx,
           ylim = limy, axes = FALSE, xlab = xlab, ylab = ylab,
           col = col)
      # plot grid and axes
      if(projection == "Lambert"){
        d <- gridaxes.Lambert(limx, limy, scale, b0, xyratio, grid, col,
                              reitur, smareitur, axratio, axlabels,
                              b1, l1, projection, dlat, dlon)
      }
      else{
        d <- gridaxes(limx, limy, scale, b0, xyratio,
                      grid, col, reitur, smareitur, axratio,
                      axlabels, b1, l1, projection, dlat,
                      dlon)
      }
    }
  }
  else plot(xx$x, xx$y, type = "n", pch = pch, xlim = limx, ylim = limy,
            axes = FALSE, xlab = "", ylab = "", col = col)
  #par(new = T)
  gpar <- par(no.readonly = TRUE)
  # save graphical setup
  o <- invProj(limx, limy, scale, b0, b1, l1, projection)
  gpar <- Elimcomp(gpar)
  geopar <- list(gpar = gpar, limx = limx, limy = limy, scale = scale,
                 b0 = b0, b1 = b1, l1 = l1, contlab = contlab, contlines =
                 contlines, cont = cont, projection = projection, origin = o,
                 command = command)
  # store geopar list inside options(), where plot functions can access it
  options(geopar=geopar)
  # Extra to get geoaxis instead of normal axis added in R version.
  # NOTE(review): `d` is only assigned when plotit is TRUE and projection is
  # not "none"; axlabels = TRUE with plotit = FALSE would fail here -- confirm
  # callers never combine those.
  if(axlabels && projection == "Mercator"){
    if(!reitur && !smareitur){
      geoaxis(side=2, dist=yaxdist, dlat=d$dlat, inside=FALSE, cex=cex)
      geoaxis(side=1, dlon=d$dlon, inside=FALSE, cex=cex, dist=xaxdist)
    }
    else{
      if(reitur){
        geoaxis(side=2, dlat=d$dlat, inside=FALSE, cex=0.63)
        geoaxis(side=1, dlon=d$dlon, inside=FALSE, cex=0.63)
      }
      if(smareitur){
        geoaxis(side=2, dlat=d$dlat*2, inside=FALSE, cex=0.63)
        geoaxis(side=1, dlon=d$dlon*2, inside=FALSE, cex=0.63)
      }
    }
  }
  return(invisible())
}
|
d54dbfd0fa4b8a8b382b6727f88263d176fca681
|
e987fd9dedb377ca83937324530e3903a60fb160
|
/audio_gps_processing/extract_synchs.R
|
f3e5b195f02a07f7dbba6bed3974b2802f362c60
|
[] |
no_license
|
arianasp/move_comm_analysis
|
fd3d209406555a0f66469d8ffd84ac6f4773e8e0
|
6d2f420be2b2f9c4fc92913e91f2605aeade1cd5
|
refs/heads/master
| 2023-06-12T14:55:14.751202
| 2023-06-05T11:47:33
| 2023-06-05T11:47:33
| 249,375,474
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,326
|
r
|
extract_synchs.R
|
#extract synch calls and where they line up with seconds within a file
#output a data frame of this information for all files within a specific directory (indir)
#save it to an output directory
#LIBRARIES
library(stringr)
library(lubridate)
source('~/Dropbox/meerkats/code/general/meerkat_functions.R')
#DIRECTORIES
indir <- '~/Dropbox/meerkats/data/Kalahari2017/labels_CSV'
outdir <- '~/Dropbox/meerkats/data/Kalahari2017/RDATA'
gps.synch.file <- '~/Dropbox/meerkats/data/Kalahari2017/RDATA/synch_info_all.csv'
#MAIN
#set working directory to indir
setwd(indir)
#get list of files
files <- list.files()
#Read in info about each file and store in a data frame
#filename
file.info <- data.frame(filename=files)
file.info$filename <- as.character(file.info$filename)
#split into sound focals and collar data
foc.files <- grep('SOUNDFOC',file.info$filename)
collar.files <- seq(1,length(files))[-foc.files]
#individual id
file.info$id <- sapply(file.info$filename,FUN=function(x){return(strsplit(x,"_")[[1]][2])})
#dates
file.info$date <- NA
file.info$date[foc.files] <- sapply(file.info$filename[foc.files],function(x){return(strsplit(x,'_')[[1]][4])})
file.info$date[foc.files] <- sapply(file.info$date[foc.files],function(x){return(paste(substring(x,1,4),'-',substring(x,5,6),'-',substring(x,7,8),sep=''))})
file.info$date[collar.files] <- sapply(file.info$filename[collar.files],function(x){return(substring(x,which(strsplit(x,'')[[1]]=='(')+1,which(strsplit(x,'')[[1]]=='(')+10))})
file.info$date[collar.files] <- sapply(file.info$date[collar.files],function(x){return(gsub('_','-',x))})
#Load in GPS to audio synch table
gps.synch.dat <- read.table(gps.synch.file,header=T,sep=',')
gps.synch.dat$gps.datetime <- as.POSIXct(paste(gps.synch.dat$Date,gps.synch.dat$GPS.Time,sep=' '),format='%m/%d/%y %H:%M:%S')
gps.synch.dat$speaker.sec <- sapply(gps.synch.dat$Speaker.Time,to.secs)
gps.synch.dat$zero.datetime <- gps.synch.dat$gps.datetime - gps.synch.dat$speaker.sec
gps.synch.dat$min.datetime <- as.POSIXct(paste(gps.synch.dat$Date,gps.synch.dat$t0,sep=' '),format='%m/%d/%y %H:%M:%S')
gps.synch.dat$max.datetime <- as.POSIXct(paste(gps.synch.dat$Date,gps.synch.dat$tf,sep=' '),format='%m/%d/%y %H:%M:%S')
#Get synch info from files - both beeps (more accurate) and synchs (less accurate)
synch.info <- data.frame()
for (i in seq_len(nrow(file.info))) {
  file <- file.info$filename[i]
  print(file)
  # Read the label table. (The previous empty `quote = ` argument was
  # equivalent to the default and has been dropped.)
  dat <- read.table(file, header = TRUE, sep = '\t', stringsAsFactors = FALSE, comment.char = "")
  # Split labels into the two synch marker types.
  beeps <- dat[grep('BEEP', dat$Name), ]
  synchs <- dat[grep('SYNCH', dat$Name), ]
  if (nrow(beeps) > 0) {
    beeps$Type <- 'beep'
  }
  if (nrow(synchs) > 0) {
    synchs$Type <- 'synch'
  }
  print(beeps)
  print(synchs)
  dat <- rbind(beeps, synchs)
  # Extract the labelled speaker time (the h:mm:ss fragment around the first
  # ':' in the label) and convert both it and the recording start time to
  # seconds.
  dat$synchtime <- sapply(dat$Name, FUN = function(x) {
    return(substring(x, which(strsplit(x, '')[[1]] == ':')[1] - 1,
                     which(strsplit(x, '')[[1]] == ':')[1] + 5))
  })
  dat$synchtime.sec <- sapply(dat$synchtime, FUN = to.secs)
  dat$rectime.sec <- sapply(dat$Start, FUN = to.secs)
  # Synch rows for the same calendar date as this file.
  gps.synch.dat.curr <- gps.synch.dat[which(as.character(date(gps.synch.dat$gps.datetime)) == file.info$date[i]), ]
  # Map speaker seconds to GPS time using the first clock zero.
  dat$gps.time <- gps.synch.dat.curr$zero.datetime[1] + dat$synchtime.sec
  # Rows that land after the first synch period's end belong to the second
  # speaker-clock period, so recompute ONLY those rows from the second zero.
  # (Bug fix: the old loop overwrote every row's gps.time and ignored the
  # per-row index it computed.)
  diff.clock.idxs <- which(dat$gps.time > gps.synch.dat.curr$max.datetime[1])
  if (length(diff.clock.idxs) > 0) {
    dat$gps.time[diff.clock.idxs] <- gps.synch.dat.curr$zero.datetime[2] + dat$synchtime.sec[diff.clock.idxs]
  }
  # Collect one tidy row per synch marker for this file.
  synch.info.curr <- data.frame(filename = rep(file, nrow(dat)),
                                date = rep(file.info$date[i], nrow(dat)),
                                id = rep(file.info$id[i], nrow(dat)),
                                speaker.time = dat$synchtime.sec,
                                rec.time = dat$rectime.sec,
                                gps.time = dat$gps.time,
                                synch.type = dat$Type)
  # Drift = elapsed recorder time minus elapsed GPS time, relative to the
  # first marker in the file.
  synch.info.curr$drift <- synch.info.curr$rec.time - synch.info.curr$rec.time[1] - (synch.info.curr$gps.time - synch.info.curr$gps.time[1])
  synch.info <- rbind(synch.info, synch.info.curr)
}
#resids <- c()
#for(i in 1:nrow(file.info)){
# synch.info.curr <- synch.info[which(synch.info$filename==file.info$filename[i] & synch.info$synch.type=='beep'),]
# x <- as.numeric(synch.info.curr$gps.time-synch.info.curr$gps.time[1])/60/60 #in hours
# y <- as.numeric(synch.info.curr$drift)
# plot(x,y)
# print(i)
# print(lm(y~x)$coefficients[2])
# resids <- c(resids,lm(y~x)$residuals)
#}
save(list=c('synch.info'),file=paste(outdir,'/synch_info_all_2020-02-21.RData',sep=''))
|
9183f0e9065bf2891807172fca3a25398e88d152
|
38b161f15f0d6ca60e386ffbb1e37f91806d944a
|
/R/insert_pandoc_columns.R
|
b0109359ccda1e1640e952a536059da5f16f9dd9
|
[
"MIT"
] |
permissive
|
MusculusMus/pptxtemplates
|
51c871077023ec5afaa77481e4ba8496502b933c
|
fab50ccdc9ec1caa5494dd307e5781397b057b4e
|
refs/heads/master
| 2023-07-02T18:54:10.146636
| 2021-08-07T22:40:12
| 2021-08-07T22:40:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 697
|
r
|
insert_pandoc_columns.R
|
cols <- ":::::: {.columns}\n::: {.column}\n\n:::\n::: {.column}\n\n:::\n::::::\n"
# Replace the current editor selection with the two-column Pandoc div
# skeleton stored in `cols`, then move the cursor onto the blank line inside
# the first column. Does nothing when no document is active.
insert_pandoc_columns <- function() {
  # Context of the document currently open in the RStudio editor.
  doc_ctx <- rstudioapi::getActiveDocumentContext()
  # Bail out quietly if there is no active document.
  if (is.null(doc_ctx)) {
    return(invisible(NULL))
  }
  sel <- doc_ctx$selection[[1]]
  # Overwrite whatever is selected with the column skeleton.
  rstudioapi::modifyRange(sel$range, cols)
  # Cursor lands two rows below the start of the replaced range, column 1.
  target <- rstudioapi::document_position(
    row = sel$range$start[1] + 2,
    column = 1
  )
  rstudioapi::setCursorPosition(target)
}
|
7f9dc982133d5b3bec184ca9b30ba6c05309bdc6
|
f7ccb98205f25ac5d3b3e460173b404f7ae4de39
|
/R/module_201_3_ans_ex_2.R
|
a7ab1771f14be5c20d89f4a842b16f6bc1ef84c2
|
[] |
no_license
|
erin-r-butterfield/R4Biochemists201
|
543f18ca112979dcf4783f49d56b17a2e10e39e6
|
dc791862e71b664a33693c12c02794ac3658d9d1
|
refs/heads/master
| 2023-08-26T18:26:29.486831
| 2021-11-05T21:36:49
| 2021-11-05T21:36:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,381
|
r
|
module_201_3_ans_ex_2.R
|
## ---- exercise_2-----------------------------------------------------------------------------------
# Read the 8th sheet of the workbook, skipping its first row, and give the
# two columns short working names.
data_2 <- read_excel("file.xlsx", sheet_names[8], skip = 1)
colnames(data_2) <- c("thick", "express")
# you can plot it in ggplot but you will need to calculate the stats
# and add them manually
ggplot(data_2, aes(thick, express)) +
  geom_point() +
  stat_smooth(method = "lm", se = FALSE, color="black") +
  xlab("Thickness (mm)") + # label x-axis
  ylab("mRNA expression") + # label y-axis
  ggtitle("Croft et al, 2019, Figure 1m") + # add a title
  scale_y_continuous(limits=c(-4,12), breaks=c(-4,-2,0,2,4,6,8,10,12)) +
  theme_classic()
## ---- more_for_exercise_2--------------------------------------------------------------------------
# make the plot with ggscatter()
# NOTE(review): assigning to `plot` masks base::plot() in this session.
plot <- ggscatter(data_2, x = "thick", y = "express",
                  add = "reg.line", # Add regression line
                  cor.coef = TRUE, # Add correlation coefficient. see ?stat_cor
                  cor.coeff.args = list(method = "spearman"),
                  xlab = "Thickness (mm)",
                  ylab = "mRNA expression",
                  xlim = c(0,1.5),
                  title = "Croft et al, 2019, Figure 1m" # add a title
)
# use scale_y_continuous() to alter the y axis
plot + scale_y_continuous(limits=c(-4,12), breaks=c(-4,-2,0,2,4,6,8,10,12))
|
e1d26e6bd718a682109ea41274b914cbadf83cbe
|
ec842c4e81dcb1ecece2d95a7b5a0ed63ffd95c7
|
/Codes/JSEM_codes/code_jsem.R
|
82b8ad1efc87e88e4093af72efe7f3ec326129d7
|
[] |
no_license
|
shubhobm/Stratified-mult-GGM
|
c3fbabe3f37fdaa3b05b468dd097e4b26d8ff337
|
931dffe72e418d8d105eba37e4205721d9afdd8c
|
refs/heads/master
| 2021-08-22T10:43:14.753864
| 2021-06-16T23:00:09
| 2021-06-16T23:00:09
| 99,459,082
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,684
|
r
|
code_jsem.R
|
##########################################################
# function: JSEM
## Purpose: to use joint group lasso to estimate multiple graphical models,
## given the known grouping structure. Thresholding corrects the group
## misspecification
# trainX, # a data matrix sorted by group label, n by p
# trainY, # a n-dim vector indicating model membership of the observed data (trainX)
# index, # a K by p matrix of node-specific grouping pattern.
# lambda, # penalty parameter; allow one parameter for every regression
# delta1 = NULL, #Thresholding parameter at the group level
# delta2 = NULL, #thresholding parameter with group
## Output:
## out: a list which contains the estimated adjacency matrices.
# Estimate multiple graphical models jointly via node-wise group-lasso
# regressions (grpreg), given the known grouping structure `index`.
# Optional thresholds delta1/delta2 correct group misspecification at the
# group and within-group level. Returns 0/1 adjacency matrices (Ahat),
# symmetrized coefficient matrices (Theta) and the per-node penalties used.
# NOTE(review): `index` is accessed as `index[[i]]` (a list element per
# node) although the header comment above describes a K-by-p matrix --
# presumably a list of per-node grouping matrices; confirm with callers.
JSEM <- function(
  trainX,
  trainY,
  index,
  lambda,
  delta1 = NULL,
  delta2 = NULL,
  eps = 1e-06
){
  p = dim(trainX)[2]
  K = length(unique(trainY))
  # One penalty value per node-wise regression.
  lambda.grpreg = rep(lambda, p)
  # list observed data by model
  design = vector("list", K)
  Ahat = vector("list", K)
  for (k in 1:K){
    design[[k]] = trainX[which(trainY == k), ]
    Ahat[[k]] = matrix(0, p, p)
  }
  ## Start the loop for each node
  for (i in 1:p) {
    #cat("i= ", i, "\n")
    ## Create a block diagonal matrix for the variables
    list.x = vector("list", K)
    list.y = vector("list", K)
    for (k in 1:K) {
      # Regress node i on all other nodes, separately per model k.
      list.x[[k]] = design[[k]][, -i]
      list.y[[k]] = design[[k]][, i]
    }
    #Duplicate columns for each k and add these extra columns to the end of the rearranged design X;
    #In the meantime, add the variable indices to the list of all variables.
    #Need to reorganize the estimate in the end.
    X = as.matrix(bdiag(list.x))
    new_X = X
    Y = unlist(list.y)
    ## To get the index label
    myindex = sort.variables(index[[i]])
    # Reorder columns so members of the same group are contiguous.
    X = X[, myindex$x.index]
    fit = grpreg(X, Y, myindex$v.index, family = "gaussian", penalty = "grLasso", lambda = lambda.grpreg[i])
    # Drop the intercept from the coefficient vector.
    coeff = fit$beta[-1, ]
    # Optional thresholding: delta1 at the group level, delta2 within groups.
    if (!is.null(delta1)){
      coeff = thr.group(coeff, myindex$v.index, delta1)
    }
    if (!is.null(delta2)){
      coeff = thr.coord(coeff, myindex$v.index, delta2)
    }
    # Undo the group reordering so row k corresponds to model k again.
    tmp = matrix(coeff, nrow = K)
    for (j in 1:dim(tmp)[2]){
      tmp[, j] = tmp[myindex$b.index[, j], j]
    }
    for (k in 1:K){
      Ahat[[k]][i, -i] = tmp[k,]
    }
  }
  # Symmetrize the thresholded coefficients;
  # Get the symmetric adjacency matrices;
  Theta = lapply(Ahat, FUN = symmetrize)
  Ahat = lapply(Theta, function(u) {u[abs(u)>eps] = 1; return(u)})
  return(list(Ahat = Ahat, Theta=Theta, lambda = lambda.grpreg))
}
##########################################################
## sel.lambda.jsem.R
# Purpose: to select the tuning parameter for JSEM
# Tune the JSEM penalty over a grid `lambda`. For each candidate: fit JSEM,
# refit per-graph glasso with the implied zero constraints, and score by
# (a) summed BIC on the training data and (b) negative log-likelihood on
# held-out test data. Returns both score vectors.
sel.lambda.jsem <- function(
  trainX,
  testX,
  trainY,
  testY,
  index,
  lambda
){
  p = dim(trainX)[2]
  K = length(unique(trainY))
  Info <- vector("list", K)
  # Find the sample size for each category
  n <- rep(0, K)
  for (k in 1:K){
    n[k] <- nrow(trainX[which(trainY == k), ])
  }
  N <- length(lambda)
  bic.score <- rep(0, N)
  likelihood <- rep(0, N)
  for (j in 1:N){
    cat("The ", j, "-th step in tuning... \n")
    # Step 1: candidate edge sets from JSEM at this penalty.
    Ahat <- JSEM(trainX, trainY, index, lambda=lambda[j])$Ahat
    for (k in 1:K){
      # Entries absent from the estimated adjacency become zero constraints.
      Info[[k]] = zeroInd(Ahat[[k]], 1)$zeroArr
    }
    #In the second step of Joint group lasso, we use 0.1*log(p)/n as the default penalty for each graph.
    fit <- multi.glasso(trainX, trainY, lambda = 0.1*log(p)/n, zero = Info, BIC = T)
    bic.score[j] <- sum(fit$BIC)
    # Held-out negative log-likelihood, accumulated over the K models.
    for (k in 1:K){
      data <- testX[which(testY == k), ]
      empcov <- cov(data)
      # Ridge the empirical covariance until it is reasonably conditioned.
      while (kappa(empcov) > 1e+2){
        empcov = empcov + 0.05 * diag(p)
      }
      likelihood[j] = likelihood[j] + matTr(empcov %*% fit$Omega[[k]]) - log(det(fit$Omega[[k]]))
    }
  }
  out <- list(BIC = bic.score, likelihood = likelihood)
  return(out)
}
##########################################################
## function: stabsel.jsem.R
## purpose: to perform selection of the estimated network based on stability criteria
## Refs: Meinshausen and Buhlmann - Stability selection - JRSSB - 2010
## Arguments:
## X: a list of data matrices
## cnt: the number of subsampling
## lastar: the oracle lambda found in Glasso
##
## Outputs: A list of selected matrices and the actual number of replicates tried.
# Stability selection for JSEM: `cnt` times, split each model's rows into
# random halves, fit JSEM on both halves at the oracle penalty `lastar`,
# and accumulate per-edge selection counts. Returns the count matrices and
# the number of replicates that succeeded.
stabsel.jsem <- function(X, index, cnt, lastar) {
  K = 1
  p = ncol(X)
  # NOTE(review): the code below (lapply(X, nrow), X[[k]]) assumes X is a
  # list of matrices; the single-matrix branch (K = 1) would not satisfy
  # those calls -- confirm callers always pass a list.
  if (is.null(dim(X))) {
    K = length(X)
    p = ncol(X[[1]])
  }
  n = lapply(X, nrow)
  X1 = vector("list",K)
  X2 = vector("list", K)
  sel.mat = vector("list", K)
  for (k in 1:K){
    sel.mat[[k]] = matrix(0, p, p)
  }
  count = 0
  for (i in 1:cnt) {
    model.1 = NULL
    model.2 = NULL
    for (k in 1:K){
      # Random half-split of the k-th model's rows (without replacement).
      ind.1 = sample(seq(1, n[[k]]), n[[k]]/2, F)
      ind.2 = seq(1, n[[k]])[match(seq(1, n[[k]]), ind.1, 0) == 0]
      X1[[k]] = X[[k]][ind.1, ]
      X2[[k]] = X[[k]][ind.2, ]
      model.1 = c(model.1, rep(k, length(ind.1)))
      model.2 = c(model.2, rep(k, length(ind.2)))
    }
    # Fit JSEM on each half; a failed fit skips this replicate entirely.
    tmp.1 = try(JSEM(trainX=do.call(rbind, X1), trainY=model.1, index, lambda=lastar))
    tmp.2 = try(JSEM(trainX=do.call(rbind, X2), trainY=model.2, index, lambda=lastar))
    if (inherits(tmp.1, "try-error") || inherits(tmp.2, "try-error")){
      warning("There might be some error!")
      next;
    }
    # Accumulate the 0/1 adjacencies from both halves.
    for (k in 1:K){
      sel.mat[[k]] = sel.mat[[k]] + tmp.1$Ahat[[k]] + tmp.2$Ahat[[k]]
    }
    count = count + 1
  }
  return(list(mat = sel.mat, count = count))
}
##--------------------------------------------\
## sort.variables
##--------------------------------------------\
# Input: the node-sepcific group index
# e.g.
# index = cbind(do.call(cbind, rep(list(c(1,1,2,2)), 10)), do.call(cbind, rep(list(c(1,2,1,2)), 10)))
# dimension of index: K by p
# Output:
# v.index, index indicating group membership of each variables
# g.index, index for graphs
# x.index, index for columns of the design matrix X
# b.index, index for recovering the beta coefficients to the correct order
##--------------------------------------------\
# Build the index vectors needed to rearrange the block-diagonal design so
# that variables sharing a group are contiguous: v.index (globally unique,
# monotone group labels), g.index (graph membership), x.index (column order
# for the design matrix) and b.index (per-column permutation that restores
# the original coefficient order afterwards).
# NOTE(review): despite the dot in the name this is an ordinary function,
# not an S3 method of sort().
sort.variables <- function(
  index # the group index matrix of p-1 X K
){
  K = nrow(index)
  p = ncol(index) + 1
  # Number of distinct groups in each column.
  len = apply(index, 2, function(x) length(unique(x)) )
  g.index = matrix(rep(1:K, p-1), nrow = K, ncol = p-1)
  x.index = order(c(t(do.call(rbind, rep(list(1:ncol(index)), K)))))
  #initialize the variable index
  v.index = index
  # Offset each column's group labels by the cumulative number of groups in
  # the preceding columns, so labels are unique across columns.
  for (j in 2:ncol(index)) {
    v.index[, j] = v.index[, j] + cumsum(len)[j-1]
  }
  v.index = c(v.index)
  # re-order the variable index so that they are monotone
  new.order = order(v.index)
  v.index = v.index[new.order]
  x.index = x.index[new.order]
  g.index = g.index[new.order]
  b.index = index
  # order(order(x)) is the rank permutation that undoes sorting by x.
  for (j in 1:ncol(index)) {
    b.index[, j] = order(order(index[, j]))
  }
  res = list(v.index = v.index, g.index = g.index, x.index = x.index, b.index = b.index)
  return(res)
}
# Simulate a scale-free network: a Barabasi-Albert preferential-attachment
# graph on p nodes (igraph). Returns the adjacency matrix (A), the igraph
# object (g) and the graph density.
sf.net <- function(
  p, # number of variables
  m = NULL,  # edges added per step (passed through to barabasi.game)
  rho=1      # preferential-attachment power
){
  # generate a graph
  g <- barabasi.game(n = p, power=rho, m = m, directed = F, algorithm = "psumtree")
  adjm <- as.matrix(get.adjacency(g))
  d <- graph.density(g)
  return(list(A = adjm, g = g, density = d))
}
# Turn a symmetric matrix into a positive-definite pair: shift the diagonal
# past the most negative eigenvalue (by zeta), invert via Cholesky, rescale
# the inverse to a correlation matrix, and invert back. Returns list(A, Ainv)
# where A is the inverse of the returned Ainv.
pd <- function(A, zeta=0.1){
  if (any(A != t(A))) {
    stop("This method only works for symmetric A!")
  }
  p <- nrow(A)
  # Zero the diagonal first, then set it to |lambda_min| + zeta so the
  # shifted matrix is strictly positive definite.
  diag(A) <- rep(0, p)
  shift <- abs(min(eigen(A)$values)) + zeta
  diag(A) <- shift
  # Invert, rescale to unit diagonal, and invert the correlation matrix.
  inv_cor <- cov2cor(chol2inv(chol(A)))
  prec <- chol2inv(chol(inv_cor))
  list(A = prec, Ainv = inv_cor)
}
# Symmetrize a matrix: average with its transpose, zero out entries smaller
# than eps in absolute value, and clear the diagonal.
symmetrize <- function(A, eps = 1e-06){
  sym <- 0.5 * (A + t(A))
  sym[abs(sym) < eps] <- 0
  diag(sym) <- 0
  sym
}
##--------------------------------------------\
## function: zeroInd
##--------------------------------------------\
## purpose: Get the indices for which we have external information
## output:
## zeroArr: the 2-column indices for which we will zero out
## zeroMat: the known 0's as a matrix.
## oneMat: the known 1's
##--------------------------------------------\
# Randomly sample a fraction r of the known zero entries and known nonzero
# entries (lower triangle) of a symmetric matrix, for use as external
# constraints. Returns zeroArr (2-column index array), zeroMat and oneMat
# (symmetric 0/1 indicator matrices).
zeroInd <- function(Amat, r, eps=1e-06){
  if (!isSymmetric(Amat)){
    stop("This method only works for symmetric matrix!")
  }
  p <- dim(Amat)[1]
  oneMat <- matrix(0, p, p)
  zeroMat <- matrix(0, p, p)
  # Positions of (near-)nonzero and (near-)zero entries.
  one.pos <- which(abs(Amat)>=eps, arr.ind = TRUE)
  zero.pos <- which(abs(Amat)<eps, arr.ind = TRUE)
  # Keep only the strict lower triangle to avoid double counting.
  zero.pos <- zero.pos[which(zero.pos[,1] > zero.pos[,2]) ,]
  # Sample a fraction r of them (size is truncated to an integer by sample).
  # NOTE(review): unlike the one.pos branch below, a single remaining row
  # here would drop to a vector and break the dim() call -- confirm inputs
  # always have more than one lower-triangle zero.
  sel.zero <- sample(seq(1, dim(zero.pos)[1]), r * dim(zero.pos)[1], replace = FALSE)
  zeroMat[zero.pos[sel.zero, ]] <- 1
  zeroMat <- zeroMat + t(zeroMat)
  zeroArr <- zero.pos[sel.zero, ]
  out <- list()
  out$zeroArr = zeroArr
  out$zeroMat = zeroMat
  if (dim(one.pos)[1] == 0){
    # No nonzero entries at all: the indicator of known ones is empty.
    warning("The matrix is zero!")
    out$oneMat = matrix(0, p, p)
  } else
  {
    one.pos <- one.pos[which(one.pos[,1] > one.pos[,2]) ,]
    # Guard against the single-row case collapsing to a vector.
    if (is.null(dim(one.pos))){
      one.pos = matrix(one.pos, nrow = 1)
    }
    sel.one <- sample(seq(1, dim(one.pos)[1]), r * dim(one.pos)[1], replace = FALSE)
    oneMat[one.pos[sel.one, ]] <- 1
    oneMat <- oneMat + t(oneMat)
    diag(oneMat) <- 0
    out$oneMat = oneMat
  }
  return(out)
}
# Trace of a square matrix: the sum of its diagonal elements.
matTr <- function(z) {
  sum(diag(z))
}
##--------------------------------------------\
# multi.glasso
##--------------------------------------------\
#Purpose: to estimate multiple adjacency matrices using graphical lasso
# Input:
# trainX, a data matrix sorted by group label, n by p
# trainY, a n-dim vector indicating model membership of trainX
# lambda, a scalar as the penalty parameter in each glasso problem
# zero = NULL, entries of inverse covariance matrix to be constrained to zero. a list of matrices indicating the constraints for glasso.
# It's mainly used for our method.
# BIC = FALSE, whether to calculate the bic.score.
# eps = 1e-06
# Output: estimated adjacency matrices; and precision matrices
##--------------------------------------------\
# Estimate K adjacency/precision matrices, one per class in trainY, by
# running glasso on each class's (ridge-conditioned) empirical covariance.
# `zero` optionally constrains entries of each precision matrix to zero;
# BIC = TRUE additionally returns a per-class BIC score.
multi.glasso <- function(
  trainX,
  trainY,
  lambda,
  zero = NULL,
  BIC = FALSE,
  eps = 1e-06
){
  p = dim(trainX)[2]
  K = length(unique(trainY))
  n = as.numeric(table(trainY))
  #penalty needed for glasso
  # Recycle a scalar lambda to one value per class.
  if (length(lambda)==K) {rho = lambda} else {
    rho = rep(lambda, K)}
  #Initialize the estimated precision, partial correlation and adjacency matrix
  Omega.hat = vector("list", K)
  Theta = vector("list", K)
  Ahat = vector("list", K)
  #Whether there are entries that need to be constrained to zero
  if (is.null(zero)){
    zero = rep(list(zero), K)
  }
  # A constraint list covering every off-diagonal entry would force an
  # entirely zero precision matrix.
  if (max(sapply(zero, length)) == p*(p-1)){
    stop("One or more matrices are constrained to be zero")
  }
  bic.score = rep(0, K)
  for (k in 1:K) {
    Ahat[[k]] = matrix(0, p, p)
    data <- trainX[which(trainY == k), ]
    empcov <- cov(data) #empirical cov
    # Ridge the covariance until its condition number is acceptable.
    while (kappa(empcov) > 1e+2){
      empcov = empcov + 0.05 * diag(p)
    }
    fit <- glasso(empcov, rho = rho[k], zero = zero[[k]], penalize.diagonal=FALSE, maxit = 30)
    # Symmetrize the returned precision estimate.
    Omega.hat[[k]] = (fit$wi + t(fit$wi))/2
    # Scale to partial correlations: D^{-1/2} Omega D^{-1/2}.
    Theta[[k]] <- diag(diag(Omega.hat[[k]])^(-0.5)) %*% Omega.hat[[k]] %*% diag(diag(Omega.hat[[k]])^(-0.5))
    # 0/1 adjacency from entries above eps; no self-loops.
    Ahat[[k]][abs(Omega.hat[[k]])>eps] = 1
    diag(Ahat[[k]]) = 0
    if (BIC){
      # Gaussian BIC: fit term plus log(n)/2n penalty per selected edge.
      bic.score[k] = matTr(empcov %*% Omega.hat[[k]]) - log(det(Omega.hat[[k]])) + log(n[k]) * sum(Ahat[[k]])/(2*n[k])
    }
  }
  out = list(Omega = Omega.hat, Theta = Theta, Adj = Ahat, BIC = bic.score, lambda = lambda)
  return(out)
}
|
1ae289329bb7c132a2f1019a84d01d62c5647e7b
|
7529e25aa98bc5ea03ec5427516799285b8c646a
|
/man/reduce_dim.Rd
|
16c3e22e44706b4da48ed28d3d07681a37161ace
|
[] |
no_license
|
froh/plyr
|
62e35204f4031ff5389eba7739774e851786bef3
|
a6ad59c728305b623cd2ca52c2222e4baa2d7ef9
|
refs/heads/master
| 2021-01-24T00:09:23.835411
| 2011-03-04T09:41:12
| 2011-03-04T09:41:12
| 1,167,963
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 204
|
rd
|
reduce_dim.Rd
|
\name{reduce_dim}
\alias{reduce_dim}
\title{Reduce dimensions.}
\usage{reduce_dim(x)}
\description{
Reduce dimensions. Remove extraneous dimensions
}
\keyword{internal}
\arguments{
\item{x}{array}
}
|
6c8ae1e7b8f87f3966e79fd6780605b4c99e602b
|
5efec7ecbe1240bc4bcab23a7a6b696350348082
|
/man/rapid_latlon.Rd
|
2f5a9315587589af5c799501522d1ecb0a66d471
|
[
"MIT"
] |
permissive
|
italocegatta/rapidr
|
43beba88a5f4e94db9752d1f364bdce7befeeea1
|
874cb57e6659aef365be33bc6b7adb9f25836764
|
refs/heads/master
| 2020-12-05T13:01:07.753979
| 2017-03-17T14:39:02
| 2017-03-17T14:39:02
| 66,865,009
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 574
|
rd
|
rapid_latlon.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_info.R
\name{rapid_latlon}
\alias{rapid_latlon}
\title{Get the central latitude and longitude of the tile}
\usage{
rapid_latlon(xml)
}
\arguments{
\item{xml}{An XML document}
}
\value{
A SpatialPoints of central coordinates from Rapideye tile
}
\description{
Get the central latitude and longitude of the tile
}
\examples{
library(xml2)
xml.file <- list.files(
system.file("extdata", package="rapidr"),
pattern = ".xml", full.names = TRUE
)
x <- read_xml(xml.file)
rapid_latlon(x)
}
|
e62116ca6dadebeefc2bd5c21b80650b427746b2
|
036983f65dc517593f84d44cb14a712ea0687225
|
/homeworks/homework_10/kacper_grzymkowski/ui.R
|
046e2bf36cc2d8200e08579f405b387cb16a140a
|
[] |
no_license
|
iketutg/2021Z-DataVisualizationTechniques
|
027c0228ed95e37ddd73500909117449f6b87e25
|
125077c8b02426b779e351324c3c6d4e96ad64c1
|
refs/heads/master
| 2023-03-19T08:59:42.177496
| 2021-02-23T20:08:31
| 2021-02-23T20:08:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,918
|
r
|
ui.R
|
library(shiny)
# UI for the TIMSS data explorer: grade-dependent ranking-range sliders,
# grade/type selectors and a histogram bin-count slider in the sidebar;
# the ranking and distribution plots in the main panel.
# Fix: removed trailing commas that passed empty arguments into
# conditionalPanel()/sidebarPanel(), which error when shiny collects `...`.
shinyUI(fluidPage(
    titlePanel("TIMSS data"),
    sidebarLayout(
        sidebarPanel(
            # Range slider shown only for 4th grade (max 64 entries).
            conditionalPanel(
                condition = "input.grade == 4",
                sliderInput("top4",
                            "Show how many:",
                            min = 1,
                            max = 64,
                            value = c(0, 10)
                )
            ),
            # Hint shown when the selected 4th-grade range is wide
            # (JavaScript condition, so indices are 0-based).
            conditionalPanel(
                condition = "(input.top4[1] - input.top4[0] > 20) && (input.grade == 4)",
                h5("For best results, pick ranges smaller than 20")
            ),
            # Range slider shown only for 8th grade (max 46 entries).
            conditionalPanel(
                condition = "input.grade == 8",
                sliderInput("top8",
                            "Show how many:",
                            min = 1,
                            max = 46,
                            value = c(0, 10)
                )
            ),
            conditionalPanel(
                condition = "(input.top8[1] - input.top8[0] > 20) && (input.grade == 8)",
                h5("For best results, pick ranges smaller than 20")
            ),
            radioButtons(
                "grade",
                choiceValues = c(4, 8),
                choiceNames = c("4th grade", "8th grade"),
                selected = 4,
                inline = TRUE,
                label = "Choose grade"
            ),
            radioButtons(
                "type",
                choices = c("Math", "Science"),
                selected = "Math",
                inline = TRUE,
                label = "Choose type"
            ),
            sliderInput(
                "bins",
                "Number of bins:",
                min = 1,
                max = 50,
                value = 30
            )
        ),
        mainPanel(
            plotOutput("rankingPlot"),
            plotOutput("distPlot")
        )
    )
))
|
2991b74d5f94f450747c462aac4cae88fc90bcf7
|
a7a822605874425d0e8fc30d10982a3a9e1a3d8e
|
/man/chemr.Rd
|
6e3e956bf687b20b840cb55582d27d2d24542bb1
|
[] |
no_license
|
paleolimbot/chemr
|
c0afb2057a7331ecc5177936ff267fbbeba44acd
|
8283dc34168b5c0fcdefd77987eb1ebde43d0673
|
refs/heads/master
| 2021-09-08T16:40:49.712881
| 2021-09-03T16:26:58
| 2021-09-03T16:26:58
| 100,809,854
| 16
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 211
|
rd
|
chemr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chemr-package.R
\docType{package}
\name{chemr}
\alias{chemr}
\alias{chemr-package}
\title{chemr.}
\description{
Chem handling in R
}
|
095dc81cb7421da6df95ba16b90db4f2c090718e
|
132c59dbaf3c26fdf505da2dfef8f95cb0be061a
|
/cachematrix.R
|
6ebae743142026731c6d09555c6593db3c4d7349
|
[] |
no_license
|
DivyaAggarwal10/ProgrammingAssignment2
|
c693f9b9284ea15075ad7c1685fc14573f968d6e
|
985cf17032bd867ceb93886e1139d2c6f889fba0
|
refs/heads/master
| 2020-04-10T17:33:08.424798
| 2018-03-07T19:52:48
| 2018-03-07T19:52:48
| 124,282,982
| 0
| 0
| null | 2018-03-07T19:18:38
| 2018-03-07T19:18:37
| null |
UTF-8
|
R
| false
| false
| 1,236
|
r
|
cachematrix.R
|
## makeCacheMatrix: This function creates a special "matrix" object that
## can cache its inverse.
## makeCacheMatrix will accept matrix as an input
# Wrap a matrix together with a cache slot for its inverse.
# Returns a list of accessors: set/get for the matrix itself and
# setinverse/getinverse for the cached inverse. Replacing the matrix via
# set() invalidates the cached inverse.
makeCacheMatrix <- function(x) {
  cached_inv <- NULL
  list(
    set = function(y) {
      # New matrix: store it and drop the stale cached inverse.
      x <<- y
      cached_inv <<- NULL
    },
    get = function() x,
    setinverse = function(inv) cached_inv <<- inv,
    getinverse = function() cached_inv
  )
}
## cacheSolve: This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already been
## calculated (and the matrix has not changed), then the cachesolve should
## retrieve the inverse from the cache.
## cacheSolve: Return the inverse of the special "matrix" created by
## makeCacheMatrix, computing it only on the first call and serving the
## cached value on subsequent calls (until set() invalidates the cache).
cacheSolve <- function (x, ...) {
  ## Serve the cached inverse when one has already been computed.
  inverse <- x$getinverse()
  if (!is.null(inverse)) {
    message ("cached data")
    return(inverse)
  }
  ## Cache miss: compute the inverse and store it for later calls.
  data <- x$get()
  ## Fix: forward `...` to solve(); previously the extra arguments the
  ## signature accepts were silently dropped.
  inverse <- solve(data, ...)
  x$setinverse(inverse)
  return(inverse)
}
|
84a06bd3b7b8822a625144457e69769657cdc688
|
0d0ab985be679678a2ab3ac90c6532645c982af3
|
/R/addMetaData.R
|
2019bd1dd2cb7adf60401e84a4468ff3da76f7c5
|
[] |
no_license
|
thl-mjv/euromomo
|
38292686c19f08756a14c062feb6eea6d4a9b2e4
|
bbd46d4c13c843fda817bbf6c6eb7167f012fd28
|
refs/heads/master
| 2021-01-19T08:16:21.464050
| 2015-03-02T08:44:38
| 2015-03-02T08:44:38
| 24,886,718
| 1
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,197
|
r
|
addMetaData.R
|
#' Function to add meta data.
#'
#' @param df A \code{data.frame} containing the model output.
#' @param groupName A string containing the name of the group.
#' @param groupOptions A vector of strings containing the group specific options.
#' @note There is a lot of redudant data here!
#' @export
addMetaData <- function(df, groupName, groupOptions) {
  # Extract the global euromomo options.
  opts <- getOption("euromomo")
  # Define which parameters to extract from the option object and extract
  # them into a one-row data frame (one column per option).
  varNames <- c("Country","Counties","Institution","BaselineSeasons","StartDelayEst",
                "DayOfAggregation","nWorkdays")
  varNamesValues <- as.data.frame(t(sapply(varNames, function(name) opts[[name]])),stringsAsFactors=FALSE)
  # Group specific information.
  # Fix: was the undefined variable `groupOpts` (the parameter is
  # `groupOptions`), and `stringsAsFactor` was misspelled, so it was
  # silently ignored as an unused argument.
  groupNameValues <- as.data.frame(t(groupOptions), stringsAsFactors = FALSE)
  # Prepend the metadata columns (recycled across rows) to the model output.
  tmp <- cbind(varNamesValues, group.name = groupName, group = groupNameValues, df)
  # Done.
  return(tmp)
}
# Country=Denmark
# Counties=Seeland,Jylland
# Institution=ISS
# WorkDirectory=.
# InputFile=../data/DoD_DoR.txt
# HolidayFile=../data/holidays.txt
# BaselineSeasons=5
# StartDelayEst=2008-W01
# DayOfAggregation=2013-01-03
# nWorkdays=5
|
178a6986d2188ba3241be685371948db81af98e5
|
ec213b23bf4dcba4243ef834235f2b8352c3f500
|
/man/discover_meiotic_recombination.Rd
|
d9d292f346ff4665501d1f5898960b62bcc074ca
|
[] |
no_license
|
mccoy-lab/rhapsodi
|
941eaa317f7c5e83a0c15bfbf03c729a389459d6
|
8a5d712b1eb500594ac75428aa8dd94494bf81f3
|
refs/heads/master
| 2023-04-12T15:18:32.125743
| 2022-07-25T21:30:28
| 2022-07-25T21:30:28
| 328,792,330
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,372
|
rd
|
discover_meiotic_recombination.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/discover_meiotic_recombination.R
\name{discover_meiotic_recombination}
\alias{discover_meiotic_recombination}
\title{A function to drive and report meiotic recombination breakpoint finding}
\usage{
discover_meiotic_recombination(
original_gamete_data,
complete_haplotypes,
filled_gamete_data_list,
positions,
smooth_crossovers = TRUE,
smooth_imputed_genotypes = FALSE,
sampleName = "sampleT",
chrom = "chrT",
threads = 2
)
}
\arguments{
\item{original_gamete_data}{original sparse gamete data matrix}
\item{complete_haplotypes}{dataframe of phased diploid donor genotypes in two columns, each column corresponding with a haplotype from the donor}
\item{filled_gamete_data_list}{the output list from \code{impute_gamete_genotypes} which contains each gamete data matrix with haplotype info from the HMM and \code{fill_NA} functions}
\item{positions}{the genomic positions corresponding to SNP indices}
\item{smooth_crossovers}{boolean, default is TRUE, whether to use smoothed data for recombination finding. If \code{TRUE}, doesn't replace with original reads}
\item{smooth_imputed_genotypes}{boolean, default is FALSE, whether to use smoothed data for ending genotypes. If \code{TRUE}, doesn't replace with original reads}
\item{sampleName}{sample name of sample given to rhapsodi, default is "sampleT"}
\item{chrom}{chromosome of sample given to rhapsodi, default is "chromT"}
\item{threads}{an integer, default = 2, the number of cores to use when we use mclapply or the like}
}
\value{
recomb_breaks a dataframe, specifying the predicted recombination breakpoints for each gamete
}
\description{
This function takes as input two booleans controlling whether the gamete haplotypes and genotypes remain smoothed before crossover discovery (directly from HMM)
or unsmoothed (replacing inferred HMM state with the original reads if they disagree)
Then the function runs recombination finding
It offers the option to avoid oversmoothing by superimposing initial haplotype assignments over each gamete. For example, if an
allele assigned to h1 was changed by the model to h2, the early functions can fill the NAs to h2, but then un-smoothing
will replace the singular h1 at the correct allele. This singular h1 could be an example of gene conversion or non-crossover.
}
|
b01c8d2211c48d91b61cdeb0e9c98022d8edb76e
|
a34c74086329dfd2aa7f8ad588e07f5bc7c05870
|
/scripts/wtc_metdata.R
|
ce1329bac141c2a8a34aa0eee8aa9b618cb4ac2d
|
[] |
no_license
|
CourtneyCampany/WTC3_tree
|
cbc1a0813edf337eba367d428974588f85bc440a
|
500c06a5c134fb6b419901b8708b26f22232c733
|
refs/heads/master
| 2020-04-09T18:37:49.332701
| 2018-09-12T14:49:24
| 2018-09-12T14:49:24
| 30,519,484
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,712
|
r
|
wtc_metdata.R
|
library(plyr)
library(devtools)
library(HIEv)
library(doBy)
setToken("u2xEk2wTte3AsdBxGTr5")
#Search HIEv for the ROS weather station data during the pot experiment
wtc_search <- searchHIEv(filename="wTCMET")
wtc_search2 <- searchHIEv(filename="OUTMET")
met_names <-
###download both sets met files (PPFD oustide chamber and AIR and )
oct_met <- downloadCSV(filename="WTC_TEMP_CM_WTCMET_20131001")
nov_met <- downloadCSV(filename="WTC_TEMP_CM_WTCMET_20131101")
dec_met <- downloadCSV(filename="WTC_TEMP_CM_WTCMET_20131201")
jan_met <- downloadCSV(filename="WTC_TEMP_CM_WTCMET_20140101")
feb_met <- downloadCSV(filename="WTC_TEMP_CM_WTCMET_20140201")
mar_met <- downloadCSV(filename="WTC_TEMP_CM_WTCMET_20140301")
apr_met <- downloadCSV(filename="WTC_TEMP_CM_WTCMET_20140401")
oct_met2 <- downloadCSV(filename="WTC_TEMP_CM_OUTMET_20131001")
nov_met2 <- downloadCSV(filename="WTC_TEMP_CM_OUTMET_20131101")
dec_met2 <- downloadCSV(filename="WTC_TEMP_CM_OUTMET_20131201")
jan_met2 <- downloadCSV(filename="WTC_TEMP_CM_OUTMET_20140101")
feb_met2 <- downloadCSV(filename="WTC_TEMP_CM_OUTMET_20140201")
mar_met2 <- downloadCSV(filename="WTC_TEMP_CM_OUTMET_20140301")
apr_met2 <- downloadCSV(filename="WTC_TEMP_CM_OUTMET_20140401")
chamber_met <- list(oct_met, nov_met, dec_met, jan_met, feb_met, mar_met, apr_met)
outside_met <- list(oct_met2, nov_met2, dec_met2, jan_met2, feb_met2, mar_met2, apr_met2)
##function to keep datetime, Tair, RH----------------------------------------------------------------------------------
vars_func <- function(x) {
dat<- x[ , c("chamber", "RH_al","DateTime", "Tair_al", "PPFD_Avg")]
dat$PPFD_Avg <- ifelse(dat$PPFD_Avg < 0, 0, dat$PPFD_Avg)
dat$ppfd_mol <- dat$PPFD_Avg/1000000
dat$PPFD15_mol_s <- dat$ppfd_mol*15*60
dat$Timestamp <- ymd_hms(dat$DateTime)
dat$Date <- as.Date(dat$Timestamp)
dat2 <- dat[, c("chamber", "DateTime", "RH_al", "Tair_al", "PPFD15_mol_s", "Date")]
return(dat2)
}
chams_met <- lapply(chamber_met, vars_func)
chams_met2 <- rbind.fill(chams_met)
library(plantecophys)
##use ecophys to conver rh to vpd
chams_met2$VPD <- RHtoVPD(chams_met2$RH_al, chams_met2$Tair_al)
###function to keep PPFD-----------------------------------------------------------------------------------------------
ppfd_fun <- function(x) {
dat<- x[ , c("DateTime", "PAR")]
dat$PAR <- ifelse(dat$PAR < 0, 0, dat$PAR)
dat$ppfd_mol <- dat$PAR/1000000
dat$PPFD15_mol_s <- dat$ppfd_mol*15*60
dat$Timestamp <- ymd_hms(dat$DateTime)
dat$Date <- as.Date(dat$Timestamp)
dat2 <- dat[,c("DateTime", "Date", "PPFD15_mol_s")]
return(dat2)
}
outside_ppfd <- lapply(outside_met, ppfd_fun)
chams_ppfd2 <- rbind.fill(outside_ppfd)
###get total daily par for each chamber and outside chamber and then save as a dataframe
PPFD_outside <- summaryBy(PPFD15_mol_s~Date, data=chams_ppfd2, FUN=sum, keep.names=TRUE)
names(PPFD_outside)[2] <- "PPFD_day"
PPFD_chamber <- summaryBy(PPFD15_mol_s~Date+chamber, data=chams_met2, FUN=sum, keep.names=TRUE)
names(PPFD_chamber)[3] <- "PPFD_day"
#with(PPFD_outside, plot(Date, PPFD15_mol_s, type = "l"))
with(PPFD_chamber, plot(Date, PPFD15_mol_s, col=chamber))
write.csv(PPFD_outside, "calculated_data/PPFD_outside.csv", row.names=FALSE)
write.csv(PPFD_chamber, "calculated_data/PPFD_chamber.csv", row.names=FALSE)
###air temp, and VPD inside chamber
met_chamber <- summaryBy(VPD+Tair_al~Date+chamber, data=chams_met2, FUN=c(min, max, mean))
write.csv(met_chamber, "calculated_data/met_chamber.csv", row.names=FALSE)
with(met_chamber, plot(Date, Tair_al.max, type = "l", col="red"))
with(met_chamber, points(Date, Tair_al.min, type = "l", col="blue"))
|
8f557a6b9b94dede9a8db47013f78290ddfa27b1
|
0e4887e4c79f3d959ddac08a365c7a31310c2ab1
|
/ch09/ch09_script.R
|
b1cb820b89c11b23c99c99e3467e35aa663d11e5
|
[] |
no_license
|
tpdhfl/Statistical_Analysis_R
|
8aa3de8f75f8f96ab91b220e5400f1192d8b8b6c
|
c22ee692a229b2006dde18aae90ed00d35ff21b0
|
refs/heads/master
| 2022-01-15T04:22:54.432688
| 2019-07-12T06:42:08
| 2019-07-12T06:42:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,110
|
r
|
ch09_script.R
|
setwd("D:/Workplace/Statistical_Analysis_R/ch09")
hf <- read.table("http://www.randomservices.org/random/data/Galton.txt",
header=T,stringsAsFactors=F)
str(hf)
hf$Gender <- factor(hf$Gender,levels=c("M","F"))
str(hf$Gender)
hf.son <- subset(hf,Gender=="M")
hf.son <- hf.son[c("Father","Height")]
str(hf.son)
plot(hf.son$Father,hf.son$Height,xlab="아버지의 키",
ylab="아들의 키",main="아버지와 아들의 키")
abline(v=mean(hf.son$Father),col=2,lty=2)
abline(h=mean(hf.son$Height),col=2,lty=2)
# Ex 9-1
f.mean <- mean(hf.son$Father)
s.mean <- mean(hf.son$Height)
cov.num <- sum((hf.son$Father-f.mean)*(hf.son$Height-s.mean))
(cov.xy <- cov.num/(nrow(hf.son)-1))
cov(hf.son$Father,hf.son$Height)
(r.xy <- cov.xy/(sd(hf.son$Father)*sd(hf.son$Height)))
cor(hf.son$Father,hf.son$Height)
# Ex 9-2
mean.x <- mean(hf.son$Father)
mean.y <- mean(hf.son$Height)
sxy <- sum((hf.son$Father-mean.x)*(hf.son$Height-mean.y))
sxx <- sum((hf.son$Father-mean.x)^2)
(b1 <- sxy/sxx)
(b0 <- mean.y-b1*mean.x)
(res <- lm(Height~Father,data=hf.son))
summary(res)
anova(res)
str(res)
names(res)
par(mfrow=c(2,2))
plot(res)
par(mfrow=c(1,1))
qqnorm(res$residuals) ; qqline(res$residuals)
shapiro.test(res$residuals)
plot(hf.son$Father,res$residuals,
ylab="Residuals",xlab="Father's height",main="Residual analysis")
abline(0,0)
install.packages("lmtest")
library(lmtest)
(res2 <- dwtest(res))
names(res2)
# Ex 9-3
women <- women
str(women)
attach(women)
plot(height,weight,type="p",col="blue",lwd=2,main="Women data")
(fit <- lm(weight~height,data=women))
summary(fit)
cor.test(weight,height)
plot(weight~height,data=women)
abline(fit,col="red")
title(expression(italic(weight==3.45%*%height-87.52)))
par(mfrow=c(2,2))
plot(fit)
par(mfrow=c(1,1))
shapiro.test(fit$residuals) # 정규성
install.packages("gvlma")
library(gvlma)
(gvmodel <- gvlma(fit)) # 선형성
summary(gvmodel)
(fit2 <- lm(weight~height+I(height^2),data=women))
summary(fit2)
par(mfrow=c(2,2))
plot(fit2)
par(mfrow=c(1,1))
plot(weight~height,data=women)
lines(height,fitted(fit2),col="green")
title(expression(italic(weight==-7.348%*%height+0.083%*%height^2+261.878)))
(newfit <- lm(weight~ height + I(height^2), data=women[-c(13,15),]))
summary(newfit)
AIC(fit,fit2)
# 참고 : https://rstudio-pubs-static.s3.amazonaws.com/190997_40fa09db8e344b19b14a687ea5de914b.html
## Multiple
state.x77 <- state.x77
states <- as.data.frame(state.x77[,c("Murder","Population",
"Illiteracy","Income","Frost")])
(fit1 <- lm(Murder~.,data=states))
summary(fit1)
par(mfrow=c(2,2))
plot(fit1)
par(mfrow=c(1,1))
summary(gvlma(fit1))
shapiro.test(fit1$residuals)
install.packages("car")
library(car)
vif(fit1) # VIF
sqrt(vif(fit1))>2
(fit2 <- lm(Murder~Population+Illiteracy,data=states))
summary(fit2)
AIC(fit1,fit2)
step(fit1,direction="backward") # backward elimination
fit3 <- lm(Murder~1,data=states)
step(fit3,direction="forward",
scope=~Population+Illiteracy+Income+Frost) # forward selection
step(fit3,direction="forward",
scope=list(upper=fit1,lower=fit3))
step(fit1,direction="both") # stepwise regression
install.packages("leaps")
library(leaps)
subsets1 <- regsubsets(Murder~.,data=states,
method='seqrep',nbest=4) # all possible regression
summary(subsets1)
plot(subsets1)
subsets2 <- regsubsets(Murder~.,data=states,
method='exhaustive',nbest=4)
summary(subsets2)
plot(subsets2)
require(car)
subsets(subsets1,statistic="cp",main="Cp Plot for All Subsets Regression")
abline(1,1,lty=2,col="red")
subsets(subsets2,statistic="cp",main="Cp Plot for All Subsets Regression")
abline(1,1,lty=2,col="blue")
## Logistic
data <- read.csv("http://stats.idre.ucla.edu/stat/data/binary.csv")
str(data)
head(data)
data$rank <- as.factor(data$rank)
str(data)
train <- data[1:200,]
test <- data[201:400,]
model1 <- glm(admit~.,data=data,family="binomial")
summary(model1)
model2 <- glm(admit~gpa+rank,data=data,family="binomial")
summary(model2)
AIC(model1,model2)
par(mfrow=c(2,2))
plot(model1)
par(mfrow=c(1,1))
|
b997aebab76b2e10c441bace445e7d2a2ad77ed4
|
8a6264c16e1944bb6f254072d50eb925e64bf52e
|
/goseq_combined_gallus.R
|
a4155219ffb0f021d8538ed246e133910d73636a
|
[] |
no_license
|
likit/RNASeq-methods-comparison
|
52fcc2ccfff3bf94d3c24b57baad0bc5569f59fe
|
26b8e67182ee7d2132fd790aa75ec9d49f02ac8e
|
refs/heads/master
| 2020-03-28T18:36:26.998307
| 2014-07-30T05:28:18
| 2014-07-30T05:28:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,573
|
r
|
goseq_combined_gallus.R
|
library(goseq)
library(org.Gg.eg.db)
library(KEGG.db)
library(biomaRt)
degenes.table<-read.table('line7u_vs_i.gimme.degenes.fdr.05.tophits.gga',
stringsAsFactors=F, sep="\t", header=T)
colnames(degenes.table)<-c("SeqId", "geneID")
annots<-select(org.Gg.eg.db, keys=degenes.table$geneID,
columns=c("SYMBOL","ENTREZID", "PATH"),
keytype="ENSEMBL")
annotated.degenes<-merge(degenes.table, annots,
by.x="geneID", by.y="ENSEMBL")
# remove duplicated Entrez ID
uniq.annotated.degenes<-annotated.degenes[
!duplicated(annotated.degenes$geneID),]
mart<-useMart(biomart="ensembl", dataset="ggallus_gene_ensembl")
allgenes<-getBM(attributes='ensembl_gene_id', mart=mart)
allgenes<-allgenes$ensembl_gene_id
gene.vector<-as.integer(allgenes%in%degenes.table$geneID)
names(gene.vector)<-allgenes
pwf=nullp(gene.vector, 'galGal4', 'ensGene')
kegg = goseq(pwf, "galGal4", "ensGene", test.cats="KEGG")
# Adjust P-value using BH method
kegg$padjust = p.adjust(kegg$over_represented_pvalue, method="BH")
# Get pathway names for significant patways
kegg.sig = kegg[kegg$padjust<0.05,]
pathway = stack(mget(kegg.sig$category, KEGGPATHID2NAME))
kegg.sig$pathway = pathway$values
write.table(kegg.sig, 'line7u_vs_i.gimme.degenes.gga.kegg.txt',
sep='\t', row.names=F, quote=F)
write.table(uniq.annotated.degenes[!is.na(uniq.annotated.degenes$PATH),],
'line7u_vs_i.gimme.degenes.gga.kegg.id.txt',
sep='\t', row.names=F, col.names=F, quote=F)
|
22868c98d5c80aa85ef136a5383a5d964677c240
|
cd8389e43d345e4daee9bea5a0d0282e6684944a
|
/02R_Programming/week3/rankall.R
|
c50e0466751d222527642fc4215ef2d35c3b8776
|
[] |
no_license
|
gooneraki/DataScienceSpecialization
|
49a4b89451ef70d681bbd0d21fb91e72aecde6cb
|
2e0d34b1329b3dcc17522aa80ee5a47cf405a98c
|
refs/heads/master
| 2021-01-10T10:54:05.418801
| 2015-11-24T17:48:32
| 2015-11-24T17:48:32
| 46,735,657
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,619
|
r
|
rankall.R
|
rankall <- function(outcome, num = "best") {
## Read outcome data
data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
## Check that state and outcome are valid
if (outcome != "heart attack" && outcome != "heart failure" && outcome != "pneumonia" ) {
stop("invalid outcome") }
## For each state, find the hospital of the given rank
temp_data <- data.frame(matrix(ncol=3))
allstates <- unique(data$State)
output <- numeric()
result <- data.frame(Hospital.Name=character(0), State=character(0))
for (state in allstates) {
if (outcome == "heart attack") {
temp_data <- data[data[,7]==state, c(2,7,11)]
} else if (outcome == "heart failure") {
temp_data <- data[data[,7]==state, c(2,7,17)]
} else if (outcome == "pneumonia") {
temp_data <- data[data[,7]==state, c(2,7,23)] }
temp_data[,3] <- as.numeric(temp_data[,3])
temp_data <- temp_data[complete.cases(temp_data),]
temp_data <- temp_data[order(temp_data[,3],temp_data[,1]),]
if (num=="best") {
result <- rbind(result,temp_data[1,1:2])
} else if (num=="worst") {
result <- rbind(result,temp_data[nrow(temp_data),1:2])
} else {
output <- as.numeric(num)
result <- rbind(result,temp_data[output,1:2])
}
}
## Return a data frame with the hospital names and the
## (abbreviated) state name
result <- result[order(result[,2]),]
names(result) <- c("hospital","state")
result
}
|
7b7df17bf6a47c08bebdd3415060dbc07051cfb7
|
7df519d91bf590fb7cfc928fb9ced401453749f5
|
/plot1.R
|
156ecf6f84fb6f76bb301e4040afc0da164a8aaf
|
[] |
no_license
|
jonesdd/ExData_Plotting1
|
41675bbb8863dd6abc33f5630b74e51d0df3a1c2
|
64c152808f4d56533da05b04838522016f85e326
|
refs/heads/master
| 2021-01-17T17:44:34.763072
| 2015-05-07T23:34:06
| 2015-05-07T23:34:06
| 35,168,061
| 0
| 0
| null | 2015-05-06T15:47:30
| 2015-05-06T15:47:29
| null |
UTF-8
|
R
| false
| false
| 1,147
|
r
|
plot1.R
|
# plot1.R
# This file contains the R code to produce plot 1.
#first I download the file to the curent working directory and save it as temp.zip.
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",destfile = "temp.zip", method = "curl")
#with the file saved i sue unz to upzip the file and extract the txt file household_power_consumption.txt, using read.table I save the data from the text file into raw.data.
raw.data<-read.table(unz("temp.zip","household_power_consumption.txt"),sep = ";", header = TRUE, stringsAsFactors = FALSE)
#next I i filter out the data for 1/2/2007 and 2/2/2007 and store it in power.data.
power.data<-raw.data[(raw.data$Date =="1/2/2007" | raw.data$Date =="2/2/2007"),]
#convert the Global_active_power data from character to numeric
power.data$Global_active_power<-as.numeric(power.data$Global_active_power)
#create plot 1 in the screen graphic device.
hist(power.data$Global_active_power,col="Red", xlab = "Global Active Power (kilowatts", main="Global Active Power")
#copy the histogram to a png file called plot1.png.
dev.copy(png, file="plot1.png")
dev.off()
|
89b21542ba8cecd99c9f8f84a0e4aad825170d7c
|
837a3177789464eabb12b7abfb12e8621feb71fb
|
/(01)Setting_up/postBurnin_B0.R
|
92302be576647d46e4e6cfeec304f5721752b3a0
|
[] |
no_license
|
mcgregorv/AtlantisRscripts
|
21140225d43ba583a1bebc70557c8cb5f61b9b5c
|
1009f0d1961fc95bc4a98d25eea7dc1d7cccee77
|
refs/heads/master
| 2020-07-11T14:20:34.142538
| 2019-08-26T22:22:13
| 2019-08-26T22:22:13
| 204,566,512
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,039
|
r
|
postBurnin_B0.R
|
#read in nc file, and grab the biomass' for all groups after burnin
#for those groups that have B0 estimated prior to model run (with some confidence) read these in and compare
#plot all tracers for a given box and layer
this_run<-"base"
this_out<-"BASEH3"
## all boxes
nboxes<-30
boxIndex<-seq(1,nboxes)
mg_2_tonne<-2e-8; X_CN<-5.7
burnin<-35 #number of years to skip
this_path<-paste(DIR$'Base',"ATLANTISmodels\\",this_run,"\\",sep="")
outPath<-paste(this_path,"output",this_out,"\\",sep="")
#read in tracers
ThisNC.nc<-nc_open(paste(outPath,"output.nc",sep=""))
thisVol<-ncvar_get(ThisNC.nc,"volume") #used in conversion mg per m^3 to biomass
#read in B0's
thisB0df<-read.csv(paste(this_path,"..\\CRAM_B0.csv",sep=""))
groupsDF<-read.csv(paste(this_path,"..\\CRAM_groups.csv",sep="")); ng<-dim(groupsDF)[1]
storeB0postBurnin<-rep(NA, ng)
for(g in 1:ng){
thisCode<-groupsDF$Code[g]; thisName<-str_trim(groupsDF$Name[g], side="both")
thisTracer<-paste(thisName,"_N", sep=""); thisData<-ncvar_get(ThisNC.nc, thisTracer)
if(length(dim(thisData))==3){
#then it is per m^3, so use volume
xx<-apply(thisData*thisVol, 3, sum) * mg_2_tonne *X_CN ## convert to tonnes
} else{
# then it is per m^2, so use area
xx<-apply(thisData * thisVol[nlayers,,], 2, sum) * mg_2_tonne *X_CN ## convert to tonnes
}
storeB0postBurnin[g]<-xx[burnin]
}
outData<-data.frame(cbind("Code"=as.character(groupsDF$Code), "Bburnin"=unlist(storeB0postBurnin)))
outData$B0<-thisB0df$B0
outData$Bburnin<-as.double(as.character(outData$Bburnin))
write.csv(outData, paste(DIR$'Tables',"B0vsBburnin.csv", sep=""), row.names = FALSE)
plot(x=outData$B0, y=outData$Bburnin)
index<-!is.na(outData$B0)
plot(outData$Bburnin[index]/outData$B0[index], pch=20, xaxt="n", ylab="Bburnin/B0", xlab="")
abline(h=1, col="red", lty=2)
par(las=2)
axis(at=seq(1,length(outData$B0[index])), labels=outData$Code[index], side=1)
abline(v=c(10,25), col="grey")
thisArea<-sum(thisVol[6,2:25,1])
|
64cdb65ce4601c63c3e8233bb04ef16c2a02df80
|
fd153dd910f3a9ad29fbaf128b05d675a8270ca8
|
/page_rank.R
|
14540749140e3759b3c2cae22f29aa52f4fb3e6b
|
[] |
no_license
|
zheng-han-kao/R-Programming-practice
|
a9df9b16062c3b5af9858ad022d15bddf98e289c
|
72b579710e5f27441fb8c0c26c601437dd207da4
|
refs/heads/master
| 2021-04-30T08:25:34.965104
| 2018-02-13T14:23:33
| 2018-02-13T14:23:33
| 121,375,046
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 992
|
r
|
page_rank.R
|
data <- read.csv(file = "Pagerank_0525.CSV", header = TRUE)
N <- nrow(data) #有幾個元素
pr <- c(0)
d <- 0.15
#原始分數
for(i in 1:N){
pr[i] <- 1/N
pr[i] <- round(pr[i], digits = 7)
}
#機率矩陣
for(i in 1:ncol(data)){
data[,i] <- data[,i]/sum(data[,i])
}
pagerank <- function(N1, data1, pr1, iter){
pr_tmp <- c(0)
for(x in 1:iter){ #幾次迭代
for(i in 1:N1){
tmp <- 0
for(j in 1:N1){
tmp <- tmp + d*pr1[j]*data1[i,j]
}
pr_tmp[i] <- (1-d)/N1 + tmp #計算分數
pr_tmp[i] <- round(pr_tmp[i], digits = 7)
}
count <- 0
for(y in 1:N1){
if(pr1[y] == pr_tmp[y]){
count <- count + 1
}
}
if(count == N1){
print("converged!!")
}
pr1 <- pr_tmp #取代原本的分數
print(paste("第",x , "次 iteration : ", toString(pr1)))
}
}
pagerank(N, data, pr, 100)
|
1e3ea5558cd18e72f7181b965d9dce6b5445ec19
|
b9ac629b6268d231afb5cc006f973fbe2b9fdd38
|
/Experiment1_Walking/AdditionalAnalysis/GenderEffects/GetGenderInfo.R
|
42c66e7d1f3c0fd012e1389687f9d9cfd3cb5eb4
|
[] |
no_license
|
dlcen/OpticFlowWalkingHeadingJudgements
|
e7bf67a97d9c5e30975173e1197957da799c9d3a
|
b215f3ae01ccb4c181074e7a02fb5c77ba00e452
|
refs/heads/master
| 2020-06-04T23:40:33.381921
| 2020-02-23T20:15:22
| 2020-02-23T20:15:22
| 192,236,318
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 694
|
r
|
GetGenderInfo.R
|
library(data.table); library(plyr)
info.file <- "Data/SbjInfo.csv"
sbj.info <- read.csv(info.file)
sbj.info <- data.table(sbj.info)
sbj.nos <- as.character(unique(sbj.info$Subject.No))
load("Data/meanErr.RData")
load("Data/segData.RData")
trial.meanErr.4m <- data.table(trial.meanErr.4m)
segData_aligned <- data.table(segData_aligned)
trial.meanErr.4m$Gender <- " "
segData_aligned$Gender <- " "
for (this.sbj in sbj.nos){
trial.meanErr.4m[SubjectNo == this.sbj]$Gender <- sbj.info[Subject.No == this.sbj]$Gender
segData_aligned[SubjectNo == this.sbj]$Gender <- sbj.info[Subject.No == this.sbj]$Gender
}
save(trial.meanErr.4m, segData_aligned, file = "Data/GenderAnalysis.RData")
|
0caa37334908c6776ba8f20293030a07a9a8d274
|
20b579e6d28d3d9a7d21fbbba2d04d6eaa8b53fb
|
/R/GenerateData.R
|
2871265b4faa41d0335577c231a7c9e0ae34ac71
|
[] |
no_license
|
fake1884/moRtRe
|
24608504705120e18851ffcdcd940e01b2648748
|
858db79be7b18ab6195e7bfe933b285b2dd7217c
|
refs/heads/master
| 2020-05-17T13:17:34.721406
| 2020-01-20T09:12:04
| 2020-01-20T09:12:04
| 183,732,081
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,651
|
r
|
GenerateData.R
|
# This file contains a function to generate test-data
###########################################################################################
# simple model first
MakeData = function(){
# set parameters
# mu = 77.51859; sigma = 12.36892; estimates from data
mu_data = 77
sigma_data = 12
sigma_eps = 0.002427235
m = 10000 # Anzahl an Datensätzen
Alter = 0:95
# Set up a place to store the data
simple_data = matrix(c(Alter, rep(NA, length(Alter) * (m+1))), ncol = m+2)
# generate errorless data
simple_linear_function = function(x, m, s){1/sqrt(2*pi*s^2)* exp( - (m-x)^2 / (2*s^2))}
simple_data[,2] = simple_linear_function(simple_data[,1], mu_data, sigma_data)
# add errors for each year
set.seed(5)
for(i in 1:m){
obs = simple_data[,2] +
rnorm(n = length(Alter), mean = 0, sd = sigma_eps)
#obs[obs < 0] = 0 # no negative rates
simple_data[,i+2] = obs
}
devtools::use_data(simple_data, overwrite = T)
devtools::use_data(mu_data, overwrite = T)
devtools::use_data(sigma_data, overwrite = T)
##########################################################################################
# A data set with trend generated from a lee-carter model
# set up parameters
Zeitraum = 0:40
Alter = 0:95
sigma_xi = 2.270456
gamma_data = rep(NA, length(Zeitraum))
gamma_data[mean(Zeitraum)+1] = 0
set.seed(100)
for(i in 1:((length(Zeitraum)-1)/2)){
gamma_data[mean(Zeitraum)+1+i] = gamma_data[mean(Zeitraum)+1+i-1] + nu_data +
rnorm(1, mean = 0, sd = sigma_xi)
}
for(i in 1:((length(Zeitraum)-1)/2)){
gamma_data[mean(Zeitraum)+1-i] = gamma_data[mean(Zeitraum)+1-i+1] - nu_data +
rnorm(1, mean = 0, sd = sigma_xi)
}
gamma_data[1:20] = gamma_data[1:20]/sum(gamma_data[1:20]) # normal condition
gamma_data[22:41] = -gamma_data[22:41]/sum(gamma_data[22:41])
gamma_data = gamma_data * 300 # increase signal to noise ratio
plot(Zeitraum, gamma_data, type = "l") #check generation
lines(Zeitraum, rep(0, 41))
gamma_sprung = c(gamma_data[1:20]+1*mean(gamma_data[1:20]),0,
gamma_data[22:41]+1*mean(gamma_data[22:41])) # für den Srung datensatz
gamma_sprung[1:20] = gamma_sprung[1:20]/sum(gamma_sprung[1:20]) # normal condition
gamma_sprung[22:41] = -gamma_sprung[22:41]/sum(gamma_sprung[22:41])
gamma_sprung = gamma_sprung * 300 # increase signal to noise ratio
plot(Zeitraum, gamma_sprung, type = "l") #check generation
lines(Zeitraum, rep(0, 41))
# Set up a place to store the data
m = 10000 # Anzahl an Datensätzen
complex_period_data = matrix(rep(NA, length(Alter)*length(Zeitraum)*(m+3)), ncol = (m+3))
# set first two coloums
for(i in 0:(length(Zeitraum)-1)){
complex_period_data[(1+i*96):(96+i*96),1] = rep(i, length(Alter))
}
complex_period_data[,2] = rep(0:95,length(Zeitraum))
# generate errorless data
org_data = rep(NA, length(Alter)*length(Zeitraum))
for(i in 0:(length(Zeitraum)-1)){
complex_period_data[(1+i*96):(96+i*96),3] = exp(alpha_data + beta_data * gamma_data[i+1])
}
# generate the data
set.seed(10)
sd_rates = 0.05933034
for(i in 1:m){
complex_period_data[,i+3] = exp(log(complex_period_data[,3]) +
rnorm(n = length(Alter) * length(Zeitraum), mean = 0, sd = sd_rates))
}
# plot errorless data, observed data and data with white noise
pdf("../../1 Doku/graphics/SampleDataLee.pdf", width = 10, height = 8)
par(mfrow = c(1,2))
plot(Alter, complex_period_data[(1:96)+(10*96),3], type = "l", ylab = "Sterblichkeit")
lines(Alter, deathrates1965west[(1:96)+(10*96),3], lty = "dashed")
points(Alter, complex_period_data[(1:96)+(10*96),4], pch=4)
legend("topleft", legend=c("errorless", "original"),
col=c("black", "black"), lty=1:2, cex=1.5)
plot(Alter, log(complex_period_data[(1:96)+(10*96),3]), type = "l", ylab = "Sterblichkeit")
lines(Alter, log(deathrates1965west[(1:96)+(10*96),3]), lty = "dashed")
points(Alter, log(complex_period_data[(1:96)+(10*96),4]), pch=4)
legend("topleft", legend=c("errorless", "original"),
col=c("black", "black"), lty=1:2, cex=1.5)
par(mfrow = c(1,1))
dev.off()
# save result
devtools::use_data(complex_period_data, overwrite = T)
devtools::use_data(gamma_data, overwrite = T)
devtools::use_data(nu_data, overwrite = T)
##########################################################################################
# Sprung data
# Set up a place to store the data
complex_period_data_sprung = matrix(rep(NA, length(Alter)*length(Zeitraum)*(m+3)),
ncol = (m+3))
# set first two coloums
for(i in 0:(length(Zeitraum)-1)){
complex_period_data_sprung[(1+i*96):(96+i*96),1] = rep(i, length(Alter))
}
complex_period_data_sprung[,2] = rep(0:95,length(Zeitraum))
# generate errorless data
org_data = rep(NA, length(Alter)*length(Zeitraum))
for(i in 0:(length(Zeitraum)-1)){
complex_period_data_sprung[(1+i*96):(96+i*96),3] =
exp(alpha_data + beta_data * gamma_sprung[i+1])
}
# generate the data
set.seed(10)
sd_rates = 0.05933034
for(i in 1:m){
complex_period_data_sprung[,i+3] = exp(log(complex_period_data_sprung[,3]) +
rnorm(n = length(Alter) * length(Zeitraum),
mean = 0, sd = sd_rates))
}
# save result
devtools::use_data(complex_period_data_sprung, overwrite = T)
devtools::use_data(gamma_sprung, overwrite = T)
}
|
8f893ca264d667f5ecece7de6b00afafc832a4ce
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed_and_cleaned/12222_0/rinput.R
|
7a15567c79e746ab6869fe2f261e9765b060d551
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 137
|
r
|
rinput.R
|
library(ape)
testtree <- read.tree("12222_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="12222_0_unrooted.txt")
|
2af67d31df3173b025ce3b81fbd8e9d0d29b4bea
|
385c15d333d564afcfa207baf88a4e8f51d402be
|
/man/select_target_closing_cricket.Rd
|
6d3a7e56b8b81fcb87d59275e232e989f51f35ee
|
[] |
no_license
|
chringer-git/cricketmodel
|
86d57ff7911e092ffad8cbc01fc5a8765e845728
|
daff6c731fa329e766d1b1ff07d98abc6f836c5c
|
refs/heads/master
| 2020-04-15T23:13:53.437528
| 2019-09-08T17:33:52
| 2019-09-08T17:33:52
| 165,098,714
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,042
|
rd
|
select_target_closing_cricket.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/select_target_closing_cricket.R
\name{select_target_closing_cricket}
\alias{select_target_closing_cricket}
\title{Reasons which shot to select given a closing strategy.}
\usage{
select_target_closing_cricket(gm, shooter_player_id, opponent_player_id)
}
\arguments{
\item{gm}{object created from the set_cricket_game}
\item{shooter_player_id}{either 1 or 2 given the shooter}
\item{opponent_player_id}{either 1 or 2, but should not be the shooter's id}
}
\value{
name of the target
}
\description{
Reasons which shot the shooter will select based on a closing strategy (aka, boxing).
The shooter
(1) checks if behind in points (this is modified so that the shooter can be one mark's worth of points behind)
(2) if not behind, then shoots the highest bed or bull that is open for the opponent
(3) if not behind and the opponent has closed all marks, then shoot the highest bed or bull
(4) if behind, then shooter the highest open bed of the opponent
}
|
63a40f93469a7d4a2ad9864ce3fc22bd9cc03c01
|
80badebbbe4bd0398cd19b7c36492f5ab0e5facf
|
/man/polygons-methods.Rd
|
7a94017b1a754b7b2ed6d44bb4e7f5dcd6852c59
|
[] |
no_license
|
edzer/sp
|
12012caba5cc6cf5778dfabfc846f7bf85311f05
|
0e8312edc0a2164380592c61577fe6bc825d9cd9
|
refs/heads/main
| 2023-06-21T09:36:24.101762
| 2023-06-20T19:27:01
| 2023-06-20T19:27:01
| 48,277,606
| 139
| 44
| null | 2023-08-19T09:19:39
| 2015-12-19T10:23:36
|
R
|
UTF-8
|
R
| false
| false
| 796
|
rd
|
polygons-methods.Rd
|
\name{polygons-methods}
\docType{methods}
\alias{polygons-methods}
\alias{polygons,Spatial-method}
\alias{polygons,SpatialPolygons-method}
\alias{polygons<-,data.frame,SpatialPolygons-method}
\title{ Retrieve polygons from SpatialPolygonsDataFrame object }
\description{
Retrieve polygons from SpatialPolygonsDataFrame object
}
\section{Methods for polygons}{
\describe{
\item{obj = "SpatialPolygons"}{ object of, or deriving from, SpatialPolygons }
\item{obj = "SpatialPolygonsDataFrame"}{ object of, or deriving from,
SpatialPolygonsDataFrame }
}}
\section{Methods for "polygons<-"}{
\describe{
\item{object = "data.frame", value="SpatialPolygons"}{ promote data.frame to object of class
\link{SpatialPolygonsDataFrame-class}, by specifying polygons }
}}
\keyword{methods}
\keyword{spatial}
|
df38204b221b207b9afc09a2175b2a2ca389bcb6
|
8f09774b992fd23052201130a1ce4db3f3e27a53
|
/tests/testthat/test_print.R
|
d353636cb001bbf1f4f9057323cd45cbeec20fa5
|
[
"MIT"
] |
permissive
|
tkonopka/umap
|
77f3ff453181cdbe0844df1b4f9e499f23559121
|
96f077865243434ec94ad1581c7003a5f38545c7
|
refs/heads/master
| 2023-02-09T22:47:49.312192
| 2023-02-01T19:07:40
| 2023-02-01T19:07:40
| 129,778,978
| 133
| 21
|
NOASSERTION
| 2018-11-08T19:03:34
| 2018-04-16T17:12:03
|
R
|
UTF-8
|
R
| false
| false
| 1,488
|
r
|
test_print.R
|
## tests for output of print functions
## (these tests rely on the umap package being attached: umap.defaults,
## umap(), and the print.* S3 methods all come from that package)

## ############################################################################
## Tests for printing config objects

test_that("print complains on non-classed input", {
  # print.umap.config should reject objects that lack the umap.config class
  expect_error(print.umap.config(1:4))
})

test_that("print of config produces output", {
  conf <- umap.defaults
  # print should display a welcome message
  expect_message(print(conf), "umap")
  # print should display up-to-date content in the config object
  conf$seed <- 1234567
  expect_message(print(conf), "1234567")
})

## ############################################################################
## Tests for printing umap objects

test_that("print complains with non-umap input", {
  expect_error(print.umap(1:4))
})

test_that("print display placeholder instead of matrix", {
  # matrix-valued config fields should be shown as a placeholder, not dumped in full
  conf <- umap.defaults
  conf$init <- matrix(0, ncol=2, nrow=4)
  expect_message(print(conf), "matrix")
})

## ############################################################################
## Tests for printing knn information

test_that("print complains with wrong input", {
  expect_error(print.umap.knn(1:5))
})

test_that("print displays summary of umap result", {
  mat <- matrix(1:36, ncol=2)
  # create a fast umap result
  result <- umap(mat, n_epochs=2, n_neighbors=3)
  # check output for overall object
  expect_message(print(result), "umap")
  # check display of knn information
  expect_message(print(result$knn), "k=3")
  expect_message(print(result$knn), "approximate")
})
|
afb7cdfbd53bb7ca65fe90971936d25bde2497be
|
8e52cfdf2db46054104efc466320b4ad0217ea7f
|
/man/transform_claims_and_demographics_and_pharma.Rd
|
1f159076643f1ba981c924fb5d8712fbe6c342b3
|
[] |
no_license
|
jfontestad/hospital-readmission
|
82469e9e60203e11e7fe8d40452a3087ec4adad6
|
67f50933b54cfa22a69e1b2cb7a5a00168f2625e
|
refs/heads/master
| 2023-03-17T00:33:29.619647
| 2019-08-30T03:17:49
| 2019-08-30T03:17:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,645
|
rd
|
transform_claims_and_demographics_and_pharma.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/transform_data.R
\name{transform_claims_and_demographics_and_pharma}
\alias{transform_claims_and_demographics_and_pharma}
\title{Transform claims and demographics and pharma data}
\usage{
transform_claims_and_demographics_and_pharma(data_dir, intermediate_dir,
claims_file, cpt_codes_file, ccs_cpt_file, ccs_bodysystem_file, ccs_icd_file,
eligibility_file, census_file, properties_file, pharma_file, threshold,
lowerbound, upperbound)
}
\arguments{
\item{data_dir}{the path to the directory in which the files are present}
\item{intermediate_dir}{the path to the dir to store intermediate files}
\item{claims_file}{the pattern of claims file}
\item{cpt_codes_file}{the name of cpt codes file}
\item{ccs_cpt_file}{the name of the ccs cpt file}
\item{ccs_bodysystem_file}{the name of the icd to bodysystem file}
\item{ccs_icd_file}{the name of the ccs icd file}
\item{eligibility_file}{the name of eligibility file}
\item{census_file}{the name of census file}
\item{properties_file}{the name of properties_file}
\item{pharma_file}{the name of the pharma file}
\item{threshold}{the threshold for grouping claims}
\item{lowerbound}{the lower bound of readmission interval}
\item{upperbound}{the upper bound of readmission interval}
}
\value{
the data frame with claims, pharmacy claims and demographics
}
\description{
Transform claims and demographics and pharma data
}
\examples{
transform_claims_and_demographics_and_pharma("/home/avinash",
"/intermediate", "claims.csv", "cpt_codes_file.csv", "elig.rds",
"census.csv", "props.csv", "pharma.csv")
}
|
465b25135252265b9f62cea97711a5298973d581
|
3aaf257b3eb138c404f74ba06a2a9861a198f419
|
/man/youden_index.Rd
|
ca390c0a1ca17c522fe3e5edd587dac5c0022ef7
|
[] |
no_license
|
cran/ClinSigMeasures
|
f86d53d1fc6b69588f9454967ce682a753fe3f7b
|
99da607cbf920c25671a57fe2b9ef88d16ea9b35
|
refs/heads/master
| 2023-03-10T20:27:13.800615
| 2021-02-25T08:10:05
| 2021-02-25T08:10:05
| 342,293,709
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 971
|
rd
|
youden_index.Rd
|
\name{youden_index}
\alias{youden_index}
\title{Youden Index Calculation From a 2x2 Table}
\description{Calculates the Youden Index for data from a 2x2 table}
\usage{youden_index(Cell1, Cell2, Cell3, Cell4)}
\arguments{
\item{Cell1}{Value for cases with a positive test}
\item{Cell2}{Value for controls with a positive test}
\item{Cell3}{Value for cases with a negative test}
\item{Cell4}{Value for controls with a negative test}}
\value{Youden Index}
\author{Mike Malek-Ahmadi}
\references{
1. Ruopp MD, Perkins NJ, Whitcomb BW, Schisterman EF. Youden Index and optimal cut-point estimated from observations affected by a lower limit of detection. Biom J 2008;50(3):419-430.
2. Shaikh SA (2011) Measures derived from a 2 x 2 table for an accuracy of a diagnostic test. J Biomet Biostat 2:128
}
\examples{
#From Shaikh (2011), page 3, 2x2 table for "Diagnostic Test Evaluation"
youden_index(105, 171, 15, 87)
}
|
cb49443bdea6d4c574d171cf7a15795aa3950b90
|
fb322a643c6d8fbd1599887bdcf55d15a170ac92
|
/man/list_sim_data.Rd
|
8baa99f108b55cf1a5421b12d57b1301ef2a343f
|
[] |
no_license
|
mdlincoln/forestry
|
460a333c9e36b6888c017ef400cad4a53771caee
|
e224d55f6ef7fac89103e48a1d6df7b3aff4c89f
|
refs/heads/master
| 2021-09-03T02:09:54.270841
| 2018-01-04T19:54:42
| 2018-01-04T19:54:42
| 70,771,231
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 784
|
rd
|
list_sim_data.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chart_forest.R
\name{list_sim_data}
\alias{list_sim_data}
\title{Simulate data from a list of random forests}
\usage{
list_sim_data(rf, class, var1, breaks1 = 50, var2 = NULL, var3 = NULL,
n_cores = parallel::detectCores(), progress = interactive(), ...)
}
\arguments{
\item{rf}{random forest}
\item{class}{Which class to plot}
\item{var1}{(Required) Primary variable (preferably continuous)}
\item{breaks1}{How many values of the primary variable should be sampled when
calculating partial dependence?}
\item{var2}{Secondary variable}
\item{var3}{Tertiary variable}
\item{n_cores}{How many cores to use when calculating in parallel? Defaults
to all available cores.}
\item{progress}{Display progress? Defaults to \code{interactive()}.}
}
\description{
Simulate data from a list of random forests
}
|
8ffad147489da8acee18c3ec704222722f1cbbde
|
6cb726808934d2695c74cdb7f924a79d8f8b54b7
|
/data/data-raw/clean_data.R
|
43ca806ca1513f5e022703bf839c99bedd190cce
|
[] |
no_license
|
waterdatacollaborative/Get-the-Lead-Out
|
3b55a63383e9bd080b58fec72da9d64c94ce96be
|
9d95968cce892581a6fda1f3f9187bb8847859d3
|
refs/heads/master
| 2020-04-05T09:24:53.018404
| 2018-10-01T21:45:24
| 2018-10-01T21:45:24
| 156,754,988
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,088
|
r
|
clean_data.R
|
# Clean and geocode SFUSD lead-sampling data: join the hand-munged sampling
# results with SF OpenData school locations and write one tidy CSV.
library(readxl)
library(tidyverse)
library(stringr)
library(lubridate)

# downloaded school locations from:
# https://data.sfgov.org/Economy-and-Community/Map-of-Schools/qb37-w9se/data
geo_sfusd <- read_csv('data-raw/Map_of_Schools.csv')

# munged by hand
sfusd <- read_csv('data-raw/sfusd_lead.csv')
# lookup table mapping sampling-file school names to geo-file campus names
# (presumably only for schools whose names do not match -- TODO confirm)
sfusd_names_to_geo_sfusd <- read_csv('data-raw/missing_location.csv')

# keep only the join-relevant columns; rename to avoid clashes with sfusd columns
campus_name <- geo_sfusd %>%
  select(geo_name = `Campus Name`, location = `Location 1`, address2 = `Campus Address`)

sfusd_lead <- sfusd %>%
  left_join(sfusd_names_to_geo_sfusd) %>%
  # fall back to the original school name when no geo alias exists
  mutate(geo_name = ifelse(is.na(geo_name), school_name, geo_name)) %>%
  left_join(campus_name) %>%
  # prefer values already present; fill gaps from the geo file
  mutate(address = ifelse(is.na(address), address2, address),
         address = toupper(address),
         location = str_remove(location, 'CA\n'),
         lat_long = ifelse(is.na(location), lat_long, location),
         sample_date = dmy(sample_date)) %>%
  select(-address2, -location) %>%
  rename(geo_school_name = geo_name)

View(sfusd_lead)      # interactive inspection only
glimpse(sfusd_lead)   # quick structure check
write_csv(sfusd_lead, 'sfusd_lead_sampling.csv')
|
fc121c6f9cfa46682fd0e5df47503b145a8b1907
|
8036066874c5ff987566482971c36ad4e3f41551
|
/fmri/fsl_pipeline/functions/fsl_sceptic_model.R
|
3d96b01eb08fac88f3a0c39cf26faf3935afdf09
|
[] |
no_license
|
UNCDEPENdLab/clock_analysis
|
171ec5903fc00e8b0f6037f1b0a76b3f77558975
|
5aeb278aaf8278cebefa3f4b11360a9b3d5c364f
|
refs/heads/master
| 2023-06-22T07:18:03.542805
| 2023-06-19T18:31:40
| 2023-06-19T18:31:40
| 22,355,039
| 1
| 2
| null | 2021-04-20T20:07:08
| 2014-07-28T19:29:11
|
HTML
|
UTF-8
|
R
| false
| false
| 12,851
|
r
|
fsl_sceptic_model.R
|
#note: this is a small adaptation from the original fslSCEPTICModel to avoid use of the clockfit objects and to move to the
#simpler build_design_matrix approach and the use of the trial_statistics csv files from vba_fmri

# Set up (and optionally execute) FSL FEAT level-1 analyses for SCEPTIC model-based regressors.
#
# Arguments:
#   subj_data       trial-level data for one subject, as produced by parse_sceptic_outputs
#   sceptic_signals character vector of column names in subj_data used for parametric modulator regressors
#   l1_contrasts    level-1 contrast matrix, ready for generate_fsf_contrast_syntax (finalized upstream)
#   mrfiles         character vector of processed fMRI data files to analyze
#   runlengths      number of volumes in each run (element-wise parallel to mrfiles)
#   mrrunnums       numeric vector of the run numbers corresponding to mrfiles
#   execute_feat    whether to execute feat for the runs after setting up the fsf files
#   force           if TRUE, recreate outputs even when they already exist
#   drop_volumes    volumes dropped from the beginning of each run; passed to build_design_matrix
#                   so the convolved regressors line up with the fMRI data
#   outdir          base name of the model output folder, nested inside the subject's data folder
#   usepreconvolve  TRUE: use convolved regressors from build_design_matrix; FALSE: FSL 3-column convolution
#   spikeregressors whether to include FD > 0.9 spike regressors (not used in conventional AROMA)
#   model_suffix    suffix appended to the default outdir name
#   ...             additional arguments forwarded to build_design_matrix (should include tr)
#
# Returns 0 when the output directory already exists and force=FALSE (early exit).
fsl_sceptic_model <- function(subj_data, sceptic_signals, l1_contrasts=NULL, mrfiles, runlengths, mrrunnums, execute_feat=FALSE, force=FALSE,
                              drop_volumes=0, outdir=NULL, usepreconvolve=FALSE, spikeregressors=FALSE, model_suffix="", ...) {
  require(Rniftilib)
  require(dplyr)
  require(tidyr)
  require(dependlab)

  #the per-run vectors must line up element-wise
  stopifnot(length(mrfiles) == length(runlengths), length(mrfiles) == length(mrrunnums))

  if (is.null(outdir)) {
    outdir=paste0("sceptic-", paste(sceptic_signals, collapse="-")) #define output directory based on combination of signals requested
    if (usepreconvolve) { outdir=paste(outdir, "preconvolve", sep="-") }
    outdir <- paste0(outdir, model_suffix) #add any model suffix, if requested
  }

  #determine which feat template is relevant
  use_new <- TRUE #generate EV and contrast syntax dynamically
  if (use_new) {
    fsfTemplate <- readLines(file.path(getMainDir(), "clock_analysis", "fmri", "fsf_templates", "feat_lvl1_clock_sceptic_nparam_template.fsf"))
  } else {
    if (length(sceptic_signals) == 1L) {
      ##single model-based regressor
      if (usepreconvolve) {
        fsfTemplate <- readLines(file.path(getMainDir(), "clock_analysis", "fmri", "fsf_templates", "feat_lvl1_clock_sceptic_univariate_preconvolve_template.fsf"))
      } else {
        fsfTemplate <- readLines(file.path(getMainDir(), "clock_analysis", "fmri", "fsf_templates", "feat_lvl1_clock_sceptic_univariate_template.fsf"))
      }
    } else if (length(sceptic_signals) == 4L) { #pemax, dauc, vchosen, ventropy
      if (usepreconvolve) {
        fsfTemplate <- readLines(file.path(getMainDir(), "clock_analysis", "fmri", "fsf_templates", "feat_lvl1_clock_sceptic_4param_preconvolve_template.fsf"))
      } else {
        stop("not implemented yet")
      }
    } else if (length(sceptic_signals) == 5L) { #pemax, dauc, vchosen, ventropy, vtime
      if (usepreconvolve) {
        fsfTemplate <- readLines(file.path(getMainDir(), "clock_analysis", "fmri", "fsf_templates", "feat_lvl1_clock_sceptic_5param_preconvolve_template.fsf"))
      } else { stop("not implemented yet") }
    } else { stop("not implemented yet") }
  }

  #note: normalizePath will fail to evaluate properly if directory does not exist
  fsl_run_output_dir <- file.path(normalizePath(file.path(dirname(mrfiles[1L]), "..")), outdir)
  if (file.exists(fsl_run_output_dir) && !force) { message(fsl_run_output_dir, " exists. Skipping."); return(0) }
  cat("fsl_run_output_dir create: ", fsl_run_output_dir, "\n")
  dir.create(fsl_run_output_dir, showWarnings=FALSE) #one directory up from a given clock run
  timingdir <- file.path(fsl_run_output_dir, "run_timing_sceptic")

  #create the events and signals structures for the build_design_matrix call
  #this assumes that we have received a data.frame with the structure from the _trial_statistics.csv.gz generated
  #thus, we have $clock_onset and $feedback_onset, and $iti_onset available
  events <- subj_data %>% dplyr::select(id, run, trial, clock_onset, feedback_onset, iti_onset, rt_csv) %>%
    dplyr::mutate(clock_duration=rt_csv/1000, feedback_duration=iti_onset - feedback_onset) %>%
    dplyr::select(-iti_onset, -rt_csv) %>% tidyr::gather(key="key", value="value", -id, -run, -trial) %>%
    tidyr::separate(col = key, into = c("event", "onset_duration")) %>%
    tidyr::spread(key=onset_duration, value=value) %>% dplyr::select(event, run, trial, onset, duration)

  signals <- populate_sceptic_signals(sceptic_signals, subj_data)

  #not currently handling vtime
  # else if (thisName == "vtime") {
  #   #vtime is a runs x trials list with a data.frame per trial containing onsets, durations, and values within trial
  #   onsets[v] <- NA #not relevant
  #   durations[v] <- NA #not relevant
  #   normalizations[v] <- "none" #should not try to normalize the within-trial regressor since this starts to confused within/between trial variation

  #save(file=file.path(fsl_run_output_dir, "bdm_call.RData"), events, signals, timingdir, drop_volumes, mrfiles, mrrunnums)

  #NB. The tr argument should be passed in as part of ...
  d <- build_design_matrix(events=events, signals=signals, baseline_coef_order=2, write_timing_files = c("convolved"), #, "FSL"),
                           center_values=TRUE, plot=FALSE, convolve_wi_run=TRUE, output_directory=timingdir, drop_volumes=drop_volumes,
                           run_volumes=mrfiles, runs_to_output=mrrunnums, ...)

  save(d, subj_data, events, signals, timingdir, runlengths, mrrunnums, file=file.path(fsl_run_output_dir, "designmatrix.RData"))

  allFeatFiles <- list()

  #FSL computes first-level models on individual runs
  for (r in seq_along(mrfiles)) {
    stopifnot(file.exists(file.path(dirname(mrfiles[r]), "motion.par"))) #can't find motion parameters
    runnum <- sub("^.*/clock(\\d+)$", "\\1", dirname(mrfiles[r]), perl=TRUE)
    nvol <- nifti.image.read(mrfiles[r], read_data=0)$dim[4L]

    ##just PCA motion on the current run
    ##mregressors <- pca_motion(mrfiles[r], runlengths[r], motion_parfile="motion.par", numpcs=3, drop_volumes=drop_volumes)$motion_pcs_concat

    ##Add volumes to censor here. Use censor_intersection.mat, which flags fd > 0.9 and DVARS > 20
    ##15Jun2016: Switch to FD > 0.9mm censoring in general (moving away from wavelet)
    ##If fd_0.9.mat doesn't exist, it means no spike regressors were generated at this threshold
    ##Thus, do not include in the nuisance set. Also do not include PCA motion regressors
    ##censorfile <- file.path(dirname(mrfiles[r]), "motion_info", "censor_intersection.mat")
    ##if (file.exists(censorfile) && file.info(censorfile)$size > 0) {
    ##  censor <- read.table(censorfile, header=FALSE)$V1
    ##  censor <- censor[(1+drop_volumes):runlengths[r]]
    ##  mregressors <- cbind(mregressors, censor)
    ##}

    mregressors <- NULL #start with NULL
    if (spikeregressors) { #incorporate spike regressors if requested (not used in conventional AROMA)
      censorfile <- file.path(dirname(mrfiles[r]), "motion_info", "fd_0.9.mat")
      if (file.exists(censorfile) && file.info(censorfile)$size > 0) {
        censor <- read.table(censorfile, header=FALSE)
        censor <- censor[(1+drop_volumes):runlengths[r],,drop=FALSE] #need no drop here in case there is just a single volume to censor
        #if the spikes fall outside of the rows selected above, we will obtain an all-zero column. remove these
        censor <- censor[,sapply(censor, sum) > 0,drop=FALSE]
        if (ncol(censor) == 0L) { censor <- NULL } #no volumes to censor within valid timepoints
        mregressors <- censor
      }
    }

    ##add CSF and WM regressors (with their derivatives)
    nuisancefile <- file.path(dirname(mrfiles[r]), "nuisance_regressors.txt")
    if (file.exists(nuisancefile)) {
      nuisance <- read.table(nuisancefile, header=FALSE)
      nuisance <- nuisance[(1+drop_volumes):runlengths[r],,drop=FALSE]
      nuisance <- as.data.frame(lapply(nuisance, function(col) { col - mean(col) })) #demean
      if (!is.null(mregressors)) { mregressors <- cbind(mregressors, nuisance) #note that in R 3.3.0, cbind with NULL or c() is no problem...
      } else { mregressors <- nuisance }
    }

    motfile <- file.path(fsl_run_output_dir, paste0("run", runnum, "_confounds.txt"))
    write.table(mregressors, file=motfile, col.names=FALSE, row.names=FALSE)

    #search and replace within fsf file for appropriate sections
    ##.OUTPUTDIR. is the feat output location
    ##.NVOL. is the number of volumes in the run
    ##.FUNCTIONAL. is the fmri data to process (sans extension)
    ##.CONFOUNDS. is the confounds file for GLM
    ##.CLOCK_TIMES. is the three-column file for clock onset
    ##.FEEDBACK_TIMES. is the three-column file for feedback onset
    ##.VNAME. is the signal name in a univariate model
    ##.V_TIMES. is the three-column file for the signal
    ##.V_CON. is the contrast name for the signal
    thisTemplate <- fsfTemplate
    thisTemplate <- gsub(".OUTPUTDIR.", file.path(fsl_run_output_dir, paste0("FEAT_LVL1_run", runnum)), thisTemplate, fixed=TRUE)
    thisTemplate <- gsub(".NVOL.", nvol, thisTemplate, fixed=TRUE)
    thisTemplate <- gsub(".FUNCTIONAL.", gsub(".nii(.gz)*$", "", mrfiles[r]), thisTemplate, fixed=TRUE)
    thisTemplate <- gsub(".CONFOUNDS.", motfile, thisTemplate, fixed=TRUE)

    if (use_new) {
      #generate ev syntax
      dmat <- d$design_convolved[[paste0("run", runnum)]] %>% select(-matches("base\\d+")) #drop baseline columns
      regressors <- as.list(names(dmat))
      #add common ingredients for preconvolved regressors
      regressors <- lapply(regressors, function(x) { list(name=x, waveform="custom_1", convolution="none", tempfilt=1, timing_file=file.path(timingdir, paste0("run", runnum, "_", x, ".1D"))) })
      ev_syn <- dependlab::generate_fsf_lvl1_ev_syntax(regressors)
      #creation of l1 contrast matrices, including the diagonal contrasts, now abstracted to finalize_pipeline_configuration.R
      #thus, l1_contrasts is already a contrast matrix ready to be passed to the generate_fsf_contrast_syntax function
      cmat_syn <- dependlab::generate_fsf_contrast_syntax(l1_contrasts)
      thisTemplate <- c(thisTemplate, ev_syn, cmat_syn)
    } else {
      if (usepreconvolve) {
        thisTemplate <- gsub(".CLOCK_TIMES.", file.path(timingdir, paste0("run", runnum, "_clock.1D")), thisTemplate, fixed=TRUE)
        thisTemplate <- gsub(".FEEDBACK_TIMES.", file.path(timingdir, paste0("run", runnum, "_feedback.1D")), thisTemplate, fixed=TRUE)
      } else {
        thisTemplate <- gsub(".CLOCK_TIMES.", file.path(timingdir, paste0("run", runnum, "_clock_FSL3col.txt")), thisTemplate, fixed=TRUE)
        thisTemplate <- gsub(".FEEDBACK_TIMES.", file.path(timingdir, paste0("run", runnum, "_feedback_FSL3col.txt")), thisTemplate, fixed=TRUE)
      }

      for (s in seq_along(sceptic_signals)) {
        if (usepreconvolve) {
          thisTemplate <- gsub(paste0(".V", s, "_TIMES."), file.path(timingdir, paste0("run", runnum, "_", sceptic_signals[s], ".1D")), thisTemplate, fixed=TRUE)
        } else {
          thisTemplate <- gsub(paste0(".V", s, "_TIMES."), file.path(timingdir, paste0("run", runnum, "_", sceptic_signals[s], "_FSL3col.txt")), thisTemplate, fixed=TRUE)
        }
        thisTemplate <- gsub(paste0(".V", s, "NAME."), sceptic_signals[s], thisTemplate) #define EV name
        thisTemplate <- gsub(paste0(".V", s, "_CON."), sceptic_signals[s], thisTemplate) #define contrast name
      }
    }

    featFile <- file.path(fsl_run_output_dir, paste0("FEAT_LVL1_run", runnum, ".fsf"))
    if (file.exists(featFile) && !force) { next } #skip re-creation of FSF and do not run below unless force==TRUE

    cat(thisTemplate, file=featFile, sep="\n")
    allFeatFiles[[r]] <- featFile
  }

  #runs skipped via `next` above leave NULL holes in allFeatFiles; drop them so
  #clusterApply does not hand NULL to runfeat (which would fail on basename/dirname)
  allFeatFiles <- Filter(Negate(is.null), allFeatFiles)

  #if execute_feat is TRUE, execute feat on each fsf file at this stage, using an 8-node fork cluster (since we have 8 runs)
  #if execute_feat is FALSE, just create the fsf files but don't execute the analysis
  if (isTRUE(execute_feat) && length(allFeatFiles) > 0L) {
    require(parallel)
    cl_fork <- makeForkCluster(nnodes=8)
    runfeat <- function(fsf) {
      runname <- basename(fsf)
      runFSLCommand(paste("feat", fsf), stdout=file.path(dirname(fsf), paste0("feat_stdout_", runname)), stderr=file.path(dirname(fsf), paste0("feat_stderr_", runname)))
    }
    clusterApply(cl_fork, allFeatFiles, runfeat)
    stopCluster(cl_fork)
  }
}
|
a81fba405386cd17784377d50cb8eea2014f574b
|
39d0c55687addd02ed07ba67796eb67c16dd8023
|
/man/dz2011.Rd
|
218d0e92d1a16f7bf17e7ca030b48ceee074df48
|
[] |
no_license
|
wfmackey/absmapsdata
|
02df7ebe8a2385bd5642285ebff42fdd4b7433b8
|
513415b949408d8696d74af3673df3367f290d5a
|
refs/heads/master
| 2023-08-03T08:06:45.431100
| 2023-07-27T13:47:17
| 2023-07-27T13:47:17
| 172,700,063
| 55
| 17
| null | 2023-07-27T08:19:49
| 2019-02-26T11:38:26
|
R
|
UTF-8
|
R
| false
| true
| 856
|
rd
|
dz2011.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geog_dat.R
\docType{data}
\name{dz2011}
\alias{dz2011}
\title{Destination Zones, 2011}
\format{
An \code{sf} object with nine variables:
\describe{
\item{\code{dz_code_2011}}{The destination zone code as a character string.}
\item{\code{sa2_code_2011}}{The full 9 digit SA2 code numeric}
\item{\code{sa2_shortcode_2011}}{The 5 digit SA2 code numeric}
\item{\code{sa2_name_2011}}{The SA2 name character}
\item{\code{state_name_2011}}{The full state name}
\item{\code{cent_lat}}{The latitude of the area's centroid}
\item{\code{cent_long}}{The longitude of the area's centroid}
\item{\code{geometry}}{A nested list containing the area's geometry (polygons)}
}
}
\usage{
dz2011
}
\description{
Geospatial data provided by the ABS for destination zones in 2011.
}
\keyword{datasets}
|
9ae31f1d441c7b8349544486f58fbda0aa6fec2e
|
35de0ba0d306a2813e6e0b49870d467a93094a9a
|
/2species_driver/explo/2competitors_and_climate.R
|
ff50c803eaaadb5b2f6dcdb94e930e215bb0a408
|
[] |
no_license
|
ha0ye/GCausality
|
cab83d2368a2896a8783fc43cd95b639e2e47f79
|
28b11c312c116a84bbf223fce445ac7db69e6d53
|
refs/heads/master
| 2020-09-04T16:35:59.576101
| 2019-09-04T06:58:16
| 2019-09-04T06:58:16
| 219,804,346
| 1
| 1
| null | 2019-11-05T17:41:54
| 2019-11-05T17:32:09
| null |
UTF-8
|
R
| false
| false
| 10,874
|
r
|
2competitors_and_climate.R
|
### FB 08/10/2017 -- adapted from Nantes et Irstea presentations though
### Uses a strongly seasonal driver. A logical case for conditional GC.
library(vars)
library(rEDM)

### First case with interactions
# Two-species Ricker-type competition model forced by a shared seasonal
# environmental driver y1; used to compare Granger causality (GC) with and
# without conditioning on the driver.
set.seed(42)
tmax=300
Y=matrix(1,nrow=tmax,ncol=2)
Y[1,1]=abs(rnorm(1,1,1))
Y[1,2]=abs(rnorm(1,1,1))

seasonality<-2*sin(2*pi*(1:tmax)/24) # must be enough to affect the growth rates

### Environmental variables
# driver = seasonal signal + AR(5) noise
y1noise<-arima.sim(model=list(ar=c(0.1, 0.2, 0.1,0.5,-0.1)), n=tmax,sd=sqrt(0.5) )
###y2noise<-arima.sim(model=list(ar=c(0.1, 0.2, 0.1,0.5,-0.1)), n=n.time,sd=sqrt(1) ) # more noisy
y1<-seasonality+y1noise
##y2<-seasonality+y2noise

# Ricker dynamics: both species respond to y1 (coefficient 0.5), with
# reciprocal competition terms (-2 on species 1, -0.31 on species 2)
for (t in 1:(tmax-1)){
  Y[t+1,1] = Y[t,1]*exp(3+0.5*y1[t] - 4*Y[t,1]-2*Y[t,2] + rnorm(1,0,0.1))
  Y[t+1,2] = Y[t,2]*exp(2.1+0.5*y1[t] -0.31*Y[t,1]-3.1*Y[t,2] + rnorm(1,0,0.1))
}
y=log(Y)

# conditional GC: VAR with the driver as an exogenous regressor
varcompet2<-VAR(y, type="none",exogen=y1,lag.max=10,ic="SC")
causality(varcompet2,cause="y1") #p-value 0.004054
causality(varcompet2,cause="y2") #0.00000
### I may need to repeat this over many simulations

############
pdf(file="2competitors_wSharedSeasonalDriver.pdf",width=10,height=3)
par(cex=1.5,lwd=2,mar=c(4,4,1,2))
plot(100:200,y1[100:200],ylab="log(abundance)",col="green",type="l",xlab="Time",ylim=c(-10,max(y1)))
lines(100:200,y[100:200,1],col="blue")
lines(100:200,y[100:200,2],col="black")
dev.off()

### Second case without interactions
# same forcing, but the cross-competition coefficients are set to zero
Y=matrix(1,nrow=tmax,ncol=2)
Y[1,1]=abs(rnorm(1,1,1))
Y[1,2]=abs(rnorm(1,1,1))

for (t in 1:(tmax-1)){
  Y[t+1,1] = Y[t,1]*exp(3+0.5*y1[t] - 4*Y[t,1]-0*Y[t,2] + rnorm(1,0,0.1))
  Y[t+1,2] = Y[t,2]*exp(2.1+0.5*y1[t] -0*Y[t,1]-3.1*Y[t,2] + rnorm(1,0,0.1))
}
y=log(Y)

pdf(file="2competitors_wSharedSeasonalDriver_noInteractions.pdf",width=10,height=3)
par(cex=1.5,lwd=2,mar=c(4,4,1,2))
plot(100:200,y1[100:200],ylab="log(abundance)",col="green",type="l",xlab="Time",ylim=c(-10,max(y1)))
lines(100:200,y[100:200,1],col="blue")
lines(100:200,y[100:200,2],col="black")
dev.off()

varcompet2bis<-VAR(y, type="none",exogen=y1,lag.max=10,ic="SC")
summary(varcompet2bis)
causality(varcompet2bis,cause="y1") #p-value 1.073e-06
causality(varcompet2bis,cause="y2") #0.1953
### Let's use another simulation with just a little more noise
# Y=matrix(1,nrow=tmax,ncol=2)
# Y[1,1]=abs(rnorm(1,1,1))
# Y[1,2]=abs(rnorm(1,1,1))
#
# for (t in 1:(tmax-1)){
# Y[t+1,1] = Y[t,1]*exp(3+0.5*y1[t] - 4*Y[t,1]-0*Y[t,2] + rnorm(1,0,0.3))
# Y[t+1,2] = Y[t,2]*exp(2.1+0.5*y1[t] -0*Y[t,1]-3.1*Y[t,2] + rnorm(1,0,0.3))
# }
# y=log(Y)
#
# varcompet2ter<-VAR(y, type="none",exogen=y1,lag.max=10,ic="SC")
# summary(varcompet2ter)
# causality(varcompet2ter,cause="y1") #p-value 6.263e-10
# causality(varcompet2ter,cause="y2") #0.5799
### So it is possible to craft examples that are difficult to infer with GC
### --- still we may need many simulations and parameter values for a good comparison of CCM and GC
### NB might well be possible that we would get better results by restricting the fit to a VAR1
### But we have no theoretical reason to do that
############## Two things to check though!!! ###################
### I haven't tested pairwise GC on these examples. Perhaps it works better than conditional GC?
### And I haven't CCM this, on the other hand.
### They say you don't need necessarily corrections for seasonality in the 2012 paper -- or do we? cf Deyle PNAS Influenza
############# Pairwise GC (without conditioning on the driver) & CCM to test absolutely first...

# Pairwise GC: same VAR but WITHOUT the exogenous driver, so shared
# seasonality can masquerade as causality between the two species
varcompet_noExo=VAR(y, type="none",lag.max=10,ic="SC") ## Direct pairwise GC
summary(varcompet_noExo)
causality(varcompet_noExo,cause="y1") #p-value 0.03633
causality(varcompet_noExo,cause="y2") #0.00000
### OK we do have a problem -- but what does CCM say?

########## CCM analysis of this dataset ######################
### Merging to a dataframe
species2_species1_temp=data.frame(1:tmax,y,y1)
names(species2_species1_temp)=c("time","species1","species2","temp")

pdf(file="CCM_2competitors_and_envDriver.pdf",height=10,width=10)
par(mfrow=c(2,2),cex=1.25,lwd=2)

# convergent cross mapping between species1 and the environmental driver
species1_xmap_temp <- ccm(species2_species1_temp, E = 3, lib_column = "species1",
                          target_column = "temp", lib_sizes = seq(5, 100, by = 5), random_libs = FALSE)
temp_xmap_species1 <- ccm(species2_species1_temp, E = 3, lib_column = "temp", target_column = "species1",
                          lib_sizes = seq(5, 100, by = 5), random_libs = FALSE)
a_xmap_t_means <- ccm_means(species1_xmap_temp)
t_xmap_a_means <- ccm_means(temp_xmap_species1)

# cross-map skill (rho) versus library size; increasing rho indicates causal forcing
par(mar = c(4, 4, 1, 1), mgp = c(2.5, 1, 0))
plot(a_xmap_t_means$lib_size, pmax(0, a_xmap_t_means$rho), type = "l", col = "red",
     xlab = "Library Size", ylab = "Cross Map Skill (rho)", ylim = c(0, 1.1))
lines(t_xmap_a_means$lib_size, pmax(0, t_xmap_a_means$rho), col = "blue")
legend(x = "topleft", legend = c("species1 xmap temp", "temp xmap species1"), col = c("red", "blue"), lwd = 1, inset = 0.02, cex = 0.8)

### Checking between species2 and species1
species1_xmap_species2 <- ccm(species2_species1_temp, E = 3, lib_column = "species1",
                              target_column = "species2", lib_sizes = seq(5, 100, by = 5), random_libs = FALSE)
species2_xmap_species1 <- ccm(species2_species1_temp, E = 3, lib_column = "species2", target_column = "species1",
                              lib_sizes = seq(5, 100, by = 5), random_libs = FALSE)
species1_xmap_species2_means <- ccm_means(species1_xmap_species2)
species2_xmap_species1_means <- ccm_means(species2_xmap_species1)

#par(mar = c(4, 4, 1, 1), mgp = c(2.5, 1, 0))
plot(species1_xmap_species2_means$lib_size, pmax(0, species1_xmap_species2_means$rho), type = "l", col = "red", xlab = "Library Size", ylab = "Cross Map Skill (rho)",ylim = c(0, 1.1))
lines(species2_xmap_species1_means$lib_size, pmax(0, species2_xmap_species1_means$rho), col = "blue")
legend(x = "topleft", legend = c("species1 xmap species2", "species2 xmap species1"), col = c("red", "blue"), lwd = 1, inset = 0.02, cex = 0.8)

# Checking between species2 and temp
species2_xmap_temp <- ccm(species2_species1_temp, E = 3, lib_column = "species2",
                          target_column = "temp", lib_sizes = seq(5, 100, by = 5), random_libs = FALSE)
temp_xmap_species2 <- ccm(species2_species1_temp, E = 3, lib_column = "temp", target_column = "species2",
                          lib_sizes = seq(5, 100, by = 5), random_libs = FALSE)
a_xmap_t_means <- ccm_means(species2_xmap_temp)
t_xmap_a_means <- ccm_means(temp_xmap_species2)

par(mar = c(4, 4, 1, 1), mgp = c(2.5, 1, 0))
plot(a_xmap_t_means$lib_size, pmax(0, a_xmap_t_means$rho), type = "l", col = "red",
     xlab = "Library Size", ylab = "Cross Map Skill (rho)", ylim = c(0, 1.1))
lines(t_xmap_a_means$lib_size, pmax(0, t_xmap_a_means$rho), col = "blue")
legend(x = "topleft", legend = c("species2 xmap temp", "temp xmap species2"), col = c("red", "blue"), lwd = 1, inset = 0.02, cex = 0.8)
dev.off()

### We recover the correct causal order for effects of the environment
### Though it looks like species 1 affects species 2 with CCM
############################# Using many simulations to have distributions of P-values #############
# Repeat the two-species competition simulation nsims times and record, for
# each repeat, Granger-causality p-values in both directions under four model
# variants: with/without the exogenous driver (Exo) and with/without true
# interspecific interactions (Inter).
nsims=100
p12=p12_noExo=p12_noInter=p12_noInter_noExo=rep(1,nsims)#initializing
p21=p21_noExo=p21_noInter=p21_noInter_noExo=rep(1,nsims)
### Loop over repeats
for (krep in 1:nsims){
tmax=300
### Model with interactions
Y=matrix(1,nrow=tmax,ncol=2)
Y[1,1]=abs(rnorm(1,1,1))
Y[1,2]=abs(rnorm(1,1,1))
### Without interactions
Z=matrix(1,nrow=tmax,ncol=2)
Z[1,1]=abs(rnorm(1,1,1))
Z[1,2]=abs(rnorm(1,1,1))
seasonality<-2*sin(2*pi*(1:tmax)/24) # must be enough to affect the growth rates
### Environmental variables
y1noise<-arima.sim(model=list(ar=c(0.1, 0.2, 0.1,0.5,-0.1)), n=tmax,sd=sqrt(0.5) )
###y2noise<-arima.sim(model=list(ar=c(0.1, 0.2, 0.1,0.5,-0.1)), n=n.time,sd=sqrt(1) ) # more noisy
y1<-seasonality+y1noise
##y2<-seasonality+y2noise
# Ricker-type stochastic competition dynamics: Y has interspecific interactions
# (off-diagonal coefficients -2 and -0.31), Z sets them to 0 (null case).
for (t in 1:(tmax-1)){
Y[t+1,1] = Y[t,1]*exp(3+0.5*y1[t] - 4*Y[t,1]-2*Y[t,2] + rnorm(1,0,0.1))
Y[t+1,2] = Y[t,2]*exp(2.1+0.5*y1[t] -0.31*Y[t,1]-3.1*Y[t,2] + rnorm(1,0,0.1))
Z[t+1,1] = Z[t,1]*exp(3+0.5*y1[t] - 4*Z[t,1]-0*Z[t,2] + rnorm(1,0,0.1))
Z[t+1,2] = Z[t,2]*exp(2.1+0.5*y1[t] -0*Z[t,1]-3.1*Z[t,2] + rnorm(1,0,0.1))
}
y=log(Y)
z=log(Z)
# VAR with the driver y1 as an exogenous regressor (GC conditioned on driver)
varcompet2<-VAR(y, type="none",exogen=y1,lag.max=10,ic="SC")
c21=causality(varcompet2,cause="y1") #same notation as in the interaction matrix // effect of 1 on 2
c12=causality(varcompet2,cause="y2")
p12[krep]=c12$Granger$p.value
p21[krep]=c21$Granger$p.value
varcompet2_noExo=VAR(y, type="none",lag.max=10,ic="SC") ## Direct pairwise GC
c21=causality(varcompet2_noExo,cause="y1")
c12=causality(varcompet2_noExo,cause="y2")
p12_noExo[krep]=c12$Granger$p.value
p21_noExo[krep]=c21$Granger$p.value
# Same two fits on the interaction-free series z (no true 1<->2 causality)
varcompet2bis<-VAR(z, type="none",exogen=y1,lag.max=10,ic="SC")
c21=causality(varcompet2bis,cause="y1") #same notation as in the interaction matrix // effect of 1 on 2
c12=causality(varcompet2bis,cause="y2")
p12_noInter[krep]=c12$Granger$p.value
p21_noInter[krep]=c21$Granger$p.value
varcompet2bis_noExo<-VAR(z, type="none",lag.max=10,ic="SC")
c21=causality(varcompet2bis_noExo,cause="y1") #same notation as in the interaction matrix // effect of 1 on 2
c12=causality(varcompet2bis_noExo,cause="y2")
p12_noInter_noExo[krep]=c12$Granger$p.value
p21_noInter_noExo[krep]=c21$Granger$p.value
}
library(sm)
# One density-comparison panel per model variant, comparing the p-value
# distributions of the two causal directions.
par(mfrow=c(2,2))
## three groups, each of length: length(x), length(y), length(z)
group.index <- rep(1:2, c(length(p12), length(p21)))
## collect data together and use sm.density.compare()
den <- sm.density.compare(c(p12,p21), group = group.index, model = "none") #"equal" does bootstrap for comparison
group.index <- rep(1:2, c(length(p12_noExo), length(p21_noExo)))
## collect data together and use sm.density.compare()
den <- sm.density.compare(c(p12_noExo,p21_noExo), group = group.index, model = "none") #"equal" does bootstrap for comparison
group.index <- rep(1:2, c(length(p12_noInter), length(p21_noInter)))
## collect data together and use sm.density.compare()
den <- sm.density.compare(c(p12_noInter,p21_noInter), group = group.index, model = "none") #"equal" does bootstrap for comparison
group.index <- rep(1:2, c(length(p12_noInter_noExo), length(p21_noInter_noExo)))
## collect data together and use sm.density.compare()
den <- sm.density.compare(c(p12_noInter_noExo,p21_noInter_noExo), group = group.index, model = "none") #"equal" does bootstrap for comparison
### Not very illustrative
p12>0.05 ## Should be mostly false given there is causality 2->1
p12_noInter>0.05 ## Should be mostly true since there is not causality 2->1
### Many p-values stay quite low. For this F-test at least.
# other stuff to plot densities
# ylim <- range(dx$y, dy$y, dz$y)
# ## make plot
# plot(dx$x, dx$y, col = 1, lwd = 2, type = "l", xlim = xlim, ylim = ylim)
# lines(dy$x, dy$y, col = 2, lwd = 2)
# lines(dz$x, dz$y, col = 3, lwd = 2)
|
57252f3d888eee5f24332b6a58937c8ef499dd4d
|
f176f883f8e6c4ff2a9e823768243777c1c0e269
|
/R/RcppExports.R
|
4c0e7a0f8db930610b7131e34fa321669a28f52d
|
[] |
no_license
|
rbagd/fastLink
|
b1032b45c9a52cd78705081e4034e6e3d579c1d4
|
4dd09a7c054dc0741d8d0d836548970c76933c99
|
refs/heads/master
| 2021-07-09T16:39:59.215249
| 2017-10-09T21:25:36
| 2017-10-09T21:25:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 395
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Thin R wrapper around the compiled routine 'fastLink_m_func_par'.
# NOTE(review): this file is generated by Rcpp::compileAttributes() and will
# be overwritten on regeneration; do not hand-edit the .Call() line.
m_func_par <- function(temp, ptemp, natemp, limit1, limit2, nlim1, nlim2, ind, listid, matchesLink = FALSE, threads = 1L) {
.Call('fastLink_m_func_par', PACKAGE = 'fastLink', temp, ptemp, natemp, limit1, limit2, nlim1, nlim2, ind, listid, matchesLink, threads)
}
|
20016c14e2bb7ea000734f01689d46fdc2ce7cd0
|
6a1b47add0547f66b79528c6db4ff121d3fb9515
|
/mergeAndAnnotateClusters.py
|
c26c87dface3bca1f18b8f31a0bd974887070eb6
|
[] |
no_license
|
dpryan79/RNAcentralTools
|
749d5e4828fbb1d1e3a79d7b0968306e57093fd1
|
17903d47955c13ea19969fe6c1b68f3c0f7d3a7f
|
refs/heads/master
| 2021-01-22T11:38:21.146465
| 2014-12-09T21:59:54
| 2014-12-09T21:59:54
| 27,717,715
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,050
|
py
|
mergeAndAnnotateClusters.py
|
#!/usr/bin/env Rscript
# Read the cluster-to-ID mapping (tab-delimited, no header) and name the
# columns: RNAcentral ID, cluster number, free-text description, length.
d <- read.delim("cluster_to_id.txt", header=F)
colnames(d) <- c("ID", "Cluster", "Description", "L")
# Descriptions are matched with grep() below, so keep them as character
d$Description <- as.character(d$Description)
mergeCluster <- function(df, nme) {
  # Collapse every cluster that contains a description matching the regular
  # expression 'nme' into a single cluster, keeping the cluster ID of the
  # first row that falls in the merged set. Returns the modified data frame.
  matching_rows <- grep(nme, df$Description)
  clusters_to_merge <- unique(df$Cluster[matching_rows])
  in_merge <- df$Cluster %in% clusters_to_merge
  df$Cluster[in_merge] <- df$Cluster[in_merge][1]
  df
}
#N.B., mergeCluster() MUST have already been run!
# Merge the (already-collapsed) cluster whose descriptions match regex 'nme1'
# with the cluster matching regex 'nme2', keeping nme1's cluster ID.
# BUG FIXES vs. original: the assignment/function keyword was missing
# ("mergeClustersTogether(df, nme1, nme2) {" does not parse), and IDX2 was
# read before it was defined (the grep result had been stored in ID2).
mergeClustersTogether <- function(df, nme1, nme2) {
  IDX1 <- grep(nme1, df$Description)
  targetCluster <- df$Cluster[IDX1][1]
  IDX1 <- which(df$Cluster == targetCluster)
  IDX2 <- grep(nme2, df$Description)
  IDX2 <- which(df$Cluster == df$Cluster[IDX2][1])
  df$Cluster[c(IDX1, IDX2)] <- targetCluster
  df
}
# Merge annotation families into single clusters. Order matters: broad
# patterns such as "SNORA4" also match SNORA41/44/... descriptions, so the
# loops below reproduce the original call order exactly (including the
# deliberate re-merges immediately before each mergeClustersTogether()).
# Xist
d <- mergeCluster(d, "Xist")
# SNORA17 and SNORA43 can be merged together
d <- mergeCluster(d, "SNORA17")
d <- mergeCluster(d, "SNORA43")
d <- mergeClustersTogether(d, "SNORA17", "SNORA43")
# Single-family merges, first batch (original order preserved)
for (nm in c("SNORA19", "SNORA20", "SNORA21", "SNORA26", "SNORA28",
             "SNORA29", "SNORA31", "SNORA32", "SNORA33", "SNORA35",
             "SNORA38", "SNORA4", "SNORA41")) {
  d <- mergeCluster(d, nm)
}
# SNORA43 merges with 17 (re-collapse both before joining)
d <- mergeCluster(d, "SNORA43")
d <- mergeCluster(d, "SNORA17")
d <- mergeClustersTogether(d, "SNORA43", "SNORA17")
# Single-family merges, second batch; "SNORA7$" is anchored so it does not
# swallow SNORA70-SNORA79
for (nm in c("SNORA44", "SNORA46", "SNORA47", "SNORA48", "SNORA5",
             "SNORA51", "SNORA55", "SNORA57", "SNORA58", "SNORA61",
             "SNORA63", "SNORA65", "SNORA66", "SNORA67", "SNORA68",
             "SNORA69", "SNORA7$", "SNORA70", "SNORA71", "SNORA72",
             "SNORA74", "SNORA75")) {
  d <- mergeCluster(d, nm)
}
# SNORA76 merges with 50
d <- mergeCluster(d, "SNORA76")
d <- mergeCluster(d, "SNORA50")
d <- mergeClustersTogether(d, "SNORA76", "SNORA50")
# BUG FIX: the original repeated mergeCluster(d, "SNORA29") under the
# comments #SNORA79 / #SNORA81 / #SNORA84 (copy-paste error; SNORA29 was
# already merged above). Use the intended family names.
# Remaining families, including the multi-member ones and the SNORD blocks
for (nm in c("SNORA79", "SNORA81", "SNORA84",
             "SNORA16B/SNORA16A family", "SNORA2/SNORA34 family",
             "SNORA3/SNORA45", "SNORA30/SNORA37", "SNORA36 family",
             "SNORA42/SNORA80", "SNORA62/SNORA6",
             "SNORA64/SNORA10 family", "SNORA24", "SNORA40",
             "mir-1937", "SNORD113", "SNORD115", "SNORD116")) {
  d <- mergeCluster(d, nm)
}
#Recalculate the Cluster IDs
# After merging, cluster IDs are sparse: sort by cluster, use rle() to get
# each cluster's size, and reassign contiguous IDs 1..last with rep().
d <- d[order(d$Cluster),]
last <- length(unique(d$Cluster))
reps <- rle(d$Cluster)$lengths
d$Cluster <- rep(c(1:last), reps)
#order by ID
d <- d[order(d$ID),]
#Remove some of the common species labels
d$Description <- gsub("Homo sapiens ","", d$Description, ignore.case=T)
d$Description <- gsub("Pan troglodytes ","", d$Description, ignore.case=T)
d$Description <- gsub("Trypanosoma congolense IL3000 ","", d$Description, ignore.case=T)
d$Description <- gsub("domesticus ","", d$Description, ignore.case=T)
d$Description <- gsub("Mus musculus ","", d$Description, ignore.case=T)
d$Description <- gsub("\\(house mouse) ","", d$Description, ignore.case=T)
d$Description <- gsub("\\(human) ","", d$Description, ignore.case=T)
#Write out the mappings
write.table(d[,c(1,2)], file="RNAcentral_clusters.txt", col.names=F, row.names=F, sep="\t", quote=F)
#Determine cluster length, type, name
d <- d[order(d$Cluster),]
dl <- split(d, d$Cluster)
clusters <- data.frame(ID=unique(d$Cluster))
clusters$Length <- sapply(dl, function(x) max(x$L))
# Classify each cluster by scanning member descriptions for keywords; the
# first matching rule wins, so rule order encodes precedence (e.g. "ribo"
# is tested before "tRNA" so "ribosomal" rows are not misclassified).
clusters$Type <- sapply(dl, function(x) {
#possible outputs: rRNA, tRNA, lncRNA, snoRNA, snRNA, piRNA, miRNA, miscRNA, Unknown
rv="Unknown"
if(length(grep("ribo", x$Description, ignore.case=T))>0) {
rv = "rRNA"
} else if(length(grep("tRNA", x$Description, ignore.case=T))>0) {
rv = "tRNA"
} else if(length(grep("long", x$Description, ignore.case=T))>0) {
rv = "lncRNA"
} else if(length(grep("lincRNA", x$Description, ignore.case=T))>0) {
rv = "lncRNA"
} else if(length(grep("lncRNA", x$Description, ignore.case=T))>0) {
rv = "lncRNA"
} else if(length(grep("Cajal body", x$Description, ignore.case=T))>0) {
rv = "snoRNA"
} else if(length(grep("SNOR", x$Description, ignore.case=T))>0) {
rv = "snoRNA"
} else if(length(grep("small nucleolar", x$Description, ignore.case=T))>0) {
rv = "snoRNA"
} else if(length(grep("piR", x$Description, ignore.case=T))>0) {
rv = "piRNA"
} else if(length(grep("miR", x$Description, ignore.case=T))>0) {
rv = "miRNA"
} else if(length(grep("-let-", x$Description, ignore.case=T))>0) {
rv = "miRNA"
} else if(length(grep("microRNA", x$Description, ignore.case=T))>0) {
rv = "miRNA"
} else if(length(grep("oocyte_clustered", x$Description, ignore.case=T))>0) {
rv = "miscRNA"
} else if(length(grep("miscellaneous", x$Description, ignore.case=T))>0) {
rv = "miscRNA"
} else if(length(grep("germline", x$Description, ignore.case=T))>0) { #These are categorized as miscRNA by Ensembl
rv = "miscRNA"
} else if(length(grep("anti", x$Description, ignore.case=T))>0) { #antisense is lncRNA
rv = "lncRNA"
} else if(length(grep("spacer", x$Description, ignore.case=T))>0) { #External transcribed spacers are rRNA
rv = "rRNA"
} else if(length(grep("Y RNA", x$Description, ignore.case=T))>0) {
rv = "snRNA"
} else if(length(grep("snRNA", x$Description, ignore.case=T))>0) {
rv = "snRNA"
} else if(length(grep("spliceosomal", x$Description, ignore.case=T))>0) {
rv = "snRNA"
}
rv
}) #This still needs some work
clusters$name<- sapply(dl, function(x) x$Description[1])
write.table(clusters, file="RNAcentral_clusters_annotations.txt", col.names=F, row.names=F, sep="\t", quote=F)
|
7fea5dbb762fd54ab61289fcf3b2b51c942ecac1
|
9e2296070f5ac43c6030de7b6cf45a29a7086bd1
|
/plot1.R
|
3a4392845591776b84399c0a3d6046bbb33b1391
|
[] |
no_license
|
kovumadh3/ExData_Plotting1
|
16a8f64b2c0af2effc1f5ca4a33ad01dc4c420f7
|
3beb6b2b525d721dbfaec0817b2e84463a872b2c
|
refs/heads/master
| 2020-12-26T14:14:12.572623
| 2014-09-04T05:44:33
| 2014-09-04T05:44:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 595
|
r
|
plot1.R
|
# plot1.R: histogram of Global Active Power for 2007-02-01 and 2007-02-02.
# Requires the sqldf package (install.packages("sqldf") if missing); the SQL
# filter avoids loading the full household_power_consumption.txt into memory.
library(sqldf)

power_query <- "select * from file where Date = '1/2/2007' or Date = '2/2/2007'"
power_data <- read.csv.sql(file = 'household_power_consumption.txt',
                           sep = ";", sql = power_query, header = TRUE)

# Render the histogram straight to a 480x480 PNG device, then close it.
png("plot1.png", height = 480, width = 480)
hist(power_data$Global_active_power,
     col = 'red',
     main = 'Global Active Power',
     xlab = 'Global Active Power (kilowatts)')
dev.off()
|
f6d698c8f0968271605561ff63f012fa5bdce6f5
|
d6003b28f81c59cc8731583fb930bb82f4365eac
|
/R/util.plot.R
|
a837b796ba622f3aa71fff868400ec3587950f67
|
[] |
no_license
|
cran/CHNOSZ
|
79616d683d9af7e16674cf08c7912c5f438c45d9
|
6c61dcecdbce300a990341f5370552b63ec42647
|
refs/heads/master
| 2023-03-24T08:33:38.797654
| 2023-03-13T16:10:32
| 2023-03-13T16:10:32
| 17,678,274
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,870
|
r
|
util.plot.R
|
# CHNOSZ/util.plot.R
# Functions to create and modify plots
# Open a new plot with CHNOSZ's customized margins, axis style, and tick
# marks. xlim/ylim/xlab/ylab define the coordinate system and axis titles;
# 'side' selects which sides get tick marks; 'grid'/'col.grid' are passed to
# thermo.axis() for optional grid lines; xline/yline override the margin
# line of the axis titles. The pre-call par() settings are stashed once in
# thermo()$opar so they can be restored later.
thermo.plot.new <- function(xlim,ylim,xlab,ylab,cex=par('cex'),mar=NULL,lwd=par('lwd'),side=c(1,2,3,4),
mgp=c(1.7,0.3,0),cex.axis=par('cex'),col=par('col'),yline=NULL,axs='i',plot.box=TRUE,
las=1,xline=NULL, grid = "", col.grid = "gray", ...) {
# Start a new plot with some customized settings
thermo <- get("thermo", CHNOSZ)
# 20120523 store the old par in thermo()$opar
if(is.null(thermo$opar)) {
thermo$opar <- par(no.readonly=TRUE)
assign("thermo", thermo, CHNOSZ)
}
# 20090324 mar handling: NULL - a default setting; NA - par's setting
# 20090413 changed mar of top side from 2 to 2.5
marval <- c(3, 3.5, 2.5, 1)
if(identical(mar[1], NA)) marval <- par("mar")
# 20181007 get mar from the current device (if it exists) and par("mar") is not the default
if(!is.null(dev.list())) {
if(!identical(par("mar"), c(5.1, 4.1, 4.1, 2.1))) marval <- par("mar")
}
# Assign marval to mar if the latter is NULL or NA
if(!is.numeric(mar)) mar <- marval
par(mar=mar,mgp=mgp,tcl=0.3,las=las,xaxs=axs,yaxs=axs,cex=cex,lwd=lwd,col=col,fg=col, ...)
plot.new()
plot.window(xlim=xlim,ylim=ylim)
if(plot.box) box()
# Labels
if(is.null(xline)) xline <- mgp[1]
thermo.axis(xlab,side=1,line=xline,cex=cex.axis,lwd=NULL)
if(is.null(yline)) yline <- mgp[1]
thermo.axis(ylab,side=2,line=yline,cex=cex.axis,lwd=NULL)
# (optional) tick marks
if(1 %in% side) thermo.axis(NULL,side=1,lwd=lwd, grid = grid, col.grid = col.grid, plot.line = !plot.box)
if(2 %in% side) thermo.axis(NULL,side=2,lwd=lwd, grid = grid, col.grid = col.grid, plot.line = !plot.box)
if(3 %in% side) thermo.axis(NULL,side=3,lwd=lwd, plot.line = !plot.box)
if(4 %in% side) thermo.axis(NULL,side=4,lwd=lwd, plot.line = !plot.box)
}
label.plot <- function(x, xfrac=0.07, yfrac=0.93, paren=FALSE, italic=FALSE, ...) {
  # Place a text label (e.g. "(a)") inside the plot region.
  # xfrac/yfrac give the position as fractions of the x- and y-axis ranges
  # (the defaults put the label near the top-left corner); 'italic' wraps
  # the label in italics, 'paren' wraps it in parentheses; extra arguments
  # are forwarded to text().
  if (italic) x <- substitute(italic(a), list(a = x))
  if (paren) x <- substitute(group('(', a, ')'), list(a = x))
  # plotmath calls must reach text() as an expression to be rendered
  if (italic | paren) x <- as.expression(x)
  usr <- par('usr')
  xpos <- usr[1] + xfrac * (usr[2] - usr[1])
  ypos <- usr[3] + yfrac * (usr[4] - usr[3])
  text(xpos, ypos, labels = x, ...)
}
usrfig <- function() {
# Function to get the figure limits in user coordinates
# Returns list(x=, y=) with the user-coordinate positions of the figure
# region edges, obtained by fitting a line through the plot-region limits
# (par("usr")) against their fractional position in the figure (par("plt"))
# and extrapolating to fractions 0 and 1.
# Get plot limits in user coordinates (usr) and as fraction [0,1] of figure region (plt)
xusr <- par('usr')[1:2]; yusr <- par('usr')[3:4]
xplt <- par('plt')[1:2]; yplt <- par('plt')[3:4]
# Linear model to calculate figure limits in user coordinates
xlm <- lm(xusr ~ xplt); ylm <- lm(yusr ~ yplt)
xfig <- predict.lm(xlm, data.frame(xplt=c(0, 1)))
yfig <- predict.lm(ylm, data.frame(yplt=c(0, 1)))
return(list(x=xfig, y=yfig))
}
# Like label.plot(), but positions the label as a fraction of the whole
# figure region (from usrfig()) so it can sit in the margins; clipping is
# disabled (xpd=NA) while the text is drawn and then restored.
label.figure <- function(x, xfrac=0.05, yfrac=0.95, paren=FALSE, italic=FALSE, ...) {
# Function to add labels outside of the plot region 20151020
f <- usrfig()
# Similar to label.plot(), except we have to set xpd=TRUE here
opar <- par(xpd=NA)
if(italic) x <- substitute(italic(a), list(a=x))
if(paren) x <- substitute(group('(',a,')'), list(a=x))
if(italic | paren) x <- as.expression(x)
text(f$x[1]+xfrac*(f$x[2]-f$x[1]), f$y[1]+yfrac*(f$y[2]-f$y[1]), labels=x, ...)
par(opar)
}
# Draw (and invisibly return) the oxidation and/or reduction stability
# limits of water on a diagram made from the output of affinity(),
# equilibrate(), or diagram(). 'which' selects which limit(s) to compute;
# lty/lwd/col style the lines; plot.it=FALSE computes without drawing.
# Returns NA for diagrams it cannot handle (more than 2 variables, or a
# non-redox y-axis).
water.lines <- function(eout, which=c('oxidation','reduction'),
lty=2, lwd=1, col=par('fg'), plot.it=TRUE) {
# Draw water stability limits for Eh-pH, logfO2-pH, logfO2-T or Eh-T diagrams
# (i.e. redox variable is on the y axis)
# Get axes, T, P, and xpoints from output of affinity() or equilibrate()
if(missing(eout)) stop("'eout' (the output of affinity(), equilibrate(), or diagram()) is missing")
# Number of variables used in affinity()
nvar1 <- length(eout$vars)
# If these were on a transect, the actual number of variables is less
dim <- dim(eout$loga.equil[[1]]) # for output from equilibrate()
if(is.null(dim)) dim <- dim(eout$values[[1]]) # for output from affinity()
nvar2 <- length(dim)
# We only work on diagrams with 1 or 2 variables
if(!nvar1 %in% c(1,2) | !nvar2 %in% c(1,2)) return(NA)
# If needed, swap axes so redox variable is on y-axis
# Also do this for 1-D diagrams 20200710
if(is.na(eout$vars[2])) eout$vars[2] <- "nothing"
swapped <- FALSE
if(eout$vars[2] %in% c("T", "P", "nothing")) {
eout$vars <- rev(eout$vars)
eout$vals <- rev(eout$vals)
swapped <- TRUE
}
xaxis <- eout$vars[1]
yaxis <- eout$vars[2]
xpoints <- eout$vals[[1]]
# Make xaxis "nothing" if it is not pH, T, or P 20201110
# (so that horizontal water lines can be drawn for any non-redox variable on the x-axis)
if(!identical(xaxis, "pH") & !identical(xaxis, "T") & !identical(xaxis, "P")) xaxis <- "nothing"
# T and P are constants unless they are plotted on one of the axes
T <- eout$T
if(eout$vars[1]=="T") T <- envert(xpoints, "K")
P <- eout$P
if(eout$vars[1]=="P") P <- envert(xpoints, "bar")
# logaH2O is 0 unless given in eout$basis
iH2O <- match("H2O", rownames(eout$basis))
if(is.na(iH2O)) logaH2O <- 0 else logaH2O <- as.numeric(eout$basis$logact[iH2O])
# pH is 7 unless given in eout$basis or plotted on one of the axes
iHplus <- match("H+", rownames(eout$basis))
if(eout$vars[1]=="pH") pH <- xpoints
else if(!is.na(iHplus)) {
minuspH <- eout$basis$logact[iHplus]
# Special treatment for non-numeric value (happens when a buffer is used, even for another basis species)
if(can.be.numeric(minuspH)) pH <- -as.numeric(minuspH) else pH <- NA
}
else pH <- 7
# O2state is gas unless given in eout$basis
iO2 <- match("O2", rownames(eout$basis))
if(is.na(iO2)) O2state <- "gas" else O2state <- eout$basis$state[iO2]
# H2state is gas unles given in eout$basis
iH2 <- match("H2", rownames(eout$basis))
if(is.na(iH2)) H2state <- "gas" else H2state <- eout$basis$state[iH2]
# Where the calculated values will go
y.oxidation <- y.reduction <- NULL
if(xaxis %in% c("pH", "T", "P", "nothing") & yaxis %in% c("Eh", "pe", "O2", "H2")) {
# Eh/pe/logfO2/logaO2/logfH2/logaH2 vs pH/T/P
if('reduction' %in% which) {
logfH2 <- logaH2O # usually 0
if(yaxis=="H2") {
logK <- suppressMessages(subcrt(c("H2", "H2"), c(-1, 1), c("gas", H2state), T=T, P=P, convert=FALSE))$out$logK
# This is logfH2 if H2state=="gas", or logaH2 if H2state=="aq"
logfH2 <- logfH2 + logK
y.reduction <- rep(logfH2, length.out=length(xpoints))
} else {
logK <- suppressMessages(subcrt(c("H2O", "O2", "H2"), c(-1, 0.5, 1), c("liq", O2state, "gas"), T=T, P=P, convert=FALSE))$out$logK
# This is logfO2 if O2state=="gas", or logaO2 if O2state=="aq"
logfO2 <- 2 * (logK - logfH2 + logaH2O)
if(yaxis=="O2") y.reduction <- rep(logfO2, length.out=length(xpoints))
else if(yaxis=="Eh") y.reduction <- convert(logfO2, 'E0', T=T, P=P, pH=pH, logaH2O=logaH2O)
else if(yaxis=="pe") y.reduction <- convert(convert(logfO2, 'E0', T=T, P=P, pH=pH, logaH2O=logaH2O), "pe", T=T)
}
}
if('oxidation' %in% which) {
logfO2 <- logaH2O # usually 0
if(yaxis=="H2") {
logK <- suppressMessages(subcrt(c("H2O", "O2", "H2"), c(-1, 0.5, 1), c("liq", "gas", H2state), T=T, P=P, convert=FALSE))$out$logK
# This is logfH2 if H2state=="gas", or logaH2 if H2state=="aq"
logfH2 <- logK - 0.5*logfO2 + logaH2O
y.oxidation <- rep(logfH2, length.out=length(xpoints))
} else {
logK <- suppressMessages(subcrt(c("O2", "O2"), c(-1, 1), c("gas", O2state), T=T, P=P, convert=FALSE))$out$logK
# This is logfO2 if O2state=="gas", or logaO2 if O2state=="aq"
logfO2 <- logfO2 + logK
if(yaxis=="O2") y.oxidation <- rep(logfO2, length.out=length(xpoints))
else if(yaxis=="Eh") y.oxidation <- convert(logfO2, 'E0', T=T, P=P, pH=pH, logaH2O=logaH2O)
else if(yaxis=="pe") y.oxidation <- convert(convert(logfO2, 'E0', T=T, P=P, pH=pH, logaH2O=logaH2O), "pe", T=T)
}
}
} else return(NA)
# Now plot the lines
if(plot.it) {
if(swapped) {
# NOTE(review): the comment says 1-D diagram but the test is
# "nvar1 == 1 | nvar2 == 2" -- verify the nvar2 == 2 part is intended
# (nvar2 == 1 would be the 1-D case).
if(nvar1 == 1 | nvar2 == 2) {
# Add vertical lines on 1-D diagram 20200710
abline(v = y.oxidation[1], lty=lty, lwd=lwd, col=col)
abline(v = y.reduction[1], lty=lty, lwd=lwd, col=col)
} else {
# xpoints above is really the ypoints
lines(y.oxidation, xpoints, lty=lty, lwd=lwd, col=col)
lines(y.reduction, xpoints, lty=lty, lwd=lwd, col=col)
}
} else {
lines(xpoints, y.oxidation, lty=lty, lwd=lwd, col=col)
lines(xpoints, y.reduction, lty=lty, lwd=lwd, col=col)
}
}
# Return the values
return(invisible(list(xpoints=xpoints, y.oxidation=y.oxidation, y.reduction=y.reduction, swapped=swapped)))
}
mtitle <- function(main, line=0, spacing=1, ...) {
  # Write a possibly multi-line plot title in the top margin.
  # 'main' is a vector (may hold expressions); element i is drawn 'spacing'
  # margin lines above element i+1, and the last element sits on margin
  # line 'line'. Extra arguments are forwarded to mtext().
  # Use seq_len() so a zero-length 'main' is a silent no-op: the original
  # 1:len iterated over c(1, 0) when len == 0 and passed invalid indices
  # to mtext().
  len <- length(main)
  for(i in seq_len(len)) mtext(main[i], line = line + (len - i)*spacing, ...)
}
# Get colors for range of ZC values 20170206
ZC.col <- function(z) {
  # Map a numeric vector of ZC (carbon oxidation state) values to colors on
  # a diverging blue - light grey - red palette, with red at the reduced
  # (low-ZC) end. Reads the precomputed 1000-color palette shipped with
  # the CHNOSZ package.
  zrange <- diff(range(z))
  if(zrange == 0) {
    # All values equal: the original scaling divided by zero (NaN indices);
    # map everything to the middle of the palette instead
    z <- rep(500, length(z))
  } else {
    # Scale values to integer palette indices in [1, 1000]
    z <- z * 999/zrange
    z <- round(z - min(z)) + 1
  }
  # Diverging (blue - light grey - red) palette
  # dcol <- colorspace::diverge_hcl(1000, c = 100, l = c(50, 90), power = 1)
  # Use precomputed values
  file <- system.file("extdata/cpetc/bluered.txt", package = "CHNOSZ")
  dcol <- read.table(file, as.is=TRUE)[[1]]
  # Reverse the palette so red is at lower ZC (more reduced)
  rev(dcol)[z]
}
# Function to add axes and axis labels to plots,
# with some default style settings (rotation of numeric labels)
# With the default arguments (no labels specified), it plots only the axis lines and tick marks
# (used by diagram() for overplotting the axis on diagrams filled with colors).
thermo.axis <- function(lab=NULL,side=1:4,line=1.5,cex=par('cex'),lwd=par('lwd'),col=par('col'), grid = "", col.grid="gray", plot.line=FALSE) {
# Draw axis lines, major/minor tick marks, optional grid lines
# (grid = "major"/"minor"/"both"), and (if 'lab' is given) the axis title
# on each side listed in 'side'. With lwd=NULL only the label is drawn.
if(!is.null(lwd)) {
for(thisside in side) {
## Get the positions of major tick marks
at <- axis(thisside,labels=FALSE,tick=FALSE)
# Get nicer divisions for axes that span exactly 15 units 20200719
if(thisside %in% c(1,3)) lim <- par("usr")[1:2]
if(thisside %in% c(2,4)) lim <- par("usr")[3:4]
if(abs(diff(lim)) == 15) at <- seq(lim[1], lim[2], length.out = 6)
if(abs(diff(lim)) == 1.5) at <- seq(lim[1], lim[2], length.out = 4)
# Make grid lines
if(grid %in% c("major", "both") & thisside==1) abline(v = at, col=col.grid)
if(grid %in% c("major", "both") & thisside==2) abline(h = at, col=col.grid)
## Plot major tick marks and numeric labels
do.label <- TRUE
if(missing(side) | (missing(cex) & thisside %in% c(3,4))) do.label <- FALSE
# col and col.ticks: plot the tick marks but no line (we make it with box() in thermo.plot.new()) 20190416
# mat: don't plot ticks at the plot limits 20190416
if(thisside %in% c(1, 3)) pat <- par("usr")[1:2]
if(thisside %in% c(2, 4)) pat <- par("usr")[3:4]
mat <- setdiff(at, pat)
if(plot.line) axis(thisside, at=mat, labels=FALSE, tick=TRUE, lwd=lwd, col.axis=col, col=col)
else axis(thisside, at=mat, labels=FALSE, tick=TRUE, lwd=lwd, col.axis=col, col = NA, col.ticks = col)
# Plot only the labels at all major tick points (including plot limits) 20190417
if(do.label) axis(thisside, at=at, tick=FALSE, col=col)
## Plot minor tick marks
# The distance between major tick marks
da <- abs(diff(at[1:2]))
# Distance between minor tick marks
di <- da / 4
if(!da %% 3) di <- da / 3
else if(da %% 2 | !(da %% 10)) di <- da / 5
# Number of minor tick marks
if(thisside %in% c(1,3)) {
ii <- c(1,2)
myasp <- par('xaxp')
} else {
ii <- c(3,4)
myasp <- par('yaxp')
}
myusr <- par('usr')[ii]
daxis <- abs(diff(myusr))
nt <- daxis / di + 1
## If nt isn't an integer, it probably
## means the axis limits don't correspond
## to major tick marks (expect problems)
##at <- seq(myusr[1],myusr[2],length.out=nt)
# Start from (bottom/left) of axis?
bl <- 1
#if(myasp[2]==myusr[2]) bl <- 2
# Is forward direction (top/right)?
tr <- 1
if(xor(myusr[2] < myusr[1] , bl==2)) tr <- -1
#at <- myusr[bl] + tr * di * seq(0:(nt-1))
# Well all of that doesn't work in a lot of cases,
# where none of the axis limits correspond to
# major tick marks. perhaps the following will work
at <- myusr[1] + tr * di * (0:(nt-1))
# Apply an offset
axt <- axTicks(thisside)[1]
daxt <- (axt - myusr[1])/di
daxt <- (daxt-round(daxt))*di
at <- at + daxt
## Get the positions of major tick marks and make grid lines
if(grid %in% c("minor", "both") & thisside==1) abline(v = at, col=col.grid, lty = 3)
if(grid %in% c("minor", "both") & thisside==2) abline(h = at, col=col.grid, lty = 3)
tcl <- par('tcl') * 0.5
at <- setdiff(at, pat)
if(plot.line) axis(thisside,labels=FALSE,tick=TRUE,lwd=lwd,col.axis=col,at=at,tcl=tcl, col = col)
else axis(thisside,labels=FALSE,tick=TRUE,lwd=lwd,col.axis=col,at=at,tcl=tcl, col = NA, col.ticks = col)
}
}
# Rotate labels on side axes
for(thisside in side) {
if(thisside %in% c(2,4)) las <- 0 else las <- 1
if(!is.null(lab)) mtext(lab,side=thisside,line=line,cex=cex,las=las)
}
}
# Function to add transparency to given color 20220223
add.alpha <- function(col, alpha) {
  # Append an alpha (transparency) channel to a color: 'col' is any color
  # understood by col2rgb(), 'alpha' is a two-digit hexadecimal string that
  # is appended to the "#RRGGBB" form of the color.
  chan <- col2rgb(col)
  hexcol <- rgb(chan[1], chan[2], chan[3], maxColorValue = 255)
  paste0(hexcol, alpha)
}
|
5af2bf373ad603bdd603848d7e589b68d9d54749
|
b7e40784cbdfb4592c5d027341c444552267218f
|
/tests/testthat/test_fitness.R
|
50b840706fded5e15947f755731da53d4a5f2639
|
[] |
no_license
|
Shixindatiancai/GA
|
8ec5a710dbb14bd74be3198d467bd63217727ec6
|
0c7ad7c4ab76965722d6e92b1a7b6e7fb5f3c775
|
refs/heads/master
| 2021-08-30T12:50:21.203488
| 2017-12-18T02:08:04
| 2017-12-18T02:08:04
| 114,583,705
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 788
|
r
|
test_fitness.R
|
context("calculation_fitness tests")
# Each test resets the package-level cache environment 'dict.fitness' so
# memoized fitness values from one test cannot leak into the next.
dict.fitness <<- new.env()
test_that("Input is sanitized", {
expect_error(calculate_fitness("a", "y"))
})
dict.fitness <<- new.env()
test_that("Normal input", {
# 10 binary predictor columns, 1000 observations, AIC as the fitness function
expect_that(calculate_fitness(1:3, data.frame(replicate(10,sample(0:1,1000,rep=TRUE))), 1:1000, fit_func=AIC), is_a("numeric"))
})
bad <- function(a, b, c) {
return("a")
}
test_that("User input error function doesn't return a numeric", {
expect_error(calculate_fitness(1:3, data.frame(replicate(10,sample(0:1,1000,rep=TRUE))), 1:1000, fit_func=bad))
})
dict.fitness <<- new.env()
test_that("X and y have different numbers of rows and columns", {
# 1100-row X against a 1000-element y must be rejected
expect_error(calculate_fitness(1:3, data.frame(replicate(10,sample(0:1,1100,rep=TRUE))), 1:1000, AIC))
})
|
a4d0e26ad3157f0ad4421929238b556a7e2a8255
|
822afc07c6897d71c8e60924d8e691dee4df6876
|
/Plot3.R
|
0912fdb5065033389138a91d10b3fedfa54ffc01
|
[] |
no_license
|
HenrryNadal/ExData_Plotting1
|
da8982ab18357f241065a653b6390378e574a61c
|
dccf43bb590cb5c9d486a80b36592452017379b7
|
refs/heads/master
| 2021-01-18T20:57:13.187712
| 2016-02-14T19:15:53
| 2016-02-14T19:15:53
| 51,679,411
| 0
| 0
| null | 2016-02-14T04:21:51
| 2016-02-14T04:21:50
| null |
UTF-8
|
R
| false
| false
| 997
|
r
|
Plot3.R
|
library(lubridate)
# Download the zipped UCI household power data to a temp file and read it.
# NOTE(review): the temp file is never unlink()ed, and method="curl" assumes
# curl is installed -- consider cleanup and the default download method.
temp <- tempfile()
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", temp, method="curl")
contain<-unz(temp, "household_power_consumption.txt")
Data<-read.table(contain, sep=";", header = T, stringsAsFactors = FALSE, na.strings = "?")
# Build a combined date-time column, then convert Date/Time to typed columns
Data$DateTime<-paste(Data$Date,Data$Time)
Data$DateTime<-strptime(Data$DateTime, "%d/%m/%Y %H:%M:%S")
Data$Date<-as.Date(Data$Date, "%d/%m/%Y")
Data$Time<-hms(Data$Time)
# Keep only 2007-02-01 and 2007-02-02 (character bounds are coerced for the
# comparison against the Date column)
Data<-subset(Data, "2007-02-01"<=Date & Date<="2007-02-02")
png(filename = "plot3.png", width = 480, height = 480)
#Plotting Energy submetering 1-3 vs DateTime
with(Data, plot(DateTime,Sub_metering_1, "l", xlab="",
ylab="Energy sub metering"))
lines(Data$DateTime, Data$Sub_metering_2, col="red")
lines(Data$DateTime, Data$Sub_metering_3, col="blue")
legend("topright", col=c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1)
dev.off()
|
2c5f2c952427c3848243634efbb78f914899e6b0
|
75a99fd02c5b224ea4676847415f0b8ca541e784
|
/Code/hesselinke_topic_modeling_untagged.R
|
ac748b5ebb646cf77cb84fabc37111b7f914fa2c
|
[] |
no_license
|
livieee/textanalysis
|
6684e516b71212ad9a1f521e7cd41a4a3409e3d8
|
ac9503dbdbe2b40022b090da7a87fb1883ede6c7
|
refs/heads/master
| 2020-04-18T01:03:35.453956
| 2019-01-23T02:57:36
| 2019-01-23T02:57:36
| 167,103,798
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,465
|
r
|
hesselinke_topic_modeling_untagged.R
|
#This file is to generating untagged wordclouds for Hesslink_data
#Only the words version (do not include digits)
#It was using the tm_stopwords
#1, 15, 30 Topics
# Load the comment dataset (hard-coded local path) and pull out the comment
# text and user names as character vectors.
hesselink_data <- readRDS("~/Documents/Hesselink copy2/R object for Hesselink Dataset/hesselink_data.rds")
user.comment.v <- as.character(hesselink_data$User.Comment)
length(user.comment.v) #87 Comments in total
user.name <- as.character(hesselink_data$User)
chunk.size <- 20 # number of words per chunk
library(quanteda)
library(qdap)
# Split a text into chunks of words for topic modeling.
# text.v: character vector holding the text (collapsed to one string).
# chunk.size: number of chunks when percentage=TRUE (percentage-based
#   segmentation), or number of words per chunk when percentage=FALSE.
# Returns a one-column matrix with one chunk of space-separated words per row.
makeFlexTextChunks <- function(text.v, chunk.size=20, percentage=TRUE){
  words <- paste(text.v, collapse = " ")
  words.lower <- tolower(words)
  # strip() (qdap) removes digits; apostrophes are kept
  words.v <- strip(words.lower,
                   digit.remove = T, apostrophe.remove=F, lower.case=T)
  word.v <- tokens(words.v, what = "word", remove_symbols = TRUE,
                   remove_url = TRUE,
                   remove_punct = TRUE)
  word.v <- word.v[which(word.v!="")]
  x <- seq_along(word.v)
  if(percentage){
    # chunk.size chunks of (roughly) equal size
    max.length <- length(word.v)/chunk.size
    chunks.l <- split(word.v, ceiling(x/max.length))
  } else {
    chunks.l <- split(word.v, ceiling(x/chunk.size))
    # Deal with a small leftover chunk at the end: if the last chunk holds
    # at most half a chunk's worth of words, fold it into the previous one.
    # BUG FIX: the original condition compared the last chunk's length to
    # half of ITSELF (always FALSE for a nonempty chunk), so the merge
    # never happened; compare against chunk.size/2 instead. Also guard the
    # single-chunk case so chunks.l[[0]] is never indexed.
    if(length(chunks.l) > 1 &&
       length(chunks.l[[length(chunks.l)]]) <= chunk.size/2){
      chunks.l[[length(chunks.l)-1]] <-
        c(chunks.l[[length(chunks.l)-1]],
          chunks.l[[length(chunks.l)]])
      chunks.l[[length(chunks.l)]] <- NULL
    }
  }
  chunks.l <- lapply(chunks.l, paste, collapse=" ")
  chunks.df <- do.call(rbind, chunks.l)
  return(chunks.df)
}
# Chunk every comment into 20-word segments and stack them into a
# two-column matrix: "user#segment" id, chunk text.
# NOTE(review): topic.m grows via rbind inside the loop — fine at this
# scale, but quadratic for large corpora.
topic.m <- NULL
for(i in 1:length(user.comment.v)){
  comment <- user.comment.v[i]
  comment <- enc2utf8(comment)
  chunk.m <- makeFlexTextChunks(comment, chunk.size,
                                percentage=FALSE)
  username <- user.name[i]
  # Segment ids look like "username#1", "username#2", ...
  segments.m <- cbind(paste(username,
                            segment=1:nrow(chunk.m), sep="#"), chunk.m)
  topic.m <- rbind(topic.m, segments.m)
}
documents <- as.data.frame(topic.m, stringsAsFactors=F)
colnames(documents) <- c("id", "comment")
library(mallet)
# Build the MALLET instance list from the chunked documents, using the
# tm_stopwords.csv stop list; the regexp keeps letters and apostrophes.
mallet.instances <- mallet.import(documents$id,
                                  documents$comment,
                                  "tm_stopwords.csv",
                                  FALSE,
                                  token.regexp="[\\p{L}']+")
# ---- 1-topic model -------------------------------------------------------
#Create only one topic
topic.model <- MalletLDA(num.topics=1)
#Fill in trainer object with textual data
topic.model$loadDocuments(mallet.instances)
#Access a list of the entire vocabulary of the corpus
vocabulary <- topic.model$getVocabulary()
length(vocabulary)
head(vocabulary)
vocabulary[1:50]
word.freqs <- mallet.word.freqs(topic.model)
head(word.freqs)
#Control the optimization interval and burn-in
topic.model$setAlphaOptimization(40, 80)
#Set the number of iterations in training
topic.model$train(400)
# Unpack the model into a topics-by-word-types probability matrix.
topic.words.m <- mallet.topic.words(topic.model,
                                    smoothed=TRUE,
                                    normalized=TRUE)
dim(topic.words.m)
# 1 x 910 (topics x vocabulary)
rowSums(topic.words.m)
#Add the vocabulary column for each word type
colnames(topic.words.m) <- vocabulary
library(slam)
library(wordcloud)
# Create a word cloud for the single topic and save it as a PNG named
# after the topic's top three words.
topic.top.words <- mallet.top.words(topic.model,
                                    topic.words.m[1,], 100)
#topic.top.words$words[1:3]
set.seed(142)
png(paste("Topic_Modeling_Untagged/tm_stopwords_1_topic/topic1-",
          paste(topic.top.words$words[1],
                topic.top.words$words[2],
                topic.top.words$words[3],sep = "-"), ".png", sep = ""),
    width=12,height=8, units='in', res=300)
wordcloud(topic.top.words$words,
          topic.top.words$weights,
          c(4,.8), rot.per=0.2, random.order=F,
          colors = rainbow(20))
dev.off()
# ---- 15-topic model ------------------------------------------------------
#Create 15 topics
topic.model <- MalletLDA(num.topics=15)
#Fill in trainer object with textual data
topic.model$loadDocuments(mallet.instances)
#Access a list of the entire vocabulary of the corpus
vocabulary <- topic.model$getVocabulary()
# Basic word-frequency info across the corpus/documents.
word.freqs <- mallet.word.freqs(topic.model)
head(word.freqs)
#Control the optimization interval and burn-in
topic.model$setAlphaOptimization(40, 80)
#Set the number of iterations in training
topic.model$train(400)
# Unpack the model into a topics-by-word-types probability matrix.
topic.words.m <- mallet.topic.words(topic.model,
                                    smoothed=TRUE,
                                    normalized=TRUE)
dim(topic.words.m)
# 15 x 910
rowSums(topic.words.m)
#Add the vocabulary column for each word type
colnames(topic.words.m) <- vocabulary
# Generate 15 word clouds, one per topic row.
for (i in 1: 15){
  topic.top.words <- mallet.top.words(topic.model,
                                      topic.words.m[i,], 80)
  # Re-seed per topic so layouts are reproducible.
  set.seed(142)
  png(paste("Topic_Modeling_Untagged/tm_stopwords_15_topic/topic",
            i, "-",
            paste(topic.top.words$words[1],
                  topic.top.words$words[2],
                  topic.top.words$words[3],sep = "-"), ".png", sep = ""),
      width=12,height=8, units='in', res=300)
  wordcloud(topic.top.words$words,
            topic.top.words$weights,
            c(4,.8), rot.per=0.2, random.order=F, colors = rainbow(20))
  dev.off()
}
# ---- 30-topic model ------------------------------------------------------
#Create 30 topics
topic.model <- MalletLDA(num.topics=30)
#Fill in trainer object with textual data
topic.model$loadDocuments(mallet.instances)
#Access a list of the entire vocabulary of the corpus
vocabulary <- topic.model$getVocabulary()
# Basic word-frequency info across the corpus/documents.
word.freqs <- mallet.word.freqs(topic.model)
head(word.freqs)
#Control the optimization interval and burn-in
topic.model$setAlphaOptimization(40, 80)
#Set the number of iterations in training
topic.model$train(400)
# Unpack the model into a topics-by-word-types probability matrix.
topic.words.m <- mallet.topic.words(topic.model,
                                    smoothed=TRUE,
                                    normalized=TRUE)
dim(topic.words.m)
# 30 x 910
rowSums(topic.words.m)
#Add the vocabulary column for each word type
colnames(topic.words.m) <- vocabulary
# Generate 30 word clouds, one per topic row (the original comment said
# 15 — copy/paste slip). Note: this section uses c(4,.3) and rot.per=0,
# unlike the 15-topic section.
for (i in 1: 30){
  topic.top.words <- mallet.top.words(topic.model,
                                      topic.words.m[i,], 80)
  set.seed(142)
  png(paste("Topic_Modeling_Untagged/tm_stopwords_30_topic/topic",
            i, "-",
            paste(topic.top.words$words[1],
                  topic.top.words$words[2],
                  topic.top.words$words[3],sep = "-"), ".png", sep = ""),
      width=12,height=8, units='in', res=300)
  wordcloud(topic.top.words$words,
            topic.top.words$weights,
            c(4,.3), rot.per=0, random.order=F, colors = rainbow(20))
  dev.off()
}
|
fcbde7182882852008dc69e35359ae505ddac948
|
e6462914de5de27eb35b7aa2b526e85e4ea7ee42
|
/R/auth.R
|
980bbe6a2538ed40ad006b68554c665c959e4e8e
|
[] |
no_license
|
ekmixon/rtweet
|
0a540d7eb8fa7256ffbcdb84ae0ec2cf74dad55a
|
ea1a9e136867a3607a6ca8b50091dce7dfd27ebc
|
refs/heads/master
| 2023-08-11T02:20:33.870520
| 2021-09-28T09:41:11
| 2021-09-28T09:41:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,912
|
r
|
auth.R
|
#' Set up default authentication
#'
#' You'll need to run this function once per computer so that rtweet can use
#' your personal twitter account. See [rtweet_app()]/[rtweet_bot] and
#' [auth_save()] for other authentication options.
#'
#' @export
#' @family authentication
auth_setup_default <- function() {
  # Run the interactive user-auth flow, then persist it under the name
  # "default" so future sessions pick it up automatically.
  user_auth <- rtweet_user()
  auth_save(user_auth, "default")
}
#' Authentication options
#'
#' @description
#' There are three ways that you can authenticate with the twitter API:
#'
#' * `rtweet_user()` interactively authenticates an existing twitter user.
#' This form is most appropriate if you want rtweet to control your
#' twitter account.
#'
#' * `rtweet_app()` authenticates as a twitter application. An application can't
#' perform actions (i.e. it can't tweet) but otherwise has generally higher
#' rate limits (i.e. you can do more searches). See details
#' at <https://developer.twitter.com/en/docs/basics/rate-limits.html>.
#' This form is most appropriate if you are collecting data.
#'
#' * `rtweet_bot()` authenticates as bot that takes actions on behalf of an app.
#' This form is most appropriate if you want to create a twitter account that
#' is run by a computer, rather than a human.
#'
#' To use `rtweet_app()` or `rtweet_bot()` you will need to create your own
#' Twitter app following the instructions in `vignette("auth.Rmd")`.
#' `rtweet_user()` _can be_ used with your own app, but generally there is
#' no need to because it uses the Twitter app provided by rtweet.
#'
#' Use [auth_as()] to set the default auth mechanism for the current session,
#' and [auth_save()] to save an auth mechanism for use in future sessions.
#'
#' # Security
#'
#' All of the arguments to these functions are roughly equivalent to
#' passwords so should generally not be typed into the console (where they
#' the will be recorded in `.Rhistory`) or recorded in a script (which is
#' easy to accidentally share). Instead, call these functions without arguments
#' since the default behaviour is to use [askpass::askpass()] to interactively
#' prompt you for the values.
#'
#' @param api_key,api_secret Application API key and secret. These are
#' generally not required for `tweet_user()` since the defaults will use
#' the built-in rtweet app.
#' @param access_token,access_secret Access token and secret.
#' @param bearer_token App bearer token.
#' @family authentication
#' @export
rtweet_user <- function(api_key = NULL, api_secret = NULL) {
  # httpuv is needed for the local OAuth callback server.
  check_installed("httpuv")
  if (is.null(api_key) && is.null(api_secret)) {
    # No app supplied: fall back to the Twitter app bundled with rtweet,
    # whose key/secret ship RSA-encrypted in the package data (sysdat).
    decrypt <- function(x) {
      rawToChar(openssl::rsa_decrypt(x[[2]], x[[1]]))
    }
    api_key <- decrypt(sysdat$DYKcJfBkgMnGveI)
    api_secret <- decrypt(sysdat$MRsnZtaKXqGYHju)
  } else {
    stopifnot(is_string(api_key), is_string(api_secret))
  }
  app <- httr::oauth_app("rtweet", key = api_key, secret = api_secret)
  # TwitterToken1.0 (defined later in this file) runs the OAuth dance
  # with the 127.0.0.1 callback Twitter requires.
  TwitterToken1.0$new(
    app = app,
    endpoint = httr::oauth_endpoints("twitter"),
    params = list(as_header = TRUE),
    cache_path = FALSE
  )
}
#' @export
#' @rdname rtweet_user
rtweet_bot <- function(
                       api_key = ask_pass("API key"),
                       api_secret = ask_pass("API secret"),
                       access_token = ask_pass("access token"),
                       # Bug fix: this prompt previously read "access token",
                       # asking the user for the wrong credential.
                       access_secret = ask_pass("access secret")
                       ) {
  # All four credentials must be plain strings before we build the token.
  stopifnot(is_string(api_key), is_string(api_secret))
  stopifnot(is_string(access_token), is_string(access_secret))

  app <- httr::oauth_app("rtweet", key = api_key, secret = api_secret)
  credentials <- list(
    oauth_token = access_token,
    oauth_token_secret = access_secret
  )
  # Construct the httr OAuth 1.0 token directly from supplied
  # credentials — no browser round-trip is needed for a bot.
  httr::Token1.0$new(
    app = app,
    endpoint = httr::oauth_endpoints("twitter"),
    params = list(as_header = TRUE),
    credentials = credentials,
    cache_path = FALSE
  )
}
#' @export
#' @rdname rtweet_user
rtweet_app <- function(bearer_token = ask_pass("bearer token")) {
  # Wrap the bearer token in a lightweight S3 object; print.rtweet_bearer
  # keeps the secret from being echoed.
  auth <- list(token = bearer_token)
  class(auth) <- "rtweet_bearer"
  auth
}
# Interactively prompt for a credential of the given `type` (e.g. "API
# key") via askpass, aborting if the user cancels the dialog.
ask_pass <- function(type) {
  check_installed("askpass")
  prompt <- paste0("Please enter your ", type, ": ")
  answer <- askpass::askpass(prompt)
  if (is.null(answer)) {
    abort("Cancelled by user")
  }
  answer
}
# TRUE when `x` is a supported auth object: an httr Token or the
# bearer-token wrapper created by rtweet_app().
is_auth <- function(x) {
  inherits(x, c("Token", "rtweet_bearer"))
}
#' @export
print.rtweet_bearer <- function(x, ...) {
  # Deliberately show a placeholder instead of the token value so the
  # secret is never echoed into consoles or logs.
  writeLines("<twitter bearer token>")
  invisible(x)
}
# Get default auth --------------------------------------------------------
#' Get the current authentication mechanism
#'
#' If no authentication has been set up for this session, `auth_get()` will
#' call [auth_as()] to set it up.
#'
#' @keywords internal
#' @family authentication
#' @export
auth_get <- function() {
  # Lazily initialise: if nothing has been set for this session yet,
  # auth_as() loads the cached "default" auth (or errors with guidance).
  if (is.null(.state$auth)) {
    auth_as()
  }
  .state$auth
}
# Save authentication across sessions -------------------------------------
#' Save an authentication mechanism for use in a future session
#'
#' Use `auth_save()` with [auth_as()] to avoid repeatedly entering app
#' credentials, making it easier to share auth between projects.
#' Use `auth_list()` to list all saved credentials.
#'
#' @param auth One of [rtweet_app()], [rtweet_bot()], or [rtweet_user()].
#' @param name Cache name to use.
#' @family authentication
#' @export
#' @examples
#' \dontrun{
#' # save app auth for use in other sessions
#' auth <- rtweet_app()
#' auth_save(auth, "my-app")
#'
#' # later, in a different session...
#' auth_as("my-app")
#' }
auth_save <- function(auth, name) {
  # Refuse anything that is not a recognised auth object / name string.
  stopifnot(is_auth(auth), is_string(name))

  target <- auth_path(paste0(name, ".rds"))
  inform(paste0("Saving auth to '", target, "'"))
  # Make sure the per-user config directory exists before writing.
  dir.create(auth_path(), showWarnings = FALSE, recursive = TRUE)
  saveRDS(auth, target)
  invisible(target)
}
#' @export
#' @rdname auth_save
auth_list <- function() {
  # Saved auths are .rds files in the config dir; report their bare names.
  saved <- list.files(auth_path(), pattern = "\\.rds$")
  tools::file_path_sans_ext(saved)
}
# Build a path inside rtweet's per-user config directory. The private
# "rtweet:::config_dir" option lets the test suite redirect it.
auth_path <- function(...) {
  base <- getOption("rtweet:::config_dir", rappdirs::user_config_dir("rtweet", "R"))
  file.path(base, ...)
}
# Set default auth -------------------------------------------------------------
#' Set default authentication for the current session
#'
#' `auth_as()` sets up the default authentication mechanism used by all
#' rtweet API calls. See [rtweet_user()] to learn more about the three
#' available authentication options.
#'
#' @param auth One of the following options:
#' * `NULL`, the default, will look for rtweet's "default" authentication
#' which uses your personal twitter account. If it's not found, it will
#' call [auth_setup_default()] to set it up.
#' * A string giving the name of a saved auth file made by [auth_save()].
#' * An auth object created by [rtweet_app()], [rtweet_bot()], or
#' [rtweet_user()].
#' @return Invisibly returns the previous authentication mechanism.
#' @family authentication
#' @export
#' @examples
#' \dontrun{
#' # Use app auth for the remainder of this session:
#' my_app <- rtweet_app()
#' auth_as(my_app)
#'
#' # Switch back to the default user based auth
#' auth_as()
#'
#' # Load auth saved by auth_save()
#' auth_as("my-saved-app")
#' }
auth_as <- function(auth = NULL) {
  # Stash the previous mechanism so callers (see local_auth()) can
  # restore it later; find_auth() resolves NULL/string/object inputs.
  old <- .state$auth
  .state$auth <- find_auth(auth)
  invisible(old)
}
# Resolve the `auth` argument of auth_as() into an auth object.
# Accepts NULL (session defaults), an auth object, or the name of an
# auth previously saved with auth_save().
find_auth <- function(auth = NULL) {
  if (is.null(auth)) {
    if (is_testing()) {
      # Under testthat: use env-var bot auth, else signal "no token"
      # (which skips the test).
      rtweet_test() %||% no_token()
    } else if (is_dev_mode()) {
      rtweet_test() %||% default_cached_auth()
    } else {
      default_cached_auth()
    }
  } else if (is_auth(auth)) {
    auth
  } else if (is_string(auth)) {
    # Treat the string as the name of a saved auth file.
    path <- auth_path(paste0(auth, ".rds"))
    if (!file.exists(path)) {
      abort(paste0("Can't find saved auth with name '", auth, "'"))
    }
    inform(paste0("Reading auth from '", path, "'"))
    readRDS(path)
  } else {
    abort("Unrecognised input to `auth`")
  }
}
# Load the auth saved under the name "default", or fail with a message
# that lists any other saved auth names the user could pick instead.
default_cached_auth <- function() {
  default <- auth_path("default.rds")
  if (file.exists(default)) {
    readRDS(default)
  } else {
    names <- auth_list()
    if (length(names) == 0) {
      abort("No default authentication found. Please call `auth_setup_default()`")
    } else {
      abort(c(
        "No default authentication found. Pick existing auth with:",
        paste0("auth_as('", names, "')")
      ))
    }
  }
}
# Signal the absence of usable auth: a hard error normally, a test skip
# when running under testthat.
no_token <- function() {
  if (!is_testing()) {
    stop("Could not authenticate", call. = FALSE)
  }
  testthat::skip("Auth not available")
}
# Bot auth for rtweet's own test suite. The hard-coded pair below is the
# package's test app key/secret; the user-level token and secret must be
# supplied via the RTWEET_ACCESS_TOKEN / RTWEET_ACCESS_SECRET
# environment variables. Returns NULL (invisibly) when they are unset.
rtweet_test <- function() {
  access_token <- Sys.getenv("RTWEET_ACCESS_TOKEN")
  access_secret <- Sys.getenv("RTWEET_ACCESS_SECRET")
  if (identical(access_token, "") || identical(access_secret, "")) {
    return()
  }
  rtweet_bot(
    "7rX1CfEYOjrtZenmBhjljPzO3",
    "rM3HOLDqmjWzr9UN4cvscchlkFprPNNg99zJJU5R8iYtpC0P0q",
    access_token,
    access_secret
  )
}
# Capture the current auth and schedule it to be restored when `env`
# (the calling frame by default) exits — for temporary auth switches.
local_auth <- function(env = parent.frame()) {
  auth <- auth_get()
  withr::defer(auth_as(auth), envir = env)
}
# Twitter Token -----------------------------------------------------------

# Twitter requires a callback url that uses 127.0.0.1 rather than localhost
# so we temporarily override HTTR_SERVER during initialisation.
TwitterToken1.0 <- R6::R6Class("TwitterToken1.0", inherit = httr::Token1.0, list(
  # Override httr::Token1.0's credential step so the OAuth dance goes
  # through twitter_init_oauth1.0() below (which sets the env var).
  init_credentials = function(force = FALSE) {
    self$credentials <- twitter_init_oauth1.0(
      self$endpoint,
      self$app,
      permission = self$params$permission,
      private_key = self$private_key
    )
  }
))
# Same as httr::init_oauth1.0(), but with HTTR_SERVER forced to
# "127.0.0.1" for the duration of the call (Twitter rejects "localhost"
# callback URLs). withr::local_envvar restores the old value on exit.
twitter_init_oauth1.0 <- function (endpoint, app, permission = NULL,
                                   is_interactive = interactive(),
                                   private_key = NULL) {
  withr::local_envvar("HTTR_SERVER" = "127.0.0.1")
  httr::init_oauth1.0(
    endpoint,
    app,
    permission = permission,
    is_interactive = is_interactive,
    private_key = private_key
  )
}
|
aef92506de186b6f842a6dd472fd71048037d2a5
|
9c1f77cd384c5cced1363df5139f1449f04b12b3
|
/man/stress.Rd
|
d2cbfc35660e1f501819c40d1ce0154c2c54c4d5
|
[] |
no_license
|
StaThin/ggm
|
1d693e9bac56d4e3bc907e13693413caf19b2711
|
b8033f742e215ce1cd5929b199f47003ff9cb673
|
refs/heads/main
| 2023-08-25T21:15:37.962146
| 2023-08-25T14:32:11
| 2023-08-25T14:32:11
| 178,892,273
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 714
|
rd
|
stress.Rd
|
\name{stress}
\alias{stress}
\docType{data}
\title{
Stress
}
\description{
Stress data
}
\usage{data(stress)}
\format{
A \eqn{4 \times 4} covariance matrix for the following variables.
\describe{
\item{Y}{}
\item{V}{}
\item{X}{}
\item{U}{}
}
}
\details{
See Cox and Wermuth (1996).
}
\references{
Cox, D. R. & Wermuth, N. (1996). \emph{Multivariate
dependencies}. London: Chapman & Hall.
Slangen K., Kleemann P. P., and Krohne H. W. (1993). Coping with surgical stress. In: Krohne H. W. (ed.). \emph{Attention and avoidance: Strategies in coping with aversiveness}. New York, Heidelberg: Springer, 321-346.
}
\examples{
data(stress)
G = UG(~ Y*X + X*V + V*U + U*Y)
fitConGraph(G, stress, 100)
}
\keyword{datasets}
|
7a6effcb15cb17071bc5f189409bb87c5df88bd7
|
3e94d680c7c63ad8e8813abeb1f26bffa5ddcac2
|
/penguins.R
|
af19e89d85932468e1778284f5453c214670d951
|
[] |
no_license
|
RavindraRashmiDasappa/Penguins-Data-Time-Series-Analysis-in-R
|
568271e8fdb70b7c92ba41c8d34eaa90e3bb52d0
|
93a32c741960ad4be45634ce04574bcb40cf22fe
|
refs/heads/master
| 2021-01-24T04:08:06.455913
| 2018-02-26T06:09:59
| 2018-02-26T06:09:59
| 122,921,886
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,612
|
r
|
penguins.R
|
# Time-series analysis of monthly penguin counts (2000-2006): explore
# trend/seasonality, difference/log-transform, then fit a seasonal ARIMA
# and forecast.
#Reading Data
data <- read.csv(file="/Users/rahmi/Desktop/Penguins/_penguin.csv", stringsAsFactors = FALSE)
#View(data)
#checking the type of data
class(data)
#Converting dataframe into time series data
library(tidyr)
# NOTE(review): the result of separate() is printed but never assigned,
# so the Year/Month split is discarded — confirm whether
# `data <- data %>% separate(...)` was intended.
data %>% separate(Month, c("Year", "Month"), "M")
data <- subset(data, select = -Month )
print(data)
myts <- ts(data, start=c(2000, 01), end=c(2006, 12), frequency=12)
print(myts)
#This tells you that the data series is in a time series format
start(myts)
end(myts)
frequency(myts)
#summary of data
summary(myts)
#The number of penguins distributed across the spectrum
plot(myts)
abline(reg=lm(myts~time(myts)))
#Trends observed
#1. The count of penguins and variance is decreasing
#To print the cycle across years
cycle(myts)
#This will aggregate the cycles and display a year on year trend
plot(aggregate(myts,FUN=mean))
#Box plot across months will give us a sense on seasonal effect
boxplot(myts~cycle(myts))
#Trends Observed
#1. The number of penguins trend varied across every month
#2. Variance and mean are varying
#3. Mean is high in the month of December
#4. There is a sudden decrease in the month of June that may be due to irregular environmental
#issues or the penguins migrate to the other place during those months.
#Before performing Augmented Dickey-Fuller Test to see if the data is stationarized
# NOTE(review): installing packages at runtime is a side effect; move
# install.packages() out of the analysis script.
install.packages('tseries')
library(tseries)
adf.test(diff(log(myts)), alternative=c("stationary","explosive"), k=0)
#plotting "q"
acf(diff(log(myts)))
#plotting "p"
pacf(diff(log(myts)))
#Here, the partial auto correlation function returns a negative tail.
#This is suggestive that the time series data needs extra differencing.
#You can continue on to do things like difference the mean, use a Box-Cox Transformation,
#or any number of other methods, but here I will choose to use the auto.arima function to aid in the forecasting.
#This function comes from the forecast package so we will need to call its library.
library(forecast)
auto.arima(myts)
fit <- arima(log(myts), c(2, 1, 0),seasonal = list(order = c(0, 1, 1), period = 12))
pred <- predict(fit,n.ahead=10*12)
# NOTE(review): 2.718 is a truncated approximation of e; exp(pred$pred)
# would back-transform the log forecasts exactly.
pred1 <- 2.718^pred$pred
ts.plot(myts,2.718^pred$pred, log = "y", lty = c(1,3))
#Testing: refit on 2000-2005 only and compare 2006 predictions with the
#held-out 2006 observations.
datawide <- ts(data,frequency = 12,start =c(2000, 01), end=c(2005, 12) )
fit <- arima(log(datawide), c(2, 1, 0),seasonal = list(order = c(0, 1, 1), period = 12))
pred <- predict(fit,n.ahead=10*12)
pred1 <- 2.718^pred$pred
data1<- head(pred1,12)
predicted_2006<- round(data1,digits=0)
original_2006 <- tail(myts,12)
ts.plot(myts,2.718^pred$pred, log = "y", lty = c(1,3))
|
ff463da80e184bb0d53020945825d82a7bad1706
|
7db9b9b9ddb24ada82149f961bdf53dc6c2dc2eb
|
/Source/load_data.R
|
9623f1549b27c56205a9cb6d8cb44c39b5965750
|
[] |
no_license
|
RicFraga/Recreate-Graphs
|
4695ba41c27edf363f248792c252c6728ee05ede
|
f11a767ce233c0db98f77b23a380b56d3cfa3dad
|
refs/heads/master
| 2022-11-18T14:27:32.771409
| 2020-07-17T18:13:28
| 2020-07-17T18:13:28
| 280,322,620
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 653
|
r
|
load_data.R
|
# Load the semicolon-separated power-consumption data and keep only the
# observations from 2007-02-01 and 2007-02-02 — the two days the plots
# in the other R files use.
read_data <- function(path_to_data) {
  raw <- read.csv(path_to_data, sep = ";", stringsAsFactors = FALSE)
  # Date column arrives as "dd/mm/YYYY" strings.
  obs_dates <- as.Date(raw$Date, "%d/%m/%Y")
  target_days <- as.Date(c("2007-02-01", "2007-02-02"))
  raw[obs_dates %in% target_days, ]
}
|
608d766765b4adaba26c064b33444bb23a8a5aca
|
0c9fbe0ffcc21ae5e12582ab75541c777a5017ac
|
/R/chordgen.R
|
af3ac237cf208d49a9a73c439c890a652d5525f7
|
[] |
no_license
|
meumesmo/chordgen
|
a8cf5e282fc1c4c97df77dbd6e7d25eaf570d2d7
|
63fba426c4ac40657d2a002ac668573163366f4b
|
refs/heads/master
| 2020-04-18T16:40:39.785927
| 2018-10-06T20:26:45
| 2018-10-06T20:26:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,284
|
r
|
chordgen.R
|
#' Generate random chords
#'
#' Generate random chords using many options
#'
#' @param n number of chords
#' @param n_notes number of notes used for each chord. Between 3 and 7.
#' @param invert make inversions flag
#' @param triad_types 1:major, 2:minor, 3:augmented, 4:diminished,
#'   5:sus4, 6:m3d5
#' @param octave add octaves
#' @param transpose interval to make transpositions
#' @param add_lyric add text below chords?
#'
#' @export
chordgen <- function(n, n_notes = 4, invert = TRUE,
                     triad_types = 1:5,
                     octave = seq(-1, 1, 1),
                     transpose = seq(-5, 6, 1),
                     add_lyric = TRUE) {
  stream <- music21$stream$Stream()
  # Build each chord in turn and push it onto the music21 stream.
  for (idx in seq_len(n)) {
    stream$append(chordgen_(idx,
                            n_notes = n_notes, invert = invert,
                            triad_types = triad_types,
                            octave = octave,
                            transpose = transpose,
                            add_lyric = add_lyric))
  }
  stream
}
# Build one random chord (a music21 Chord object). Arguments mirror
# chordgen(). `n` arrives as the chord's index and is immediately
# overwritten by the sampled note count, so it only serves to drive the
# iteration — NOTE(review): the shadowing is confusing but harmless.
chordgen_ <- function(n, n_notes, invert, triad_types,
                      octave, transpose, add_lyric) {
  # sample(rep(x, 2), 1) guards against sample()'s 1:x behaviour when x
  # is a single number.
  n <- sample(rep(n_notes, 2), 1)
  if (n > 4) {
    # 5+ notes: draw a ready-made chord of that size from the table
    # returned by all_chords().
    ch <- all_chords() %>%
      dplyr::filter(n_notes == n) %>%
      dplyr::sample_n(1) %>%
      dplyr::pull(notes_c) %>%
      chord()
  } else {
    triad_type <- sample(rep(triad_types, 2), 1)
    ch <- triad(triad_type)
    if (n == 4) {
      # Tetrad: extend the triad with one compatible added note.
      accident <- sample(possible_tetrads(triad_type), 1)
      ch$add(accident)
    }
  }
  # transpose! — shift by a sampled interval plus a random whole octave,
  # and move the root note accordingly.
  transpose_value <- sample(rep(transpose, 2), 1)
  transpose_value <- transpose_value + 12 * sample(rep(octave, 2), 1)
  ch <- ch$transpose(transpose_value)
  root_transpose <- music21$note$Note("C4")$transpose(transpose_value)
  ch$root(root_transpose)
  # invert! — music21 may reject some inversions, so try it on a deep
  # copy and keep the original chord when inversion() fails.
  if (invert) {
    invert_value <- sample(rep(0:(n-1), 2), 1)
    copy <- reticulate::import("copy")
    ch2 <- copy$deepcopy(ch)
    res <- try({
      ch2$inversion(invert_value, transposeOnSet = TRUE)
    }, silent = TRUE)
    if (class(res) != "try-error") ch <- ch2
  }
  if (add_lyric) {
    # Label the chord with its detected harmony symbol; "?" when
    # music21 cannot name it.
    ch$simplifyEnharmonics(inPlace = TRUE)
    h <- music21$harmony$chordSymbolFigureFromChord(ch)
    if (stringr::str_detect(h, "Cannot")) h <- "?"
    ch$addLyric(h)
  }
  ch
}
|
c12a342a5fbc4cf2c2f8a8b2e368afc2d581d72e
|
843689a35f0e800373ce9fae0f3c6344c363d662
|
/data-raw/create_kelley_markers.R
|
cf7732ecc74b590506095d80ab6b88cb3ff3bdd3
|
[] |
no_license
|
andymckenzie/BRETIGEA
|
cd9449b54ec2d21bc853a759e0d243ed12caf50e
|
d96ef000aae730441841873878815a374d2d6dc5
|
refs/heads/master
| 2021-06-04T09:09:12.379076
| 2021-05-04T23:07:17
| 2021-05-04T23:07:17
| 148,084,036
| 11
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,180
|
r
|
create_kelley_markers.R
|
library(devtools)
library(HGNChelper)
#Downloaded from http://oldhamlab.ctec.ucsf.edu/ on 11/2/2018
marker_data = read.csv("data-raw/ALL_Fidelity.csv")
# Map gene symbols to approved HGNC names where checkGeneSymbols offers
# a suggestion, keep the original symbol otherwise, and upper-case all
# results. Two multi-mapped "A /// B" composites are resolved by hand.
switch_to_HGCN_where_possible <- function(gene_list){
  gene_listHGNC = suppressWarnings(checkGeneSymbols(gene_list))
  to_change = which((gene_listHGNC$Approved == FALSE) &
                      !is.na(gene_listHGNC$Suggested.Symbol))
  gene_list[to_change] = gene_listHGNC[to_change, ]$Suggested.Symbol
  gene_list = toupper(gene_list)
  # Manual fixes for symbols that map to composite suggestions.
  gene_list[which(gene_list == "MT-CO1 /// PTGS1")] = "PTGS1"
  gene_list[which(gene_list == "LBHD1 /// C11orf98")] = "LBHD1"
  return(gene_list)
}
marker_data$Gene_name = switch_to_HGCN_where_possible(marker_data$Gene)
#str(markers_df_brain)
# 'data.frame': 6000 obs. of 2 variables:
# $ markers: chr "AQP4" "ALDH1L1" "BMPR1B" "SLC14A1" ...
# $ cell : chr "ast" "ast" "ast" "ast" ...
# Fidelity-score columns to extract: 4 cell types across all brain
# regions plus per-region variants.
marker_types = c("ALL_Astrocyte", "ALL_Oligodendrocyte",
"ALL_Microglia", "ALL_Neuron", "CTX_Astrocyte", "CTX_Oligodendrocyte",
"CTX_Microglia", "CTX_Neuron", "AMY_Astrocyte", "AMY_Oligodendrocyte",
"AMY_Microglia", "AMY_Neuron", "BF_Astrocyte", "BF_Oligodendrocyte",
"BF_Microglia", "BF_Neuron", "CB_Astrocyte", "CB_Oligodendrocyte",
"CB_Microglia", "CB_Neuron", "CLA_Astrocyte", "CLA_Oligodendrocyte",
"CLA_Microglia", "CLA_Neuron", "DI_Astrocyte", "DI_Oligodendrocyte",
"DI_Microglia", "DI_Neuron", "FCX_Astrocyte", "FCX_Oligodendrocyte",
"FCX_Microglia", "FCX_Neuron", "GP_Astrocyte", "GP_Oligodendrocyte",
"GP_Microglia", "GP_Neuron", "HIP_Astrocyte", "HIP_Oligodendrocyte",
"HIP_Microglia", "HIP_Neuron", "IN_Astrocyte", "IN_Oligodendrocyte",
"IN_Microglia", "IN_Neuron", "LIM_Astrocyte", "LIM_Oligodendrocyte",
"LIM_Microglia", "LIM_Neuron", "MED_Astrocyte", "MED_Oligodendrocyte",
"MED_Microglia", "MED_Neuron", "MID_Astrocyte", "MID_Oligodendrocyte",
"MID_Microglia", "MID_Neuron", "OCX_Astrocyte", "OCX_Oligodendrocyte",
"OCX_Microglia", "OCX_Neuron", "PCX_Astrocyte", "PCX_Oligodendrocyte",
"PCX_Microglia", "PCX_Neuron", "PON_Astrocyte", "PON_Oligodendrocyte",
"PON_Microglia", "PON_Neuron", "SC_Astrocyte", "SC_Oligodendrocyte",
"SC_Microglia", "SC_Neuron", "STR_Astrocyte", "STR_Oligodendrocyte",
"STR_Microglia", "STR_Neuron", "TCX_Astrocyte", "TCX_Oligodendrocyte",
"TCX_Microglia", "TCX_Neuron", "WM_Astrocyte", "WM_Oligodendrocyte",
"WM_Microglia", "WM_Neuron")
# Short labels: "<region>_<ast|oli|mic|neu>", region prefix dropped for
# the ALL_ columns.
marker_type_names = gsub("Astrocyte", "ast", marker_types)
marker_type_names = gsub("Oligodendrocyte", "oli", marker_type_names)
marker_type_names = gsub("Microglia", "mic", marker_type_names)
marker_type_names = gsub("Neuron", "neu", marker_type_names)
marker_type_names = gsub("ALL_", "", marker_type_names)
n_markers = 1000
# For each fidelity column, take the top 1000 genes by score and stack
# them into one long data frame. NOTE(review): rbind-in-loop is
# quadratic, but acceptable for this one-off data-raw script.
for(i in 1:length(marker_types)){
  top_markers = marker_data[order(marker_data[ , marker_types[i]], decreasing = TRUE), "Gene_name"]
  tmp_markers_top = head(top_markers, n_markers)
  tmp_df = data.frame(markers = tmp_markers_top, cell = rep(marker_type_names[i], length.out = n_markers))
  if(i == 1){
    kelley_df_brain = tmp_df
  } else {
    kelley_df_brain = rbind(kelley_df_brain, tmp_df)
  }
}
# NOTE(review): absolute, machine-specific path — will fail elsewhere.
setwd("/Users/amckenz/Documents/packages/BRETIGEA")
devtools::use_data(kelley_df_brain, overwrite = TRUE)
|
43e25dc903a896d09952e0442b2dcfe48a1a8e92
|
01a48c8566d2598cc75fc0532f8baef596e142e0
|
/LinearLogisticProbablity.R
|
fb98c4e07ff6490013081919ea76a8b1e12e86e7
|
[] |
no_license
|
urotolo/R-Data-Analysis
|
9313beea88af7f32f621cbcf0f430b9702f3e56b
|
f53606c90e964e589bbdf11281797b6768609505
|
refs/heads/master
| 2020-04-29T07:58:49.328702
| 2019-03-16T16:42:19
| 2019-03-16T16:42:19
| 175,900,764
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,854
|
r
|
LinearLogisticProbablity.R
|
library(ggplot2)
library(caret)
library(deSolve)
library(lattice)
# Ulisse Rotolo
trainingData <- read.table("/root/Desktop/DataAnalysisTest/training_data.txt", header=TRUE) # Read in data for Part 1
testData.txt <- read.table("/root/Desktop/DataAnalysisTest/test_data.txt") # Read in data for Part 1
testData.txt
# PART 1
#Part 1. a — scatter plots of predictors e and i against the target.
ggplot() + geom_point(trainingData, mapping=(aes(e, target))) + ylab("Target") + xlab("E") # e vs target column
ggplot() + geom_point(trainingData, mapping=(aes(i, target))) + ylab("Target") + xlab("E") # i vs target column
# NOTE(review): `trainingData.txt` is never defined (the object read
# above is `trainingData`), so this lm() call errors as written —
# confirm the intended data frame.
linearmod = lm(as.numeric(as.character(V14))~as.numeric(as.character(V5))+as.numeric(as.character(V9)), data=trainingData.txt) # Linear regression model
# NOTE(review): both assignments capture the full coefficient vector;
# presumably coefficients[1] (intercept) and [2] (slope) were intended.
interc=linearmod$coefficients
slop=linearmod$coefficients
#
# PART 2
exam1_golf_data.csv <- read.csv("/root/Desktop/DataAnalysisTest/exam1_golf_data.csv")
head(exam1_golf_data.csv)
#Part 2. a — recode the hole column to a 1/0 made-the-putt indicator.
is.na_replace_0 <- exam1_golf_data.csv$hole # I replace all the non NA's with 1
is.na_replace_0[!is.na(is.na_replace_0)] <- 1
is.na_replace_0
is.na_replace_0[is.na(is.na_replace_0)] <- 0 # All the NA's are replaced with 0
is.na_replace_0 # This more clearly tells me when the putt goes in or misses.
# Keep only green ("G") rows so all distances are in feet.
rowHasG = subset(exam1_golf_data.csv, From =="G") # creates a partial matrix that only includes rows with the G.
rowHasG # This allows my measurements to be in only feet.
ace = c()
for(i in 2:nrow(rowHasG)){ # Set for loop to length of matrix
  if(rowHasG[[5]][i] > rowHasG[[5]][i-1]){ # Checks distance column. I noticed that when
    ace[i-1] = 1 # the succeding value for a cell in distance is
  }else{ # greater then the previous cell, that means a new golf.
    ace[i-1] = 0 # round begins. I store this data in a vector, then add to my matrix of G rows.
  } # Now I know when a golf game iteration begins and ends in terms of G.
}
# NOTE(review): 328 is hard-coded as the final row index; use
# nrow(rowHasG) so the script survives a different data file.
ace[328] = 0
ace
rowHasG$Ace <- ace
rowHasG$Ace
# Logistic regression of make/miss on putt distance.
logimodel <- glm(rowHasG$Ace~rowHasG$dist, family = binomial(link = "logit")) # Logistic model
B0=logimodel$coefficients[1]
B1=logimodel$coefficients[2]
B0
B1
#Part 2. b && part 2. c
# For 2. b: According to my model probabilty is .50 for distance 6 feet
# For 2. c: According to my model at distance 20 feet the probability is .0007209147
# NOTE(review): the loop runs 100 down to 1, so distances are printed in
# descending order despite the comment saying ascending.
for(i in 100:1){ # Gives the distance and probability of scoring at that distance,
  dist = i # distance starts at 1 foot and ascends upwards until distance is 100 feet.
  cat("Distance: ", i)
  print("")
  print("probability: ")
  print(1/(1+exp(-B0-B1*dist))) # Gives probability of scoring a putt at i distance
}
# PART 3
#Part 3. a
# NOTE(review): this overwrites the Part 1 test data with the logistic
# growth data — a separate variable name would be clearer.
testData.txt <- read.table("/root/Desktop/DataAnalysisTest/logisticData.txt")
head(testData.txt)
testData.txt
# TestData keeps including the string "time" and "amount" in the actual calculation,
# that throws everything off, since I didnt know how to remove "time" or "amount",
# I just created another data frame with only the numeric values and not including "time" or "amount".
# Times and Amount are the names of the column now, instead of how when you first import logisticData
# "time" and "amount" are the actual first value of column V1 and V2. Also did the same thing for my differential
# equation data, for some reason explicitely redeclaring them as data frames made my calculations run smoother.
times = c(0,350,700,1050,1400,1750,2100,2450,2800,3150,3500)
amounts = c(86.9,130.8,401.0,596.5,1288.3,2072.2,2346.8,3495.3,4097.2,3936.4,4220.8)
mimicTestDataDf = data.frame(times, amounts)
y0 = 200
r = .0015 # Initial values given to me
c = 5000
# NOTE(review): this function ignores its `y` argument and uses the
# global y0, making the right-hand side constant; the logistic model is
# r*y*(1 - y/c). Also, deSolve calls func(t, y, parms), so the first
# argument received here is time, not r — confirm and rewrite.
diffFunc <- function(r,y,c){ # Programs the equation dy/dx = ry(1-(y/c))
  return(list(r*y0*(1-y0/c)))
}
output_times = seq(0,3500, by=291.666)
# NOTE(review): ode() is called without its `parms` argument.
solveDiff = ode(y=y0, times = output_times , func=diffFunc) # Solves differential equation
head(solveDiff)
class(solveDiff)
solveDiff # Results given initial parameters, compare to testData.txt for accuracy of solution
# NOTE(review): "1825,7823" looks like a decimal-comma typo for
# 1825.7823; as written it injects two spurious values into the vector —
# confirm against the ode output above.
deSolveAmounts = c(200.000,329.0052,531.9992,837.7755,1269.3723,1825,7823,2464.9363,3108.7039,3676.7529,4122.3597)
mimicSolveDiffDataDf = data.frame(times, deSolveAmounts)
mimicSolveDiffDataDf
ggplot()+geom_point(aes(x=mimicTestDataDf$times, y=mimicTestDataDf$amounts)) + geom_line(aes(solveDiff[,1],solveDiff[,2])) +
  ylim(c(0,5000)) + ylab("Amounts") + xlab("Times")
#Part 3. b
# NOTE(review): these globals are read inside calculateError; each call
# starts from the global zeros because the function assigns to local
# copies, so repeated calls do not accumulate.
sumSquareResidualData = 0
sumSquareResidualModel = 0
# Return sqrt of the sum of squared amounts (column 2) for the data and
# model frames, as a two-element list.
calculateError <- function(x,y){
  for(i in 1:nrow(x)){
    squaredValue = x[[2]][i] * x[[2]][i]
    sumSquareResidualData = sumSquareResidualData + squaredValue
  }
  for(i in 1:nrow(y)){
    squaredValue = y[[2]][i] * y[[2]][i]
    sumSquareResidualModel = sumSquareResidualModel + squaredValue
  }
  return(list(sqrt(sumSquareResidualData), sqrt(sumSquareResidualModel)))
}
print(calculateError(mimicTestDataDf, mimicSolveDiffDataDf)) # Compares the sqrt of the squared residual sum
# of logisticData and the differential eq solution.
#Part 3. c
#optim_results = optim(c(5000,200,.0015), calculateError(mimicTestDataDf, mimicSolveDiffDataDf))
#optim_results
# NOTE(review): diffFunc returns a single constant value (see note
# above), so this error function compares every observation against the
# same number — the optimisation result is not meaningful as written.
logistic_error = function(params) {
  fun_values = diffFunc(params[1],params[2],params[3])
  return(sqrt(sum( (as.numeric(testData.txt$amount)-as.numeric(fun_values))^2 )))
}
optim_results = optim(c(r, y0, 5000), logistic_error)
optim_results
|
7360f26b84670c0de16c543c5e84e9511e1c7d4c
|
fd754fd3d3b1a4653456e77295a2aa6079224674
|
/man/StartupMessage.Rd
|
364f1f36a673bfa8c410f65afffd77feece1560b
|
[] |
no_license
|
cran/nptest
|
aeaf83979c692038aff68f6da5e350c4330167fb
|
f471e4e4ac5a5789bc610da5d3e15ff32342964c
|
refs/heads/master
| 2023-04-28T05:14:58.839986
| 2023-04-14T18:50:02
| 2023-04-14T18:50:02
| 182,997,519
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 352
|
rd
|
StartupMessage.Rd
|
\name{StartupMessage}
\alias{StartupMessage}
\alias{nptestStartupMessage}
\title{Startup Message for nptest}
\description{
Prints the startup message when nptest is loaded. Not intended to be called by the user.
}
\details{
The `nptest' ascii start-up message was created using the taag software.
}
\references{
https://patorjk.com/software/taag/
}
|
1159afa9d815f0b47f195eb32e55b9c37be00a40
|
c1015f8592171ed58bac4ae441a4df59089f5774
|
/man/add_simplyaxis_ts.Rd
|
394119f1793faebc3d8a2534cd76c12f50e05667
|
[] |
no_license
|
kotalikg/simplystats
|
4e2ea9f36ad2e98b93e4ad84c7e06266219f1f9c
|
b20d04ad2a4383cfccff0371f59973cb1ff5f61e
|
refs/heads/master
| 2021-01-14T10:36:59.071049
| 2015-01-16T04:43:01
| 2015-01-16T04:43:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 234
|
rd
|
add_simplyaxis_ts.Rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{add_simplyaxis_ts}
\alias{add_simplyaxis_ts}
\title{A function for adding simplystats themed axes}
\usage{
add_simplyaxis_ts()
}
\description{
Add axis to time series plot
}
|
3299851af82c19311642a433eec831518590a03d
|
f9b31c3ffc52f4b6280171cb62fac72c6f708b8f
|
/man/imageHTS.Rd
|
0b9a2d4737de95c996506eed27e36b93ac1b479e
|
[] |
no_license
|
josephbarry/imageHTS
|
2b98a566629e05c595bf650fb279bc529f722d5a
|
600c6437fbdbf3a291e428e7b83f0c97bbd7caae
|
refs/heads/master
| 2021-05-10T19:53:20.961761
| 2018-01-23T15:11:10
| 2018-01-23T15:11:10
| 118,170,626
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,813
|
rd
|
imageHTS.Rd
|
\name{imageHTS}
\alias{imageHTS}
\alias{imageHTS-class}
\docType{package}
\title{Package overview}
\description{
imageHTS is an R package dedicated to the analysis of high-throughput
microscopy-based screens. The package provides a modular and
extensible framework to segment cells, extract quantitative cell features,
predict cell types and browse screen data through web interfaces. Designed
to operate in distributed environments, imageHTS provides a standardized
access to remote screen data, facilitating the dissemination of
high-throughput microscopy-based screens.
}
\section{Package content}{
The following function instantiates the imageHTS object.
\itemize{
\item parseImageConf: instantiate an imageHTS object from a local
or remote screen data repository
}
The following functions process, segment, quantify, summarize
the well images.
\itemize{
\item segmentWells: segment cells in well images
\item extractFeatures: extract cell features from segmented images
\item readLearnTS: train a cell classifier
\item predictCellLabels: predict cell labels
\item summarizeWells: summarize cell populations
}
The following functions provides means to display and inspect the
screen data.
\itemize{
\item installWebQuery: install the webQuery module
\item popWebQuery: pop the webQuery module
\item installCellPicker: install the cellPicker module
\item popCellPicker: pop the cellPicker module
\item segmentATH: segment cells stained for DNA, actin and tubulin
\item getCellFtrsATH: extract features from cells stained for DNA, actin and tubulin
}
The following functions give access to the screen data.
\itemize{
\item fileHTS: build the path to a screen data file
\item readHTS: read a screen data file
\item parseDCF: read a DCF configuration file
\item collectCellFeatures: collect cell features
\item getWellFeatures: get well metadata, features and annotation information
}
The following functions manipulate well unique names.
\itemize{
\item getUnames: get well unique names
\item prw2uname: convert a (plate, replicate, well) data frame in well unique names
\item uname2prw: convert well unique names in a (plate, replicate, well) data frame
\item rowcol2well: convert a (row, col) data frame in well names
\item well2rowcol: convert well names in a (row, col) data frame
\item well2wellid: convert well coordinates in numerical well identifiers
}
Miscellaneous functions.
\itemize{
\item zprime: compute the Z'-factor quality score
\item highlightSegmentation: highlight segmented objects in an image
\item countObjects: count the number of objects in a segmented image
\item getImageConf: get the imageHTS configuration
}
}
\section{Authors}{
Gregoire Pau, \email{gregoire.pau@embl.de}, 2010
}
\keyword{package}
|
d9e5689095fb5871dbe8e3fdd42b06a1eb4be443
|
c436672902d550b7166fc512e0b628d0d0d53055
|
/run/copy_reports.R
|
d0836b5709c387a65822abb6da8bc16b5f8647bf
|
[] |
no_license
|
restartus/global-lmic-reports-orderly
|
ad4e7c667b4f90b13a6c88c98555188a65471d23
|
9d954619a0e2442e21e6e0e3a6554cb2bd4bff88
|
refs/heads/master
| 2022-11-10T15:45:41.490968
| 2020-06-20T16:09:04
| 2020-06-20T16:09:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,412
|
r
|
copy_reports.R
|
#!/usr/bin/env Rscript
# Copy one or more files/directories from `from` to `to`, overwriting existing
# targets and recursing into directories. Raises an error if any copy fails.
file_copy <- function(from, to) {
  copied <- file.copy(from, to, overwrite = TRUE, recursive = TRUE)
  if (!all(copied)) {
    stop("There was an error copying files")
  }
}
## Possibly useful:
## dir("gh-pages", pattern = "index\\.html$", recursive = TRUE)
## Copy per-country lmic_reports artefacts out of the orderly archive into the
## gh-pages site tree for a given date, optionally refreshing each country's
## "latest" directory, and build combined pdf/csv outputs.
##   date      - report date as "YYYY-MM-DD"; defaults to today when NULL.
##   is_latest - if TRUE, also overwrite the country-level (dated-dir's parent)
##               copies under versioned names (v1.html, v1.pdf, ...).
## Side effects only (file copies, a combined PDF, a zipped CSV); no return value.
copy_outputs <- function(date = NULL, is_latest = TRUE) {
  db <- orderly::orderly_db("destination")
  if (is.null(date)) {
    date <- as.character(Sys.Date())
  }
  ## First find the id corresponding to the ecdc report with data. If
  ## there are more than one, it's not totally clear what you want to
  ## do as you might want to take the earliest or the latest.
  ## Probably we want to take *all* and do the join over that, which
  ## is easy enough to do if you replace the '= $1' and replace with
  ## 'IN (%s)' and interpolate 'paste(sprintf('"%s"', id), collapse = ", ")'
  sql <- 'SELECT report_version.id
FROM report_version
JOIN parameters
ON parameters.report_version = report_version.id
WHERE report_version.report = "ecdc"
AND parameters.value = $1'
  id <- DBI::dbGetQuery(db, sql, date)$id
  if (length(id) == 0L) {
    stop(sprintf("No 'ecdc' report for '%s'", as.character(date)))
  } else if (length(id) > 1) {
    message(sprintf("Multiple 'ecdc' reports for '%s'", as.character(date)))
  }
  ## Then find all lmic_reports reports that use files from this ecdc
  ## report. This is a bit awful and I might add direct link or a
  ## view to make this easier at some point.
  sql <- 'SELECT report_version.id, parameters.value as country
FROM report_version_artefact
JOIN file_artefact
ON file_artefact.artefact = report_version_artefact.id
JOIN depends
ON depends.use = file_artefact.id
JOIN report_version
ON report_version.id = depends.report_version
JOIN parameters
ON parameters.report_version = report_version.id
WHERE report_version_artefact.report_version IN (%s)
AND report = "lmic_reports"
AND parameters.name = "iso3c"
ORDER BY country, report_version.id'
  sql <- sprintf(sql, paste(sprintf('"%s"', id), collapse = ", "))
  reports <- DBI::dbGetQuery(db, sql)
  # Keep only the newest report version per country (rows are ordered by id,
  # so max() of the row indices within each country selects the latest).
  if (any(duplicated(reports$country))) {
    keep <- tapply(seq_len(nrow(reports)), reports$country, max)
    reports <- reports[keep, ]
    rownames(reports) <- NULL
  }
  reports$date <- as.character(date)
  # NOTE(review): `target` appears unused below -- candidate for removal.
  target <- "gh-pages"
  src <- file.path("archive", "lmic_reports", reports$id)
  dest <- sprintf("gh-pages/%s/%s", reports$country, reports$date)
  copy <- c("index.html",
            "projections.csv",
            "index.pdf",
            "input_params.json")
  copy_to <- c("v1.html",
               "projections.csv",
               "v1.pdf",
               "input_params.json")
  for (i in seq_along(dest)) {
    message(sprintf("Copying %s (%s)", dest[[i]], reports$id[[i]]))
    dir.create(dest[[i]], FALSE, TRUE)
    file_copy(file.path(src[[i]], copy), dest[[i]])
    if (is_latest) {
      # Refresh the country directory: wipe previous top-level files/figures,
      # then copy this date's outputs under the versioned names in copy_to.
      dest_latest <- dirname(dest[[i]])
      prev <- dir(dest_latest, full.names = TRUE, pattern = "\\.")
      unlink(c(prev, file.path(dest_latest, "figures")), recursive = TRUE)
      file_copy(file.path(dest[[i]], copy), file.path(dest_latest, copy_to))
    }
  }
  pdf_input <- file.path(src, "index.pdf")
  message(sprintf("Building combined pdf from %d files", length(pdf_input)))
  qpdf::pdf_combine(pdf_input, "gh-pages/combined_reports.pdf")
  ## Aha, this is so naughty, but probably a reasonable shout given
  ## the situation. The alternative is to depend on _all_ the country
  ## tasks for that date.
  summaries <- do.call(rbind,
                       lapply(file.path(src, "summary_df.rds"), readRDS))
  saveRDS(summaries, "src/index_page/summaries.rds")
  projections <- do.call(rbind,
                         lapply(file.path(src, "projections.csv"), read.csv))
  dir.create("gh-pages/data", FALSE, TRUE)
  # Publish the combined projections as a dated, zipped CSV and remove the
  # uncompressed intermediate.
  write.csv(projections, paste0("gh-pages/data/",date,"_v1.csv"), row.names = FALSE, quote = FALSE)
  zip(paste0("gh-pages/data/",date,"_v1.csv.zip"),paste0("gh-pages/data/",date,"_v1.csv"))
  file.remove(paste0("gh-pages/data/",date,"_v1.csv"))
}
# Command-line entry point: when run via Rscript (non-interactively), parse the
# optional <date> argument with docopt and invoke copy_outputs().
if (!interactive()) {
  usage <- "Usage:\n./copy_outputs.R [<date>]"
  args <- docopt::docopt(usage)
  copy_outputs(args$date)
}
|
83eeb11c79d573df4615ed3cf6cb2ec02bc0d1a4
|
cb1edbd312fe5583702e8567e1aa6e32e103d300
|
/R/sim.history.R
|
4ecbe7a137e62d9f82a1063f94829abe595a71d5
|
[] |
no_license
|
cran/phytools
|
e8cb2ddac5592a9c27a0036df4599649a393717a
|
910fa95b3f5f1619c85ac420bd07286a3fe8cfcf
|
refs/heads/master
| 2023-07-22T15:18:46.363446
| 2023-07-14T20:00:02
| 2023-07-14T21:30:43
| 17,698,535
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,229
|
r
|
sim.history.R
|
## function simulates stochastic character history under some model
## written by Liam J. Revell 2011, 2013, 2014, 2016, 2020
## Arguments:
##   tree      - an object of class "phylo"
##   Q         - instantaneous transition rate matrix; by default (direction =
##               "column_to_row") the rate from state i to j is read from Q[j,i]
##   anc       - root state (character) or prior probabilities on root states;
##               defaults to a flat prior over the columns of Q
##   nsim      - number of simulated histories to return
##   direction - whether rates are read column->row or row->column
##   ...       - optional `message` (logical) to silence diagnostics
## Returns a "simmap" object (nsim == 1) or a "multiSimmap" list.
## NOTE(review): relies on helpers rstate() and to.matrix() assumed to be in
## scope from the surrounding package (phytools) -- not defined in this file.
sim.history<-function(tree,Q,anc=NULL,nsim=1,direction=c("column_to_row","row_to_column"),
  ...){
  if(!inherits(tree,"phylo"))
    stop("tree should be an object of class \"phylo\".")
  if(hasArg(message)) message<-list(...)$message
  else message<-TRUE
  # keep only the first word of the direction option ("column" or "row")
  direction<-direction[1]
  direction<-strsplit(direction,"_")[[1]][1]
  # reorder to cladewise
  tree<-reorder.phylo(tree,"cladewise")
  # check Q
  if(!isSymmetric(Q)) if(message){
    if(direction=="column")
      cat("Note - the rate of substitution from i->j should be given by Q[j,i].\n")
    else if(direction=="row")
      cat("Note - the rate of substitution from i->j should be given by Q[i,j].\n")
  }
  # Normalize Q so its columns sum to zero (transposing or re-fixing the
  # diagonal as needed); internally the code always works column-wise.
  if(direction=="column"){
    if(!all(round(colSums(Q),10)==0)){
      if(all(round(rowSums(Q),10)==0)&&!isSymmetric(Q)){
        if(message){
          cat("Detecting that rows, not columns, of Q sum to zero :\n")
          cat("Transposing Q for internal calculations.\n")
        }
        Q<-t(Q)
      } else {
        if(message)
          cat("Some columns (or rows) of Q don't sum to 0.0. Fixing.\n")
        diag(Q)<-0
        diag(Q)<--colSums(Q,na.rm=TRUE)
      }
    }
  } else if(direction=="row"){
    Q<-t(Q)
    if(!all(round(colSums(Q),10)==0)){
      if(all(round(rowSums(Q),10)==0)&&!isSymmetric(Q)){
        if(message){
          cat("Detecting that columns, not rows, of Q sum to zero :\n")
          cat("Transposing Q for internal calculations.\n")
        }
        Q<-t(Q)
      } else {
        if(message)
          cat("Some columns (or rows) of Q don't sum to 0.0. Fixing.\n")
        diag(Q)<-0
        diag(Q)<--colSums(Q,na.rm=TRUE)
      }
    }
  }
  # does Q have names?
  if(is.null(dimnames(Q))) dimnames(Q)<-list(1:nrow(Q),1:ncol(Q))
  # create "multiPhylo" object
  mtrees<-vector(mode="list",length=nsim)
  class(mtrees)<-c("multiSimmap","multiPhylo")
  ## deal with ancestral state
  if(is.null(anc))
    anc<-setNames(rep(1/ncol(Q),ncol(Q)),colnames(Q))
  if(is.character(anc)){
    anc<-colSums(to.matrix(anc,colnames(Q)))
    anc<-anc/sum(anc)
  }
  # now loop
  for(i in 1:nsim){
    # set root state
    a<-rstate(anc)
    # create the map tree object
    mtree<-tree
    mtree$maps<-vector(mode="list",length=nrow(tree$edge))
    # now we want to simulate the node states on the tree
    node.states<-matrix(NA,nrow(tree$edge),ncol(tree$edge))
    node.states[which(tree$edge[,1]==(length(tree$tip)+1)),1]<-a
    # Walk the edges cladewise, simulating waiting times from the exponential
    # distribution and state changes from the (normalized) off-diagonal rates.
    for(j in 1:nrow(tree$edge)){
      if(tree$edge.length[j]==0){
        # zero-length edge: no time for change, propagate the parent state
        map<-vector()
        map[1]<-tree$edge.length[j]
        names(map)[1]<-
          node.states[which(tree$edge[,1]==tree$edge[j,2]),1]<-
          node.states[j,2]<-node.states[j,1]
      } else {
        time=0
        state<-node.states[j,1]
        new.state<-state
        dt<-vector()
        map<-vector()
        k<-1
        while(time<tree$edge.length[j]){
          dt[1]<-time
          # waiting time to the next event at total rate -Q[state,state]
          dt[2]<-dt[1]+rexp(n=1,rate=-Q[state,state])
          if(dt[2]<tree$edge.length[j])
            new.state<-rstate(Q[,state][-match(state,rownames(Q))]/
              sum(Q[,state][-match(state,rownames(Q))]))
          dt[2]<-min(dt[2],tree$edge.length[j])
          map[k]<-dt[2]-dt[1]
          names(map)[k]<-state
          k<-k+1
          state<-new.state
          time<-dt[2]
        }
        # record the end state at the child node and seed descendant edges
        names(map)[length(map)]->node.states[j,2]->
          node.states[which(tree$edge[,1]==tree$edge[j,2]),1]
      }
      mtree$maps[[j]]<-map
    }
    # add a couple of elements
    mtree$node.states<-node.states
    tip.states<-node.states[tree$edge[,2]<=length(tree$tip),2]
    tip.states<-tip.states[order(tree$edge[tree$edge[,2]<=length(tree$tip),2])]
    names(tip.states)<-tree$tip.label
    mtree$states<-tip.states
    # now construct the matrix "mapped.edge" (for backward compatibility)
    allstates<-vector()
    for(j in 1:nrow(mtree$edge))
      allstates<-c(allstates,names(mtree$maps[[j]]))
    allstates<-unique(allstates)
    mtree$mapped.edge<-matrix(data=0,length(mtree$edge.length),
      length(allstates),dimnames=list(apply(mtree$edge,1,
      function(x) paste(x,collapse=",")),state=allstates))
    # accumulate time spent in each state along each edge
    for(j in 1:length(mtree$maps))
      for(k in 1:length(mtree$maps[[j]]))
        mtree$mapped.edge[j,names(mtree$maps[[j]])[k]]<-
          mtree$mapped.edge[j,names(mtree$maps[[j]])[k]]+
          mtree$maps[[j]][k]
    class(mtree)<-c("simmap",setdiff(class(mtree),"simmap"))
    mtrees[[i]]<-mtree
  }
  if(nsim==1) mtrees<-mtrees[[1]]
  if(message) cat("Done simulation(s).\n")
  mtrees
}
## simulate DNA sequence from a tree & model parameters
## written by Liam J. Revell 2013, 2019
## Arguments:
##   tree   - an object of class "phylo"
##   l      - number of sites to simulate
##   Q      - 4x4 nucleotide rate matrix; defaults to equal-rates (JC-like)
##   rate   - per-site rate multiplier (scalar or length-l vector)
##   format - "DNAbin", "phyDat", or "matrix" return type
## NOTE(review): depends on sim.Mk(), as.DNAbin(), as.phyDat() from the
## surrounding package environment (phytools/ape/phangorn).
genSeq<-function(tree,l=1000,Q=NULL,rate=1,format="DNAbin",...){
  if(is.null(Q)){
    Q<-matrix(1,4,4)
    rownames(Q)<-colnames(Q)<-c("a","c","g","t")
    diag(Q)<-0
    diag(Q)<--colSums(Q)
  }
  # expand or recycle the per-site rates to length l
  if(length(rate)!=l){
    if(length(rate)==1) rate<-rep(rate,l)
    else {
      cat("warning: length(rate) & l should match for length(rate)>1\n")
      cat("         rate will be recycled.\n")
      rate<-rep(rate,ceiling(l/length(rate)))[1:l]
    }
  }
  cat("simulating sequences....\n")
  flush.console()
  # one Mk simulation per site, scaling Q by that site's rate
  X<-sapply(rate,function(a,b,c) sim.Mk(b,a*c),b=tree,c=Q)
  if(format=="DNAbin") return(as.DNAbin(X))
  else if(format=="phyDat") return(as.phyDat(X))
  else if(format=="matrix") return(X)
}
|
7abc08f09615320ffe17a763970b5d626535e052
|
cd93b111a05518883959aaec399b1a4933afc1f4
|
/public/project/explorergee/man/get_terrain.Rd
|
03d951a6c6a94a5a8659e20211c132c4ca7b2263
|
[] |
no_license
|
joshualerickson/hydroblog
|
fc9d5dd1bd21fb7ef90dd961b59082aa2cea9fbb
|
65c3d21cb725a0e126c3c459c42e656cf4579d02
|
refs/heads/master
| 2023-03-06T20:50:27.443995
| 2023-02-26T16:44:06
| 2023-02-26T16:44:06
| 253,122,579
| 1
| 0
| null | 2021-08-17T01:23:05
| 2020-04-05T00:04:22
|
HTML
|
UTF-8
|
R
| false
| true
| 1,478
|
rd
|
get_terrain.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get.R
\name{get_terrain}
\alias{get_terrain}
\title{Get Terrain Products}
\usage{
get_terrain(
aoi,
method = "NED",
param = "slope",
mask = FALSE,
m.low = NULL,
m.high = NULL,
az = 270,
el = 45
)
}
\arguments{
\item{aoi}{A sf object indicating the extent of the geom.}
\item{method}{\code{character} indicating what method to use, e.g. 'NED', 'SRTM'.}
\item{param}{\code{character} indicating terrain type, e.g. 'dem', 'FA', 'TWI', 'TRI', 'TPI', 'aspect', 'slope', 'cos', 'sin', 'hillshade', 'complete'.}
\item{mask}{\code{logical} whether to mask out certain ranges}
\item{m.low}{\code{numeric} low value for mask, e.g. greater than 'm.low'}
\item{m.high}{\code{numeric} high value for mask, e.g. less than 'm.high'}
\item{az}{\code{numeric} The illumination azimuth in degrees from north.}
\item{el}{\code{numeric} The illumination elevation in degrees.}
}
\value{
A list of Earth Engine Objects and arguments.
}
\description{
This function takes the USGS NED (National Elevation Dataset)
or SRTM (Shuttle Radar Topography Mission) and gets a handful of terrain indices. This is good for
downloaded areas for further analysis or passing on for some quick stats.
}
\examples{
\dontrun{
# Load Libraries
library(rgee)
rgee::ee_Initialize()
library(exploreRGEE)
# Bring in data
huc <- exploreRGEE::huc
# get terrain
twi <- get_terrain(huc, method = 'NED', param = 'TWI')
}
}
|
b4040827d42925c9afcaae01735922385ccfcc25
|
8af2d532c8ea764a73d77628bd8ad2129e21fd8f
|
/analysis_code/Party_vote_by_electorate.R
|
0647f204b47a5d7e0fee644d65444e7754982f61
|
[] |
no_license
|
ellisp/election2011
|
c3ab5b25568abdf07365bf07dada639a99f96e37
|
42b2c9cee7b884d4fde318c570f28ef54a6bd351
|
refs/heads/master
| 2020-09-22T13:43:48.121716
| 2014-03-23T05:53:06
| 2014-03-23T05:53:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,324
|
r
|
Party_vote_by_electorate.R
|
library(election2011)
library(plyr)
library(directlabels)
#--------------party by electorate--------------------
# Aggregate party-vote and candidate-vote totals to Party x Electorate and
# compare the two vote types per party.
# NOTE(review): `res` (png resolution) is assumed to be defined elsewhere in
# the election2011 package/session -- confirm before running standalone; melt()
# (reshape2) is likewise assumed to be on the search path.
party <- ddply(party_results_polling_place, .(Party, Electorate), summarise,
               Votes = sum(Votes))
party_cand <- ddply(candidate_results_polling_place, .(Party, Electorate), summarise,
                    Votes = sum(Votes))
comb <- merge(party, party_cand, by=c("Party", "Electorate"),
              suffixes=c("_partyvote", "_candidatevote"))
png("output/party vote by candidate vote by party and electorate.png", 12*res, 8*res, res=res)
print(ggplot(comb, aes(x=Votes_partyvote, y=Votes_candidatevote, color=Party)) +
        # fix: geom_abline() takes `intercept`, not `xintercept` (geom_vline's arg)
        geom_abline(intercept=0, slope=1, colour="white") +
        geom_point() +
        scale_x_continuous(label=comma) +
        scale_y_continuous(label=comma) +
        coord_equal() +
        facet_wrap(~Party, scales="free"))
dev.off()
#--------------------party overall vote---------------
# Totals and shares of the formal (non-informal) vote for each party.
legit_party <- sum(subset(party_results_polling_place, Party != "Informal Party Votes")$Votes)
legit_cand <- sum(subset(candidate_results_polling_place, Party != "Informal Candidate Votes")$Votes)
party_total <- ddply(subset(party_results_polling_place, Party != "Informal Party Votes"),
                     .(Party), summarise,
                     Votes = sum(Votes),
                     Percentage = sum(Votes) / legit_party)
cand_total <- ddply(subset(candidate_results_polling_place,
                           Party != "Informal Candidate Votes"), .(Party), summarise,
                    Votes = sum(Votes),
                    Percentage = sum(Votes) / legit_cand)
comb_total <- merge(party_total, cand_total, by="Party", all=FALSE,
                    suffixes=c("_partyvote", "_candidatevote"))
# Long format for side-by-side bar charts; parties ordered by party-vote total.
comb_total_m <- melt(comb_total[, c("Party", "Votes_partyvote", "Votes_candidatevote")],
                     id.vars="Party", value.name="Votes")
comb_total_m$Party <- factor(comb_total_m$Party,
                             levels=comb_total$Party[order(-comb_total$Votes_partyvote)])
png("output/party vote and candidate vote barcharts.png", 12*res, 8*res, res=res)
print(
  ggplot(comb_total_m, aes(x=Party, weight=Votes)) +
    geom_bar() +
    coord_flip() +
    facet_wrap(~variable, ncol=2) +
    scale_y_continuous("\nNumber of votes", label=comma) +
    labs(x="")
)
dev.off()
comb_total_m_p <- melt(comb_total[, c("Party", "Percentage_partyvote", "Percentage_candidatevote")],
                       id.vars="Party", value.name="Percentage")
comb_total_m_p$Party <- factor(comb_total_m_p$Party,
                               levels=comb_total$Party[order(-comb_total$Votes_partyvote)])
png("output/party vote and candidate percentage barcharts.png", 12*res, 8*res, res=res)
print(
  ggplot(comb_total_m_p, aes(x=Party, weight=Percentage)) +
    geom_bar() +
    coord_flip() +
    facet_wrap(~variable, ncol=2) +
    # NOTE(review): axis label says "Number of votes" but the values are
    # percentages -- likely a copy-paste of the previous chart's label.
    scale_y_continuous("\nNumber of votes", label=percent) +
    labs(x="")
)
dev.off()
# Log-log scatter of party-vote share vs candidate-vote share, labelled by party.
png("output/party vote and candidate percentage scatterplot.png", 12*res, 8*res, res=res)
print(direct.label(
  ggplot(comb_total, aes(x=Percentage_candidatevote, y=Percentage_partyvote)) +
    # fix: geom_abline() takes `intercept`, not `xintercept`
    geom_abline(intercept=0, slope=1, colour="white") +
    geom_point(aes(color=Party)) +
    scale_x_log10(label=percent) +
    scale_y_log10(label=percent) +
    coord_equal()
)
)
dev.off()
|
df8af2664bbe7ba27196b8277e84e582e8df861c
|
ccb8ff0945f271b61f0b4b011ec6e29248fa32d0
|
/countInvariantsByGroup.R
|
4527027214e4e5d85b2329e415d817add2ea115a
|
[] |
no_license
|
dschluter/RScripts
|
068dc80fa1ff548cbe43bf661eb296c9501561dc
|
f2fe176468b82b1313bfb0749ea09f5dcd304aa4
|
refs/heads/master
| 2023-08-11T15:08:34.881383
| 2023-07-30T17:09:22
| 2023-07-30T17:09:22
| 37,339,907
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,632
|
r
|
countInvariantsByGroup.R
|
#!/usr/bin/Rscript
# Run in Unix as " Rscript countGoodInvariantsByGroup.R ... "
# Streams a per-base depth summary (<project>.<chr>.DP.inv.gz), counts per group
# how many fish meet the minimum depth DPmin at each invariant site, keeps sites
# covered in at least 2 groups, and writes text + .rdd outputs (optionally split
# by new-assembly chromosome when Glazerize=TRUE).
# One strategy is to use a very low threshold when deciding whether to drop a base,
# eg keep any base that has at least one genotype in at least 2 of the groups
# qsub -I -l walltime=03:00:00 -l mem=2gb # work interactively; use "exit" to exit
# module load R/3.1.2
# R
# Expect to read these arguments from args
project <- NULL
chrname <- NULL
groupnames <- NULL
DPmin <- 1 # default if not provided
Glazerize <- "FALSE" # Requires file "glazerFileS4 NewScaffoldOrder.csv" in current directory
args <- commandArgs(TRUE)
# args <- c("chrname=chrM", "project=Benlim", "groupnames=paxl,paxb,pril,prib,qryl,qryb,ensl,ensb,marine-pac,marine-atl,marine-jap,solitary,stream", "Glazerize=TRUE", "DPmin=1")
# Parses the args into a data frame with two columns (V1=left and V2=right of each "=" sign)
# and then assigns V2 to variables whose names are in V1
# (note: all values arrive as character strings, including DPmin and Glazerize)
x <- read.table(text = args, sep = "=", colClasses = "character")
for(i in 1:nrow(x)){ assign(x[i,1], x[i,2]) }
if(is.null(chrname)) stop("Provide chrname= in arguments")
if(is.null(project)) stop("Provide project= in arguments")
if(is.null(groupnames)) stop("Provide groupnames= in arguments (grounames separated by commas, no spaces)")
cat("\nproject is", project, "\n")
cat("\nchrname is", chrname, "\n")
cat("\nGlazerize is", Glazerize, "\n")
GTmissing <- "." # how GATK represents missing genotypes in the vcf file "./."
# load "chrvec" for the current chromosome
chrno <- gsub("^chr", "", chrname)
chrmaskfile <- paste("chrvec.", chrno, ".masked.rdd", sep = "") # chrvec.*.masked.rdd
load(chrmaskfile) # object is named "chrvec"
# chrvec holds one character per reference base; "M" marks masked positions
# Input/output file names derived from project + chromosome.
invariantsummaryname <- paste(project, ".", chrname, ".DP.inv.gz", sep="")
textfile <- paste(project, ".", chrname, ".goodInv.gz", sep="")
goodInvariantsFile <- paste(project, ".", chrname, ".goodInv.rdd", sep="")
nLinesAtaTime <- 100000
# nLinesAtaTime <- 10000
INFILE <- file(invariantsummaryname, open = "r")
OUTFILE <- gzfile(textfile, "w")
# Read the first chunk; its first row is the header (first field renamed POS).
x <- readLines(con = INFILE, n = nLinesAtaTime)
x1 <- strsplit(x, split = "\t")
headerline <- x1[[1]]
headerline[1] <- "POS"
x1 <- x1[-1]
nlines <- length(x1)
# FUDGE TO FIX PRIEST in file names
headerline <- gsub("Priest", "Pri", headerline, ignore.case = TRUE)
groupnames <- unlist(strsplit(groupnames, split = ","))
writeLines(paste(c(headerline[1:2], groupnames), collapse = "\t"), OUTFILE)
cat("\ngroupnames\n")
cat(groupnames, sep = "\n")
# assign numbers corresponding to groups
# NOTE(review): `g` is an environment/list of helper functions assumed to be
# loaded elsewhere (not defined in this script) -- confirm it is on the path.
fishnames <- headerline[-c(1,2)]
groupcodes <- g$groupFishCodes(fishnames, groupnames)
# groupcodes is an integer per fish column: 1..length(groupnames) for matched
# groups, 0 for fish belonging to no requested group.
# Masked rows in the reference genome
maskedPOS <- which(chrvec == "M")
rm(chrvec)
# Loop: process the file in chunks of nLinesAtaTime lines.
while(nlines >0){
  x2 <- as.data.frame(do.call("rbind", x1), stringsAsFactors = FALSE)
  names(x2) <- headerline
  # drop positions that are masked in the reference
  z <- x2$POS %in% maskedPOS
  x <- x2[!z,]
  # Elements of invariantsummaryname should be "0" or like "3:6" but some may be like ".:3" or even NA:NA
  # Note: the columns haven't been divided into groups yet
  # Extract the part before ":" (the DP value) and flag whether it meets DPmin.
  y <- lapply(x[, -c(1:2)], function(x){
    # y <- as.integer(sub("([0-9]*)[:/][0-9]+", "\\1", x[,3]))
    y <- sub("([0-9.NA]*)[:][0-9.NA]+", "\\1", x)
    y[y %in% c(".", "NA")] <- "0"
    y <- ( as.integer(y) >= DPmin ) # TRUE if DP of a given genotype is at least DPmin
  })
  # Reunite columns
  y <- data.frame(y)
  # Split the data frame by group and take the sum of each row to get the total number of individuals that
  # meet the DP minimum depth of coverage criterion DPmin within each group.
  z <- split(names(y), groupcodes)
  # (z maps each group code to the fish column names belonging to that group;
  # code 0 collects fish matching none of the requested groups)
  # Count how many genotypes per group
  z1 <- lapply(z, function(z){
    z1 <- apply(y[, z], 1, sum) # summing logicals treats TRUE as 1's
  })
  goodInvariants <- data.frame(x[, c(1:2)], z1)
  # Drop rows not having at least 1 good genotype in at least 2 groups
  # Count number of groups having at least 1 good genotype
  z2 <- lapply(z1, function(z){z >= 1})
  z2 <- data.frame(z2)
  z3 <- apply(z2, 1, sum)
  # table(z3)
  keep <- z3 >= 2
  goodInvariants <- goodInvariants[keep, ]
  # write surviving rows as tab-separated lines
  goodInvariants <- apply(goodInvariants, 1, function(x){paste(x, collapse = "\t")})
  if(length(goodInvariants) > 0) writeLines(goodInvariants, OUTFILE)
  # fetch the next chunk
  x <- readLines(con = INFILE, n = nLinesAtaTime)
  x1 <- strsplit(x, split = "\t")
  nlines <- length(x)
} # end while loop
close(INFILE)
close(OUTFILE)
# Read the whole thing into memory and save
goodInvariants <- read.table(file = textfile, header = TRUE, comment.char = "", stringsAsFactors = FALSE)
cat("\nRe-read output text invariants file, now saving in rdd format\n")
save(goodInvariants, file = goodInvariantsFile)
# load(goodInvariantsFile) # object named "goodInvariants"
# If Glazerize is TRUE, split goodInvariants txt file by new assembly chromosome number
# Requires conversion file "glazerFileS4 NewScaffoldOrder.csv" in current working directory
# (Glazerize is the character string "TRUE"/"FALSE"; if() coerces it to logical)
if(Glazerize){
  # grab pos information
  pos <- goodInvariants$POS
  # convert pos to newChr and newPos
  # (mitochondrion "M" and pitx1 scaffolds keep their old coordinates)
  if(chrno != "M" & !grepl("pitx1", chrno) ){
    newCoords <- g$glazerConvertOld2New(chrname, pos)
  } else {
    newCoords <- data.frame(newChr = rep(chrno, length(pos)), newPos = pos)
  }
  newChr <- newCoords$newChr
  newPos <- newCoords$newPos
  rm(newCoords)
  goodInvariants <- cbind.data.frame( data.frame(newChr = newChr, newPos = newPos), goodInvariants)
  z <- unique(newChr)
  # e.g. z might be c("21", "Un") when some positions map to unplaced scaffolds
  # (an earlier split()-based version of the loop below is retained in history)
  for(i in z){ # saved object is "goodInvariantsPart"
    goodInvariantsPart <- goodInvariants[newChr == i, ]
    goodInvariantsPart$newChr <- newChr[newChr == i]
    goodInvariantsPart$newPos <- newPos[newChr == i]
    save(goodInvariantsPart, file = paste(project, chrname, "goodInvPart", i, "rdd", sep = "."))
  }
} # end if(Glazerize)
|
e059952f9b360250186bb586d3b9f4658e075111
|
55abc2669cb686c448ace0636f534426a344dd78
|
/Code/Dev_Code/Monitoring Data Summary.R
|
0a6a97b3c5fcd717237133983a2a206018a2a594
|
[] |
no_license
|
smartenies/ALA_HIA
|
aaa0e87bcd41b0074663b1fad19cb7812ffc0e89
|
65ed55eb5fc0cb12204e2d6ad6f8c5d7b9087ee4
|
refs/heads/master
| 2021-03-24T13:09:13.986091
| 2019-07-11T22:32:28
| 2019-07-11T22:32:28
| 103,997,716
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,043
|
r
|
Monitoring Data Summary.R
|
#' =============================================================================
#' Project: American Lung Association HIA
#' Date created: February 22, 2018
#' Author: Sheena Martenies
#' Contact: Sheena.Martenies@colostate.edu
#'
#' Description:
#'
#' This project estimates the health impacts attributable to two coal-fired
#' power plants in the front range region of CO: Comanche (in Pueblo, CO) and
#' Martin Drake (in Colorado Springs, CO). The facilities are slated to be
#' decommissioned by 2025.
#'
#' This script summarizes O3 and PM2.5 monitoring data in the SFR
#' =============================================================================
library(foreign)
library(sp)
library(gstat)
library(rgdal)
library(raster)
library(ggplot2)
library(ggmap)
library(rgeos)
library(maptools)
library(ggthemes)
library(ggrepel)
library(RColorBrewer)
library(gridExtra)
library(plyr)
library(stringr)
#' For ggplots
simple_theme <- theme(
#aspect.ratio = 1,
text = element_text(family="Calibri",size = 12, color = 'black'),
panel.spacing.y = unit(0,"cm"),
panel.spacing.x = unit(0.25, "lines"),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.border=element_rect(fill = NA),
panel.background=element_blank(),
axis.ticks = element_line(colour = "black"),
axis.text = element_text(color = "black", size=10),
# legend.position = c(0.1,0.1),
plot.margin=grid::unit(c(0,0,0,0), "mm"),
legend.key = element_blank()
)
simple_theme2 <- theme(
#aspect.ratio = 1,
text = element_text(family="Calibri",size = 12, color = 'black'),
panel.spacing.y = unit(0,"cm"),
panel.spacing.x = unit(0.25, "lines"),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.border=element_rect(fill = NA),
panel.background=element_blank(),
axis.ticks = element_blank(),
axis.text = element_blank(),
# legend.position = c(0.1,0.1),
plot.margin=grid::unit(c(0,0,0,0), "mm"),
legend.key = element_blank()
)
windowsFonts(Calibri=windowsFont("TT Calibri"))
options(scipen = 9999) #avoid scientific notation
geo_data <- "T:/Rsch-MRS/ECHO/SEM Large Data/Spatial Data"
utm_13 <- "+init=epsg:26913"
lat_long <- "+proj=longlat +datum=NAD83 +no_defs +ellps=GRS80 +towgs84=0,0,0"
#' =============================================================================
#' -----------------------------------------------------------------------------
# Identifying air monitors
#' -----------------------------------------------------------------------------
# Hourly AQS observations for 2009-2017 (PM2.5 and ozone monitors).
pm_full <- read.csv("./Data/AQS Data/AQS Data 2009-2017.csv", header=T,
                    stringsAsFactors = F)
#' Monitor IDs 08041* are in El Paso county and 08101* are in Pueblo
#' Monitor IDs 08031* are in Denver
# Monitors in the southern Front Range study area.
# NOTE(review): the original vector ended in a dangling comma -- a syntax
# error inside c() -- and then filtered an undefined object `aqs_full`.
# Both are fixed here; the subset now comes from the `pm_full` data read above.
sfr_ids <- c("080410013", "080410015", "080410016", "080410017", "081010015")
aqs_sfr <- pm_full[which(pm_full$monitor_id %in% sfr_ids),]
#' -----------------------------------------------------------------------------
# Load monitor locations (provides a `monitors` data frame with Longitude/
# Latitude and an AQS Parameter.Code column -- TODO confirm against the
# script that builds monitors.RData) and the tract basemap.
load("./Data/Air Quality/monitors.RData")
load("./Data/Spatial Data/dm_tracts_utm_map.RData")
# Promote the monitors to a SpatialPointsDataFrame in WGS84 (EPSG:4326),
# then project to UTM 13N for distance-based work.
monitors_coord <- monitors[,c("Longitude", "Latitude")]
monitors_spdf <- SpatialPointsDataFrame(coords = monitors_coord,
                                        data = monitors,
                                        proj4string = CRS("+init=epsg:4326"))
monitors_utm <- spTransform(monitors_spdf, CRS(utm_13)) #UTM 13N
# Attach row ids and projected coordinates, and keep a plain data.frame
# copy (the ggplot-friendly "_map" version) alongside the sp object.
monitors_utm@data$id <- rownames(monitors_utm@data)
monitors_utm@data$long <- coordinates(monitors_utm)[,1]
monitors_utm@data$lat <- coordinates(monitors_utm)[,2]
monitors_utm_map <- as.data.frame(monitors_utm)
# Split by AQS parameter code: 88101 = PM2.5, 44201 = ozone.
pm2.5_mon_utm <- monitors_utm[which(monitors_utm$Parameter.Code=="88101"),]
pm2.5_mon_utm_map <- monitors_utm_map[which(monitors_utm_map$Parameter.Code=="88101"),]
o3_mon_utm <- monitors_utm[which(monitors_utm$Parameter.Code=="44201"),]
o3_mon_utm_map <- monitors_utm_map[which(monitors_utm_map$Parameter.Code=="44201"),]
# Cache all six derived objects for the downstream mapping scripts.
save(monitors_utm, monitors_utm_map,
     pm2.5_mon_utm, pm2.5_mon_utm_map,
     o3_mon_utm, o3_mon_utm_map,
     file="./Data/Spatial Data/monitors_utm_map.RData")
#' -----------------------------------------------------------------------------
#' Mapping the air monitors
#' -----------------------------------------------------------------------------
# Reload the cached monitor layers and convert back to lat/long for ggmap.
load("./Data/Spatial Data/monitors_utm_map.RData")
o3_mon <- as.data.frame(spTransform(o3_mon_utm, CRS(lat_long)))
pm2.5_mon <- as.data.frame(spTransform(pm2.5_mon_utm, CRS(lat_long)))
mon_df <- rbind(o3_mon, pm2.5_mon)
# Basemap centered on the Denver metro area; zoom 9 covers the study region.
base_map <- get_map(location = "Aurora, CO", zoom=9)
# Map furniture: 10 km scale bar plus a north arrow drawn as segment + "N".
scale_bar <- ggsn::scalebar(location = "bottomright", dd2km = T, model="WGS84",
                            dist=10, st.bottom=F, st.size = 3, height=0.0125,
                            x.min = -105.5, x.max = -104.0,
                            y.min = 39.08, y.max = 40.4)
n_arrow <- geom_segment(arrow=arrow(length=unit(4, "mm")),
                        aes(x=-104.05, xend=-104.05, y=39.15, yend=39.25),
                        color="black", size=1)
n_label <- geom_text(aes(x=-104.05, y=39.28), label="N")
# Quick visual check of the basemap extent and furniture placement.
ggmap(base_map) +
  scale_bar + n_arrow + n_label +
  scale_x_continuous(breaks = scales::pretty_breaks(n = 10)) +
  scale_y_continuous(breaks = scales::pretty_breaks(n = 10))
# Final monitor map: points jittered horizontally so co-located PM2.5 and
# O3 monitors stay visible; unicode escapes render sub/superscripts.
monitor_map <- ggmap(base_map) +
  ggtitle("PM\u2082.\u2085 and O\u2083 monitors in the Denver Area") +
  geom_point(data=mon_df, aes(x=Longitude, y=Latitude, color=as.factor(Parameter.Code)),
             pch=19, cex=3, position = position_jitter(w = 0.3, h = 0)) +
  scale_color_manual(name="Pollutant",
                     values=c("red", "blue"),
                     labels=c("O\u2083", "PM\u2082.\u2085")) +
  scale_bar + n_arrow + n_label +
  simple_theme2
print(monitor_map)
# Save at print quality, then drop the plot object from the workspace.
ggsave(monitor_map, filename = "./Maps/Area Monitors.jpeg", device = "jpeg",
       dpi=600, width = 7, height = 7, units="in")
rm(monitor_map)
#' -----------------------------------------------------------------------------
#' Time series plots of pm2.5 and ozone at each monitor
#' -----------------------------------------------------------------------------
# load("./Data/Air Quality/AQS Data 2009-2017.RData")
#
# p <- c("88101", "44201")
# p_name <- c("PM\u2082.\u2085 (\u03BCg/m\u00B3)", "O\u2083 (ppm)")
# p_lab <- c("PM2.5", "Ozone")
#
# for (i in 1:length(p)) {
# df <- output[which(output$Parameter.Code==as.integer(p[i]) & output$POC==1),]
# ts_plot <- ggplot(data=df, aes(x=datetime, y=Sample.Measurement)) +
# geom_line(aes(group=monitor_id, color=monitor_id)) +
# geom_smooth(color="black", size=1) +
# scale_x_datetime(date_breaks = "6 months", date_labels = "%b %d %H:%M") +
# xlab("Date") + ylab(p_name[i]) +
# theme(axis.text.x=element_text(angle=60, hjust=1)) +
# simple_theme
# print(ts_plot)
#
# ggsave(ts_plot, filename = paste("./Maps/Criteria Pollutants/TS_",
# p_lab[i], "_2009-2017.jpeg", sep=""),
# device = "jpeg", dpi=400, width = 7, height = 6, units="in")
# }
|
b477789fa11d74d597d32f6a9cf65f8f996541cd
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/stcov/examples/haff_cov.Rd.R
|
62171aca58a0252b9b53009a239a5c1aa21d823a
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 188
|
r
|
haff_cov.Rd.R
|
library(stcov)
### Name: haff_cov
### Title: Stein/Haff's covariance estimator
### Aliases: haff_cov
### ** Examples

# Draw one 5x5 Wishart scatter matrix with 10 degrees of freedom and an
# identity scale matrix, then apply the Stein/Haff covariance estimator.
dim_p <- 5
df_n <- 10
scatter <- rWishart(1, df_n, diag(dim_p))[, , 1]
haff_cov(scatter, df_n)
|
db366c2222c1cd5f75cffede9f557e59fba66dd2
|
8f789aa5b5e5c0054f8d501dbb00f22a70c7a22a
|
/man/getAnnotations.Rd
|
e2cb96c141edc9d12787ad476c96ad66062a2212
|
[] |
no_license
|
ddiez/rTRM
|
3458ea89798a374736f19df59e8be64ddc480e9b
|
8603c339ee67a94958d2be6f8cd8b7c6bb55f7f2
|
refs/heads/master
| 2021-01-10T20:30:21.929922
| 2015-10-09T03:14:59
| 2015-10-09T03:14:59
| 37,570,884
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 576
|
rd
|
getAnnotations.Rd
|
\name{getAnnotations}
\alias{getAnnotations}
%%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Obtain the 'pwm' table from the database, containing PWM's annotations.
}
\description{
Obtain the 'pwm' table from the database, containing PWM's annotations.
}
\usage{
getAnnotations(filter, dbname = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{filter}{
one or more PWM ids.
}
\item{dbname}{
the location of the database (to load custom databases).
}
}
\author{
Diego Diez
}
\examples{
ann = getAnnotations()
}
|
1af66eb2ef60b2e7fed0b9ab24e4ea3f448c3631
|
05ebfa1015404f62337e452210f4c949f30beb58
|
/plot2.R
|
28b453461741626716063b1d0f1d67db22fddb82
|
[] |
no_license
|
SaschaC/ExData_Plotting1
|
cf3defc135616a5c2810b362e19da8abda5577a9
|
adaf26ad98dddce37142faa93d3d859a2d9cf26c
|
refs/heads/master
| 2021-01-21T03:51:09.226851
| 2015-05-09T20:20:58
| 2015-05-09T20:20:58
| 35,307,674
| 0
| 0
| null | 2015-05-09T00:03:19
| 2015-05-09T00:03:18
| null |
UTF-8
|
R
| false
| false
| 758
|
r
|
plot2.R
|
par(mfrow = c(1, 1))
# Find the rows belonging to the two target dates (1-2 Feb 2007), then read
# only that contiguous slice of the raw file.
target_rows <- grep("^(1/2/2007|2/2/2007)", readLines("household_power_consumption.txt"))
power <- read.table("household_power_consumption.txt",
                    skip = target_rows[1] - 1,
                    nrows = length(target_rows),
                    sep = ";")
# Restore the header names lost by skipping the first line of the file.
colnames(power) <- c("Date", "Time", "Global_active_power",
                     "Global_reactive_power", "Voltage", "Global_intensity",
                     "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
# Combine date and time into a single timestamp column.
power$timedate <- paste(power$Date, power$Time)
power <- transform(power, timedate = strptime(power$timedate, "%d/%m/%Y %H:%M:%S"))
# Draw the global active power time series to a PNG device.
png("plot2.png")
with(power, plot(timedate, Global_active_power, type = "l",
                 xlab = "", ylab = "Global Active Power (kilowatts)"))
dev.off()
|
2fc97504f4fc6708fbbf6d3b4925ba17d9f81f9b
|
02a557ed0914471fa4013a49df2b0ec75338992d
|
/Clustering/Users/ClusteringBy4H.R
|
f8c3aa41466123c3eea440977096bca8d364bcde
|
[] |
no_license
|
Flibidi42/Pe---Big-Data
|
43dc39376c7ca420fc28da62f1c425a91286109d
|
246222fe574ddb75cc090beb4d5776045b83a563
|
refs/heads/master
| 2021-01-17T14:33:14.852034
| 2017-03-24T09:36:23
| 2017-03-24T09:36:23
| 84,090,963
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 561
|
r
|
ClusteringBy4H.R
|
# Collapse the 2-hour activity columns of Table_clust_user_date into 4-hour
# bins (assumes column 1 is UserId and columns 2-13 are twelve 2-hour slots
# -- TODO confirm against the script that builds Table_clust_user_date).
Table_clust_user_4h <- Table_clust_user_date
# Group the time slots: add each odd slot into the preceding even column.
Table_clust_user_4h[, seq(2, 13, by = 2)] =
  Table_clust_user_date[, seq(2, 13, by = 2)] + Table_clust_user_date[, seq(2, 13, by =
                                                                              2) + 1]
# Drop the now-redundant second column of every pair.
Table_clust_user_4h = Table_clust_user_4h[,-seq(3, 13, by = 2)]
colnames(Table_clust_user_4h) <-
  c("UserId",
    "0h-4h",
    "4h-8h",
    "8h-12h",
    "12h-16h",
    "16h-20h",
    "20h-24h")
# Persist the 4-hour aggregation for the clustering step.
write.csv(Table_clust_user_4h, file = "Clustering/ClusteringUser4h.csv")
|
f265d76ced7f2c0b0cf496d72180f259a5b74021
|
24de4c676a5313bde21d8636456f955218a63499
|
/Statistic/Non_parametric/Friedman_v1.R
|
62e0a9103ed161013f2b4823ed110c7c378fa094
|
[] |
no_license
|
isacmendes/Gerador_R
|
c1d90d97076de3b3219147b7f391f7fd4e2191c2
|
c8fda09678e9a0ec4ef61b688bb776b7bea29a48
|
refs/heads/master
| 2023-03-26T21:33:22.864353
| 2021-03-28T03:50:12
| 2021-03-28T03:50:12
| 327,659,760
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 3,758
|
r
|
Friedman_v1.R
|
# Title : TODO
# Objective : Compares pairs samples (batchs xx)
# Created by: Isac
# Created on: 19/01/2021
#1) Library and data source
options(scipen=999)
#
if (!require(dplyr)) install.packages("dplyr")
library(dplyr)
if (!require(rstatix)) install.packages("rstatix")
library(rstatix)
if (!require(stats)) install.packages("stats")
library(stats)
if (!require(PMCMRplus)) install.packages("PMCMRplus")
library(PMCMRplus)
if (!require(ggplot2)) install.packages("ggplot2")
library(ggplot2)
# if (!require(nortest)) install.packages("nortest")
# library(nortest)
# if (!require(fpp)) install.packages("fpp")
# library(fpp)
# if (!require(ARTool)) install.packages("ARTool")
# library(ARTool)
# if (!require(car)) install.packages("car")
# library(car)
# if (!require(moments)) install.packages("moments")
# library(moments)
# Earlier sample batches, kept for reference:
#amostra<-read.csv('G:/Meu Drive/Doutorado/Orientação/Escalonamento/Implementações/Max_NPV/Samples/2021-01-14/Result2021-14-01.csv', sep=',',dec=',' )
#amostra<-read.csv('G:/Meu Drive/Doutorado/Orientação/Escalonamento/Implementações/Max_NPV/Samples/2021-01-17/Result2021-17-01.csv', sep=',',dec=',' )
# Latest batch of scheduling results (comma decimal separator).
amostra<-read.csv('G:/Meu Drive/Doutorado/Orientação/Escalonamento/Implementações/Max_NPV/Samples/2021-01-21/Result2021-21-01_Last_batch.csv', sep=',',dec=',' )
# Treat the design variables as factors for the rank-based analysis.
amostra$algo <- factor(amostra$algo)
amostra$vertices <- factor(amostra$vertices)
amostra$percNeg <- factor(amostra$percNeg)
amostra$disRate <- factor(amostra$disRate)
amostra$layer <- factor(amostra$layer)
amostra$fan <- factor(amostra$fan)
# Build a block id: each run of 3 consecutive rows is assumed to be the
# three algorithms applied to the same problem instance -- TODO confirm
# against the generator that wrote the CSV.
id <- c()
count_e <- 1
len <- (length(amostra$effort))/3
for (i in 1:len){
  for (count_i in 1:3) {
    id <- c(id, as.integer(count_e))
  }
  count_e <- count_e + 1
}
algo <- amostra$algo
effort <- amostra$effort
time <- amostra$time
amostra2 <- tibble(id, algo, effort, time)
amostra2$id <- factor(amostra2$id)
View(amostra2)
glimpse(amostra2)
#2) Apply the Friedman test (blocks = instances via id, groups = algorithms)
# amostra2 %>% group_by(algo) %>%
#   get_summary_stats(amostra2$effort, type="median_iqr")
friedman.test(effort ~ algo | id, data=amostra2)
#3) Post-hoc: all pairwise Siegel comparisons, Bonferroni-adjusted p-values
frdAllPairsSiegelTest(amostra2$effort, amostra2$algo,
                      amostra2$id, p.adjust.method = "bonferroni")
print('#4) Descriptve analysis')
amostra2 %>% group_by(algo) %>%
  get_summary_stats(effort, type="median_iqr")
# #4) Plots to the explanied
# par(mfrow=c(1,3))
# boxplot(amostra2$effort[amostra2$algo=='RS'], main='RS', cex.main=3, cex.lab=1.3, cex.axis=2)
# boxplot(amostra2$effort[amostra2$algo=='SAA'], main='SAA', cex.main=3, cex.lab=1.3, cex.axis=2)
# boxplot(amostra2$effort[amostra2$algo=='HS'], main='HS', cex.main=3, cex.lab=1.3, cex.axis=2)
# par(mfrow=c(1,1))
# boxplot(amostra2$effort ~amostra2$algo)
# #
# par(mfrow=c(1,3))
# hist(amostra2$effort[amostra2$algo=='RS'],
# ylab='Frequency', xlab='Cost', main='RS', cex.main=3, cex.lab=1.3, cex.axis=2)
# hist(amostra2$effort[amostra2$algo=='SAA'],
# ylab='Frequency', xlab='Cost', main='SAA', cex.main=3, cex.lab=1.3, cex.axis=2)
# hist(amostra2$effort[amostra2$algo=='HS'],
# ylab='Frequency', xlab='Cost', main='HS', cex.main=3, cex.lab=1.3, cex.axis=2)
# #Global histogram
# hist(amostra2$effort,
# ylab='Frequency', xlab='Cost', main='RS x SAA x HS', cex.main=3, cex.lab=1.3, cex.axis=2)
# Global stacked histogram
# ggplot(amostra2, aes(x=effort)) +
# geom_histogram(aes(color=algo, fill=algo,
# alpha=0.2, position='stack', binwidth=30))
# The Friedman test showed a significant effect of the algorithm on the
# ordering of the computational cost of the schedule (chi-squared = 3209.3;
# p-value < 2.2e-16 at the 5% significance level). The Dunn-Bonferroni
# post-hoc showed that the computational cost ordering is HS < SAA < RS.
|
09fa03379bbb61b0020f23ed745410faf2a1ceb4
|
443b37ea9377bfd267f68c20123893a85db17852
|
/tests/testthat.R
|
edb9011cc70a4cefbf956570193bdb03091e633b
|
[] |
no_license
|
CharlesJB/rnaseq
|
7cda74303ebceaf9f8561ce5f331c35f034793d6
|
77352def1a5a2b0e0b7e6400ad13bd7385de0181
|
refs/heads/master
| 2023-04-02T13:03:09.120128
| 2023-03-15T20:12:24
| 2023-03-15T20:12:24
| 125,427,062
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 56
|
r
|
testthat.R
|
# Standard testthat entry point: discover and run every test under
# tests/testthat for the rnaseq package.
library(testthat)
library(rnaseq)
test_check("rnaseq")
|
ec419ed37f059c9e77363312e7423e282fc4aafc
|
81a62ed4c5689f9cda1dbf51ff2e5812506e8058
|
/1.propensity_score_analysis/05-1_ui_ranking.R
|
3be150dfab73f447b745365cd7e1f8d0577ff065
|
[
"MIT"
] |
permissive
|
taiyoutsuhara/portfolio-r
|
7a138c60e60fa71e3e98bd26d3c70872f21401bd
|
49c1f74e4a5322ab1ef41ea35a3cba20e5e33208
|
refs/heads/master
| 2020-12-01T11:07:09.409804
| 2020-02-02T10:31:30
| 2020-02-02T10:31:30
| 230,613,432
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 974
|
r
|
05-1_ui_ranking.R
|
## Visualization of the estimation results ##
# Prepare the data used for plotting #
dataframe_for_ggplot2_at_1st_Comparison = dataframe_for_ggplot2_at_2nd_Comparison = dataframe_for_ggplot2_at_3rd_Comparison = list()
# Prepend an all-zero ranking entry so every comparison has a baseline.
# (length_of_ServiceType, ranking_of_ipwe and make_dataframe_for_ggplot2
# are defined elsewhere in the app -- TODO confirm.)
ranking_of_ipwe_including_all_zero = modifyList(list(rep(0, length_of_ServiceType - 1)), ranking_of_ipwe)
dataframe_for_ggplot2_at_1st_Comparison = lapply(ranking_of_ipwe_including_all_zero, function(x){make_dataframe_for_ggplot2(x, "1st")})
dataframe_for_ggplot2_at_2nd_Comparison = lapply(ranking_of_ipwe_including_all_zero, function(x){make_dataframe_for_ggplot2(x, "2nd")})
dataframe_for_ggplot2_at_3rd_Comparison = lapply(ranking_of_ipwe_including_all_zero, function(x){make_dataframe_for_ggplot2(x, "3rd")})
# Visualization screen: a dashboard tab with a double-clickable plot and a
# text area that echoes the double-click coordinates #
tabItem_Visualization = tabItem(
  "tab_Visualization",
  h2("Visualization"),
  # Layout for Ranking tab #
  plotOutput("plot",
             dblclick = dblclickOpts(id = "plot_dbl_click")
  ),
  verbatimTextOutput("plot_dbl_click_info")
)
|
1340f8e7d70156a93e8e70767053febc0798a8ba
|
436570c53fbf34dd2ac73282b4b3cf558c214d3e
|
/graphs/axestitlemtext.R
|
df4a81fc2ff3e99b8207aaaf8b49849b736aa246
|
[] |
no_license
|
dupadhyaya/dspgmsc2017
|
4ce6debe7f87a4ac20da98cb3cf049c6c60335c5
|
e6062aa49fd0e10466830c6c03511823aa42e5ca
|
refs/heads/master
| 2021-01-22T16:53:35.407476
| 2018-05-23T04:09:50
| 2018-05-23T04:09:50
| 100,725,221
| 9
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,105
|
r
|
axestitlemtext.R
|
# Axes Mtext Text
# Teaching script: demonstrates adding text inside plots (text), in the
# margins (mtext), and drawing fully custom axes (axis) with base graphics.
#Text can be added to graphs using the text( ) and mtext( ) functions.
#text( ) places text within the graph while
#mtext( ) places text in one of the four margins.
#location location can be an x,y coordinate.
#Alternatively, the text can be placed interactively via mouse by specifying location as locator(1).
#pos position relative to location. 1=below, 2=left, 3=above, 4=right.
#If you specify pos, you can specify offset= in percent of character width.
#side which margin to place text. 1=bottom, 2=left, 3=top, 4=right.
#you can specify line= to indicate the line in the margin starting with 0 and moving out. you can also specify adj=0 for left/bottom alignment or
#adj=1 for top/right alignment.
#Other common options are cex, col, and font (for size, color, and font style respectively).
#Text ---------
attach(mtcars)
plot(wt, mpg, main="Milage vs. Car Weight",
     xlab="Weight", ylab="Mileage", pch=18, col="blue")
# Label each point with its row name, offset to the right of the point.
text(wt, mpg, row.names(mtcars), cex=0.6, pos=4, col="red")
#You can add mathematically formulas to a graph using TEX-like rules. See help(plotmath) for details and examples.
#Axes -----------
# Usage template for axis(). NOTE(review): kept as a comment because the
# original executed this line with placeholder arguments, which stopped
# the script with an error when sourced.
# axis(side, at=, labels=, pos=, lty=, col=, las=, tck=, ...)
#side an integer indicating the side of the graph to draw the axis (1=bottom, 2=left, 3=top, 4=right)
#at a numeric vector indicating where tic marks should be drawn
#labels a character vector of labels to be placed at the tickmarks
#(if NULL, the at values will be used)
#pos the coordinate at which the axis line is to be drawn.
#(i.e., the value on the other axis where it crosses)
#lty line type
#col the line and tick mark color
#las labels are parallel (=0) or perpendicular(=2) to axis
#tck length of tick mark as fraction of plotting region (negative number is outside graph, positive number is inside, 0 suppresses ticks, 1 creates gridlines) default is -0.01
#The option axes=FALSE suppresses both x and y axes. xaxt="n" and yaxt="n" suppress the x and y axis respectively
# A Silly Axis Example
# specify the data
x <- c(1:10); y <- x; z <- 10/x
# create extra margin room on the right for an axis
par(mar=c(5, 4, 4, 8) + 0.1)
# plot x vs. y
plot(x, y,type="b", pch=21, col="red",
     yaxt="n", lty=3, xlab="", ylab="")
# add x vs. 1/x
lines(x, z, type="b", pch=22, col="blue", lty=2)
# draw an axis on the left
axis(2, at=x,labels=x, col.axis="red", las=2)
# draw an axis on the right, with smaller text and ticks
axis(4, at=z,labels=round(z,digits=2),
     col.axis="blue", las=2, cex.axis=0.7, tck=-.01)
# add a title for the right axis
mtext("y=1/x", side=4, line=3, cex.lab=1,las=2, col="blue")
# add a main title and bottom and left axis labels
title("An Example of Creative Axes", xlab="X values",
      ylab="Y=X")
# Minor Ticks -------------
library(Hmisc)
# Usage template for Hmisc::minor.tick(). NOTE(review): commented out --
# `n` was never defined, so the original line errored when sourced.
# minor.tick(nx=n, ny=n, tick.ratio=n)
#nx is the number of minor tick marks to place between x-axis major tick marks.
#ny does the same for the y-axis.
#tick.ratio is the size of the minor tick mark relative to the major tick mark. The length of the major tick mark is retrieved from par("tck").
|
14c81dd13c841b040ecfceeac60646c9e8280efa
|
3025bff143cf6f0977bfcf5fc51f65ccbe14a98b
|
/man/MSALinearity-class.Rd
|
d56f0248d07e553174e1c7642d43951a9a35b92e
|
[] |
no_license
|
acabaya/qualityTools
|
b17169b73dd92e86380e759dd228c1d79ecc3203
|
e7eeac7cc7e95272b828f96db66f0a339ef6c957
|
refs/heads/master
| 2021-05-31T05:43:02.741691
| 2016-02-24T17:40:35
| 2016-02-24T17:40:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,742
|
rd
|
MSALinearity-class.Rd
|
\name{MSALinearity-class}
\keyword{Measurement Systems Analysis}
\Rdversion{1.1}
\docType{class}
\alias{MSALinearity-class}
\title{Class \code{"MSALinearity"}}
\description{MSALinearity Class
}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{new("MSALinearity", ...)}.
}
\section{Slots}{
\describe{
\item{\code{X}:}{Object of class \code{"data.frame"} ~~ }
\item{\code{Y}:}{Object of class \code{"data.frame"} ~~ }
\item{\code{model}:}{Object of class \code{"lm"} ~~ }
\item{\code{conf.level}:}{Object of class \code{"numeric"} ~~ }
\item{\code{Linearity}:}{Object of class \code{"numeric"} ~~ }
\item{\code{GageName}:}{Object of class \code{"character"} ~~ }
\item{\code{GageTolerance}:}{Object of class \code{"numeric"} ~~ }
\item{\code{DateOfStudy}:}{Object of class \code{"character"} ~~ }
\item{\code{PersonResponsible}:}{Object of class \code{"character"} ~~ }
\item{\code{Comments}:}{Object of class \code{"character"} ~~ }
\item{\code{facNames}:}{Object of class \code{"character"} ~~ }
}
}
\section{Methods}{
\describe{
\item{as.data.frame}{\code{signature(x = "MSALinearity")}: ... }
\item{plot}{\code{signature(x = "MSALinearity")}: ... }
\item{response}{\code{signature(object = "MSALinearity")}: ... }
\item{response<-}{\code{signature(object = "MSALinearity")}: ... }
\item{show}{\code{signature(object = "MSALinearity")}: ... }
\item{summary}{\code{signature(object = "MSALinearity")}: ... }
}
}
\author{
Thomas Roth: thomas.roth@tu-berlin.de \cr
Etienne Stockhausen: stocdarf@mailbox.tu-berlin.de
}
\examples{
showClass("MSALinearity")
}
\keyword{classes}
|
0e19b1027fdff104a401ff433b216ab13aa0e5bd
|
7f141116154eed50968bddd35c9a47b7194e9b88
|
/tests/testthat/test-simulate.R
|
8c50422f380048eb1fa85a44d1b35c9e073896db
|
[] |
no_license
|
adw96/breakaway
|
36a9d2416db21172f7623c1810d2c6c7271785ed
|
d81b1799f9b224113a58026199a849c2ec147524
|
refs/heads/main
| 2022-12-22T06:20:56.466849
| 2022-11-22T22:35:57
| 2022-11-22T22:35:57
| 62,469,870
| 65
| 22
| null | 2022-11-22T22:35:58
| 2016-07-02T21:10:56
|
R
|
UTF-8
|
R
| false
| false
| 253
|
r
|
test-simulate.R
|
context("simulate")
library(breakaway)
data("apples")
# Both simulators should return frequency tables as data frames:
# rnbinomtable = negative binomial, rztnbinomtable = zero-truncated variant.
test_that("simulation code works", {
  expect_is(rnbinomtable(C = 100, size = 20, probability = .1), "data.frame")
  expect_is(rztnbinomtable(C = 100, size = 20, probability = .1), "data.frame")
})
|
1affb7c297d2244bf49458a5375a803d533db476
|
d81623f397b960279be3666db4060a67815b22da
|
/Kaggle/scripts_for_github/kNN_test.R
|
7cac015a7a7cf6c83b006f0ff008ca520a7de593
|
[] |
no_license
|
elcovi/NYCDSA07_Projects
|
b1d41c4de6ba0acf3bfb55f0f3b7ffb3d65c573e
|
176c617c4dd30839de1d048d196290fd7732d70c
|
refs/heads/master
| 2020-03-06T19:02:54.565868
| 2017-06-29T17:30:43
| 2017-06-29T17:30:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,851
|
r
|
kNN_test.R
|
# Chunked kNN imputation of the Kaggle "loss" target: test rows are appended
# to the training set 2000 at a time and their loss is imputed by VIM::kNN.
# Large working objects live in a dedicated environment (ht_env).
start_time <- Sys.time()
library(data.table)
setwd("~/Desktop/Project4")
dt_train <- as.data.frame(fread("train.csv", stringsAsFactors = T))
dt_test <- as.data.frame(fread("test.csv", stringsAsFactors = T))
# Column 1 of the test file is the row id; kept aside for the submission.
test_id <- dt_test[[1]]
temp <- dt_test[,-1]
# Mark the target as missing so kNN will impute it.
temp$loss <- NA
test_num <- nrow(temp)
train_num <- nrow(dt_train)
# Rule-of-thumb neighborhood size: k = sqrt(n_train).
k_value <- round(sqrt(train_num))
ht_env <- new.env(hash=TRUE)
with(ht_env, temp_train <- dt_train)
with(ht_env, temp_train <- temp_train[,-1])
# Shifted log transform of the target; inverted (exp - 200) before output.
with(ht_env, temp_train$loss <- log(temp_train$loss + 200))
with(ht_env, temp_test <- temp)
rm(dt_test)
rm(dt_train)
rm(temp)
with(ht_env, submission <- data.frame(id = numeric(), loss = numeric()))
# NOTE(review): the magic numbers 454 and 1546 assume a specific test-set
# size whose final chunk holds exactly 1546 rows -- TODO confirm; this
# loop breaks silently for other dataset sizes.
for (i in seq(1, (test_num + 454), 2000)){
  # Move up to 2000 pending test rows into the training frame for kNN.
  if (nrow(ht_env$temp_test) > 2000){
    with(ht_env, temp_train <- rbind(temp_train, temp_test[1:2000,]))
    with(ht_env, temp_test <- temp_test[-(1:2000),])
  }else{
    with(ht_env, temp_train <- rbind(temp_train, temp_test))
  }
  # Impute, then drop the appended test rows again so each chunk sees only
  # the original training set plus itself.
  with(ht_env, pred <- VIM::kNN(ht_env$temp_train, k = k_value))
  with(ht_env, temp_train <- temp_train[1:train_num,])
  # Back-transform the imputed losses for this chunk.
  if (nrow(ht_env$temp_test) > 2000){
    with(ht_env, submission_temp <- exp(pred$loss[(train_num+1):(train_num+2000)]) - 200)
  }else{
    with(ht_env, submission_temp <- exp(pred$loss[(train_num+1):(train_num+1546)]) - 200)
  }
  with(ht_env, temp_sub <- data.frame(id = test_id[i:(i+length(submission_temp)-1)], loss = submission_temp))
  with(ht_env, submission <- rbind(submission, temp_sub))
  # Progress logging per chunk.
  print(i)
  print(Sys.time())
  print(ht_env$temp_sub)
}
write.csv(ht_env$submission, "submission_knn.csv", row.names = FALSE)
end_time <- Sys.time()
cat ("Run started at :", as.character(as.POSIXct(start_time, format = "%Y-%m-%d %H:%M:%S")))
cat ("\n")
cat ("Run finished at :", as.character(as.POSIXct(end_time, format = "%Y-%m-%d %H:%M:%S")))
|
7a1d0daf4f2c55bd0520fc8bcfd2040e51c998c2
|
1d38e74b55dee8655f5f67bfda31220dda4affa7
|
/R/simulation.R
|
d25f936b01bcf9dbb9fb5c6e9c8b7c6a26d63b46
|
[
"MIT"
] |
permissive
|
ZhuolinSong/Length-penalty-for-principal-curve
|
b421529b5befb6bae592f7870764b57c27724ac6
|
29426aeaf378ae930441bb2f36fb77068ebd8418
|
refs/heads/main
| 2023-08-25T02:18:43.095136
| 2021-10-14T06:02:42
| 2021-10-14T06:02:42
| 394,865,033
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,390
|
r
|
simulation.R
|
#' Grid Cross Validation simulation
#'
#' Parallel simulation over the seven example cases (see \code{y_generator}).
#'
#' @param ncores number of cores used in simulation
#' @param s_n number of observations
#' @param s_k number of knots
#' @param s_q dimension
#' @param ... other args for cv_grid
#' (maxit, thresh, l_range, s_range, folds, lambda_l)
#' @import parallel
#' @return a list of fitted ppslp objects, one per simulation case
#' @keywords Principal curve
#' @export
grid_simulation <- function(ncores = 16, s_n = 100L,
                            s_k = 25L, s_q = 2L, ...) {
  # NOTE(review): `inner_loop <-` also binds the worker function in the
  # calling frame as a side effect of the assignment; harmless but unusual.
  grid <- mclapply(1:7, inner_loop <- function(case) {
    init <- NULL
    # Cases 3 and 6 are initialized at the true curve by design.
    if (case == 3 || case == 6) {
      init <- xy_truth(case, s_k)
    }
    m_y <- y_generator(case, s_n, s_q)
    output <- ppslp::cv_grid(m_y, s_k, init = init, ...)
    return(output)
  }, mc.cores = ncores)
  return(grid)
}
#' Princurve Cross Validation simulation
#'
#' Parallel simulation over the seven example cases (see \code{y_generator}).
#'
#' @param ncores number of cores used in simulation
#' @param s_n number of observations
#' @param s_k number of knots
#' @param s_q dimension
#' @param ... other args for cv_princurve
#' (maxit, thresh, s_range, folds, lambda_l)
#' @import parallel
#' @return a list of fitted ppslp objects, one per simulation case
#' @keywords Principal curve
#' @export
princurve_simulation <- function(ncores = 16, s_n = 100L,
                                 s_k = 25L, s_q = 2L, ...) {
  fit <- mclapply(1:7, inner_loop <- function(case) {
    init <- NULL
    # Cases 3 and 6 are initialized at the true curve by design.
    if (case == 3 || case == 6) {
      init <- xy_truth(case, s_k)
    }
    m_y <- y_generator(case, s_n, s_q)
    output <- ppslp::cv_princurve(m_y, s_k, start = init, ...)
    return(output)
  }, mc.cores = ncores)
  return(fit)
}
#' Smoothing parameter Cross Validation simulation
#'
#' Parallel simulation over the seven example cases (see \code{y_generator}).
#'
#' @param ncores number of cores used in simulation
#' @param s_n number of observations
#' @param s_k number of knots
#' @param s_q dimension
#' @param ... other args for cv_smooth
#' (maxit, thresh, l_range, s_range, folds, lambda_l)
#' @import parallel
#' @return a list of fitted ppslp objects, one per simulation case
#' @keywords Principal curve
#' @export
smooth_simulation <- function(ncores = 16, s_n = 100L,
                              s_k = 25L, s_q = 2L, ...) {
  out <- mclapply(1:7, inner_loop <- function(case) {
    init <- NULL
    # Cases 3 and 6 are initialized at the true curve by design.
    if (case == 3 || case == 6) {
      init <- xy_truth(case, s_k)
    }
    m_y <- y_generator(case, s_n, s_q)
    output <- ppslp::cv_smooth(m_y, s_k, init = init, ...)
    return(output)
  }, mc.cores = ncores)
  return(out)
}
#' Length penalty Cross Validation simulation
#'
#' Parallel simulation over the seven example cases (see \code{y_generator}).
#' (The original title said "Smoothing parameter" -- a copy-paste slip;
#' this function cross-validates the length penalty via \code{cv_length}.)
#'
#' @param ncores number of cores used in simulation
#' @param s_n number of observations
#' @param s_k number of knots
#' @param s_q dimension
#' @param ... other args for cv_length
#' (maxit, thresh, l_range, s_range, folds, lambda_l)
#' @import parallel
#' @return a list of fitted ppslp objects, one per simulation case
#' @keywords Principal curve
#' @export
length_simulation <- function(ncores = 16, s_n = 100L,
                              s_k = 25L, s_q = 2L, ...) {
  out <- mclapply(1:7, inner_loop <- function(case) {
    init <- NULL
    # Cases 3 and 6 are initialized at the true curve by design.
    if (case == 3 || case == 6) {
      init <- xy_truth(case, s_k)
    }
    m_y <- y_generator(case, s_n, s_q)
    output <- ppslp::cv_length(m_y, s_k, init = init, ...)
    return(output)
  }, mc.cores = ncores)
  return(out)
}
#' Simulation Examples
#'
#' Generates the data for one of seven example settings. Note that the
#' noisy cases (2, 3, 5, 6) call \code{set.seed}/\code{RNGkind}, altering
#' the global RNG state as a side effect.
#'
#' @param case number indicates the examples(1-7):
#' 1: 3/4 circle
#' 2: 3/4 circle with noise
#' 3: 3/4 circle with noise, initialize with the truth
#' 4: 1/2 circle
#' 5: 1/2 circle with noise
#' 6: 1/2 circle with noise, initialize with the truth
#' 7: the shape of number 3 from mnist dataset
#' @param n number of observations in the simulation
#' @param q dimension of the simulation
#' @param seeds RNG seed used by the noisy cases (2, 3, 5, 6)
#' @return the simulation examples (an n-by-q matrix; for case 7, the
#'   bundled \code{threeExample} point cloud)
#' @keywords Principal curve
#' @export
y_generator <- function(case, n=100L, q=2L, seeds=999983) {
  if (case == 1) {
    # Noise-free 3/4 circle of radius 5 at uniformly random angles.
    noise <- runif(n, 0, 1.5 * pi)
    return(matrix(5 * c(sin(noise), cos(noise)), n, q, byrow = FALSE))
  } else if (case == 2 || case == 3) {
    RNGkind("L'Ecuyer-CMRG")
    set.seed(seeds)
    noise <- runif(n, 0, 1.5 * pi)
    truth <- matrix(5 * c(sin(noise), cos(noise)), n, q, byrow = FALSE)
    # Standard-normal noise around the true curve.
    return(truth + matrix(rnorm(q * n), n, q))
  } else if (case == 4) {
    # Noise-free half circle of radius 5.
    mean <- runif(n, 0, pi)
    return(matrix(5 * c(cos(mean), sin(mean)), n, q, byrow = FALSE))
  } else if (case == 5 || case == 6) {
    RNGkind("L'Ecuyer-CMRG")
    set.seed(seeds + 1)
    mean <- runif(n, 0, pi)
    mean <- 5 * c(cos(mean), sin(mean))
    # Normal draws centered at the true curve coordinates.
    return(matrix(rnorm(n = q * n, mean), n, q, byrow = FALSE))
  } else if (case == 7) {
    return(cbind(threeExample$x, threeExample$y))
  }
}
#' Start and end point of the simulation Examples
#'
#' Returns the fixed endpoints of the true curve for a given example case.
#'
#' @param case number indicates the examples(1-7):
#' 1-3: 3/4 circle (noise-free / noisy / noisy with true init)
#' 4-6: 1/2 circle (noise-free / noisy / noisy with true init)
#' 7: the shape of number 3 from mnist dataset
#' @return start_end points (matrix):
#' [start, end]^T
#' @keywords Principal curve
#' @export
xy_fix <- function(case) {
  # Endpoint pairs, one row per endpoint (start first, end second).
  three_quarter_ends <- rbind(c(0, 5), c(-5, 0))
  half_circle_ends <- rbind(c(5, 0), c(-5, 0))
  if (case %in% c(1, 2, 3)) {
    return(three_quarter_ends)
  }
  if (case %in% c(4, 5, 6)) {
    return(half_circle_ends)
  }
  if (case == 7) {
    return(cbind(threeExample$xFix, threeExample$yFix))
  }
}
#' The truth curve of the simulation Examples
#'
#' Samples n points evenly along the true curve for a given example case.
#'
#' @param case number indicates the examples(1-7):
#' 1-3: 3/4 circle; 4-6: 1/2 circle;
#' 7: the shape of number 3 from mnist dataset (no analytic truth; zeros)
#' @param n number of the truth
#' @param q dimension of the simulation
#' @return an n-by-q matrix of points on the true curve
#' @keywords Principal curve
#' @export
xy_truth <- function(case, n=10000, q=2) {
  if (case %in% c(1, 2, 3)) {
    # Evenly spaced angles along 3/4 of a radius-5 circle.
    angles <- seq_len(n) * (1.5 * pi / n)
    return(matrix(5 * c(sin(angles), cos(angles)), n, q, byrow = FALSE))
  }
  if (case %in% c(4, 5, 6)) {
    # Evenly spaced angles along a radius-5 half circle.
    angles <- seq_len(n) * (pi / n)
    return(matrix(5 * c(cos(angles), sin(angles)), n, q, byrow = FALSE))
  }
  if (case == 7) {
    # No closed-form truth for the MNIST "3"; return the zero matrix.
    return(matrix(0, n, q, byrow = FALSE))
  }
}
# Mean squared projection distance from a dense sampling (n points) of the
# true curve for `case` to the fitted curve defined by the knot matrix
# `theta` (projection done by ppslp::project; internal helper, not exported).
mse_calculate <- function(case, theta, n=1e5) {
  truth <- xy_truth(case, n)
  proj <- ppslp::project(truth, theta, diff(theta))
  return(proj$dist / n)
}
#' Analysis plot
#'
#' Plot simulation results and compute MSE against the known true curve,
#' optionally overlaying Hastie & Stuetzle's principal curve as a benchmark.
#'
#' @param cv list of fitted ppslp objects from the simulation
#' @param cases vector of the example simulation case numbers to analyze
#' @param benchmark indicate whether to fit and plot the Hastie benchmark
#' @return matrix of MSEs (rows: ppslp, Hastie's; one column per case)
#' @import princurve
#' @keywords Principal curve
#' @export
analysis_plot <- function(cv, cases, benchmark = T) {
  # NOTE(review): with benchmark = FALSE, sapply returns a plain vector and
  # the rownames<- call below errors; the function assumes benchmark = TRUE.
  mse <- sapply(cases, outer_loop <- function(case) {
    output <- cv[[case]]
    if (is.atomic(output)) { # check bugs !
      print(case)
    }
    pp_plot(output)
    theta <- output$theta
    # MSE of our fit against a dense sampling of the true curve.
    dist <- mse_calculate(case, theta, 1e5)
    if (benchmark) {
      y <- output$y
      # Hastie & Stuetzle principal curve on the same data, drawn in green.
      fit <- principal_curve(y)
      lines(fit, type = "l", col = "green", lwd = 2, lty = 1)
      legend("topleft",
             c("Our method", "Hastie"),
             lwd = 2,
             col = c("blue", "green"),
             lty = 1)
      theta <- fit$s
      dist[2] <- mse_calculate(case, theta)
    }
    # Dotted overlay of the true curve.
    lines(xy_truth(case), lty = 3)
    return(dist)
  })
  rownames(mse) <- c("ppslp", "Hastie's")
  return(mse)
}
|
f8b2ea65a0decf98a71cc53d1c0470331cb3b828
|
8de30e7b191229668555fe93d3a1488fee518ec0
|
/Plot2.R
|
48c2c5be86bcbc3a77903daed56af69e3a11e6c1
|
[] |
no_license
|
Epawlakos/ExData_Plotting1
|
920f39159a3513ae7170934eb0b60fb3466db666
|
79ea4864773afc655994a363540633e7a0aa196b
|
refs/heads/master
| 2021-01-16T22:24:31.452268
| 2016-06-16T02:09:09
| 2016-06-16T02:09:09
| 61,254,526
| 0
| 0
| null | 2016-06-16T02:05:59
| 2016-06-16T02:05:58
| null |
UTF-8
|
R
| false
| false
| 202
|
r
|
Plot2.R
|
###### Plot 2 ######
# Draw global active power over time as a line chart. The data frame is now
# an explicit parameter (defaulting to the global `df` for backward
# compatibility) rather than a hidden global dependency; it must have
# `timestamp` (date-time) and `Global_active_power` (numeric) columns
# -- TODO confirm the column names against the script that builds `df`.
plot2 <- function(data = df) {
  plot(data$timestamp, data$Global_active_power, type = "l",
       xlab = "", ylab = "Global Active Power (kilowatts)")
}
# Render to plot2.png (default 480x480 device) in the working directory.
png(file = "plot2.png")
plot2()
dev.off()
|
5fc185cfd4acfc132d0acade2d7896943889a9ef
|
15761261732e0dbca6f5e29d439c25a014656771
|
/unit_test/shniy_unit_test2.r
|
41ccd9535a237d5d308de57d94a9b3ea02bcccf8
|
[] |
no_license
|
linuxcarey/stock_analysis_checklist
|
927e053261e0e5c8a35fcc17e7a6b476bdf05c0c
|
1c58da15724570d5a8617171a69513575db92c1b
|
refs/heads/master
| 2020-08-13T01:17:35.247879
| 2020-01-02T14:31:06
| 2020-01-02T14:31:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,003
|
r
|
shniy_unit_test2.r
|
setwd("~/R/stockVis")
# Load packages ----
library(shiny)
library(tidyverse)
library(quantmod)
library(xml2)
library(rvest)
library(tibble)
library(DT)
# Source helpers ----
# source("helpers.R")
#source("Stock_Analysis_Checklist_shiny.R", local = TRUE)
# User interface ----
ui <- fluidPage(
  titlePanel("stockVis"),
  sidebarLayout(
    sidebarPanel(
      helpText("Select a stock to examine.
        Information will be collected from Yahoo finance."),
      # NOTE(review): the original had a trailing comma after textInput(),
      # which makes sidebarPanel() receive a missing argument and error.
      textInput("symb", "Symbol", "FNV")
      # table output
    ),
    mainPanel(
      tableOutput("tableDT")
    )
  )
)
# Server logic
server <- function(input, output) {
  # Scrape one quarterly balance-sheet row from MarketWatch for the chosen
  # ticker. NOTE(review): the original interpolated an undefined `stck`;
  # the render now reacts to the `symb` text input instead.
  output$tableDT <- renderTable(
    paste0("https://www.marketwatch.com/investing/Stock/", input$symb, "/financials/balance-sheet/quarter") %>%
      read_html() %>%
      html_table() %>%
      map_df(bind_cols) %>%
      select(1:5) %>%
      slice(11)
  )
}
shinyApp(ui = ui, server = server)
|
e706a04cbd5659867a2ee0f1fbefa78c98b7d80b
|
db7a2379f4371d3f628a91b768322044b55ee464
|
/man/rmg_consecutive_four_identical.Rd
|
1031d37a7c458958290aaa6836d528b5f084d796
|
[
"MIT"
] |
permissive
|
psungho/rmgarbage
|
a51978c5b3278413c18f684ba2e5715d3ec6aa57
|
168c80fd0bb0e40dd6133f240f3bf69120cd6f16
|
refs/heads/master
| 2022-04-21T13:39:24.098837
| 2020-04-18T20:30:40
| 2020-04-18T20:30:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 729
|
rd
|
rmg_consecutive_four_identical.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rmgarbage.R
\name{rmg_consecutive_four_identical}
\alias{rmg_consecutive_four_identical}
\title{Rule R: four identical characters}
\usage{
rmg_consecutive_four_identical(x, n = 4L)
}
\arguments{
\item{x}{a character vector}
\item{n}{integer, how many identical consecutive characters to detect}
}
\value{
Logical
}
\description{
If a string has 4 identical characters in a row, it is garbage.
From https://github.com/foodoh/rmgarbage
}
\examples{
garbage_string_4c1 <- "aaaaazzzezrertrg"
garbage_string_4c2 <- "azertyuiopqsdfghj"
rmg_consecutive_four_identical(garbage_string_4c1) # TRUE
rmg_consecutive_four_identical(garbage_string_4c2) # FALSE
}
|
927548374d2c605dc6944192e343ed3274b63a0f
|
9782b6c469f3b0b26d971093edb5c5dce8583b8a
|
/Decision Tree/Classification problem statement.R
|
a58cc3d6b7be2c7656716e4b08af3364ca7e3b82
|
[] |
no_license
|
bhavukbhardwaj/Decision-Tree
|
0511e52d2ef11de13a05dc768038352f618cbb0e
|
051e6596b3cfada494c7082d392e1843dfac48c7
|
refs/heads/master
| 2020-03-24T00:11:25.985725
| 2018-07-26T05:08:41
| 2018-07-26T05:08:41
| 142,278,092
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,890
|
r
|
Classification problem statement.R
|
# Classification of diabetes status with three models (decision tree,
# random forest, naive Bayes), each evaluated on a held-out test set.
#load the data into R (expects Diabetes.csv in the working directory)
diabet<-read.csv("Diabetes.csv")
View(diabet)
#divide the data into Training and Testing datasets
#instead of using library caTools and splitting the data, we can split data by
#drawing a 1/2 label per row with ~70/30 probabilities:
set.seed(3)
id<-sample(2,nrow(diabet),prob = c(0.7,0.3),replace = TRUE)
diabet_train<-diabet[id==1,]
diabet_test<-diabet[id==2,]
#Building Decision Tree
#For rpart() we need to load rpart library
library(rpart)
colnames(diabet)
# NOTE(review): rpart/randomForest/naiveBayes below perform
# classification only if is_diabetic is a factor -- confirm the CSV
# column type before trusting the type = "class" predictions.
diabet_model<-rpart(is_diabetic~.,data = diabet_train)
#Here we are using all columns (other than the response) as predictors
diabet_model
#We can plot it as
plot(diabet_model,margin = 0.1)
#margin adjusts the whitespace around the tree; text() adds node labels
text(diabet_model,use.n = TRUE,pretty = TRUE,cex =0.8)
#create subset and verify one leaf of the fitted tree by hand
temp<-diabet_train[diabet_train$glucose_conc<154.5 & diabet_train$BMI<26.35,]
table(temp$is_diabetic)
#Prediction of test dataset
pred_diabet<-predict(diabet_model,newdata = diabet_test,type = "class")
pred_diabet
#Now we need to compare it with actual values
table(pred_diabet,diabet_test$is_diabetic)
#For creating confusion matrix we can use the following
library(caret)
confusionMatrix(table(pred_diabet,diabet_test$is_diabetic))
#Random Forest
library(randomForest)
diabet_forest<-randomForest(is_diabetic~.,data = diabet_train)
diabet_forest
#Prediction of Test set
pred1_diabet<-predict(diabet_forest,newdata = diabet_test,type = "class")
pred1_diabet
#confusion matrix (caret is already loaded above; this library() is a no-op)
library(caret)
confusionMatrix(table(pred1_diabet,diabet_test$is_diabetic))
# Naive Bayes Classifier
library(e1071)
diabet_naive<-naiveBayes(is_diabetic~.,data = diabet_train)
diabet_naive
#Prediction of Test set
pred2_diabet<-predict(diabet_naive,newdata = diabet_test,type = "class")
pred2_diabet
#Confusion matrix
confusionMatrix(table(pred2_diabet,diabet_test$is_diabetic))
|
0a63697f3176ebfa417c96106175eb26eafe9a4e
|
dd0a313992eb1ed00b3d4ec292cfabf88e65ca02
|
/Summer Project -Quantitative Tightening/Shadow Rate/WX Shadow Rate replication.R
|
f4661fde6190631053d0f6731f242f7b3a0343c5
|
[] |
no_license
|
nikhilg12/nikhil
|
f7e9bfe436aba24f47108f4be48e93c9e9ce7e20
|
1f3de16c06ff1ac7a5884cdfdcb7b27eabf92c8f
|
refs/heads/master
| 2021-01-18T20:05:47.723310
| 2018-08-23T18:46:25
| 2018-08-23T18:46:25
| 86,937,015
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,895
|
r
|
WX Shadow Rate replication.R
|
#DoubleLine ForwardRate
library(readxl)
library(data.table)
library(lubridate)
library(zoo)
library(readr)
library(R.matlab)
library(matlib)
library(MASS)
library(optimx)
library(matrixcalc)
#Take Maturity & Fed as inputs.
Maturity<-c(3,6,12,24,60,84,120)
Fed <-fread("feds200628.xls.csv",skip=9)
#Compute Monthly Forward Rate
# Convert the Fed's daily GSW (Gurkaynak-Svensson-Wright) parameter panel
# into end-of-month annualized one-month forward rates, one row per month
# and one column per maturity in `Maturity` (months).
#   Fed      : data table of the feds200628 file; first column (V1) holds
#              the date, plus BETA0..BETA3, TAU1, TAU2.
#   Maturity : integer vector of maturities in months.
# Returns a T x length(Maturity) matrix of forward rates (percent).
MonthlyForwardRate<-function(Fed,Maturity){
Fed<-data.table(Fed)
setnames(Fed,'V1','Date')
Fed[,Year:=year(Date)][,Month:=month(Date)][,Day:=day(Date)]
Fed<-Fed[Year>=1990]
setorder(Fed,Date)
Fed[,MaxDay:=max(Day),by=c("Year","Month")] #Select the last line of each month.
GSWdata<-Fed[which(Day==MaxDay),c("Year","Month","Day","BETA0","BETA1","BETA2","BETA3","TAU1","TAU2")]
# NOTE(review): the result of this NA filter is discarded (not assigned
# back to GSWdata), so rows with missing betas are NOT actually removed.
GSWdata[!(is.na(BETA0)|is.na(BETA1)|is.na(BETA2)|is.na(BETA3)|is.na(TAU1)|is.na(TAU2)), ] #Remove NA.
# NOTE(review): comparing Day to 31 mislabels months ending on the
# 28th/30th, and `DT[i, ] = NULL` is not a valid data.table row
# deletion -- confirm this branch behaves as intended.
if(GSWdata[nrow(GSWdata),3]<31) GSWdata[nrow(GSWdata),]=NULL #Check if the last observation is the end of the month.
GSWdata[,YearMon:=Year*100+Month]
#Calculate the annualized forward rate.
#NSS (Nelson-Siegel-Svensson) yield at maturity t (years), vectorized
#over the monthly rows of GSWdata via the beta/tau default arguments.
nelson=function(t,beta0=GSWdata$BETA0,beta1=GSWdata$BETA1,beta2=GSWdata$BETA2,beta3=GSWdata$BETA3,tau1=GSWdata$TAU1,tau2=GSWdata$TAU2){
beta0+beta1*(1-exp(-t/tau1))/(t/tau1)+beta2*((1-exp(-t/tau1))/(t/tau1)-exp(-t/tau1))+beta3*((1-exp(-t/tau2))/(t/tau2)-exp(-t/tau2))
}
n=Maturity/12
Yield1<-sapply(n,nelson) #Apply the NSS model to get yield for each maturity.
Yield1<-n*t(Yield1)
n2=(Maturity+1)/12
Yield2<-sapply(n2,nelson)
Yield2<-n2*t(Yield2)
# One-month forward rate between n and n+1 months, annualized.
ForwardRates=(Yield2-Yield1)*12
ForwardRates<-t(ForwardRates)
return(ForwardRates)
}
# Build the J x T panel of one-month forward rates used by the filters
# below (rows = maturities in `Maturity`, columns = months).
forwardrates<-MonthlyForwardRate(Fed,Maturity)
forwardrates<-t(forwardrates)
#Import parameters Wu and Xia used (GATSM estimates).
parameters<-unlist(readMat("parameters.mat"))
# Kalman filter log-likelihood for the Gaussian affine term structure
# model (GATSM), following Wu & Xia (non-extended filter).
#
# parameters: numeric vector of length 22 --
#   [1:9]   rhoP  (3x3 VAR matrix under the physical measure, by column)
#   [10:12] muP   (VAR intercept)
#   [13:14] rhoQ1, rhoQ2 (risk-neutral eigenvalues)
#   [15:20] lower-triangular factor-shock loadings (sigma)
#   [21]    delta0 (short-rate intercept)
#   [22]    measurement-error standard deviation (omegaM diagonal)
#
# Reads `forwardrates` (J x T matrix) and `Maturity` from the enclosing
# environment.  Returns the NEGATIVE log-likelihood (for minimizers), or
# a large penalty value (10^8 / 10^9) when a parameter restriction or a
# numerical check fails.
#
# Cleanups vs. original: removed the unused `sigma_test_mat` and
# `delta1`, hoisted the identity matrix out of the filtering loop, and
# reused `omega` instead of recomputing sigma %*% t(sigma).
KF_GATSM<-function(parameters)
{
  # Parameterization: unpack the 22-element vector.
  T = ncol(forwardrates)
  J = Maturity
  rhoP = matrix(parameters[1:9], ncol = 3)
  muP = parameters[10:12]
  rhoQ1 = parameters[13]
  rhoQ2 = parameters[14]
  sigma = matrix(c(abs(parameters[15]), 0, 0,
                   parameters[16], abs(parameters[18]), 0,
                   parameters[17], parameters[19], abs(parameters[20])),
                 nrow = 3, byrow = TRUE)
  omega = sigma %*% t(sigma)
  delta0 = parameters[21]
  omegaM <- matrix(0, nrow = nrow(forwardrates), ncol = nrow(forwardrates))
  diag(omegaM) = parameters[22]^2
  I3 = diag(3)   # identity, reused in the filtering loop below
  # Restrictions on parameters (return big values if not met).
  if (abs(parameters[1]) > 1 | abs(parameters[5]) > 1 | abs(parameters[9]) > 1) return(10^8) # Rho_P
  if (abs(parameters[13]) > 1 | abs(parameters[14]) > 1) return(10^8)                        # Rho_Q
  if (parameters[22] < 0) return(10^8)                                                       # OmegaM
  # Initialization: X1 = predicted factors, X2 = filtered factors
  # (both indexed on the same time horizon, with X1 starting at 0).
  X1 <- matrix(NA, nrow = 3, ncol = T + 1)
  X2 <- matrix(NA, nrow = 3, ncol = T + 1)
  X1[, 1] = 0
  # Forward-rate loadings aJ (intercept) and bJ (factor loadings),
  # Appendix A of Wu & Xia.
  JJ = seq(1, max(J))
  bn = cbind(rhoQ1^JJ, rhoQ2^JJ, JJ * rhoQ2^(JJ - 1))  # rho^J terms
  Bn = rbind(c(1, 1, 0), bn)
  Bn <- apply(Bn, 2, cumsum)   # cumulative sums of rho^J
  Bn <- Bn[-nrow(Bn), ]        # summation up to (n - 1)
  aJ = c()
  for (i in 1:length(J))
  {
    aJ[i] <- delta0 - 0.5 * t(Bn[J[i], ]) %*% (omega %*% Bn[J[i], ]) / 1200
  }
  bJ = cbind(rhoQ1^J, rhoQ2^J, J * rhoQ2^(J - 1))
  # V1 / V2: predicted / filtered state covariances (3 x 3 x time),
  # started from a diffuse prior (variance 100; third factor scaled).
  V1 <- array(NA, dim = c(3, 3, T + 1))
  V2 <- array(NA, dim = c(3, 3, T + 1))
  V1[, , 1] = 0
  diag(V1[, , 1]) = 100
  V1[3, 3, 1] <- V1[3, 3, 1] / 144
  loglikvec <- c()
  for (i in 1:T)
  {
    F_h = aJ + bJ %*% X1[, i]        # predicted forward rates
    err = forwardrates[, i] - F_h    # measurement innovation
    H = bJ
    S = H %*% V1[, , i] %*% t(H) + omegaM
    # Guard against a (near-)singular innovation covariance.
    if (rcond(S) < 1e-8 | is.na(rcond(S))) return(10^8)
    flag = det(S)
    if (!is.finite(flag) | (flag <= 0)) return(10^9)
    InvS = solve(S)
    K = V1[, , i] %*% t(H) %*% InvS  # Kalman gain from predicted V1
    loglikvec[i] = -0.5 * (length(J) * log(2 * pi) + log(det(S)) + t(err) %*% InvS %*% err)
    X2[, i + 1] = X1[, i] + K %*% err             # update (filtered state)
    X1[, i + 1] = muP + rhoP %*% X2[, i + 1]      # predict next state
    V2[, , i + 1] = (I3 - K %*% H) %*% V1[, , i]  # filtered covariance
    V1[, , i + 1] = rhoP %*% V2[, , i + 1] %*% t(rhoP) + omega  # predicted covariance
  }
  llf = sum(loglikvec)
  return(-llf)
}
# Sanity check: likelihood at Wu & Xia's published GATSM parameters.
KF_GATSM(parameters)
#Optimization: re-estimate the GATSM by quasi-Newton (BFGS) from a
#random start.
set.seed(20)
startvalue<-rnorm(22)
startvalue[21]=12 # start delta0 near a plausible short-rate level
test1<-optim(startvalue,KF_GATSM,method="BFGS")
parameters_test<-c(test1$par)
parameters_test
#Import parameters Wu and Xia used for the shadow-rate (rlb) model.
parameters_2<-unlist(readMat("parameters_rlb.mat"))
# Extended Kalman filter log-likelihood for the shadow-rate term
# structure model (SRTSM) of Wu & Xia, with effective lower bound
# rlb = 0.25.  Same 22-element parameter layout as KF_GATSM; the
# observation equation is the censored forward rate
#   g(z) = rlb + (z - rlb) * pnorm((z - rlb)/SigmaJ)
#              + SigmaJ * dnorm((z - rlb)/SigmaJ),
# linearized at each step for the EKF.
#
# Reads `forwardrates` (J x T) and `Maturity` from the enclosing
# environment.  Returns the NEGATIVE log-likelihood, or a penalty value
# when restrictions / numerical checks fail.
#
# Cleanups vs. original: removed dead code after the likelihood sum
# (SR / Xf were computed and discarded -- see EKF_Shadow_Rate for the
# version that returns them), the duplicate SigmaJ initialization, the
# unused `delta1`, and the per-iteration identity-matrix rebuild.
EKF_SRTSM<-function(parameters)
{
  # Parameterization: unpack the 22-element vector.
  T = ncol(forwardrates)
  J = Maturity
  rhoP = matrix(parameters[1:9], ncol = 3)
  muP = parameters[10:12]
  rhoQ1 = parameters[13]
  rhoQ2 = parameters[14]
  sigma = matrix(c(abs(parameters[15]), 0, 0,
                   parameters[16], abs(parameters[18]), 0,
                   parameters[17], parameters[19], abs(parameters[20])),
                 nrow = 3, byrow = TRUE)
  omega = sigma %*% t(sigma)
  delta0 = parameters[21]
  omegaM <- matrix(0, nrow = nrow(forwardrates), ncol = nrow(forwardrates))
  diag(omegaM) = parameters[22]^2
  rlb = 0.25   # effective lower bound on the short rate (annualized %)
  # Restrictions on parameters (return big values if not met).
  if (abs(parameters[1]) > 1 | abs(parameters[5]) > 1 | abs(parameters[9]) > 1) return(10^8) # Rho_P
  if (abs(parameters[13]) > 1 | abs(parameters[14]) > 1) return(10^8)                        # Rho_Q
  if (parameters[22] < 0) return(10^8)                                                       # OmegaM
  # Initialization: X1 = predicted factors, X2 = filtered factors.
  X1 <- matrix(NA, nrow = 3, ncol = T + 1)
  X2 <- matrix(NA, nrow = 3, ncol = T + 1)
  X1[, 1] = 0
  # Loadings aJ / bJ and forward-rate volatilities SigmaJ (Appendix A).
  JJ = seq(1, max(J))
  bn = cbind(rhoQ1^JJ, rhoQ2^JJ, JJ * rhoQ2^(JJ - 1))  # rho^J terms
  Bn = rbind(c(1, 1, 0), bn)
  Bn <- apply(Bn, 2, cumsum)   # cumulative sums of rho^J
  Bn <- Bn[-nrow(Bn), ]        # summation up to (n - 1)
  aJ = c()
  for (i in 1:length(J))
  {
    aJ[i] <- delta0 - 0.5 * t(Bn[J[i], ]) %*% (omega %*% Bn[J[i], ]) / 1200
  }
  # Compute SigmaJ: conditional s.d. of the J[i]-month-ahead shadow rate.
  cn = rbind(c(1, 1, 0), bn)
  cn = cn[-nrow(cn), ]
  Sigma_step = c()
  SigmaJ = c()
  for (i in 1:length(J))
  {
    for (j in 1:Maturity[i])
    {
      Sigma_step[j] <- t(cn[j, ]) %*% omega %*% cn[j, ]
    }
    # Sum only the first Maturity[i] terms so stale entries from a
    # longer previous maturity cannot leak in if Maturity is unsorted.
    SigmaJ[i] = sqrt(sum(Sigma_step[1:Maturity[i]]))
  }
  bJ = cbind(rhoQ1^J, rhoQ2^J, J * rhoQ2^(J - 1))
  # V1 / V2: predicted / filtered state covariances, diffuse prior.
  V1 <- array(NA, dim = c(3, 3, T + 1))
  V2 <- array(NA, dim = c(3, 3, T + 1))
  V1[, , 1] = 0
  diag(V1[, , 1]) = 100
  V1[3, 3, 1] <- V1[3, 3, 1] / 144
  I3 = diag(3)
  loglikvec <- c()
  for (i in 1:T)
  {
    F_h = aJ + bJ %*% X1[, i]          # predicted shadow forward rates
    Z1_temp = (F_h - rlb) / SigmaJ
    Z2_temp = rlb + (F_h - rlb) * pnorm(Z1_temp) + SigmaJ * dnorm(Z1_temp)
    err = forwardrates[, i] - Z2_temp  # innovation vs censored rates
    H = rep(pnorm(Z1_temp)) * bJ       # Jacobian of the observation eq.
    S = H %*% V1[, , i] %*% t(H) + omegaM
    # Guard against a (near-)singular innovation covariance.
    if (rcond(S) < 1e-8 | is.na(rcond(S))) return(10^8)
    flag = det(S)
    if (!is.finite(flag) | (flag <= 0)) return(10^9)
    InvS = solve(S)
    K = V1[, , i] %*% t(H) %*% InvS    # Kalman gain from predicted V1
    loglikvec[i] = -0.5 * (length(J) * log(2 * pi) + log(det(S)) + t(err) %*% InvS %*% err)
    X2[, i + 1] = X1[, i] + K %*% err             # update (filtered state)
    X1[, i + 1] = muP + rhoP %*% X2[, i + 1]      # predict next state
    V2[, , i + 1] = (I3 - K %*% H) %*% V1[, , i]  # filtered covariance
    V1[, , i + 1] = rhoP %*% V2[, , i + 1] %*% t(rhoP) + omega  # predicted covariance
  }
  llf = sum(loglikvec)
  return(-llf)
}
#Optimize SRTSM.
EKF_SRTSM(parameters_2) # likelihood at Wu & Xia's SRTSM parameters
set.seed(222)
# NOTE(review): `start` is built here but never used -- the optimizer
# below is seeded with `parameters_test` from the GATSM fit instead.
start=rnorm(22)
start[21]=12
#Optimization
test3<-optim(parameters_test,EKF_SRTSM,method="BFGS")
#WX method
# Shadow-rate extraction: runs the same extended Kalman filter as
# EKF_SRTSM but returns filtered quantities instead of the likelihood,
# selected by `index`:
#   index == 1 (default): shadow-rate series SR = delta0 + [1 1 0] X_t
#   index == 2          : model-implied (censored) forward rates
#   otherwise           : the filtered factor paths Xf (3 x T)
# Reads `forwardrates` (J x T) and `Maturity` from the global environment.
# NOTE(review): this duplicates the EKF_SRTSM body almost line for line;
# keep the two in sync when editing either.
EKF_Shadow_Rate=function(parameters,index=1){
#Parameterization: unpack the 22-element vector (same layout as KF_GATSM).
T=ncol(forwardrates)
J=Maturity
rhoP = parameters[1:9];
rhoP = matrix(rhoP,ncol=3)
muP = parameters[10:12]
rhoQ1 = parameters[13]
rhoQ2 = parameters[14]
sigma = matrix(c(abs(parameters[15]), 0, 0 , parameters[16], abs(parameters[18]) ,0, parameters[17], parameters[19], abs(parameters[20])),nrow=3,byrow=TRUE);
omega = sigma%*%t(sigma)
delta0 = parameters[21];
omegaM<-matrix(0,nrow=nrow(forwardrates),ncol=nrow(forwardrates))
diag(omegaM)=parameters[22]^2
#rlb=parameters[23]
rlb=0.25 # effective lower bound on the short rate (annualized percent)
#Initialization
X1<-matrix(NA,nrow=3,ncol=T+1)
X2<-matrix(NA,nrow=3,ncol=T+1) #Here we assume X1 and X2 are one the SAME time horizon.
X1[,1]=0 #X1 starts at 0.
#Compute aJ and bJ
JJ=seq(1,max(J))
bn=cbind(rhoQ1^JJ,rhoQ2^JJ,JJ*rhoQ2^(JJ-1)) #rho^J in Appendix A.
Bn=rbind(c(1,1,0),bn)
Bn<-apply(Bn,2,cumsum) #Summation of rho^J
Bn<-Bn[-nrow(Bn),] #Summation up to (n-1)
delta1=c(1,1,0)
aJ=c()
# NOTE(review): SigmaJ is initialized here and again below; this first
# assignment is redundant.
SigmaJ=c()
for(i in 1:length(J))
{
aJ[i]<-delta0-0.5*t(Bn[J[i],])%*%((sigma%*%t(sigma))%*%Bn[J[i],])/1200
}
#Compute SigmaJ (conditional s.d. of the n-month-ahead shadow rate).
cn=rbind(c(1,1,0),bn)
cn=cn[-nrow(cn),]
Sigma_step=c()
SigmaJ=c()
for(i in 1:length(J))
{
for(j in 1:Maturity[i])
{
Sigma_step[j]<-t(cn[j,])%*%omega%*%cn[j,]
}
SigmaJ[i]=sqrt(sum(Sigma_step))
}
bJ=cbind(rhoQ1^J,rhoQ2^J,J*rhoQ2^(J-1))
V1<-array(NA,dim=c(3,3,T+1)) #V1 is a 3-D arry of covariance at each time
V2<-array(NA,dim=c(3,3,T+1)) #So is V2.
V1[,,1]=0
diag(V1[,,1])=100
V1[3,3,1]<-V1[3,3,1]/144
loglikvec<-c()
for(i in 1:T)
{
F_h=aJ+bJ%*%X1[,i] #Prediction of forward rate based on Factors.
Z1_temp=(F_h-rlb)/SigmaJ
Z2_temp=rlb+(F_h-rlb)*pnorm(Z1_temp)+SigmaJ*dnorm(Z1_temp)
err=forwardrates[,i]-Z2_temp #Error in prediction.
# Jacobian of the censored observation equation at the predicted state.
H=rep(pnorm(Z1_temp))*bJ
S=H%*%V1[,,i]%*%t(H)+omegaM
if (rcond(S) < 1e-8 | is.na(rcond(S))) return(10^8)
flag=det(S)
if(!is.finite(flag) | (flag<=0)) return(10^9)
InvS=solve(S)
K=V1[,,i]%*%t(H)%*%InvS #Kalman Gain using guessed value of V1
loglikvec[i] = length(J)*log(2*pi) + log(det(S)) + t(err)%*%InvS%*%err
loglikvec[i] = -1/2*loglikvec[i]
X2[,i+1]=X1[,i]+K%*%err #Correct the guess with Kalman Gain.
X1[,i+1]=muP+rhoP%*%X2[,i+1] #Update next value of state variable(X1).
I3=matrix(0,ncol=3,nrow=3)
diag(I3)=1
V2[,,i+1]=(I3-K%*%H)%*%V1[,,i] #Update the covariance matrix.
V1[,,i+1]=rhoP%*%V2[,,i+1]%*%t(rhoP)+sigma%*%t(sigma) #Predict the covariance for next step.
}
llf=sum(loglikvec)
X2<-X2[,-1] #Remove NA column.
SR=t(delta0+matrix(c(1,1,0),nrow=1)%*%X2) #1st column of X2 is NA. Third row is muliply by 0.
Xf=X2 #Factors.
# Select the requested output (shadow rate, fitted rates, or factors).
if(index==1) result=SR
else if(index==2)
{F_shadow = aJ + bJ%*%Xf #Repeat aJ.
z1_temp = (F_shadow - rlb)/SigmaJ
y = rlb + (F_shadow - rlb)*pnorm(z1_temp) + SigmaJ*dnorm(z1_temp)
result=y
}
else result=Xf
return(result)
}
#with Wu and Xia's parameters
par(mfrow=c(1,1))
rateswx=EKF_Shadow_Rate(parameters,1)
plot(rateswx,type='l')
# factorswx=EKF_Shadow_Rate(parameters,0)
# par(mfrow=c(3,1))
# plot(factorswx[1,],type='l')
# plot(factorswx[2,],type='l')
# plot(factorswx[3,],type='l')
#
# #with our optimized parameters (with random starting values)
# par(mfrow=c(1,1))
# optimpara=unlist(test3)[1:22]
# rates=EKF_Shadow_Rate(optimpara,1)
# plot(rates,type='l')
# factors=EKF_Shadow_Rate(optimpara,0)
# par(mfrow=c(3,1))
# plot(factors[1,],type='l')
# plot(factors[2,],type='l')
# plot(factors[3,],type='l')
#Forwardrates Fitting for new Maturities.
# Maturity=seq(1,120)
# forwardrates<-MonthlyForwardRate(Fed,Maturity)
# forwardrates<-t(forwardrates)
# forwardrates_fitted<-EKF_Shadow_Rate(parameters_2,2)
# selected_month<-c(3,6,12,24,60,84,120)
# fr_dot<-rowMeans(forwardrates[,264:276])[selected_month]
# plot(seq(0,10,length.out=120),rowMeans(forwardrates_fitted[,264:276]),type='l',lwd=3,main="Fitted Average Forward Rate Curve in 2012",xlab="Maturity/month",ylab="Forward Rate/%")
# points(selected_month/12,fr_dot,pch=8,cex=2)
|
67fb061159f72cb3c550959d7691ebe6ce824863
|
2224d28c707fe7f0df84f02fcf0f486e909f150e
|
/MultipleRegression.R
|
989ebffbd0382dccef5f75d3691bc451d49da43e
|
[
"MIT"
] |
permissive
|
ak202/CHAAHK-thesis-analysis
|
932669abd116378390461a490ba10e4a4f3e996c
|
96f9c4143e41346ba4db23db7adfaa9c0ab5b519
|
refs/heads/master
| 2020-08-01T10:54:42.018001
| 2019-09-26T22:46:01
| 2019-09-26T22:46:01
| 210,974,753
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,416
|
r
|
MultipleRegression.R
|
# Multiple regression of the four CHAAHK outcome measures on the nine
# model parameters.  Reads output/<run>.csv (run name taken from the
# command line) and writes three CSV tables (t-values, slopes, R^2)
# under <run>/tables/.
#
# Refactor vs. original: four copy-pasted lm() blocks collapsed into one
# fit per response plus a shared table-writing helper; output files are
# byte-identical to the originals.
setwd("/home/akara/workspace/R/CHAAHK/")
argss = commandArgs(trailingOnly = TRUE)
data <- read.csv(paste("output/", argss, ".csv", sep=""))
# The nine predictors shared by all four regressions.
predictors <- c("disturbanceRemovalChance",
                "fecundityPromotiveIncRate", "fecundityPromotiveRes",
                "fecundityDemotiveIncRate", "fecundityDemotiveRes",
                "costPromotiveIncRate", "costPromotiveRes",
                "costDemotiveIncRate", "costDemotiveRes")
# Response columns, named by the short label used as the CSV column header.
responses <- c(MaxPop = "MaxPop",
               MinPop = "MinPop",
               FinalPop = "FinalPop",
               SLG = "SustainabilityOrLackOfGrowth")
# One lm fit per response, all with the identical right-hand side.
fits <- lapply(responses, function(y) lm(reformulate(predictors, y), data = data))
# Human-readable row labels and their LaTeX symbols (the leading empty
# symbol pairs with the intercept row).
rnames <- c("Disturbance Removal Chance", "Fecundity Promotive Increase Rate", "Fecundity Promotive Resilience", "Fecundity Demotive Increase Rate", "Fecundity Demotive Resilience", "Cost Promotive Increase Rate", "Cost Promotive Resilience", "Cost Demotive Increase Rate", "Cost Demotive Resilience")
Symbol <- c("","$Drc$", "$F_{p}i$", "$F_{p}r$", "$F_{d}i$", "$F_{d}r$", "$C_{p}i$", "$C_{p}r$", "$C_{d}i$", "$C_{d}r$")
# Extract one summary-coefficient column ("t value" or "Estimate") from
# every fit, round to 3 decimals, label rows, and write <run>/tables/<file>.
write_coef_table <- function(stat, file) {
  tab <- data.frame(lapply(fits, function(f) summary(f)$coef[, stat]))
  tab <- round(tab, 3)
  rownames(tab) <- c("Intercept", rnames)
  tab <- data.frame(Symbol, tab)
  write.csv(tab, paste(argss, "/tables/", file, sep=""), quote=FALSE)
}
write_coef_table("t value", "tvals.csv")
write_coef_table("Estimate", "slopes.csv")
# R^2 table: one row per response, single "$R^2$" column.
Rsquare.all <- t(data.frame(lapply(fits, function(f) summary(f)$r.squared)))
Rsquare.all <- round(Rsquare.all, 3)
colnames(Rsquare.all) <- "$R^2$"
write.csv(Rsquare.all, paste(argss, "/tables/Rsquares.csv", sep=""), quote=FALSE)
|
082fada92bd96e6ec8d1787002cd2cd1398aeed5
|
1e92e8308d62de17938b5b784ae14368b0e888d7
|
/R/utils_posterior.R
|
c12c6264290b18da987310bfb6d25c631e3b02e4
|
[] |
no_license
|
cran/bayestestR
|
fa6e437695dc4d5657937bedfea594891c6ceda7
|
053d5208164a7eccb0edff2752c83e3a99c8f69d
|
refs/heads/master
| 2023-04-15T11:20:25.132889
| 2023-04-07T14:20:02
| 2023-04-07T14:20:02
| 200,848,742
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 900
|
r
|
utils_posterior.R
|
# helper ------------------------------
# Coerce various posterior-draws representations to a plain data frame
# of draws (one column per parameter), dropping posterior's bookkeeping
# columns.  Internal S3 generic.
.posterior_draws_to_df <- function(x) {
  UseMethod(".posterior_draws_to_df")
}

# Fallback: unsupported classes raise an informative error.
.posterior_draws_to_df.default <- function(x) {
  # BUG FIX: the original used paste0() with a "%s" placeholder, which
  # is never substituted; sprintf() performs the substitution.
  insight::format_error(sprintf("Objects of class `%s` are not yet supported.", class(x)[1]))
}

# Data frames are already in the right shape.
.posterior_draws_to_df.data.frame <- function(x) {
  x
}

# posterior::draws_df: convert, then drop the .chain / .iteration /
# .draw metadata columns.
.posterior_draws_to_df.draws_df <- function(x) {
  insight::check_if_installed("posterior")
  datawizard::data_remove(as.data.frame(posterior::as_draws_df(x)), c(".chain", ".iteration", ".draw"))
}

# All other posterior draws formats funnel through the draws_df method.
.posterior_draws_to_df.draws_matrix <- .posterior_draws_to_df.draws_df
.posterior_draws_to_df.draws_array <- .posterior_draws_to_df.draws_df
.posterior_draws_to_df.draws_list <- .posterior_draws_to_df.draws_df
.posterior_draws_to_df.draws_rvars <- .posterior_draws_to_df.draws_df
.posterior_draws_to_df.rvar <- .posterior_draws_to_df.draws_df
|
7c02896c850d1ab4770b3ca43d5a774a929c62be
|
8c3cf30becb570aaab6d2c7b0273d02d0e2737a6
|
/R/projection.mat.R
|
f7dd95db2a533b840f8c68e34b712972189d30e6
|
[] |
no_license
|
yfyang86/paftoga
|
357d124b6e09e9ecd3e1a01c4bebfc3646194091
|
20470687b4f36065975dfc61381f15b18d9b7103
|
refs/heads/master
| 2021-06-22T23:49:19.852806
| 2016-11-27T16:07:51
| 2016-11-27T16:07:51
| 19,086,047
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 297
|
r
|
projection.mat.R
|
# Project X onto the column space spanned by LinearSys.
#
#   X         : numeric matrix (or vector) to be decomposed.
#   LinearSys : matrix whose columns define the subspace.
#   mod       : if 1 (default), LinearSys is first reduced via its QR
#               decomposition to a rank-sized triangular block.
# Returns a list with `projection` (the component of X in the subspace)
# and `orth` (the orthogonal residual, i.e. lm(X ~ Y)$residuals).
#
# NOTE(review): when mod != 1, `Y.qr` is never assigned, so the line
# indexing `Y.qr$rank` fails with "object 'Y.qr' not found" -- confirm
# callers always use mod = 1.  Also t(YY) %*% YY is singular whenever
# the QR rank < ncol(Y), so solve() fails for rank-deficient systems.
projection.mat <-
function(X,LinearSys,mod=1){
Y=as.matrix(LinearSys)
YY=matrix(0,nrow=nrow(Y),ncol=ncol(Y))
if (mod==1) {Y.qr=qr(Y);Y=qr.R(Y.qr)[,1:(Y.qr$rank)];}
YY[1:(Y.qr$rank),1:(Y.qr$rank)]=Y
tmp=YY%*%solve(t(YY)%*%YY)%*%t(YY)%*%X
list(projection=tmp,orth=X-tmp)#lm(X~Y)$residual...
}
|
2ad15f9546166d79a4891f5a0def5ea002de25b1
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/osmdata/examples/osm_lines.Rd.R
|
cdfe6048adcea2b41039e3240f67d82b8006dba5
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 755
|
r
|
osm_lines.Rd.R
|
library(osmdata)
### Name: osm_lines
### Title: Extract all 'osm_lines' from an osmdata object
### Aliases: osm_lines
### ** Examples
## Not run:
##D dat <- opq ("hengelo nl") %>% add_osm_feature (key="highway") %>%
##D osmdata_sf ()
##D bus <- dat$osm_points [which (dat$osm_points$highway == 'bus_stop'),] %>%
##D rownames () # all OSM IDs of bus stops
##D osm_lines (dat, bus) # all highways containing bus stops
##D
##D # All lines which intersect with Piccadilly Circus in London, UK
##D dat <- opq ("Fitzrovia London") %>% add_osm_feature (key="highway") %>%
##D osmdata_sf ()
##D i <- which (dat$osm_polygons$name == "Piccadilly Circus")
##D id <- rownames (dat$osm_polygons [i,])
##D osm_lines (dat, id)
## End(Not run)
|
d9c4d6c117720b29d28e05502f7d2097def050cb
|
bd0ff0492d12431e88ae8b1d4a471c15ab26e76b
|
/R/conditionalprob.JGR.R
|
85b5c4a9abea5c4d87fe648e01d55c09e2c90675
|
[] |
no_license
|
cran/CADStat
|
b01fe3c32c4e7ab24a592c744e259e6ed67aa6d4
|
2c8110fb89a8b7c9706359c00d12ccae399f276f
|
refs/heads/master
| 2021-01-18T14:20:35.989297
| 2017-05-10T01:09:22
| 2017-05-10T01:09:22
| 17,717,270
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,150
|
r
|
conditionalprob.JGR.R
|
#' Conditional probability plot (JGR dialog entry point)
#'
#' Computes and plots P(response meets the impairment condition | stressor
#' beyond a moving cutoff Xc), with optional weighted-bootstrap confidence
#' bands, after optionally subsetting the input data.
#'
#' @param my.data data.frame holding the stressor/response columns
#' @param x,y column names of the stressor and response
#' @param weights optional column name of observation weights (defaults
#'   to equal weights of 1)
#' @param cond.val impairment cutoff applied to the response
#' @param cond.val.direction "lt"/"gt"-style string: side of cond.val
#'   that counts as impaired
#' @param prob.direction "gte"/"lte"-style string: side of Xc defining
#'   the conditioning set
#' @param alpha level of the bootstrap band; R number of bootstrap
#'   replicates (NULL disables the bands)
#' @param subset1.name,subset1.val,subset2.name,subset2.val optional
#'   subsets applied before analysis (see gisdt.subset)
#' @param browserResults if TRUE, render to a PNG plus an HTML results
#'   page instead of an interactive JavaGD device
#' @return invisible NULL; called for its plotting side effects
#' @export
conditionalprob.JGR = function(my.data, x, y, weights=NULL,
                               cond.val,cond.val.direction,prob.direction,
                               alpha=0.05,R=100,
                               subset1.name=NULL,subset1.val=NULL,
                               subset2.name=NULL,subset2.val=NULL,
                               main="Conditional Probability Plot", xlab="", ylab="",
                               pch=20, col="black", cex.main=1, cex.lab=1,
                               cex=1, cex.axis=1, browserResults=FALSE, ...)
{
  ### CADStat Conditional probability function
  ### modeled after:
  ### Hollister, J.W., H.A. Walker, J.F. Paul. (In Press)
  ### CProb: A Computational Tool for Conducting Conditional Probability Analysis.
  ### Journal of Environmental Quality
  ### Testing the function
  ### embed=runif(50,0,100)
  ### ept=as.integer(30-(embed+rnorm(50,0,10))/4)
  ### test.data=data.frame(cbind(ept,embed))
  ### conditionalprob.JGR(my.data=condprob.data,x="percfines",y="epttaxa",weights="Weights",cond.val=9,cond.val.direction="lt",prob.direction="gte",xlab="% embedeness")
  ### computes and plots conditional probabilities using the JGR dialog box gui input
  ### my.data      data.frame
  ### x            column name containing the stressor
  ### y            column name containing the response
  ### cond.val     conditioning impairment value
  ### cond.val.direction  le or gt the impairment value
  ### prob.direction
  ### main         plot title
  ### xlab         x-axis label
  ### ylab         y-axis label
  ### pch          plotting character
  ### col          plotting color for the data points
  ### cex.main     magnification of the plot title
  ### cex.lab      magnification of the axes labels
  ### cex          magnification of the plotted points and lines
  ### cex.axis     magnification of the axes
  ### ...          optional paramters passed to "plot"
  #get place to store results
  if (browserResults) resultLocation = genResultSpace()
  #data subsetting (also drops rows with NA in x, y, or weights)
  my.data = gisdt.subset(my.data,
                         subset1.name=subset1.name, subset1.val=subset1.val,
                         subset2.name=subset2.name, subset2.val=subset2.val,
                         na.check = c(x,y,weights))
  n = nrow(my.data)
  # TRUE when the conditioning set is "stressor >= Xc" (direction string
  # contains 'g' or '>'); FALSE for "stressor <= Xc".
  prob.direction.logical = regexpr("g|>",prob.direction)>0
  xunique = sort(unique(my.data[,x]), decreasing = !prob.direction.logical)
  ord = order(my.data[,x],my.data[,y],decreasing=!prob.direction.logical)
  my.data = my.data[ord,]
  ord = order(my.data[,x])
  # Default to equal weights when no weight column was supplied.
  if(is.null(weights)) {
    weights = "weights"
    my.data$weights = 1
  }
  # Point estimate of the conditional probability at every cutoff.
  condprob = condprob.fn(response=my.data[,y],wts=my.data[,weights],
                         cond.val=cond.val,cond.val.direction=cond.val.direction,
                         stressor=my.data[,x], xunique = xunique,
                         p.direct = prob.direction.logical)
  # Weighted bootstrap for pointwise confidence bands (skipped if R=NULL).
  if (! is.null(R)) {
    #  if(!is.null(R) & !(coef(lm(condprob~xunique))[[2]]<0 &
    #                      !prob.direction.logical) ){
    #  condprod.boot = matrix(NA,R, n)
    condprod.boot = matrix(NA,R,length(xunique))
    for(r in 1:R){
      isamp = sample(1:nrow(my.data), replace = TRUE, prob=my.data[, weights])
      my.data.resamp = my.data[isamp, ]
      #    sampler = resample(c(0,1),n,replace=TRUE)
      #    fudge = as.numeric(any(my.data[,y]==0))
      #    response = condprob.impute((my.data[,y]+fudge)*sampler,c(1:n),
      #                               prob.direction.logical)
      #    condprod.boot[r,] = condprob.fn(response=(response-fudge),
      #                                    wts=my.data[,weights],ord=ord,
      #                                    cond.val=cond.val,
      #                                    cond.val.direction=cond.val.direction)
      condprod.boot[r,] = condprob.fn(response=my.data.resamp[, y],
                                      wts=my.data.resamp[,weights],
                                      cond.val=cond.val,
                                      cond.val.direction=cond.val.direction,
                                      stressor = my.data.resamp[,x],
                                      xunique = xunique,
                                      p.direct = prob.direction.logical)
    }
  }
  # Output device: PNG file for browser results, JavaGD window otherwise.
  if(browserResults){
    # DELETE THESE 3 CATS WHEN DONE TESTING.
    #cat(browserResults,"\n")
    #cat(resultLocation,"\n")
    #cat(file.path(resultLocation,paste(main,".png",sep="")),"\n")
    png(filename=file.path(resultLocation,paste(main,".png",sep="")),width=600,height=600)
  } else {
    JavaGD(name="Conditional Probability Plot", width=600, height=500, ps=14)
    #par(mfrow=c(1,3))
    #plot(my.data[,x],my.data[,y],xlab=xlab, ylab=ylab, pch=pch, col=col,
    #     main="Scatterplot",cex.lab=cex.lab, cex=cex, cex.axis=cex.axis,las=1)
    #plot(my.data[,x],my.data[,y],xlab=xlab, ylab=ylab, pch=pch, col=col,
    #     main="Cumulative Distribution Function",cex.lab=cex.lab, cex=cex, cex.axis=cex.axis,las=1)
  }
  par(mar=c(5,6,4,2)+0.1)
  plot(xunique,condprob,ylim=c(0,1),
       ylab=paste("Probability of",ylab,cond.val.direction,cond.val,"if X",prob.direction,"Xc"),
       xlab=paste("Xc,",xlab),pch=pch, col=col,
       main=main,cex.lab=cex.lab, cex=cex, cex.axis=cex.axis,las=1)
  # Grey lines: upper/lower bootstrap quantiles at the requested alpha.
  if (! is.null(R)) {
    #  if(!is.null(R) & !(!(coef(lm(condprob~xunique))[[2]]>0) &
    #                      !prob.direction.logical) ){
    points(xunique,(apply(condprod.boot,2,quantile,1-as.numeric(alpha),
                          na.rm = TRUE)),col="grey",type="l")
    points(xunique,(apply(condprod.boot,2,quantile,as.numeric(alpha),
                          na.rm = TRUE)),col="grey",type="l")
  }
  #  if( !is.null(R) & coef(lm(condprob~xunique))[[2]]<0 & !prob.direction.logical ){
  #    par(lheight=1.5)
  #    text(median(xunique),y=0.2,cex=1.25,pos=4,"Confidence intervals are not calculated for\nProbability Direction of '<=' and a decreasing\nstressor-response relationship.")
  #  }
  grid()
  # For browser output, close the device and emit the results page.
  if(browserResults){
    dev.off()
    page.text = paste("The conditional probability plot gives the probability of ",y," < ", cond.val," as a function of ",x,sep="")
    buildresultsXML(title=main,location=resultLocation,text=page.text)
  }
  return(invisible())
}
# Conditional probability of the response exceeding (or falling below) a
# criterion value, evaluated at each unique stressor cutoff.
#
# For each cutoff xc in `xunique`, computes the weighted proportion of
# observations whose stressor is >= xc (when `p.direct` is TRUE) or <= xc
# (otherwise) and whose response is beyond `cond.val` in the direction given
# by `cond.val.direction`.
#
# response            numeric response values
# wts                 observation weights; NULL means equal weights
# cond.val            criterion value the response is compared against
# cond.val.direction  string; matching "l" or "<" means a "less than"
#                     criterion, anything else means "greater than"
# stressor            stressor value for each observation
# xunique             cutoff values at which the probability is evaluated
# p.direct            TRUE: condition on stressor >= cutoff;
#                     FALSE: condition on stressor <= cutoff
#
# Returns a numeric vector the same length as `xunique`.  Entries are NaN
# where no observation satisfies the stressor condition (0/0 division).
condprob.fn = function(response, wts, cond.val, cond.val.direction,
                       stressor, xunique, p.direct) {
  n = length(response)
  nunique = length(xunique)
  Num = numeric(nunique)
  Denom = numeric(nunique)
  # Select the response-side comparison once, instead of the original
  # expression()/eval() construction (identical semantics, clearer code).
  resp.cmp = if (regexpr("l|<", cond.val.direction) > 0) `<` else `>`
  if (is.null(wts)) wts = rep(1, n)
  # Stressor-side comparison depends on the probability direction.
  stress.cmp = if (p.direct) `>=` else `<=`
  for (i in seq_len(nunique)) {
    incvec = stress.cmp(stressor, xunique[i])
    # Double indexing (wts[incvec][...]) reproduces the original's NA
    # propagation behavior exactly.
    Num[i] = sum(wts[incvec][resp.cmp(response[incvec], cond.val)])
    Denom[i] = sum(wts[incvec])
  }
  Num / Denom
}
# if(regexpr("l|<",cond.val.direction)>0) {
# w.expr = expression(wts[i:n][response[i:n] < cond.val])
# } else {
# w.expr = expression(wts[i:n][response[i:n] > cond.val])
# }
# Num = numeric(n)
# Denom = numeric(n)
# for(i in ord){
# Num[i] = sum(eval(w.expr))
# Denom[i] = sum(wts[i:n])
# }
# Num/Denom
#}
# Impute zero entries of a response vector by carrying nonzero values along
# an ordering variable.
#
# x               response vector, with 0 marking entries to impute
# o               ordering variable (presumably the stressor — confirm)
# exceed.logical  passed to order(decreasing=); TRUE sorts o descending
#
# Each zero entry whose order-rank exceeds that of a nonzero observation
# inherits that observation's value (later nonzeros overwrite earlier ones);
# remaining zeros below the last nonzero's rank get the last nonzero value.
# NOTE(review): assumes x has at least one nonzero entry, otherwise n == 0
# and 1:n evaluates to c(1, 0) — confirm callers guarantee this.
condprob.impute = function(x,o,exceed.logical){
  ord = order(o,decreasing=exceed.logical)
  # responses re-arranged along the chosen direction of o
  response = x[order(o,decreasing=exceed.logical)]
  response.nonzero = response[response!=0]
  ord.nonzero = ord[response!=0]
  n = length(response.nonzero)
  for(i in 1:n){
    # zeros ranked after the i-th nonzero inherit its value
    response[ord > ord.nonzero[i] & response==0] = response.nonzero[i]
  }
  # fill zeros still ranked before the last nonzero with the final value
  response[ord < ord.nonzero[n] & response==0] = response.nonzero[n]
  # NOTE(review): this re-applies order() rather than its inverse
  # (order(order(...))); verify this re-permutation matches what callers
  # expect before relying on element positions of the result.
  response[order(o,decreasing=exceed.logical)]
}
# Safe wrapper around sample(): a length-one numeric x is returned as-is
# instead of being expanded to 1:x (the documented sample() scalar surprise),
# except that an explicitly requested size of 0 yields an empty selection.
# Additional arguments (replace, prob, ...) are forwarded to sample().
resample = function(x, size, ...) {
  if (length(x) > 1) {
    # normal case: delegate directly (a missing `size` stays missing,
    # so sample() applies its own default of length(x))
    return(sample(x, size, ...))
  }
  if (!missing(size) && size == 0) {
    x[FALSE]  # empty vector of the same type as x
  } else {
    x
  }
}
|
a10c9a97e5fd12a8b66fdbe2017bb6da4b6f2fc6
|
ea18769d30047829fa313ab88c345c379718122e
|
/omlTuneBenchRLocal/R/get_available_tasks.R
|
f6a71a7b62fa6582f981fff775a29fcd06203070
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
jakob-r/lookup-server
|
95e894661f61a28b2cc36e94bc716c5581b0aac6
|
72464e0f14296cdf17328d1eb6ff4b4db90cc470
|
refs/heads/master
| 2021-10-24T09:37:23.535650
| 2019-03-04T10:12:26
| 2019-03-04T10:12:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 581
|
r
|
get_available_tasks.R
|
#' Returns the task_ids, which are saved in the database and can be queried.
#'
#' @param restrict [logical]
#'   If TRUE, only return task ids flagged as appearing in the paper:
#'   Automatic Exploration of Machine Learning Experiments on OpenML,
#'   Daniel Kuehn, Philipp Probst, Janek Thomas, Bernd Bischl,
#'   https://arxiv.org/pdf/1806.10961.pdf
#' @return A vector of task_ids.
#' @export
get_available_tasks = function(restrict = FALSE) {
  # data.table-style filter on the packaged task metadata table
  ids = if (restrict) {
    task_metadata[get("data_in_paper") == TRUE, ]$task_id
  } else {
    task_metadata$task_id
  }
  unique(ids)
}
|
f58bde9e36d03f36564c07b6d4ba9e873f591e07
|
32a987f755b13c118a42c64f032135f240bacc06
|
/R/annotation.summary.R
|
393f8c8880627972040ae1eccb645215c25d5719
|
[] |
no_license
|
sneumann/RAMClustR
|
b1183f2faec40b3fa9466bbb74f89724bbad0cc3
|
40fc289ecf0f7a1fb9700fdb9a5df948888c8c53
|
refs/heads/master
| 2021-01-15T16:56:41.528502
| 2018-06-22T13:00:12
| 2018-06-22T13:00:12
| 13,795,865
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,620
|
r
|
annotation.summary.R
|
#' annotation.summary()
#'
#' Write a .csv file containing a summary of the annotations in the ramclustR object.
#' @param ramclustObj R object - the ramclustR object which was used to write the .mat or .msp files
#' @param outfile file path/name of output csv summary file. if NULL (default) will be exported to spectra/annotationSummary.csv under the current working directory
#' @details this function exports a csv file summarizing annotation evidence for each compound; optional columns are only included when the corresponding slot exists on the ramclustR object
#' @return nothing
#' @references Broeckling CD, Afsar FA, Neumann S, Ben-Hur A, Prenni JE. RAMClust: a novel feature clustering method enables spectral-matching-based annotation for metabolomics data. Anal Chem. 2014 Jul 15;86(14):6812-7. doi: 10.1021/ac501530d. Epub 2014 Jun 26. PubMed PMID: 24927477.
#' @keywords 'ramclustR' 'RAMClustR', 'ramclustR', 'metabolomics', 'mass spectrometry'
#' @author Corey Broeckling
#' @export
annotation.summary<-function(ramclustObj = RC,
outfile = NULL
) {
# ensure the output directory exists before writing
if(!is.null(outfile)) {
# NOTE(review): `f` is assigned but never used afterwards
f<-basename(outfile)
p<-dirname(outfile)
if(!dir.exists(p)) {
dir.create(p)
}
} else {
outfile <- paste0(getwd(), "/spectra/annotationSummary.csv")
}
# base columns present on every ramclustR object; names with spaces are
# mangled by data.frame()'s check.names (e.g. "median signal" ->
# median.signal) and that mangled form is what appears in the CSV header
out<- data.frame("cmpd" = ramclustObj$cmpd,
"rt" = ramclustObj$clrt,
"annotation" = ramclustObj$ann,
"ann.confidence" = ramclustObj$annconf,
"median signal" = as.vector(apply(ramclustObj$SpecAbund, 2, "median")))
# optional columns: append only when the slot exists on the object
if(any(names(ramclustObj) == "M")) {
out<- data.frame(out, "inferred M" = ramclustObj$M)
}
if(any(names(ramclustObj) == "zmax")) {
out<- data.frame(out, "zmax" = ramclustObj$zmax)
}
if(any(names(ramclustObj) == "msfinder.formula")) {
out<- data.frame(out, "inferred formula" = ramclustObj$msfinder.formula)
}
if(any(names(ramclustObj) == "inchikey")) {
out<- data.frame(out, "inchikey" = ramclustObj$inchikey)
}
if(any(names(ramclustObj) == "inchi")) {
out<- data.frame(out, "inchi" = ramclustObj$inchi)
}
if(any(names(ramclustObj) == "synonyms")) {
# collapse each compound's synonym list into one " __ "-delimited string
out<- data.frame(out, "synonyms" = sapply(1:length(ramclustObj$synonyms),
FUN = function(x) {
paste(ramclustObj$synonyms[[x]], collapse = " __ ")
}))
}
if(any(names(ramclustObj) == "classyfire")) {
# classyfire is presumably already a data.frame of columns — confirm
out<- data.frame(out, ramclustObj$classyfire)
}
write.csv(out, file = outfile, row.names = FALSE)
}
|
813da72bdda15d48f665b147a1618adfc10c59f6
|
ee6c0637beb2ea64c1a9c2968068270a415fd08d
|
/tests_private/adp_rdi_01.R
|
e3498ea60af8eca278b4ecd4899530375fd8e66a
|
[] |
no_license
|
pablovaldes/oce
|
b4dca22f110ebbdabd1ddf8cf388cdc356d169c2
|
a93f72ed925a2fe00e4e5651c0fa0c4da70d147c
|
refs/heads/develop
| 2020-07-27T03:47:48.025749
| 2018-01-18T22:06:30
| 2018-01-18T22:06:30
| 73,704,563
| 0
| 0
| null | 2018-01-19T11:25:29
| 2016-11-14T12:55:59
|
R
|
UTF-8
|
R
| false
| false
| 500
|
r
|
adp_rdi_01.R
|
library(oce)
# Batch sanity check: walk a directory of test files and attempt to read
# every file that oceMagic() identifies as an RDI ADP file.
# Local path to the test-file collection; adjust for your machine.
path <- "~/Dropbox/oce-working-notes/tests/adp-rdi"
files <- list.files(path, "*", recursive=TRUE, full.names=TRUE)
n <- 0  # count of files successfully identified and read as adp/rdi
for (file in files) {
    # try() keeps the loop going even if one file fails to read
    try({
        cat("\nfile '", file, "' ", sep="")
        if ("adp/rdi" == oceMagic(file)) {
            cat("is adp/rdi\n")
            # read.oce() result is kept only so a read failure is visible;
            # d is overwritten each iteration
            d <- read.oce(file)
            n <- n + 1
        } else {
            cat("is not adp/rdi, so it is being skipped\n")
        }
    })
}
cat("Successfully checked", n, "ADP files in", path, "\n")
|
3ac01c0f24fddddbd5cdfe99372311d39cfa3e40
|
28a069ebaae78533c6de3ce38ae16b0665e4bec5
|
/src/scripts/segment.R
|
8f382b890df9b9a1e1439af9ec73995c21a3b0d9
|
[] |
no_license
|
baliga-lab/gaggle-genomebrowser
|
6bd102ea2c3ebe0897dc9ab94e6f24889a67eebf
|
bda2de1fa7374e7ab329355dfd5e67ba1a91f97f
|
refs/heads/master
| 2016-09-09T20:49:16.918358
| 2013-11-06T18:48:05
| 2013-11-06T18:48:05
| 6,000,905
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,611
|
r
|
segment.R
|
# Compute segment tracks from expression data.
# Finds the average value over probes whose (central) position is within the
# segment boundaries. Returns a data frame with columns
# (sequence, strand, start, end, value).
#
# To be used with the data file: tilingArraySeg_mm_export.RData
# Not complete for general use.
#
# parameters:
# df:     data.frame in the form of refs or rats, with columns
#           $ GENE_EXPR_OPTION: Factor w/ 2 levels "FORWARD","REVERSE"
#           $ POSITION        : int
#           $ SEQ_ID          : Factor (e.g. "NC_002754","X07234")
#         plus some value column to be named in the parameter 'col'
# breaks: positions of segment boundaries, indexed first by the sequence
#         *name* then by "REVERSE"/"FORWARD", e.g.
#           breaks$Chr$REVERSE -> numeric vector of break positions
# seqs:   names, ids and lengths of sequences, e.g.
#           list( list('Chr', 'NC_002754', 2992245),
#                 list('SSV1', 'X07234', 15465) )
#         where the name indexes `breaks` and the id matches df$SEQ_ID
# col:    the name of the value column to be averaged over
#
# Note that strands are assumed to be reversed in df and breaks: REVERSE
# breaks produce '+' output rows and FORWARD breaks produce '-' rows.
# Segments with no probes yield NaN (mean of an empty vector).
segment <- function(df, breaks, seqs, col='value') {
  # Average `col` over probes of one sequence/strand within each segment
  # [start, end).  expr.option is the (reversed) label in GENE_EXPR_OPTION;
  # out.strand is the strand symbol written to the output rows.
  strand_means <- function(seq.name, seq.id, seq.len, expr.option, out.strand) {
    starts <- c(0, breaks[[seq.name]][[expr.option]])
    ends <- c(breaks[[seq.name]][[expr.option]], seq.len)
    cat('len', out.strand, '=', length(starts), '\n')
    values <- numeric(length(starts))  # preallocate (was grown in a loop)
    for (i in seq_along(starts)) {
      # which() drops NA conditions, matching subset()'s row filtering
      keep <- which(df$SEQ_ID == seq.id &
                    df$POSITION >= starts[i] & df$POSITION < ends[i] &
                    df$GENE_EXPR_OPTION == expr.option)
      values[i] <- mean(df[keep, col])
    }
    data.frame(sequence=seq.name, strand=out.strand,
               start=starts, end=ends, value=values)
  }
  result = list()
  for (seq in seqs) {
    seq.name = seq[[1]]
    seq.id = seq[[2]]
    seq.len = seq[[3]]
    cat('name=', seq.name, ', id=',seq.id,', len=',seq.len, '\n')
    # REVERSE probes/breaks -> '+' rows; FORWARD -> '-' rows (see header)
    result[[length(result)+1]] = strand_means(seq.name, seq.id, seq.len,
                                              'REVERSE', '+')
    result[[length(result)+1]] = strand_means(seq.name, seq.id, seq.len,
                                              'FORWARD', '-')
  }
  return( do.call("rbind", result) )
}
|
9d7bd3008c23a8f5f1252ab18cb1bf38e9e0f83b
|
ee0689132c92cf0ea3e82c65b20f85a2d6127bb8
|
/52-CART/c-dt-rpart-sales1.R
|
74239082524dba512c6b22b2e5656ba6cb7db942
|
[] |
no_license
|
DUanalytics/rAnalytics
|
f98d34d324e1611c8c0924fbd499a5fdac0e0911
|
07242250a702631c0d6a31d3ad8568daf9256099
|
refs/heads/master
| 2023-08-08T14:48:13.210501
| 2023-07-30T12:27:26
| 2023-07-30T12:27:26
| 201,704,509
| 203
| 29
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,228
|
r
|
c-dt-rpart-sales1.R
|
# CART Models - Regression Trees - Merchent Sales
#Predict the mean sales from the data; create decision tree
# Interactive analysis script: bare expressions (e.g. `CartModel`) are for
# console inspection and have no effect when sourced non-interactively.
library(rpart) # CART algo
library(rpart.plot)# visualising decision tree
library(forecast)# calculating accuracy
# skip=1 drops a header/title row above the real column names
msales = read.csv(file = "./data/msales.csv", skip=1, header = T)
dim(msales)
# Summarize the dataset
summary(msales)
str(msales)
# See Summary values of sales
aggregate(msales$sales,by=list(msales$zone), FUN = mean)
# Random Sampling : Train and Test
set.seed(777) # To ensure reproducibility
Index = sample(x = 1:nrow(msales), size = 0.7*nrow(msales))
Index
length(Index)
#divide data into 2 parts - Train (70%), Test(30%)
# Create Train dataset
train = msales[Index, ]
nrow(train); dim(train)
# Create Test dataset
test = msales[-Index, ]
nrow(test); dim(test)
#### Modeling ############
# Build a full model with default settings
set.seed(123)
# train[,-1] drops the first column (presumably an id column — confirm)
CartModel=rpart(sales ~ . ,data=train[,-1], method="anova")
CartModel
summary(CartModel)
# Plot the Regression Tree
rpart.plot(CartModel,type=4,fallen.leaves=T,nn=T, cex=1)
mean(msales$sales)
printcp(CartModel)
rsq.rpart(CartModel) #only for Regression Tree
# prune back to complexity parameter 0.014
prune1 = prune(CartModel, cp=.014)
prune1
rpart.plot(prune1, nn=T, cex=1)
#### Using CP to expand / Prune the tree ###
#Start fresh Model
# lower cp grows a deeper tree than the default (cp = 0.01)
RpartControl = rpart.control(cp = 0.005)
set.seed(123)
# NOTE(review): this model is fit on ALL of msales, yet is evaluated on
# `test` below — the test rows are in its training data, so its accuracy
# is optimistic relative to CartModel.
CartModel_1 = rpart(sales ~ ., data = msales[,-1],
method = "anova", control = RpartControl)
CartModel_1
CartModel_1$where
msales[987,'sales']
CartModel_1$frame
# terminal-node label for each training observation
trainingnodes = rownames(CartModel_1$frame) [ CartModel_1$where]
trainingnodes
summary(CartModel_1)
rpart.plot(CartModel_1, type = 4,cex = 0.6)
printcp(CartModel_1)
rsq.rpart(CartModel_1)
#Validation / Test Accuracy
PredictTest=predict(CartModel,newdata=test,type="vector")
PredictTest1=predict(CartModel_1,newdata=test,type="vector")
PredictTest1
length(PredictTest)
msales[2,'sales']
cbind(test$sales, PredictTest, PredictTest1)
# Calculate RMSE and MAPE
library(forecast)
# Validate RMSE and MAPE calculation with a function in R
length(train$sales)
ModelAccuracy = accuracy(PredictTest,test$sales)
ModelAccuracy1 = accuracy(PredictTest1,test$sales)
rbind(ModelAccuracy,ModelAccuracy1)
#select model with least error
|
4673ec695f160900fcc1a826ea74ede45c8cea6f
|
74df9ce87872f43ff6836563cd8019eb9b95f5b0
|
/3_predictions/src/nc_utils.R
|
d273549e8869b4bd4215eb711a507986ca0b8dc5
|
[] |
no_license
|
USGS-R/delaware-model-prep
|
017f0d9f727d5d5b4449cd69758c4b32f12860ed
|
45e1ffeee7d6ea4a95e374e16cbc1196bf703f41
|
refs/heads/main
| 2023-06-08T19:38:46.764070
| 2023-06-01T23:56:42
| 2023-06-01T23:56:42
| 202,405,091
| 2
| 14
| null | 2023-04-07T23:28:32
| 2019-08-14T18:31:12
|
R
|
UTF-8
|
R
| false
| false
| 32,875
|
r
|
nc_utils.R
|
#' Create the output netCDF file, gather PRMS-SNTemp output, and write one
#' ensemble member's results into the file.
#'
#' @param nc_name_out name of the netcdf file to be created and store PRMS-SNTemp output
#' @param model_run_loc location of the PRMS-SNTemp model run
#' @param model_output_file file path to the PRMS-SNTemp variable output
#' @param model_locations_file file path to the rds file that has PRMS model idx and seg_id_nat
#' @param dynamic_vars vector of time-varying variables to store from the model run
#' @param static_vars vector of static segment-characteristic variables to store
#' @param vars_desc description of the variables used to document the netcdf file
#' @param n the ensemble member to be stored
#' @param n_en total number of ensemble members
#' @param dates vector of dates of the model run
#' @param project_id string of the model project id
#' @return the netcdf file name, so downstream targets can depend on it
#'
nc_store_output <- function(nc_name_out,
                            model_run_loc,
                            model_output_file,
                            model_locations_file,
                            dynamic_vars,
                            static_vars,
                            vars_desc,
                            n,
                            n_en,
                            dates,
                            project_id){
  all_vars = c(dynamic_vars, static_vars)
  # Step 1: (re)create the netCDF file sized for all ensembles.  Might have
  # to move this out of this function if we want to store multiple
  # PRMS-SNTemp ensembles in one nc file.
  nc_create_pb_out(model_locations_file,
                   n_en,
                   dates,
                   project_id,
                   all_vars,
                   vars_desc,
                   nc_name_out)
  # Step 2: read this ensemble's PRMS-SNTemp output
  sntemp_out = gather_sntemp_output(model_run_loc = model_run_loc,
                                    model_output_file = model_output_file,
                                    model_fabric_file = 'GIS/Segments_subset.shp',
                                    sntemp_vars = dynamic_vars)
  print(sprintf('storing prms-sntemp %s of %s ensembles in ncdf', n, n_en))
  # Step 3: write the output into the ensemble slot `n` of the nc file
  nc_model_put(var_df = sntemp_out,
               var_names = all_vars,
               ens = n,
               nc_file = nc_name_out)
  return(nc_name_out)
}
#' Creates a NetCDF file for storing PRMS-SNTemp output
#'
#' Defines [time, seg_id_nat, ens] dimensions, one variable per entry in
#' `vars` (described by `vars_desc`), and writes the project id as a global
#' attribute.  The file is created empty; values are added by nc_model_put().
#'
#' @param model_locations_file rds file with the locations of the PRMS-SNTemp output variables
#' @param n_en number of ensemble runs of PRMS-SNTemp
#' @param dates vector of model dates
#' @param project_id string of the model project id
#' @param vars vector of variables we want to store from PRMS-SNTemp model run
#' @param vars_desc description of the variables which will be used to describe the netcdf file;
#'   must have columns state, units, longname, prec keyed by variable name
#' @param nc_name_out netcdf file name output
#' @param overwrite T/F if the nc file should be overwritten if it exists already
#' @return the netcdf file name
#'
nc_create_pb_out = function(model_locations_file,
                            n_en,
                            dates,
                            project_id,
                            vars,
                            vars_desc,
                            nc_name_out,
                            overwrite = T){
  #Set dimensions; should always be [(forecast_issue_time), time, (vertical dimension), location (or lon/ lat), ens];
  # for this project, we should have the dimensions of [time, model_location, ens]
  n_times <- length(dates)
  times <- as.integer(seq(0, n_times - 1, 1)) # days since dates[1]
  model_locations <- as.integer(readRDS(model_locations_file)$seg_id_nat) # PRMS-SNTemp seg_id_nat
  ens <- as.integer(seq(1, n_en, 1))
  time_dim <- ncdim_def("time",
                        units = sprintf('days since %s', dates[1]),
                        longname = 'time',
                        vals = times)
  loc_dim <- ncdim_def("seg_id_nat",
                       units = "",
                       vals = model_locations,
                       longname = 'PRMS-SNTemp stream segment id national')
  ens_dim <- ncdim_def("ens",
                       units = "",
                       vals = ens,
                       longname = 'ensemble member')
  # character dimension sized to the date string length (no dimvar written)
  dim_nchar <- ncdim_def("nchar",
                         units = "",
                         vals = 1:nchar(as.character(dates[1])),
                         create_dimvar = FALSE)
  ## quick check that units are valid
  ## NOTE(review): the return values are discarded, so an unparseable unit
  ## is not actually caught here — consider wrapping in stopifnot()
  udunits2::ud.is.parseable(ens_dim$units)
  udunits2::ud.is.parseable(loc_dim$units)
  udunits2::ud.is.parseable(time_dim$units)
  udunits2::ud.is.parseable(dim_nchar$units)
  #Define variables
  fillvalue <- 1e32
  def_list <- list()
  # loop through variables that we're predicting; each gets the full
  # [time, seg_id_nat, ens] dimensionality and metadata from vars_desc
  n_vars = length(vars)
  for(i in 1:n_vars){
    cur_var = vars[i]
    def_list[[i]] <- ncvar_def(name = vars_desc$state[vars_desc$state==cur_var],
                               units = vars_desc$units[vars_desc$state==cur_var],
                               dim = list(time_dim, loc_dim, ens_dim),
                               missval = fillvalue,
                               longname = vars_desc$longname[vars_desc$state==cur_var],
                               prec = vars_desc$prec[vars_desc$state==cur_var])
  }
  # recreate the file when allowed; otherwise error out
  # NOTE(review): the stop() message reads backwards — it fires when
  # overwrite is FALSE, i.e. when we are *not allowed* to overwrite
  if(file.exists(nc_name_out)){
    if(overwrite){
      file.remove(nc_name_out)
      ncout <- nc_create(nc_name_out, def_list, force_v4 = T)
    }else{stop('cannot overwrite nc output file')}
  }else{ncout <- nc_create(nc_name_out, def_list, force_v4 = T)}
  #Global file metadata
  ncatt_put(nc = ncout,
            varid = 0,
            attname = "project_id",
            attval = as.character(project_id),
            prec = "text")
  nc_close(ncout)
  return(nc_name_out)
}
#' insert PRMS-SNTemp output into NetCDF file
#'
#' Writes one ensemble member's values for each variable in `var_names`,
#' one segment at a time, into the [time, seg_id_nat, ens] hyperslab of the
#' file created by nc_create_pb_out().
#'
#' @param var_df data frame of the PRMS-SNTemp model output, will be generated from gather_sntemp_output();
#'   must contain seg_id_nat plus one column per entry of var_names, ordered by time within segment
#' @param var_names vector of variable names to store from PRMS-SNTemp
#' @param ens current ensemble member being stored
#' @param nc_file file path to the netcdf file
#'
nc_model_put = function(var_df,
                        var_names,
                        ens, # current ensemble
                        nc_file){
  ncout <- nc_open(nc_file, write = T)
  # NOTE(review): assigned but unused — segment order is taken from the nc
  # file's own seg_id_nat dimension below
  all_seg_id_nats = as.numeric(unique(var_df$seg_id_nat))
  for(i in seq_along(var_names)){
    print(sprintf('  storing %s', var_names[i]))
    cur_var = ncout$var[[var_names[i]]]
    varsize = cur_var$varsize
    # temp output dims [time_dim, loc_dim, ens_dim]; position of dimensions following:
    time_pos = 1
    loc_pos = 2
    ens_pos = 3
    n_dims = cur_var$ndims
    nc_seg_id_nats = ncout$dim$seg_id_nat$vals
    # write one segment at a time so var_df rows can be matched by id
    for(j in seq_along(nc_seg_id_nats)){
      cur_seg_id_nat = which(nc_seg_id_nats[j] == nc_seg_id_nats)
      cur_en = ens
      n_en = varsize[ens_pos]
      # start/count select the full time series for this (segment, ensemble)
      start = rep(1, n_dims)
      start[ens_pos] = cur_en
      start[loc_pos] = cur_seg_id_nat
      count = varsize
      count[ens_pos] = 1 # adding output from only one ensemble
      count[loc_pos] = 1 # adding output from only one location
      # pull this segment's time series for the current variable;
      # assumes rows are already in time order — confirm upstream
      cur_output = dplyr::filter(var_df, as.numeric(seg_id_nat) == nc_seg_id_nats[j]) %>%
        dplyr::select(var_names[i]) %>% pull()
      # qa / qc values
      ncvar_put(nc = ncout,
                varid = var_names[i],
                vals = cur_output,
                start = start,
                count = count)
    }
  }
  nc_close(ncout)
}
#' function for returning PRMS-SNTemp variables; returns tibble
#'
#' Reads the full [time, seg_id_nat, ens] array for one variable, melts it
#' into long format, then filters to the requested times/segments/ensembles
#' (NULL means "all" for each filter).
#'
#' @param nc_file file path to the netcdf file
#' @param var_name PRMS-SNTemp output variable name you want returned from the netcdf file
#' @param times model times you want returned (coerced with as.Date); NULL = all
#' @param seg_id_nats seg_id_nat segments you want returned (coerced to character); NULL = all
#' @param ens ensembles members you want returned (coerced to integer); NULL = all
#' @return a tibble with columns time, seg_id_nat, ensemble, and `var_name`
#'
nc_model_get = function(nc_file,
                        var_name,
                        times = NULL,
                        seg_id_nats = NULL,
                        ens = NULL){
  ncout = nc_open(nc_file)
  # output dims [time_dim, loc_dim, ens_dim]; position of dimensions following:
  time_pos = 1
  loc_pos = 2
  ens_pos = 3
  cur_var = ncout$var[[var_name]]
  varsize = cur_var$varsize
  # dimension coordinate values used to label the melted array
  all_valid_times = ncdf_times(nc = ncout)
  all_seg_id_nats = as.character(cur_var$dim[[loc_pos]]$vals)
  all_ens = as.integer(cur_var$dim[[ens_pos]]$vals)
  n_dims = cur_var$ndims
  # return all values, and then filter
  # (melt indices are replaced by the actual coordinate values)
  all_out = array(ncvar_get(nc = ncout, varid = var_name), dim = varsize) %>%
    reshape2::melt(varnames = c('time', 'seg_id_nat', 'ensemble')) %>%
    mutate(time = all_valid_times[time],
           seg_id_nat = all_seg_id_nats[seg_id_nat],
           ensemble = all_ens[ensemble]) %>%
    rename(!!var_name := value) %>%
    as_tibble()
  if(!is.null(times)){
    cur_valid_times = as.Date(times)
  }else{cur_valid_times = as.Date(all_valid_times)} # return all time if NULL
  if(!is.null(seg_id_nats)){
    cur_seg_id_nats = as.character(seg_id_nats)
  }else{cur_seg_id_nats = as.character(all_seg_id_nats)} # return all seg_id_nats if NULL
  if(!is.null(ens)){
    cur_ens = as.integer(ens)
  }else{cur_ens = as.integer(all_ens)}
  out = dplyr::filter(all_out,
                      time %in% cur_valid_times,
                      seg_id_nat %in% cur_seg_id_nats,
                      ensemble %in% cur_ens)
  nc_close(ncout)
  return(out)
}
#' Get time attribute from NetCDF file and convert it to years (or Rdate)
#'
#' This function reads the NetCDF time attribute and converts it to years
#' with decimal fraction for monthly data. For sub-monthly data the conversion
#' is to the Rdate format (only works with standard calendar!).
#'
#' original code from https://rdrr.io/github/jonasbhend/geoutils/src/R/ncdf_times.R
#'
#' @param nc object with link to NetCDF file (from \code{\link{open.ncdf}})
#' @param timename dimension name with time (optional)
#' @param as.Rdate logical, should output be converted to Rdate?
#' @param force logical, force Rdate conversion for monthly times?
#' @param tz time zone for time zone support (experimental)
#' @return a Date vector (as.Rdate = TRUE) or udunits-style dates otherwise
#'
ncdf_times <- function(nc, timename=NULL, as.Rdate=TRUE, force=TRUE, tz="UTC") {
  ## this function converts netcdf times to the
  ## R date-time format or to the udunits dates
  ## you can choose to switch to uduints format
  ## for gregorian calendars by setting as.Rdate
  ## to FALSE
  ## non-gregorian calendar dates are output using
  ## udunits date format
  ## you can force to get back an R date format, even
  ## if the calendar used is not gregorian using
  ## force=T (may return udunits dates if conversion
  ## is not successful)
  ## WARNING: time zones are not fully supported yet
  ## check whether udunits is available
  .udunitsInclude <- FALSE
  if (any(.packages() == "udunits") & class(try(utInit(), silent=T)) != "try-error"){
    .udunitsInclude <- TRUE
  }
  ## locate the time dimension, by common names unless given explicitly
  if (is.null(timename)){
    timei <- which(names(nc$dim) %in% c("time", "TIME", "tim", "TIM"))
  } else {
    timei <- which(names(nc$dim) == timename)
  }
  ## parse the reference date out of the units string ("<unit> since <date>")
  units <- nc$dim[[timei]]$units
  refdate <- strsplit(units, " ")[[1]]
  refdate <- refdate[grep('-', refdate)]
  ## debug reference date: a trailing day-of-month of '00' is replaced by '01'
  if (substr(refdate, nchar(refdate) - 1, nchar(refdate)) == '00') {
    rtmp <- strsplit(refdate, '-')[[1]]
    refdate <- paste(c(rtmp[-length(rtmp)], '01'), collapse='-')
    rm(rtmp)
  }
  vals <- nc$dim[[timei]]$vals
  ## calendar attribute; default to "standard" when absent
  tmp <- ncatt_get(nc, names(nc$dim)[timei], "calendar")
  if (tmp$hasatt) {
    calendar <- tmp$value
  } else {
    calendar <- "standard"
    ## print(paste("Warning: Calendar is missing in", nc$filename))
  }
  if (calendar == "proleptic_gregorian" || calendar == "gregorian") calendar <- "standard"
  if (as.Rdate){
    ## NOTE(review): `|` binds looser than `&`, so this condition is
    ## a | b | (c & calendar=='standard') — the calendar check only applies
    ## to the "seconds since" case; confirm this was intended.
    if (charmatch("hours since", units, nomatch=0) |
        charmatch("minutes since", units, nomatch=0) |
        charmatch("seconds since", units, nomatch=0) & calendar == 'standard') {
      ## sub-daily offsets: convert to seconds, add to the reference
      ## timestamp, then truncate to a Date
      mul <- 1
      ref.txt <- substr(units, 15,33)
      if (charmatch("minutes", units, nomatch=0)) mul <- 60
      if (charmatch("hours", units, nomatch=0)) {
        mul <- 3600
        ref.txt <- substr(units, 13,31)
      }
      times <- vals * mul
      if (nchar(ref.txt) == 19){
        ref <- as.POSIXct(ref.txt, tz)
      } else {
        ref <- as.POSIXct(paste(ref.txt, "00", sep=":"), tz)
      }
      time <- as.Date(ref + times)
    } else if (charmatch("days since", units, nomatch=0) & calendar == 'standard'){
      time <- as.Date(refdate, "%Y-%m-%d") + vals
    } else if (charmatch("days since", units, nomatch=0) &
               calendar %in% c('365_day', 'noleap', '360_day')) {
      ## fixed-length calendars: rescale model days onto real-calendar days
      ## (rounded to half days) before offsetting from the reference date
      if (calendar == '365_day' || calendar == 'noleap'){
        vals <- round(vals/365*365.24*2)/2
        time <- as.Date(refdate, "%Y-%m-%d") + vals
      } else if (calendar == '360_day'){
        vals <- round(vals/360*365.24*2)/2
        time <- as.Date(refdate, "%Y-%m-%d") + vals
      }
    } else if (charmatch("months since", units, nomatch=0)) {
      ## month offsets: work in whole months relative to year 0
      ref.yr <- as.numeric(format(as.Date(refdate), '%Y'))
      ref.month <- as.numeric(format(as.Date(refdate), '%m'))
      ref.day <- as.numeric(format(as.Date(refdate), '%d'))
      if (is.null(ref.day) | ref.day == 0) ref.day <- 1
      month <- floor((vals+ref.yr*12 + ref.month-1) %% 12) + 1
      year <- floor((vals+ref.yr*12 + ref.month-1)/12)
      time <- as.Date(ISOdate(year, month, ref.day))
    } else if (charmatch("years since", units, nomatch=0)) {
      ## year offsets with fractional years mapped to months
      unit.tmp <- paste(strsplit(units, " ")[[1]][3:4], collapse=" ")
      ## ref.yr <- substr(units, 13,16)
      ## ref.month <- as.numeric(substr(units, 18,19))
      ref.yr <- as.numeric(format(as.Date(unit.tmp), "%Y"))
      ref.month <- as.numeric(format(as.Date(unit.tmp), "%m"))
      if (is.null(ref.month)) ref.month <- 1
      ##ref.day <- as.numeric(substr(units, 21,22))
      ref.day <- as.numeric(format(as.Date(unit.tmp), "%d"))
      if (is.null(ref.day)) ref.day <- 1
      year <- floor(vals)
      month <- floor((vals*12)%%12)
      time <- as.Date(ISOdate(ref.yr + year, ref.month + month, ref.day))
    } else if (charmatch("day as", units, nomatch=0)) {
      ## "day as %Y%m%d" style: values encode the date digits directly
      date <- floor(vals)
      day <- as.numeric(substr(date, nchar(date)-1, nchar(date)))
      if (all(day > 28)) date <- as.character(as.numeric(date) - max(day, na.rm=T) + 28)
      date <- paste("000",date, sep="")
      date <- substr(date, nchar(date)-7, nchar(date))
      time <- as.Date(date, "%Y%m%d")
    } else {
      stop(paste("Can't deal with calendar", calendar))
    }
  } else {
    ## non-Rdate path: delegate to udunits, optionally coercing to POSIXct
    if (.udunitsInclude){
      time <- utCalendar(vals, units, calendar=calendar, style="array")
      if (force){
        tmp <- try(ISOdatetime(time$year, time$month, time$day, time$hour,
                               time$minute, time$second, tz), silent=T)
        if (class(tmp)[1] != "try-error") time <- tmp
      }
    } else {
      warning("Package udunits cannot be loaded or initialized via utInit()")
    }
  }
  return(time)
}
# create nc file for storing PRMS-SNTemp parameters
#' nc_create_pb_params = function(model_locations_ind,
#' n_en,
#' project_id,
#' vars, # parameter names
#' nc_name_out_ind,
#' model_run_loc,
#' param_default_file = 'control/delaware.control.par_name',
#' n_segments = 456,
#' n_hrus = 765,
#' n_gwr = 765,
#' n_ssr = 765,
#' overwrite = T,
#' gd_config = 'lib/cfg/gd_config.yml'){
#'
#' model_locations <- as.integer(readRDS(sc_retrieve(model_locations_ind, remake_file = 'getters.yml'))$seg_id_nat) # PRMS-SNTemp seg_id_nat
#'
#' #Set dimensions
#' ens <- as.integer(seq(1, n_en, 1))
#' seg_model_idxs <- as.integer(seq(1, n_segments, 1))
#' hru_model_idxs <- as.integer(seq(1, n_hrus, 1))
#' gwr_model_idxs <- as.integer(seq(1, n_gwr, 1))
#' ssr_model_idxs <- as.integer(seq(1, n_ssr, 1))
#'
#' ens_dim <- ncdim_def("ens",
#' units = "",
#' vals = ens,
#' longname = 'ensemble member')
#' seg_dim <- ncdim_def("seg_loc",
#' units = "",
#' vals = seg_model_idxs,
#' longname = 'stream segment model index')
#' hru_dim <- ncdim_def("hru_loc",
#' units = "",
#' vals = hru_model_idxs,
#' longname = 'HRU model index')
#' gwr_dim <- ncdim_def("gwr_loc",
#' units = "",
#' vals = gwr_model_idxs,
#' longname = 'groundwater reservoir model index')
#' ssr_dim <- ncdim_def("ssr_loc",
#' units = "",
#' vals = ssr_model_idxs,
#' longname = 'shallow subsurface reservoir model index')
#' one_dim <- ncdim_def('global_loc',
#' units = '',
#' vals = 1,
#' longname = 'global parameter; applied to every segment or hru')
#' month_dim <- ncdim_def('month_param',
#' units = '',
#' vals = seq(1,12,1),
#' longname = 'parameter applied either globally by segment / hru for a given month')
#'
#' #Define variables
#' fillvalue_float <- 1e32
#' fillvalue_int <- -99
#'
#' # use same dimensions as NOAA forecasts [lon, lat, forecast hours, ensemble, issue date]
#' def_list <- list()
#' # loop through variables that we're forecasting
#' n_vars = length(vars$param)
#' for(i in 1:n_vars){
#' cur_defaults = get_default_param_vals(param_name = vars$param[i],
#' model_run_loc = model_run_loc,
#' param_default_file = param_default_file)
#' cur_time_dim = NULL
#'
#' if(cur_defaults$dim == 'one'){
#' cur_loc_dim = one_dim
#' }else if(cur_defaults$ndim == '1' & cur_defaults$dim == 'nsegment'){ # a stream segment-based parameter
#' cur_loc_dim = seg_dim
#' }else if(cur_defaults$ndim == '1' & cur_defaults$dim == 'nhru'){
#' cur_loc_dim = hru_dim
#' }else if(cur_defaults$ndim == '1' & cur_defaults$dim == 'ngw'){
#' cur_loc_dim = gwr_dim
#' }else if(cur_defaults$ndim == '1' & cur_defaults$dim == 'nssr'){
#' cur_loc_dim = ssr_dim
#' }else if(cur_defaults$dim == 'nmonths'){
#' cur_loc_dim = one_dim
#' cur_time_dim = month_dim
#' }else if(cur_defaults$ndim == '2'){
#' if(grepl('nsegment', cur_defaults$dim) & grepl('nmonths', cur_defaults$dim)){
#' cur_loc_dim = seg_dim
#' cur_time_dim = month_dim
#' }else if(grepl('nhru', cur_defaults$dim) & grepl('nmonths', cur_defaults$dim)){
#' cur_loc_dim = hru_dim
#' cur_time_dim = month_dim
#' }
#' }
#' # print(cur_loc_dim$name)
#'
#' if(!is.null(cur_time_dim)){
#' def_list[[i]] <- ncvar_def(name = vars$param[i],
#' units = vars$units[i],
#' dim = list(cur_loc_dim, ens_dim, cur_time_dim),
#' missval = ifelse(vars$prec[i] == 'float', fillvalue_float, fillvalue_int),
#' longname = vars$longname[i],
#' prec = vars$prec[i])
#' }else{
#' def_list[[i]] <- ncvar_def(name = vars$param[i],
#' units = vars$units[i],
#' dim = list(cur_loc_dim, ens_dim),
#' missval = ifelse(vars$prec[i] == 'float', fillvalue_float, fillvalue_int),
#' longname = vars$longname[i],
#' prec = vars$prec[i])
#' }
#'
#' }
#'
#' nc_name_out = scipiper::as_data_file(nc_name_out_ind)
#'
#' if(file.exists(nc_name_out)){
#' if(overwrite){
#' file.remove(nc_name_out)
#' ncout <- nc_create(nc_name_out, def_list, force_v4 = T)
#' }else{stop('cannot overwrite nc output file')}
#' }else{ncout <- nc_create(nc_name_out, def_list, force_v4 = T)}
#'
#' #Global file metadata
#' ncatt_put(nc = ncout,
#' varid = 0,
#' attname = "project_id",
#' attval = as.character(project_id),
#' prec = "text")
#'
#' nc_close(ncout)
#' gd_put(remote_ind = nc_name_out_ind, local_source = nc_name_out, config_file = gd_config)
#' }
#'
#'
#'
#' #' insert parameters by drawing from distribution
#' #'
#' nc_params_put = function(vars,
#' n_en,
#' nc_file_ind){
#'
#' ncout <- nc_open(sc_retrieve(nc_file_ind, remake_file = 'getters.yml'), write = T)
#'
#' param_names = vars$param
#'
#' for(i in seq_along(param_names)){
#'
#' cur_var = ncout$var[[param_names[i]]]
#' varsize = cur_var$varsize
#' # dims [loc_dim, ens_dim, time_dim]; position of dimensions following:
#' loc_pos = 1
#' ens_pos = 2
#' time_pos = 3
#'
#' n_dims = cur_var$ndims
#'
#' cur_mean = vars$mean[i]
#' cur_sd = vars$sd[i]
#' cur_rparam = rnorm(n = n_en, mean = cur_mean, sd = cur_sd) # draw params from distribution; setting all segments to these param draws
#' # check to make sure params are within min-max range
#' cur_rparam = ifelse(cur_rparam < vars$min[i], vars$min[i], cur_rparam)
#' cur_rparam = ifelse(cur_rparam > vars$max[i], vars$max[i], cur_rparam)
#' cur_rparam = array(rep(cur_rparam, each =varsize[1]), dim = varsize)
#'
#' start = rep(1, n_dims)
#' # start[time_pos] = # need to update if there are month based parameters
#'
#' count = varsize
#' # count[time_pos] = 1 # adding only one issue time step
#'
#' ncvar_put(nc = ncout,
#' varid = param_names[i],
#' vals = cur_rparam,
#' start = start,
#' count = count)
#'
#' }
#' nc_close(ncout)
#' }
#'
#' #' function for returning parameters; returns list
#' #'
#' nc_params_get = function(nc_file_ind,
#' param_names = NULL,
#' model_idxs = NULL,
#' ens = NULL,
#' months = NULL){
#'
#' nc = nc_open(sc_retrieve(nc_file_ind, remake_file = 'getters.yml'), write = T)
#'
#' if(!is.null(param_names)){
#' param_names = param_names
#' }else{
#' param_names = names(nc$var)
#' }
#'
#' # dims [loc_dim, ens_dim, time_dim]; position of dimensions following:
#' loc_pos = 1
#' ens_pos = 2
#' time_pos = 3
#'
#' out_list = vector(mode = 'list', length = length(param_names))
#' for(i in seq_along(param_names)){
#' cur_var = nc$var[[param_names[i]]]
#' varsize = cur_var$varsize
#' n_dims = cur_var$ndims
#'
#' all_model_idxs = as.character(cur_var$dim[[loc_pos]]$vals)
#' all_ens = as.integer(cur_var$dim[[ens_pos]]$vals)
#' if(n_dims > 2){
#' all_months = cur_var$dim[[time_pos]]$vals
#' # return all values, and then filter
#' all_out = ncvar_get(nc = nc, varid = param_names[i]) %>% array(dim = varsize) %>%
#' reshape2::melt(varnames = c('model_idx', 'ensemble', 'month')) %>%
#' mutate(model_idx = all_model_idxs[model_idx],
#' ensemble = all_ens[ensemble],
#' month = all_months[month]) %>%
#' rename(!!param_names[i] := value) %>%
#' as_tibble()
#' }else{
#' # return all values, and then filter
#' all_out = ncvar_get(nc = nc, varid = param_names[i]) %>% array(dim = varsize) %>%
#' reshape2::melt(varnames = c('model_idx', 'ensemble')) %>%
#' mutate(model_idx = all_model_idxs[model_idx],
#' ensemble = all_ens[ensemble]) %>%
#' rename(!!param_names[i] := value) %>%
#' as_tibble()
#' }
#'
#' if(!is.null(model_idxs)){
#' cur_model_idxs = as.character(model_idxs)
#' }else{cur_model_idxs = as.character(all_model_idxs)}
#' if(!is.null(ens)){
#' cur_ens = as.integer(ens)
#' }else{cur_ens = as.integer(all_ens)}
#' if(n_dims > 2){
#' if(!is.null(months)){
#' cur_months = as.integer(months)
#' }else{cur_months = as.integer(all_months)}
#' }
#'
#' if(n_dims > 2){
#' out = dplyr::filter(all_out,
#' model_idx %in% cur_model_idxs,
#' ensemble %in% cur_ens,
#' month %in% cur_months)
#' }else{
#' out = dplyr::filter(all_out,
#' model_idx %in% cur_model_idxs,
#' ensemble %in% cur_ens)
#' }
#'
#' out_list[[i]] = out[,ncol(out)] %>% pull()
#' names(out_list)[i] = param_names[i]
#' }
#'
#' nc_close(nc)
#'
#' return(out_list)
#' }
#'
#'
#' # example function call:
#' # nc_create_drivers(hru_model_idxs = seq(1,765),
#' # forecast_horizon = 8,
#' # n_en = 20,
#' # issue_dates = dates,
#' # forecast_project_id = forecast_project_id,
#' # vars = drivers,
#' # nc_name_out = '2_2_model_drivers/out/forecasted_drivers.nc',
#' # overwrite = T)
#'
#'
#' nc_create_drivers = function(hru_model_idxs,
#' forecast_horizon,
#' n_en,
#' issue_dates,
#' forecast_project_id,
#' vars,
#' nc_name_out,
#' overwrite = T){
#'
#' #Set dimensions
#' ens <- as.integer(seq(1, n_en, 1))
#' model_locations <- as.integer(hru_model_idxs)
#' n_issue_date <- length(issue_dates)
#' timestep <- as.integer(seq(0, n_issue_date - 1, 1)) # days since issue date #1
#' forecast_days <- as.integer(seq(0, forecast_horizon - 1, 1))
#'
#' ens_dim <- ncdim_def("ens",
#' units = "",
#' vals = ens,
#' longname = 'ensemble member')
#' loc_dim <- ncdim_def("loc",
#' units = "",
#' vals = model_locations,
#' longname = 'hydrologic reach unit model index')
#' time_dim <- ncdim_def("timestep",
#' units = '1 day',
#' longname = sprintf('Days since %s', issue_dates[1]),
#' vals = timestep)
#' fdays_dim <- ncdim_def('forecast_days',
#' units = '1 day',
#' longname = 'Valid dates from issue time',
#' vals = forecast_days)
#'
#' dim_nchar <- ncdim_def("nchar",
#' units = "",
#' vals = 1:nchar(as.character(issue_dates[1])),
#' create_dimvar = FALSE)
#' ## quick check that units are valid
#' udunits2::ud.is.parseable(ens_dim$units)
#' udunits2::ud.is.parseable(loc_dim$units)
#' udunits2::ud.is.parseable(time_dim$units)
#' udunits2::ud.is.parseable(fdays_dim$units)
#' udunits2::ud.is.parseable(dim_nchar$units)
#'
#' #Define variables
#' fillvalue <- 1e32
#'
#' # use same dimensions as NOAA forecasts [lon, lat, forecast hours, ensemble, issue date]
#' def_list <- list()
#' # loop through variables that we're forecasting
#' n_vars = length(vars$driver)
#' for(i in 1:n_vars){
#' def_list[[i]] <- ncvar_def(name = vars$driver[i],
#' units = vars$units[i],
#' dim = list(loc_dim, fdays_dim, ens_dim, time_dim),
#' missval = fillvalue,
#' longname = vars$longname[i],
#' prec = vars$prec[i])
#' }
#'
#' def_list[[n_vars + 1]] <- ncvar_def(name = 'issue_time',
#' units = 'datetime',
#' dim = list(dim_nchar, time_dim),
#' longname = 'Forecast issue time',
#' prec = 'char')
#'
#' if(file.exists(nc_name_out)){
#' if(overwrite){
#' file.remove(nc_name_out)
#' ncout <- nc_create(nc_name_out, def_list, force_v4 = T)
#' }else{stop('cannot overwrite nc output file')}
#' }else{ncout <- nc_create(nc_name_out, def_list, force_v4 = T)}
#'
#' ncvar_put(nc = ncout,
#' varid = def_list[[n_vars + 1]],
#' vals = issue_dates)
#'
#' #Global file metadata
#' ncatt_put(nc = ncout,
#' varid = 0,
#' attname = "forecast_project_id",
#' attval = as.character(forecast_project_id),
#' prec = "text")
#'
#' nc_close(ncout)
#' }
#'
#'
#' nc_drivers_put = function(var_df,
#' var_name,
#' en,
#' issue_date,
#' nc_name_out){
#'
#' ncout <- nc_open(nc_name_out, write = T)
#'
#' cur_var = ncout$var[[var_name]]
#' varsize = cur_var$varsize
#' issue_dates = ncvar_get(ncout, varid = 'issue_time')
#' # temp output dims [loc_dim, fdays_dim, ens_dim, time_dim]; position of dimensions following:
#' loc_pos = 1
#' fdays_pos = 2
#' ens_pos = 3
#' time_pos = 4
#'
#' n_dims = cur_var$ndims
#'
#' cur_issue_time = which(issue_date == issue_dates)
#' cur_en = en
#'
#' n_fdays = varsize[fdays_pos]
#' n_en = varsize[ens_pos]
#'
#' start = rep(1, n_dims)
#' start[time_pos] = cur_issue_time
#' start[ens_pos] = cur_en
#'
#' count = varsize
#' count[time_pos] = 1 # adding only one issue time step
#' count[ens_pos] = 1 # adding output from only one ensemble
#'
#' ncvar_put(nc = ncout,
#' varid = var_name,
#' vals = pull(var_df, var_name),
#' start = start,
#' count = count)
#'
#' nc_close(ncout)
#' }
#'
#'
#' #' function for returning forecasted driver variables; returns tibble
#' #'
#' nc_drivers_get = function(nc_file,
#' var_name = c('tmin','tmax','prcp'), # default return all drivers
#' issue_dates = NULL,
#' model_idxs = NULL, # hru model idx
#' ens = NULL,
#' fdays = NULL){
#'
#' nc = nc_open(nc_file)
#'
#' # temp output dims [loc_dim, fdays_dim, ens_dim, time_dim]; position of dimensions following:
#' loc_pos = 1
#' fdays_pos = 2
#' ens_pos = 3
#' time_pos = 4
#'
#' cur_var = nc$var[[1]]
#'
#' all_issue_dates = as.Date(ncvar_get(nc, varid = 'issue_time')) # all possible issue dates
#' all_model_idxs = as.character(cur_var$dim[[loc_pos]]$vals)
#' all_ens = as.integer(cur_var$dim[[ens_pos]]$vals)
#' all_valid_times = cur_var$dim[[fdays_pos]]$vals
#'
#' nc_close(nc)
#'
#' if(!is.null(issue_dates)){
#' cur_issue_dates = which(all_issue_dates %in% as.Date(issue_dates)) - 1 # indexed starting with 0
#' }else{
#' cur_issue_dates = seq(0, length(all_issue_dates), 1)
#' }
#' if(!is.null(model_idxs)){
#' cur_model_idxs = which(all_model_idxs %in% as.character(model_idxs))
#' }else{
#' cur_model_idxs = seq_along(all_model_idxs)
#' }
#' if(!is.null(ens)){
#' cur_ens = which(all_ens %in% as.integer(ens))
#' }else{
#' cur_ens = seq_along(all_ens)
#' }
#' if(!is.null(fdays)){
#' cur_fdays = which(all_valid_times %in% as.integer(fdays)) - 1 # indexed starting with 0
#' }else{
#' cur_fdays = seq(0, length(all_valid_times), 1)
#' }
#'
#' # return all values, and then filter
#' out = tidync::tidync(x = nc_file) %>%
#' tidync::activate(var_name) %>%
#' tidync::hyper_filter(loc = loc %in% cur_model_idxs,
#' forecast_days = forecast_days %in% cur_fdays,
#' ens = ens %in% cur_ens,
#' timestep = timestep %in% cur_issue_dates) %>%
#' tidync::hyper_tibble() %>%
#' mutate(issue_time = all_issue_dates[timestep + 1],
#' model_idx = all_model_idxs[loc],
#' valid_time = issue_time + as.difftime(all_valid_times[forecast_days + 1], units = 'days'),
#' ensemble = all_ens[ens]) %>%
#' dplyr::select(model_idx, valid_time, ensemble, issue_time, var_name)
#'
#' return(out)
#' }
#'
|
647a59c15ec58de319e7c0537632c1b7bc196529
|
6c10fa091932a4c73422b3573ab04aed11df392e
|
/man/plot.graph.Rd
|
c6b55975bcc0d0835400b16783384b526c97759b
|
[] |
no_license
|
cran/GGMnonreg
|
fd2c5b18a196b4b32244a209a5688ebf651911c4
|
1b7b02bdcc0d5e14daed2f8d99b2e597986cb0fc
|
refs/heads/master
| 2023-04-01T07:10:52.959431
| 2021-04-08T10:30:06
| 2021-04-08T10:30:06
| 355,965,335
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,776
|
rd
|
plot.graph.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_graph.R
\name{plot.graph}
\alias{plot.graph}
\title{Network Plot for \code{graph} Objects}
\usage{
\method{plot}{graph}(
x,
layout = "circle",
neg_col = "#D55E00",
pos_col = "#009E73",
edge_magnify = 1,
node_size = 10,
palette = 2,
node_names = NULL,
node_groups = NULL,
...
)
}
\arguments{
\item{x}{An object of class \code{graph} obtained from \code{\link[GGMnonreg]{get_graph}}.}
\item{layout}{Character string. Which graph layout (default is \code{circle})?
See \link[sna]{gplot.layout}.}
\item{neg_col}{Character string. Color for the negative edges
(defaults to a colorblind friendly red).}
\item{pos_col}{Character string. Color for the positive edges
(defaults to a colorblind friendly green).}
\item{edge_magnify}{Numeric. A value that is multiplied by the edge weights. This increases (> 1) or
decreases (< 1) the line widths (defaults to 1).}
\item{node_size}{Numeric. The size of the nodes (defaults to \code{10}).}
\item{palette}{A character string specifying the palette for the \code{groups}.
(default is \code{Set3}). See \href{http://www.cookbook-r.com/Graphs/Colors_(ggplot2)/}{palette options here}.}
\item{node_names}{Character string. Names for nodes of length \emph{p}.}
\item{node_groups}{A character string of length \emph{p} (the number of nodes in the model).
This indicates groups of nodes that should be the same color
(e.g., "clusters" or "communities").}
\item{...}{Currently ignored.}
}
\value{
An object of class \code{ggplot}
}
\description{
Visualize the conditional (in)dependence structure.
}
\examples{
# data
Y <- ptsd
# estimate graph
fit <- ggm_inference(Y, boot = FALSE)
# get info for plotting
plot(get_graph(fit))
}
|
e76db0525ebf75b29467cdf7417058dbb3181eb4
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/surveysd/tests/test_generateHHID.R
|
cc0199ffcbe709f697ac7ce49bb5fad683cbb724
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,992
|
r
|
test_generateHHID.R
|
#################################
# test generate.HHID()
#
context("generate.HHID()")
library(surveysd)
library(laeken)
library(data.table)
eusilc <- surveysd:::demo.eusilc()
eusilc[,rb030split:=rb030]
# create spit households
eusilc[,rb030split:=rb030]
year <- eusilc[,unique(year)]
year <- year[-1]
leaf_out <- c()
for(y in year){
split.person <- eusilc[year==(y-1)&!duplicated(db030)&!db030%in%leaf_out,
sample(rb030,20)]
overwrite.person <- eusilc[year==(y)&!duplicated(db030)&!db030%in%leaf_out,
.(rb030=sample(rb030,20))]
overwrite.person[,c("rb030split","year_curr"):=.(split.person,y)]
eusilc[overwrite.person,rb030split:=i.rb030split,on=.(rb030,year>=year_curr)]
leaf_out <- c(leaf_out,
eusilc[rb030%in%c(overwrite.person$rb030,overwrite.person$rb030split),
unique(db030)])
}
# test input parameter
test_that("test para - data",{
expect_error(generate.HHID(as.matrix(eusilc),period="year",pid="rb030",hid="db030"),
"dat must be a data.frame or data.table")
expect_error(generate.HHID(eusilc,period="year",pid="rb030",hid="db030"),NA)
})
test_that("test para - hid, pid and period",{
expect_error(generate.HHID(eusilc,period="years",pid="rb030",hid="db030"),
"years is not a column of dat")
expect_error(generate.HHID(eusilc,period="year",pid="rb030s",hid="db030"),
"rb030s is not a column of dat")
expect_error(generate.HHID(eusilc,period="year",pid="rb030",hid="db030s"),
"db030s is not a column of dat")
eusilc[,year.char:=as.character(year)]
expect_error(generate.HHID(eusilc,period="year.char",pid="rb030",hid="db030"),
"year.char must be an integer or numeric vector")
})
test_that("test return",{
dat.HHID <- generate.HHID(eusilc,period="year",pid="rb030split",hid="db030")
dat.HHID <- dat.HHID[,uniqueN(db030),by=rb030split][V1>1]
expect_true(nrow(dat.HHID)==0)
})
|
d41ceb647f8351a3a85894890568c2823fdc9c49
|
75a981dbcb03a471ed72df9eee09604d84a3956a
|
/R/pshiftpois.R
|
5f4e77722d100574a3509cf36756150323dbd281
|
[] |
no_license
|
benaug/move.HMM
|
7c1e897aa33a2aaa0564844d9e8ee20fbcf0d96a
|
1095eedcc3c47976ea0d966131e5e1855eaaa9ef
|
refs/heads/master
| 2022-02-11T01:31:16.309023
| 2022-01-22T19:41:44
| 2022-01-22T19:41:44
| 13,788,227
| 3
| 2
| null | 2014-10-01T17:46:47
| 2013-10-22T22:58:15
|
R
|
UTF-8
|
R
| false
| false
| 447
|
r
|
pshiftpois.R
|
#'Shifted Poisson cdf
#'
#'This function evaluates the shifted Poisson cdf. The shift is fixed to 1.
#'@param x a vector of values where the cdf is to be evaluated
#'@param lambda vector of positive means (of an ordinary Poisson distribution)
#'@return A vector of shifted negative binomial cdf values
#'@export
pshiftpois=function(x,lambda){
if(any(x<1))stop("This distribution only accepts values >=1")
out=ppois(x-1,lambda)
return(out)
}
|
8096b92613eb48c261d3d359712a641ffd806ee8
|
85df540e33b8aa3809b8f783b5eb51b1a407dc8e
|
/cheb/cheb.R
|
48c5023c17db1f89e5851032ab6085dc5227e888
|
[] |
no_license
|
ricky1223321/mathematical
|
c65caaff514aadd7cef2a25ed474f721bba60a44
|
2d2cc457d7c1230498f6ed7eccd74fb772f57ac4
|
refs/heads/master
| 2023-06-25T02:28:42.658872
| 2021-07-30T10:31:52
| 2021-07-30T10:31:52
| 382,778,559
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,250
|
r
|
cheb.R
|
chebyshev = function(xs,k){
library(ggplot2)
library(magrittr)
library(cowplot)
set.seed(123)
#xs <- c(rnorm(2000))
#xs <- c(runif(2000,0,1))
#k=1.2
#xs = c(rexp(2000,rate=0.5))
#xs = c(rbinom(2000,10,0.5))
lower.bound = mean(xs) - k*sd(xs)
upper.bound = mean(xs) + k*sd(xs)
num.in.bounds = length(xs[(xs > lower.bound) & (xs < upper.bound)])
proportion.in.bounds = round(num.in.bounds/length(xs),digits=4)
chebyshev.guarantee = round(1-1/k^2, digits = 4)
hp = qplot(x =xs, fill=..count.., geom="histogram")
hp %>%
+geom_vline(aes(xintercept=lower.bound), colour="#990000", linetype="dashed",size=1.5) %>%
+geom_vline(aes(xintercept=upper.bound), colour="#990000", linetype="dashed",size=1.5) %>%
+geom_vline(aes(xintercept=mean(xs)), colour="#990000", linetype="dashed",size=1) %>%
+labs(title = paste("k = ",k),
subtitle = paste("proportion in bounds = ",proportion.in.bounds),
caption = paste("chebyshev guarantees at least " , chebyshev.guarantee)) %>%
+theme(
plot.title = element_text(size = 14, face = "bold",hjust = 0.5),
plot.subtitle = element_text(size = 14,hjust = 0.5),
plot.caption = element_text(size = 14, face = "italic",hjust = 0.5)
)
}
|
2c63c1822f1796e9588a684d42d449fcd5bac3d2
|
16ad11958d4dccadb41fabc37006daa64cb1a69c
|
/02_check_tables_in_database.R
|
4f73e5dbd5b59bb28b12203073437e0a10d3f2a4
|
[] |
no_license
|
kguidonimartins/create_database_from_csv
|
3975ee8a2f256ff58a79eac01db2e6b19c427ddb
|
1662f4427a432bc6bdadc0f404d05876b2240f48
|
refs/heads/master
| 2022-09-18T15:17:24.471753
| 2021-04-20T21:50:24
| 2021-04-20T21:50:24
| 223,270,518
| 1
| 0
| null | 2022-08-23T17:57:16
| 2019-11-21T21:40:57
|
Python
|
UTF-8
|
R
| false
| false
| 612
|
r
|
02_check_tables_in_database.R
|
if (!require("tidyverse")) install.packages("tidyverse")
if (!require("DBI")) install.packages("DBI")
if (!require("RSQLite")) install.packages("RSQLite")
if (!require("dbplyr")) install.packages("dbplyr")
database <- DBI::dbConnect(RSQLite::SQLite(), "test_small_db.db")
src_dbi(database)
table_list <- dbListTables(database)
table_list
database %>%
dbReadTable(table_list[1]) %>%
head()
table_00 <-
table_list %>%
.[1] %>%
tbl(database, .)
table_00 %>%
head()
table_list %>%
.[1] %>%
tbl(database, .) %>%
select(A, C) %>%
filter_if(is.numeric, all_vars(. > 500)) %>%
show_query()
|
9f5aa9aad1b1cb445a5f3a88a3b8db9446731670
|
da35cc8651015a9f106bd21692102ec52db33c01
|
/R/create_statement_local_pkgs.R
|
1ca35682499b43b178065c7f872fdfcb238a27d5
|
[
"MIT"
] |
permissive
|
gwd666/dockr
|
3b291a44babc6f1301beff006506660670eb98f0
|
c6b762dbd71608b556fde94038cc35bf244e5dd9
|
refs/heads/master
| 2022-03-27T04:37:57.249913
| 2019-12-15T17:00:55
| 2019-12-15T17:00:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,849
|
r
|
create_statement_local_pkgs.R
|
#' Create Install Statements for Local Source Packages
#'
#' @param pkgs_df \code{data.frame} with names and version numbers of desired
#' packages.
#' @param pkgs_deps \code{character} package dependencies ordered recursively.
#' @inheritParams prepare_docker_image
#' @inheritParams match_pkg_local
#'
#' @return \code{character} install chunk for Dockerfile.
create_statement_local_pkgs <- function(pkgs_df,
dir_src_docker = "",
verbose = TRUE,
pkgs_deps) {
# handle case, when no local dependencies are required.
if (is.null(pkgs_df) || nrow(pkgs_df) == 0) {
return(NULL)
}
# order packages, such that they are installed in correct order, and
# installation does not break.
pkgs_order <- match(pkgs_deps, pkgs_df$pkg)
pkgs_order <- pkgs_order[!is.na(pkgs_order)]
pkgs_df <- pkgs_df[pkgs_order, , drop = FALSE]
# create install statements.
statements <- mapply(
FUN = function(pkg, vrs) {
# create source package file name.
fn <- paste0(pkg, "_", vrs, ".tar.gz")
# create full file path.
fn <- file.path("source_packages", fn)
# create install statements.
statement <- paste0("RUN R -e 'install.packages(pkgs = \"", fn,
"\", repos = NULL)'")
},
pkg = pkgs_df$pkg, vrs = pkgs_df$vrs, SIMPLIFY = FALSE, USE.NAMES = FALSE)
# convert to one character vector.
statements <- do.call(c, statements)
# combine into one statement.
statements <- c("# install local source packages",
statements,
"")
# print service information.
if (verbose) {
cat_bullet("Preparing install statements for local source packages",
bullet = "tick",
bullet_col = "green")
}
statements
}
|
6a1aed72dcede6e10ff0093e24822754128cdc83
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/selfingTree/R/extractProbs.R
|
f841df64b7a1a37a9670f9ab218595f8c0d37ade
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,852
|
r
|
extractProbs.R
|
## Copyright (c) 2014, Pioneer Hi-Bred International, Inc.
## Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## Neither the name of Pioneer Hi-Bred International, Inc. nor the
## names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
extractProbs <- function(F){
probs <- foreach(i = 1:length(F), .combine = "c") %do% F[[i]][[2]]
names(probs) <- foreach(i = 1:length(F), .combine = "c") %do% {
paste(F[[i]][[1]][1,], F[[i]][[1]][2,], sep = "-")
}
return(probs)
}
|
9e3bedf11d9f57095d8d8209a55d50593c7b60cd
|
b0ea087649d39514c3683264a66bb392a5e52cba
|
/poppr.R
|
2b20e723f84bf72eed669f478df6a4c91789c2e8
|
[] |
no_license
|
evhersh/GBS_results
|
e449411e8795e9e82748c6a947a1339c1a139241
|
fdb3bdf81db57cd20fd94f5016cce6dada60baa9
|
refs/heads/master
| 2022-12-08T19:12:43.891642
| 2020-09-05T07:41:08
| 2020-09-05T07:41:08
| 193,580,779
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 28,246
|
r
|
poppr.R
|
############
# Packages
############
library("vcfR")
library("poppr")
library("ape")
library("RColorBrewer")
library(gdsfmt)
library(SNPRelate)
library("ggplot2")
library(reshape2)
library(plotly)
library(adegenet)
library(knitr)
library(ggpubr)
library(hierfstat)
library(ggtree)
###############
# Data import #
###############
# strata (sample, pop, ms)
mystrata <- read.csv("~/Google Drive/GitHub/Hookeri-GBS/popmap_all.csv")
# vcf to vcfR
# vcf <- read.vcfR("~/Google Drive/GitHub/Hookeri-GBS/Data/final.filtered.snps.vcf")
# vcf.dips <- read.vcfR("~/Google Drive/GitHub/Hookeri-GBS/Data/filtered.dips.vcf")
# vcf.trips <- read.vcfR("~/Google Drive/GitHub/Hookeri-GBS/Data/filtered.trips.vcf")
# vcf.tets <- read.vcfR("~/Google Drive/GitHub/Hookeri-GBS/Data/filtered.tets.vcf")
# vcf <- read.vcfR("final.filtered.snps.allalleles.vcf")
# vcf.dips <- read.vcfR("filtered.dips.allalleles.vcf")
# vcf.trips <- read.vcfR("filtered.trips.allalleles.vcf")
# vcf.tets <- read.vcfR("filtered.tets.allalleles.vcf")
head(is.biallelic(vcf.dips))
# vcfdips.gt <- vcfR::extract.gt(vcf.dips, return.alleles = TRUE, convertNA = TRUE)
# dips <- df2genind(vcfdips.gt, sep="/", NA.char="NA", type="codom")
# vcfR to genind to genclone
dips.gi <- vcfR2genind(vcf.dips, sep = "/", ploidy=2, return.alleles = TRUE)
#dips.gi <- vcfR2genind(vcf.dips, sep = "/", ploidy=2)
dips.gc <- as.genclone(dips.gi)
sampleorder <- match(indNames(dips.gc), mystrata$id)
strata(dips.gc) <- mystrata[sampleorder,]
setPop(dips.gc) <- ~pop
trips.gi <- vcfR2genind(vcf.trips, sep = "/", ploidy=3, return.alleles = TRUE)
#trips.gi <- vcfR2genind(vcf.trips, sep = "/", ploidy=3)
trips.gl <- vcfR2genlight(vcf.trips)
#ploidy(trips.gi) <- 3
trips.gc <- as.genclone(trips.gi)
sampleorder <- match(indNames(trips.gc), mystrata$id)
strata(trips.gc) <- mystrata[sampleorder,]
setPop(trips.gc) <- ~pop
tets.gi <- vcfR2genind(vcf.tets, sep = "/", ploidy=4, return.alleles = TRUE)
#tets.gi <- vcfR2genind(vcf.tets, sep = "/", ploidy=4)
tets.gc <- as.genclone(tets.gi)
sampleorder <- match(indNames(tets.gc), mystrata$id)
strata(tets.gc) <- mystrata[sampleorder,]
setPop(tets.gc) <- ~pop
# combine genclones
dipsNtripsNtets.gc <- repool(dips.gc,trips.gc,tets.gc)
#dipsNtripsNtets.gc$pop <- factor(dipsNtripsNtets.gc$pop, levels=c("B53-S", "B60-S", "B42-S", "B46-S", "B49-S", "L62-S", "L62-A", "L05-S", "L08-S", "L10-S", "L11-S", "L12-S", "L13-S", "L06-A", "L16-A", "L17-A", "L39-A", "L41-A","L45-S", "L45-A", "C87-A", "C86-A", "C88-A", "C85-A", "C27-A", "C23-A", "C43-A", "S03-A", "SM-A", "C59-S"))
AllPops.gc <-as.genclone(dipsNtripsNtets.gc)
AllPops.gc$pop <- factor(AllPops.gc$pop, levels=c("B53-S", "B60-S", "B42-S", "B46-S", "B49-S", "L62-S", "L62-A", "L05-S", "L08-S", "L10-S", "L11-S", "L12-S", "L13-S", "L06-A", "L16-A", "L17-A", "L39-A", "L41-A","L45-S", "L45-A", "C87-A", "C86-A", "C88-A", "C85-A", "C27-A", "C23-A", "C43-A", "S03-A", "SM-A", "C59-S"))
ploidy.infotable <- info_table(AllPops.gc, type="ploidy")
ploidy.infotable[60:90,30:60]
mll(AllPops.gc)
# save(AllPops.gc, file="AllPopsReturnAlleles.gc.RData")
# save(AllPops.gc, file="AllPops.gc.RData")
#
# x.mat <- as.matrix(AllPops.gc)
# x.mat[x.mat == 0] <- "1/1"
# x.mat[x.mat == 1] <- "1/2"
# x.mat[x.mat == 2] <- "2/2"
#
# genind2genalex(AllPops.gc, "hookeri_genalex.txt", sep="\t", sequence=TRUE, overwrite = TRUE)
# hook.fstat <- genind2hierfstat(AllPops.gc)
# hookeri.AF <-genind2df(AllPops.gc)
# hookeri.AF <-cbind(ind = rownames(hookeri.AF), hookeri.AF)
# hookeri.AF[4]
#
# rownames(hookeri.AF) <- NULL
# write.table(hookeri.AF, file="Hookeri_AF.txt", sep="\t", row.names=TRUE, quote=FALSE)
# make more colors
n <- 60
qual_col_pals = brewer.pal.info[brewer.pal.info$category == 'qual',]
col_vector = unlist(mapply(brewer.pal, qual_col_pals$maxcolors, rownames(qual_col_pals)))
pie(rep(1,n), col=sample(col_vector, n))
cols <- brewer.pal(n = nPop(AllPops.gc), name = "Paired")
# allele frequencies
all.freqs <- tab(AllPops.gc, freq = TRUE)
all.freqs[50:100, 50:100]
###################
# data conversion #
###################
# genind2structure <- function(obj, file="", pops=FALSE){
# if(!"genind" %in% class(obj)){
# warning("Function was designed for genind objects.")
# }
#
# # get the max ploidy of the dataset
# pl <- max(obj@ploidy)
# # get the number of individuals
# S <- adegenet::nInd(obj)
# # column of individual names to write; set up data.frame
# tab <- data.frame(ind=rep(indNames(obj), each=pl))
# # column of pop ids to write
# if(pops){
# popnums <- 1:adegenet::nPop(obj)
# names(popnums) <- as.character(unique(adegenet::pop(obj)))
# popcol <- rep(popnums[as.character(adegenet::pop(obj))], each=pl)
# tab <- cbind(tab, data.frame(pop=popcol))
# }
# loci <- adegenet::locNames(obj)
# # add columns for genotypes
# tab <- cbind(tab, matrix(-9, nrow=dim(tab)[1], ncol=adegenet::nLoc(obj),
# dimnames=list(NULL,loci)))
#
# # begin going through loci
# for(L in loci){
# thesegen <- obj@tab[,grep(paste("^", L, "\\.", sep=""),
# dimnames(obj@tab)[[2]]),
# drop = FALSE] # genotypes by locus
# al <- 1:dim(thesegen)[2] # numbered alleles
# for(s in 1:S){
# if(all(!is.na(thesegen[s,]))){
# tabrows <- (1:dim(tab)[1])[tab[[1]] == indNames(obj)[s]] # index of rows in output to write to
# tabrows <- tabrows[1:sum(thesegen[s,])] # subset if this is lower ploidy than max ploidy
# tab[tabrows,L] <- rep(al, times = thesegen[s,])
# }
# }
# }
#
# # export table
# write.table(tab, file=file, sep="\t", quote=FALSE, row.names=FALSE)
# }
# genind2structure(AllPops.gc, file="AllPops.structure", pops=TRUE)
my_gt <- extract.gt(vcf, convertNA = FALSE)
my_gt <- t(my_gt)
my_gt[] <- gsub("[/|]", "", my_gt) # remove separators
my_gt[] <- gsub("1", "2", my_gt) # change 1 to 2
my_gt[] <- gsub("0", "1", my_gt) # change 0 to 1
my_gt[] <- gsub("\\.", "0", my_gt) # change . to 0
my_gt[1:30, 1:30]
my_gt <-cbind(ind = rownames(my_gt), my_gt)
#rownames(my_gt) <- NULL
write.table(my_gt, file="in.txt", sep="\t", row.names=TRUE, quote=FALSE)
relat_mat <- read.csv("relatedness_mat.csv", header=FALSE)
relat_mat <- as.matrix(relat_mat)
ncol(relat_mat)
nrow(relat_mat)
inds <- rownames(my_gt)
colnames(relat_mat) <- inds
rownames(relat_mat) <- inds
relat_mat <- as.matrix(relat_mat)
diss_mat = 3.6449128-relat_mat
simrel_pair <- as.dist(diss_mat)
############
# Clone ID #
############
# calculate raw euclidian distance
dist <- dist(AllPops.gc)
dist0 <- dist(dips.gc)
# assign MLG's using raw euclidian distance from dist() [above]
fstats <- filter_stats(AllPops.gc, distance=dist, plot=TRUE)
fstats0 <- filter_stats(dips.gc, distance=dist0, plot=TRUE)
# looks like this gives the same clone mlg assignments as my IBS stuff
mlg.filter(AllPops.gc, distance=dist) <- 100 # 100-140 is the plateau for 10 MLGs
mlg.filter(dips.gc, distance=dist0) <- 105 # after 70 C59 clumps together, after 100 other sexual pops start to clump
mlg.table(AllPops.gc)
mlg.table(dips.gc)
#mll(AllPops.gc)
# indNames(AllPops.gc)
#########
# Poppr #
#########
set.seed(420)
load("hookeri.poppr.RData")
#hookeri.poppr <- poppr(AllPops.gc, sample=999, clonecorrect = TRUE, strata=~ms/pop/id)
#hookeri.poppr.pop <- poppr(AllPops.gc, sample=999, clonecorrect = TRUE, strata=~pop/id)
B53 <- popsub(AllPops.gc, "B53-S")
B53.ia <- ia(B53, sample = 999)
B46 <- popsub(AllPops.gc, "B46-S")
B53.ia <- ia(B53, sample = 999)
###################
# private alleles #
###################
my.ploidy <- c(17, 17, 17, 17, 17, 17, 21, 17, 17, 17, 17, 17, 17, 21, 21, 21, 21, 21, 17, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 17)
my.ploidy <-replace(my.ploidy,my.ploidy==21, 3)
my.ploidy <-replace(my.ploidy,my.ploidy==17, 2)
my.ploidy[29] <- 4
my.ploidy
apo.ploidy <- my.ploidy[my.ploidy>2]
## tabcount is a matrix pop x alleles, counting alleles per pop
tabcount <- apply(tab(MULTI.gc), 2, tapply, pop(MULTI.gc), sum, na.rm=FALSE)
MULTI.gp <- new("genpop", tabcount, type="codom", ploidy=my.ploidy)
summary(AllPops.gp)
popNames(AllPops.gp)
# tabcount2 <- apply(tab(apos.gc), 2, tapply, pop(apos.gc), sum, na.rm=FALSE)
# apos.gp <- new("genpop", tabcount2, type="codom", ploidy=apo.ploidy)
# summary(apos.gp)
# popNames(apos.gp)
#
# dips.gp <- genind2genpop(dips.gc, pop=~pop)
pal <- private_alleles(MULTI.gp, level="population", report="data.frame")
ggplot(pal) + geom_tile(aes(x = population, y = allele, fill = count))
ggplot(pal) + geom_boxplot(aes(x=population, y= count))
#########
# AMOVA #
#########
hookeri.amova <- poppr.amova(AllPops.gc, ~ms/pop, within=FALSE, cutoff = 0.1)
hookeri.amova.cc <- poppr.amova(AllPops.gc, ~ms/pop, within=FALSE, cutoff = 0.1, clonecorrect = TRUE)
hookeri.amova.cc.pop <- poppr.amova(AllPops.gc, ~pop, within=FALSE, cutoff = 0.1, clonecorrect = TRUE)
amova.dips <- poppr.amova(dips.gc, ~pop, cutoff=0.1)
# pop_combinations <- combn(popNames(AllPops.gc), 2)
# amova_list.cc <- apply(pop_combinations, MARGIN = 2, function(i) poppr.amova(AllPops.gc[pop = i], ~pop, within=FALSE, cutoff=0.1, clonecorrect = TRUE))
#mlg.table(AllPops.gc)
#mll.custom(AllPops.gc) <- c(1,2,3, 4,5,6, 7,8,9, 10,11,12, 13,14,15, 16,17,18, 19,20,21, 22,23,24, 25,26,27, 28,29,30, 31,32,33, 34,35,36, 37,38,39,40, #B42 - L62
#C23 #C27 #C43 #C85 #C86 #C87 #C88 #L06 #L16 #L17 #L39 #L41 #L45 change? #L62 #S03 #SM
# 41,41,41,41,41, 42,42,42,42,42, 43,43,43,43,43, 42,42,42,42,42, 42,42,42,42,42, 42,42,42,42,42, 42,42,42,42,42, 44,44,44,44,44, 45,45,44,45,44, 44,44,44,44,44, 46,46,46,46,46, 42,42,42,42, 47,42,42,42,42, 47, 48,48,48,48, 49,49,49,49,49)
sampleorder <- match(indNames(AllPops.gc), mystrata$id)
#######
# Fst #
#######
# Ploidy per population, initially encoded with the same codes as the plotting
# symbols (17 = sexual, 21 = apomict), then recoded: 21 -> triploid (3),
# 17 -> diploid (2); population 29 is set to tetraploid (4).
my.ploidy <- c(17, 17, 17, 17, 17, 17, 21, 17, 17, 17, 17, 17, 17, 21, 21, 21, 21, 21, 17, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 17)
my.ploidy <-replace(my.ploidy,my.ploidy==21, 3)
my.ploidy <-replace(my.ploidy,my.ploidy==17, 2)
my.ploidy[29] <- 4
# Pairwise Fst matrix between populations.
# NOTE(review): pairwise.fst was deprecated in recent adegenet releases in
# favour of hierfstat::pairwise.WCfst / genet.dist -- confirm package version.
matFST <- pairwise.fst(AllPops.gc, res.type="matrix")
# Nei's standard genetic distance (method = 1) between populations, and a
# neighbour-joining tree built from it.
Nei.mat <- dist.genpop(AllPops.gp, method=1)
Nei.tree <- nj(Nei.mat)
# Unrooted NJ tree with tips coloured by mating system.
plot.phylo(Nei.tree, type="unrooted", tip.col=my.cols.ms, cex=0.6, lab4ut = "axial", font=2, show.tip.label = TRUE, no.margin = TRUE)
#annot <- round(Nei.tree$edge.length,2)
#edgelabels(annot[annot>0], which(annot>0), frame="n")
add.scale.bar()
# Same distance/tree construction for the diploid (sexual) populations only.
Nei.mat.dips <- dist.genpop(dips.gp, method=1)
Nei.tree.dips <- nj(Nei.mat.dips)
plot.phylo(Nei.tree.dips, type="unrooted", tip.col="coral3", cex=0.6, lab4ut = "axial", font=2, show.tip.label = TRUE, no.margin = TRUE)
#edgelabels(annot[annot>0], which(annot>0), frame="n")
add.scale.bar()
##################
##### DAPC #######
##################
###########
# K-means #
###########
# Interactive K-means clustering to choose the number of genetic clusters.
grp <- find.clusters(AllPops.gc, max.n.clust=30) # looks like lowest BIC is 13 clusters...
names(grp)
# Cross-tabulate original populations against inferred clusters.
table(pop(AllPops.gc), grp$grp)
table.value(table(pop(AllPops.gc), grp$grp), col.lab=paste("inf", 1:13),
row.lab=paste("ori", 1:13))
## from vcfR -- run k-means multiple times
# library(vcfR)
# vcf <- read.vcfR("prubi_gbs.vcf.gz")
# pop.data <- read.table("population_data.gbs.txt", sep = "\t", header = TRUE)
# all(colnames(vcf@gt)[-1] == pop.data$AccessID)
# ## [1] TRUE
# gl_rubi <- vcfR2genlight(vcf)
# library(adegenet)
# Run K-means 50 times for K = 1..30 and collect the BIC profile of each run,
# to assess the stability of the "best K" choice.
maxK <- 30
myMat <- matrix(nrow=50, ncol=maxK)
colnames(myMat) <- 1:ncol(myMat)
for(i in 1:nrow(myMat)){
grp <- find.clusters(AllPops.gc, n.pca = 200, choose.n.clust = FALSE, max.n.clust = maxK)
myMat[i,] <- grp$Kstat
}
# NOTE(review): melt() needs reshape2, which is only loaded further down the
# script (library(reshape2)) -- load it before this point when running top-down.
my_df2 <- melt(myMat)
colnames(my_df2)[1:3] <- c("Group", "K", "BIC")
my_df2$K <- as.factor(my_df2$K)
head(my_df2)
# Boxplot of BIC across the 50 replicate runs for each K.
p1 <- ggplot(my_df2, aes(x = K, y = BIC))
p1 <- p1 + geom_boxplot()
p1 <- p1 + theme_bw()
p1 <- p1 + xlab("Number of groups (K)")
p1 # 10 clusters kind of looks right
# plot 2 #
# DAPC scatter for a single fixed K (K = 9). The loop structure mirrors the
# multi-K loop below so the two sections stay parallel.
my_k0 <- 9
grp0_l <- vector(mode = "list", length = length(my_k0))
dapc0_l <- vector(mode = "list", length = length(my_k0))
for(i in 1:length(dapc0_l)){
set.seed(10)
grp0_l[[i]] <- find.clusters(AllPops.gc, n.pca = 200, n.clust = my_k0[i])
dapc0_l[[i]] <- dapc(AllPops.gc, pop = grp0_l[[i]]$grp, n.pca = 20, n.da = my_k0[i])
# dapc_l[[i]] <- dapc(gl_rubi, pop = grp_l[[i]]$grp, n.pca = 3, n.da = 2)
}
# Individual coordinates on the discriminant axes, labelled with the inferred
# cluster of each sample.
my_df0 <- as.data.frame(dapc0_l[[ length(dapc0_l) ]]$ind.coord)
my_df0$Group <- dapc0_l[[ length(dapc0_l) ]]$grp
head(my_df0)
# Scatter of the first two discriminant axes, points filled by cluster
# ("66" suffix adds alpha to the hex fill colours).
p2 <- ggplot(my_df0, aes(x = LD1, y = LD2, color = Group, fill = Group))
p2 <- p2 + geom_point(size = 4, shape = 21)
p2 <- p2 + theme_bw()
p2 <- p2 + scale_color_manual(values=c(col_vector))
p2 <- p2 + scale_fill_manual(values=c(paste(col_vector, "66", sep = "")))
p2
# plot 3
# Run K-means + DAPC for a range of K values (9..12) to compare posterior
# membership assignments across K.
my_k <- 9:12
grp_l <- vector(mode = "list", length = length(my_k))
dapc_l <- vector(mode = "list", length = length(my_k))
for(i in 1:length(dapc_l)){
set.seed(10)
grp_l[[i]] <- find.clusters(AllPops.gc, n.pca = 200, n.clust = my_k[i])
dapc_l[[i]] <- dapc(AllPops.gc, pop = grp_l[[i]]$grp, n.pca = 20, n.da = my_k[i])
# dapc_l[[i]] <- dapc(gl_rubi, pop = grp_l[[i]]$grp, n.pca = 3, n.da = 2)
}
# NOTE(review): this ind.coord data frame is overwritten a few lines below by
# the long-format posterior table -- it is effectively dead code.
my_df <- as.data.frame(dapc_l[[ length(dapc_l) ]]$ind.coord)
my_df$Group <- dapc_l[[ length(dapc_l) ]]$grp
head(my_df)
# Build a long-format table of posterior membership probabilities: one row per
# (sample, K, cluster), with the sample's original population attached.
tmp <- as.data.frame(dapc_l[[1]]$posterior)
tmp$K <- my_k[1]
tmp$Sample <- rownames(tmp)
tmp <- melt(tmp, id = c("Sample", "K"))
names(tmp)[3:4] <- c("Group", "Posterior")
tmp$pop <- mystrata$pop[match(tmp$Sample, mystrata$id)]
my_df <- tmp
# Append the remaining K values to the same long table.
for(i in 2:length(dapc_l)){
tmp <- as.data.frame(dapc_l[[i]]$posterior)
tmp$K <- my_k[i]
tmp$Sample <- rownames(tmp)
tmp <- melt(tmp, id = c("Sample", "K"))
names(tmp)[3:4] <- c("Group", "Posterior")
tmp$pop <- mystrata$pop[match(tmp$Sample, mystrata$id)]
my_df <- rbind(my_df, tmp)
}
# Facet labels ("K = 9", ...) and a fixed geographic/mating-system ordering of
# population levels for plotting.
grp.labs <- paste("K =", my_k)
names(grp.labs) <- my_k
my_df$pop <- factor(my_df$pop, levels=c("B53-S", "B60-S", "B42-S", "B46-S", "B49-S", "L62-S", "L62-A", "L05-S", "L08-S", "L10-S", "L11-S", "L12-S", "L13-S", "L06-A", "L16-A", "L17-A", "L39-A", "L41-A","L45-S", "L45-A", "C87-A", "C86-A", "C88-A", "C85-A", "C27-A", "C23-A", "C43-A", "S03-A", "SM-A", "C59-S"))
# my_df0$Group <- as.character(my_df0$Group)
# my_df$Group <- as.character(my_df$Group)
#
# my_df0[ my_df0$K == 10 & my_df0$Group == 1, "Group"] <- "A"
# my_df0[ my_df0$K == 10 & my_df0$Group == 2, "Group"] <- "B"
# my_df0[ my_df0$K == 10 & my_df0$Group == 3, "Group"] <- "C"
# my_df0[ my_df0$K == 10 & my_df0$Group == 4, "Group"] <- "D"
# my_df0[ my_df0$K == 10 & my_df0$Group == 5, "Group"] <- "E"
# my_df0[ my_df0$K == 10 & my_df0$Group == 6, "Group"] <- "F"
# my_df0[ my_df0$K == 10 & my_df0$Group == 7, "Group"] <- "G"
# my_df0[ my_df0$K == 10 & my_df0$Group == 8, "Group"] <- "H"
# my_df0[ my_df0$K == 10 & my_df0$Group == 9, "Group"] <- "I"
# my_df0[ my_df0$K == 10 & my_df0$Group == 10, "Group"] <- "J"
#
# my_df[ my_df$K == 10 & my_df$Group == 1, "Group"] <- "A"
# my_df[ my_df$K == 10 & my_df$Group == 2, "Group"] <- "B"
# my_df[ my_df$K == 10 & my_df$Group == 3, "Group"] <- "C"
# my_df[ my_df$K == 10 & my_df$Group == 4, "Group"] <- "D"
# my_df[ my_df$K == 10 & my_df$Group == 5, "Group"] <- "E"
# my_df[ my_df$K == 10 & my_df$Group == 6, "Group"] <- "F"
# my_df[ my_df$K == 10 & my_df$Group == 7, "Group"] <- "G"
# my_df[ my_df$K == 10 & my_df$Group == 8, "Group"] <- "H"
# my_df[ my_df$K == 10 & my_df$Group == 9, "Group"] <- "I"
# my_df[ my_df$K == 10 & my_df$Group == 10, "Group"] <- "J"
#
# my_df[ my_df$K == 11 & my_df$Group == 1, "Group"] <- "A"
# my_df[ my_df$K == 11 & my_df$Group == 2, "Group"] <- "B"
# my_df[ my_df$K == 11 & my_df$Group == 3, "Group"] <- "C"
# my_df[ my_df$K == 11 & my_df$Group == 4, "Group"] <- "D"
# my_df[ my_df$K == 11 & my_df$Group == 5, "Group"] <- "E"
# my_df[ my_df$K == 11 & my_df$Group == 6, "Group"] <- "F"
# my_df[ my_df$K == 11 & my_df$Group == 7, "Group"] <- "G"
# my_df[ my_df$K == 11 & my_df$Group == 8, "Group"] <- "H"
# my_df[ my_df$K == 11 & my_df$Group == 9, "Group"] <- "I"
# my_df[ my_df$K == 11 & my_df$Group == 10, "Group"] <- "J"
# my_df[ my_df$K == 11 & my_df$Group == 11, "Group"] <- "K"
#
# my_df[ my_df$K == 12 & my_df$Group == 1, "Group"] <- "A"
# my_df[ my_df$K == 12 & my_df$Group == 2, "Group"] <- "B"
# my_df[ my_df$K == 12 & my_df$Group == 3, "Group"] <- "C"
# my_df[ my_df$K == 12 & my_df$Group == 4, "Group"] <- "D"
# my_df[ my_df$K == 12 & my_df$Group == 5, "Group"] <- "E"
# my_df[ my_df$K == 12 & my_df$Group == 6, "Group"] <- "F"
# my_df[ my_df$K == 12 & my_df$Group == 7, "Group"] <- "G"
# my_df[ my_df$K == 12 & my_df$Group == 8, "Group"] <- "H"
# my_df[ my_df$K == 12 & my_df$Group == 9, "Group"] <- "I"
# my_df[ my_df$K == 12 & my_df$Group == 10, "Group"] <- "J"
# my_df[ my_df$K == 12 & my_df$Group == 11, "Group"] <- "K"
# my_df[ my_df$K == 12 & my_df$Group == 12, "Group"] <- "L"
#
# my_df[ my_df$K == 13 & my_df$Group == 1, "Group"] <- "A"
# my_df[ my_df$K == 13 & my_df$Group == 2, "Group"] <- "B"
# my_df[ my_df$K == 13 & my_df$Group == 3, "Group"] <- "C"
# my_df[ my_df$K == 13 & my_df$Group == 4, "Group"] <- "D"
# my_df[ my_df$K == 13 & my_df$Group == 5, "Group"] <- "E"
# my_df[ my_df$K == 13 & my_df$Group == 6, "Group"] <- "F"
# my_df[ my_df$K == 13 & my_df$Group == 7, "Group"] <- "G"
# my_df[ my_df$K == 13 & my_df$Group == 8, "Group"] <- "H"
# my_df[ my_df$K == 13 & my_df$Group == 9, "Group"] <- "I"
# my_df[ my_df$K == 13 & my_df$Group == 10, "Group"] <- "J"
# my_df[ my_df$K == 13 & my_df$Group == 11, "Group"] <- "K"
# my_df[ my_df$K == 13 & my_df$Group == 12, "Group"] <- "L"
# my_df[ my_df$K == 13 & my_df$Group == 13, "Group"] <- "M"
# Structure-style stacked barplot of posterior membership probabilities,
# faceted by K (rows) and original population (columns).
p3 <- ggplot(my_df, aes(x = Sample, y = Posterior, fill = Group))
p3 <- p3 + geom_bar(stat = "identity")
p3 <- p3 + facet_grid(K ~ pop, scales = "free_x", space = "free",
labeller = labeller(K = grp.labs))
p3 <- p3 + theme_bw()
p3 <- p3 + ylab("Posterior membership probability")
p3 <- p3 + theme(legend.position='none')
#p3 <- p3 + scale_color_brewer(palette="Paired")
p3 <- p3 + scale_fill_manual(values=c(col_vector))
p3 <- p3 + theme(axis.text.x = element_text(angle = 90, hjust = 1, size = 8),panel.spacing.x=unit(0.1, "lines"))
p3
# Composite figure: (A) BIC boxplots + (B) DAPC scatter on top, (C) membership
# barplots below, with the bottom row twice as tall.
ggarrange(ggarrange(p1,
p2,
ncol = 2, labels = c("A", "B")),
p3,
nrow = 2,
labels = c("", "C"),
heights = c(1, 2)
)
# optimize number of PCs to keep
# a-score optimization: trade-off between discrimination power and overfitting.
dapc.x <- dapc(AllPops.gc, n.da=100, n.pca=50)
temp <- optim.a.score(dapc.x) #11 is the optimal number of PCs
# xval
# Cross-validation (90% training) as a second criterion for the PC count.
xtab <- tab(AllPops.gc, NA.method="mean")
grp <- pop(AllPops.gc)
xval <- xvalDapc(xtab, grp, n.pca.max = 300, training.set = 0.9,
result = "groupMean", center = TRUE, scale = FALSE,
n.pca = NULL, n.rep = 30, xval.plot = TRUE)
xval[2:6] # 20 PCs has the highest prediction and lowest error
# make the dapc
# set shapes - triangle for sexuals, circles for apos
# Symbol codes per population; 21 is immediately recoded to 19 (filled circle).
my.pch <- c(17, 17, 17, 17, 17, 17, 21, 17, 17, 17, 17, 17, 17, 21, 21, 21, 21, 21, 17, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 17)
my.pch <-replace(my.pch,my.pch==21, 19)
# Symbol vector for the subset analysis below (drops populations 26, 28-30).
my.pch.sub <- my.pch[-c(30,29,28,26)]
# for ms
# DAPC with mating system (S = sexual, A = apomictic) as the grouping factor.
setPop(AllPops.gc) <- ~ms
AllPops.gc$pop <- factor(AllPops.gc$pop, levels=c("S", "A"))
hookeri.dapc.ms <- dapc(AllPops.gc, grp=AllPops.gc$grp, n.pca=20, n.da=100)
scatter(hookeri.dapc.ms, grp = AllPops.gc$pop, cex = 2, legend = TRUE, clabel = T, posi.leg = "bottomleft", scree.pca = TRUE, posi.pca = "topleft", cleg = 0.75)
# all pops, but color by ms
setPop(AllPops.gc) <- ~pop
hookeri.dapc.msp <- dapc(AllPops.gc, grp=AllPops.gc$grp, n.pca=20, n.da=100)
scatter(hookeri.dapc.msp, grp = AllPops.gc$strata$ms, cex = 2, legend = TRUE, clabel = T,cstar=0, posi.leg = "bottomleft", scree.pca = TRUE, posi.pca = "topleft", cleg = 0.75, pch=c(17,19), col=cols.ms)
# for pops (all)
# Main DAPC: population as the grouping factor, 20 PCs retained (per xval).
setPop(AllPops.gc) <- ~pop
AllPops.gc$pop <- factor(AllPops.gc$pop, levels=c("B53-S", "B60-S", "B42-S", "B46-S", "B49-S", "L62-S", "L62-A", "L05-S", "L08-S", "L10-S", "L11-S", "L12-S", "L13-S", "L06-A", "L16-A", "L17-A", "L39-A", "L41-A","L45-S", "L45-A", "C87-A", "C86-A", "C88-A", "C85-A", "C27-A", "C23-A", "C43-A", "S03-A", "SM-A", "C59-S"))
hookeri.dapc <- dapc(AllPops.gc, grp=AllPops.gc$grp, n.pca=20, n.da=100)
scatter(hookeri.dapc, grp = AllPops.gc$pop, cex = 2, legend = TRUE, clabel = F, posi.leg = "bottomleft", scree.pca = TRUE, posi.pca = "topleft", cleg = 0.75, pch=my.pch)
# sub a few pops
# DAPC excluding the Yukon/outlying populations listed in the blacklist.
noYK.gc <- popsub(AllPops.gc, blacklist=c("C59-S", "SM-A", "C23-A", "S03-A"))
hookeri.dapc2 <- dapc(noYK.gc, grp=noYK.gc$pop, n.pca=20, n.da=100)
scatter(hookeri.dapc2, grp = noYK.gc$pop, cex = 2, legend = TRUE, clabel = F, posi.leg = "bottomleft", scree.pca = TRUE, posi.pca = "topleft", cleg = 0.75, pch=my.pch.sub)
# apo only pops
# DAPC restricted to the apomictic populations (10 PCs retained).
setPop(apos.gc) <- ~pop
apos.gc$pop <- factor(apos.gc$pop, levels=c("L62-A", "L06-A", "L16-A", "L17-A", "L39-A", "L41-A", "L45-A", "C87-A", "C86-A", "C88-A", "C85-A", "C27-A", "C23-A", "C43-A", "S03-A", "SM-A"))
apo.dapc <- dapc(apos.gc, grp=apos.gc$grp, n.pca=10, n.da=100)
scatter(apo.dapc, grp = apos.gc$pop, cex = 2, legend = TRUE, clabel = F, posi.leg = "bottomleft", scree.pca = TRUE, posi.pca = "topleft", cleg = 0.75)
# sex only pops
# DAPC restricted to the diploid (sexual) populations; PC/DA counts chosen
# interactively here (no n.pca/n.da arguments).
setPop(dips.gc) <- ~pop
dips.gc$pop <- factor(dips.gc$pop, levels=c("B53-S", "B60-S", "B42-S", "B46-S", "B49-S", "L62-S","L05-S", "L08-S", "L10-S", "L11-S", "L12-S", "L13-S", "L45-S", "C59-S"))
dips.dapc <- dapc(dips.gc, grp=dips.gc$grp)
scatter(dips.dapc, grp = dips.gc$pop, cex = 2, legend = TRUE, clabel = F, posi.leg = "bottomleft", scree.pca = TRUE, posi.pca = "topleft", cleg = 0.75)
# OneMLG
# DAPC on the reduced data set keeping the diploid populations plus one
# representative multilocus genotype per apomictic population (23 pops).
# Plotting symbols: 17 = triangle (sexual), 21 -> 19 = filled circle (apomict).
pch.OneMLG <- c(17, 17, 17, 17, 17, 17, 21, 17, 17, 17, 17, 17, 17, 21, 21, 21, 21, 17, 21, 21, 21, 21, 17)
# BUG FIX: the original line recoded `my.pch` (30 populations) instead of
# `pch.OneMLG`, silently replacing the 23-element vector defined above with a
# 30-element one, so the symbols no longer matched the 23 OneMLG populations.
pch.OneMLG <- replace(pch.OneMLG, pch.OneMLG == 21, 19)
setPop(OneMLG.gc) <- ~pop
OneMLG.gc$pop <- factor(OneMLG.gc$pop, levels=c("B53-S", "B60-S", "B42-S", "B46-S", "B49-S", "L62-S", "L62-A", "L05-S", "L08-S", "L10-S", "L11-S", "L12-S", "L13-S", "L16-A", "L17-A", "L39-A", "L41-A","L45-S", "C23-A", "C43-A", "S03-A", "SM-A", "C59-S"))
OneMLG.dapc <- dapc(OneMLG.gc, grp=OneMLG.gc$grp)
scatter(OneMLG.dapc, grp = OneMLG.gc$pop, cex = 2, legend = TRUE, clabel = F, posi.leg = "bottomleft", scree.pca = TRUE, posi.pca = "topleft", cleg = 0.75, pch=pch.OneMLG)
# loadings
# Allele loadings on the first discriminant axis (threshold highlights the
# most contributing alleles).
contib <- loadingplot(hookeri.dapc$var.contr, axis=1, thres=0.0003, lab.jitter = 1)
# membership probabilities
summary(hookeri.dapc)
assignplot(hookeri.dapc)
compoplot(hookeri.dapc,posi="bottomright", lab="", ncol=1, xlab="individuals")
# structure-style plot
# Posterior membership probabilities per individual, for the full data set and
# for the apomict-only DAPC.
dapc.results <- as.data.frame(hookeri.dapc$posterior)
dapc.results$pop <- pop(AllPops.gc)
dapc.results$indNames <- rownames(dapc.results)
dapc.results.apo <- as.data.frame(apo.dapc$posterior)
dapc.results.apo$pop <- pop(apos.gc)
dapc.results.apo$indNames <- rownames(dapc.results.apo)
library(reshape2)
# Wide -> long: one row per (individual, assigned population).
dapc.results <- melt(dapc.results)
colnames(dapc.results) <- c("Original_Pop","Sample","Assigned_Pop","Posterior_membership_probability")
# Plot posterior assignments from DAPC (how is this different from Structure?)
p4 <- ggplot(dapc.results, aes(x=Sample, y=Posterior_membership_probability, fill=Assigned_Pop))
p4 <- p4 + geom_bar(stat='identity')
p4 <- p4 + scale_fill_manual(values = col_vector)
p4 <- p4 + facet_grid(~Original_Pop, scales = "free")
p4 <- p4 + theme(axis.text.x = element_text(angle = 90, hjust = 1, size = 8), panel.spacing.x=unit(0.1, "lines"))
p4
library(reshape2)
# Same plot for the apomict-only DAPC.
dapc.results.apo <- melt(dapc.results.apo)
colnames(dapc.results.apo) <- c("Original_Pop","Sample","Assigned_Pop","Posterior_membership_probability")
# Plot posterior assignments from DAPC (how is this different from Structure?)
p5 <- ggplot(dapc.results.apo, aes(x=Sample, y=Posterior_membership_probability, fill=Assigned_Pop))
p5 <- p5 + geom_bar(stat='identity')
p5 <- p5 + scale_fill_manual(values = col_vector)
p5 <- p5 + facet_grid(~Original_Pop, scales = "free")
p5 <- p5 + theme(axis.text.x = element_text(angle = 90, hjust = 1, size = 8), panel.spacing.x=unit(0.1, "lines"))
p5
# create figure
# tiff("DAPC.tiff", res=300, units="in", width=8.6, height=5.8)
# scatter(hookeri.dapc2, grp = noYK.gc$pop, cex = 2, legend = TRUE, clabel = F, posi.leg = "bottomleft", scree.pca = TRUE, posi.pca = "topleft", cleg = 0.75)
# dev.off()
#seasun(length(levels(mdapc$grp)))[mdapc$grp]
#col = col_vector
#grp = dipsNtripsNtets.gc@strata$region
#########
# Trees #
#########
# Bootstrapped (200 replicates) neighbour-joining trees on Provesti distance;
# node labels below cutoff = 50% support are hidden.
# aboot(AllPops.gc, dist = dist(), sample = 200, tree = "nj", cutoff = 50, quiet = FALSE)
#
# theTree <- dist %>%
# nj() %>% # calculate neighbor-joining tree
# ladderize() # organize branches by clade
# plot(theTree)
# add.scale.bar(length = 0.05)
# for inds, all
hookeri.nj <- aboot(AllPops.gc, dist = provesti.dist, sample = 200, tree = "nj", cutoff = 50, quiet = TRUE)
# Export the tree in Newick and Nexus formats.
write.tree(hookeri.nj, file = "hookeri_nj.NEWICK", append = FALSE,
digits = 10, tree.names = FALSE)
write.nexus(hookeri.nj, file = "hookeri_nj.nex")
# NOTE(review): `oneMLG.nj` is only defined near the end of this section, so
# the next line fails when the script is run top-to-bottom -- reorder if needed.
plot.phylo(oneMLG.nj, tip.color = cols.ms[OneMLG.gc$strata$ms], type="unrooted", cex=0.6, lab4ut = "axial", font=2)
add.scale.bar()
plot.phylo(hookeri.nj, tip.color = cols.ms[AllPops.gc$strata$ms], type="unrooted", cex=0.6, lab4ut = "axial", font=2, show.tip.label = FALSE)
tiplabels(pch=21, col="black", bg=cols.ms[AllPops.gc$strata$ms])
# NOTE(review): bare `DAPC.cols` just prints the object; it is not defined in
# this section -- confirm it exists, otherwise remove.
DAPC.cols
# Same tree rendered with ggtree, tips labelled and coloured by mating system.
ggtree(hookeri.nj, layout="unrooted")+
geom_tiplab()+
geom_label(label=hookeri.nj$tip.label, fill=cols.ms[AllPops.gc$strata$ms])
# inds, apo's only
apo.inds.nj <- aboot(apos.gc, dist=provesti.dist, sample=200, tree="nj", cutoff=50, quiet=TRUE)
plot.phylo(apo.inds.nj, cex=0.8)
nodelabels(apo.inds.nj$node.label, adj = c(1.5, -0.7), frame = "n", cex = 0.8,
font = 3, xpd = TRUE)
# inds, dips only
dips.inds.nj <- aboot(dips.gc, dist=provesti.dist, sample=200, tree="nj", cutoff=50, quiet=TRUE)
plot.phylo(dips.inds.nj, cex=0.8, tip.color = cols.ms[dips.gc$strata$ms], type="unrooted", lab4ut = "axial")
nodelabels(dips.inds.nj$node.label, adj = c(1.5, -0.7), frame = "n", cex = 0.8,
font = 3, xpd = TRUE)
# for pops (all)
# Population-level trees built from the genpop objects.
hookpop.nj <- aboot(AllPops.gp, dist = provesti.dist, sample = 200, tree = "nj", cutoff = 50, quiet = TRUE)
plot.phylo(hookpop.nj, cex=0.8, tip.color = my.cols.ms)
nodelabels(hookpop.nj$node.label, adj = c(1.5, -0.7), frame = "n", cex = 0.8,
font = 3, xpd = TRUE)
axisPhylo(3)
# for pops (apos)
apopop.nj <- aboot(apos.gp, dist = provesti.dist, sample = 200, tree = "nj", cutoff = 50, quiet = TRUE)
plot.phylo(apopop.nj, cex=0.8)
nodelabels(apopop.nj$node.label, adj = c(1.5, -0.7), frame = "n", cex = 0.8,
font = 3, xpd = TRUE)
axisPhylo(3)
# Two-colour palette indexed by mating system (apomict/sexual).
cols.ms <- c("coral3","cornflowerblue")
# for pops (dips)
dipspop.nj <- aboot(dips.gp, dist = provesti.dist, sample = 200, tree = "nj", cutoff = 50, quiet = TRUE)
plot.phylo(dips.inds.nj, cex=0.8, tip.color = cols.ms[dips.gc$strata$ms], type="unrooted", lab4ut = "axial")
plot.phylo(dipspop.nj, cex=0.8, type="unrooted", lab4ut = "axial")
nodelabels(dipspop.nj$node.label, adj = c(1.5, -0.7), frame = "n", cex = 0.8,
font = 3, xpd = TRUE)
axisPhylo(3)
# AllPops.gc %>%
# genind2genpop(pop = ~pop) %>%
# aboot(cutoff = 50, quiet = FALSE, sample = 1000, distance = nei.dist)
# dips + 1 MLG each, inds
#OneMLG.gc
oneMLG.nj <- aboot(OneMLG.gc, dist=provesti.dist, sample=200, tree="nj", cutoff=50, quiet=TRUE)
plot.phylo(oneMLG.nj, cex=0.8, tip.color = cols.ms[OneMLG.gc$strata$ms])
nodelabels(oneMLG.nj$node.label, adj = c(1.5, -0.7), frame = "n", cex = 0.8,
font = 3, xpd = TRUE)
axisPhylo(3)
plot.phylo(oneMLG.nj, tip.color = cols.ms[OneMLG.gc$strata$ms], type="unrooted", cex=0.6, lab4ut = "axial", font=2)
add.scale.bar()
#######
# MSN #
#######
# Minimum spanning network of multilocus genotypes, coloured by mating system.
# BUG FIX: setPop() is a replacement function in adegenet; the original call
# `setPop(AllPops.gc, ~ms/pop)` discarded its result, so the population
# hierarchy was never actually applied. Use the assignment form.
setPop(AllPops.gc) <- ~ms/pop
# `ddist` is a genetic distance matrix computed earlier in the analysis
# (not visible in this section).
msn <- poppr.msn(AllPops.gc, ddist, showplot = FALSE)
# Colour vector derived from the plotting-symbol vector `my.pch`:
# apomict populations -> slateblue4, sexual populations (17) -> red3.
# BUG FIX: `my.pch` has its 21s recoded to 19 earlier in the script, so the
# original test `my.pch == 21` could match nothing and leave literal "19"
# entries (an invalid colour); accept both symbol codes.
my.cols.ms <- replace(my.pch, my.pch %in% c(21, 19), "slateblue4")
my.cols.ms <- replace(my.cols.ms, my.cols.ms == 17, "red3")
# (Removed a stray `replace(my.pch, my.pch == 21, 19)` call whose result was
# discarded -- it had no effect.)
# inds = "none" suppresses the individual labels on the network plot.
plot_poppr_msn(AllPops.gc, msn, inds="none", palette=my.cols.ms)
|
b7c4ee55a35ea80f9545d88386fe895db6809692
|
b80bd80ccb982b69d7c910901e5d1a5cb1d5ea07
|
/server.R
|
84232d1e3a2a605791a9a723d025d3eea1da7c52
|
[] |
no_license
|
fazepher/AED-datos-franceses
|
3fc7b9b09ff2c6d528a6fffe6ba22ee701dc02f7
|
5820bbf8d53e04fb3a762ccf908ffe01343b913c
|
refs/heads/master
| 2020-05-22T10:28:55.553630
| 2019-05-24T01:51:51
| 2019-05-24T01:51:51
| 186,310,717
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,406
|
r
|
server.R
|
############################################################################################################
################################################# TESIS FN #################################################
##################################### FERNANDO ANTONIO ZEPEDA HERRERA ######################################
################################################ ITAM 2019 #################################################
############################################################################################################
############################################################################################################
#################################################### AED ###################################################
############################################################################################################
############################################# SERVER APLICACION ############################################
############################################################################################################
#### Preambulo ####
library(shiny)
library(shinythemes)
library(shinyWidgets)
library(flexdashboard)
library(shinycssloaders)
library(rlang)
library(tidyr)
library(dplyr)
library(ggplot2)
library(readr)
library(stringr)
library(magrittr)
library(geofacet)
library(DT)
#### Servidor (server) ####
# Shiny server for the exploratory-analysis (AED) dashboard relating French
# commune-level electoral results to INSEE census variables. Every output is
# recomputed reactively from the user's selections: election (input$elec),
# party family (input$familia), census variable/category (input$var,
# input$cat), sample toggle (input$muestra) and minimum population
# (input$pob_min).
# NOTE(review): depends on objects defined elsewhere in the app
# (datos_electorales_completos, COMUNAS_2007, Muestra, tabla_variables,
# paleta_tesis_fn, datos_censales, otros_datos_comunales, fr_anc_reg_metr and
# the genera_*/geofacet_* helpers) -- confirm they are loaded by global.R or a
# sourced script before the server starts.
server <- function(input,output){
# Initial selections ----
# Four-digit election year extracted from the election label.
aaaa <- reactive({
str_extract_all(input$elec,"[0-9]{4}") %>% unlist
})
# Plot colour associated with the selected party family.
color <- reactive({
filter(paleta_tesis_fn, FAMILIA == input$familia) %>%
extract2("COLOR")
})
# Selected category ----
# Census categories available for the chosen variable, as a named vector
# (names = display labels, values = column names).
cats_disp <- reactive({
filter(tabla_variables, Variable == input$var) %>%
{set_names(extract2(.,"Cats"),extract2(.,"Etiqueta"))}
})
# Dynamic drop-down for the category, rebuilt whenever the variable changes.
output$cat <- renderUI({
selectInput("cat","Categoría",req(cats_disp()))
})
# Display label of the currently selected category column.
etiqueta_cat <- reactive({
filter(tabla_variables, Cats == req(input$cat)) %>%
extract2("Etiqueta")
})
# Selected data ----
# Electoral data for the chosen election and family, optionally restricted to
# the sampled communes. Vote share is smoothed as (votes + 0.5)/(registered + 1)
# to keep it strictly inside (0, 1).
datos_electorales <- reactive({
filter(datos_electorales_completos,
ELECCION == input$elec,
FAMILIA == input$familia,
CODGEO %in% if(input$muestra){Muestra$CODGEO}else{COMUNAS_2007$CODGEO}) %>%
mutate(PCT_VOTOS_BR = (VOT_CANDIDATO + 0.5)/(INSCRITOS + 1))
})
# National summary statistics of the smoothed vote share.
output$tabla_votos_nal <- renderTable({
datos_electorales() %>%
summarise(Mediana = median(PCT_VOTOS_BR),
Media = mean(PCT_VOTOS_BR),
Mínimo = min(PCT_VOTOS_BR),
Máximo = max(PCT_VOTOS_BR),
`Cuartil 1` = quantile(PCT_VOTOS_BR,0.25),
`Cuartil 3` = quantile(PCT_VOTOS_BR,0.75),
`Desviación Estándar` = sd(PCT_VOTOS_BR)) %>%
ungroup() %>%
mutate_if(is.numeric,list(~ round(.,3))) %>%
gather(Estadístico, `% Votos`)
})
# Census data for the election year: metropolitan communes only, above the
# population threshold, with the selected category column renamed to Pct and
# missing values dropped.
datos_graf_solo_insee <- reactive({
datos_censales %>%
filter(AAAA == aaaa(),
CODGEO %in% if(input$muestra){Muestra$CODGEO}else{COMUNAS_2007$CODGEO}) %>%
inner_join(COMUNAS_2007) %>%
filter(COD_REG %in% fr_anc_reg_metr$code) %>%
filter(Pob >= input$pob_min) %>%
left_join(otros_datos_comunales, by = c("CODGEO","AAAA")) %>%
select(CODGEO, COD_DPTO:NOM_REG, req(input$cat)) %>%
dplyr::rename(Pct = UQ(req(input$cat))) %>%
filter(!is.na(Pct))
})
# National summary statistics of the selected census category.
output$tabla_cat_nal <- renderTable({
datos_graf_solo_insee() %>%
summarise_at("Pct",
list(Mediana = ~ median(.),
Media = ~ mean(.),
Mínimo = ~ min(.),
Máximo = ~ max(.),
`Cuartil 1` = ~ quantile(.,0.25),
`Cuartil 3` = ~ quantile(.,0.75),
`Desviación Estándar` = ~sd(.))) %>%
ungroup() %>%
mutate_if(is.numeric,list(~ round(.,3))) %>%
gather(Estadístico, `% Pob`)
})
# Vote-share distributions ----
# Regional distribution data (genera_distr_reg presumably attaches the
# regional and national medians used below -- defined elsewhere).
datos_votos_reg <- reactive({
datos_electorales() %>%
inner_join(COMUNAS_2007) %>%
genera_distr_reg(var = PCT_VOTOS_BR)
})
# Geofaceted histograms of vote share per region.
output$distr_hist_votos <- renderPlot({
geofacet_distr_hist(datos = datos_votos_reg(),
var = PCT_VOTOS_BR,
titulo = paste(input$familia,"en las",input$elec,sep=" "),
tit_x = "% votos comunal por region y para la metrópoli entera",
color = color())
})
# Regional summary table: per-region statistics plus the ratio of the regional
# median to the national median.
output$tabla_votos_reg <- renderDataTable({
datos_votos_reg() %>%
group_by(NOM_REG) %>%
summarise(Comunas = n(),
Mediana = unique(MEDIANA),
Media = mean(PCT_VOTOS_BR),
Min = min(PCT_VOTOS_BR),
Max = max(PCT_VOTOS_BR),
Q1 = quantile(PCT_VOTOS_BR,0.25),
Q3 = quantile(PCT_VOTOS_BR,0.75),
SD = sd(PCT_VOTOS_BR),
Cociente_Med = unique(MEDIANA)/unique(MEDIANA_NAL)) %>%
ungroup() %>%
mutate_if(is.numeric,list(~ round(.,3)))
},
rownames = FALSE,
colnames = c("Región","Núm. Comunas",
"Mediana","Media","Mínimo","Máximo","1er Cuartil","3er Cuartil","Desv. Est.",
"Cociente Mediana Nacional"),
filter = "top",
options = list(
lengthMenu = c(1,5,10,15,20),
pageLength = 20,
dom = "ltp",
language = list(
url = '//cdn.datatables.net/plug-ins/1.10.11/i18n/Spanish.json'
)
)
)
# National reference values (median etc.) for the vote share.
datos_referencia_votos <- reactive({
datos_electorales() %>%
inner_join(COMUNAS_2007) %>%
genera_datos_referencia(var = PCT_VOTOS_BR)
})
# Departmental data, carrying the national median for comparison.
datos_votos_dpto <- reactive({
datos_electorales() %>%
inner_join(COMUNAS_2007) %>%
genera_datos_dpto(med_nal = datos_referencia_votos() %>% extract2("Med"),
var = PCT_VOTOS_BR)
})
# Geofaceted violin plots of vote share per department.
# NOTE(review): "Metrópili" in the label below looks like a typo for
# "Metrópoli" -- it is displayed on the plot; confirm and fix at the source.
output$distr_viol_votos <- renderPlot({
geofacet_violines_dpto(datos = datos_votos_dpto(),
datos_referencia = datos_referencia_votos(),
var = PCT_VOTOS_BR,
titulo = paste(input$familia,"en las",input$elec,sep=" "),
texto_metropoli = "Metrópili entera",
color = color())
})
# Departmental summary table of the vote share.
output$tabla_votos_dpto <- renderDataTable({
datos_votos_dpto() %>%
group_by(NOM_REG,NOM_DPTO,COD_DPTO) %>%
summarise(Comunas = n(),
Mediana = unique(Mediana_Dpto),
Media = mean(PCT_VOTOS_BR),
Min = min(PCT_VOTOS_BR),
Max = max(PCT_VOTOS_BR),
Q1 = quantile(PCT_VOTOS_BR,0.25),
Q3 = quantile(PCT_VOTOS_BR,0.75),
SD = sd(PCT_VOTOS_BR),
Cociente_Med = unique(Mediana_Dpto)/unique(Mediana_Nacional)) %>%
ungroup() %>%
mutate_if(is.numeric,list(~ round(.,3)))
},
rownames = FALSE,
colnames = c("Región", "Departamento", "Código INSEE de Departamento","Núm. Comunas",
"Mediana","Media","Mínimo","Máximo","1er Cuartil","3er Cuartil","Desv. Est.","Cociente Mediana Nacional"),
filter = "top",
options = list(
lengthMenu = 2:8,
pageLength = 5,
dom = "ltp",
language = list(
url = '//cdn.datatables.net/plug-ins/1.10.11/i18n/Spanish.json'
)
)
)
# Census-category distributions ----
# Mirrors the vote-share pipeline above, but for the selected census category.
datos_cat_reg <- reactive({
datos_graf_solo_insee() %>%
genera_distr_reg(var = Pct)
})
# Regional summary table of the census category.
output$tabla_cat_reg <- renderDataTable({
datos_cat_reg() %>%
group_by(NOM_REG) %>%
summarise(Comunas = n(),
Mediana = unique(MEDIANA),
Media = mean(Pct),
Min = min(Pct),
Max = max(Pct),
Q1 = quantile(Pct,0.25),
Q3 = quantile(Pct,0.75),
SD = sd(Pct),
Cociente_Med = unique(MEDIANA)/unique(MEDIANA_NAL)) %>%
ungroup() %>%
mutate_if(is.numeric,list(~ round(.,3)))
},
rownames = FALSE,
colnames = c("Región","Núm. Comunas",
"Mediana","Media","Mínimo","Máximo","1er Cuartil","3er Cuartil","Desv. Est.","Cociente Mediana Nacional"),
filter = "top",
options = list(
lengthMenu = c(1,5,10,15,20),
pageLength = 20,
dom = "ltp",
language = list(
url = '//cdn.datatables.net/plug-ins/1.10.11/i18n/Spanish.json'
)
)
)
# Geofaceted histograms of the census category per region.
output$distr_hist_cat <- renderPlot({
geofacet_distr_hist(datos = datos_cat_reg(),
var = Pct,
titulo = paste(etiqueta_cat(),"en",aaaa(),sep=" "),
tit_x = "% poblacion comunal por región y para la metrópoli entera",
color = color())
})
# National reference values for the census category.
datos_referencia_cat <- reactive({
datos_graf_solo_insee() %>%
genera_datos_referencia(var = Pct)
})
# Departmental census-category data with the national median attached.
datos_cat_dpto <- reactive({
datos_graf_solo_insee() %>%
genera_datos_dpto(med_nal = datos_referencia_cat() %>% extract2("Med"),
var = Pct)
})
# Geofaceted violin plots of the census category per department.
# NOTE(review): same "Metrópili" typo as above.
output$distr_viol_cat <- renderPlot({
geofacet_violines_dpto(datos = datos_cat_dpto(),
datos_referencia = datos_referencia_cat(),
var = Pct,
titulo = paste(etiqueta_cat(),"en",aaaa(),sep=" "),
texto_metropoli = "Metrópili entera",
color = color())
})
# Departmental summary table of the census category.
output$tabla_cat_dpto <- renderDataTable({
datos_cat_dpto() %>%
group_by(NOM_REG,NOM_DPTO,COD_DPTO) %>%
summarise(Comunas = n(),
Mediana = unique(Mediana_Dpto),
Media = mean(Pct),
Min = min(Pct),
Max = max(Pct),
Q1 = quantile(Pct,0.25),
Q3 = quantile(Pct,0.75),
SD = sd(Pct),
Cociente_Med = unique(Mediana_Dpto)/unique(Mediana_Nacional)) %>%
ungroup() %>%
mutate_if(is.numeric,list(~ round(.,3)))
},
rownames = FALSE,
colnames = c("Región", "Departamento", "Código INSEE de Departamento","Núm. Comunas",
"Mediana","Media","Mínimo","Máximo","1er Cuartil","3er Cuartil","Desv. Est.","Cociente Mediana Nacional"),
filter = "top",
options = list(
lengthMenu = 2:8,
pageLength = 5,
dom = "ltp",
language = list(
url = '//cdn.datatables.net/plug-ins/1.10.11/i18n/Spanish.json'
)
)
)
# Scatter plot ----
# Joined census + electoral data; Alpha = 1/n per region so dense regions are
# drawn with more transparent points.
datos_graf <- reactive({
datos_graf_solo_insee() %>%
inner_join(datos_electorales()) %>%
group_by(COD_REG) %>%
mutate(Alpha = 1/n()) %>%
ungroup
})
# Geofaceted scatter of vote share vs. census category per region.
output$graf_disper <- renderPlot({
geofacet_disp_votos_cat_reg(datos_graf(),
paste(etiqueta_cat(),"vs",input$familia,"en las",input$elec,sep=" "),
if_else(input$var %in% c("Escolaridad","Empleo"),
"% población correspondiente",
"% población comunal"),
color = color())
})
# Linear correlations ----
# Per-department correlation between vote share and census category.
datos_corr <- reactive({
datos_graf() %>%
genera_datos_corr
})
# Geofaceted plot of correlation coefficients per department.
output$graf_corr <- renderPlot({
geofacet_corr_votos_cat_dpto(datos_corr(),
paste("Correlación", etiqueta_cat(),"y",input$familia,"en las",input$elec,sep=" "),
"Código INSEE de Departamento",
"Coeficiente de correlación",
color = color())
})
# Correlation table (drops the internal region code column).
output$tabla_corr <- renderDataTable({
datos_corr() %>%
select(-COD_REG)
},
rownames = FALSE,
colnames = c("Región","Departamento","Código INSEE de Departamento","Núm. Comunas",
paste("Mediana %",c(input$familia,etiqueta_cat())),
"Correlación","Valor p Corr. 0"),
filter = "top",
options = list(
lengthMenu = 2:8,
pageLength = 5,
dom = "ltp",
language = list(
url = '//cdn.datatables.net/plug-ins/1.10.11/i18n/Spanish.json'
)
)
)
# Naive trends ----
# LOESS smoother per department; span is user-controlled (input$span).
output$graf_smooth_loess <- renderPlot({
geofacet_smooth_votos_cat_dpto(datos_graf(),
"loess",
paste(etiqueta_cat(),"vs",input$familia,"en las",input$elec,sep=" "),
if_else(input$var %in% c("Escolaridad","Empleo"),
"% población correspondiente",
"% población comunal"),
color = color(),
span = input$span)
})
# Linear (lm) smoother per department.
output$graf_smooth_lm <- renderPlot({
geofacet_smooth_votos_cat_dpto(datos_graf(),
"lm",
paste(etiqueta_cat(),"vs",input$familia,"en las",input$elec,sep=" "),
if_else(input$var %in% c("Escolaridad","Empleo"),
"% población correspondiente",
"% población comunal"),
color = color())
})
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.