| content | path | license_type | repo_name | language | is_vendor | is_generated | length_bytes | extension | text |
|---|---|---|---|---|---|---|---|---|---|
| large_string, lengths 0–6.46M | large_string, lengths 3–331 | large_string, 2 classes | large_string, lengths 5–125 | large_string, 1 class | bool, 2 classes | bool, 2 classes | int64, 4–6.46M | large_string, 75 classes | string, lengths 0–6.46M |
# last modified 2011-07-30

# Optimizer wrapper: minimizes the model objective (objectiveML by default)
# with nlminb() and packages the result as a "semResult" object.
optimizerNlminb <- function(start, objective=objectiveML,
        gradient=TRUE, maxiter, debug, par.size, model.description, warn, ...){
    with(model.description, {
        obj <- objective(gradient=gradient)
        objective <- obj$objective
        grad <- if (gradient) obj$gradient else NULL
        if (!warn) save.warn <- options(warn=-1)
        res <- nlminb(start, objective, gradient=grad, model.description=model.description,
                      control=list(trace=if(debug) 1 else 0, iter.max=maxiter, ...))
        if (!warn) options(save.warn)
        result <- list()
        result$convergence <- res$convergence == 0
        result$iterations <- res$iterations
        par <- res$par
        names(par) <- param.names
        result$par <- par
        if (!result$convergence)
            warning(paste('Optimization may not have converged; nlminb return code = ',
                          res$convergence, '. Consult ?nlminb.\n', sep=""))
        result$criterion <- res$objective
        obj <- objective(par, model.description)
        C <- attr(obj, "C")
        rownames(C) <- colnames(C) <- var.names[observed]
        result$C <- C
        A <- attr(obj, "A")
        rownames(A) <- colnames(A) <- var.names
        result$A <- A
        P <- attr(obj, "P")
        rownames(P) <- colnames(P) <- var.names
        result$P <- P
        class(result) <- "semResult"
        result
    })
}
| path: /R/optimizerNlminb.R | license_type: no_license | repo_name: cran/sem | language: R | is_vendor: false | is_generated: false | length_bytes: 1,335 | extension: r |
testlist <- list(cost = structure(1.68972201013703e-09, .Dim = c(1L, 1L)), flow = structure(c(3.80768289350145e+125, 8.58414828913381e+155, 3.37787969964034e+43, 2.83184518248624e-19, 7.49487861616974e+223, 8.52929466674086e+86, 2.51852491380534e-303, 3.12954510408264e-253, 2.45741775099414e-215, 6.59159492364721e+70, 2.33952815237705e+77, 3.1674929214459e+282, 1.0709591854537e+63, 7.43876613929257e+191, 8.31920980250172e+78, 1.26747339146319e+161, 5.68076251052666e-141, 9.98610641272026e+182, 232665383858.491, 3.75587249552337e-34, 8.67688084914444e+71, 2.85936996201565e+135, 5.49642980516022e+268, 854537881567133, 1.33507119962914e+95, 2.76994725819545e+63, 4.08029273738449e+275, 4.93486427894025e+289, 1.24604061502336e+294, 3.21258088866817e-185, 6.94657888227078e+275, 3.46330348083089e+199, 3.28318446108869e-286, 6.12239214969922e-296, 8.47565288269902e+60 ), .Dim = c(5L, 7L)))
result <- do.call(epiphy:::costTotCPP,testlist)
str(result)
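# Note: this file (and the other *_valgrind_files scripts in this dump) is an
# auto-generated AFL/valgrind fuzzing harness; the extreme numeric values
# above are fuzzer-generated inputs, not meaningful data.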
| path: /epiphy/inst/testfiles/costTotCPP/AFL_costTotCPP/costTotCPP_valgrind_files/1615926016-test.R | license_type: no_license | repo_name: akhikolla/updatedatatype-list2 | language: R | is_vendor: false | is_generated: false | length_bytes: 1,018 | extension: r |
testlist <- list(Beta = 0, CVLinf = -1.37672045511449e-268, FM = 3.81959242373749e-313, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
| path: /DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615827908-test.R | license_type: no_license | repo_name: akhikolla/updatedatatype-list2 | language: R | is_vendor: false | is_generated: false | length_bytes: 643 | extension: r |
testlist <- list(x = c(-1L, -1L, -1L, -180L, -589825L, -11788759L, -11731124L, 1275068416L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), y = integer(0))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result)
| path: /diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609961639-test.R | license_type: no_license | repo_name: akhikolla/updated-only-Issues | language: R | is_vendor: false | is_generated: false | length_bytes: 467 | extension: r |
library(testthat)
library(rPackedBar)
test_check("rPackedBar")
| path: /data/genthat_extracted_code/rPackedBar/tests/testthat.R | license_type: no_license | repo_name: surayaaramli/typeRrh | language: R | is_vendor: false | is_generated: false | length_bytes: 64 | extension: r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitness_functions.R
\name{users.dataSources.patch}
\alias{users.dataSources.patch}
\title{Updates the specified data source. The dataStreamId, dataType, type, dataStreamName, and device properties, with the exception of version, cannot be modified. Data sources are identified by their dataStreamId. This method supports patch semantics.}
\usage{
users.dataSources.patch(DataSource, userId, dataSourceId)
}
\arguments{
\item{DataSource}{The \link{DataSource} object to pass to this method}
\item{userId}{Update the data source for the person identified}
\item{dataSourceId}{The data stream ID of the data source to update}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/fitness.activity.write
\item https://www.googleapis.com/auth/fitness.blood_glucose.write
\item https://www.googleapis.com/auth/fitness.blood_pressure.write
\item https://www.googleapis.com/auth/fitness.body.write
\item https://www.googleapis.com/auth/fitness.body_temperature.write
\item https://www.googleapis.com/auth/fitness.location.write
\item https://www.googleapis.com/auth/fitness.nutrition.write
\item https://www.googleapis.com/auth/fitness.oxygen_saturation.write
\item https://www.googleapis.com/auth/fitness.reproductive_health.write
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/fitness.activity.write", "https://www.googleapis.com/auth/fitness.blood_glucose.write", "https://www.googleapis.com/auth/fitness.blood_pressure.write", "https://www.googleapis.com/auth/fitness.body.write", "https://www.googleapis.com/auth/fitness.body_temperature.write", "https://www.googleapis.com/auth/fitness.location.write", "https://www.googleapis.com/auth/fitness.nutrition.write", "https://www.googleapis.com/auth/fitness.oxygen_saturation.write", "https://www.googleapis.com/auth/fitness.reproductive_health.write"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
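% Illustrative workflow (a sketch only; the single scope shown and
% \code{userId = "me"} are assumptions, not taken from this Rd):
%   options(googleAuthR.scopes.selected =
%     "https://www.googleapis.com/auth/fitness.activity.write")
%   googleAuthR::gar_auth()
%   users.dataSources.patch(DataSource, userId = "me",
%     dataSourceId = "<dataStreamId>")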
\seealso{
\href{https://developers.google.com/fit/rest/}{Google Documentation}
Other DataSource functions: \code{\link{DataSource}},
\code{\link{users.dataSources.create}},
\code{\link{users.dataSources.update}}
}
| path: /googlefitnessv1.auto/man/users.dataSources.patch.Rd | license_type: permissive | repo_name: GVersteeg/autoGoogleAPI | language: R | is_vendor: false | is_generated: true | length_bytes: 2,369 | extension: rd |
library(data.table)
library(ggplot2)
library(ggthemes)
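# Overview (inferred from the code below): this script compares IBD segment
# calls from three detection methods, FGT (phased haplotypes), DGT (genotypes),
# and HMM (genotypes), on 1000 Genomes chr20 data. It deduplicates segment
# calls per sample pair, matches segments across methods, and plots genetic
# (cM) and physical (Mb) segment lengths against focal allele count, faceted
# by super-population pair.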
m = fread("../data/1000G.chr20.marker.txt", header = T, stringsAsFactors = F)
fgt = fread("result.chr20.ibd.FGT.txt", header = T, stringsAsFactors = F)
dgt = fread("result.chr20.ibd.DGT.txt", header = T, stringsAsFactors = F)
hmm = fread("result.chr20.ibd.HMM.txt", header = T, stringsAsFactors = F)
fgt$key = sprintf("%d %d %d %d", fgt$SampleID0, fgt$SampleID1, fgt$LHS, fgt$RHS)
dgt$key = sprintf("%d %d %d %d", dgt$SampleID0, dgt$SampleID1, dgt$LHS, dgt$RHS)
hmm$key = sprintf("%d %d %d %d", hmm$SampleID0, hmm$SampleID1, hmm$LHS, hmm$RHS)
fgt = fgt[order(fgt$Fk, sample(1:nrow(fgt))),]
dgt = dgt[order(dgt$Fk, sample(1:nrow(dgt))),]
hmm = hmm[order(hmm$Fk, sample(1:nrow(hmm))),]
del = which(duplicated(fgt$key)); fgt = fgt[-del,]
del = which(duplicated(dgt$key)); dgt = dgt[-del,]
del = which(duplicated(hmm$key)); hmm = hmm[-del,]
fgt$key = sprintf("%d %d %d", fgt$MarkerID, fgt$SampleID0, fgt$SampleID1)
dgt$key = sprintf("%d %d %d", dgt$MarkerID, dgt$SampleID0, dgt$SampleID1)
hmm$key = sprintf("%d %d %d", hmm$MarkerID, hmm$SampleID0, hmm$SampleID1)
key = intersect(hmm$key, intersect(fgt$key, dgt$key))
fgt = as.data.table(as.data.frame(fgt)[match(key, fgt$key),])
dgt = as.data.table(as.data.frame(dgt)[match(key, dgt$key),])
hmm = as.data.table(as.data.frame(hmm)[match(key, hmm$key),])
fgt$tag = "fgt"
dgt$tag = "dgt"
hmm$tag = "hmm"
p = rbind(fgt, dgt, hmm)
x = sample(1:nrow(fgt), 1e6)
p = rbind(fgt[x,], dgt[x,], hmm[x,])
del = which(p$LHS < 2 | p$RHS > max(p$RHS) - 2)
p = p[-del,]
p$pLHS = m$Position[p$LHS+1]
p$pRHS = m$Position[p$RHS+1]
p$pLen = p$pRHS - p$pLHS + 1
p$gLHS = m$GenDist[p$LHS+1]
p$gRHS = m$GenDist[p$RHS+1]
p$gLen = p$gRHS - p$gLHS + 1e-8
p = split(p, p$tag)
k = Reduce(intersect, sapply(p, function(x) unique(x$key)))
k = sample(k, 1e6)
p = lapply(p, function(x, k) {
i = which(x$key %in% k)
x[i,]
}, k)
p = rbindlist(p)
panel = read.table("../data/integrated_call_samples_v3.20130502.ALL.panel", header = T, stringsAsFactors = F)
panel = as.data.table(panel)
p$s0 = panel$super_pop[p$SampleID0+1]
p$s1 = panel$super_pop[p$SampleID1+1]
p$ss0 = ""
p$ss1 = ""
x = (p$s0 <= p$s1); p$ss0[x] = p$s0[x]; p$ss1[x] = p$s1[x];
x = (p$s0 > p$s1); p$ss0[x] = p$s1[x]; p$ss1[x] = p$s0[x];
p$ss = sprintf("%s %s", p$ss0, p$ss1)
nss = by(p, p$ss, nrow)
nss = data.table(ss0 = sub("^([A-Z]+) ([A-Z]+)$", "\\1", names(nss)),
ss1 = sub("^([A-Z]+) ([A-Z]+)$", "\\2", names(nss)),
num = as.vector(nss))
nss$str = format(nss$num/3, trim = F, big.mark = ",")
p$tag = factor(p$tag, levels = c("fgt", "dgt", "hmm"), labels = c("FGT, phased haplotypes", "DGT, genotypes", "HMM, genotypes"), ordered = T)
gg = ggplot(p) +
facet_grid(ss1~ss0, switch = "both") +
stat_summary(aes(x = Fk, y = gLen / 1, color = tag, linetype = tag), geom = "line", size = 2/3, #position = position_dodge(width = 1/2),
fun.data = function(x) { data.table(ymin = quantile(x, 1/4),
y = quantile(x, 2/4),
ymax = quantile(x, 3/4)) }) +
geom_label(data = nss, aes(x = 22, y = 1.65, label = str), label.size = NA, size = 2.5, fill = "white", alpha = 2/3) +
scale_y_log10(minor_breaks = c((0:9)/1000, (0:9)/100, (0:9)/10, (0:9)/1, (0:9)*10)) +
scale_colour_manual(values = c("FGT, phased haplotypes" = "purple", "DGT, genotypes" = "limegreen", "HMM, genotypes" = "goldenrod")) +
scale_linetype_manual(values = c("FGT, phased haplotypes" = "11", "DGT, genotypes" = "22", "HMM, genotypes" = "solid")) +
coord_cartesian(ylim = c(10^-2.5, 2)) +
theme_few() +
theme(aspect.ratio = 1,
legend.title = element_blank(),
strip.placement = "outside",
strip.text = element_text(face = "bold"),
panel.background = element_rect(fill = "grey90"),
panel.border = element_rect(fill = NA, size = 1/2, colour = "black"),
panel.grid.major = element_line(colour = "white", size = 1/2),
panel.grid.minor.y = element_line(colour = "white", size = 1/3)) +
xlab("Focal allele count") + ylab("Genetic length (cM)")
gg
ggsave(gg, filename = "__plot.pop_genlen.pdf", width = 12, height = 9)
gg = ggplot(p) +
facet_grid(ss0~ss1) +
stat_summary(aes(x = Fk, y = pLen / 1e6, color = tag, linetype = tag), geom = "line", size = 2/3, #position = position_dodge(width = 1/2),
fun.data = function(x) { data.table(ymin = quantile(x, 1/4),
y = quantile(x, 2/4),
ymax = quantile(x, 3/4)) }) +
geom_label(data = nss, aes(x = 22, y = 1.65, label = str), label.size = NA, size = 2.5, fill = "grey90", alpha = 2/3) +
scale_y_log10(minor_breaks = c((0:9)/1000, (0:9)/100, (0:9)/10, (0:9)/1, (0:9)*10), position = "right") +
scale_x_continuous(position = "top") +
scale_colour_manual(values = c("FGT, phased haplotypes" = "purple", "DGT, genotypes" = "limegreen", "HMM, genotypes" = "goldenrod")) +
scale_linetype_manual(values = c("FGT, phased haplotypes" = "11", "DGT, genotypes" = "22", "HMM, genotypes" = "solid")) +
coord_cartesian(ylim = c(10^-2.5, 2)) +
theme_few() +
theme(aspect.ratio = 1,
legend.title = element_blank(),
strip.placement = "outside",
strip.text = element_text(face = "bold"),
panel.background = element_rect(fill = "white"),
panel.border = element_rect(fill = NA, size = 1/2, colour = "black"),
panel.grid.major = element_line(colour = "grey90", size = 1/2),
panel.grid.minor.y = element_line(colour = "grey90", size = 1/3)) +
xlab("Focal allele count") + ylab("Physical length (Mb)")
gg
ggsave(gg, filename = "__plot.pop_phylen.pdf", width = 12, height = 9)
save(p, file = "_data.ibd.fgt-dgt-hmm.RData")
##################
chr = 20
Ne = 20000
M = fread(sprintf("../../data/1000G.chr%d.marker.txt", chr), header = T, stringsAsFactors = F)
files = dir(pattern = ".*\\.ccf\\..*", full.names = T)
d = NULL
for (file in files) {
if (grepl("NN", file)) next
tmp = fread(file, header = T, stringsAsFactors = F)
d = rbind(d, tmp)
}
loc = unique(d$MarkerID)
length(loc) # 40789
#loc = sample(loc, 5000)
#x = which(d$MarkerID %in% loc)
#p = d[x,]
p = d
x = which(p$Clock == "C")
p = p[x,]
nrow(p) #
del = which(p$Shared == 1)
p = p[-del,]
nrow(p) # 3536825
# key = sprintf("%d %d %d %d %d", p$MarkerID, p$SampleID0, p$Chr0, p$SampleID1, p$Chr1)
# del = which(duplicated(key))
# if (length(del) > 0) {
# p = p[-del,]
# }
# nrow(p) # 1171257
#
#
# key = sprintf("%d %d %d %d %d %d", p$SampleID0, p$Chr0, p$SampleID1, p$Chr1, p$SegmentLHS, p$SegmentRHS)
# del = which(duplicated(key))
# if (length(del) > 0) {
# p = p[-del,]
# }
# nrow(p) # 900422
del = which(p$SegmentLHS < 2 | p$SegmentRHS > max(p$SegmentRHS) - 2)
p = p[-del,]
nrow(p) # 3514601
# x = sample(1:nrow(p), 1e6)
# p = p[x,]
# nrow(p) #
p$pLHS = M$Position[p$SegmentLHS+1]
p$pRHS = M$Position[p$SegmentRHS+1]
p$pLen = p$pRHS - p$pLHS + 1
p$gLHS = M$GenDist[p$SegmentLHS+1]
p$gRHS = M$GenDist[p$SegmentRHS+1]
p$gLen = p$gRHS - p$gLHS + 1e-8
panel = read.table("../../data/integrated_call_samples_v3.20130502.ALL.panel", header = T, stringsAsFactors = F)
p$s0 = panel$super_pop[p$SampleID0+1]
p$s1 = panel$super_pop[p$SampleID1+1]
p$ss0 = ""
p$ss1 = ""
x = (p$s0 <= p$s1); p$ss0[x] = p$s0[x]; p$ss1[x] = p$s1[x];
x = (p$s0 > p$s1); p$ss0[x] = p$s1[x]; p$ss1[x] = p$s0[x];
p$ss = sprintf("%s %s", p$ss0, p$ss1)
nss = table(p$ss)
nss = data.table(ss0 = sub("^([A-Z]+) ([A-Z]+)$", "\\1", names(nss)),
ss1 = sub("^([A-Z]+) ([A-Z]+)$", "\\2", names(nss)),
num = as.vector(nss))
nss$str = sprintf("%s %%", format(round(nss$num/sum(nss$num) * 100, 3), digits = 3, trim = F, big.mark = ","))
q = p
key = sprintf("%d %d %d %d %d %d", q$SampleID0, q$Chr0, q$SampleID1, q$Chr1, q$SegmentLHS, q$SegmentRHS)
del = which(duplicated(key))
if (length(del) > 0) {
q = q[-del,]
}
nrow(q) # 2338288
x = by(q, list(q$ss), function(x) median(x$gLen))
array(x, dim = dim(x), dimnames = dimnames(x))
y = by(q, list(q$ss), function(x) median(x$pLen/1e6))
array(y, dim = dim(y), dimnames = dimnames(y))
array(x, dim = dim(x), dimnames = dimnames(x)) / array(y, dim = dim(y), dimnames = dimnames(y))
mx = range(round(log10(c(1.925,max(p$Fk)))*(1/3),2))
gg = ggplot(p) +
facet_grid(ss1~ss0, switch = "both") +
stat_summary(aes(x = round(log10(Fk)*(1/3),2), y = gLen / 1), geom = "smooth", color = "black", alpha = 1/2, size = 1/2,# fatten = 1/10,
fun.data = function(x) {
# if (length(x) < 500) {
# return( data.table(ymin = NA,
# y = NA,
# ymax = NA) )
# }
data.table(ymin = quantile(x, 1/4),
y = quantile(x, 2/4),
ymax = quantile(x, 3/4))
}) +
# stat_summary(aes(x = Fk, y = gLen / 1), geom = "pointrange", size = 1/3, fatten = 1/10, color = "indianred",
# fun.data = function(x) {
# if (length(x) >= 500) {
# return( data.table(ymin = NA,
# y = NA,
# ymax = NA) )
# }
# data.table(ymin = quantile(x, 1/4),
# y = quantile(x, 2/4),
# ymax = quantile(x, 3/4))
# }) +
geom_label(data = nss, aes(x = 1, y = 2.333, label = str), label.size = NA, size = 2.75, fill = "white", alpha = 2/3) +
#geom_label(data = avr, aes(x = 41, y = 0.006, label = str), label.size = NA, size = 3, fill = "white", alpha = 2/3, parse = T) +
scale_y_log10(minor_breaks = c((0:9)/1000, (0:9)/100, (0:9)/10, (0:9)/1, (0:9)*10)) +
scale_x_continuous(breaks = log10(2^(1:12))*(1/3), labels = c("2","","8","","32","","128","","512","", "2048", ""), expand = c(0,0)) +
coord_cartesian(ylim = c(0.005, 3), xlim = mx) +
theme_few() +
theme(aspect.ratio = 1,
legend.title = element_blank(),
axis.text = element_text(size = 8),
strip.placement = "outside",
strip.text = element_text(face = "bold"),
panel.background = element_rect(fill = "grey90"),
panel.border = element_rect(fill = NA, size = 1/2, colour = "black"),
panel.grid.major = element_line(colour = "white", size = 1/2),
panel.grid.minor.y = element_line(colour = "white", size = 1/3)) +
xlab("Focal allele count") + ylab("Genetic length (cM)")
gg
ggsave(gg, filename = "___plot.pop_genlen.pdf", width = 12, height = 9)
gg = ggplot(p) +
facet_grid(ss0~ss1) +
stat_summary(aes(x = round(log10(Fk)*(1/3),2), y = gLen / 1), geom = "smooth", color = "black", alpha = 1/2, size = 1/2,# fatten = 1/10,
fun.data = function(x) {
# if (length(x) < 500) {
# return( data.table(ymin = NA,
# y = NA,
# ymax = NA) )
# }
data.table(ymin = quantile(x, 1/4),
y = quantile(x, 2/4),
ymax = quantile(x, 3/4))
}) +
# stat_summary(aes(x = Fk, y = gLen / 1), geom = "pointrange", size = 1/3, fatten = 1/10, color = "indianred",
# fun.data = function(x) {
# if (length(x) >= 500) {
# return( data.table(ymin = NA,
# y = NA,
# ymax = NA) )
# }
# data.table(ymin = quantile(x, 1/4),
# y = quantile(x, 2/4),
# ymax = quantile(x, 3/4))
# }) +
#geom_label(data = nss, aes(x = 42, y = 2.33, label = str), label.size = NA, size = 2.75, fill = "grey90", alpha = 2/3) +
#geom_label(data = mpl, aes(x = 44.5, y = 0.005, label = str), label.size = NA, size = 2.75, fill = "grey90", alpha = 2/3) +
scale_y_log10(minor_breaks = c((0:9)/1000, (0:9)/100, (0:9)/10, (0:9)/1, (0:9)*10), position = "right") +
scale_x_continuous(breaks = log10(2^(1:12))*(1/3), labels = c("2","","8","","32","","128","","512","", "2048", ""), expand = c(0,0), position = "top") +
coord_cartesian(ylim = c(0.005, 3), xlim = mx) +
theme_few() +
theme(aspect.ratio = 1,
legend.title = element_blank(),
axis.text = element_text(size = 8),
strip.placement = "outside",
strip.text = element_text(face = "bold"),
panel.background = element_rect(fill = "white"),
panel.border = element_rect(fill = NA, size = 1/2, colour = "black"),
panel.grid.major = element_line(colour = "grey90", size = 1/2),
panel.grid.minor.y = element_line(colour = "grey90", size = 1/3)) +
xlab("Focal allele count") + ylab("Physical length (Mb)")
gg
ggsave(gg, filename = "___plot.pop_phylen.pdf", width = 12, height = 9)
save(p, file = "_data.ibd.hhmm.RData")
##############
mean(p$gLen / (p$pLen / 1e6))
z = by(p, list(p$ss1, p$ss0), function(x) mean(x$gLen / (x$pLen / 1e6)))
array(z, dim = dim(z), dimnames = dimnames(z))
#############
ggplot(p) +
stat_summary(aes(x = Fk, y = gLen / 1, color = tag), geom = "pointrange", position = position_dodge(width = 1/2),
fun.data = function(x) { data.table(ymin = quantile(x, 1/4),
y = quantile(x, 2/4),
ymax = quantile(x, 3/4)) }) +
scale_y_log10(minor_breaks = c((0:9)/100, (0:9)/10, (0:9)/1, (0:9)*10))
p$sb = (p$s0 == p$s1)
pp = which(p$sb)
pp = p[pp,]
ggplot(pp) +
facet_grid(.~s0) +
stat_summary(aes(x = Fk, y = gLen / 1, color = tag), geom = "pointrange", position = position_dodge(width = 1/2),
fun.data = function(x) { data.table(ymin = quantile(x, 1/4),
y = quantile(x, 2/4),
ymax = quantile(x, 3/4)) }) +
scale_y_log10(minor_breaks = c((0:9)/100, (0:9)/10, (0:9)/1, (0:9)*10))
pp = which(p$s0 != p$s1)
pp = p[pp,]
del = which(duplicated(k))
p = d[-del,]
ggplot(m) +
stat_summary(aes(x = fk, y = len / 1), geom = "pointrange", size = 1/3, fatten = 1/10, #alpha = 1, color = "grey30", #position = position_dodge(width = 1/2),
fun.data = function(x) {
if (length(x) < 100) {
return( data.table(ymin = NA,
y = NA,
ymax = NA) )
}
data.table(ymin = quantile(x, 1/4),
y = quantile(x, 2/4),
ymax = quantile(x, 3/4))
}) +
scale_y_log10(minor_breaks = c((0:9)/1000, (0:9)/100, (0:9)/10, (0:9)/1, (0:9)*10))
| path: /1000G/full/plot_lens.R | license_type: no_license | repo_name: pkalbers/ScriptCollection | language: R | is_vendor: false | is_generated: false | length_bytes: 14,263 | extension: r |
library(ape)
testtree <- read.tree("6622_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="6622_0_unrooted.txt")
| path: /codeml_files/newick_trees_processed/6622_0/rinput.R | license_type: no_license | repo_name: DaniBoo/cyanobacteria_project | language: R | is_vendor: false | is_generated: false | length_bytes: 135 | extension: r |
survey<-read.csv("./data/Survey_Responses_return.csv")
template<-read.csv("./data/Demographics Template.csv")
survey<-survey[,c(1:15,22:26,33,29,35,38,36,37,40:57,16:21,39,27,28,30:32,34)]
# Create empty new columns
survey$Case.Number<-rep(NA,nrow(survey))
survey$CA..USI<-rep(NA,nrow(survey))
survey$CA..PAS.ID<-rep(NA,nrow(survey))
survey$CA..STARS.ID<-rep(NA,nrow(survey))
survey$SSN<-rep(NA,nrow(survey))
survey$School.Name_774<-rep("Career Academy (HS)(DC)(PCS)",nrow(survey))
survey$Grade_1124<-rep(NA,nrow(survey))
survey$Ext<-rep(NA,nrow(survey))
survey$Program.Name<-rep(NA,nrow(survey))
survey$Program.Start.Date<-rep(NA,nrow(survey))
survey$Program.End.Date<-rep(NA,nrow(survey))
survey$Dismissal.Reason<-rep(NA,nrow(survey))
survey$Emergency.Contact.Name<-paste(survey$First.Name.1,survey$Last.Name.1)
survey$Entry.code_1139<-rep(NA,nrow(survey))
#reselect columns
survey_upload<-subset(survey,select=c(1,58:61,2,4,3,6,5,11:13,7:10,50,70,48,
47,14:17,29:32,35,63,64,36,27,39,62,22,
54:56,65,71,57,38,26,28,43,25,40,33,34,
23,44,37,42,41,66:69))
check1<-cbind(names(survey_upload),names(template)) # side-by-side check that the reordered columns match the template
#rename the survey
names<-names(template)
#delete the dots
names<-gsub('\\.', ' ', names)
names(survey_upload)=names
survey_upload[,58]=rep(date(),nrow(survey_upload)) # date() returns the current date-time; the bare name 'date' is a function and would make rep() error
#write into a .csv file
write.csv(survey_upload,
"./output/survey_ETO_return.csv",
row.names=FALSE)
| path: /script/Survey_To_ETO.R | license_type: no_license | repo_name: glorialy/Survey_upload_Tool | language: R | is_vendor: false | is_generated: false | length_bytes: 1,510 | extension: r |
library(ranger)
library(survival)
context("ranger_maxstat")
test_that("maxstat splitting works for survival", {
rf <- ranger(Surv(time, status) ~ ., veteran, splitrule = "maxstat")
expect_is(rf, "ranger")
expect_lt(rf$prediction.error, 0.4)
})
test_that("maxstat splitting works for regression", {
rf <- ranger(Sepal.Length ~ ., iris, splitrule = "maxstat")
expect_is(rf, "ranger")
expect_gt(rf$r.squared, 0.5)
})
test_that("maxstat splitting, alpha or minprop out of range throws error", {
expect_error(ranger(Surv(time, status) ~ ., veteran, splitrule = "maxstat", alpha = -1))
expect_error(ranger(Surv(time, status) ~ ., veteran, splitrule = "maxstat", alpha = 2))
expect_error(ranger(Surv(time, status) ~ ., veteran, splitrule = "maxstat", minprop = -1))
expect_error(ranger(Surv(time, status) ~ ., veteran, splitrule = "maxstat", minprop = 1))
})
test_that("maxstat splitting not working for classification", {
expect_error(ranger(Species ~ ., iris, splitrule = "maxstat"))
})
test_that("maxstat impurity importance is positive", {
rf <- ranger(Surv(time, status) ~ ., veteran, num.trees = 5,
splitrule = "maxstat", importance = "impurity")
expect_gt(mean(rf$variable.importance), 0)
rf <- ranger(Sepal.Length ~ ., iris, num.trees = 5,
splitrule = "maxstat", importance = "impurity")
expect_gt(mean(rf$variable.importance), 0)
})
test_that("maxstat corrected impurity importance is positive (on average)", {
rf <- ranger(Surv(time, status) ~ ., veteran, num.trees = 5,
splitrule = "maxstat", importance = "impurity_corrected")
expect_gt(mean(rf$variable.importance), 0)
rf <- ranger(Sepal.Length ~ ., iris, num.trees = 5,
splitrule = "maxstat", importance = "impurity_corrected")
expect_gt(mean(rf$variable.importance), 0)
})
| path: /data/genthat_extracted_code/ranger/tests/test_maxstat.R | license_type: no_license | repo_name: surayaaramli/typeRrh | language: R | is_vendor: false | is_generated: false | length_bytes: 1,845 | extension: r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{isNormByGroup}
\alias{isNormByGroup}
\title{Helper method which determines if a biomarker is normally distributed by group at alpha level 0.05.}
\usage{
isNormByGroup(dataFrame, marker)
}
\arguments{
\item{dataFrame}{The data frame to test for normality by group.}
\item{marker}{The biomarker number to be tested; e.g., for the nth biomarker, input integer n.
Note that marker n corresponds to column n+1 in the data frame.}
}
\value{
True if normality cannot be rejected for all groups; false otherwise.
}
\description{
This function will be called by the implementations of bullet 1 and 2 on page 7 of the project slides.
Note that the \code{shapiro.test()} function limits sample size to \code{5000}.
}
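% Illustrative call (a sketch; the data frame name is an assumption):
%   # marker = 2 tests the second biomarker, i.e., column 3 of the data frame
%   isNormByGroup(dataFrame = biomarkers, marker = 2)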
| path: /man/isNormByGroup.Rd | license_type: no_license | repo_name: JWLevesque/AMS597-Group-5-Project | language: R | is_vendor: false | is_generated: true | length_bytes: 798 | extension: rd |
testlist <- list(a = 9.97924695498953e-316, b = 0)
result <- do.call(BayesMRA::rmvn_arma_scalar,testlist)
str(result)
| path: /BayesMRA/inst/testfiles/rmvn_arma_scalar/AFL_rmvn_arma_scalar/rmvn_arma_scalar_valgrind_files/1615926296-test.R | license_type: no_license | repo_name: akhikolla/updatedatatype-list1 | language: R | is_vendor: false | is_generated: false | length_bytes: 117 | extension: r |
## makeCacheMatrix creates a list containing 4 functions:
## 1. set(), 2. get(), 3. setinv(), 4. getinv()
## inv is set to NULL so that when makeCacheMatrix is called for the first time there is no stored inverse
## the values of the data and inv are set in the set() method of this function
## the value of x can be obtained by calling the get() method
## a computed inverse can be stored by calling the setinv() method
## a previously computed inverse can be obtained by calling the getinv() method
## E.g., calling: x <- makeCacheMatrix(matrixS) where matrixS is some invertible square matrix
makeCacheMatrix <- function(x = matrix()) {
    inv <- NULL
    set <- function(y){
        x <<- y
        inv <<- NULL
    }
    get <- function() x
    setinv <- function(inverse) inv <<- inverse
    getinv <- function() inv
    list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## cacheSolve first looks up the cached inverse of 'x'
## if inv already holds a value, it is returned along with a message
## otherwise the data is obtained from 'x' via the get() method
## and its inverse is computed with solve()
## the newly computed inverse is placed in the cache via the setinv() method
## and then returned
## E.g., calling: cacheSolve(x)
cacheSolve <- function(x, ...) {
    inv <- x$getinv()
    if(!is.null(inv)){
        message("getting cached inverse")
        return(inv)
    }
    data <- x$get()
    inv <- solve(data, ...)
    x$setinv(inv)
    inv
}
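## Minimal usage sketch (illustrative; any invertible square matrix works):
m <- makeCacheMatrix(diag(2))
cacheSolve(m)   # computes the inverse via solve() and caches it
cacheSolve(m)   # prints "getting cached inverse" and returns the cached copy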
| path: /cachematrix.R | license_type: no_license | repo_name: AnirudhNandan/ProgrammingAssignment2 | language: R | is_vendor: false | is_generated: false | length_bytes: 1,528 | extension: r |
context("simpr::fit")
library(tidyverse)
test_that("Multiple fit functions give different results", {
set.seed(100)
chisq_spec = variables(x1 = ~rnorm(n),
x2 = ~x1 + rnorm(n, 0, sd = 2),
c1 = ~ cut(x1, breaks = b) %>% as.numeric,
c2 = ~ cut(x2, breaks = b) %>% as.numeric) %>%
meta(n = c(50, 100),
b = c(2, 10))
chisq_fit = chisq_spec %>%
gen(5) %>%
fit(ChiSq = ~chisq.test(.$c1, .$c2),
Unknown_Continuous_Correlation = ~cor.test(.$x1, .$x2))
## These should NOT be the same!
expect_false(identical(chisq_fit$ChiSq, chisq_fit$Unknown_Continuous_Correlation))
})
| path: /tests/testthat/test_fit.R | license_type: no_license | repo_name: jsm19-tds-demo/simpr | language: R | is_vendor: false | is_generated: false | length_bytes: 681 | extension: r |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tubern.R
\name{tubern_POST}
\alias{tubern_POST}
\title{POST}
\usage{
tubern_POST(path, query = NULL, body = "", ...)
}
\arguments{
\item{path}{path to specific API request URL}
\item{query}{query list}
\item{body}{passing image through body}
\item{\dots}{Additional arguments passed to \code{\link[httr]{POST}}.}
}
\value{
list
}
\description{
POST
}
| path: /man/tubern_POST.Rd | license_type: permissive | repo_name: cran/tubern | language: R | is_vendor: false | is_generated: true | length_bytes: 455 | extension: rd |
#' Arrange correlation matrices from cross.correlate into a table format
#'
#' @param res Output from cross.correlate
#' @param verbose verbose
#'
#' @return Correlation table
#'
#' @export
#'
#' @examples
#' data(peerj32)
#' d1 <- peerj32$microbes[1:20, 1:10]
#' d2 <- peerj32$lipids[1:20,1:10]
#' cc <- cross.correlate(d1, d2, mode = 'matrix')
#' cmat <- cmat2table(cc)
#'
#' @references See citation('microbiome')
#' @author Contact: Leo Lahti \email{microbiome-admin@@googlegroups.com}
#' @keywords utilities
cmat2table <- function(res, verbose = FALSE) {
ctab <- NULL
if (!is.null(res$cor)) {
ctab <- melt(res$cor)
colnames(ctab) <- c("X1", "X2", "Correlation")
}
    correlation <- NULL # circumvent warning on global vars
if (!is.null(res$p.adj)) {
if (verbose) {
message("Arranging the table")
}
        ctab <- cbind(ctab, melt(res$p.adj)$value)
        colnames(ctab) <- c("X1", "X2", "Correlation", "p.adj")
        ctab <- ctab[order(ctab$p.adj), ]
} else {
message("No significant adjusted p-values")
if (!is.null(ctab)) {
ctab <- cbind(ctab, melt(res$pval)$value)
ctab <- ctab[order(-abs(ctab$Correlation)), ]
colnames(ctab) <- c("X1", "X2", "Correlation", "pvalue")
}
}
ctab$X1 <- as.character(ctab$X1)
ctab$X2 <- as.character(ctab$X2)
ctab
}
#' List color scales
#'
#'
#'
#'
#' @return list of color scales
#'
#' @export
#' @examples list.color.scales()
#' @references See citation('microbiome')
#' @author Contact: Leo Lahti \email{microbiome-admin@@googlegroups.com}
#' @keywords utilities
list.color.scales <- function() {
## Different colour scales
list(`white/blue` = colorRampPalette(c("white", "darkblue"),
interpolate = "linear")(100),
`white/black` = colorRampPalette(c("white", "black"),
interpolate = "linear")(100),
`black/yellow/white` = colorRampPalette(c("black", "yellow", "white"),
bias = 0.5, interpolate = "linear")(100))
}
#' calculate.hclust
#'
#' Calculate hierarchical clustering for standard selections in
#' profiling script
#'
#'
#' @param dat data matrix (use log10 with pearson!)
#' @param method hierarchical clustering method (see ?hclust)
#' @param metric clustering metrics (spearman / pearson / euclidean)
#'
#'
#' @return hclust object for log10 and for absolute scale data
#'
#' @export
#' @examples
#' data(peerj32)
#' dat <- peerj32$microbes
#' hc <- calculate.hclust(dat, 'complete', 'pearson')
#' @references See citation('microbiome')
#' @author Contact: Leo Lahti \email{microbiome-admin@@googlegroups.com}
#' @keywords utilities
calculate.hclust <- function(dat, method = "complete", metric = "pearson") {
if (metric == "euclidean") {
hc <- hclust(dist(t(dat)), method = method)
} else if (metric %in% c("spearman", "pearson")) {
hc <- hclust(as.dist(1 - cor(dat, use = "complete.obs",
method = metric)),
method = method)
} else {
stop("Provide proper metric for calculate.hclust!")
}
hc
}
#' get probeset data matrix
#'
#'
#' @param name name
#' @param level taxonomic level
#' @param phylogeny.info phylogeny.info
#' @param probedata oligos vs. samples preprocessed data matrix;
#' absolute scale
#' @param log10 Logical. Logarithmize the data TRUE/FALSE
#'
#'
#' @return probeset data matrix
#'
#' @export
#' @examples
#' phylogeny.info <- GetPhylogeny('HITChip', 'filtered')
#' data.dir <- system.file("extdata", package = "microbiome")
#' probedata <- read.profiling("oligo", data.dir = data.dir)
#' ps <- get.probeset('Akkermansia', 'L2', phylogeny.info, probedata)
#' @references See citation('microbiome')
#' @author Contact: Leo Lahti \email{microbiome-admin@@googlegroups.com}
#' @keywords utilities
get.probeset <- function(name, level, phylogeny.info, probedata, log10 = TRUE) {
# Pick probes for this entity
probes <- retrieve.probesets(phylogeny.info, level, name)
sets <- vector(length = length(probes), mode = "list")
names(sets) <- names(probes)
for (nam in names(probes)) {
# Pick expression for particular probes (absolute scale)
p <- intersect(probes[[nam]], rownames(probedata))
dat <- NULL
if (length(p) > 0) {
dat <- probedata[p, ]
dat <- matrix(dat, nrow = length(probes[[nam]]))
rownames(dat) <- probes[[nam]]
colnames(dat) <- colnames(probedata)
# Logarithmize probeset?
if (log10) {
dat <- log10(dat)
}
}
sets[[nam]] <- dat
}
if (length(sets) == 1) {
sets <- sets[[1]]
}
# Return
sets
}
#' PhylotypeRatios
#'
#' Calculate phylotype ratios (e.g., Bacteroides vs. Prevotella)
#' for a given phylotypes vs. samples matrix
#'
#' @param dat phylotypes vs. samples data matrix in log10 scale
#'
#' @return phylotype pairs x samples matrix indicating the ratio
#' (in log10 domain) between each unique pair
#' @export
#' @examples
#' data(peerj32)
#' ratios <- PhylotypeRatios(peerj32$microbes)
#' @references
#' See citation('microbiome')
#' @author Leo Lahti \email{microbiome-admin@@googlegroups.com}
#' @keywords utilities
PhylotypeRatios <- function(dat) {
phylogroups <- rownames(dat)
Nratios <- (length(phylogroups)^2 - length(phylogroups))/2
Nsamples <- ncol(dat)
ratios <- list()
for (i in 1:(length(phylogroups) - 1)) {
for (j in (i + 1):length(phylogroups)) {
pt1 <- phylogroups[[i]]
pt2 <- phylogroups[[j]]
ratios[[paste(pt1, pt2, sep = "-")]] <- dat[pt1, ] - dat[pt2, ]
}
}
ratios <- do.call(cbind, ratios)
t(ratios)
}
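# Equivalent sketch using combn() instead of the double loop (illustrative, not
# part of the package API; assumes 'dat' behaves like a numeric matrix with rownames):
PhylotypeRatiosCombn <- function(dat) {
    dat <- as.matrix(dat)
    pairs <- combn(rownames(dat), 2)
    ratios <- apply(pairs, 2, function(p) dat[p[1], ] - dat[p[2], ])
    colnames(ratios) <- paste(pairs[1, ], pairs[2, ], sep = "-")
    t(ratios)
}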
#' matrix.padjust
#'
#' Calculate adjusted p-values for a matrix of pvalues
#' which may contain missing values.
#' @param pvals p-value matrix
#' @param p.adjust.method p-value adjustment method: for options, see ?p.adjust
#' @return Adjusted p-value matrix
#' @export
#' @references
#' JD Storey 2003. Ann. Statist. 31(6):2013-2035. The positive false
#' discovery rate: a Bayesian interpretation and the q-value.
#'
#' To cite the microbiome R package, see citation('microbiome')
#' @author Leo Lahti \email{microbiome-admin@@googlegroups.com}
#' @examples qvals <- matrix.padjust(matrix(runif(1000), nrow = 100))
#' @keywords utilities
matrix.padjust <- function(pvals, p.adjust.method = "BH") {
pvec <- as.vector(pvals)
nai <- is.na(pvec)
qvec <- rep(NA, length(pvec))
qvec[!nai] <- p.adjust(pvec[!nai], method = p.adjust.method)
qmat <- matrix(qvec, nrow = nrow(pvals))
dimnames(qmat) <- dimnames(pvals)
qmat
}
#' polish.phylogeny.info
#'
#' Ensure phylogeny.info is in correct format
#'
#' @param phylogeny.info phylogeny.info data frame
#'
#' @return polished phylogeny.info
#' @references See citation('microbiome')
#' @author Leo Lahti \email{microbiome-admin@@googlegroups.com}
#' @examples
#' #phylogeny.info <- GetPhylogeny('HITChip', 'filtered')
#' #phylogeny.info <- polish.phylogeny.info(phylogeny.info)
#' @keywords internal
polish.phylogeny.info <- function(phylogeny.info) {
colnames(phylogeny.info)[
which(colnames(phylogeny.info) == "level.0")] <- "L0"
colnames(phylogeny.info)[
which(colnames(phylogeny.info) == "level.1")] <- "L1"
colnames(phylogeny.info)[
which(colnames(phylogeny.info) == "level.2")] <- "L2"
colnames(phylogeny.info)[
which(colnames(phylogeny.info) == "level 0")] <- "L0"
colnames(phylogeny.info)[
which(colnames(phylogeny.info) == "level 1")] <- "L1"
colnames(phylogeny.info)[
which(colnames(phylogeny.info) == "level 2")] <- "L2"
phylogeny.info
}
#' Impute missing values from a Gaussian.
#'
#' @param X data matrix (features x samples)
#'
#' @return imputed data matrix
#' @export
#' @references
#' See citation('microbiome')
#' @author Leo Lahti \email{microbiome-admin@@googlegroups.com}
#' @examples
#' data(peerj32)
#' x <- peerj32$microbes
#' xi <- impute(x)
#' @keywords utilities
impute <- function(X) {
# TODO Replace with standard R functions
for (i in 1:ncol(X)) {
x <- X[, i]
nas <- is.na(x)
X[nas, i] <- rnorm(sum(nas), mean(x[!is.na(x)]), sd(x[!is.na(x)]))
}
X
}
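# Sketch of the TODO above in base R only (illustrative; keeps the behaviour of
# drawing imputed values from each column's own Gaussian):
impute2 <- function(X) {
    apply(X, 2, function(x) {
        nas <- is.na(x)
        x[nas] <- rnorm(sum(nas), mean(x, na.rm = TRUE), sd(x, na.rm = TRUE))
        x
    })
}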
#' Get lower triangle of a square matrix
#' as a numeric vector such that
#' row-by-row, picking elements in the order
#' 2,1;3,1;3,2;4,1,...
#'
#' @param mat data matrix
#'
#' @return lower triangle as vector
#'
#' @export
#' @examples
#' mat <- rbind(c(1,2,3), c(4,5,6), c(7,8,9))
#' vec <- lower.triangle(mat)
#' @references See citation('microbiome')
#' @author Contact: Leo Lahti \email{microbiome-admin@@googlegroups.com}
#' @keywords utilities
lower.triangle <- function(mat) {
# TODO is this easy replace with standard R functions ?
elements <- c()
nr <- dim(mat)[[1]]
nc <- dim(mat)[[2]]
for (i in 2:nr) {
for (j in 1:(i - 1)) {
elements <- c(elements, mat[i, j])
}
}
elements
}
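# Answer to the TODO above (sketch): base R can do this in one line. Note the
# ordering: mat[lower.tri(mat)] walks the triangle column by column, while the
# loop above walks it row by row; transposing first reproduces the row-by-row order.
lower.triangle2 <- function(mat) t(mat)[upper.tri(mat)]
# identical(lower.triangle(mat), lower.triangle2(mat)) should be TRUE for a square mat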
|
/R/utils.R
|
no_license
|
chaokang2012/microbiome
|
R
| false
| false
| 9,560
|
r
|
#Quantile normalization, good R programming practice!!!
#Sep 23, 2016
quantile.norm<-function (PM){
order.PM <- apply(PM, 2, sort) #get the same dimension of matrix with ordered columns
mean.PM <- apply(order.PM, 1, mean) #get the rowmean
rank<-apply(PM,2, rank, ties.method="first") #get the column rank
finish.PM<-apply(rank, 2, function(x) mean.PM[x]) #reorder the rowmean
return (finish.PM)
}
# TODO: re-check this function; it might be buggy
judge.cnv <- function(x, cutoff) {
  y <- c()
  for (i in 1:length(x)) {
    if (x[i] >= cutoff) {
      y[i] <- "gain"
    } else if (x[i] < 0 && abs(x[i]) >= cutoff) {
      y[i] <- "loss"
    } else {
      y[i] <- "-"
    }
  }
  return(y)
}
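# Tiny worked example of judge.cnv (illustrative sketch, not in the original script):
# values at or beyond +/- cutoff are flagged as gain/loss, the rest as "-".
judge.cnv(c(3.1, -2.8, 0.4), 2.5)   # "gain" "loss" "-"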
# NOTE: shell command, not R -- kept commented out so the script can be sourced:
# less 180.cnv | awk -F "\t" '{for(i=5;i<NF;i++) {if ($i >=2.5 || $i <= -2.5) {print $0} }}' |less
data<-read.table("12_sample.doc", header=F)
head(data)
quantile.norm<-function (PM){
order.PM <- apply(PM, 2, sort) #get the same dimension of matrix with ordered columns
mean.PM <- apply(order.PM, 1, mean) #get the rowmean
rank<-apply(PM,2, rank, ties.method="first") #get the column rank
finish.PM<-apply(rank, 2, function(x) mean.PM[x]) #reorder the rowmean
return (finish.PM)
}
doc.norm<-log2(quantile.norm(data[,5:16])+1)
meanByrow<-apply(doc.norm,1,mean)
sdByrow<-apply(doc.norm,1,sd)
doc.std<-c()
for (i in 1:nrow(doc.norm)){doc.std<-rbind(doc.std, round((doc.norm[i,]-meanByrow[i])/sdByrow[i],2))}
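# Equivalent vectorized alternative (sketch): scale() standardizes columns, so a
# double transpose standardizes rows; this should match the loop above.
# doc.std <- round(t(scale(t(doc.norm))), 2)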
write.table(cbind(data[,1:4], doc.std), file="180.cnv", sep="\t", row.names=F, col.names=F)
apply(data[1:6,5:16], 2, function(x) judge.cnv(x,2.5))
write.table(cbind(data[,1:4],apply(data[,5:16], 2, function(x) judge.cnv(x,2.5))),file="cnv.info", sep="\t", quote=F, row.names=F, col.names=F)
|
/05stat_R_scripts/quantile.norm.cnv.R
|
no_license
|
seqmachine/scripts
|
R
| false
| false
| 1,658
|
r
|
load_libraries <- function(libraries) {
invisible(
lapply(
libraries,
library,
character.only = TRUE,
warn.conflicts = FALSE
)
)
}
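# Example usage (sketch; the package names are placeholders):
# load_libraries(c("dplyr", "ggplot2"))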
|
/packages/helpers/load_libraries.R
|
no_license
|
pedrorio/statistical_learning
|
R
| false
| false
| 165
|
r
|
source('util.R')
# Project a symmetric matrix onto the cone of matrices whose eigenvalues are all >= eps.
eps_cone_proj <- function(Z, eps) {
decomp <- eigen(Z, symmetric=TRUE)
return(decomp$vec %*% (pmax(decomp$val, eps) * t(decomp$vec)))
}
# Soft-thresholded covariance estimate constrained to have eigenvalues >= eps,
# computed by alternating (ADMM-style) updates of Sigma, Omega and the dual Lambda.
xue_cov <- function(S, lambda, eps, mu=2, tol=1e-3, max_it=500) {
p <- nrow(S)
Sigma <- soft_threshold(S, lambda, modify_diag=FALSE)
Omega <- S
Lambda <- matrix(0, p, p)
i <- 0
if (min(eigen(Sigma, symmetric=TRUE)$val) >= eps) {
return(Sigma)
}
while (norm(Sigma - Omega) > tol) {
if (i > max_it) {
warning(sprintf('Maximum iterations exceeded: lambda=%f, tau=%f', lambda, eps))
break
}
Omega <- eps_cone_proj(Sigma + mu * Lambda, eps)
Sigma <- 1 / (1 + mu) * soft_threshold(mu * (S - Lambda) + Omega, lambda * mu,
modify_diag=FALSE)
Lambda <- Lambda - 1 / mu * (Omega - Sigma)
i <- i + 1
}
return(Sigma)
}
# Same scheme on the correlation scale: threshold R = cov2cor(S) with an eigenvalue
# floor of tau, then rescale the result back to the covariance scale.
liu_cov <- function(S, lambda, tau, rho=2, tol=1e-3, max_it=500) {
p <- nrow(S)
R <- cov2cor(S)
Sigma <- soft_threshold(R, lambda, modify_diag=FALSE)
Gamma <- R
U <- matrix(0, p, p)
i <- 0
if (min(eigen(Sigma, symmetric=TRUE)$val) >= tau) {
return(t(sqrt(diag(S)) * Sigma) * sqrt(diag(S)))
}
while (norm(Sigma - Gamma) > tol) {
if (i > max_it) {
warning(sprintf('Maximum iterations exceeded: lambda=%f, tau=%f', lambda, tau))
break
}
Sigma <- soft_threshold(Gamma + 1 / rho * U, lambda / rho)
Gamma <- eps_cone_proj((R + rho * Sigma - U) / (1 + rho), tau)
U <- U + rho * (Gamma - Sigma)
i <- i + 1
}
return(t(sqrt(diag(S)) * Sigma) * sqrt(diag(S)))
}
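# Hedged usage sketch (assumes soft_threshold() from util.R is available and that
# S is a sample covariance matrix):
# S <- cov(matrix(rnorm(200), nrow = 20))            # 20 observations x 10 variables
# Sigma_xue <- xue_cov(S, lambda = 0.1, eps = 1e-3)
# Sigma_liu <- liu_cov(S, lambda = 0.1, tau = 1e-3)
# min(eigen(Sigma_xue, symmetric = TRUE)$values)     # should be close to or above eps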
|
/sparse_mle.R
|
permissive
|
leeyang/sparse_stein_cov
|
R
| false
| false
| 1,583
|
r
|
# unemployment is loaded in the workspace
summary(unemployment)
|
/Regression.R
|
no_license
|
Pavithira-Satheesh/Pavi
|
R
| false
| false
| 64
|
r
|
shinyServer(function(input, output, session) {
output$plot <- renderPlot({
plot(cars, type=input$plotType)
})
output$summary <- renderPrint({
summary(cars)
})
output$table <- renderDataTable({
cars
}, options=list(pageLength=10))
})
|
/B_analysts_sources_github/seandavi/Mammomics/server.R
|
no_license
|
Irbis3/crantasticScrapper
|
R
| false
| false
| 259
|
r
|
setwd('~/Documents/a-mmm-lab/a-ml/data')
cdata = read.csv('5-crack-surface-normal.csv', header=T)
cor(cdata$distance_to_grain_boundary, cdata$dadN3Dline)
cor(cdata$dadN3Dline, cdata$distance_to_grain_boundary)
lm.fit=lm(dadN3Dline~scaled_angle,data=cdata)
lm.fit
summary(lm.fit)
confint(lm.fit)
predict(lm.fit,data.frame(scaled_angle=(c(0,.5,1,1.5))),interval ="confidence")
plot(cdata$scaled_angle,cdata$dadN3Dline)
abline(lm.fit,lwd=3,col='red')
par(mfrow=c(2,2))
plot(lm.fit)
plot(predict(lm.fit), residuals(lm.fit))
plot(predict(lm.fit), rstudent(lm.fit))
par(mfrow=c(1,1))
lm.fit=lm(dadN3Dline~scaled_angle+distance_to_grain_boundary,data=cdata)
summary(lm.fit)
lm.fit=lm(dadN3Dline~.,data=cdata)
summary(lm.fit)
lm.fit=lm(dadN3Dline~.-raw_angle-nearest_grain_boundary_id-normal_z-normal_y-normal_x-nearest_grain_phi2-nearest_grain_Phi-nearest_grain_phi1-phi2-Phi-phi1-unit_vector_to_grain_boundary_z-unit_vector_to_grain_boundary_y-unit_vector_to_grain_boundary_x-magnitude_of_vector_to_grain_boundary-vector_to_grain_boundary_z-vector_to_grain_boundary_y-vector_to_grain_boundary_x-nearest_grain_boundary_z-nearest_grain_boundary_y-nearest_grain_boundary_x-grain_id-z-y-x-theta-crack_id,data=cdata)
summary(lm.fit)
plot(cdata$distance_to_grain_boundary, cdata$dadN3Dline)
plot(cdata$misorientation, cdata$dadN3Dline)
plot(cdata$scaled_angle, cdata$dadN3Dline)
install.packages('car')
library(car)
vif(lm.fit)
lm.fit1=update(lm.fit, ~.-scaled_angle)
summary(lm(dadN3Dline~distance_to_grain_boundary*scaled_angle, data=cdata))
lm.fit2=lm(dadN3Dline~scaled_angle+I(scaled_angle^2), data=cdata)
attach(cdata)
summary(lm.fit2)
lm.fit=lm(dadN3Dline~scaled_angle)
anova(lm.fit,lm.fit2)
lm.fit=lm(dadN3Dline~poly(scaled_angle, 13))
summary(lm.fit)
|
/r/old-testing/1-linear-regression.R
|
no_license
|
nathanwilk7/mmm-lab-features-ml
|
R
| false
| false
| 1,749
|
r
|
## Title: Programming Assignment 2 - Lexical Scoping
## Week 2 Assignment (Peer Graded) of 4-Week R-Programming course;
## second course in the Data Science Specialization series
## Function: Creates a special "matrix" object that can cache its inverse
## Assumption: The matrix supplied is always invertible
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
## <<- operator which can be used to assign a value to an object
## in an environment that is different from the current environment
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
## Function: computes the inverse of the special "matrix" returned
## by makeCacheMatrix above. If the inverse has already been calculated
## (and the matrix has not changed), then the cachesolve should retrieve
## the inverse from the cache
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinverse()
if(!is.null(inv)) {
## Check if an inversed matrix has previously been computed
print("An inversed matrix was previously computed!")
return(inv)
}
## Nope, calculate it for the first time
data <- x$get()
inv <- solve(data)
## For next time, preserve the inversed matrix in the cache
x$setinverse(inv)
inv
}
## 22 February 2015
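## Usage sketch (illustrative only, not part of the graded file):
## m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(m)   # computes and caches the inverse
## cacheSolve(m)   # second call reports the cached result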
|
/cachematrix.R
|
no_license
|
oliahad/ProgrammingAssignment2
|
R
| false
| false
| 1,449
|
r
|
#' UI part of report overview module
#'
#' @param id Shiny namespace id.
#'
#' @return UI elements for report overview module.
#' @export
#' @import shiny
reportOverviewModuleUI <- function(id) {
ns <- shiny::NS(id)
shiny::tagList(
HTML("This page shows the summary of the classifications in the selected sample set.
The table cells have a barchart that shows the relation of the cell value to other cell values in the same category, with the microbiota columns being a separate category from the rest."),
checkboxInput(ns("opt_samples_overview_percent"), label = "Show percentages instead of number of reads", value = TRUE),
DT::dataTableOutput(ns('dt_samples_overview')),
br(),
HTML("If the table does not display at first, double-click the checkbox to reload it.")
)
}
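# Hedged wiring sketch (not part of pavian itself): the UI and server halves are
# connected with the usual shiny module pattern; 'my_sample_data' and 'my_reports'
# are assumed placeholders supplied by the surrounding app.
#
#   ui <- fluidPage(reportOverviewModuleUI("overview"))
#   server <- function(input, output, session) {
#     callModule(reportOverviewModule, "overview",
#                sample_data = reactive(my_sample_data),
#                reports = reactive(my_reports))
#   }
#   shinyApp(ui, server)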
#' Shiny modules to display an overview of metagenomics reports
#'
#' @param input Shiny input object.
#' @param output Shiny output object.
#' @param session Shiny session object.
#' @param sample_data Samples \code{data.frame}.
#' @param reports List of reports.
#' @param datatable_opts Additional datatable opts (mostly $class)
#'
#' @return Report overview module server functionality.
#' @export
#' @import shiny
reportOverviewModule <- function(input, output, session, sample_data, reports, datatable_opts = NULL) {
#r_state <- list()
observeEvent(input$opt_samples_overview_percent, {
## save state of table
#r_state <<- list(
# search_columns = input$dt_samples_overview_search_columns,
# state = input$dt_samples_overview_state
# )
# utils::str(input$dt_samples_overview_state)
})
get_samples_summary <- reactive( {
validate(need(sample_data(), message = "No data available."))
validate(need(reports(), message = "No data available."))
withProgress({
## Create summaries of all reports
#str(reports())
samples_summary <- do.call(rbind, lapply(reports(), summarize_report))
samples_summary$Name <- rownames(samples_summary)
#samples_summary$FileName <- sample_data()[,"ReportFile"]
extra_cols <- c("Name")
samples_summary <- samples_summary[,c(extra_cols, setdiff(colnames(samples_summary),extra_cols))]
colnames(samples_summary) <- beautify_string(colnames(samples_summary))
samples_summary
}, message = "Summarizing sample contents ... ")
})
## Samples overview output
output$dt_samples_overview <- DT::renderDataTable({
samples_summary <- get_samples_summary()
validate(need(samples_summary, message = "Error in getting samples summary - please re-select the sample set."))
start_color_bar_at <- 2 ## length of extra_cols + 1
number_range <- c(0, max(samples_summary[, 2], na.rm = TRUE))
if (isTRUE(input$opt_samples_overview_percent)) {
## add a custom renderer.
start_color_bar_at <- start_color_bar_at + 1
number_range <- c(0, 100)
samples_summary[, start_color_bar_at:ncol(samples_summary)] <-
100 * signif(sweep(samples_summary[, start_color_bar_at:ncol(samples_summary)], 1, samples_summary[, 2], `/`), 4)
## TODO: Define columnDefs and give read counts on mouse-over
}
microbial_col <- 7
if (!max(samples_summary[,2]) > 1000) {
samples_summary[,seq(from=start_color_bar_at, to=ncol(samples_summary))] <- signif(samples_summary[,seq(from=start_color_bar_at, to=ncol(samples_summary))], 4)
}
    dt <- DT::datatable(
      samples_summary,
      rownames = FALSE,
      selection = 'single',
      extensions = datatable_opts$extensions,
      options = list(buttons = common_buttons(basename(attr(sample_data(), "set_name")), "summary")),
      escape = FALSE,
      class = datatable_opts$class
    )
#formatString <- function(table, columns, before="", after="") {
# DT:::formatColumns(table, columns, function(col, before, after)
# sprintf("$(this.api().cell(row, %s).node()).html((%s + data[%d]) + %s); ",col, before, col, after),
# before, after
# )
#}
if (isTRUE(input$opt_samples_overview_percent)) {
dt <- dt %>%
DT::formatStyle(
colnames(samples_summary)[2],
background = styleColorBar2(c(0,max(samples_summary[[2]],na.rm=T)), 'lightblue')
) %>%
DT::formatStyle(
colnames(samples_summary)[seq(from=start_color_bar_at, to=microbial_col-1)],
background = styleColorBar2(number_range, 'lightsalmon')
) %>%
DT::formatStyle(colnames(samples_summary)[seq(from=microbial_col,to=ncol(samples_summary))],
background = DT::styleColorBar(c(0, max(
samples_summary[, microbial_col], na.rm = TRUE
)), 'lightgreen')) %>%
DT::formatCurrency(start_color_bar_at - 1, currency = '', digits = 0) %>%
DT::formatString(seq(from=start_color_bar_at, to=ncol(samples_summary)),
suffix = '%') ## TODO: display as percent
# ## not implemented for now as formatPercentage enforces a certain number of digits, but I like to round
# ## with signif.
} else {
dt <-
dt %>% DT::formatStyle(
colnames(samples_summary)[seq(from=start_color_bar_at, to=microbial_col-1)],
background = styleColorBar2(number_range, 'lightblue')
) %>%
DT::formatStyle(colnames(samples_summary)[seq(from=microbial_col,to=ncol(samples_summary))],
background = DT::styleColorBar(c(0, max(
samples_summary[, microbial_col], na.rm = TRUE
)), 'lightgreen'))
if (max(samples_summary[,2]) > 1000) {
dt <- dt %>% DT::formatCurrency(seq(from=start_color_bar_at, to=ncol(samples_summary)),
currency = '', digits = 0)
}
}
dt
})
}
|
/R/module_overview_table.R
|
no_license
|
alartin/pavian
|
R
| false
| false
| 5,847
|
r
|
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
args <- commandArgs(TRUE)
options(digits=22)
library("Matrix")
A = as.matrix(readMM(paste(args[1], "A.mtx", sep="")))
R = matrix(0, nrow(A), ncol(A));
for( i in 1:ncol(A) ){
R[,i] = round(A[,i]);
}
writeMM(as(R, "CsparseMatrix"), paste(args[2], "R", sep=""));
|
/src/test/scripts/functions/vect/VectorizeForLoopUnaryColPos.R
|
permissive
|
apache/systemds
|
R
| false
| false
| 1,189
|
r
|
#-----------TWITTER DATA ANALYTICS USING R------------------
consumer_key <- "GIVE YOUR CONSUMER KEY"
consumer_secret_key <- "SECRETKEY"
access_token <- "ACCESS_TOKEN"
access_token_secret <- "TOKEN SECRET"
consumer_key
install.packages("twitteR")
install.packages("httr")
install.packages("ROAuth")
install.packages("devtools")
library(httr)
library(devtools)
library(twitteR)
library(ROAuth)
library(RCurl)
library(plyr)
library(stringr)
library(ggplot2)
#============================================================
# R Markdown front matter and setup chunk (commented out; not valid in a plain R script):
# ---
# title: "Twitter"
# output:
#   html_document: default
#   word_document: default
# ---
# ```{r setup, include=FALSE}
# knitr::opts_chunk$set(echo = TRUE)
# ```
##
# save credentials
### https://apps.twitter.com/
library(httr)
library(devtools)
library(twitteR)
#install.packages("ROAuth")
library(ROAuth)
#install.packages('base64enc')
library(base64enc)
library(plyr)
library(stringr)
library(ggplot2)
library(httpuv)
library(RCurl)
# Set SSL certs globally
options(RCurlOptions = list(cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl")))
requestURL = "https://api.twitter.com/oauth/request_token"
accessURL = "https://api.twitter.com/oauth/access_token"
authURL = "https://api.twitter.com/oauth/authorize"
consumerKey <- consumer_key
consumerSecret <- consumer_secret_key
#consumerKey
accessToken <- access_token
accessSecret <- access_token_secret
options(httr_oauth_cache=T)
setup_twitter_oauth(consumerKey,
consumerSecret,
accessToken,
accessSecret)
#to get the 1000 tweeets in a list, machine learning
r_stats <- searchTwitter("machinelearning", n=1000)
r_stats
class(r_stats)
y_stats.df = twListToDF(r_stats)
View(y_stats.df)
# Sentiment Analysis or social media analytics
library(plyr)
library(stringr)
library(ggplot2)
DatasetKKRiders <- read.csv("C:\\Users\\Administrator\\Desktop\\KKRidersTweets.csv")
#DatasetKKRiders$text<-as.factor(DatasetKKRiders$text)
Datasetlionsdenkxip <- read.csv("C:\\Users\\Administrator\\Desktop\\lionsdenKXIPTweets.csv")
#Datasetlionsdenkxip$text<-as.factor(Datasetlionsdenkxip$text)
###############################
#Sentiment Function
###############################
library (plyr)
library (stringr)
score.sentiment = function(sentences, pos.words, neg.words, .progress='none')
{
require(plyr)
require(stringr)
# we got a vector of sentences. plyr will handle a list
# or a vector as an "l" for us
# we want a simple array ("a") of scores back, so we use
# "l" + "a" + "ply" = "laply":
scores = laply(sentences, function(sentence, pos.words, neg.words) {
# clean up sentences with R's regex-driven global substitute, gsub():
sentence = gsub('[[:punct:]]', '', sentence)
sentence = gsub('[[:cntrl:]]', '', sentence)
sentence = gsub('\\d+', '', sentence)
# and convert to lower case:
sentence = tolower(sentence)
# split into words. str_split is in the stringr package
word.list = str_split(sentence, '\\s+')
# sometimes a list() is one level of hierarchy too much
words = unlist(word.list)
# compare our words to the dictionaries of positive & negative terms
pos.matches = match(words, pos.words)
neg.matches = match(words, neg.words)
# match() returns the position of the matched term or NA
# we just want a TRUE/FALSE:
pos.matches = !is.na(pos.matches)
neg.matches = !is.na(neg.matches)
# and conveniently enough, TRUE/FALSE will be treated as 1/0 by sum():
score = sum(pos.matches) - sum(neg.matches)
return(score)
}, pos.words, neg.words, .progress=.progress )
scores.df = data.frame(score=scores, text=sentences)
return(scores.df)
}
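############################################
# Worked micro-example of score.sentiment (illustrative sketch, not in the
# original; the word lists here are stand-ins for the Hu & Liu lexicons loaded below)
############################################
# score.sentiment(c("I love this phone", "this is a bad day"),
#                 c("good", "love"), c("bad", "hate"))$score   # 1, -1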
############################################
#Scoring Tweets & Adding a column
############################################
## List of positive and negative words can be downloaded from below link
## http://www.cs.uic.edu/~liub/FBS/sentiment-analysis.html#lexicon
#Load sentiment word lists
hu.liu.pos = scan('F:/New folder/R Videos/positive-words.txt', what='character', comment.char=';')
hu.liu.neg = scan('F:/New folder/R Videos/negative-words.txt', what='character', comment.char=';')
#Add words to list # Optional
pos.words = c(hu.liu.pos, 'upgrade','jai ho')
neg.words = c(hu.liu.neg, 'wtf', 'wait','waiting', 'epicfail', 'mechanical')
#Score all tweets
KKRiders.scores = score.sentiment(DatasetKKRiders$text, pos.words,neg.words, .progress='text')
lionsdenkxip.scores = score.sentiment(Datasetlionsdenkxip$text, pos.words,neg.words, .progress='text')
View(KKRiders.scores)
#path<-""
#write.csv(KKRiders.scores,file=paste(path,"KKRidersScores.csv",sep=""),row.names=TRUE)
#write.csv(lionsdenkxip.scores,file=paste(path," lionsdenkxipScores.csv",sep=""),row.names=TRUE)
KKRiders.scores$Team = 'KKRiders'
View(KKRiders.scores)
lionsdenkxip.scores$Team = 'lionsdenkxip'
View(lionsdenkxip.scores)
#############################
# Visualizing
#############################
hist(KKRiders.scores$score)
qplot(KKRiders.scores$score)
hist(lionsdenkxip.scores$score)
qplot(lionsdenkxip.scores$score)
#################################
#Comparing 2 data sets
#################################
# all.scores = rbind(KKRiders.scores, lionsdenkxip.scores, MLB.scores)
all.scores = rbind(KKRiders.scores, lionsdenkxip.scores)
ggplot(data=all.scores) + # ggplot works on data.frames, always
geom_histogram(mapping=aes(x=score, fill=Team), binwidth=1) +
facet_grid(Team~.) + # make a separate plot for each hashtag
theme_bw() + scale_fill_brewer() # plain display, nicer colors
#####################################################
#https://apps.twitter.com
# You'd need to set global options with an authenticated app
consumerKey <- ""
consumerSecret <- ""
accessToken <- ""
accessTokenSecret <- ""
setup_twitter_oauth(consumerKey, consumerSecret, accessToken, accessTokenSecret)
# We can request only 3200 tweets at a time; it will return fewer
# depending on the API
tweets1 <- userTimeline("KKR", n = 100)
tweets1_df <- dplyr::tbl_df(purrr::map_df(tweets1, as.data.frame))
#-------------------------------------------------------------------------------
#install.packages("wordcloud")
library(wordcloud)
library(syuzhet)
library(plyr)
library(dplyr)
library(stringr)
library(ggplot2)
#Harvest some tweets
dhoni_tweets = searchTwitter("dhoni", n=2000, since = "2016-09-01", lang = "en")
#explore tweets
length.dhoni_tweets <- length(dhoni_tweets)
length.dhoni_tweets
dhoni_tweets
dhoni_tweets.df <- ldply(dhoni_tweets, function(t) t$toDataFrame())
write.csv(dhoni_tweets.df, "dhonitweets.csv")
#get the text
dhoni_tweets <- sapply(dhoni_tweets, function(x) x$getText())
#cleaning 1- Remove people name, RT text etc.
dhoni_tweets1 <- gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "",dhoni_tweets)
dhoni_tweets1
y_stats.df1 <- gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "",y_stats.df)
#Cleaning 2- remove html links
dhoni_tweets2 <- gsub("http[^[:blank:]]+","", dhoni_tweets1)
y_stats.df2 <- gsub("http[^[:blank:]]+","", y_stats.df1)
#cleaning 3- remove the people names
dhoni_tweets3 <- gsub("@\\w+", "", dhoni_tweets2)
y_stats.df3 <- gsub("@\\w+", "", y_stats.df2)
#cleaning 4- remove punctuations
dhoni_tweets4 <- gsub("[[:punct:]]", " ", dhoni_tweets3)
y_stats.df4 <- gsub("[[:punct:]]", " ", y_stats.df3)
#cleaning 5- remove Punctuations
dhoni_tweets5 <- gsub("[^[:alnum:]]", " ", dhoni_tweets4)
y_stats.df5 <- gsub("[^[:alnum:]]", " ", y_stats.df4)
# Exporting to Excel
write.csv(dhoni_tweets5, "dhonitweets1.csv")
write.csv(y_stats.df5, "ystats.csv")
library(tm)
library(tmap)
#library(expm)
#creating wordcorpus and cleaning
dhoni_tweets6 <- Corpus(VectorSource(dhoni_tweets5))
dhoni_tweets6 <- tm_map(dhoni_tweets6, removePunctuation)
dhoni_tweets6 <- tm_map(dhoni_tweets6, content_transformer(tolower))
dhoni_tweets6 <- tm_map(dhoni_tweets6, removeWords, stopwords("english"))
dhoni_tweets6 <- tm_map(dhoni_tweets6, stripWhitespace)
y_stats.df <- Corpus(VectorSource(y_stats.df5))
#building wordcloud
pal <- brewer.pal(8, "Dark2")
wordcloud(dhoni_tweets6, min.freq = 5, max.words = Inf,
          random.order = FALSE, colors = pal)
#it will show
wordcloud(dhoni_tweets6)
#Sentiment Analysis
#how the function works
get_nrc_sentiment("I bought an iphone a few days ago.
it is such a nice phone, although a little large.
the touch screen is cool. the voice quality is
clear too. I simply love it!")
#Running on our data
dhonisentiment <- get_nrc_sentiment(dhoni_tweets5)
dhoniSentimentScores <- data.frame(colSums(dhonisentiment[,]))
names(dhoniSentimentScores) <- "score"
dhoniSentimentScores <- cbind("sentiment" = rownames(dhoniSentimentScores), dhoniSentimentScores)
rownames(dhoniSentimentScores) <- NULL
ggplot(data = dhoniSentimentScores, aes(x = sentiment, y = score))+
geom_bar(aes(fill = sentiment), stat = "identity")+
theme(legend.position = "none") +
xlab("sentiment") + ylab("number of tweets") + ggtitle("Total Score
Based on Dhoni Tweets")
|
/src/ml-using-r-programming/twitter_data_sentimental_analysis_using_r.r
|
no_license
|
ajayghoshrr/machine-learning-and-stuff
|
R
| false
| false
| 9,324
|
r
|
# Adobe Experience Manager OSGI config (AEM) API
#
# Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API
#
# OpenAPI spec version: 1.0.0-pre.0
# Contact: opensource@shinesolutions.com
# Generated by: https://openapi-generator.tech
#' ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplProperties Class
#'
#' @field enableFallback
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplProperties <- R6::R6Class(
'ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplProperties',
public = list(
`enableFallback` = NULL,
initialize = function(`enableFallback`){
if (!missing(`enableFallback`)) {
stopifnot(R6::is.R6(`enableFallback`))
self$`enableFallback` <- `enableFallback`
}
},
toJSON = function() {
ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplPropertiesObject <- list()
if (!is.null(self$`enableFallback`)) {
ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplPropertiesObject[['enableFallback']] <- self$`enableFallback`$toJSON()
}
ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplPropertiesObject
},
fromJSON = function(ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplPropertiesJson) {
ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplPropertiesObject <- jsonlite::fromJSON(ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplPropertiesJson)
if (!is.null(ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplPropertiesObject$`enableFallback`)) {
enableFallbackObject <- ConfigNodePropertyBoolean$new()
enableFallbackObject$fromJSON(jsonlite::toJSON(ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplPropertiesObject$enableFallback, auto_unbox = TRUE))
self$`enableFallback` <- enableFallbackObject
}
},
toJSONString = function() {
sprintf(
'{
"enableFallback": %s
}',
self$`enableFallback`$toJSON()
)
},
fromJSONString = function(ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplPropertiesJson) {
ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplPropertiesObject <- jsonlite::fromJSON(ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplPropertiesJson)
ConfigNodePropertyBooleanObject <- ConfigNodePropertyBoolean$new()
self$`enableFallback` <- ConfigNodePropertyBooleanObject$fromJSON(jsonlite::toJSON(ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplPropertiesObject$enableFallback, auto_unbox = TRUE))
}
)
)
|
/clients/r/generated/R/ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplProperties.r
|
permissive
|
shinesolutions/swagger-aem-osgi
|
R
| false
| false
| 2,746
|
r
|
# Adobe Experience Manager OSGI config (AEM) API
#
# Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API
#
# OpenAPI spec version: 1.0.0-pre.0
# Contact: opensource@shinesolutions.com
# Generated by: https://openapi-generator.tech
#' ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplProperties Class
#'
#' @field enableFallback
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplProperties <- R6::R6Class(
'ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplProperties',
public = list(
`enableFallback` = NULL,
initialize = function(`enableFallback`){
if (!missing(`enableFallback`)) {
stopifnot(R6::is.R6(`enableFallback`))
self$`enableFallback` <- `enableFallback`
}
},
toJSON = function() {
ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplPropertiesObject <- list()
if (!is.null(self$`enableFallback`)) {
ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplPropertiesObject[['enableFallback']] <- self$`enableFallback`$toJSON()
}
ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplPropertiesObject
},
fromJSON = function(ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplPropertiesJson) {
ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplPropertiesObject <- jsonlite::fromJSON(ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplPropertiesJson)
if (!is.null(ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplPropertiesObject$`enableFallback`)) {
enableFallbackObject <- ConfigNodePropertyBoolean$new()
enableFallbackObject$fromJSON(jsonlite::toJSON(ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplPropertiesObject$enableFallback, auto_unbox = TRUE))
self$`enableFallback` <- enableFallbackObject
}
},
toJSONString = function() {
sprintf(
'{
"enableFallback": %s
}',
self$`enableFallback`$toJSON()
)
},
fromJSONString = function(ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplPropertiesJson) {
ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplPropertiesObject <- jsonlite::fromJSON(ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplPropertiesJson)
ConfigNodePropertyBooleanObject <- ConfigNodePropertyBoolean$new()
self$`enableFallback` <- ConfigNodePropertyBooleanObject$fromJSON(jsonlite::toJSON(ComAdobeCqSocialServiceusersInternalImplServiceUserWrapperImplPropertiesObject$enableFallback, auto_unbox = TRUE))
}
)
)
|
library(tidyverse)
library(rjson)
library(DBI)    # dbConnect()/dbWriteTable() used below
library(RMySQL) # MySQL() driver used below
#
stages <- c("2", "4", "6")
FILES <- fs::dir_info("C:/Users/Jake/Documents/R Code/p-c/Stage Telemetry/") %>%
filter(str_sub(path, 1, 3) == "tdf") %>%
filter(str_detect(path, "step"))
#
con <- dbConnect(MySQL(),
host='localhost',
dbname='cycling',
user='jalnichols',
password='braves')
#
for(s in 1:length(FILES$path)) {
clean <- readr::melt_tsv(FILES$path[[s]]) %>% filter(str_detect(value, "telemetryCompetitor-2020"))
for(r in 1:length(clean$value)) {
js <- jsonlite::fromJSON(str_replace(clean$value[[r]], "data: ", "")) %>% .$data
df <- js$Riders %>%
jsonlite::flatten() %>%
as_tibble() %>%
mutate(TimeStamp = js$TimeStamp,
StageId = js$StageId,
RaceName = js$RaceName,
RaceStatus = js$RaceStatus,
`_id` = js$`_id`,
`_bind` = js$`_bind`,
`_updatedAt` = js$`_updatedAt`,
`_parent` = js$`_parent`,
`_key` = js$`_key`) %>%
select(-LatLon)
DBI::dbWriteTable(con, "telemetry_tdf2020", df, row.names = F, append = TRUE)
print(paste0(s,"-", r))
}
}
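# Sketch of what the inner loop expects, using a made-up payload (field names are
# illustrative only -- the real feed defines them): each kept line looks like
# "data: {...json...}"; the prefix is stripped and the JSON parsed.
example_line <- 'data: {"data":{"TimeStamp":"2020-09-01T12:00:00Z","Riders":[{"Bib":1,"Speed":42.1}]}}'
parsed <- jsonlite::fromJSON(stringr::str_replace(example_line, "data: ", ""))$data
parsed$Riders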
|
/Clean download file TDF Telemetry.R
|
no_license
|
jalnichols/p-c
|
R
| false
| false
| 1,265
|
r
|
library(tidyverse)
library(rjson)
library(DBI)    # dbConnect()/dbWriteTable() used below
library(RMySQL) # MySQL() driver used below
#
stages <- c("2", "4", "6")
FILES <- fs::dir_info("C:/Users/Jake/Documents/R Code/p-c/Stage Telemetry/") %>%
filter(str_sub(path, 1, 3) == "tdf") %>%
filter(str_detect(path, "step"))
#
con <- dbConnect(MySQL(),
host='localhost',
dbname='cycling',
user='jalnichols',
password='braves')
#
for(s in 1:length(FILES$path)) {
clean <- readr::melt_tsv(FILES$path[[s]]) %>% filter(str_detect(value, "telemetryCompetitor-2020"))
for(r in 1:length(clean$value)) {
js <- jsonlite::fromJSON(str_replace(clean$value[[r]], "data: ", "")) %>% .$data
df <- js$Riders %>%
jsonlite::flatten() %>%
as_tibble() %>%
mutate(TimeStamp = js$TimeStamp,
StageId = js$StageId,
RaceName = js$RaceName,
RaceStatus = js$RaceStatus,
`_id` = js$`_id`,
`_bind` = js$`_bind`,
`_updatedAt` = js$`_updatedAt`,
`_parent` = js$`_parent`,
`_key` = js$`_key`) %>%
select(-LatLon)
DBI::dbWriteTable(con, "telemetry_tdf2020", df, row.names = F, append = TRUE)
print(paste0(s,"-", r))
}
}
|
elnet=function(x,is.sparse,ix,jx,y,weights,offset,type.gaussian=c("covariance","naive"),alpha,nobs,nvars,jd,vp,cl,ne,nx,nlam,flmin,ulam,thresh,isd,intr,vnames,maxit){
maxit=as.integer(maxit)
weights=as.double(weights)
### compute the null deviance
ybar=weighted.mean(y,weights)
nulldev=sum(weights* (y-ybar)^2)
if(nulldev==0)stop("y is constant; gaussian glmnet fails at standardization step")
type.gaussian=match.arg(type.gaussian)
ka=as.integer(switch(type.gaussian,
covariance=1,
naive=2,
))
storage.mode(y)="double"
if(is.null(offset)){
offset=y*0
is.offset=FALSE}
else{
storage.mode(offset)="double"
is.offset=TRUE
}
fit=if(is.sparse).Fortran("spelnet",
ka,parm=alpha,nobs,nvars,x,ix,jx,y-offset,weights,jd,vp,cl,ne,nx,nlam,flmin,ulam,thresh,isd,intr,maxit,
lmu=integer(1),
a0=double(nlam),
ca=double(nx*nlam),
ia=integer(nx),
nin=integer(nlam),
rsq=double(nlam),
alm=double(nlam),
nlp=integer(1),
jerr=integer(1),PACKAGE="glmnet"
)
else .Fortran("elnet",
ka,parm=alpha,nobs,nvars,as.double(x),y-offset,weights,jd,vp,cl,ne,nx,nlam,flmin,ulam,thresh,isd,intr,maxit,
lmu=integer(1),
a0=double(nlam),
ca=double(nx*nlam),
ia=integer(nx),
nin=integer(nlam),
rsq=double(nlam),
alm=double(nlam),
nlp=integer(1),
jerr=integer(1),PACKAGE="glmnet"
)
if(fit$jerr!=0){
errmsg=jerr(fit$jerr,maxit,pmax=nx,family="gaussian")
if(errmsg$fatal)stop(errmsg$msg,call.=FALSE)
else warning(errmsg$msg,call.=FALSE)
}
outlist=getcoef(fit,nvars,nx,vnames)
dev=fit$rsq[seq(fit$lmu)]
outlist=c(outlist,list(dev.ratio=dev,nulldev=nulldev,npasses=fit$nlp,jerr=fit$jerr,offset=is.offset))
class(outlist)="elnet"
outlist
}
|
/R/elnet.R
|
no_license
|
Kimberly7fei/glmnet
|
R
| false
| false
| 1,864
|
r
|
elnet=function(x,is.sparse,ix,jx,y,weights,offset,type.gaussian=c("covariance","naive"),alpha,nobs,nvars,jd,vp,cl,ne,nx,nlam,flmin,ulam,thresh,isd,intr,vnames,maxit){
maxit=as.integer(maxit)
weights=as.double(weights)
### compute the null deviance
ybar=weighted.mean(y,weights)
nulldev=sum(weights* (y-ybar)^2)
if(nulldev==0)stop("y is constant; gaussian glmnet fails at standardization step")
type.gaussian=match.arg(type.gaussian)
ka=as.integer(switch(type.gaussian,
covariance=1,
naive=2,
))
storage.mode(y)="double"
if(is.null(offset)){
offset=y*0
is.offset=FALSE}
else{
storage.mode(offset)="double"
is.offset=TRUE
}
fit=if(is.sparse).Fortran("spelnet",
ka,parm=alpha,nobs,nvars,x,ix,jx,y-offset,weights,jd,vp,cl,ne,nx,nlam,flmin,ulam,thresh,isd,intr,maxit,
lmu=integer(1),
a0=double(nlam),
ca=double(nx*nlam),
ia=integer(nx),
nin=integer(nlam),
rsq=double(nlam),
alm=double(nlam),
nlp=integer(1),
jerr=integer(1),PACKAGE="glmnet"
)
else .Fortran("elnet",
ka,parm=alpha,nobs,nvars,as.double(x),y-offset,weights,jd,vp,cl,ne,nx,nlam,flmin,ulam,thresh,isd,intr,maxit,
lmu=integer(1),
a0=double(nlam),
ca=double(nx*nlam),
ia=integer(nx),
nin=integer(nlam),
rsq=double(nlam),
alm=double(nlam),
nlp=integer(1),
jerr=integer(1),PACKAGE="glmnet"
)
if(fit$jerr!=0){
errmsg=jerr(fit$jerr,maxit,pmax=nx,family="gaussian")
if(errmsg$fatal)stop(errmsg$msg,call.=FALSE)
else warning(errmsg$msg,call.=FALSE)
}
outlist=getcoef(fit,nvars,nx,vnames)
dev=fit$rsq[seq(fit$lmu)]
outlist=c(outlist,list(dev.ratio=dev,nulldev=nulldev,npasses=fit$nlp,jerr=fit$jerr,offset=is.offset))
class(outlist)="elnet"
outlist
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TCRAffinity.R
\name{contactFootprintDensityPlot}
\alias{contactFootprintDensityPlot}
\alias{Features_sCPP}
\alias{univariateCorrelationAnalysis}
\alias{univariateCorrelationPlot}
\alias{univariateFeatureSelect}
\alias{multivariateRegressionPlot}
\alias{multivariateRegressionPrediction}
\title{Single TCR-peptide contact potential profiling.}
\usage{
contactFootprintDensityPlot(weightStringSet)
Features_sCPP(peptideSet, tcrSet, aaIndexIDSet = c("BETM990101inv",
"KESO980102inv", "KOLA930101", "MICC010101", "SIMK990104", "VENM980101",
"VENM980101inv"), fragLenSet = 3:8, seed = 12345,
coreN = parallel::detectCores(logical = F))
univariateCorrelationAnalysis(dt_deltaG_sCPP,
coreN = parallel::detectCores(logical = F))
univariateCorrelationPlot(dt_deltaG_sCPP, dt_deltaG_sCPP_outlier = NULL,
corVarName = "sCPP_BETM990101inv_Skew_4")
univariateFeatureSelect(dt_univ, aaIndexIDSet, sig = 0.05,
bestOnly = T)
multivariateRegressionPlot(dt_deltaG_sCPP, dt_deltaG_sCPP_outlier = NULL,
corVarNames)
multivariateRegressionPrediction(dt_deltaG_sCPP,
dt_deltaG_sCPP_new = NULL, corVarNames)
}
\arguments{
\item{weightStringSet}{A set of contact weights. E.g. "1|2|3|2|1"}
\item{peptideSet}{A set of peptide sequences.}
\item{tcrSet}{A set of corresponding TCR sequences.}
\item{aaIndexIDSet}{A set of AAIndex IDs indicating the AACP scales to be used. Set "all" to shortcut the selection of all available AACP scales.}
\item{fragLenSet}{A set of sliding window sizes. Must be between 3 and 8.}
\item{seed}{A random seed.}
\item{coreN}{The number of cores to be used for parallelization.}
\item{dt_deltaG_sCPP}{A data.table containing the "DeltaG" and sCPP feature columns.}
\item{dt_deltaG_sCPP_outlier}{Optional. If provided, it would be used as outliers.}
\item{corVarName}{A variable name used for univariate analysis.}
\item{dt_univ}{An output data.table of univariate analysis.}
\item{sig}{A significance cutoff for univariate feature selection.}
\item{bestOnly}{Logical. If True, only the best sCPP feature per AAIndex will be returned. If False, all sCPP features derived from selected AAIndices will be returned.}
\item{corVarNames}{A set of variable names used for multivariate regression.}
\item{dt_deltaG_sCPP_new}{Optional. If provided, TCR affinities would be predicted for this dataset. Otherwise, predicted TCR affinities for the original dataset would be returned.}
\item{aaIndexIDSet}{A set of AAIndices of interest for univariate feature selection.}
}
\description{
Single TCR-peptide contact potential profiling.
}
|
/man/TCRAffinity.Rd
|
permissive
|
abuguadalajara/Repitope
|
R
| false
| true
| 2,713
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TCRAffinity.R
\name{contactFootprintDensityPlot}
\alias{contactFootprintDensityPlot}
\alias{Features_sCPP}
\alias{univariateCorrelationAnalysis}
\alias{univariateCorrelationPlot}
\alias{univariateFeatureSelect}
\alias{multivariateRegressionPlot}
\alias{multivariateRegressionPrediction}
\title{Single TCR-peptide contact potential profiling.}
\usage{
contactFootprintDensityPlot(weightStringSet)
Features_sCPP(peptideSet, tcrSet, aaIndexIDSet = c("BETM990101inv",
"KESO980102inv", "KOLA930101", "MICC010101", "SIMK990104", "VENM980101",
"VENM980101inv"), fragLenSet = 3:8, seed = 12345,
coreN = parallel::detectCores(logical = F))
univariateCorrelationAnalysis(dt_deltaG_sCPP,
coreN = parallel::detectCores(logical = F))
univariateCorrelationPlot(dt_deltaG_sCPP, dt_deltaG_sCPP_outlier = NULL,
corVarName = "sCPP_BETM990101inv_Skew_4")
univariateFeatureSelect(dt_univ, aaIndexIDSet, sig = 0.05,
bestOnly = T)
multivariateRegressionPlot(dt_deltaG_sCPP, dt_deltaG_sCPP_outlier = NULL,
corVarNames)
multivariateRegressionPrediction(dt_deltaG_sCPP,
dt_deltaG_sCPP_new = NULL, corVarNames)
}
\arguments{
\item{weightStringSet}{A set of contact weights. E.g. "1|2|3|2|1"}
\item{peptideSet}{A set of peptide sequences.}
\item{tcrSet}{A set of corresponding TCR sequences.}
\item{aaIndexIDSet}{A set of AAIndex IDs indicating the AACP scales to be used. Set "all" to shortcut the selection of all available AACP scales.}
\item{fragLenSet}{A set of sliding window sizes. Must be between 3 and 8.}
\item{seed}{A random seed.}
\item{coreN}{The number of cores to be used for parallelization.}
\item{dt_deltaG_sCPP}{A data.table containing the "DeltaG" and sCPP feature columns.}
\item{dt_deltaG_sCPP_outlier}{Optional. If provided, it would be used as outliers.}
\item{corVarName}{A variable name used for univariate analysis.}
\item{dt_univ}{An output data.table of univariate analysis.}
\item{sig}{A siginificance cutoff for univariate feature selection.}
\item{bestOnly}{Logical. If True, only the best sCPP feature per AAIndex will be returned. If False, all sCPP features derived from selected AAIndices will be returned.}
\item{corVarNames}{A set of variable names used for multivariate regression.}
\item{dt_deltaG_sCPP_new}{Optional. If provided, TCR affnities would be predicted for this dataset. Otherwise, predicted TCR affinities for the original dataset would be returned.}
\item{aaIndexIDSet}{A set of AAIndices of interest for univariate feature selection.}
}
\description{
Single TCR-peptide contact potential profiling.
}
|
library(rgdal)
library(sf)
library(dplyr) # provides the %>% pipe used below
#import spatial data first, e.g. shp <- readOGR("path/to/layer") or shp <- st_read("path/to/layer")
coord<-"CRSstring" #ex:"+proj=utm +zone=16 +ellps=WGS84 +datum=WGS84 +units=m +no_defs" #set coordinate system
shp_project<-spTransform(shp, CRS(coord)) #apply CRS to new object
#do stuff.......
#using sf package (much easier)
shp<-shp %>% st_transform(4326) #or whatever EPSG code
#do stuff......
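# optional sketch: inspect and reproject to a projected CRS
# (assumes `shp` is an sf object already read into memory, e.g. via st_read())
st_crs(shp) # inspect the current CRS
shp_utm <- shp %>% st_transform(32616) # EPSG 32616 = WGS84 / UTM zone 16N, matching the proj4 example above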
|
/reproject/reproject.R
|
no_license
|
rnall0/rgistoolbox
|
R
| false
| false
| 351
|
r
|
library(rgdal)
library(sf)
#import spatial data
coord<-"CRSstring" #ex:"+proj=utm +zone=16 +ellps=WGS84 +datum=WGS84 +units=m +no_defs" #set coordinate system
shp_project<-spTransform(shp, CRS(coord)) #apply CRS to new object
#do stuff.......
#using sf package (much easier)
shp<-shp %>% st_transform(4326) #or whatever EPSG code
#do stuff......
|
## There are two functions below. One creates a "special" matrix
## object that can cache its inverse; the second function computes the inverse
## create a matrix object that actually contains set, get, setinverse and
## getinverse functions
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
##set the value of matrix
set <- function(y) {
x <<- y
i <<- NULL
}
##get the value of matrix
get <- function() x
##set the inverse value of the matrix
setinverse <- function(inverse) i <<- inverse
##get the inverse value of the matrix
getinverse <- function() i
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
##function to determine whether the inverse of the matrix has been calculated.
##If yes, return the previously stored value; if not, calculate the
##inverse of the matrix
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinverse()
##determine whether it is null or not
if(!is.null(i)) {
message("getting cached data")
return(i)
}
##if not calculated before, calculate the inverse right now
data <- x$get()
i <- solve(data, ...)
x$setinverse(i)
##return the inverse value
i
}
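## Example usage (not part of the two functions above): the second cacheSolve()
## call returns the cached inverse instead of recomputing it
m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), nrow = 2, ncol = 2))
cacheSolve(m) # computes the inverse and caches it
cacheSolve(m) # prints "getting cached data" and returns the cached inverse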
|
/cachematrix.R
|
no_license
|
xiaoheyu/ProgrammingAssignment2
|
R
| false
| false
| 1,253
|
r
|
## There are two functions below. One is to create a "spectial" matrix
##that can cacge its inverse The second function computes the inverse
## create a matrix acctualy contains set,get,setinverse and
##getinverse functions
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
##set the value of matrix
set <- function(y) {
x <<- y
i <<- NULL
}
##get the value of matrix
get <- function() x
##set the inverse value of the matrix
setinverse <- function(inverse) i <<- inverse
##get the inverse value of the matrix
getinverse <- function() i
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
##function to determin wether the inverse of matrix is calculated
##if yes, return the stored value before, If not, calculate the
##inverse of the matrix
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinverse()
##determin wether it is null or not
if(!is.null(i)) {
message("getting cached data")
return(i)
}
##if not calculated before, calculate the inverse right now
data <- x$get()
i <- solve(data, ...)
x$setinverse(i)
##returen the inverse value
i
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AuthenticationManager.R
\docType{methods}
\name{showAuth}
\alias{showAuth}
\alias{showAuth,AuthenticationManager,D1Node-method}
\title{Display all authentication information}
\usage{
showAuth(.Object, node)
\S4method{showAuth}{AuthenticationManager,D1Node}(.Object, node)
}
\arguments{
\item{.Object}{An AuthenticationManager instance}
\item{node}{A D1Node instance}
}
\description{
Display all authentication information
}
|
/dataone/man/showAuth.Rd
|
permissive
|
couture322/rdataone
|
R
| false
| true
| 505
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AuthenticationManager.R
\docType{methods}
\name{showAuth}
\alias{showAuth}
\alias{showAuth,AuthenticationManager,D1Node-method}
\title{Display all authentication information}
\usage{
showAuth(.Object, node)
\S4method{showAuth}{AuthenticationManager,D1Node}(.Object, node)
}
\arguments{
\item{.Object}{An AuthenticationManager instance}
\item{node}{A D1Node instance}
}
\description{
Display all authentication information
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/column_summary.R
\docType{class}
\name{ColumnSummary}
\alias{ColumnSummary}
\title{ColumnSummary}
\format{
An \code{R6Class} generator object
}
\description{
ColumnSummary Class
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{name}}{character [optional]}
\item{\code{directory}}{character [optional]}
\item{\code{category}}{character [optional]}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{ColumnSummary$new()}}
\item \href{#method-toJSON}{\code{ColumnSummary$toJSON()}}
\item \href{#method-fromJSON}{\code{ColumnSummary$fromJSON()}}
\item \href{#method-toJSONString}{\code{ColumnSummary$toJSONString()}}
\item \href{#method-fromJSONString}{\code{ColumnSummary$fromJSONString()}}
\item \href{#method-clone}{\code{ColumnSummary$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ColumnSummary$new(name = NULL, directory = NULL, category = NULL, ...)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-toJSON"></a>}}
\if{latex}{\out{\hypertarget{method-toJSON}{}}}
\subsection{Method \code{toJSON()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ColumnSummary$toJSON()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-fromJSON"></a>}}
\if{latex}{\out{\hypertarget{method-fromJSON}{}}}
\subsection{Method \code{fromJSON()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ColumnSummary$fromJSON(ColumnSummaryJson)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-toJSONString"></a>}}
\if{latex}{\out{\hypertarget{method-toJSONString}{}}}
\subsection{Method \code{toJSONString()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ColumnSummary$toJSONString()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-fromJSONString"></a>}}
\if{latex}{\out{\hypertarget{method-fromJSONString}{}}}
\subsection{Method \code{fromJSONString()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ColumnSummary$fromJSONString(ColumnSummaryJson)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ColumnSummary$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
/auto-generated-sdk/man/ColumnSummary.Rd
|
permissive
|
afernandes85/analyticsapi-engines-r-sdk
|
R
| false
| true
| 2,910
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/column_summary.R
\docType{class}
\name{ColumnSummary}
\alias{ColumnSummary}
\title{ColumnSummary}
\format{
An \code{R6Class} generator object
}
\description{
ColumnSummary Class
}
\section{Public fields}{
\if{html}{\out{<div class="r6-fields">}}
\describe{
\item{\code{name}}{character [optional]}
\item{\code{directory}}{character [optional]}
\item{\code{category}}{character [optional]}
}
\if{html}{\out{</div>}}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-new}{\code{ColumnSummary$new()}}
\item \href{#method-toJSON}{\code{ColumnSummary$toJSON()}}
\item \href{#method-fromJSON}{\code{ColumnSummary$fromJSON()}}
\item \href{#method-toJSONString}{\code{ColumnSummary$toJSONString()}}
\item \href{#method-fromJSONString}{\code{ColumnSummary$fromJSONString()}}
\item \href{#method-clone}{\code{ColumnSummary$clone()}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-new"></a>}}
\if{latex}{\out{\hypertarget{method-new}{}}}
\subsection{Method \code{new()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ColumnSummary$new(name = NULL, directory = NULL, category = NULL, ...)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-toJSON"></a>}}
\if{latex}{\out{\hypertarget{method-toJSON}{}}}
\subsection{Method \code{toJSON()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ColumnSummary$toJSON()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-fromJSON"></a>}}
\if{latex}{\out{\hypertarget{method-fromJSON}{}}}
\subsection{Method \code{fromJSON()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ColumnSummary$fromJSON(ColumnSummaryJson)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-toJSONString"></a>}}
\if{latex}{\out{\hypertarget{method-toJSONString}{}}}
\subsection{Method \code{toJSONString()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ColumnSummary$toJSONString()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-fromJSONString"></a>}}
\if{latex}{\out{\hypertarget{method-fromJSONString}{}}}
\subsection{Method \code{fromJSONString()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ColumnSummary$fromJSONString(ColumnSummaryJson)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-clone"></a>}}
\if{latex}{\out{\hypertarget{method-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{ColumnSummary$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
# Load dataset and Filter dates:
powerDT <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";")
filterPowerDT <- subset(powerDT, Date %in% c("1/2/2007", "2/2/2007"))
# Convert Time to Date
dateTime <- strptime(paste(filterPowerDT$Date, filterPowerDT$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
# Plot and save to .png (open the PNG device first so the plot is written to the file):
png("plot2.png", width=480, height=480)
plot(dateTime, as.numeric(as.character(filterPowerDT$Global_active_power)),
     type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
/plot2.R
|
no_license
|
Trochillianne/04.-Exploratory-Data-Analysis_Project1
|
R
| false
| false
| 551
|
r
|
# Load dataset and Filter dates:
powerDT <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";")
filterPowerDT <- subset(powerDT, Date %in% c("1/2/2007", "2/2/2007"))
# Convert Time to Date
dateTime <- strptime(paste(filterPowerDT$Date, filterPowerDT$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
# Plot and save to .png (open the PNG device first so the plot is written to the file):
png("plot2.png", width=480, height=480)
plot(dateTime, as.numeric(as.character(filterPowerDT$Global_active_power)),
     type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
#' @title
#' WFG8 Function
#'
#' @description
#' Eighth test problem from the "Walking Fish Group" problem generator toolkit.
#'
#' @references
#' S. Huband, P. Hingston, L. Barone, and L. While, "A Review of Multi-objective
#' Test Problems and a Scalable Test Problem Toolkit," in IEEE Transactions on
#' Evolutionary Computation, Volume 10, No 5, October 2006, pp. 477-506. IEEE.
#' @param n.objectives [\code{integer(1)}]\cr
#' Number of objectives.
#' @param k [\code{integer}(1)]\cr
#' Number of position-related parameters. These will automatically be the
#' first \code{k} elements from the input vector. This value has to be a
#' multiple of \code{n.objectives - 1}.
#' @param l [\code{integer}(1)]\cr
#' Number of distance-related parameters. These will automatically be
#' the last \code{l} elements from the input vector.
#' @return [\code{smoof_multi_objective_function}]
#' @details
#' Huband et al. recommend a value of \code{k = 4L} position-related
#' parameters for bi-objective problems and \code{k = 2L * (n.objectives - 1L)}
#' for many-objective problems. Furthermore the authors recommend a value of
#' \code{l = 20} distance-related parameters. Therefore, if \code{k} and/or
#' \code{l} are not explicitly defined by the user, their values will be set
#' to the recommended values per default.
#'
#' @export
makeWFG8Function = function(n.objectives, k, l) {
assertInt(n.objectives, lower = 2L)
force(n.objectives)
if (missing(k)) {
if (n.objectives == 2L) {
k = 4L
} else {
k = 2L * (n.objectives - 1L)
}
}
assertInt(k, lower = n.objectives - 1L)
assertTRUE(k %% (n.objectives - 1L) == 0L)
force(k)
if (missing(l)) {
l = 20L
}
assertInt(l, lower = 1L)
force(l)
dimensions = k + l
# C implementation
fn = function(x) {
assertNumeric(x, len = k + l, any.missing = FALSE, all.missing = FALSE)
return(mof_WFG_8(z = x, M = n.objectives, k = k))
}
makeMultiObjectiveFunction(
name = "WFG8 function",
id = sprintf("WFG8-%id-%io", dimensions, n.objectives),
description = "WFG8 function",
fn = fn,
par.set = makeNumericParamSet(
len = dimensions,
id = "x",
lower = rep(0, dimensions),
upper = 2L * seq_len(dimensions),
vector = TRUE
),
minimize = rep(TRUE, n.objectives),
n.objectives = n.objectives
)
}
class(makeWFG8Function) = c("function", "smoof_generator")
attr(makeWFG8Function, "name") = c("WFG8")
attr(makeWFG8Function, "type") = c("multi-objective")
attr(makeWFG8Function, "tags") = c("multi-objective")
|
/R/mof.WFG8.R
|
permissive
|
jakobbossek/smoof
|
R
| false
| false
| 2,594
|
r
|
#' @title
#' WFG8 Function
#'
#' @description
#' Eighth test problem from the "Walking Fish Group" problem generator toolkit.
#'
#' @references
#' S. Huband, P. Hingston, L. Barone, and L. While, "A Review of Multi-objective
#' Test Problems and a Scalable Test Problem Toolkit," in IEEE Transactions on
#' Evolutionary Computation, Volume 10, No 5, October 2006, pp. 477-506. IEEE.
#' @param n.objectives [\code{integer(1)}]\cr
#' Number of objectives.
#' @param k [\code{integer}(1)]\cr
#' Number of position-related parameters. These will automatically be the
#' first \code{k} elements from the input vector. This value has to be a
#' multiple of \code{n.objectives - 1}.
#' @param l [\code{integer}(1)]\cr
#' Number of distance-related parameters. These will automatically be
#' the last \code{l} elements from the input vector.
#' @return [\code{smoof_multi_objective_function}]
#' @details
#' Huband et al. recommend a value of \code{k = 4L} position-related
#' parameters for bi-objective problems and \code{k = 2L * (n.objectives - 1L)}
#' for many-objective problems. Furthermore the authors recommend a value of
#' \code{l = 20} distance-related parameters. Therefore, if \code{k} and/or
#' \code{l} are not explicitly defined by the user, their values will be set
#' to the recommended values per default.
#'
#' @export
makeWFG8Function = function(n.objectives, k, l) {
assertInt(n.objectives, lower = 2L)
force(n.objectives)
if (missing(k)) {
if (n.objectives == 2L) {
k = 4L
} else {
k = 2L * (n.objectives - 1L)
}
}
assertInt(k, lower = n.objectives - 1L)
assertTRUE(k %% (n.objectives - 1L) == 0L)
force(k)
if (missing(l)) {
l = 20L
}
assertInt(l, lower = 1L)
force(l)
dimensions = k + l
# C implementation
fn = function(x) {
assertNumeric(x, len = k + l, any.missing = FALSE, all.missing = FALSE)
return(mof_WFG_8(z = x, M = n.objectives, k = k))
}
makeMultiObjectiveFunction(
name = "WFG8 function",
id = sprintf("WFG8-%id-%io", dimensions, n.objectives),
description = "WFG8 function",
fn = fn,
par.set = makeNumericParamSet(
len = dimensions,
id = "x",
lower = rep(0, dimensions),
upper = 2L * seq_len(dimensions),
vector = TRUE
),
minimize = rep(TRUE, n.objectives),
n.objectives = n.objectives
)
}
class(makeWFG8Function) = c("function", "smoof_generator")
attr(makeWFG8Function, "name") = c("WFG8")
attr(makeWFG8Function, "type") = c("multi-objective")
attr(makeWFG8Function, "tags") = c("multi-objective")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apigatewayv2_service.R
\name{apigatewayv2}
\alias{apigatewayv2}
\title{AmazonApiGatewayV2}
\usage{
apigatewayv2()
}
\description{
Amazon API Gateway V2
}
\section{Operations}{
\tabular{ll}{
\link[=apigatewayv2_create_api]{create_api} \tab Creates an Api resource \cr
\link[=apigatewayv2_create_api_mapping]{create_api_mapping} \tab Creates an API mapping \cr
\link[=apigatewayv2_create_authorizer]{create_authorizer} \tab Creates an Authorizer for an API \cr
\link[=apigatewayv2_create_deployment]{create_deployment} \tab Creates a Deployment for an API \cr
\link[=apigatewayv2_create_domain_name]{create_domain_name} \tab Creates a domain name \cr
\link[=apigatewayv2_create_integration]{create_integration} \tab Creates an Integration \cr
\link[=apigatewayv2_create_integration_response]{create_integration_response} \tab Creates an IntegrationResponses \cr
\link[=apigatewayv2_create_model]{create_model} \tab Creates a Model for an API \cr
\link[=apigatewayv2_create_route]{create_route} \tab Creates a Route for an API \cr
\link[=apigatewayv2_create_route_response]{create_route_response} \tab Creates a RouteResponse for a Route \cr
\link[=apigatewayv2_create_stage]{create_stage} \tab Creates a Stage for an API \cr
\link[=apigatewayv2_delete_api]{delete_api} \tab Deletes an Api resource \cr
\link[=apigatewayv2_delete_api_mapping]{delete_api_mapping} \tab Deletes an API mapping \cr
\link[=apigatewayv2_delete_authorizer]{delete_authorizer} \tab Deletes an Authorizer \cr
\link[=apigatewayv2_delete_deployment]{delete_deployment} \tab Deletes a Deployment \cr
\link[=apigatewayv2_delete_domain_name]{delete_domain_name} \tab Deletes a domain name \cr
\link[=apigatewayv2_delete_integration]{delete_integration} \tab Deletes an Integration \cr
\link[=apigatewayv2_delete_integration_response]{delete_integration_response} \tab Deletes an IntegrationResponses \cr
\link[=apigatewayv2_delete_model]{delete_model} \tab Deletes a Model \cr
\link[=apigatewayv2_delete_route]{delete_route} \tab Deletes a Route \cr
\link[=apigatewayv2_delete_route_response]{delete_route_response} \tab Deletes a RouteResponse \cr
\link[=apigatewayv2_delete_stage]{delete_stage} \tab Deletes a Stage \cr
\link[=apigatewayv2_get_api]{get_api} \tab Gets an Api resource \cr
\link[=apigatewayv2_get_api_mapping]{get_api_mapping} \tab The API mapping \cr
\link[=apigatewayv2_get_api_mappings]{get_api_mappings} \tab The API mappings \cr
\link[=apigatewayv2_get_apis]{get_apis} \tab Gets a collection of Api resources \cr
\link[=apigatewayv2_get_authorizer]{get_authorizer} \tab Gets an Authorizer \cr
\link[=apigatewayv2_get_authorizers]{get_authorizers} \tab Gets the Authorizers for an API \cr
\link[=apigatewayv2_get_deployment]{get_deployment} \tab Gets a Deployment \cr
\link[=apigatewayv2_get_deployments]{get_deployments} \tab Gets the Deployments for an API \cr
\link[=apigatewayv2_get_domain_name]{get_domain_name} \tab Gets a domain name \cr
\link[=apigatewayv2_get_domain_names]{get_domain_names} \tab Gets the domain names for an AWS account \cr
\link[=apigatewayv2_get_integration]{get_integration} \tab Gets an Integration \cr
\link[=apigatewayv2_get_integration_response]{get_integration_response} \tab Gets an IntegrationResponses \cr
\link[=apigatewayv2_get_integration_responses]{get_integration_responses} \tab Gets the IntegrationResponses for an Integration\cr
\link[=apigatewayv2_get_integrations]{get_integrations} \tab Gets the Integrations for an API \cr
\link[=apigatewayv2_get_model]{get_model} \tab Gets a Model \cr
\link[=apigatewayv2_get_model_template]{get_model_template} \tab Gets a model template \cr
\link[=apigatewayv2_get_models]{get_models} \tab Gets the Models for an API \cr
\link[=apigatewayv2_get_route]{get_route} \tab Gets a Route \cr
\link[=apigatewayv2_get_route_response]{get_route_response} \tab Gets a RouteResponse \cr
\link[=apigatewayv2_get_route_responses]{get_route_responses} \tab Gets the RouteResponses for a Route \cr
\link[=apigatewayv2_get_routes]{get_routes} \tab Gets the Routes for an API \cr
\link[=apigatewayv2_get_stage]{get_stage} \tab Gets a Stage \cr
\link[=apigatewayv2_get_stages]{get_stages} \tab Gets the Stages for an API \cr
\link[=apigatewayv2_update_api]{update_api} \tab Updates an Api resource \cr
\link[=apigatewayv2_update_api_mapping]{update_api_mapping} \tab The API mapping \cr
\link[=apigatewayv2_update_authorizer]{update_authorizer} \tab Updates an Authorizer \cr
\link[=apigatewayv2_update_deployment]{update_deployment} \tab Updates a Deployment \cr
\link[=apigatewayv2_update_domain_name]{update_domain_name} \tab Updates a domain name \cr
\link[=apigatewayv2_update_integration]{update_integration} \tab Updates an Integration \cr
\link[=apigatewayv2_update_integration_response]{update_integration_response} \tab Updates an IntegrationResponses \cr
\link[=apigatewayv2_update_model]{update_model} \tab Updates a Model \cr
\link[=apigatewayv2_update_route]{update_route} \tab Updates a Route \cr
\link[=apigatewayv2_update_route_response]{update_route_response} \tab Updates a RouteResponse \cr
\link[=apigatewayv2_update_stage]{update_stage} \tab Updates a Stage
}
}
\examples{
\donttest{svc <- apigatewayv2()
svc$create_api(
Foo = 123
)}
}
|
/cran/paws.networking/man/apigatewayv2.Rd
|
permissive
|
peoplecure/paws
|
R
| false
| true
| 5,297
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/apigatewayv2_service.R
\name{apigatewayv2}
\alias{apigatewayv2}
\title{AmazonApiGatewayV2}
\usage{
apigatewayv2()
}
\description{
Amazon API Gateway V2
}
\section{Operations}{
\tabular{ll}{
\link[=apigatewayv2_create_api]{create_api} \tab Creates an Api resource \cr
\link[=apigatewayv2_create_api_mapping]{create_api_mapping} \tab Creates an API mapping \cr
\link[=apigatewayv2_create_authorizer]{create_authorizer} \tab Creates an Authorizer for an API \cr
\link[=apigatewayv2_create_deployment]{create_deployment} \tab Creates a Deployment for an API \cr
\link[=apigatewayv2_create_domain_name]{create_domain_name} \tab Creates a domain name \cr
\link[=apigatewayv2_create_integration]{create_integration} \tab Creates an Integration \cr
\link[=apigatewayv2_create_integration_response]{create_integration_response} \tab Creates an IntegrationResponses \cr
\link[=apigatewayv2_create_model]{create_model} \tab Creates a Model for an API \cr
\link[=apigatewayv2_create_route]{create_route} \tab Creates a Route for an API \cr
\link[=apigatewayv2_create_route_response]{create_route_response} \tab Creates a RouteResponse for a Route \cr
\link[=apigatewayv2_create_stage]{create_stage} \tab Creates a Stage for an API \cr
\link[=apigatewayv2_delete_api]{delete_api} \tab Deletes an Api resource \cr
\link[=apigatewayv2_delete_api_mapping]{delete_api_mapping} \tab Deletes an API mapping \cr
\link[=apigatewayv2_delete_authorizer]{delete_authorizer} \tab Deletes an Authorizer \cr
\link[=apigatewayv2_delete_deployment]{delete_deployment} \tab Deletes a Deployment \cr
\link[=apigatewayv2_delete_domain_name]{delete_domain_name} \tab Deletes a domain name \cr
\link[=apigatewayv2_delete_integration]{delete_integration} \tab Deletes an Integration \cr
\link[=apigatewayv2_delete_integration_response]{delete_integration_response} \tab Deletes an IntegrationResponses \cr
\link[=apigatewayv2_delete_model]{delete_model} \tab Deletes a Model \cr
\link[=apigatewayv2_delete_route]{delete_route} \tab Deletes a Route \cr
\link[=apigatewayv2_delete_route_response]{delete_route_response} \tab Deletes a RouteResponse \cr
\link[=apigatewayv2_delete_stage]{delete_stage} \tab Deletes a Stage \cr
\link[=apigatewayv2_get_api]{get_api} \tab Gets an Api resource \cr
\link[=apigatewayv2_get_api_mapping]{get_api_mapping} \tab The API mapping \cr
\link[=apigatewayv2_get_api_mappings]{get_api_mappings} \tab The API mappings \cr
\link[=apigatewayv2_get_apis]{get_apis} \tab Gets a collection of Api resources \cr
\link[=apigatewayv2_get_authorizer]{get_authorizer} \tab Gets an Authorizer \cr
\link[=apigatewayv2_get_authorizers]{get_authorizers} \tab Gets the Authorizers for an API \cr
\link[=apigatewayv2_get_deployment]{get_deployment} \tab Gets a Deployment \cr
\link[=apigatewayv2_get_deployments]{get_deployments} \tab Gets the Deployments for an API \cr
\link[=apigatewayv2_get_domain_name]{get_domain_name} \tab Gets a domain name \cr
\link[=apigatewayv2_get_domain_names]{get_domain_names} \tab Gets the domain names for an AWS account \cr
\link[=apigatewayv2_get_integration]{get_integration} \tab Gets an Integration \cr
\link[=apigatewayv2_get_integration_response]{get_integration_response} \tab Gets an IntegrationResponses \cr
\link[=apigatewayv2_get_integration_responses]{get_integration_responses} \tab Gets the IntegrationResponses for an Integration\cr
\link[=apigatewayv2_get_integrations]{get_integrations} \tab Gets the Integrations for an API \cr
\link[=apigatewayv2_get_model]{get_model} \tab Gets a Model \cr
\link[=apigatewayv2_get_model_template]{get_model_template} \tab Gets a model template \cr
\link[=apigatewayv2_get_models]{get_models} \tab Gets the Models for an API \cr
\link[=apigatewayv2_get_route]{get_route} \tab Gets a Route \cr
\link[=apigatewayv2_get_route_response]{get_route_response} \tab Gets a RouteResponse \cr
\link[=apigatewayv2_get_route_responses]{get_route_responses} \tab Gets the RouteResponses for a Route \cr
\link[=apigatewayv2_get_routes]{get_routes} \tab Gets the Routes for an API \cr
\link[=apigatewayv2_get_stage]{get_stage} \tab Gets a Stage \cr
\link[=apigatewayv2_get_stages]{get_stages} \tab Gets the Stages for an API \cr
\link[=apigatewayv2_update_api]{update_api} \tab Updates an Api resource \cr
\link[=apigatewayv2_update_api_mapping]{update_api_mapping} \tab The API mapping \cr
\link[=apigatewayv2_update_authorizer]{update_authorizer} \tab Updates an Authorizer \cr
\link[=apigatewayv2_update_deployment]{update_deployment} \tab Updates a Deployment \cr
\link[=apigatewayv2_update_domain_name]{update_domain_name} \tab Updates a domain name \cr
\link[=apigatewayv2_update_integration]{update_integration} \tab Updates an Integration \cr
\link[=apigatewayv2_update_integration_response]{update_integration_response} \tab Updates an IntegrationResponses \cr
\link[=apigatewayv2_update_model]{update_model} \tab Updates a Model \cr
\link[=apigatewayv2_update_route]{update_route} \tab Updates a Route \cr
\link[=apigatewayv2_update_route_response]{update_route_response} \tab Updates a RouteResponse \cr
\link[=apigatewayv2_update_stage]{update_stage} \tab Updates a Stage
}
}
\examples{
\donttest{svc <- apigatewayv2()
svc$create_api(
Foo = 123
)}
}
|
library(metaflu)
library(ggplot2)
library(dplyr)
library(doMC)
library(tidyr)
library(purrr)
library(gridExtra)
library(abind)
registerDoMC(cores = 20)
library(doRNG)
library(grid)
set.seed(123)
#source(rprojroot::find_rstudio_root_file("R","summarizing_functions.R"))
# Scripts to produce results of varying 1 or 2 parameters for final presentation.
# Run either 1D or 2D case (2D currently commented out)
# Setup (1D)
# For the presentation, cull_time varied from 1 to 20, pi_report from 0.05 to 1, and I_crit from 1 to 10.
threshold = c(1)
varied_param = "pi"
size = 10 # number of chickens
if (varied_param == "cull_time"){
print("cull_time")
results_list <- vary_params(param_name = varied_param, param_values = threshold, num_of_chickens = size)
  saveRDS(results_list, paste0("size", size, ".rds"))
} else if (varied_param == "pi") {
print("pi")
} else if (varied_param == "network") {
print("network")
}
print("fin")
#####################################################
# Setup (2D)
# For the presentation, farm size varied from (50, 200, 100) and omega from (0.01, 0.03, 0.05)
# num_of_farms = 50 #200 is default
# threshold_1 = c(1:20)
# varied_param_1 = "cull_time"
#
# threshold_2 = 0.01
# varied_param_2 = "omega"
#
# results_list <- vary_params_2d(param1_name = varied_param_1, param1_values = threshold_1, param2_name = varied_param_2,
# param2_values = threshold_2)
#
# saveRDS(results_list, "small_omega_small_size.rds")
#
# print("I AM DONE!")
|
/culling_paper_code.R
|
no_license
|
kateroyce/internship-research
|
R
| false
| false
| 1,513
|
r
|
library(metaflu)
library(ggplot2)
library(dplyr)
library(doMC)
library(tidyr)
library(purrr)
library(gridExtra)
library(abind)
registerDoMC(cores = 20)
library(doRNG)
library(grid)
set.seed(123)
#source(rprojroot::find_rstudio_root_file("R","summarizing_functions.R"))
# Scripts to produce results of varying 1 or 2 parameters for final presentation.
# Run either 1D or 2D case (2D currently commented out)
# Setup (1D)
# For the presentation, cull_time varied from 1 to 20, pi_report from 0.05 to 1, and I_crit from 1 to 10.
threshold = c(1)
varied_param = "pi"
size = 10 # number of chickens
if (varied_param == "cull_time"){
print("cull_time")
results_list <- vary_params(param_name = varied_param, param_values = threshold, num_of_chickens = size)
  saveRDS(results_list, paste0("size", size, ".rds"))
} else if (varied_param == "pi") {
print("pi")
} else if (varied_param == "network") {
print("network")
}
print("fin")
#####################################################
# Setup (2D)
# For the presentation, farm size varied from (50, 200, 100) and omega from (0.01, 0.03, 0.05)
# num_of_farms = 50 #200 is default
# threshold_1 = c(1:20)
# varied_param_1 = "cull_time"
#
# threshold_2 = 0.01
# varied_param_2 = "omega"
#
# results_list <- vary_params_2d(param1_name = varied_param_1, param1_values = threshold_1, param2_name = varied_param_2,
# param2_values = threshold_2)
#
# saveRDS(results_list, "small_omega_small_size.rds")
#
# print("I AM DONE!")
|
\name{ani.start}
\alias{ani.start}
\alias{ani.stop}
\title{Generate an HTML animation page}
\usage{
ani.start(...)
ani.stop(...)
}
\arguments{
\item{...}{arguments passed to \code{\link{ani.options}}
to set animation parameters}
}
\description{
These functions are defunct and should not be used any
more.
}
\keyword{internal}
|
/man/ani.start.Rd
|
no_license
|
tomoky0820/animation
|
R
| false
| false
| 336
|
rd
|
\name{ani.start}
\alias{ani.start}
\alias{ani.stop}
\title{Generate an HTML animation page}
\usage{
ani.start(...)
ani.stop(...)
}
\arguments{
\item{...}{arguments passed to \code{\link{ani.options}}
to set animation parameters}
}
\description{
These functions are defunct and should not be used any
more.
}
\keyword{internal}
|
# Robert J. Hijmans
# May 2010
# Version 1.0
# Licence GPL v3
rotated <- function(x) {
isTRUE(try(x@rotated, silent=TRUE))
}
setMethod("rectify", signature(x="Raster"),
function(x, ext, res, method='ngb', filename='', ...) {
stopifnot(rotated(x))
if ( missing(ext)) {
ext <- extent(x)
} else {
ext <- extent(ext)
}
out <- raster(ext)
if ( missing(res)) {
res(out) <- abs(raster::res(x))
} else {
res(out) <- res
}
resample(x, out, method=method, filename=filename, ...)
}
)
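# Usage sketch (hypothetical file name; any raster whose grid is rotated would do):
# r <- raster("rotated_grid.nc")
# if (rotated(r)) r2 <- rectify(r, method = "bilinear")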
|
/R/rectify.R
|
no_license
|
cran/raster
|
R
| false
| false
| 542
|
r
|
# Robert J. Hijmans
# May 2010
# Version 1.0
# Licence GPL v3
rotated <- function(x) {
isTRUE(try(x@rotated, silent=TRUE))
}
setMethod("rectify", signature(x="Raster"),
function(x, ext, res, method='ngb', filename='', ...) {
stopifnot(rotated(x))
if ( missing(ext)) {
ext <- extent(x)
} else {
ext <- extent(ext)
}
out <- raster(ext)
if ( missing(res)) {
res(out) <- abs(raster::res(x))
} else {
res(out) <- res
}
resample(x, out, method=method, filename=filename, ...)
}
)
|
#install.packages("ggplot2")
library(ggplot2)
# diamonds is a built-in data set in ggplot2
head(diamonds)
tail(diamonds)
#scatter plot
b = qplot(diamonds$carat, diamonds$price, col = diamonds$clarity)
b
|
/Diamond.R
|
no_license
|
PaulSaikat/R-Programming
|
R
| false
| false
| 205
|
r
|
#install.packages("ggplot2")
library(ggplot2)
# diamond is build in data table
head(diamonds)
tail(diamonds)
#scatter plot
b = qplot(diamonds$carat, diamonds$price, col = diamonds$clarity)
b
|
\name{IdtMxSNDE-class}
\Rdversion{1.1}
\docType{class}
\alias{IdtMxSNDE-class}
\title{Class IdtMxSNDE}
\description{IdtMxSNDE contains the results of a mixture model estimation for the Skew-Normal model, with the four different possible variance-covariance configurations.
}
\section{Slots}{
\describe{
\item{\code{Hmcdt}:}{Indicates whether we consider an homoscedastic location model (TRUE) or a general model (FALSE)}
\item{\code{CovConfCases}:}{List of the considered configurations}
\item{\code{grouping}:}{Inherited from class \code{\linkS4class{IdtMxE}}. Factor indicating the group to which each observation belongs to}
\item{\code{ModelNames}:}{Inherited from class \code{\linkS4class{IdtE}}. The model acronym, indicating the model type (currently, N for Normal and SN for Skew-Normal), and the configuration (Case 1 through Case 4)}
\item{\code{ModelType}:}{Inherited from class \code{\linkS4class{IdtE}}. Indicates the model; currently, Gaussian or Skew-Normal distributions are implemented}
\item{\code{ModelConfig}:}{Inherited from class \code{\linkS4class{IdtE}}. Configuration case of the variance-covariance matrix: Case 1 through Case 4}
\item{\code{NIVar}:}{Inherited from class \code{\linkS4class{IdtE}}. Number of interval variables}
\item{\code{SelCrit}:}{Inherited from class \code{\linkS4class{IdtE}}. The model selection criterion; currently, AIC and BIC are implemented}
\item{\code{logLiks}:}{Inherited from class \code{\linkS4class{IdtE}}. The logarithms of the likelihood function for the different cases}
\item{\code{AICs}:}{Inherited from class \code{\linkS4class{IdtE}}. Value of the AIC criterion }
\item{\code{BICs}:}{Inherited from class \code{\linkS4class{IdtE}}. Value of the BIC criterion }
\item{\code{BestModel}:}{Inherited from class \code{\linkS4class{IdtE}}. Indicates the best model according to the chosen selection criterion}
\item{\code{SngD}:}{Inherited from class \code{\linkS4class{IdtE}}. Boolean flag indicating whether a single or a mixture of distribution were estimated. Always set to FALSE in objects of class \code{\linkS4class{IdtMxSNDE}} }
\item{\code{Ngrps}:}{Inherited from class \code{\linkS4class{IdtMxE}}. Number of mixture components}
}
}
\section{Extends}{
Class \code{\linkS4class{IdtMxE}}, directly.
Class \code{\linkS4class{IdtE}}, by class \code{\linkS4class{IdtMxE}}, distance 2.
}
\section{Methods}{
No methods defined with class IdtMxSNDE in the signature.
}
\author{Pedro Duarte Silva <psilva@porto.ucp.pt>\cr
Paula Brito <mpbrito@fep.up.pt>
}
\references{
Azzalini, A. and Dalla Valle, A. (1996), The multivariate skew-normal distribution. \emph{Biometrika} \bold{83}(4), 715--726.\cr
Brito, P., Duarte Silva, A. P. (2012), Modelling Interval Data with Normal and Skew-Normal Distributions. \emph{Journal of Applied Statistics} \bold{39}(1), 3--20.
}
\seealso{
\code{\linkS4class{IdtE}}, \code{\linkS4class{IdtMxE}}, \code{\linkS4class{IdtSngSNDE}}, \code{\link{MANOVA}}, \code{\linkS4class{IData}}
}
\keyword{classes}
\keyword{interval data}
|
/fuzzedpackages/MAINT.Data/man/IdtMxSNDE-class.Rd
|
no_license
|
akhikolla/testpackages
|
R
| false
| false
| 3,176
|
rd
|
\name{IdtMxSNDE-class}
\Rdversion{1.1}
\docType{class}
\alias{IdtMxSNDE-class}
\title{Class IdtMxSNDE}
\description{IdtMxSNDE contains the results of a mixture model estimation for the Skew-Normal model, with the four different possible variance-covariance configurations.
}
\section{Slots}{
\describe{
\item{\code{Hmcdt}:}{Indicates whether we consider an homoscedastic location model (TRUE) or a general model (FALSE)}
\item{\code{CovConfCases}:}{List of the considered configurations}
\item{\code{grouping}:}{Inherited from class \code{\linkS4class{IdtMxE}}. Factor indicating the group to which each observation belongs to}
\item{\code{ModelNames}:}{Inherited from class \code{\linkS4class{IdtE}}. The model acronym, indicating the model type (currently, N for Normal and SN for Skew-Normal), and the configuration (Case 1 through Case 4)}
\item{\code{ModelType}:}{Inherited from class \code{\linkS4class{IdtE}}. Indicates the model; currently, Gaussian or Skew-Normal distributions are implemented}
\item{\code{ModelConfig}:}{Inherited from class \code{\linkS4class{IdtE}}. Configuration case of the variance-covariance matrix: Case 1 through Case 4}
\item{\code{NIVar}:}{Inherited from class \code{\linkS4class{IdtE}}. Number of interval variables}
\item{\code{SelCrit}:}{Inherited from class \code{\linkS4class{IdtE}}. The model selection criterion; currently, AIC and BIC are implemented}
\item{\code{logLiks}:}{Inherited from class \code{\linkS4class{IdtE}}. The logarithms of the likelihood function for the different cases}
\item{\code{AICs}:}{Inherited from class \code{\linkS4class{IdtE}}. Value of the AIC criterion }
\item{\code{BICs}:}{Inherited from class \code{\linkS4class{IdtE}}. Value of the BIC criterion }
\item{\code{BestModel}:}{Inherited from class \code{\linkS4class{IdtE}}. Indicates the best model according to the chosen selection criterion}
\item{\code{SngD}:}{Inherited from class \code{\linkS4class{IdtE}}. Boolean flag indicating whether a single or a mixture of distribution were estimated. Always set to FALSE in objects of class \code{\linkS4class{IdtMxSNDE}} }
\item{\code{Ngrps}:}{Inherited from class \code{\linkS4class{IdtMxE}}. Number of mixture components}
}
}
\section{Extends}{
Class \code{\linkS4class{IdtMxE}}, directly.
Class \code{\linkS4class{IdtE}}, by class \code{\linkS4class{IdtMxE}}, distance 2.
}
\section{Methods}{
No methods defined with class IdtMxSNDE in the signature.
}
\author{Pedro Duarte Silva <psilva@porto.ucp.pt>\cr
Paula Brito <mpbrito.fep.up.pt>
}
\references{
Azzalini, A. and Dalla Valle, A. (1996), The multivariate skew-normal distribution. \emph{Biometrika} \bold{83}(4), 715--726.\cr
Brito, P., Duarte Silva, A. P. (2012), Modelling Interval Data with Normal and Skew-Normal Distributions. \emph{Journal of Applied Statistics} \bold{39}(1), 3--20.
}
\seealso{
\code{\linkS4class{IdtE}}, \code{\linkS4class{IdtMxE}}, \code{\linkS4class{IdtSngSNDE}}, \code{\link{MANOVA}}, \code{\linkS4class{IData}}
}
\keyword{classes}
\keyword{interval data}
|
\name{Ising-methods}
\alias{print.IsingFit}
\alias{plot.IsingFit}
\alias{summary.IsingFit}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Methods for IsingFit objects
}
\description{
The print method prints the IsingFit output, the plot method plots the estimated network (with the \code{qgraph} package), and the summary method returns the density of the network, the value of gamma used, the rule used, and the time the analysis took.
}
\usage{
\method{print}{IsingFit}(x, \dots)
\method{summary}{IsingFit}(object, \dots)
\method{plot}{IsingFit}(x, \dots)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
output of \code{\link{IsingFit}}
}
\item{object}{
output of \code{\link{IsingFit}}
}
\item{\dots}{
Arguments sent to qgraph. Only used in plot method.
}
}
\author{
Claudia van Borkulo
}
|
/man/methods.Rd
|
no_license
|
cran/IsingFit
|
R
| false
| false
| 841
|
rd
|
\name{Ising-methods}
\alias{print.IsingFit}
\alias{plot.IsingFit}
\alias{summary.IsingFit}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Methods for IsingFit objects
}
\description{
Print method prints the IsingFit output , plot method plots the estimated network (with the \code{qgraph} package), and summary method returns density of the network, the value of gamma used, the rule used, and the time the analysis took.
}
\usage{
\method{print}{IsingFit}(x, \dots)
\method{summary}{IsingFit}(object, \dots)
\method{plot}{IsingFit}(x, \dots)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
output of \code{\link{IsingFit}}
}
\item{object}{
output of \code{\link{IsingFit}}
}
\item{\dots}{
Arguments sent to qgraph. Only used in plot method.
}
}
\author{
Claudia van Borkulo
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/chose_parents.R
\name{tournament_selection}
\alias{tournament_selection}
\title{tournament_selection}
\usage{
tournament_selection(y, dataset, individuals, k = 4, n_var,
objective = "AIC")
}
\arguments{
\item{y}{response variable}
\item{dataset}{the dataframe containing the data}
\item{k}{the number of subsequences that should not overlap}
\item{n_var}{number of parents}
\item{objective}{the objective function}
}
\value{
a list of n_var parents that are the fittest ones in each subgroup
}
\description{
A parent-selection mechanism. Tournament selection splits the parent
population into k non-overlapping subgroups and takes the fittest individual
in each group, then draws n-k random subgroups (n being the number of parents)
and takes the fittest individual in each of these random subgroups.
}
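% Illustrative sketch only (not the package's actual implementation) of the
% tournament idea described above, for a generic numeric fitness vector:
\examples{
## higher fitness = fitter; returns the indices of n_var selected parents
tournament_sketch <- function(fitness, k = 4, n_var = 10) {
  n <- length(fitness)
  groups <- split(sample(n), rep(seq_len(k), length.out = n)) # k non-overlapping subgroups
  winners <- vapply(groups, function(i) i[which.max(fitness[i])], integer(1))
  extras <- replicate(n_var - k, {
    i <- sample(n, size = max(2, n %/% k)) # random subgroup
    i[which.max(fitness[i])]
  })
  c(winners, extras)
}
tournament_sketch(runif(100))
}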
|
/man/tournament_selection.Rd
|
no_license
|
yachuan/GA
|
R
| false
| true
| 883
|
rd
|
# JAGs Code ---------------------------------------------------------------
install.packages("coda")
install.packages("mvtnorm")
install.packages("rjags")
install.packages("boot")
install.packages("knitr")
install.packages("usethis")
install.packages("nlme")
install.packages("glmmTMB")
#install.packages("sjSDM")
install.packages("jSDM")
install.packages("devtools")
#install.packages("R2WinBUGS")
#install.packages("JAGS")
install.packages("R2jags")
install.packages("useful")
install.packages("DHARMa")
install.packages("MVN")
#install.packages("R2OpenBUGS")
install.packages("corpcor")
# Start RUN_CODE ----------------------------------------------------------
library(corpcor)
library(nlme)
library(MVN)
library(DHARMa)
library(useful)
library(glmmTMB)
library(usethis)
library(devtools)
#devtools::install_github("ghislainv/jSDM")
library(knitr)
#library(sjSDM)  # not used below; its install.packages() call above is also commented out
#library(jSDM)
library(coda)
library(rjags)
library(mvtnorm)
library(rjags)
#library(R2WinBUGS)
#library(JAGS)  # 'JAGS' is not an R package; the JAGS program is driven via rjags/R2jags
library(R2jags)
library(mvtnorm)
#library(R2OpenBUGS)
# <____________ change function
# <____________ dist.matrix <- function(side)
# <____________ {
# <____________ row.coords <- rep(1:side, times=side)
# <____________ col.coords <- rep(1:side, each=side)
# <____________ row.col <<- data.frame(row.coords, col.coords)
# <____________ D <- dist(row.col, method="euclidean", diag=TRUE, upper=TRUE)
# <____________ D <- as.matrix(D)
# <____________ return(D) # <___________list(D=D, coords = row.col)
# <____________ }
# `side` is not defined yet at this point in the script, so the grid and the
# distance matrix are built inside the function call. The globals (row.coords,
# col.coords, row.col, D) are assigned with <<- because later sections of this
# script rely on them.
dist.matrix <- function(side)
{
  row.coords <<- rep(1:side, times = side)
  col.coords <<- rep(1:side, each = side)
  row.col <<- data.frame(row = row.coords, col = col.coords)
  D <<- as.matrix(dist(row.col, method = "euclidean", diag = TRUE, upper = TRUE))
  return(D)
}
cor.surface <- function(side, global.mu, lambda)
{
D <- dist.matrix(side)
# scaling the distance matrix by the exponential decay
SIGMA <- exp(-lambda*D)
mu <- rep(global.mu, times=side*side)
# sampling from the multivariate normal distribution
M <- matrix(nrow=side, ncol=side)
M[] <- rmvnorm(1, mu, SIGMA)
return(M) # list(...)
}
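# Quick sanity check (sketch; m.fast/m.slow are throwaway names): a large
# lambda makes exp(-lambda*D) decay quickly, so cells are nearly independent,
# while a small lambda keeps the whole surface moving together.
m.fast <- cor.surface(side = 4, global.mu = 0, lambda = 5)     # sd across cells ~ 1
m.slow <- cor.surface(side = 4, global.mu = 0, lambda = 0.01)  # sd across cells near 0
c(sd.fast = sd(as.vector(m.fast)), sd.slow = sd(as.vector(m.slow)))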
# parameters (the truth) that I will want to recover by JAGS
side = 10
global.mu = 0
lambda = 0.2 # let's try something new
# simulating the main raster that I will analyze as data
M <- cor.surface(side = side, lambda = lambda, global.mu = global.mu)
image(M)
mean(M)
# simulating the inherent uncertainty of the mean of M:
test = replicate(1000, mean(cor.surface(side = side, lambda = lambda, global.mu = global.mu)))
hist(test, breaks = 40)
sd(test)
# normal distribution
jag1 <- as.vector(as.matrix(M))
my.data <- list(N = side * side, D = dist.matrix(side), y = jag1)
modelCode = textConnection("
model
{
# priors
lambda ~ dgamma(1, 0.1)
global.mu ~ dnorm(0, 0.01)
for(i in 1:N)
{
# vector of mvnorm means mu
mu[i] <- global.mu
}
# derived quantities
for(i in 1:N)
{
for(j in 1:N)
{
# turning the distance matrix to covariance matrix
D.covar[i,j] <- exp(-lambda*D[i,j])
}
}
# dmnorm in JAGS is parameterized by the precision matrix, i.e. the inverse covariance
D.tau[1:N,1:N] <- inverse(D.covar[1:N,1:N])
# likelihood
y[1:N] ~ dmnorm(mu[], D.tau[,])
}
")
fit <- jags(data=my.data,
parameters.to.save=c("lambda", "global.mu"),
model.file=modelCode,
n.iter=10000,
n.chains=3,
n.burnin=5000,
n.thin=5,
DIC=FALSE)
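## Note on cost (sketch): the model inverts an N x N covariance matrix inside
## JAGS at every update (N = side*side = 100 here), so runtime grows roughly
## cubically with the number of grid cells.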
?jags
plot(as.mcmc(fit))
summary(fit)
?as.mcmc
pairs(as.matrix(as.mcmc(fit)))
# 13.09 glmmTMB -----------------------------------------------------------
# glmmTMB time variable --------------------------------------------------
# Spatial correlations ----------------------------------------------------
data = data.frame(resp=my.data$y)
data$pos <- numFactor(row.coords, col.coords)  # numFactor expects the coordinate vectors
data$group <- factor(rep(1, nrow(data)))
data$x = row.coords
data$y = col.coords
fit.exp <- glmmTMB(resp ~ 1 + exp(pos + 0 | group), data=data)
summary(fit.exp)
### Note:
### from the glmmTMB source:
### exp( -dist(i,j) * exp(-theta(1)) ) );
### which means that to get our lambda parametrization we have to calculate: exp(-theta)
exp(-fit.exp$fit$par[4]) # original
exp(-fit.exp$fit$par[3]) # auch theta
fit.exp$fit$par#[3]
#
#
side= 18
fit.exp <- glmmTMB(resp ~ 1 + exp(pos + 0 | group), data = new.data1)
print(fit.exp)
summary(fit.exp)
18*18
# 13.09 nlme --------------------------------------------------------------
data.1 <- data.frame(my.data$y)
id = rep(letters[1:20],5)
lm1 <- lme(y ~ 1, random=~ 1 | id, correlation=corExp(form=~1|id), data = my.data)
summary(lm1)
my.data$group = as.factor(rep(1, my.data$N))
my.data$rows = row.col[,1]
my.data$cols = row.col[,2]
# <____________ lm2 <- lme(y ~ 1, random=~ 1 | group, correlation=corExp(form=~rows+cols), data = my.data)
lm3 <- gls(y ~ 1, correlation=corExp(form=~rows+cols), data = my.data)
lm3
# <____________ lm2
# <____________ intervals(lm3)
# <____________ summary(lm3)$intervals
# <____________ log(0.2)
range <- coef(lm3$modelStruct$corStruct, unconstrained = F)  # lm3 is the gls fit above
l = 1 /range # r = range # silly me ....
d <- l
gls.lambda <- exp(-D/range)  # implied correlation surface exp(-lambda*D), with lambda = 1/range
gls.lambda
#
#
#
#
# <____________ length(my.data$y)
# <____________ dummy <- rep(1, 100)
# <____________ spdata <- cbind(my.data$y, dummy)
# <____________ lme1 <- lme(y ~ 1, data = my.data, random = ~ 1 | dummy, method = "ML")
# <____________ summary(lme1)
# <____________ ?lme
# <____________ lme2 <- update(lme1, correlation = corGaus(1, form = ~ dummy + 0), method = "ML")
# <____________ summary(lme2)
#
#
#
#
#
# Eigene Likelihood Function MVN -----------------------------------------
# zweiter Versuch MLE von MVN zu laufen -----------------------------------
# "solve", "qr.solve", "pseudoinverse"
#y <- -mvtnorm::dmvnorm(my.data$y, mean = rep(par[2], 100), sigma = cov,log = TRUE)
set.seed=26
ll = function(par) {
cov = (exp(-par[1]* my.data$D))
-mvtnorm::dmvnorm(my.data$y, mean = rep(par[2], 100), sigma = cov,log = TRUE)
}
methods = c("Nelder-Mead", "BFGS", "CG", "L-BFGS-B", "SANN","Brent")
result <- optim(par = c(0.5,10), fn = ll, gr = NULL, method = methods[1], hessian = FALSE)
result
res = sapply(seq(0.05, 1, by = 0.01),function(i) ll(c(i, 0.0)))
plot(1:96, res)
result$par[1]
# <____________ min(res) # 57.61983 = 0.16 best score
# <____________ optim.minimizer(res)
# <____________ optim.minimum(res)
# <____________ print(res$minimum)
# <____________ result
# <____________ sapply(seq(0.05, 1, by = 0.01),function(i) ll(c(i, 0.0)))
# <____________ cov = (exp(-5.8* my.data$D))
# <____________ cov
##################################################################################
#'
#'
##################################################################################
# glmmTMB -----------------------------------------------------------
data = data.frame(resp=my.data$y)
data$pos <- numFactor(row.col, row.coords)
data$group <- factor(rep(1, nrow(data)))
data$x = row.coords
data$y = col.coords
fit.exp <- glmmTMB(resp ~ 1 + exp(pos + 0 | group), data=data)
summary(fit.exp)
### exp( -dist(i,j) * exp(-theta(1)) ) );
### which means that to get our lambda parametrization we have to calculate: exp(-theta)
exp(-fit.exp$fit$par[4])
# nlme / GLS ---------------------------------------------------------------
my.data$group = as.factor(rep(1, my.data$N))
my.data$rows = row.col[,1]
my.data$cols = row.col[,2]
lm2 <- lme(y ~ 1, random=~ 1 | group, correlation=corExp(form=~rows+cols), data = my.data)
lm3 <- gls(y ~ 1, correlation=corExp(form=~rows+cols), data = my.data)
try({
gls = gls(y ~ 1, correlation=corExp (form =~ rows + cols), data = new.data)
}, silent=TRUE)
range <- coef(lm3$modelStruct$corStruct, unconstrained = F)  # use lm3; the try() fit above may not exist
l = 1 /range # r = range
d <- l
gls.lambda <- exp(-D/range)  # implied correlation surface exp(-lambda*D), with lambda = 1/range
gls.lambda
# Eigene Likelihood Function MVN -----------------------------------------
#y <- -mvtnorm::dmvnorm(my.data$y, mean = rep(par[2], 100), sigma = cov,log = TRUE)
methods = c("Nelder-Mead", "BFGS", "CG", "L-BFGS-B", "SANN","Brent")
ll = function(par) {
cov = (exp(-par[1]* new.data$D))
-mvtnorm::dmvnorm(new.data1$resp, mean = rep(par[2], side*side), sigma = cov ,log = TRUE)
}
result <- optim(par = c(0.5,10), fn = ll, gr = NULL, method = methods[1], hessian = FALSE)
result
res = sapply(seq(0.05, 1, by = 0.01),function(i) ll(c(i, 0.0)))
plot(1:96, res)
result$par[1]
min(res) # 88.61983 = 0.315 best score
sapply(seq(0.05, 1, by = 0.01),function(i) ll(c(i, 0.0)))
cov = (exp(-5.8* my.data$D))
cov
#------- For-Loop Structure: JAGS, nlme, glmmTMB (50 times) ---------------
#--------------------------------------------------------------------------
# Loop structure -----------------------------------------------------------
# runtime
##
## Runtime-measurement template (commented sketch; the `...` placeholders
## must be filled in before this can run):
# for (i in 1:n_runs) {
#   time_gls = system.time({
#     gls_model = gls(...)          # fit the gls model of interest
#   })
#   time_glmmTMB = system.time({
#     glmmTMB_model = glmmTMB(...)  # glmmTMB, not gls, belongs here
#   })
#   time_gls[3]                     # elapsed seconds for the fit
# }
# time, lambda(theta, range), intercept
# glmmTMB, gls, optim
# 10 sites, 3 target values
# -------------------------------------------------------------------
# <____________ result_glmmTMB = matrix(NA, 10, 3)
# <____________ for(i in 1:10){
# <____________ time_glmmTMB =
# <____________ system.time({
# <____________ m1 = glmmTMB(y~1, data = my.data)
# <____________ })
# <____________ result_glmmTMB[i, 1] = time_glmmTMB[3]
# <____________ result_glmmTMB[i, 2] = summary(m1)$coefficients$cond[1]
# <____________ }
# <____________ b
# ------------------------------------------------------------------
2+2
# LOOP__GlmmTMB -----------------------------------------------------------
result_glmmTMB = matrix(NA, 10, 3)
for(i in 1:10){
time_glmmTMB =
system.time({
fit.exp <- glmmTMB(resp ~ 1 + exp(pos + 0 | group), data=data)
})
result_glmmTMB[i, 1] = time_glmmTMB[3] # Time
result_glmmTMB[i, 2] = exp(-fit.exp$fit$par[4]) #lambda/theta?
result_glmmTMB[i, 3] = summary(fit.exp)$coefficients$cond[1] # Intercept
}
result_glmmTMB
# Loop__GLS ---------------------------------------------------------------
result_gls = matrix(NA, 10, 3)
for(i in 1:10){
time_gls =
system.time({
gls <- gls(y ~ 1, correlation=corExp (form=~rows+cols), data = my.data)
})
result_gls[i, 1] = time_gls[3] # Time
result_gls[i, 2] = 1/coef(gls$modelStruct$corStruct, unconstrained = F) #lambda/theta?
result_gls[i, 3] = summary(gls)$coefficients # Intercept
}
result_gls
# <____________ range = 2.541579
# <____________ ?update_labels
# Loop__OPTIM -------------------------------------------------------------
result_optim = matrix(NA, 10, 3)
for(i in 1:10){
time_optim =
system.time({
ll = function(par) {
cov = (exp(-par[1]* my.data$D))
-mvtnorm::dmvnorm(my.data$y, mean = rep(par[2], 100), sigma = cov ,log = TRUE)
}
result <- optim(par = c(0.5,10), fn = ll, gr = NULL, method = methods[1], hessian = FALSE)
})
result_optim[i, 1] = time_optim[3] # Time
result_optim[i, 2] = result$value # lowest score (negative log-likelihood at the optimum)
result_optim[i, 3] = result$par[1] # lambda estimate (par[1] in ll)
}
result_optim
# Loop__In__Loop_Structure with lambda ------------------------------------
# <____________ lambda
# <____________ h = 0
lambda.result = matrix(NA,50,2)
for (h in 1:50){
time_glmmTMB =
system.time({
lambda[h] =0.15+h*0.05
l.r <- lambda[h]
# n.lambda = ceiling(lambda[h])
})
lambda.result[h, 1] <- lambda[h]
lambda.result[h, 2] <- l.r
}
lambda.result
# Loop__in__Loop_Combination__Model ---------------------------------------
repeat.experiment = data.frame(matrix(NA,500,12))
colnames(repeat.experiment) = c("rep", "Lambda", "Side", "glmm_Time", "glmm_Lambda", "glmm_Intercept", "gls_Time", "gls_Lambda", "gls_Intercept", "optim_Time", "optim_Lambda", "optim_Intercept")
counter=1
for (g in 1:5 ){
time_repeat =
system.time({
#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
# Loop: repeat the experiment 5 times for the statistics
for (h in 1:10 ){
time_glmmTMB =
system.time({
lambda = 0 + (h*0.2)
l.r <- lambda
#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
# Loop: vary lambda in steps of 0.2, from 0.2 up to 2.0
for (j in 1:10){
time_side =
system.time({
side = 2+ 2*j
s.r <- side
# Loop: increase side from 4 in steps of 2 up to 22 (the data size is side*side)
#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
# D <- cor.surface(side = side, lambda = lambda, global.mu = global.mu)  # would overwrite the distance matrix D; dist.matrix() below rebuilds it
M <- cor.surface(side = side, lambda = lambda, global.mu = global.mu)
y <- as.vector(as.matrix(M))
#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
new.data1 = 0
new.data = 0
new.data <- list(side=side, lambda = lambda,N = side * side, D = dist.matrix(side), y = y)
new.data$row = row.coords <- rep(1:side, times=side)
new.data$col = col.coords <- rep(1:side, each=side)
new.data$row.col = data.frame(new.data$row, new.data$col)
new.data$N = side*side
n= side*side
# new.data1 = data.frame(resp = my.data$y)
new.data$group <- as.factor(rep(1, new.data$N))
new.data$rows <- new.data$row.col[,1]
new.data$cols <- new.data$row.col[,2]
new.data1 = data.frame(resp = new.data$y)
new.data1$pos <- numFactor(new.data$rows, new.data$cols)  # coordinate pair for the spatial covariance
new.data1$group <- factor(rep(1, new.data$N))
new.data1$x <- new.data$rows
new.data1$y <- new.data$cols
#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
for(i in 1:5){
time_glmmTMB =
system.time({
fit.exp <- glmmTMB(resp ~ 1 + exp(pos + 0 | group), data = new.data1)
})
time_gls =
system.time({
gls <- gls(y ~ 1, correlation=corExp (form =~ rows + cols), data = new.data)
})
time_optim =
system.time({
ll = function(par) {
cov = (exp(-par[1]* new.data$D))
-mvtnorm::dmvnorm(new.data1$resp, mean = rep(par[2], side*side), sigma = cov, log = TRUE)  # use the response column, not the y coordinate
}
result <- optim(par = c(0.5,10), fn = ll, gr = NULL, method = methods[1], hessian = FALSE)
})
}
repeat.experiment[counter, 1] <- g
repeat.experiment[counter, 2] <- lambda
repeat.experiment[counter, 3] <- s.r
repeat.experiment[counter, 4] <- time_glmmTMB[3]
repeat.experiment[counter, 5] <- exp(-fit.exp$fit$par[4])
repeat.experiment[counter, 6] <- summary(fit.exp)$coefficients$cond[1]
repeat.experiment[counter, 7] <- time_gls[3]
repeat.experiment[counter, 8] <- 1/coef(gls$modelStruct$corStruct, unconstrained = F)
repeat.experiment[counter, 9] <- summary(gls)$coefficients
repeat.experiment[counter, 10] <- time_optim[3]
repeat.experiment[counter, 11] <- result$value
repeat.experiment[counter, 12] <- result$par[1]
counter = counter + 1
#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
}
)}
lambda = 0.2
#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
}
)}
#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
}
)}
repeat.experiment
# The individual functions could be combined into a single function
|
/1.Code/backup_1.R
|
no_license
|
EttnerAndreas/Andreas-Ettner-Max-Internship
|
R
| false
| false
| 17,135
|
r
|
## PROCESS LSMS AND DHS DATA ##
#### Preliminaries: Load packages, create new folders, define aggregation functions ####
setwd('/home/cslovell/PycharmProjects/predicting-poverty') # Set working directory to where you downloaded the replication folder
rm(list=ls())
library(magrittr)
library(foreign)
library(raster)
extract <- raster::extract # Ensure the 'magrittr' package does not mask the 'raster' package's 'extract' function
library(readstata13) # One Tanzanian LSMS .dta file was saved in Stata-13 format
library(plyr)
'%&%' <- function(x,y)paste0(x,y)
Int2Factor <- function(x)
{
if(!is.null(attr(x, "value.labels"))){
vlab <- attr(x, "value.labels")
if(sum(duplicated(vlab)) > 0)
cat("Duplicated levels:", vlab, "\n")
else if(sum(duplicated(names(vlab))) > 0)
cat("Duplicated labels:",
names(vlab)[duplicated(names(vlab))], "\n")
else
x <- factor(x, levels = as.numeric(vlab),
labels = names(vlab))
}
x
}
#convert.factors = FALSE
#dir.create('data/output/LSMS', showWarnings = F)
#dir.create('data/output/DHS', showWarnings = F)
# Assign each cluster the mean nightlights values over a 10 km^2 area centered on its provided coordinates
nl <- function(df, year){
# ls.filter identifies the nine clusters we filtered out because of LandScan data availability in our analysis
ls.filter <- c(0.112190, -1.542321, -1.629748, -1.741995, -1.846039, -1.896059, -2.371342, -2.385341, -2.446988)
nl <- raster(paste0('data/input/Nightlights/', year, '/', list.files(paste0('data/input/Nightlights/', year))))
df2 <- subset(df, is.na(lat)==F & is.na(lon)==F & lat !=0 & lon != 0)
df2 <- unique(df2[,c('lat', 'lon')])
shape <- extent(c(range(c(df2$lon-0.5, df2$lon+0.5)),
range(c(df2$lat-0.5, df2$lat+0.5))))
nl <- crop(nl, shape)
for (i in 1:nrow(df2)){
lat <- sort(c(df2$lat[i] - (180/pi)*(5000/6378137), df2$lat[i] + (180/pi)*(5000/6378137)))
    # cos() expects radians, so convert the latitude from degrees before scaling the longitude offset
    lon <- sort(c(df2$lon[i] - (180/pi)*(5000/6378137)/cos(df2$lat[i]*pi/180), df2$lon[i] + (180/pi)*(5000/6378137)/cos(df2$lat[i]*pi/180)))
ext <- extent(lon, lat)
nl.vals <- unlist(extract(nl, ext))
    nl.vals[nl.vals == 255] <- NA  # 255 is the nightlights 'no data' code; assigning NULL to vector elements would error
df2$nl[i] <- mean(nl.vals, na.rm = T)
# Add a column to indicate whether cluster was one of the nine filtered out by LandScan data availability for our study
# This allows full replication of our data by subsetting survey data to sample == 1 as well as testing on the full survey sample
# Ultimately, our sample differs from the full sample by just one cluster in Uganda and one in Tanzania
df2$sample[i] <- if (round(df2$lat[i], 6) %in% ls.filter) 0 else 1
}
df <- merge(na.omit(df2), df, by = c('lat', 'lon'))
return(df)
}
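## Sanity check on the buffer math above (sketch): (180/pi)*(5000/6378137) is
## ~0.0449 degrees of latitude, i.e. a 5 km offset on a sphere of radius
## 6378137 m, so each extracted extent spans roughly 10 km per side.
(180/pi) * (5000/6378137)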
# Aggregate household-level data to cluster level
cluster <- function(df, dhs = F){
# Record how many households comprise each cluster
for (i in 1:nrow(df)){
sub <- subset(df, lat == df$lat[i] & lon == df$lon[i])
df$n[i] <- nrow(sub)
}
# Clustering for LSMS survey data
df <- if (dhs == FALSE)
ddply(df, .(lat, lon), summarise,
cons = mean(cons),
nl = mean(nl),
n = mean(n),
sample = min(sample))
# Clustering for DHS survey data
else ddply(df, .(lat, lon), summarise,
wealthscore = mean(wealthscore),
nl = mean(nl),
n = mean(n),
sample = min(sample))
return(df)
}
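## Usage sketch: cluster(hh_df) aggregates an LSMS household table (mean cons
## per lat/lon pair), while cluster(hh_df, dhs = TRUE) aggregates a DHS table
## whose outcome column is wealthscore.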
#### Write LSMS Data ####
## Uganda ##
uga12.cons <- read.dta('data/input/LSMS/UGA_2011_UNPS_v01_M_Stata/UNPS 2011-12 Consumption Aggregate.dta') %$%
data.frame(hhid = HHID, cons = welfare*118.69/(30*946.89*mean(c(66.68, 71.55))))
uga12.geo <- read.dta('data/input/LSMS/UGA_2011_UNPS_v01_M_Stata/UNPS_Geovars_1112.dta')
uga12.coords <- data.frame(hhid = uga12.geo$HHID, lat = uga12.geo$lat_mod, lon = uga12.geo$lon_mod)
uga12.rururb <- data.frame(hhid = uga12.geo$HHID, rururb = uga12.geo$urban, stringsAsFactors = F)
uga12.weight <- read.dta('data/input/LSMS/UGA_2011_UNPS_v01_M_Stata/GSEC1.dta')[,c('HHID', 'mult')]
names(uga12.weight) <- c('hhid', 'weight')
uga12.hh9 <- read.dta('data/input/LSMS/UGA_2011_UNPS_v01_M_Stata/GSEC9A.dta')
uga12.room <- data.frame(hhid = uga12.hh9$HHID, room = uga12.hh9$h9q3)
uga12.metal <- data.frame(hhid = uga12.hh9$HHID, metal = uga12.hh9$h9q4=='Iron sheets')  # column named 'metal' to match the other countries
uga12.vars <- list(uga12.cons, uga12.rururb, uga12.coords, uga12.weight, uga12.room, uga12.metal) %>%
Reduce(function(x,y) merge(x, y, by = 'hhid'), .) %>%
nl(2012)
write.table(uga12.vars, 'data/output/LSMS/Uganda 2012 LSMS (Household).txt', row.names = F)
write.table(cluster(uga12.vars), 'data/output/LSMS/Uganda 2012 LSMS (Cluster).txt', row.names = F)
## Tanzania ##
tza13.cons <- read.dta('data/input/LSMS/TZA_2012_NPS-R3_v01_M_STATA_English_labels/ConsumptionNPS3.dta') %$%
data.frame(hhid = y3_hhid, cons = expmR/(365*adulteq))
tza13.cons$cons <- tza13.cons$cons*112.69/(585.52*mean(c(130.72,141.01)))
tza13.geo <- read.dta13('data/input/LSMS/TZA_2012_NPS-R3_v01_M_STATA_English_labels/HouseholdGeovars_Y3.dta')
tza13.coords <- data.frame(hhid = tza13.geo$y3_hhid, lat = tza13.geo$lat_dd_mod, lon = tza13.geo$lon_dd_mod)
tza13.hha <- read.dta('data/input/LSMS/TZA_2012_NPS-R3_v01_M_STATA_English_labels/HH_SEC_A.dta')
tza13.rururb <- data.frame(hhid = tza13.hha$y3_hhid, rururb = tza13.hha$y3_rural, stringsAsFactors = F)
tza13.weight <- read.dta('data/input/LSMS/TZA_2012_NPS-R3_v01_M_STATA_English_labels/HH_SEC_A.dta')[,c('y3_hhid', 'y3_weight')]
names(tza13.weight) <- c('hhid', 'weight')
tza13.hhi <- read.dta('data/input/LSMS/TZA_2012_NPS-R3_v01_M_STATA_English_labels/HH_SEC_I.dta')
tza13.room <- na.omit(data.frame(hhid = tza13.hhi$y3_hhid, room = tza13.hhi$hh_i07_1))
tza13.metal <- data.frame(hhid = tza13.hhi$y3_hhid, metal = tza13.hhi$hh_i09=='METAL SHEETS (GCI)')
tza13.vars <- list(tza13.cons, tza13.coords, tza13.rururb, tza13.weight, tza13.room, tza13.metal) %>%
Reduce(function(x, y) merge(x, y, by = 'hhid'), .) %>%
nl(2013)
write.table(tza13.vars, 'data/output/LSMS/Tanzania 2013 LSMS (Household).txt', row.names = F)
write.table(cluster(tza13.vars), 'data/output/LSMS/Tanzania 2013 LSMS (Cluster).txt', row.names = F)
## Malawi ##
mwi13.cons <- read.dta('data/input/LSMS/MWI_2013_IHPS_v01_M_Stata/Round 2 (2013) Consumption Aggregate.dta') %$%
data.frame(hhid = y2_hhid, cons = rexpagg/(365*adulteq), weight = hhweight)
mwi13.cons$cons <- mwi13.cons$cons*107.62/(116.28*166.12)
mwi13.geo <- read.dta('data/input/LSMS/MWI_2013_IHPS_v01_M_Stata/HouseholdGeovariables_IHPS.dta', convert.factors = FALSE)
mwi13.coords <- data.frame(hhid = mwi13.geo$y2_hhid, lat = mwi13.geo$LAT_DD_MOD, lon = mwi13.geo$LON_DD_MOD)
mwi13.hha <- read.dta('data/input/LSMS/MWI_2013_IHPS_v01_M_Stata/HH_MOD_A_FILT.dta')
mwi13.rururb <- data.frame(hhid = mwi13.hha$y2_hhid, rururb = mwi13.hha$baseline_rural, stringsAsFactors = F)
mwi13.hhf <- read.dta('data/input/LSMS/MWI_2013_IHPS_v01_M_Stata/HH_MOD_F.dta')
mwi13.room <- data.frame(hhid = mwi13.hhf$y2_hhid, room = mwi13.hhf$hh_f10)
mwi13.metal <- data.frame(hhid = mwi13.hhf$y2_hhid, metal = mwi13.hhf$hh_f10=='IRON SHEETS')
mwi13.vars <- list(mwi13.cons, mwi13.coords, mwi13.rururb, mwi13.room, mwi13.metal) %>%
Reduce(function(x, y) merge(x, y, by = 'hhid'), .) %>%
nl(2013)
write.table(mwi13.vars, 'data/output/LSMS/Malawi 2013 LSMS (Household).txt', row.names = F)
write.table(cluster(mwi13.vars), 'data/output/LSMS/Malawi 2013 LSMS (Cluster).txt', row.names = F)
## Nigeria ##
nga13.cons <- read.dta('data/input/LSMS/DATA/cons_agg_w2.dta') %$%
data.frame(hhid = hhid, cons = pcexp_dr_w2/365)
nga13.cons$cons <- nga13.cons$cons*110.84/(79.53*100)
nga13.geo <- read.dta('data/input/LSMS/DATA/Geodata Wave 2/NGA_HouseholdGeovars_Y2.dta', convert.factors = FALSE)
nga13.coords <- data.frame(hhid = nga13.geo$hhid, lat = nga13.geo$LAT_DD_MOD, lon = nga13.geo$LON_DD_MOD)
nga13.rururb <- data.frame(hhid = nga13.geo$hhid, rururb = nga13.geo$sector, stringsAsFactors = F)
nga13.weight <- read.dta('data/input/LSMS/DATA/HHTrack.dta', convert.factors = FALSE)[,c('hhid', 'wt_wave2')]
names(nga13.weight)[2] <- 'weight'
nga13.phhh8 <- read.dta('data/input/LSMS/DATA/Post Harvest Wave 2/Household/sect8_harvestw2.dta', convert.factors = FALSE)
nga13.room <- data.frame(hhid = nga13.phhh8$hhid, room = nga13.phhh8$s8q9)
nga13.metal <- data.frame(hhid = nga13.phhh8$hhid, metal = nga13.phhh8$s8q7=='IRON SHEETS')
nga13.vars <- list(nga13.cons, nga13.coords, nga13.rururb, nga13.weight, nga13.room, nga13.metal) %>%
Reduce(function(x, y) merge(x, y, by = 'hhid'), .) %>%
nl(2013)
write.table(nga13.vars, 'data/output/LSMS/Nigeria 2013 LSMS (Household).txt', row.names = F)
write.table(cluster(nga13.vars), 'data/output/LSMS/Nigeria 2013 LSMS (Cluster).txt', row.names = F)
#### Write DHS Data ####
path <- function(iso){
return(paste0('data/input/DHS/',list.files('data/input/DHS')[substr(list.files('data/input/DHS'),1,2)==iso], '/'))
}
vars <- c('001', '005', 271)
vars <- c('hhid', paste0('hv', vars))
names <- c('hhid', 'cluster', 'weight', 'wealthscore')
# Uganda 2011
uga11.dhs <- read.dta(path('UG')%&%'ughr60dt/UGHR60FL.DTA', convert.factors=NA) %>%
subset(select = vars)
names(uga11.dhs) <- names
uga11.coords <- read.dbf(path('UG')%&%'ugge61fl/UGGE61FL.dbf')[,c('DHSCLUST', 'LATNUM', 'LONGNUM')]
names(uga11.coords) <- c('cluster', 'lat', 'lon')
uga11.dhs <- merge(uga11.dhs, uga11.coords, by = 'cluster') %>%
nl(2011)
write.table(uga11.dhs, 'data/output/DHS/Uganda 2011 DHS (Household).txt', row.names = F)
write.table(cluster(uga11.dhs, T), 'data/output/DHS/Uganda 2011 DHS (Cluster).txt', row.names = F)
# Tanzania 2010
tza10.dhs <- read.dta(path('TZ')%&%'tzhr63dt/TZHR63FL.DTA', convert.factors = NA) %>%
subset(select = vars)
names(tza10.dhs) <- names
tza10.coords <- read.dbf(path('TZ')%&%'tzge61fl/TZGE61FL.dbf')[,c('DHSCLUST', 'LATNUM', 'LONGNUM')]
names(tza10.coords) <- c('cluster', 'lat', 'lon')
tza10.dhs <- merge(tza10.dhs, tza10.coords, by = 'cluster') %>%
nl(2010)
write.table(tza10.dhs, 'data/output/DHS/Tanzania 2010 DHS (Household).txt', row.names = F)
write.table(cluster(tza10.dhs, T), 'data/output/DHS/Tanzania 2010 DHS (Cluster).txt', row.names = F)
# Nigeria 2013
nga13.dhs <- read.dta(path('NG')%&%'nghr6adt/NGHR6AFL.DTA', convert.factors = FALSE) %>%
subset(select = vars)
names(nga13.dhs) <- names
nga13.coords <- read.dbf(path('NG')%&%'ngge6afl/NGGE6AFL.dbf')[,c('DHSCLUST', 'LATNUM', 'LONGNUM')]
names(nga13.coords) <- c('cluster', 'lat', 'lon')
nga13.dhs <- merge(nga13.dhs, nga13.coords, by = 'cluster') %>%
nl(2013)
write.table(nga13.dhs, 'data/output/DHS/Nigeria 2013 DHS (Household).txt', row.names = F)
write.table(cluster(nga13.dhs, T), 'data/output/DHS/Nigeria 2013 DHS (Cluster).txt', row.names = F)
# Malawi 2010
mwi10.dhs <- read.dta(path('MW')%&%'mwhr61dt/MWHR61FL.DTA', convert.factors = FALSE) %>%
subset(select = vars)
names(mwi10.dhs) <- names
mwi10.coords <- read.dbf(path('MW')%&%'mwge62fl/MWGE62FL.dbf')[,c('DHSCLUST', 'LATNUM', 'LONGNUM')]
names(mwi10.coords) <- c('cluster', 'lat', 'lon')
mwi10.dhs <- merge(mwi10.dhs, mwi10.coords, by = 'cluster') %>%
nl(2010)
write.table(mwi10.dhs, 'data/output/DHS/Malawi 2010 DHS (Household).txt', row.names = F)
write.table(cluster(mwi10.dhs, T), 'data/output/DHS/Malawi 2010 DHS (Cluster).txt', row.names = F)
# Rwanda 2010
rwa10.dhs <- read.dta(path('RW')%&%'rwhr61dt/RWHR61FL.DTA', convert.factors = FALSE) %>%
subset(select = vars)
names(rwa10.dhs) <- names
rwa10.coords <- read.dbf(path('RW')%&%'rwge61fl/RWGE61FL.dbf')[,c('DHSCLUST', 'LATNUM', 'LONGNUM')]
names(rwa10.coords) <- c('cluster', 'lat', 'lon')
rwa10.dhs <- merge(rwa10.dhs, rwa10.coords, by = 'cluster') %>%
nl(2010)
write.table(rwa10.dhs, 'data/output/DHS/Rwanda 2010 DHS (Household).txt', row.names = F)
write.table(cluster(rwa10.dhs, T), 'data/output/DHS/Rwanda 2010 DHS (Cluster).txt', row.names = F)
|
/scripts/ProcessSurveyData.R
|
permissive
|
escap-data-hub/predicting-poverty
|
R
| false
| false
| 11,918
|
r
|
## Second File of the App
## We load the required package once again.
library(shiny)
## Define server logic required to draw a histogram
shinyServer(function(input, output) {
output$distPlot <- renderPlot({
samples <- faithful[, 2]
    bins <- seq(min(samples), max(samples), length.out = input$bins + 1)
# draw the histogram with the specified number of bins
hist(samples, breaks = bins, col = 'red', border = 'white')
})
})
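## For context, a minimal matching ui.R sketch (assumed; not part of this file):
# library(shiny)
# shinyUI(fluidPage(
#   titlePanel("Old Faithful Geyser Data"),
#   sidebarLayout(
#     sidebarPanel(sliderInput("bins", "Number of bins:", min = 1, max = 50, value = 30)),
#     mainPanel(plotOutput("distPlot"))
#   )
# ))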
|
/server.R
|
no_license
|
herson44/DevopingDataProductsproject
|
R
| false
| false
| 446
|
r
|
library(mefa4)
library(pbapply)
library(RColorBrewer)
ROOT <- "e:/peter/AB_data_v2016/out/birds"
ROOT2 <- "~/Dropbox/josm/2016/wewp"
#setwd("c:/p/AB_data_v2015/out/birds/results")
#fl <- list.files()
#fl2 <- gsub("birds_bam-", "birds_abmi-", fl)
#for (i in 1:length(fl))
# if (fl[i] != fl2[i])
# file.rename(fl[i], fl2[i])
level <- 0.9
up <- function() {
source("~/repos/bragging/R/glm_skeleton.R")
source("~/repos/abmianalytics/R/results_functions.R")
source("~/repos/bamanalytics/R/makingsense_functions.R")
source("~/repos/abmianalytics/R/wrsi_functions.R")
# source("~/repos/abmianalytics/R/results_functions1.R")
# source("~/repos/abmianalytics/R/results_functions2.R")
invisible(NULL)
}
up()
e <- new.env()
load(file.path(ROOT, "data", "data-full-withrevisit.Rdata"), envir=e)
dat <- e$DAT
dat <- dat[dat$useOK,]
yy <- e$YY[rownames(dat),]
tax <- droplevels(e$TAX[colnames(yy),])
#pveghf <- e$pveghf[rownames(dat),]
#pveghf <- data.frame(as.matrix(pveghf))
#pveghf$Open <- pveghf$GrassHerb + pveghf$Shrub
#pveghf <- as.matrix(pveghf[,c("Decid", "Mixwood", "Conif", "Pine", "BSpr", "Larch",
# "Open", "Wetland", "Cult", "UrbInd", "HardLin", "SoftLin")])
#colnames(pveghf) <- c("Deciduous", "Mixedwood", "White Spruce", "Pine",
# "Black Spruce", "Larch",
# "Open", "Wet", "Cultivated", "Urban/Industrial", "Hard Linear", "Soft Linear")
#psoilhf <- as.matrix(e$psoilhf[rownames(dat),c("Productive", "Clay",
# "Saline", "RapidDrain", "Cult", "UrbInd")])
#colnames(psoilhf) <- c("Productive", "Clay",
# "Saline", "Rapid Drain", "Cultivated", "Urban/Industrial")
en <- new.env()
load(file.path(ROOT, "data", "data-full-north.Rdata"), envir=en)
xnn <- en$DAT
modsn <- en$mods
yyn0 <- en$YY
es <- new.env()
load(file.path(ROOT, "data", "data-full-south.Rdata"), envir=es)
xns <- es$DAT
modss <- es$mods
yys0 <- es$YY
rm(e, en, es)
yyn <- yy[rownames(yyn0),]
yys <- yy[rownames(yys0),]
## terms and design matrices
nTerms <- getTerms(modsn, "list")
sTerms <- getTerms(modss, "list")
Xnn <- model.matrix(getTerms(modsn, "formula"), xnn)
colnames(Xnn) <- fixNames(colnames(Xnn))
Xns <- model.matrix(getTerms(modss, "formula"), xns)
colnames(Xns) <- fixNames(colnames(Xns))
stage_hab_n <- 5
stage_hab_s <- 3
spp <- "WEWP"
do_hsh <- FALSE
do_veg <- TRUE
NAM <- as.character(tax[spp, "English_Name"])
f <- file.path(ROOT2, "results", paste0("birds_abmi-",
ifelse(do_hsh, "dohsh", "nohsh"), ifelse(do_veg, "-north_", "-south_"), spp, ".Rdata"))
resn <- loadSPP(f)
## habitat assoc
estn_hab <- getEst(resn, stage=stage_hab_n, na.out=FALSE, Xnn)
prn <- pred_veghf(estn_hab, Xnn, burn_included=FALSE)
## veghf
fname <- file.path("e:/peter/AB_data_v2016/out/birds/wewp",
paste0("habitat-", as.character(tax[spp, "Species_ID"]), "-bw.png"))
png(file=fname,width=1500,height=700)
fig_veghf(prn, NAM, bw=TRUE)
dev.off()
## surrounding hf
estn_sp <- getEst(resn, stage=9, na.out=FALSE, Xnn)
fname <- file.path("e:/peter/AB_data_v2016/out/birds/wewp",
paste0("surroundingHF-", as.character(tax[spp, "Species_ID"]), ".png"))
png(file=fname, width=7.5, height=5.7, units="in", res=300)
op <- par(mai=c(0.9,1,0.2,0.3))
fig_hf_noremn(estn_sp, Xnn, LAB=NAM)
par(op)
dev.off()
## surrounding Wet
fname <- file.path("e:/peter/AB_data_v2016/out/birds/wewp",
paste0("surroundingWet-", as.character(tax[spp, "Species_ID"]), ".png"))
png(file=fname, width=7.5, height=5.7, units="in", res=300)
op <- par(mai=c(0.9,1,0.2,0.3))
fig_any("WetWaterKM", estn_sp, Xnn, xlab="Surrounding Wet/Water (%)", LAB=NAM)
par(op)
dev.off()
## ASP CTI
## CTI: high on flat ground, low on steeper slopes
## SLPASP (ASP): negative when exposed (warmer) and positive when north facing (colder)
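## worked check of the back-transform used below (CTI = exp(xCTI)*10 - 1):
## xCTI = -0.5 -> CTI ~ 5.1; xCTI = 0 -> CTI = 9; xCTI = 0.5 -> CTI ~ 15.5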
cf <- estn_sp[,c("xASP","xCTI","xASP:xCTI")]
range(dat$xASP)
range(dat$xCTI)
z <- expand.grid(xASP=seq(-0.25, 0.25, by=0.1),
xCTI=seq(-0.5, 0.5, by=0.1))
z$ASP <- z$xASP
z$CTI <- exp(z$xCTI)*10 - 1
X <- model.matrix(~xASP+xCTI+xASP:xCTI-1, z)
pr <- apply(cf, 1, function(i) drop(X %*% i))
z$pr <- rowMeans(pr)
x <- seq(-0.25, 0.25, by=0.1)
y <- exp(seq(-0.5, 0.5, by=0.1))*10 - 1
fname <- file.path("e:/peter/AB_data_v2016/out/birds/wewp",
paste0("topo-", as.character(tax[spp, "Species_ID"]), ".png"))
png(file=fname, width=7.5, height=5.7, units="in", res=300)
op <- par(mai=c(0.9,1,0.2,0.3))
plot(dat$ASP[yy[,"WEWP"] == 0], dat$CTI[yy[,"WEWP"] == 0], pch=19, cex=0.5,
xlab="Slope / aspect solar radiation index",ylab="Compound topographic index",
xlim=c(-0.25,0.25), ylim=c(5, 15), col="grey")
points(dat$ASP[yy[,"WEWP"] > 0], dat$CTI[yy[,"WEWP"] > 0], pch=19, cex=0.5, col=1)
contour(x, y, matrix(z$pr, length(x), length(y)), add=TRUE, col=4)
par(op)
dev.off()
## map-det
load(file.path("e:/peter/AB_data_v2016/out", "kgrid", "kgrid_table.Rdata"))
col1 <- c("#C8FBC8","#C8E6FA","#F5E6F5","#FFDCEC","#FFE6CD","#FFF1D2")[match(kgrid$NRNAME,
c("Boreal","Foothills","Rocky Mountain","Canadian Shield","Parkland","Grassland"))]
library(raster)
library(sp)
library(rgdal)
city <-data.frame(x = -c(114,113,112,111,117,118)-c(5,30,49,23,8,48)/60,
y = c(51,53,49,56,58,55)+c(3,33,42,44,31,10)/60)
rownames(city) <- c("Calgary","Edmonton","Lethbridge","Fort McMurray",
"High Level","Grande Prairie")
coordinates(city) <- ~ x + y
proj4string(city) <- CRS(paste0("+proj=longlat +datum=WGS84 ",
"+ellps=WGS84 +towgs84=0,0,0"))
city <- as.data.frame(spTransform(city, CRS(paste0("+proj=tmerc +lat_0=0 +lon_0=-115 +k=0.9992 ",
"+x_0=500000 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs"))))
xyw <- as.matrix(kgrid[kgrid$pWater >= 0.99,c("X","Y")])
blank <- matrix(0, 0, 2)
fw <- read.csv("c:/Users/Peter/Dropbox/josm/2016/wewp/FWMIS_data_WEWP.csv")
library(raster)
library(sp)
library(rgdal)
XYlatlon <- fw[,c("Longitude","Latitude")]
XYlatlon <- XYlatlon[rowSums(is.na(XYlatlon))==0,]
coordinates(XYlatlon) <- ~ Longitude + Latitude
proj4string(XYlatlon) <- CRS("+proj=longlat +datum=WGS84 +ellps=WGS84 +towgs84=0,0,0")
XY <- as.data.frame(spTransform(XYlatlon, CRS("+proj=tmerc +lat_0=0 +lon_0=-115 +k=0.9992 +x_0=500000 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs")))
colnames(XY) <- c("X", "Y") # match the coordinate names used for xy0/xy1 below (xy1 is defined later)
fw$UTM.Northing[fw$UTM.Northing == ""] <- NA
fw$UTM.Northing <- as.numeric(as.character(fw$UTM.Northing))
fwxy <- fw[,c("UTM.Easting","UTM.Northing")]
xy0 <- as.matrix(dat[yy[,spp] == 0,c("X","Y")])
xy1 <- as.matrix(dat[yy[,spp] > 0,c("X","Y")])
NAM <- as.character(tax[spp, "English_Name"])
fname <- file.path("e:/peter/AB_data_v2016/out/birds/wewp",
paste0("detections-", as.character(tax[spp, "Species_ID"]), ".png"))
png(file=fname, width=600, height=1000)
plot(kgrid$X, kgrid$Y, pch=15, cex=0.2, col=col1, axes=FALSE, ann=FALSE)
points(xyw, pch=15, cex=0.2, col=rgb(0.3,0.45,0.9))
points(xy0, pch="+", cex=0.5, col="red3")
#points(xy0, pch=19, cex=0.5, col="red3")
points(xy1, pch=16, cex=1.6, col="red4")
points(XY, pch=16, cex=1.6, col="blue")
mtext(NAM, line=2, side=3, adj=0.5, cex=1.4, col="grey40")
points(city, pch=18, col="grey10")
text(city, rownames(city), cex=0.8, adj=-0.1, col="grey10")
dev.off()
fname <- file.path("e:/peter/AB_data_v2016/out/birds/wewp",
paste0("detectionsWithFWMIS-", as.character(tax[spp, "Species_ID"]), ".png"))
png(file=fname, width=600, height=1000)
plot(kgrid$X, kgrid$Y, pch=15, cex=0.2, col=col1, axes=FALSE, ann=FALSE)
points(xyw, pch=15, cex=0.2, col=rgb(0.3,0.45,0.9))
points(xy0, pch="+", cex=0.5, col="red3")
#points(xy0, pch=19, cex=0.5, col="red3")
points(xy1, pch=16, cex=1.6, col="red4")
points(XY, pch=16, cex=1.6, col="blue")
mtext(NAM, line=2, side=3, adj=0.5, cex=1.4, col="grey40")
points(city, pch=18, col="grey10")
text(city, rownames(city), cex=0.8, adj=-0.1, col="grey10")
dev.off()
yyyy <- ifelse(yy[,spp] == 0, 0, 1)
aa <- Xtab(~ dat$YEAR + yyyy + dat$NRNAME)
aa <- lapply(1:length(aa), function(i) {
z <- as.matrix(aa[[i]])
rownames(z) <- paste(names(aa)[i], rownames(z))
z
})
aa <- do.call(rbind, aa)
write.csv(aa, file=file.path("e:/peter/AB_data_v2016/out/birds/wewp",
paste0("detectionsBAM-", as.character(tax[spp, "Species_ID"]), ".csv")))
## convex hull
rt <- raster(file.path("e:/peter/AB_data_v2016", "data", "kgrid", "AHM1k.asc"))
crs <- CRS("+proj=tmerc +lat_0=0 +lon_0=-115 +k=0.9992 +x_0=500000 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs")
projection(rt) <- crs
mat0 <- as.matrix(rt)
library(dismo)
dd <- rbind(XY,xy1)
ch <- convHull(dd)
fname <- file.path("e:/peter/AB_data_v2016/out/birds/wewp",
paste0("chull-", as.character(tax[spp, "Species_ID"]), ".png"))
png(file=fname, width=600, height=1000)
plot(rt, axes=FALSE, box=FALSE, legend=FALSE, col="grey",
main="Western Wood-Pewee", maxpixels=10^6, interpolate=FALSE)
points(xy0, col="white", pch=20, cex=0.5)
points(dd, col=1, pch=20, cex=1)
plot(ch@polygons, border=4, lwd=2, add=TRUE)
dev.off()
## area in km^2
sapply(slot(ch@polygons, "polygons"), slot, "area") / 10^6
## number of grid cells occupied
source("~/repos/abmianalytics/R/maps_functions.R")
kgrid$det <- 0L
kgrid$surv <- 0L
for (i in 1:nrow(dd)) {
d <- sqrt((kgrid$X - dd$X[i])^2 + (kgrid$Y - dd$Y[i])^2)
j <- which.min(d)
kgrid$det[j] <- 1L
kgrid$surv[j] <- 1L
}
for (i in 1:nrow(xy0)) {
d <- sqrt((kgrid$X - xy0[i,"X"])^2 + (kgrid$Y - xy0[i,"Y"])^2)
kgrid$surv[which.min(d)] <- 1L
}
## make a factor with level: unsurveyed, surveyed, detected
## check the scale !!!!
kgrid$aoo <- kgrid$surv + kgrid$det + 1
kgrid$faoo <- factor(kgrid$aoo, 1:3)
levels(kgrid$faoo) <- c("unsurveyed", "surveyed", "detected")
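## coding check: surv + det + 1 maps (0,0) -> 1 "unsurveyed",
## (1,0) -> 2 "surveyed", (1,1) -> 3 "detected"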
r2 <- as_Raster0(kgrid$Row, kgrid$Col, kgrid$aoo, rt)
k <- kgrid[,c("det","surv")]
save(k, file="e:/peter/AB_data_v2016/out/birds/wewp/aoo.Rdata")
#load(file.path("e:/peter/AB_data_v2016/out", "kgrid", "kgrid_table.Rdata"))
#load("e:/peter/AB_data_v2016/out/birds/wewp/aoo.Rdata")
k$Row <- kgrid$Row
k$Col <- kgrid$Col
k$Row2 <- 1 + kgrid$Row %/% 2
k$Col2 <- 1 + kgrid$Col %/% 2
k$Row2_Col2 <- interaction(k$Row2, k$Col2, sep="_", drop=TRUE)
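## 1 + Row %/% 2 collapses the 1 km grid into 2 km blocks: e.g. rows 2 and 3
## both map to block 2, rows 4 and 5 to block 3, and so on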
#tmp <- Xtab(~ k$Row2_Col2 + kgrid$NRNAME)
#vi <- find_max(tmp)
k$NR <- kgrid$NRNAME
k2det <- as.matrix(Xtab(~Row2_Col2 + det, k))
k2surv <- as.matrix(Xtab(~Row2_Col2 + surv, k))
k2det[k2det>0] <- 1
k2surv <- k2surv + k2det
k2surv[k2surv>0] <- 1
dim(k2det)
table(k2det[,2] > 0)
table(k2surv[,2] > 0)
k$aoo <- k$surv + k$det + 1
k$faoo <- factor(k$aoo, 1:3)
levels(k$faoo) <- c("unsurveyed", "surveyed", "detected")
byNR_2km <- list()
for (i in levels(k$NR)) {
kk <- droplevels(k[k$NR == i,])
kk <- groupSums(as.matrix(kk[,c("det","surv")]), 1, kk$Row2_Col2)
kk[kk>0] <- 1
kk <- as.data.frame(kk)
kk$aoo <- kk$surv + kk$det + 1
kk$faoo <- factor(kk$aoo, 1:3)
levels(kk$faoo) <- c("unsurveyed", "surveyed", "detected")
tmp <- table(kk$faoo)[c("detected","surveyed","unsurveyed")]
tmp2 <- cumsum(tmp)
names(tmp2) <- c("detected", "surveyed", "available")
byNR_2km[[i]] <- tmp2
}
byNR_2km <- do.call(rbind, byNR_2km)
write.csv(byNR_2km, file="e:/peter/AB_data_v2016/out/birds/wewp/aoo-2km-byNR.csv")
## calculating # occurrences
xnn$lxn <- interaction(xnn$LUF_NAME, xnn$NSRNAME, drop=TRUE, sep="_")
xns$lxn <- interaction(xns$LUF_NAME, xns$NSRNAME, drop=TRUE, sep="_")
y01 <- list()
for (spp in rownames(tax)) {
if (tax[spp, "map_det"]) {
cat(spp, "\n");flush.console()
y01[[spp]] <- table(lxn=c(as.character(xnn$lxn), as.character(xns$lxn)),
det=c(ifelse(yyn[,spp]>0, 1, 0), ifelse(yys[,spp]>0, 1, 0)))[,"1"]
}
}
y01 <- do.call(cbind, y01)
colnames(y01) <- slt[colnames(y01), "sppid"] # 'slt' is assumed to be a species lookup table loaded elsewhere
tmp <- strsplit(rownames(y01), "_")
y01d <- data.frame(LUFxNSR=rownames(y01),
LUF=sapply(tmp, "[[", 1),
NSR=sapply(tmp, "[[", 2),
y01)
write.csv(y01d, row.names=FALSE, file=file.path(ROOT, "birds-number-of-occurrences.csv"))
## veghf-north
## linear-north
## table: veghf-north
for (spp in rownames(tax)) {
cat(spp, "\n");flush.console()
NAM <- as.character(tax[spp, "English_Name"])
if (tax[spp, "veghf_north"]) {
f <- file.path(ROOT2, "results", paste0("birds_abmi-",
ifelse(do_hsh, "dohsh", "nohsh"), ifelse(do_veg, "-north_", "-south_"), spp, ".Rdata"))
resn <- loadSPP(f)
estn6 <- getEst(resn, stage=5, na.out=FALSE, Xnn)
fname <- file.path(ROOT, "coefs",
paste0(as.character(tax[spp, "file"]), "_Stage6_coefs.csv"))
write.csv(estn6, row.names=FALSE, file=fname)
}
}
res_veghf <- list()
for (spp in rownames(tax)) {
cat(spp, "\n");flush.console()
NAM <- as.character(tax[spp, "English_Name"])
if (tax[spp, "veghf_north"]) {
f <- file.path(ROOT2, "results", paste0("birds_abmi-",
ifelse(do_hsh, "dohsh", "nohsh"), ifelse(do_veg, "-north_", "-south_"), spp, ".Rdata"))
resn <- loadSPP(f)
estn_hab <- getEst(resn, stage=stage_hab_n, na.out=FALSE, Xnn)
prn <- pred_veghf(estn_hab, Xnn, burn_included=FALSE)
res_veghf[[spp]] <- prn
NDAT <- sum(yyn[,spp] > 0)
## veghf
fname <- file.path(ROOT, "figs", "veghf-north",
paste0(as.character(tax[spp, "file"]), ".png"))
png(file=fname,width=1500,height=700)
# fig_veghf(prn, paste0(NAM, " (n = ", NDAT, " detections)"))
fig_veghf(prn)
dev.off()
## linear
fname <- file.path(ROOT, "figs", "linear-north",
paste0(as.character(tax[spp, "file"]), ".png"))
png(file=fname,width=350,height=400)
fig_linear(attr(prn, "linear"), paste0(NAM, "\nNorth (n = ", NDAT, " det.)"))
dev.off()
}
}
f1 <- function(x) {
rr <- attr(x, "linear")[-1]
names(rr) <- c("SoftLinear", "SoftLinear.LCL", "SoftLinear.UCL",
"HardLinear", "HardLinear.LCL", "HardLinear.UCL")
x <- x[rownames(x) != "Burn",c(2,3,4)]
rownames(x) <- gsub(" ", "", rownames(x))
xx <- t(x)
dim(xx) <- NULL
names(xx) <- paste0(rep(rownames(x), each=3), c("", ".LCL", ".UCL"))
c(xx, rr)
}
vhf <- t(sapply(res_veghf, f1))
vhf2 <- data.frame(tax[rownames(vhf), c("English_Name","Scientific_Name")],
vhf)
vhf2 <- vhf2[rownames(slt)[slt$veghf.north],] # subset vhf2 (not vhf) to keep the taxonomy columns
write.csv(vhf2, file=file.path(ROOT, "figs", "birds-veghf-north.csv"))
SPP <- rownames(slt)[slt$veghf.north]
vhf2 <- read.csv(file.path(ROOT, "figs", "birds-veghf-north.csv"))
rownames(vhf2) <- vhf2$X
vhf2 <- vhf2[,-(1:3)]
excl <- c(grep(".LCL", colnames(vhf2)), grep(".UCL", colnames(vhf2)))
vhf2 <- as.matrix(vhf2[,-excl])
Max <- apply(vhf2[,1:(ncol(vhf2)-2)], 1, max)
vhf2 <- vhf2 / Max
vhf2[vhf2[,"HardLinear"] > 5,"HardLinear"] <- 5
## soilhf-treed-south
## soilhf-nontreed-south
## linear-south
## table: soilhf-south
res_soilhf <- list()
for (spp in rownames(tax)) {
cat(spp, "\n");flush.console()
NAM <- as.character(tax[spp, "English_Name"])
if (tax[spp, "soilhf_treed_south"] | tax[spp, "soilhf_nontreed_south"]) {
ress <- loadSPP(file.path(ROOT, "results", paste0("birds_abmi-south_", spp, ".Rdata")))
ests_hab <- getEst(ress, stage=stage_hab_s, na.out=FALSE, Xns)
prs <- pred_soilhf(ests_hab, Xns)
res_soilhf[[spp]] <- prs
NDAT <- sum(yys[,spp] > 0)
YMAX <- max(fig_soilhf_ymax(prs$treed), fig_soilhf_ymax(prs$nontreed))
## treed
fname <- file.path(ROOT, "figs", "soilhf-treed-south",
paste0(as.character(tax[spp, "file"]), ".png"))
png(file=fname,width=500,height=450)
fig_soilhf(prs$treed, paste0(NAM, ", South, Treed (n = ", NDAT, " detections)"),
ymax=YMAX)
dev.off()
## nontreed
fname <- file.path(ROOT, "figs", "soilhf-nontreed-south",
paste0(as.character(tax[spp, "file"]), ".png"))
png(file=fname,width=500,height=450)
fig_soilhf(prs$nontreed, paste0(NAM, ", South, Non-treed (n = ", NDAT, " detections)"),
ymax=YMAX)
dev.off()
## linear
fname <- file.path(ROOT, "figs", "linear-south",
paste0(as.character(tax[spp, "file"]), ".png"))
png(file=fname,width=350,height=400)
fig_linear(prs$linear, paste0(NAM, "\nSouth (n = ", NDAT, " det.)"))
dev.off()
}
}
f2 <- function(x) {
rr <- x$linear[-1]
names(rr) <- c("SoftLinear", "SoftLinear.LCL", "SoftLinear.UCL",
"HardLinear", "HardLinear.LCL", "HardLinear.UCL")
x <- x$nontreed
rownames(x) <- gsub(" ", "", rownames(x))
xx <- t(x[,2:4])
dim(xx) <- NULL
names(xx) <- paste0(rep(rownames(x), each=3), c("", ".LCL", ".UCL"))
c(xx, rr)
}
soil <- t(sapply(res_soilhf, f2))
soil2 <- data.frame(tax[rownames(soil), c("English_Name","Scientific_Name")],
soil)
soil2 <- soil2[rownames(slt)[slt$soilhf.south],]
write.csv(soil2, file=file.path(ROOT, "figs", "birds-soilhf-south.csv"))
## climate & surrounding hf tables, climate surface maps
cn <- c("xPET", "xMAT", "xAHM", "xFFP",
"xMAP", "xMWMT", "xMCMT", "xlat", "xlong", "xlat2", "xlong2",
"THF_KM", "Lin_KM", "Nonlin_KM", "Succ_KM", "Alien_KM", "Noncult_KM",
"Cult_KM", "THF2_KM", "Nonlin2_KM", "Succ2_KM", "Alien2_KM",
"Noncult2_KM")
transform_CLIM <- function(x, ID="PKEY") {
z <- x[,ID,drop=FALSE]
z$xlong <- (x$POINT_X - (-113.7)) / 2.15
z$xlat <- (x$POINT_Y - 53.8) / 2.28
z$xAHM <- (x$AHM - 0) / 50
z$xPET <- (x$PET - 0) / 800
z$xFFP <- (x$FFP - 0) / 130
z$xMAP <- (x$MAP - 0) / 2200
z$xMAT <- (x$MAT - 0) / 6
z$xMCMT <- (x$MCMT - 0) / 25
z$xMWMT <- (x$MWMT - 0) / 20
z
}
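## the divisors above are rough variable ranges that put the climate covariates
## on comparable ~unit scales; only longitude/latitude are also centered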
xclim <- transform_CLIM(kgrid, "Row_Col")
xclim$xlat2 <- xclim$xlat^2
xclim$xlong2 <- xclim$xlong^2
ffTerms <- getTerms(modsn["Space"], "formula", intercept=FALSE)
Xclim <- model.matrix(ffTerms, xclim)
colnames(Xclim) <- fixNames(colnames(Xclim))
excln <- kgrid$NRNAME %in% c("Rocky Mountain", "Grassland")
excls <- rep(TRUE, nrow(kgrid))
excls[kgrid$NRNAME %in% c("Grassland", "Parkland")] <- FALSE
excls[kgrid$NSRNAME %in% c("Dry Mixedwood")] <- FALSE
clim_n <- list()
clim_s <- list()
for (spp in rownames(tax)) {
cat(spp, "\n");flush.console()
NAM <- as.character(tax[spp, "English_Name"])
if (tax[spp, "surroundinghf_north"]) {
resn <- loadSPP(file.path(ROOT, "results", paste0("birds_abmi-north_", spp, ".Rdata")))
estn_sp <- getEst(resn, stage=stage_hab_n + 2, na.out=FALSE, Xnn)
sp_n <- colMeans(estn_sp[,cn])
clim_n[[spp]] <- sp_n
fname <- file.path(ROOT, "figs", "climate-north",
paste0(as.character(tax[spp, "file"]), ".png"))
## quick and dirty
pr <- exp(drop(Xclim %*% colMeans(estn_sp[,colnames(Xclim)])))
## bootstrap based and correct
# pr <- rowMeans(exp(apply(estn_sp[,colnames(Xclim)], 1, function(z) drop(Xclim %*% z))))
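## the two versions differ because exp() is convex (Jensen's inequality:
## exp(mean(b)) <= mean(exp(b))), so the quick version can understate the
## bootstrap-averaged prediction surface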
q <- quantile(pr, 0.99)
pr[pr > q] <- q
pr <- pr/max(pr)
pr[excln] <- NA
qq <- quantile(pr, seq(0.1, 0.9, 0.1), na.rm=TRUE)
z <- cut(pr, c(-1, unique(qq), 2))
Col <- rev(terrain.colors(nlevels(z)))
png(file=fname, width=600, height=1000)
plot(kgrid$X, kgrid$Y, pch=15, cex=0.2, col=Col[z], axes=FALSE, ann=FALSE)
points(kgrid$X[excln], kgrid$Y[excln], pch=15, cex=0.2, col="darkgrey")
mtext(paste0(NAM, ", North"), line=2, side=3, adj=0.5, cex=1.4, col="grey40")
points(xyw, pch=15, cex=0.2, col=rgb(0.3,0.45,0.9))
points(city, pch=18, col="grey10")
text(city, rownames(city), cex=0.8, adj=-0.1, col="grey10")
legend("bottomleft", col=rev(Col), fill=rev(Col),
legend=c("High", rep("", length(Col)-2), "Low"), bty="n")
dev.off()
}
if (tax[spp, "surroundinghf_south"]) {
ress <- loadSPP(file.path(ROOT, "results", paste0("birds_abmi-south_", spp, ".Rdata")))
ests_sp <- getEst(ress, stage=stage_hab_s + 2, na.out=FALSE, Xns)
sp_s <- colMeans(ests_sp[,cn])
clim_s[[spp]] <- sp_s
fname <- file.path(ROOT, "figs", "climate-south",
paste0(as.character(tax[spp, "file"]), ".png"))
## quick and dirty
pr <- exp(drop(Xclim %*% colMeans(ests_sp[,colnames(Xclim)])))
## bootstrap based and correct
# pr <- rowMeans(exp(apply(ests_sp[,colnames(Xclim)], 1, function(z) drop(Xclim %*% z))))
q <- quantile(pr, 0.99)
pr[pr > q] <- q
pr <- pr/max(pr)
pr[excls] <- NA
qq <- quantile(pr, seq(0.1, 0.9, 0.1), na.rm=TRUE)
z <- cut(pr, c(-1, unique(qq), 2))
Col <- rev(terrain.colors(nlevels(z)))
png(file=fname, width=600, height=1000)
plot(kgrid$X, kgrid$Y, pch=15, cex=0.2, col=Col[z], axes=FALSE, ann=FALSE)
points(kgrid$X[excls], kgrid$Y[excls], pch=15, cex=0.2, col="darkgrey")
mtext(paste0(NAM, ", South"), line=2, side=3, adj=0.5, cex=1.4, col="grey40")
points(xyw, pch=15, cex=0.2, col=rgb(0.3,0.45,0.9))
points(city, pch=18, col="grey10")
text(city, rownames(city), cex=0.8, adj=-0.1, col="grey10")
legend("bottomleft", col=rev(Col), fill=rev(Col),
legend=c("High", rep("", length(Col)-2), "Low"), bty="n")
dev.off()
}
}
clim_N <- data.frame(tax[names(clim_n), c("English_Name","Scientific_Name")],
do.call(rbind, clim_n))
clim_S <- data.frame(tax[names(clim_s), c("English_Name","Scientific_Name")],
do.call(rbind, clim_s))
clim_N <- clim_N[rownames(slt)[slt$modelN],]
clim_S <- clim_S[rownames(slt)[slt$modelS],]
write.csv(clim_N, file=file.path(ROOT, "figs", "climatehf-north.csv"))
write.csv(clim_S, file=file.path(ROOT, "figs", "climatehf-south.csv"))
## surroundinghf-north
## surroundinghf-south
for (spp in rownames(tax)) {
cat(spp, "\n");flush.console()
NAM <- as.character(tax[spp, "English_Name"])
if (tax[spp, "surroundinghf_north"]) {
resn <- loadSPP(file.path(ROOT, "results", paste0("birds_abmi-north_", spp, ".Rdata")))
estn_sp <- getEst(resn, stage=stage_hab_n + 2, na.out=FALSE, Xnn)
fname <- file.path(ROOT, "figs", "surroundinghf-north",
paste0(as.character(tax[spp, "file"]), ".png"))
png(file=fname, width=7.5, height=5.7, units="in", res=300)
op <- par(mai=c(0.9,1,0.2,0.3))
fig_hf_noremn(estn_sp, Xnn, LAB=paste0(NAM, ", North"))
par(op)
dev.off()
}
if (tax[spp, "surroundinghf_south"]) {
ress <- loadSPP(file.path(ROOT, "results", paste0("birds_abmi-south_", spp, ".Rdata")))
ests_sp <- getEst(ress, stage=stage_hab_s + 2, na.out=FALSE, Xns)
fname <- file.path(ROOT, "figs", "surroundinghf-south",
paste0(as.character(tax[spp, "file"]), ".png"))
png(file=fname, width=7.5, height=5.7, units="in", res=300)
op <- par(mai=c(0.9,1,0.2,0.3))
fig_hf_noremn(ests_sp, Xns, LAB=paste0(NAM, ", South"))
par(op)
dev.off()
}
}
## trend
res_trend <- matrix(NA, nrow(tax), 10)
colnames(res_trend) <- c("Mean_North","Median_North","LCL_North","UCL_North","n_North",
"Mean_South","Median_South","LCL_South","UCL_South","n_South")
res_trend[,5] <- tax$ndet_n
res_trend[,10] <- tax$ndet_s
rownames(res_trend) <- rownames(tax)
for (spp in rownames(tax)) {
cat(spp, "\n");flush.console()
if (tax[spp, "trend_north"]) {
resn <- loadSPP(file.path(ROOT, "results", paste0("birds_abmi-north_", spp, ".Rdata")))
estn_yr <- getEst(resn, stage=stage_hab_n + 3, na.out=FALSE, Xnn)
yr_n <- 100 * (exp(estn_yr[,"YR"]) - 1)
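## e.g. YR = 0.05 gives 100*(exp(0.05)-1) ~ 5.1% change per unit of the year
## covariate (plotted below as "Decadal Trend (%)", assuming YR is scaled to decades)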
res_trend[spp, 1:4] <- fstat(yr_n)
NDATN <- sum(yyn[,spp] > 0)
NN <- aggregate(yyn[,spp], list(year=xnn$YEAR), mean)
}
if (tax[spp, "trend_south"]) {
ress <- loadSPP(file.path(ROOT, "results", paste0("birds_abmi-south_", spp, ".Rdata")))
ests_yr <- getEst(ress, stage=stage_hab_s + 3, na.out=FALSE, Xns)
yr_s <- 100 * (exp(ests_yr[,"YR"]) - 1)
res_trend[spp, 6:9] <- fstat(yr_s)
NDATS <- sum(yys[,spp] > 0)
NS <- aggregate(yys[,spp], list(year=xns$YEAR), mean)
}
if (tax[spp, "trend_north"] | tax[spp, "trend_south"]) {
NAM <- as.character(tax[spp, "English_Name"])
fname <- file.path(ROOT, "figs", "trend",
paste0(as.character(tax[spp, "file"]), ".png"))
png(file=fname, width=600, height=600)
op <- par(mfrow=c(2,2), cex=0.8)
if (tax[spp, "trend_north"]) {
plot(NN, ylab="Annual Mean Abundance Index", xlab="Year",
type="b", col=1, pch=19,
main=paste0(NAM, ", North (n = ", NDATN, " detections)"))
abline(lm(x ~ year, NN), col="red4", lty=1, lwd=2)
hist(yr_n, col="gold", xlab="Decadal Trend (%)", main="")
abline(v=fstat(yr_n)[1], col="red4", lty=1, lwd=2)
abline(v=fstat(yr_n)[3:4], col="red4", lty=2, lwd=1)
} else {
plot.new()
plot.new()
}
if (tax[spp, "trend_south"]) {
plot(NS, ylab="Annual Mean Abundance Index", xlab="Year",
type="b", col=1, pch=19,
main=paste0(NAM, ", South (n = ", NDATS, " detections)"))
abline(lm(x ~ year, NS), col="red4", lty=1, lwd=2)
hist(yr_s, col="gold", xlab="Decadal Trend (%)", main="")
abline(v=fstat(yr_s)[1], col="red4", lty=1, lwd=2)
abline(v=fstat(yr_s)[3:4], col="red4", lty=2, lwd=1)
} else {
plot.new()
plot.new()
}
par(op)
dev.off()
}
}
res_trend2 <- data.frame(tax[,c("English_Name","Scientific_Name")], res_trend)
write.csv(res_trend2, file=file.path(ROOT, "figs", "trend.csv"))
## res_trend is a matrix, so use the data frame res_trend2 for $-indexing
rank_fun(res_trend2$Mean_North, res_trend2$LCL_North, res_trend2$UCL_North,
n=res_trend2$n_North, col=1, lab = rownames(res_trend2))
rank_fun(res_trend2$Mean_South, res_trend2$LCL_South, res_trend2$UCL_South,
n=res_trend2$n_South, col=1, lab = rownames(res_trend2))
## ARU effect
res_aru <- list()
for (spp in rownames(tax[tax$surroundinghf_north,])) {
pres <- sum(yyn[substr(rownames(yyn), 1, 5) == "EMCLA",spp] > 0)
if (pres > 0) {
cat(spp, "\n");flush.console()
resn <- loadSPP(file.path(ROOT, "results", paste0("birds_abmi-north_", spp, ".Rdata")))
estn <- getEst(resn, stage=6, na.out=FALSE, Xnn)
aru <- estn[,"ARU"]
res_aru[[spp]] <- c(aru, pres)
}
}
res_aru <- do.call(rbind, res_aru)
## the last column (241) holds the detection count appended above;
## the preceding columns are the bootstrap ARU coefficients
tmp <- res_aru[res_aru[,241]>19,-241]
tmp[tmp == 0] <- NA
rowMeans(exp(tmp), na.rm=TRUE)
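## exp(ARU) is the multiplicative ARU-vs-human detection rate; averaging the
## exponentiated nonzero bootstrap coefficients gives a per-species ratio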
## Linear features coefficients
spp <- "BTNW"
xlin <- nonDuplicated(xnn[,c("ROAD01","hab1","hab_lcc","hab_lcc2","hab_lcc3")],
hab1, TRUE)
xlin <- xlin[c("Decid", "Mixwood", "Conif", "Pine", "BSpr", "Larch",
"Decid", "Mixwood", "Conif", "Pine", "BSpr", "Larch",
"GrassHerb", "Shrub", "Wetland", "Cult", "UrbInd"),]
rownames(xlin)[1:12] <- paste0(rep(rownames(xlin)[1:6], 2),
rep(c("0-40","40+"), each=6))
xlin$ROAD01 <- 1
xlin$SoftLin_PC <- 0
xlin$hab_lcc[] <- c(4,4, 3,3,3,3, 2,2, 1,1,1,1, 5,5,5,5,5)
xlin$hab_lcc3 <- xlin$hab_lcc
levels(xlin$hab_lcc3) <- c("1", "1", "2", "2", "3")
xlin$hab_lcc2 <- xlin$hab_lcc
levels(xlin$hab_lcc2) <- c("1", "1", "1", "1", "2")
Xlin <- model.matrix(getTerms(modsn["Contrast"], "formula", intercept=TRUE), xlin)
colnames(Xlin) <- fixNames(colnames(Xlin))
Xlin <- Xlin[,-1]
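## Xlin now holds one prediction row per habitat class with ROAD01 = 1 and zero
## soft linear footprint, so Xlin %*% coef below gives the log-scale roadside
## contrast for each habitat (the intercept column is dropped)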
res_soft <- list()
res_hard <- list()
for (spp in rownames(tax)) {
cat(spp, "\n");flush.console()
NAM <- as.character(tax[spp, "English_Name"])
if (tax[spp, "veghf_north"]) {
resn <- loadSPP(file.path(ROOT, "results", paste0("birds_abmi-north_", spp, ".Rdata")))
estn_lin <- getEst(resn, stage=stage_hab_n, na.out=FALSE, Xnn)
colnames(estn_lin) <- fixNames(colnames(estn_lin))
estn_lin2 <- estn_lin[,colnames(Xlin)]
pr <- apply(estn_lin2, 1, function(z) Xlin %*% z)
rownames(pr) <- rownames(xlin)
tab <- t(apply(exp(pr), 1, quantile, c(0.5, 0.05, 0.95)))
res_hard[[spp]] <- data.frame(Species=spp, Habitat=rownames(tab), tab)
res_soft[[spp]] <- quantile(estn_lin[,"SoftLin_PC"], c(0.5, 0.05, 0.95))
}
}
## note: roadside stuff is exponentiated, but soft lin is not,
## because it is exp(x * est)
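## e.g. with soft linear estimate b, the multiplier at x percent soft linear
## footprint is exp(x * b); the roadside table above is already on the exp scale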
softlin <- data.frame(Species=tax[names(res_soft), "English_Name"], do.call(rbind, res_soft))
hardlin <- do.call(rbind, res_hard)
hardlin$Species <- tax[as.character(hardlin$Species), "English_Name"]
softlin <- droplevels(softlin[rownames(slt)[slt$veghf.north],])
hardlin <- droplevels(hardlin[hardlin$Species %in% softlin$Species,])
write.csv(softlin, row.names=FALSE,
file=file.path(ROOT, "figs", "soft-linear-coefs-2015.csv"))
write.csv(hardlin, row.names=FALSE,
file=file.path(ROOT, "figs", "hard-linear-EXPcoefs-2015.csv"))
softlin2 <- softlin[c("BTNW","BBWA","OVEN","BRCR","CAWA"),]
hardlin2 <- do.call(rbind, res_hard[c("BTNW","BBWA","OVEN","BRCR","CAWA")])
hardlin2$Species <- tax[as.character(hardlin2$Species), "English_Name"]
write.csv(softlin2, row.names=FALSE,
file=file.path(ROOT, "figs", "soft-linear-coefs-2015-5spp.csv"))
write.csv(hardlin2, row.names=FALSE,
file=file.path(ROOT, "figs", "hard-linear-EXPcoefs-2015-5spp.csv"))
## upland/lowland classification of species
tax2 <- read.csv("~/repos/abmispecies/_data/birds.csv")
rownames(tax2) <- tax2$AOU
tax3 <- read.csv("~/repos/abmianalytics/lookup/vertebrate-guilds.csv")
rownames(tax3) <- tax3$AOU.Code
setdiff(tax2$AOU[tax2$map.pred], tax3$AOU.Code)
setdiff(tax2$AOU[tax2$map.pred], tax$Species_ID)
SPP <- intersect(tax2$AOU[tax2$map.pred], tax3$AOU.Code)
tax2 <- droplevels(tax2[SPP,])
tax3 <- droplevels(tax3[SPP,])
native <- tax3[,grep("Native.to.", colnames(tax3))]
native[is.na(native)] <- 0
native[native > 0] <- 1
wet <- tax3[,c("General.Habitat.Category.Bog", "General.Habitat.Category.WetAq",
"Wetland.Types.Wet_NestTerrOrWet", "Wetland.Types.Aq_NestTerrOrWet")]
wet[is.na(wet)] <- 0
tax2$native <- ifelse(rowSums(native)>0, 1, 0)
tax2 <- cbind(tax2, wet)
dat2 <- dat[dat$useOK & dat$keep,]
wetcl <- c("BSpr","Larch","Wetland")
dat2$strat <- as.factor(ifelse(dat2$hab1 %in% wetcl, "lowland", "upland"))
yy2 <- as.matrix(yy[rownames(dat2), SPP])
## 'e' was removed earlier by rm(e, en, es), so reload it to access the offsets
e <- new.env()
load(file.path(ROOT, "data", "data-full-withrevisit.Rdata"), envir=e)
off2 <- e$OFFmean[rownames(dat2)]
table(dat2$strat, dat2$pWater >0.5)
dat2$strat[dat2$pWater >0.5] <- "lowland"
library(opticut)
XXX <- model.matrix(~ ROAD01 + SoftLin_PC, dat2)
oc1 <- opticut1(yy2[,1], XXX, dat2$strat, dist="poisson")
oc <- opticut(yy2 ~ ROAD01 + SoftLin_PC, dat2, strata=dat2$strat,
offset=off2, dist="poisson", comb="rank")
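## opticut ranks the candidate binary splits of the strata (lowland vs upland here)
## for each species and reports the best-supported split with its log likelihood ratio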
os <- summary(oc)$summary
os <- os[SPP,]
tax2v <- data.frame(tax2[SPP,], os[SPP,])
tax2v$w <- NULL
tax2v$ndet_n <- NULL
tax2v$ndet_s <- NULL
tax2v$ndet_ns <- NULL
tax2v$map.det <- NULL
tax2v$veghf.north <- NULL
tax2v$soilhf.south <- NULL
tax2v$map.pred <- NULL
tax2v$useavail.north <- NULL
tax2v$useavail.south <- NULL
tax2v$lablo <- NULL
tax2v$labhi <- NULL
#levels(tax2v$split) <- c("lowland", "upland", "nopref")
#tax2v$split[tax2v$logLR < 2] <- "nopref"
table(tax2v$split)
tax2v$order <- tax3[SPP, "Order"]
tax2v$split2 <- as.character(tax2v$split)
tax2v$split2[] <- ""
tax2v$split2[tax2v$General.Habitat.Category.Bog +
tax2v$General.Habitat.Category.WetAq > 0 & tax2v$split == "lowland"] <- "lowland"
tax2v$split2[tax2v$General.Habitat.Category.Bog +
tax2v$General.Habitat.Category.WetAq == 0 & tax2v$split == "upland"] <- "upland"
tax2v$split2[tax2v$order %in% c("ANSERIFORMES","CHARADRIIFORMES","CICONIIFORMES",
"PODICIPEDIFORMES","PELECANIFORMES","GAVIIFORMES","GRUIFORMES")] <- "lowland"
tax2v$split2[tax2v$order %in% c("COLUMBIFORMES","FALCONIFORMES",
"GALLIFORMES","PICIFORMES","STRIGIFORMES")] <- "upland"
tax2v$split2[tax2v$native == 0] <- "nonnative"
table(tax2v$order,tax2v$split)
table(tax2v$split2)
write.csv(tax2v, file="~/birds-upland-lowland-classification.csv", row.names=FALSE)
|
/R/results-josm.R
|
no_license
|
psolymos/abmianalytics
|
R
| false
| false
| 31,196
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/601.Local-odds-ratio.R
\name{Local.Odds.Ratio}
\alias{Local.Odds.Ratio}
\title{Given an input matrix, we can split it into smaller sub-matrices (min 2x2) and then find
the Chi-squared test for each sub-matrix. A sub-matrix can "support" or "oppose"
(have a different conclusion at the 95% confidence interval) the overall Chi-squared
test of the full input matrix. We count the number of times each cell supports or
opposes the overall Chi-squared test. We also generate the list of possible sub-matrices.}
\usage{
Local.Odds.Ratio(mat)
}
\arguments{
\item{mat}{matrix for which the sub-matrices are to be generated}
}
\value{
A list of dataframes with
\item{Hot.df}{ Dataframe with the difference between the supporting matrix and the opposing matrix}
\item{Suport.df }{ Dataframe of the cell counts for support of the table-level Chi-squared }
\item{Oppose.df }{ Dataframe of the cell counts for opposition to the table-level Chi-squared }
\item{sub-matrix }{ list of sub-matrices - this is returned only if the details flag is set to TRUE }
}
\description{
Given an input matrix, we can split it into smaller sub-matrices (min 2x2) and then find
the Chi-squared test for each sub-matrix. The smaller matrix can "support" or "oppose"
(have a different conclusion at 95% confidence interval) compared with the overall Chi-squared
test value of the full input matrix. We count the number of times each cell supports or
opposes the overall Chi-squared test. We also generate the possible list of sub-matrices.
}
\details{
This can be used as an outlier detection method, as well as for observing the individual
cells within an IxJ table
}
\examples{
## Example data from [reference 1]
Drills=c(2, 10, 4, 2 )
Pots= c(3, 8, 4, 6)
Grinding.Stones=c( 13, 5, 3, 9)
Point.Fragments=c(20, 36, 19, 20)
mat=rbind(Drills,Pots,Grinding.Stones,Point.Fragments)
Local.Odds.Ratio(mat)
}
\references{
[1] Mosteller F, Parunak A (2006)
Identifying extreme cells in a sizable contingency table: Probabilistic and exploratory approaches.
In: Hoaglin DC, Mosteller F, Tukey JW (eds) Exploring Data Tables, Trends, and Shapes,
John Wiley & Sons, pp 189-224
}
\seealso{
Other IxJ Inference methods: \code{\link{Nominal.Odds.Ratio}},
\code{\link{Plot.heatmap}}, \code{\link{Reversal.point}},
\code{\link{Subtable.Odds.Ratio}},
\code{\link{generate.heatmap.matrix}}
}
|
/man/Local.Odds.Ratio.Rd
|
no_license
|
RajeswaranV/vcdPlus
|
R
| false
| true
| 2,437
|
rd
|
library(rCUR)
### Name: STTm
### Title: Soft tissue tumour dataset
### Aliases: STTm
### Keywords: datasets
### ** Examples
data(STTm)
|
/data/genthat_extracted_code/rCUR/examples/STTm.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 142
|
r
|
#' @title Fuse learner with the bagging technique.
#'
#' @description
#' Fuses a learner with the bagging method
#' (i.e., similar to what a \code{randomForest} does).
#' Creates a learner object, which can be
#' used like any other learner object.
#' Models can easily be accessed via \code{\link{getBaggingModels}}.
#'
#' Bagging is implemented as follows:
#' For each iteration a random data subset is sampled (with or without replacement)
#' and potentially the number of features is also restricted to
#' a random subset. (Note that this is usually handled in a slightly different way
#' in the random forest, where features are sampled at each tree split.)
#'
#' Prediction works as follows:
#' For classification we do majority voting to create a discrete label and
#' probabilities are predicted by considering the proportions of all predicted labels.
#' For regression the mean value and the standard deviations across predictions is computed.
#'
#' Note that the passed base learner must always have \code{predict.type = 'response'},
#' while the BaggingWrapper can estimate probabilities and standard errors, so it can
#' be set, e.g., to \code{predict.type = 'prob'}. For this reason, when you call
#' \code{\link{setPredictType}}, the type is only set for the BaggingWrapper, not passed
#' down to the inner learner.
#'
#' @template arg_learner
#' @param bw.iters [\code{integer(1)}]\cr
#' Iterations = number of fitted models in bagging.
#' Default is 10.
#' @param bw.replace [\code{logical(1)}]\cr
#' Sample bags with replacement (bootstrapping)?
#' Default is TRUE.
#' @param bw.size [\code{numeric(1)}]\cr
#' Percentage size of sampled bags.
#' Default is 1 for bootstrapping and 0.632 for subsampling.
#' @param bw.feats [\code{numeric(1)}]\cr
#' Percentage size of randomly selected features in bags.
#' Default is 1.
#' At least one feature will always be selected.
#' @template ret_learner
#' @family wrapper
#' @export
makeBaggingWrapper = function(learner, bw.iters = 10L, bw.replace = TRUE, bw.size, bw.feats = 1) {
learner = checkLearner(learner, type=c("classif", "regr"))
pv = list()
if (!missing(bw.iters)) {
bw.iters = asInt(bw.iters, lower = 1L)
pv$bw.iters = bw.iters
}
if (!missing(bw.replace)) {
assertFlag(bw.replace)
pv$bw.replace = bw.replace
}
if (!missing(bw.size)) {
assertNumber(bw.size, lower = 0, upper = 1)
pv$bw.size = bw.size
}
if (!missing(bw.feats)) {
assertNumber(bw.feats, lower = 0, upper = 1)
pv$bw.feats = bw.feats
}
if (learner$predict.type != "response")
stop("Predict type of the basic learner must be 'response'.")
id = paste(learner$id, "bagged", sep = ".")
packs = learner$package
ps = makeParamSet(
makeIntegerLearnerParam(id = "bw.iters", lower = 1L, default = 10L),
makeLogicalLearnerParam(id = "bw.replace", default = TRUE),
makeNumericLearnerParam(id = "bw.size", lower = 0, upper = 1),
makeNumericLearnerParam(id = "bw.feats", lower = 0, upper = 1, default = 2/3)
)
x = makeBaseWrapper(id, learner, packs, par.set = ps, par.vals = pv, cl = "BaggingWrapper")
x = switch(x$type,
"classif" = addProperties(x, "prob"),
"regr" = addProperties(x, "se"))
return(x)
}
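# A minimal usage sketch (illustrative, not part of the package source; assumes
# the mlr package with its bundled iris.task and an installed rpart backend):
#   lrn = makeLearner("classif.rpart")
#   blrn = makeBaggingWrapper(lrn, bw.iters = 50L, bw.replace = TRUE, bw.feats = 3/4)
#   blrn = setPredictType(blrn, "prob")  # allowed on the wrapper, not the base learner
#   mod = train(blrn, iris.task)
#   head(predict(mod, iris.task)$data)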
#' @export
trainLearner.BaggingWrapper = function(.learner, .task, .subset, .weights = NULL,
bw.iters = 10, bw.replace = TRUE, bw.size, bw.feats = 1, ...) {
if (missing(bw.size))
bw.size = if (bw.replace) 1 else 0.632
.task = subsetTask(.task, subset = .subset)
n = .task$task.desc$size
m = round(n * bw.size)
allinds = seq_len(n)
if (bw.feats < 1) {
feats = getTaskFeatureNames(.task)
k = max(round(bw.feats * length(feats)), 1)
}
blocking = .task$blocking
models = lapply(seq_len(bw.iters), function(i) {
if (length(blocking)) {
levs = levels(blocking)
m.blocks = round(length(levs) * bw.size)
bag.blocks = sample(blocking, m.blocks, replace = bw.replace)
bag = sample(which(blocking %in% bag.blocks))
} else {
bag = sample(allinds, m, replace = bw.replace)
}
w = .weights[bag]
if (bw.feats < 1) {
feats2 = sample(feats, k, replace = FALSE)
.task2 = subsetTask(.task, features = feats2)
train(.learner$next.learner, .task2, subset = bag, weights = w)
} else {
train(.learner$next.learner, .task, subset = bag, weights = w)
}
})
makeChainModel(next.model = models, cl = "BaggingModel")
}
#' @export
predictLearner.BaggingWrapper = function(.learner, .model, .newdata, ...) {
models = getBaggingModels(.model)
g = if (.learner$type == "classif") as.character else identity
p = asMatrixCols(lapply(models, function(m) {
nd = .newdata[, m$features, drop = FALSE]
g(predict(m, newdata = nd, ...)$data$response)
}))
if (.learner$predict.type == "response") {
g = if (.learner$type == "classif")
as.factor(apply(p, 1L, computeMode))
else
rowMeans(p)
} else {
if (.learner$type == 'classif') {
levs = .model$task.desc$class.levels
p = apply(p, 1L, function(x) {
        x = factor(x, levels = levs) # we need all levels for the table and we need them in consistent order!
as.numeric(prop.table(table(x)))
})
setColNames(t(p), levs)
} else {
cbind(rowMeans(p), apply(p, 1L, sd))
}
}
}
#' @export
makeWrappedModel.BaggingWrapper = function(learner, learner.model, task.desc, subset, features, factor.levels, time) {
x = NextMethod()
addClasses(x, "BaggingModel")
}
#' @export
print.BaggingModel = function(x, ...) {
s = capture.output(print.WrappedModel(x))
u = sprintf("Bagged Learner: %s", class(x$learner$next.learner)[1L])
s = append(s, u, 1L)
lapply(s, catf)
}
# we need to override here. while the predict type of the encapsulated learner must always
# be response, we can estimate probs and se on the outside
#' @export
setPredictType.BaggingWrapper = function(learner, predict.type) {
learner = setPredictType.Learner(learner, predict.type)
return(learner)
}
|
/R/BaggingWrapper.R
|
no_license
|
narayana1208/mlr
|
R
| false
| false
| 6,025
|
r
|
library(bizdays)
### Name: other-calendars
### Title: Calendars from other packages
### Aliases: other-calendars load_quantlib_calendars
### load_rmetrics_calendars
### ** Examples
if (require("RQuantLib")) {
# loading Argentina calendar
load_quantlib_calendars('Argentina', from='2016-01-01', to='2016-12-31')
bizdays('2016-01-01', '2016-03-14', 'QuantLib/Argentina')
# loading 2 calendars
load_quantlib_calendars(c('UnitedStates/NYSE', 'UnitedKingdom/Settlement'),
from='2016-01-01', to='2016-12-31')
bizdays('2016-01-01', '2016-03-14', 'QuantLib/UnitedStates/NYSE')
# loading all QuantLib's 49 calendars
load_quantlib_calendars(from='2016-01-01', to='2016-12-31')
bizdays('2016-01-01', '2016-03-14', 'QuantLib/Brazil')
}
if (require("timeDate")) {
# loading all Rmetrics calendars
load_rmetrics_calendars(2016)
bizdays('2016-01-01', '2016-03-14', 'Rmetrics/NERC')
bizdays('2016-01-01', '2016-03-14', 'Rmetrics/NYSE')
}
|
/data/genthat_extracted_code/bizdays/examples/other-calendars.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 969
|
r
|
\name{priceCDS}
\alias{priceCDS}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Credit Default Swap Pricing}
\description{
A function that calculates the spreads of several Credit Default Swaps (different maturities) from a yield curve and a credit curve.
}
\details{
In brief, a CDS is used to transfer the credit risk of a reference entity (corporate or sovereign) from one party to another. In a standard CDS contract one party purchases credit protection from another party, to cover the loss of the face value of an asset following a credit event. A credit event is a
legally defined event that typically includes bankruptcy, failure-to-pay and restructuring. This protection lasts until some specified maturity date. To pay for this protection, the protection buyer makes a regular stream of payments, known as the premium leg, to the protection seller. The size of these premium payments is calculated from a quoted default swap spread which is paid on the face value of the protection. These payments are made until a credit event occurs or until maturity, whichever occurs first.
\bold{Modeling Credit Using a Reduced-Form Approach}
The world of credit modelling is divided into two main approaches, one called the structural and the other called the reduced-form. In the structural approach, the idea is to characterize the default as being the consequence of some company event such as its asset value being insufficient to cover a repayment of debt.
Structural models are generally used to say at what spread corporate bonds should trade based on the internal structure of the firm. They therefore require information about the balance sheet of the firm and can be used to establish a link between pricing in the equity and debt markets. However, they are limited
in at least three important ways: they are hard to calibrate because internal company data is only published at most four times a year; they generally lack the flexibility to fit exactly a given term structure of spreads; and they cannot be easily extended to price credit derivatives.
In the reduced-form approach, the credit event process is modeled directly by modeling the probability of the credit event itself. Using a security pricing model based on this approach, this probability of default can be extracted from market prices. Reduced form models also generally have the flexibility to refit the prices of a variety of credit instruments of different maturities. They can also be extended to price more exotic credit derivatives. It is for these reasons that they are used for credit derivative pricing.
See \code{vignette("credule")} for more details.
}
\usage{
priceCDS(yieldcurveTenor, yieldcurveRate, creditcurveTenor, creditcurveSP,
cdsTenors, recoveryRate, numberPremiumPerYear = c(4,2,1,12),
numberDefaultIntervalPerYear = 12, accruedPremium = c(TRUE,FALSE))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{yieldcurveTenor}{
A double vector. Each value represents a tenor of the yield curve expressed in year (e.g. 1.0 for 1Y, 0.5 for 6M)
}
\item{yieldcurveRate}{
A double vector. Each value represents the discount rate (continuously compounded) for a particular tenor (e.g. 0.005 means 0.5\%, 0.02 means 2\%)
}
\item{creditcurveTenor}{
A double vector. Each value represents a tenor of the credit curve expressed in year (e.g. 1.0 for 1Y, 0.5 for 6M)
}
\item{creditcurveSP}{
A double vector. Each value represents the survival probability for a particular tenor (e.g. 0.98 means 98\%)
}
\item{cdsTenors}{
A double vector. Each value represents the maturity, expressed in years, of a Credit Default Swap which we want to price (e.g. 5.0 means 5Y)
}
\item{recoveryRate}{
A double. It represents the Recovery Rate in case of default (e.g. 0.40 means 40\% recovery, which is a standard value for Senior Unsecured debt)
}
\item{numberPremiumPerYear}{
An Integer. It represents the number of premiums paid per year. CDS premiums are usually paid quarterly (i.e. numberPremiumPerYear=4) and sometimes semi-annually (i.e. numberPremiumPerYear=2)
}
\item{numberDefaultIntervalPerYear}{
An Integer. It represents the number of timesteps used to perform the numerical integration required while computing the default leg value. It has been shown that a monthly discretisation usually gives a good approximation (Ref. Valuation of Credit Default Swaps, Dominic O'Kane and Stuart Turnbull)
}
\item{accruedPremium}{
A boolean. If set to TRUE, the accrued premium will be taken into account in the calculation of the premium leg value.
}
}
\value{
Returns a Dataframe with 2 columns: tenor and spread. The tenor column contains the tenor value given in parameter cdsTenors; the spread column gives the Credit Default Swap spreads (in decimal) for each tenor (e.g. 0.0050 is equivalent to 0.5\% or 50 bp).
}
\author{
Bertrand Le Nezet
}
\examples{
library(credule)
yieldcurveTenor = c(1,2,3,4,5,7)
yieldcurveRate = c(0.0050,0.0070,0.0080,0.0100, 0.0120,0.0150)
creditcurveTenor = c(1,3,5,7)
creditcurveSP = c(0.99,0.98,0.95,0.92)
cdsTenors = c(1,3,5,7)
cdsSpreads = c(0.0050,0.0070,0.00100,0.0120)
premiumFrequency = 4
defaultFrequency = 12
accruedPremium = TRUE
RR = 0.40
priceCDS(yieldcurveTenor,
yieldcurveRate,
creditcurveTenor,
creditcurveSP,
cdsTenors,
RR,
premiumFrequency,
defaultFrequency,
accruedPremium
)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ Credit Default Swap, Credit Default Swaps, CDS, spread, survival probability, survival probabilities, default probability, default probabilities, pricing, credit curve, bootstrapping, hazard rate, poisson process}
|
/credule/man/priceCDS.Rd
|
no_license
|
freephys/credule
|
R
| false
| false
| 5,817
|
rd
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "steel-plates-fault")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "Class")
lrn = makeLearner("classif.PART", par.vals = list(R = FALSE, B = FALSE, M = 2, J = FALSE), predict.type = "prob")
#:# hash
#:# 5c366254cfd778235fc51c318d447d98
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
/models/openml_steel-plates-fault/classification_Class/5c366254cfd778235fc51c318d447d98/code.R
|
no_license
|
pysiakk/CaseStudies2019S
|
R
| false
| false
| 728
|
r
|
library(shiny)
library(leaflet)
source("countryDataFetcher.R")
countryData <- fetchCountryData()
shinyUI(fluidPage(
titlePanel("Choose a country to see the map and flag!"),
sidebarLayout(
sidebarPanel(
selectizeInput("country",
label = "Choose country",
choices = countryData$name,
selected = "Brazil"),
h4("Documentation"),
p("This Shiny app lets you choose a country to see its map and ",
"flag."),
p("Under the hood, it uses the http://restcountries.eu Rest API ",
"to find the longitude and latitude of the country of your ",
"choice and then it centers the view of a Leaflet Map on this ",
"location."),
p("Besides, it also fetches the flag images for the countries ",
"and uses them as markers on the map."),
p("Enjoy!")
),
mainPanel(
leafletOutput("countryMap")
)
)
))
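# A hypothetical server.R counterpart (illustrative only; the column names
# 'lat', 'lng' and 'flag' below are assumptions, not confirmed by this file):
#   shinyServer(function(input, output) {
#     output$countryMap <- renderLeaflet({
#       row <- countryData[countryData$name == input$country, ]
#       leaflet() %>%
#         addTiles() %>%
#         setView(lng = row$lng, lat = row$lat, zoom = 4) %>%
#         addMarkers(lng = row$lng, lat = row$lat,
#                    icon = makeIcon(row$flag, iconWidth = 30))
#     })
#   })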
|
/ui.R
|
no_license
|
seuvitor/developing-data-products-course-project
|
R
| false
| false
| 1,056
|
r
|
rename.column <- function(df,old_name,new_name){
# rename column of data frame df that was called old_name to new_name
colnames(df)[colnames(df) == old_name] <- new_name
return(df)
}
remove.column <- function(df,cname){return(df[,-which(colnames(df) == cname)])}
collapse.columns <- function(df,cnames=colnames(df),groupby=NULL){
# INPUTS:
# df: dataframe
# cnames: column names to perform operation on, default to all columns
# groupby: column name to group variables by, treated separately from variables in cnames
# OUTPUTS:
# df.new: dataframe with 2 columns:
# values: all columns in cnames vertically concatenated.
# names: elements of cnames corresponding to rows
# groupby: groups of observations in df for each variable in cnames, these retain their names in df.new
df.names <- do.call('cbind',lapply(cnames, function(n) rep(n,nrow(as.matrix(df[,cnames])))))
df.new <- data.frame(values = as.vector(as.matrix(df[,cnames])),names=as.vector(df.names),stringsAsFactors=F)
if(!is.null(groupby)){
for(group.var in groupby){
df.grp <- do.call('cbind',lapply(cnames,function(n) df[,group.var]))
df.new[,group.var] <- as.vector(df.grp)
}
}
return(df.new)
}
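# Example usage (illustrative only, using the built-in iris data; note that a
# factor groupby column comes back as integer codes because of the cbind step):
#   df.long <- collapse.columns(iris, cnames = c("Sepal.Length", "Sepal.Width"),
#                               groupby = "Species")
#   head(df.long)  # columns: values, names, Species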
name <- function(x,x.names){
# INPUTS:
# x: vector or dataframe
# x.names: names for elements or columns of x
# OUTPUTS:
# x with x.names as names
names(x) <- x.names
return(x)
}
paste0.combinations <- function(x,y){
# INPUTS:
# x: character vector
# y: character vector
#
# OUTPUTS:
  # xy: every element of x pasted to every element of y, with x occurring first
xy <- as.vector(sapply(x, function(i)
sapply(y, function(j) paste0(i,j))))
return(xy)
}
tp.vec2mat <- function(v){
# INPUTS:
# v: vectorized transition probabilities
#
# OUTPUTS:
# m: tp matrix
return(t(matrix(v,sqrt(length(v)),sqrt(length(v)))))
}
matrix.to.df <- function(m,dnames){
# INPUTS:
# m: matrix
# dnames: 2 element list specifying dimnames for m
#
  # OUTPUTS:
  # df: m converted to a data frame, with dnames applied as its dimnames
dimnames(m) <- dnames
return(as.data.frame(m))
}
axis.sym <- function(x){
# INPUTS:
# x: vector
#
# OUTPUTS:
  # c(-max(abs(x)), max(abs(x))): symmetric axis limits, with the most extreme value setting both directions
return(c(-max(abs(x)),max(abs(x))))
}
|
/code/miscfxns/miscfxns.R
|
permissive
|
ejcorn/fir_pca_22q
|
R
| false
| false
| 2,281
|
r
|
library(burnr)
### Name: find_recording
### Title: Subset 'rings' data.frame to years that are considered
### recording.
### Aliases: find_recording
### ** Examples
require(plyr)
data(lgr2)
ddply(lgr2$rings, 'series', burnr:::find_recording, injury_event = TRUE)
|
/data/genthat_extracted_code/burnr/examples/find_recording.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 273
|
r
|
#' @title Modified Mann-Kendall Test For Serially Correlated Data Using the Yue and Wang (2004) Variance Correction Approach Using the Lag-1 Correlation Coefficient Only
#'
#' @description Time series data is often influenced by serial correlation. When data are not random and influenced by autocorrelation, modified Mann-Kendall tests may be used for trend detection. Yue and Wang (2004) have proposed a variance correction approach to address the issue of serial correlation in trend analysis. Data are initially detrended and the effective sample size is calculated using the lag-1 autocorrelation coefficient.
#'
#' @importFrom stats acf median pnorm qnorm
#'
#' @param x - Time series data vector
#'
#' @return Corrected Zc - Z statistic after variance Correction
#'
#' new P.value - P-value after variance correction
#'
#' N/N* - Effective sample size
#'
#' Original Z - Original Mann-Kendall Z statistic
#'
#' Old P-value - Original Mann-Kendall p-value
#'
#' Tau - Mann-Kendall's Tau
#'
#' Sen's Slope - Sen's slope
#'
#' old.variance - Old variance before variance Correction
#'
#' new.variance - Variance after correction
#'
#' @references Kendall, M. (1975). Rank Correlation Methods. Griffin, London, 202 pp.
#'
#' @references Mann, H. B. (1945). Nonparametric Tests Against Trend. Econometrica, 13(3): 245-259.
#'
#' @references Sen, P. K. (1968). Estimates of the Regression Coefficient Based on Kendall’s Tau. Journal of the American Statistical Association, 63(324): 1379. <doi:10.2307/2285891>
#'
#' @references Yue, S. and Wang, C. Y. (2004). The Mann-Kendall test modified by effective sample size to detect trend in serially correlated hydrological series. Water Resources Management, 18(3): 201–218. <doi:10.1023/B:WARM.0000043140.61082.60>
#'
#' @details The variance correction approach suggested by Yue and Wang (2004) is implemented in this function. Effective sample size is calculated based on the AR(1) assumption.
#'
#' @examples x<-c(Nile)
#' mmky1lag(x)
#'
#' @export
#'
mmky1lag <-function(x) {
# Initialize the test parameters
options(scipen = 999)
# Time series vector
x = x
# Modified Z statistic after variance correction as per Yue and Wang (2004) method
z = NULL
# Original Z statistic for Mann-Kendall test before variance correction
z0 = NULL
  # Modified p-value after variance correction as per Yue and Wang (2004) method
pval = NULL
# Original p-value for Mann-Kendall test before variance correction
pval0 = NULL
# Initialize Mann-Kendall S statistic
S = 0
# Initialize Mann-Kendall Tau
Tau = NULL
# Correction factor n/n* value as per Yue and Wang (2004) method
essf = NULL
# To test whether the data is in vector format
if (is.vector(x) == FALSE) {
stop("Input data must be a vector")
}
# To test whether the data values are finite numbers and attempting to eliminate non-finite numbers
if (any(is.finite(x) == FALSE)) {
x[-c(which(is.finite(x) == FALSE))] -> x
warning("The input vector contains non-finite numbers. An attempt was made to remove them")
}
n <- length(x)
#Specify minimum input vector length
if (n < 3) {
stop("Input vector must contain at least three values")
}
# Calculating Sen's slope
rep(NA, n * (n - 1)/2) -> V
k = 0
for (i in 1:(n-1)) {
for (j in (i+1):n) {
k = k+1
V[k] = (x[j]-x[i])/(j-i)
}
}
median(V,na.rm=TRUE)->slp
# Calculating trend-free series
t=1:length(x)
xn=(x[1:n])-((slp)*(t))
# Calculating Mann-Kendall S statistic
for (i in 1:(n-1)) {
for (j in (i+1):n) {
S = S + sign(x[j]-x[i])
}
}
  # Calculating the autocorrelation function of the detrended series (ro)
  # lag.max can be edited to include a larger number of lags
  acf(xn, lag.max=1, plot=FALSE)$acf[-1] -> ro
  # Calculating significant autocorrelation at a given confidence interval (rof)
rep(NA,length(ro)) -> rof
for (i in 1:(length(ro))) {
rof[i] <- ro[i]
}
  # Calculating sum((1-(k/n))*rof^k) for k=1,2,...,(n-1)
  ess=0
  for(k in 1:(n-1)){
    ess<-ess+((1-(k/n))*(rof^(k)))
  }
# Calculating variance correction factor (n/n*) as per Yue and Wang (2004)
essf = 1 + 2*(ess)
# Calculating Mann-Kendall variance before correction (Var(s))
var.S = n*(n-1)*(2*n+5)*(1/18);
if(length(unique(x)) < n) {
unique(x) -> aux
for (i in 1:length(aux)) {
length(which(x == aux[i])) -> tie
if (tie > 1) {
var.S = var.S - tie*(tie-1)*(2*tie+5)*(1/18)
}
}
}
# Calculating new variance Var(s)*=(Var(s))*(n/n*) as per Yue and Wang (2004)
VS = var.S * essf
# Calculating Z statistic values before and after variance correction
if (S == 0) {
z = 0
z0 = 0
}else
if (S > 0) {
z = (S-1)/sqrt(VS)
z0 = (S-1)/sqrt(var.S)
} else {
z = (S+1)/sqrt(VS)
z0 = (S+1)/sqrt(var.S)
}
# Calculating p-Value before and after variance correction
pval = 2*pnorm(-abs(z))
pval0 = 2*pnorm(-abs(z0))
# Calculating Kendall's Tau
Tau = S/(.5*n*(n-1))
# Listing all outputs
return(c("Corrected Zc" = z,
"new P-value" = pval,
"N/N*" = essf,
"Original Z" = z0,
"old P.value" = pval0,
"Tau" = Tau,
"Sen's slope" = slp,
"old.variance"=var.S,
"new.variance"= VS))
}
|
/R/mmky1.R
|
no_license
|
patakamuri/modifiedmk
|
R
| false
| false
| 5,413
|
r
|
library(plyr)
# Download the dataset
if(!file.exists("./getcleandata")){dir.create("./getcleandata")}
fileurl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileurl, destfile = "./getcleandata/projectdataset.zip")
# Unzip the dataset
unzip(zipfile = "./getcleandata/projectdataset.zip", exdir = "./getcleandata")
# 1. Merge the training and test datasets
# Reading files
# Reading training datasets
x_train <- read.table("./getcleandata/UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./getcleandata/UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./getcleandata/UCI HAR Dataset/train/subject_train.txt")
# Reading test datasets
x_test <- read.table("./getcleandata/UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./getcleandata/UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./getcleandata/UCI HAR Dataset/test/subject_test.txt")
# Reading feature vector
features <- read.table("./getcleandata/UCI HAR Dataset/features.txt")
# Reading activity labels
activityLabels = read.table("./getcleandata/UCI HAR Dataset/activity_labels.txt")
# Assigning variable names
colnames(x_train) <- features[,2]
colnames(y_train) <- "activityID"
colnames(subject_train) <- "subjectID"
colnames(x_test) <- features[,2]
colnames(y_test) <- "activityID"
colnames(subject_test) <- "subjectID"
colnames(activityLabels) <- c("activityID", "activityType")
# Merging all datasets into one set
alltrain <- cbind(y_train, subject_train, x_train)
alltest <- cbind(y_test, subject_test, x_test)
finaldataset <- rbind(alltrain, alltest)
# 2. Extracting only the measurements on the mean and sd for each measurement
# Reading column names
colNames <- colnames(finaldataset)
# Create vector for defining ID, mean, and sd
mean_and_std <- (grepl("activityID", colNames) |
grepl("subjectID", colNames) |
grepl("mean..", colNames) |
grepl("std...", colNames)
)
# Making the necessary subset
setforMeanandStd <- finaldataset[ , mean_and_std == TRUE]
# 3. Use descriptive activity names
setWithActivityNames <- merge(setforMeanandStd, activityLabels,
by = "activityID",
all.x = TRUE)
# 4. Label the data set with descriptive variable names
# 5. Creating a second, independent tidy data set with the avg of each variable for each activity and subject
# Making a second tidy data set
tidySet <- aggregate(. ~subjectID + activityID, setWithActivityNames, mean)
tidySet <- tidySet[order(tidySet$subjectID, tidySet$activityID), ]
#Writing second tidy data set into a txt file
write.table(tidySet, "tidySet.txt", row.names = FALSE)
|
/run_analysis.R
|
no_license
|
Charvick-Nuvusetty/Getting-and-Cleaning-Data-Week-4-Assignment
|
R
| false
| false
| 2,800
|
r
|
#' @title get_eurostat_raw
#' @description Download data from the eurostat database. Downloads datasets
#' from the eurostat database and transforms into tabular format.
#' @param id A code name for the dataset of interest. See the table of
#' contents of eurostat datasets for more details.
#' @return A dataset in data.frame format. First column contains comma
#'    separated codes of cases. Other columns usually correspond to
#' years and column names are years with preceding X. Data is in
#' character format as it contains values together with eurostat
#' flags for data.
#' @seealso \code{\link{get_eurostat}}.
#' @details Data is downloaded from
#' \url{http://ec.europa.eu/eurostat/estat-navtree-portlet-prod/BulkDownloadListing}
#' @references see citation("eurostat")
#' @importFrom utils download.file
#' @importFrom utils read.table
#' @author Przemyslaw Biecek, Leo Lahti and Janne Huovari \email{ropengov-forum@@googlegroups.com}
#' @examples \dontrun{
#' tmp <- get_eurostat_raw("educ_iste")
#' head(tmp)
#' }
#' @keywords utilities database
get_eurostat_raw <- function(id) {
base <- eurostat_url()
url <- paste(base,
"estat-navtree-portlet-prod/BulkDownloadListing?sort=1&file=data%2F",
id, ".tsv.gz", sep = "")
tfile <- tempfile()
on.exit(unlink(tfile))
# download and read file
download.file(url, tfile)
dat <- read.table(gzfile(tfile), sep = "\t", na.strings = ": ",
header = TRUE, stringsAsFactors = FALSE)
# check validity
if (ncol(dat) < 2 | nrow(dat) < 1) {
if (grepl("does not exist or is not readable", dat[1])) {
stop(id, " does not exist or is not readable")
} else {
stop(paste("Could not download ", id))
}
}
dat
}
|
/R/get_eurostat_raw.R
|
no_license
|
YohanRobinson/eurostat
|
R
| false
| false
| 1,778
|
r
|
\name{world.legacy}
\alias{world.legacy}
\title{Legacy world map DEPRECATED}
\description{A function to switch the "world" map to the old (1990) version and back.}
\usage{
world.legacy(wl=FALSE)
}
\arguments{
\item{wl}{Set to TRUE to switch to the old world map. Set FALSE to switch back to the new map. In either case, the old legacy map can always be reached as \code{legacy_world}.}
}
\details{
This function switches the "world" database to the old (1990) version. It is provided temporarily for cases where the new updated world map causes problems. In this way, code may run unchanged and still use the legacy world database. It is highly recommended, though, to use this possibility only as a last resort. Addressing \code{mapdata::worldLores} directly or updating code to use the new database is the recommended option.
The new version is updated to about 2013, so has many new countries that did not exist in 1990 (e.g. in Central and Eastern Europe).
This function may be removed from the \code{maps} package in future releases. The legacy world database will be available as \code{worldLores} from the \code{mapdata} package.
}
\seealso{\code{\link{map}},\code{\link{world}},\code{\link{legacy_world}}}
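% A hypothetical usage sketch added for illustration (not in the original Rd):
\examples{
\dontrun{
world.legacy(TRUE)    # switch "world" to the 1990 legacy database
map("world")          # draws the legacy map
world.legacy(FALSE)   # switch back to the updated map
}
}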
|
/man/world.legacy.Rd
|
no_license
|
theGreatWhiteShark/maps
|
R
| false
| false
| 1,220
|
rd
|
library(foreach)
library(doParallel)
library(rlecuyer)
library(Rcpp)
library(microbenchmark)
library(ggplot2)
# set up working directory and load data
working.dir <- file.path("C:/Users/rrruss/Desktop/stat215a/stat215af14/lab3")
load(file.path(working.dir, "lingBinary.RData"))
# set up parallelization
nCores <- 9
registerDoParallel(nCores)
# ensure independent streams of random numbers with foreach
RNGkind("L'Ecuyer-CMRG")
# look at dataset and extract the columns for clustering
dim(lingBinary)
ling.qdata <- lingBinary[7:474]
# if memory is an issue, lingBinary can be removed
# since it is not needed any more
#rm(lingBinary)
CMatrix <- function(clust, k) {
# Creates the C matrix for a given clustering partition,
# as defined at the top of page 3 in Ben-Hur et al.
#
# Args:
# clust: a vector encoding which cluster the data point belongs to
# k : number of clusters, which should be equal to max(clust)
#
# Returns:
# C matrix, which is a square symmetric matrix
# with dimension equal to length(clust)
#
clust.len <- length(clust)
# initialize the matrix with NAs
c.mat <- matrix(rep(NA, clust.len ^ 2), nrow=clust.len, ncol=clust.len)
for (i in 1:clust.len) {
for (j in 1:clust.len) {
if (clust[i] == clust[j]) {
c.mat[i, j] <- c.mat[j, i] <- 1
} else {
c.mat[i, j] <- c.mat[j, i] <- 0
}
}
}
# force the diagonal to be 0s, as specified in Ben-Hur et al.
diag(c.mat) <- 0
return(c.mat)
}
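# A vectorized equivalent of CMatrix (sketch for comparison; not used below).
# outer() builds the whole indicator matrix at once, avoiding the O(n^2)
# double loop in interpreted R:
CMatrixFast <- function(clust) {
  c.mat <- 1 * outer(clust, clust, "==")
  diag(c.mat) <- 0
  return(c.mat)
}
# e.g. all(CMatrixFast(c(1, 1, 2)) == CMatrix(c(1, 1, 2), k = 2))  # TRUE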
ComputeSimSlow <- function(clust1, clust2, k) {
# computes correlation similarity between two clusters (Fowlkes-Mallows index)
# the algorithm implemented here is the one specified in Ben-Hur et al.
#
# this function runs very slowly and should only be used for sanity checking
# the results of subsequent implementations of calculating the FM index
#
#
# Requires: CMatrix()
#
# Args:
# clust1: a numeric vector encoding which cluster the data point belongs to,
# for partition 1
# clust2: a numeric vector encoding which cluster the data point belongs to,
# for partition 2
# clust1 and clust2 are assumed to be ordered, on the same set of data points
# so that clust1[i] and clust2[i] both refer to the same data point i
#
# k : number of clusters,
# which should be equal to max(clust1) and max(clust2)
#
# Returns:
# FM index for a pair of clustering partitions
# (a real number returned as a numerical value)
#
c.mat1 <- CMatrix(clust1, k)
c.mat2 <- CMatrix(clust2, k)
l1.l2 <- sum(c.mat1 * c.mat2)
l1.l1 <- sum(c.mat1) # or sum(table(clust1)^2) - length(clust1)
l2.l2 <- sum(c.mat2) # or sum(table(clust2)^2) - length(clust2)
corr <- l1.l2 / sqrt(l1.l1 * l2.l2)
return(corr)
}
ComputeSim <- function(clust1, clust2, k) {
# computes correlation similarity between two clusters (Fowlkes-Mallows index)
# the algorithm implemented here is due to Wikipedia
#
# Args:
# clust1: a numeric vector encoding which cluster the data point belongs to,
# for partition 1
# clust2: a numeric vector encoding which cluster the data point belongs to,
# for partition 2
# clust1 and clust2 are assumed to be ordered, on the same set of data points
# so that clust1[i] and clust2[i] both refer to the same data point i
#
# k : number of clusters,
# which should be equal to max(clust1) and max(clust2)
#
# Returns:
# FM index for the pair of clustering partitions
# (a real number returned as a numerical value)
#
clust.len <- length(clust1)
k.mat <- matrix(rep(NA, k ^ 2), nrow=k, ncol=k)
for (rw in 1:k) {
for (cl in 1:k) {
k.mat[rw, cl] <- length(intersect(which(clust1==rw), which(clust2==cl)))
}
}
tk <- sum(k.mat ^ 2) - clust.len
pk <- sum(colSums(k.mat) ^ 2) - clust.len
qk <- sum(rowSums(k.mat) ^ 2) - clust.len
return(tk / sqrt(pk * qk))
}
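# An equivalent sketch that builds the k x k contingency matrix with a single
# table() call instead of the intersect() double loop (illustrative
# alternative; not called below):
ComputeSimTable <- function(clust1, clust2, k) {
  k.mat <- unclass(table(factor(clust1, levels = 1:k),
                         factor(clust2, levels = 1:k)))
  clust.len <- length(clust1)
  tk <- sum(k.mat ^ 2) - clust.len
  pk <- sum(colSums(k.mat) ^ 2) - clust.len
  qk <- sum(rowSums(k.mat) ^ 2) - clust.len
  return(tk / sqrt(pk * qk))
}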
cppFunction('double SimC(NumericVector clust1, NumericVector clust2) {
/* calculates the Fowlkes-Mallows index for two clustering partitions
the algorithm implemented here is described in Ben-Hur et al.
Args:
clust1: a numeric vector encoding which cluster the data point
belongs to, for partition 1
clust2: a numeric vector encoding which cluster the data point
belongs to, for partition 2
clust1 and clust2 are assumed to be ordered, on the same set of data points
so that clust1[i] and clust2[i] both refer to the same data point i
Returns:
FM-index for the pair of clustering partitions
(a real number returned as a double) */
//initialize and define variables
int i, j, n;
double x, y, z, sim;
n = clust1.size();
x = 0.0;
y = 0.0;
z = 0.0;
for(i = 0; i < n; i ++){
for(j = i + 1; j < n; j ++){
if((clust1[i] == clust1[j]) && (clust2[i] == clust2[j])){
x += 1.0;
} else if((clust1[i] == clust1[j]) && (clust2[i] != clust2[j])){
y += 1.0;
} else if((clust1[i] != clust1[j]) && (clust2[i] == clust2[j])){
z += 1.0;
}
}
}
sim = x / sqrt((x + y) * (x + z));
return sim;
}')
ClustStability <- function(m) {
# implements the model explorer algorithm outlined in Ben-Hur et al.
# for k-means clustering of the binary-coded linguistic data provided
# for 9 values of k between 2 and 10 inclusive
#
  # Requires: library(foreach), library(doParallel), ComputeSim()
#
# Args:
# m: fraction of data to be subsampled
#
#
# Returns:
# a matrix with 100 rows and 9 columns, where each column contains
# the 100 similarity values calculated for each value of k
#
num.obs <- nrow(ling.qdata)
k.similarities <- foreach(k=2:10, .combine='cbind') %dopar% {
# initialize the vector of similarities with NAs
sim <- rep(NA, 100)
for (i in 1:100) {
# sample and sort for easy comparison later
# no need to worry about size = m*num.obs not being an integer
# since it is rounded down in the sample function
sub1.index <- sort(sample(1:num.obs, size=m*num.obs, replace=FALSE))
sub2.index <- sort(sample(1:num.obs, size=m*num.obs, replace=FALSE))
# find intersection, which is already sorted
intersection <- intersect(sub1.index, sub2.index)
# error handling in the very unlikely case of an empty intersection
if (m <= 0.5 && length(intersection) == 0) {
# no similarity value is inserted into the sim vector
# leaving the option for the NA to be subsequently removed or dealt with
# in another manner if this warning is thrown
warning("An intersection is empty. Similarity between clusters cannot be computed.")
} else {
# cluster and immediately extract the clusters
# since we are only interested in that
clust1 <- kmeans(ling.qdata[sub1.index, ], centers=k, iter.max=40)$cluster
clust2 <- kmeans(ling.qdata[sub2.index, ], centers=k, iter.max=40)$cluster
# restrict ourselves to looking at the clusterings on the intersection
# followed by computing the similarity for this pair of partitions
clust1.int <- unname(clust1[row.names(ling.qdata[intersection, ])])
clust2.int <- unname(clust2[row.names(ling.qdata[intersection, ])])
sim[i] <- ComputeSim(clust1.int, clust2.int, k)
}
}
# output sim so that foreach will combine the vectors using cbind()
sim
}
return(k.similarities)
}
print("Finished defining functions")
# run and save so that plotting can be done separately
similarities08 <- ClustStability(m = 0.8)
save(similarities08, file="sim08.RData")
# testing running times
set.seed(387)
testclust1 <- sample(1:6, size=45000, replace=TRUE)
testclust2 <- sample(1:6, size=45000, replace=TRUE)
microbenchmark(ComputeSim(testclust1, testclust2, 6),
SimC(testclust1, testclust2), times=5)
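# A small consistency check (illustrative): all three implementations should
# agree up to floating point on the same pair of partitions.
small1 <- c(1, 1, 2, 2, 3)
small2 <- c(1, 2, 2, 2, 3)
c(ComputeSimSlow(small1, small2, 3),
  ComputeSim(small1, small2, 3),
  SimC(small1, small2))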
|
/lab3/kcluststability.R
|
no_license
|
rrruss88/stat215af14
|
R
| false
| false
| 8,053
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rf.R
\name{underModels.rf}
\alias{underModels.rf}
\title{Generate undermodels to 'rf' class object}
\usage{
underModels.rf(model, B = 100, ...)
}
\arguments{
\item{model}{'rf' class object}
\item{B}{numeric value which is the number of undermodels to generate}
\item{...}{further arguments passed to or from other methods.}
}
\value{
a list of 'rf' models
}
\description{
Allows generating undermodels by bootstrapping the dataset used for training.
}
\examples{
library(mfe)
data(indicateurs)
X <- indicateurs[, -c(1,2,3)]
Y <- indicateurs[,1]
model <- rf(X = X, Y = Y, Ylabel = colnames(indicateurs)[1])
model$undermodels <- underModels.rf(model, B = 10)
}
\seealso{
\code{\link{rf}}
}
|
/man/underModels.rf.Rd
|
no_license
|
alex-conanec/OptFilBov
|
R
| false
| true
| 765
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/list.R
\name{list_unstaged_files}
\alias{list_unstaged_files}
\title{Get a list of unstaged files in a repo}
\usage{
list_unstaged_files(path = getwd(), verbose = TRUE)
}
\description{
Get a list of unstaged files in a repo
}
|
/man/list_unstaged_files.Rd
|
no_license
|
meerapatelmd/glitter
|
R
| false
| true
| 304
|
rd
|
#' Source specific lines in an R file
#'
#' @param file character string with the path to the file to source.
#' @param start row number of first line in \code{file} to source.
#' @param end row number of last line in \code{file} to source.
#' @export
#' @examples
#' # non-operative
#' # source_lines("load-data.R", start = 50)
source_lines = function (file, start = NULL, end = NULL){
if (length(start) > 1) stop("Length of start > 1.")
if (length(end) > 1) stop("Length of end > 1.")
lines <- readLines(file)
if (is.null(start)) start <- 1
if (is.null(end)) end <- length(lines)
source(textConnection(lines[start:end]))
}
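# A self-contained check of the behaviour (hypothetical file, written to a
# tempfile; kept as comments so it does not run on package load):
# tmp <- tempfile(fileext = ".R")
# writeLines(c("x <- 'line 1'", "x <- 'line 2'", "x <- 'line 3'"), tmp)
# source_lines(tmp, start = 2, end = 2)
# x  # "line 2" -- only the second line was evaluated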
|
/R/source_lines.R
|
no_license
|
wepelham3/sack2
|
R
| false
| false
| 643
|
r
|
# This file consists of functions related to estimating logistic regression models using the original
# PGG algorithm and our unbiased estimator
# from util_logisticregression --------------------------------------------
#'@export
logisticregression_xbeta <- function(X, beta){
return(xbeta_(X, beta))
}
#'@export
logisticregression_sigma <- function(X, w){
return(sigma_(X, w))
}
#'@export
logisticregression_sigma_function <- function(omega, X, invB){
return(solve(logisticregression_sigma(X, omega) + invB))
}
#'@export
logisticregression_m_function <- function(omega, Sigma, X, Y, invBtimesb){
return(Sigma %*% (t(X) %*% matrix(Y - rep(0.5, length(Y)), ncol = 1) + invBtimesb))
}
# from logistic_precomputation --------------------------------------------
#'@rdname logistic_precomputation
#'@title Precomputation to prepare for the Polya-Gamma sampler
#'@description This function takes the canonical elements defining the logistic regression
#' problem (the vector of outcomes Y, covariate matrix X, the prior mean b and the prior variance B),
#' and precomputes some quantities repeatedly used in the Polya-Gamma sampler and variants of it.
#' The precomputed quantities are returned in a list, which is then meant to be passed to the samplers.
#'@export
logisticregression_precomputation <- function(Y, X, b, B){
invB <- solve(B)
invBtimesb <- invB %*% b
Ykappa <- matrix(Y - rep(0.5, length(Y)), ncol=1)
XTkappa <- t(X) %*% Ykappa
KTkappaplusinvBtimesb <- XTkappa + invBtimesb
return(list(n=nrow(X), p=ncol(X), X=X, Y=Y, b=b, B=B,
invB=invB, invBtimesb=invBtimesb, KTkappaplusinvBtimesb=KTkappaplusinvBtimesb))
}
# from m_and_sigma --------------------------------------------------------
# The following function computes m(omega) and Sigma(omega)... (or what we really need instead)
# it returns m (= m(omega)), Sigma_inverse = Sigma(omega)^{-1},
# as well as Cholesky_inverse and Cholesky that are such that
# Cholesky_inverse is the lower triangular matrix L, in the decomposition Sigma^{-1} = L L^T
# whereas Cholesky is the lower triangular matrix Ltilde in the decomposition Sigma = Ltilde^T Ltilde
#'@export
logisticregression_m_and_sigma <- function(omega, X, invB, KTkappaplusinvBtimesb){
return(m_sigma_function_(omega, X, invB, KTkappaplusinvBtimesb))
}
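# For exposition, a plain-R sketch of the same quantities (the package calls
# the compiled m_sigma_function_ instead). With kappa = Y - 1/2:
#   Sigma(omega)^{-1} = X^T diag(omega) X + B^{-1}
#   m(omega)          = Sigma(omega) %*% (X^T kappa + B^{-1} b)
m_and_sigma_plainR <- function(omega, X, invB, KTkappaplusinvBtimesb){
  Sigma_inverse <- t(X) %*% (omega * X) + invB  # omega * X scales the rows of X
  Sigma <- solve(Sigma_inverse)
  m <- Sigma %*% KTkappaplusinvBtimesb
  return(list(m = m, Sigma_inverse = Sigma_inverse))
}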
# from pg_gibbs -----------------------------------------------------------
#'@rdname pg_gibbs
#'@title Polya-Gamma Gibbs sampler
#'@description This implements the sampler proposed in
#' Nicholas G Polson, James G Scott, and Jesse Windle. Bayesian inference for logistic models using
#' Polya–Gamma latent variables. Journal of the American statistical Association, 108(504):1339–1349, 2013.
#' The arguments are:
#' \itemize{
#' \item niterations: the number of desired MCMC iterations,
#' \item logistic_setting: a list of precomputed quantities obtained via 'logistic_precomputation'.
#' }
#'@return a matrix where each row corresponds to an iteration of the sampler, and contains
#' the regression coefficient at that iteration.
#'@export
pg_gibbs <- function(niterations, logistic_setting){
n <- nrow(logistic_setting$X)
p <- ncol(logistic_setting$X)
betas <- matrix(0, ncol=p, nrow=niterations)
beta <- matrix(0, ncol=p, nrow=1)
for (iteration in 1:niterations){
zs <- abs(logisticregression_xbeta(logistic_setting$X, beta))
w <- BayesLogit::rpg(n, h=1, z=zs)
res <- logisticregression_m_and_sigma(w, logistic_setting$X, logistic_setting$invB, logistic_setting$KTkappaplusinvBtimesb)
beta <- fast_rmvnorm_chol(1, res$m, res$Cholesky)
betas[iteration,] <- beta
}
return(betas)
}
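# A hedged usage sketch on synthetic data (commented out because it relies on
# the package's compiled internals, e.g. xbeta_ and m_sigma_function_):
# n <- 100; p <- 2
# X <- cbind(1, rnorm(n))
# Y <- rbinom(n, 1, plogis(X %*% c(-0.5, 1)))
# setting <- logisticregression_precomputation(Y, X, b = rep(0, p), B = diag(10, p))
# chain <- pg_gibbs(1000, setting)
# colMeans(chain[501:1000, ])  # crude posterior mean estimate of beta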
# from sample_w -----------------------------------------------------------
# w_indep <- function(beta1,beta2,X){
# n <- nrow(X)
# w1 <- rep(0., n)
# w2 <- rep(0., n)
# z1s <- abs(xbeta(X, beta1))
# z2s <- abs(xbeta(X, beta2))
#
# for (i in 1:n){
# w1[[i]] <- rpg(num=1, h=1, z=z1s[i])
# w2[[i]] <- rpg(num=1, h=1, z=z2s[i])
# }
# return(list(w1=w1, w2=w2))
# }
#'@export
w_rejsampler_caller <- function(beta1,beta2,X){
w_rejsamplerC(beta1, beta2, X)
}
#'@export
w_max_coupling_caller <- function(beta1,beta2,X){
w_max_couplingC(beta1, beta2, X)
}
#'@export
w_rejsampler <- function(beta1, beta2, X){
# we can do rejection sampling, sampling from z1 and aiming for z2,
# provided z2 > z1. The ratio of densities is proportional to
logratio <- function(proposal, z_min, z_max){
return(-proposal * 0.5 * (z_max^2 - z_min^2))
}
  n <- nrow(X)
  w1 <- rep(0., n)
w2 <- rep(0., n)
z1s <- abs(logisticregression_xbeta(X, beta1))
z2s <- abs(logisticregression_xbeta(X, beta2))
for (i in 1:n){
z_i <- c(z1s[i], z2s[i])
z_min <- min(z_i)
z_max <- max(z_i)
proposal <- BayesLogit::rpg(num=1, h=1, z=z_min)
w_min <- proposal
if (log(runif(1)) < logratio(proposal, z_min, z_max)){
w_max <- proposal
} else {
w_max <- BayesLogit::rpg(num=1, h=1, z=z_max)
}
if (which.min(z_i) == 1){
w1[i] <- w_min
w2[i] <- w_max
} else {
w2[i] <- w_min
w1[i] <- w_max
}
}
return(list(w1=w1, w2=w2))
}
w_max_coupling <- function(beta1, beta2, X){
  n <- nrow(X)
  w1s <- rep(0., n)
w2s <- rep(0., n)
z1s <- abs(logisticregression_xbeta(X, beta1))
z2s <- abs(logisticregression_xbeta(X, beta2))
for (i in 1:n){
z1 <- z1s[i]
z2 <- z2s[i]
w1 <- BayesLogit::rpg(num=1, h=1, z=z1)
w1s[[i]] <- w1
u <- runif(1,0,cosh(z1/2)*exp(-0.5*z1^2*w1))
if(u <= cosh(z2/2)*exp(-0.5*z2^2*w1)){
w2 <- w1
} else {
accept <- FALSE
while(!accept){
w2 <- BayesLogit::rpg(num=1, h=1, z=z2)
u <- runif(1,0,cosh(z2/2)*exp(-0.5*z2^2*w2))
if(u > cosh(z1/2)*exp(-0.5*z1^2*w2)){
accept <- TRUE
}
}
}
w2s[[i]] <- w2
}
return(list(w1=w1s, w2=w2s))
}
#'@export
sample_w <- function(beta1, beta2, X, mode='max'){
if (mode == 'rej_samp'){
w1w2_mat <- w_rejsamplerC(beta1, beta2, X)
w1w2 <- list(w1=w1w2_mat[,1], w2=w1w2_mat[,2])
} else if (mode == 'max'){
w1w2_mat <- w_max_couplingC(beta1, beta2, X)
w1w2 <- list(w1=w1w2_mat[,1], w2=w1w2_mat[,2])
}
return(w1w2)
}
# from sample_beta --------------------------------------------------------
#'@export
sample_beta <- function(w1, w2, logistic_setting, mode="max_coupling", mc_prob=0.5){
KTkappaplusinvBtimesb <- logistic_setting$KTkappaplusinvBtimesb
res1 <- logisticregression_m_and_sigma(w1, logistic_setting$X, logistic_setting$invB, KTkappaplusinvBtimesb)
res2 <- logisticregression_m_and_sigma(w2, logistic_setting$X, logistic_setting$invB, KTkappaplusinvBtimesb)
if(mode=='max_coupling'){
x <- gaussian_max_coupling_cholesky(res1$m, res2$m, res1$Cholesky, res2$Cholesky, res1$Cholesky_inverse, res2$Cholesky_inverse)
beta1 <- x[,1]
beta2 <- x[,2]
} else if(mode=='opt_transport'){
x <- gaussian_opt_transport(1, res1$m, res2$m, res1$Cholesky, res2$Cholesky, res1$Cholesky_inverse, res2$Cholesky_inverse)
beta1 <- x[[1]][,1]
beta2 <- x[[1]][,2]
} else if(mode=='both'){
if (runif(1) < mc_prob){
x <- gaussian_max_coupling_cholesky(res1$m, res2$m, res1$Cholesky, res2$Cholesky, res1$Cholesky_inverse, res2$Cholesky_inverse)
beta1 <- x[,1]
beta2 <- x[,2]
} else {
x <- gaussian_opt_transport(1, res1$m, res2$m, res1$Cholesky, res2$Cholesky, res1$Cholesky_inverse, res2$Cholesky_inverse)
beta1 <- x[[1]][,1]
beta2 <- x[[1]][,2]
}
} else {
stop('invalid coupling method')
}
return(list(beta1=beta1, beta2=beta2))
}
|
/R/logistic_regression.R
|
no_license
|
jiangyuinsar/debiasedmcmc
|
R
| false
| false
| 7,599
|
r
|
#' @rdname fslacos
#' @aliases fsl_acos
#' @export
#' @note Functions with underscores have different defaults
#' and will return an output filename, so to be used for piping
fsl_acos = function(
...,
outfile = tempfile(fileext = ".nii.gz"),
retimg = FALSE
) {
fslacos(..., outfile = outfile, retimg = retimg)
return(outfile)
}
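# Hypothetical piped usage (requires a local FSL installation; the file name
# is illustrative):
# acos_img <- fsl_acos("my_cosine_image.nii.gz")
# acos_img  # character path to the temporary output, usable in the next step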
|
/R/fsl_acos.R
|
no_license
|
muschellij2/fslr
|
R
| false
| false
| 338
|
r
|
#' A maximum correlation coefficient classifier (CL) object
#'
#' An implementation of a maximum correlation coefficient classifier. Like all classifiers, this classifier
#' learns a model based on training data and then makes predictions on new test data.
#' This object uses \href{https://cran.r-project.org/web/packages/R6/vignettes/Introduction.html}{R6 package}
#'
#'
#' @section max_correlation_CL constructor:
#'
#' \describe{
#' \item{\code{max_correlation_CL$new()}}{
#' if successful, will return a new \code{max_correlation_CL} object.
#' }}
#'
#' @section Methods:
#' \describe{
#' \item{\code{get_predictions(train.data, all.times.test.data)}}{
#' Learns a model from the train.data and then makes predictions on the
#' all.times.test.data data set.
#' }}
#'
#'
#'
#' @import R6
#' @export
max_correlation_CL <- R6Class("max_correlation_CL",
public = list(
# no properties for this classifier
# the constructor does not take any arguments
initialize = function() {},
# methods
# could break this up into two methods: train() and test()
get_predictions = function(train_data, all_times_test_data) {
### Train the classifier
prototypes <- train_data %>% group_by(labels) %>% summarise_all(funs(mean))
### Test the classifier
train_test_cor <- cor(t(prototypes[, 2:dim(prototypes)[2]]), t(select(all_times_test_data, -labels, -time)))
#train_test_cor <- cor(t(prototypes[, 2:133]), t(select(all_times_test_data, -labels, -time)))
# get the predicted labels
# predicted_inds <- apply(train_test_cor, 2, which.max) # need to create rand.which.max() function...
#predicted_inds <- apply(train_test_cor, 2, which.is.max) # only slightly slower but breaks ties
predicted_inds <- apply(train_test_cor, 2, rand_which_max) # only slightly slower but breaks ties
predicted_labels <- prototypes$labels[predicted_inds]
# create a data frame that has all the results
results <- data.frame(time = all_times_test_data$time, actual_labels = all_times_test_data$labels,
predicted_labels = predicted_labels) %>%
mutate(correct = actual_labels == predicted_labels)
# get the decision values
decision_values <- data.frame(t(train_test_cor))
names(decision_values) <- paste0('decision_val_', prototypes$labels)
# return the results
results <- cbind(results, decision_values)
return(results)
} # end the get_predictions method
)
) # end the class
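# A hedged usage sketch on toy data (as comments; assumes dplyr is attached
# and the package helper rand_which_max() is available):
# library(dplyr)
# train <- data.frame(labels = rep(c("A", "B"), each = 5),
#                     matrix(rnorm(40), 10, 4))
# test  <- data.frame(labels = rep(c("A", "B"), 3), time = rep(1:3, each = 2),
#                     matrix(rnorm(24), 6, 4))
# cl <- max_correlation_CL$new()
# head(cl$get_predictions(train, test))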
|
/R/max_correlation_CL.R
|
no_license
|
xf15/NDTr
|
R
| false
| false
| 2,591
|
r
|
require(ctsem)
require(testthat)
context("kalmanVram")
test_that("time calc", {
set.seed(4)
nsubjects=20
#2 latent 2 td preds
TDPREDEFFECT=matrix(c(-20,1,-1,20),2,2)
gm=ctModel(Tpoints=10,n.latent=2,n.manifest=3,LAMBDA=matrix(c(1,0,0,0,1,.5),ncol=2),
DRIFT=matrix(c(-.3,0,0,-.05),2,2),DIFFUSION=diag(2,2),
# TRAITVAR=diag(2),
CINT=matrix(c(5,3),ncol=1),
TDPREDEFFECT=TDPREDEFFECT,
TDPREDVAR=diag(.002,10*2),
MANIFESTVAR=diag(.3,3),
T0VAR=diag(2),
n.TDpred=2,
TDPREDMEANS=matrix(rep(c(1,rep(0,4),0,rep(0,4)),2),ncol=1))
gd=ctGenerate(gm,nsubjects,burnin=50)
ctIndplot(datawide = gd,n.manifest = 3,Tpoints = 10)
m=ctModel(Tpoints=10,n.latent=2,n.manifest=3,LAMBDA=matrix(c(1,0,0,0,1,.5),ncol=2),
DRIFT=matrix(c(-.3,0,0,-.05),2,2),
# MANIFESTVAR=diag(.3,3),
# DIFFUSION=diag(2,2),
TDPREDEFFECT=matrix(c('td1',1,-1,20),2,2),
# TRAITVAR='auto',
CINT=matrix(c(5,3),ncol=1),
MANIFESTMEANS=matrix(c(0,0,0),ncol=1),
n.TDpred=2)
f1=ctRefineTo(dat = gd,m,retryattempts = 3,objective='Kalman',stationary=c('T0MEANS','T0VAR'),carefulFit=TRUE)
# f1$mxobj=mxRun(f1$mxobj)
f2=ctRefineTo(dat = gd,m,retryattempts = 3,objective='mxRAM',stationary=c('T0MEANS','T0VAR'),carefulFit=TRUE)
expect_equal(f1$mxobj$output$estimate,f2$mxobj$output$estimate,tolerance=.001)
ctPostPredict(f1,timestep=.1,n.subjects=200)
})
|
/tests/testthat/kalmanVram.R
|
no_license
|
AndreMikulec/ctsem
|
R
| false
| false
| 1,342
|
r
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
term1 = c("I","am","at","in","a","is","on","it","up","can","mum","dad","us","an","boy","girl","to","my","with","you","no","so","he","she","be","go","the","like")
term2 = c("me","by","we","and","of","this","was","went")
week = c("see","get","do","said")
getSightWords <- function(wordlist){
if(wordlist == "Term 1"){ return(term1) }
if(wordlist == "Term 2"){ return(c(term2,week)) }
if(wordlist == "This week"){ return(week) }
return(c(term1,term2,week))
}
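# quick illustrative check (as comments so it does not run at app startup):
# getSightWords("This week")   # "see" "get" "do" "said"
# getSightWords("nonsense")    # falls through to the full combined list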
ui = fluidPage(
#numericInput("num", label = "Make changes", value = 1),
sidebarLayout(
sidebarPanel(
selectInput("wordlist","Word list:",c("All","Term 1","Term 2","This week")),
actionButton("update" ,"New word", icon("refresh"),
class = "btn btn-primary"),
helpText("When you click the button above, you should see",
"a new sight word, right:")
),
mainPanel(
#verbatimTextOutput("value"),
htmlOutput("htmlword"),
helpText("Now type the sight word in the box below:"),
textInput("yourturn","Your turn:"),
htmlOutput("assess")
)
)
)
server = function(input, output, session) {
newWord <- reactiveVal("me")
oldWord <- reactiveVal("me")
observeEvent(input$update,{
updateTextInput(session, "yourturn", value="")
})
observeEvent(input$yourturn,{
oldWord(newWord())
})
output$htmlword <- renderUI({
input$update
sightwords = getSightWords(input$wordlist)
isolate({
newWord(sightwords[as.integer(runif(1,1,length(sightwords))+0.5)])
})
isolate(HTML(paste0("<p><font size=36 color=\"blue\">", newWord(), "</font></p><hr>")))
# isolate(HTML(paste0("<p><font size=36 color=\"blue\">", sightwords[as.integer(runif(1,1,length(sightwords))+0.5)], "</font></p><hr>")))
})
output$assess <- renderUI({
attempt = tolower(input$yourturn)
myword = tolower(newWord())
if(attempt == myword){
HTML("<p><font size=36 color=\"green\">Yes!</font></p><hr>")
}else{
if(input$yourturn == ""){
HTML("<p><font size=24 color=\"grey\">Have a go!</font></p><hr>")
}else{
HTML("<p><font size=36 color=\"red\">?</font></p><hr>")
}
}
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
/sightwords/app.R
|
no_license
|
cabbagesofdoom/sightwords
|
R
| false
| false
| 2,728
|
r
|
# plot2.R
# global active power line graph
setwd("~/Documents/coursera/exploratoryData/ExData_Plotting1/")
#Read the datasets
householdTotal <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings="?")
#convert the date column to POSIX date format
householdTotal$Time <- as.POSIXct(paste(householdTotal$Date, householdTotal$Time), format="%d/%m/%Y %H:%M:%S")
householdSubset <- householdTotal[(householdTotal$Time >= "2007-02-01" & householdTotal$Time < "2007-02-03"),]
# Create and output plot as png
png("plot2.png", width=480, height=480)
plot(householdSubset$Time, householdSubset$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
/plot2.R
|
no_license
|
JamesHayes/ExData_Plotting1
|
R
| false
| false
| 703
|
r
|
library(tidyverse)
stocks <- tibble(
year = c(2015, 2015, 2016, 2016),
half=c(1, 2, 1, 2),
return=c(1.88, 0.59, 0.92, 0.17)
)
stocks %>%
spread(year, return) %>%
gather("year","return","2015":"2016")
#Question 4
preg <- tribble(
~pregnant, ~male, ~female,
"yes", NA, 10,
"no", 20, 12
)
preg
#Exercises separate()
library(tidyverse)
tibble(x=c("a,b,c", "d,e,f,g","h,i,j")) %>%
separate(x, c("one","two","three"), remove = FALSE)
|
/gather_spread_exercise.R
|
no_license
|
erickndava/r4ds
|
R
| false
| false
| 453
|
r
|
source("Step_6_Approach_3_KNN_Cascade.R")
####
#### 4 APPLYING MODELS ####
####
A1_validation <- validation_wide
A2_validation <- validation_wide
A3_validation <- validation_wide
### APPROACH #1: RF INDEPENDENT APPROACH
A1_vali_floor <- predict(A1_rf_tr_mdl_fl, A1_validation) #Floor
A1_vali_lat <- predict(A1_rf_tr_mdl_lat, A1_validation)
A1_vali_long <- predict(A1_rf_tr_mdl_lon, A1_validation)
A1_results <- cbind(A1_vali_lat,A1_vali_long,A1_vali_floor)
#write.csv(A1_results,file = "results_independent_RF.csv", row.names = FALSE)
### APPROACH #2: RF WATERFALL APPROACH
A2_vali_building <- predict(A2_rf_tr_mdl_bid, A2_validation) #Building
A2_validation$BUILDINGID <- A2_vali_building #always insert the new predictions
A2_vali_floor <- predict(A2_rf_tr_mdl_fl, A2_validation)
A2_validation$FLOOR <- A2_vali_floor
A2_vali_lat <- predict(A2_rf_tr_mdl_lat, A2_validation)
A2_validation$LATITUDE <- A2_vali_lat
A2_vali_lon <- predict(A2_rf_tr_mdl_lon, A2_validation)
A2_results <- cbind(A2_vali_lat, A2_vali_lon, A2_vali_floor)
#write.csv(A2_results,file = "results_cascaded_RF.csv", row.names = FALSE)
|
/scripts/Step_7_Applying_RFmodels_to_validation.R
|
no_license
|
FlorianJUnger/Feb-Mar2019-Wi-Fi-Localisation
|
R
| false
| false
| 1,113
|
r
|
source("Step_6_Approach_3_KNN_Cascade.R")
####
#### 4 APPLYING MODELS ####
####
A1_validation <- validation_wide
A2_validation <- validation_wide
A3_validation <- validation_wide
### APPROACH #1: RF INDEPENDENT APPROACH
A1_vali_floor <- predict(A1_rf_tr_mdl_fl, A1_validation) #Floor
A1_vali_lat <- predict(A1_rf_tr_mdl_lat, A1_validation)
A1_vali_long <- predict(A1_rf_tr_mdl_lon, A1_validation)
A1_results <- cbind(A1_vali_lat,A1_vali_long,A1_vali_floor)
#write.csv(A1_results,file = "results_independent_RF.csv", row.names = FALSE)
### APPROACH #2: RF WATERFALL APPROACH
A2_vali_building <- predict(A2_rf_tr_mdl_bid, A2_validation) #Floor
A2_validation$BUILDINGID <- A2_vali_building #always insert the new predictions
A2_vali_floor <- predict(A2_rf_tr_mdl_fl, A2_validation)
A2_validation$FLOOR <- A2_vali_floor
A2_vali_lat <- predict(A2_rf_tr_mdl_lat, A2_validation)
A2_validation$LATITUDE <- A2_vali_lat
A2_vali_lon <- predict(A2_rf_tr_mdl_lon, A2_validation)
A2_results <- cbind(A2_vali_lat, A2_vali_lon, A2_vali_floor)
#write.csv(A2_results,file = "results_cascaded_RF.csv", row.names = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/query.R
\name{query_master}
\alias{query_day}
\alias{query_interval}
\alias{query_master}
\alias{query_month}
\alias{query_week}
\alias{query_year}
\alias{sum_day}
\alias{sum_interval}
\alias{sum_master}
\alias{sum_month}
\alias{sum_week}
\alias{sum_year}
\title{Query data and aggregate data}
\usage{
query_master(db, time, col, prop, columns = "name", time.range = NULL,
filter = NULL, phase = 4)
query_interval(db, ...)
query_day(db, ...)
query_week(db, ...)
query_month(db, ...)
query_year(db, ...)
sum_master(db, time, col, prop, columns = "name", time.range = NULL,
filter = NULL, phase = 4, multiply.time = FALSE)
sum_interval(db, ...)
sum_day(db, ...)
sum_week(db, ...)
sum_month(db, ...)
sum_year(db, ...)
}
\arguments{
\item{db}{PLEXOS database object}
\item{time}{character. Table to query from (interval, day, week, month, year)}
\item{col}{character. Collection to query}
\item{prop}{character vector. Property or properties to query}
\item{columns}{character. Data columns to query or aggregate by (defaults to \code{name})}
\item{time.range}{POSIXt or character. Range of dates of length 2 (given as date, datetime or character in 'ymdhms' or 'ymd' format)}
\item{filter}{list. Used to filter by data columns (see details)}
\item{phase}{integer. PLEXOS optimization phase (1-LT, 2-PASA, 3-MT, 4-ST)}
\item{...}{parameters passed from shortcut functions to master (all except \code{time})}
\item{multiply.time}{boolean. When summing interval data, provide the value multiplied by interval duration (See details).}
}
\value{
A data frame that contains data summarized/aggregated by scenario.
}
\description{
This collection of functions retrieves data from the processed PLEXOS solutions and
returns it in a convenient format.
}
\details{
The family \code{query_*} returns the raw data in the databases, while \code{sum_*}
aggregates the data according to \code{columns}.
The functions \code{*_day}, \code{*_week}, \code{*_month} and \code{*_year} are
shortcuts for the corresponding, \code{*_master} function.
The following is a list of valid items for \code{columns} and filtering. Additionally,
\code{time} can be specified for summary data (interval data always includes \code{time}).
\itemize{
\item{\code{category}}
\item{\code{property}}
\item{\code{name} (default for columns)}
\item{\code{parent} (automatically selected when \code{name} is selected)}
\item{\code{category}}
\item{\code{region} (only meaningful for generators)}
\item{\code{zone} (only meaningful for generators)}
\item{\code{period_type}}
\item{\code{band}}
\item{\code{sample}}
\item{\code{timeslice}}
}
If defined, the \code{filter} parameter must be a \code{list}. The elements must be character
vectors and need to have a valid column name (see previous bullet points). For example, one
could define it as follows:
\code{filter = list(name = c("Generator1", "Generator2"), region = "Region1")}
To filter by time use the \code{time.range} parameter, instead of adding it as an entry in the
\code{filter} parameter. For example use \code{c("2015-03-14", "2015-03-15")} in your query.
Please note that the year/month/date starts at midnight (00:00:00).
If a scenario has multiple databases, the data will be aggregated automatically. If two or more
databases within the same scenario have overlapping time periods, the default is to select the
data from the last database (execute \code{summary(db)} to see the order). To change this behavior
set the global option \code{rplexos.tiebreak} to \code{first}, \code{last}, or \code{all} to
select data from the first database, the last one or keep all of them.
Multiple properties can be queried within a collection. If \code{prop} equals the wildcard
\code{"*"}, all the properties within a collection are returned.
The parameter \code{multiply.time} allows to multiply values by interval duration (in hours) when
doing the sum of interval data. This can be used, for example, to obtain total energy (in MWh)
from power time series (in MW).
}
\examples{
# Process the folder with the solution file provided by rplexos
location <- location_solution_rplexos()
process_folder(location)
# Query data
db <- plexos_open(location)
query_day(db, "Generator", "Generation")
query_day(db, "Region", "*")
query_interval(db, "Generator", "Generation")
}
\seealso{
\code{\link{plexos_open}} to create the PLEXOS database object
\code{\link{query_sql}} to perform custom queries
}
|
/man/query_master.Rd
|
no_license
|
danielsjf/rplexos
|
R
| false
| true
| 4,539
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/query.R
\name{query_master}
\alias{query_day}
\alias{query_interval}
\alias{query_master}
\alias{query_month}
\alias{query_week}
\alias{query_year}
\alias{sum_day}
\alias{sum_interval}
\alias{sum_master}
\alias{sum_month}
\alias{sum_week}
\alias{sum_year}
\title{Query data and aggregate data}
\usage{
query_master(db, time, col, prop, columns = "name", time.range = NULL,
filter = NULL, phase = 4)
query_interval(db, ...)
query_day(db, ...)
query_week(db, ...)
query_month(db, ...)
query_year(db, ...)
sum_master(db, time, col, prop, columns = "name", time.range = NULL,
filter = NULL, phase = 4, multiply.time = FALSE)
sum_interval(db, ...)
sum_day(db, ...)
sum_week(db, ...)
sum_month(db, ...)
sum_year(db, ...)
}
\arguments{
\item{db}{PLEXOS database object}
\item{time}{character. Table to query from (interval, day, week, month, year)}
\item{col}{character. Collection to query}
\item{prop}{character vector. Property or properties to query}
\item{columns}{character. Data columns to query or aggregate by (defaults to \code{name})}
\item{time.range}{POSIXt or character. Range of dates of length 2 (given as date, datetime or character in 'ymdhms' or 'ymd' format)}
\item{filter}{list. Used to filter by data columns (see details)}
\item{phase}{integer. PLEXOS optimization phase (1-LT, 2-PASA, 3-MT, 4-ST)}
\item{...}{parameters passed from shortcut functions to master (all except \code{time})}
\item{multiply.time}{boolean. When summing interval data, provide the value multiplied by interval duration (See details).}
}
\value{
A data frame that contains data summarized/aggregated by scenario.
}
\description{
This collection of functions retrieves data from the processed PLEXOS solutions and
returns it in a convenient format.
}
\details{
The family \code{query_*} returns the raw data in the databases, while \code{sum_*}
aggregates the data according to \code{columns}.
The functions \code{*_day}, \code{*_week}, \code{*_month} and \code{*_year} are
shortcuts for the corresponding \code{*_master} function.
The following is a list of valid items for \code{columns} and filtering. Additionally,
\code{time} can be specified for summary data (interval data always includes \code{time}).
\itemize{
\item{\code{category}}
\item{\code{property}}
\item{\code{name} (default for columns)}
\item{\code{parent} (automatically selected when \code{name} is selected)}
\item{\code{region} (only meaningful for generators)}
\item{\code{zone} (only meaningful for generators)}
\item{\code{period_type}}
\item{\code{band}}
\item{\code{sample}}
\item{\code{timeslice}}
}
If defined, the \code{filter} parameter must be a \code{list}. Its elements must be character
vectors whose names are valid column names (see the previous bullet points). For example, one
could define it as follows:
\code{filter = list(name = c("Generator1", "Generator2"), region = "Region1")}
To filter by time use the \code{time.range} parameter, instead of adding it as an entry in the
\code{filter} parameter. For example use \code{c("2015-03-14", "2015-03-15")} in your query.
Please note that the year/month/date starts at midnight (00:00:00).
If a scenario has multiple databases, the data will be aggregated automatically. If two or more
databases within the same scenario have overlapping time periods, the default is to select the
data from the last database (execute \code{summary(db)} to see the order). To change this behavior
set the global option \code{rplexos.tiebreak} to \code{first}, \code{last}, or \code{all} to
select data from the first database, the last one or keep all of them.
Multiple properties can be queried within a collection. If \code{prop} equals the wildcard
\code{"*"}, all the properties within a collection are returned.
The parameter \code{multiply.time} allows values to be multiplied by the interval duration (in hours) when
doing the sum of interval data. This can be used, for example, to obtain total energy (in MWh)
from power time series (in MW).
}
\examples{
# Process the folder with the solution file provided by rplexos
location <- location_solution_rplexos()
process_folder(location)
# Query data
db <- plexos_open(location)
query_day(db, "Generator", "Generation")
query_day(db, "Region", "*")
query_interval(db, "Generator", "Generation")
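# Added illustrative calls (not in the package's original examples); the filter
# values below are hypothetical, so these lines are not run automatically
\dontrun{
query_day(db, "Generator", "Generation",
          filter = list(region = "Region1"),
          time.range = c("2015-03-14", "2015-03-15"))
sum_day(db, "Generator", "Generation", columns = "region")
sum_interval(db, "Generator", "Generation", multiply.time = TRUE)
# keep data from the first database when scenario databases overlap in time
options(rplexos.tiebreak = "first")
}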
}
\seealso{
\code{\link{plexos_open}} to create the PLEXOS database object
\code{\link{query_sql}} to perform custom queries
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/changeSeatrackPassword.R
\name{changeSeatrackPassword}
\alias{changeSeatrackPassword}
\title{changeSeatrackPassword}
\usage{
changeSeatrackPassword(password = NULL)
}
\arguments{
\item{password}{Your new password (character string).}
}
\value{
Null
}
\description{
Changes the password for a user in the Seatrack database. Since the passwords for the file archive are fetched from the database,
this also affects the file archive.
}
\examples{
\dontrun{
changeSeatrackPassword("newPassword")
}
}
|
/seatrackR/man/changeSeatrackPassword.Rd
|
no_license
|
NINAnor/seatrack-db
|
R
| false
| true
| 575
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/changeSeatrackPassword.R
\name{changeSeatrackPassword}
\alias{changeSeatrackPassword}
\title{changeSeatrackPassword}
\usage{
changeSeatrackPassword(password = NULL)
}
\arguments{
\item{password}{Your new password (character string).}
}
\value{
Null
}
\description{
Changes the password for a user in the Seatrack database. Since the passwords for the file archive are fetched from the database,
this also affects the file archive.
}
\examples{
\dontrun{
changeSeatrackPassword("newPassword")
}
}
|
a <- 2
b <- 3
c <- 5
d <- 6
e <- 0
f <- e
|
/test1.R
|
no_license
|
qcjun2191/test
|
R
| false
| false
| 41
|
r
|
a <- 2
b <- 3
c <- 5
d <- 6
e <- 0
f <- e
|
library(ggplot2)
library(dplyr)
library(broom)
d=read.csv('D:/DA/SUV Predictions data.csv')
print(d)
head(d)
tail(d)
print(is.null(d))
names(d)
nrow(d)
ncol(d)
mean(d$Salary)
print(unique(d))
print(median(d$Age))
hist(d$Salary)
boxplot(d$Salary~d$Age)
pie(table(d$Age))
print(plot(Salary~Age,data=d))
li=lm(Salary~Age,data=d)
print(li)
print(summary(li))
i<-ggplot(d,aes(x=Salary,y=Age))+geom_point()
print(i)
i<-i+geom_smooth(method = 'lm',col='blue')
print(i)
|
/Projects/R programming/Linear Regression R.R
|
no_license
|
karthicknadaraj/Edubridge
|
R
| false
| false
| 487
|
r
|
library(ggplot2)
library(dplyr)
library(broom)
d=read.csv('D:/DA/SUV Predictions data.csv')
print(d)
head(d)
tail(d)
print(is.null(d))
names(d)
nrow(d)
ncol(d)
mean(d$Salary)
print(unique(d))
print(median(d$Age))
hist(d$Salary)
boxplot(d$Salary~d$Age)
pie(table(d$Age))
print(plot(Salary~Age,data=d))
li=lm(Salary~Age,data=d)
print(li)
print(summary(li))
i<-ggplot(d,aes(x=Salary,y=Age))+geom_point()
print(i)
i<-i+geom_smooth(method = 'lm',col='blue')
print(i)
|
# Question 3
library(ggplot2)
for(i in seq(4)){
  # assign the plot and pass it to ggsave explicitly; inside a loop an unassigned
  # ggplot object is not printed, so ggsave's default last_plot() may not be this plot
  p <- ggplot(anscombe, aes(anscombe[,i], anscombe[,i+4])) +
geom_point(size=3) +
geom_smooth(method="lm", se=FALSE) +
ggtitle(paste("Y", i, " ~ X", i, sep="")) +
xlab(paste("\n X", i, sep="")) +
ylab(paste("Y", i, "\n", sep="")) +
xlim(min(anscombe[,1:4]), max(anscombe[,1:4])) +
ylim(min(anscombe[,5:8]), max(anscombe[,5:8])) +
theme(axis.text = element_text(size=24),
strip.text.y = element_text(size = 24),
title = element_text(size = 24))
  ggsave(paste0("q3-", i, ".png"), plot = p, height = 8.5, width = 11)
}
# Question 4
bodyfat <- read.csv("extraColumnsOfRandomData.csv", header=T)
q4 <- function(df){
r2 <- data.frame("R2"=rep(0, ncol(df)-1), "Adj.R2"=rep(0, ncol(df)-1))
for(x in 2:ncol(df)){
df_temp <- df[,1:x]
lm_temp <- lm(data=df_temp, BODYFAT~.)
r2$R2[x-1] <- summary(lm_temp)$r.squared
r2$Adj.R2[x-1] <- summary(lm_temp)$adj.r.squared
}
r2 <- reshape2::melt(r2)
r2$Var <- rep(1:27, 2)
  # assign and pass the plot explicitly so ggsave saves this plot (see note in Question 3)
  p <- ggplot(r2, aes(x=Var, y=value, colour=variable)) +
geom_point(size = 3) +
geom_line(size = 0.5) +
ggtitle(expression(paste("Variation of ", R^{2}, " and Adj.", R^{2}))) +
xlab("\n Number of explanatory variables") +
ylab("Value of coefficient of determination \n") +
theme(axis.text = element_text(size=24),
strip.text.y = element_text(size = 24),
title = element_text(size = 24),
legend.text = element_text(size = 18),
legend.title=element_blank()) +
scale_x_continuous(breaks=seq(0, ncol(df), 2))
  ggsave("q4.png", plot = p, height = 8.5, width = 11)
return(r2)
}
r2 <- q4(bodyfat)
# Question 5
electric <- read.table("electricBillData.txt", header=F, sep="", na.strings = "*")
names(electric) <- c(
"NUM",
"YEAR",
"MONTH",
"BILL",
"TEMP",
"HDD",
"CDD",
"SIZE",
"METER",
"PUMP1",
"PUMP2",
"RIDER TOTAL",
"CONSUMPTION"
)
# a)
lm1 <- lm(data=electric, BILL~TEMP+HDD+CDD+SIZE+METER+PUMP1+PUMP2)
summary(lm1)
# e)
cor(electric[,5:7])
# f)
lm2 <- lm(data=electric, BILL~HDD+CDD+SIZE+METER+PUMP1+PUMP2)
summary(lm2)
# Question 6
senic <- read.csv("SENIC_data.csv", header=F)
names(senic) <- c(
"IdNumber", "LengthOfStay", "Age", "InfectionRisk", "RoutineCulturingRatio", "RoutineChestXRayRatio",
"NumberOfBeds", "MedSchoolAffiliation", "Region", "AverageDailyCensus", "NumberOfNurses", "AvailableFacilitiesAndServices"
)
# 1)
lm_senic <- lm(data=senic, NumberOfNurses~AvailableFacilitiesAndServices + I(AvailableFacilitiesAndServices^2))
summary(lm_senic)
postscript("q6_1", width = 11, height = 8)
car::residualPlot(lm_senic)
dev.off()
# 2)
lm_senic1 <- lm(data=senic, NumberOfNurses~AvailableFacilitiesAndServices)
summary(lm_senic1)
# Question 8
senic <- read.csv("SENIC_data.csv", header=F)
names(senic) <- c(
"IdNumber", "LengthOfStay", "Age", "InfectionRisk", "RoutineCulturingRatio", "RoutineChestXRayRatio",
"NumberOfBeds", "MedSchoolAffiliation", "Region", "AverageDailyCensus", "NumberOfNurses", "AvailableFacilitiesAndServices"
)
# 1)
senic$NE <- ifelse(senic$Region == 1, 1, 0)
senic$NC <- ifelse(senic$Region == 2, 1, 0)
senic$S <- ifelse(senic$Region == 3, 1, 0)
lm_senic <- lm(data=senic, LengthOfStay ~ Age +
RoutineCulturingRatio +
AverageDailyCensus +
AvailableFacilitiesAndServices +
NE + NC + S)
summary(lm_senic)
|
/HW3/HW3.R
|
no_license
|
aguimaraesduarte/msan601
|
R
| false
| false
| 3,440
|
r
|
# Question 3
library(ggplot2)
for(i in seq(4)){
  # assign the plot and pass it to ggsave explicitly; inside a loop an unassigned
  # ggplot object is not printed, so ggsave's default last_plot() may not be this plot
  p <- ggplot(anscombe, aes(anscombe[,i], anscombe[,i+4])) +
geom_point(size=3) +
geom_smooth(method="lm", se=FALSE) +
ggtitle(paste("Y", i, " ~ X", i, sep="")) +
xlab(paste("\n X", i, sep="")) +
ylab(paste("Y", i, "\n", sep="")) +
xlim(min(anscombe[,1:4]), max(anscombe[,1:4])) +
ylim(min(anscombe[,5:8]), max(anscombe[,5:8])) +
theme(axis.text = element_text(size=24),
strip.text.y = element_text(size = 24),
title = element_text(size = 24))
  ggsave(paste0("q3-", i, ".png"), plot = p, height = 8.5, width = 11)
}
# Question 4
bodyfat <- read.csv("extraColumnsOfRandomData.csv", header=T)
q4 <- function(df){
r2 <- data.frame("R2"=rep(0, ncol(df)-1), "Adj.R2"=rep(0, ncol(df)-1))
for(x in 2:ncol(df)){
df_temp <- df[,1:x]
lm_temp <- lm(data=df_temp, BODYFAT~.)
r2$R2[x-1] <- summary(lm_temp)$r.squared
r2$Adj.R2[x-1] <- summary(lm_temp)$adj.r.squared
}
r2 <- reshape2::melt(r2)
r2$Var <- rep(1:27, 2)
  # assign and pass the plot explicitly so ggsave saves this plot (see note in Question 3)
  p <- ggplot(r2, aes(x=Var, y=value, colour=variable)) +
geom_point(size = 3) +
geom_line(size = 0.5) +
ggtitle(expression(paste("Variation of ", R^{2}, " and Adj.", R^{2}))) +
xlab("\n Number of explanatory variables") +
ylab("Value of coefficient of determination \n") +
theme(axis.text = element_text(size=24),
strip.text.y = element_text(size = 24),
title = element_text(size = 24),
legend.text = element_text(size = 18),
legend.title=element_blank()) +
scale_x_continuous(breaks=seq(0, ncol(df), 2))
  ggsave("q4.png", plot = p, height = 8.5, width = 11)
return(r2)
}
r2 <- q4(bodyfat)
# Question 5
electric <- read.table("electricBillData.txt", header=F, sep="", na.strings = "*")
names(electric) <- c(
"NUM",
"YEAR",
"MONTH",
"BILL",
"TEMP",
"HDD",
"CDD",
"SIZE",
"METER",
"PUMP1",
"PUMP2",
"RIDER TOTAL",
"CONSUMPTION"
)
# a)
lm1 <- lm(data=electric, BILL~TEMP+HDD+CDD+SIZE+METER+PUMP1+PUMP2)
summary(lm1)
# e)
cor(electric[,5:7])
# f)
lm2 <- lm(data=electric, BILL~HDD+CDD+SIZE+METER+PUMP1+PUMP2)
summary(lm2)
# Question 6
senic <- read.csv("SENIC_data.csv", header=F)
names(senic) <- c(
"IdNumber", "LengthOfStay", "Age", "InfectionRisk", "RoutineCulturingRatio", "RoutineChestXRayRatio",
"NumberOfBeds", "MedSchoolAffiliation", "Region", "AverageDailyCensus", "NumberOfNurses", "AvailableFacilitiesAndServices"
)
# 1)
lm_senic <- lm(data=senic, NumberOfNurses~AvailableFacilitiesAndServices + I(AvailableFacilitiesAndServices^2))
summary(lm_senic)
postscript("q6_1", width = 11, height = 8)
car::residualPlot(lm_senic)
dev.off()
# 2)
lm_senic1 <- lm(data=senic, NumberOfNurses~AvailableFacilitiesAndServices)
summary(lm_senic1)
# Question 8
senic <- read.csv("SENIC_data.csv", header=F)
names(senic) <- c(
"IdNumber", "LengthOfStay", "Age", "InfectionRisk", "RoutineCulturingRatio", "RoutineChestXRayRatio",
"NumberOfBeds", "MedSchoolAffiliation", "Region", "AverageDailyCensus", "NumberOfNurses", "AvailableFacilitiesAndServices"
)
# 1)
senic$NE <- ifelse(senic$Region == 1, 1, 0)
senic$NC <- ifelse(senic$Region == 2, 1, 0)
senic$S <- ifelse(senic$Region == 3, 1, 0)
lm_senic <- lm(data=senic, LengthOfStay ~ Age +
RoutineCulturingRatio +
AverageDailyCensus +
AvailableFacilitiesAndServices +
NE + NC + S)
summary(lm_senic)
|
# Load the libraries
library('shiny')
library('ggplot2')
library('e1071')
library('Hmisc')
library('corrplot')
library('PerformanceAnalytics')
library('visreg')
library('MASS')
library('fitdistrplus')
library('boot')
# Load the data
# Don't use data.table; this set is tiny!
# Import data
data = read.csv("simplified_export3.csv");
data = data[1:33,];
# Split identifiers from group
identifiers = data[,1];
data$Field.number = NULL;
alldata=data;
# Define server logic
shinyServer(function(input, output) {
### Import the data
# Create a renderUI element to allow user to select their input data based on what is actually there.
output$choose_cols = renderUI({
colNames = colnames(alldata);
# Create checkboxes; have first two ticked by default
checkboxGroupInput("theseColNames", "Select independents:", colNames, colNames[ c(1,2) ]);
})
# Create renderUI element to allow user to select dependent variable
# Provide some interpretative options based on what the variable looks like
# Names of unused columns
freeCols = reactive({
colNames = colnames(alldata);
theseCols = colNames[! (colNames %in% input$theseColNames) ];
if( is.na( theseCols) || is.null( theseCols ) ) {
return( NULL );
} else {
return( theseCols );
}
})
values = reactiveValues(old="Initialize");
output$choose_targ = renderUI({
# Check whether there are any columns left (if not, return null, which tells other reactives to present null output and UI elements to display message accordingly)
# If pass, check whether thisTarg is set to not Null. If so, check to see that it's a valid column. If it's a valid column, don't change anything.
if( is.null( freeCols() ) || is.na( freeCols() ) ) {
X = helpText("There are no dependent columns left. Please make sure at least one column is not selected as an independent variable.")
} else {
if( !is.null( values$old ) ) {
if( values$old %in% freeCols() ) {
X = radioButtons("thisTargName", "Select dependent:", freeCols(), values$old );
} else {
X = radioButtons("thisTargName", "Select dependent:", freeCols(), freeCols()[1] );
}
} else {
X = radioButtons("thisTargName", "Select dependent:", freeCols(), freeCols()[1] );
}
}
isolate({ values$old = input$thisTargName });
return(X);
})
# Define the raw independent and dependent variables. The dependent (target) variable later gets transformed according to user decisions.
indData = reactive({
outputDF = data.frame( matrix(0, ncol = length( input$theseColNames), nrow = nrow( alldata ) ) );
colnames(outputDF) = input$theseColNames;
# Iterate through, populating new DF
for( var in input$theseColNames ) {
X = alldata[, var];
outputDF[, var] = as.numeric(X);
}
return( outputDF )
})
targRaw = reactive({ alldata[, input$thisTargName ] })
targType = reactive({
if( class( targRaw() ) == "NULL" || length( targRaw() ) == 0 || is.null( targRaw() ) ) {
return( NULL );
} else if( class( targRaw() ) == "numeric" ) {
return( "numeric" );
} else if( class( targRaw() ) == "factor" ) {
return( "factor" );
} else if( class( targRaw() ) == "character") {
return( "character" );
} else if( class( targRaw() ) == "integer" ) {
return( "numeric" );
} else {
return( NULL );
}
})
# Populate some additional input in case the target variable is numeric
output$isNumericPanel = renderUI({
if( length(input$numericType) == 0 || is.null( input$numericType) ) {
radioButtons("numericType", "Interpret numeric:", c("continuous", "discrete") , selected = "discrete")
} else if( targType() == "numeric" && input$numericType == 'discrete') {
list(
radioButtons("numericType", "Interpret numeric:", c("continuous", "discrete") , selected = "discrete"),
sliderInput("discN","Discretization:", min = 1, max = 15, value = 3)
)
} else {
radioButtons("numericType", "Interpret numeric:", c("continuous", "discrete") , selected = "discrete")
}
})
numericType = reactive({
if( length( input$numericType ) == 0 ) {
return( "discrete" )
} else {
return( input$numericType )
}
})
discN = reactive({
if( length( input$discN ) == 0 ) {
return( 3 )
} else {
return( input$discN )
}
})
# Define the final target variable
targetVar_Factor = reactive({
# If the raw column is character, convert to factor
if( is.null( targType() ) ) {
return( NULL )
} else if( targType() == "numeric" && numericType() == "discrete" ) {
if( length( unique( targRaw() ) ) <= discN() ) {
return( targRaw() )
} else if( length( unique( targRaw() ) ) <= 5 ) {
return( targRaw() )
} else {
Y= cut(targRaw(), as.vector(ggplot2:::breaks( targRaw(), "n", n=discN())));
Y[ is.na(Y) ] = levels(Y)[1];
return(Y)
}
} else if( targType() == "numeric" && numericType() == "continuous" ) {
return( targRaw() )
} else if( targType() == "character" ) {
return( as.factor( targRaw() ) )
} else if( targType() == "factor") {
return( targRaw() )
} else {
return( NULL )
}
})
targetVar = reactive({
if( class( targetVar_Factor() ) == "factor" ) {
return( as.numeric( targetVar_Factor() ) );
} else {
return( targetVar_Factor() )
}
})
targColors = reactive({
theseColors = rainbow( length( unique( targetVar() ) ) );
return( theseColors[ as.numeric(as.factor(targetVar())) ] );
})
output$debugInfo = renderUI({
list(
input$thisTargName,
freeCols()
)
})
output$debugTable = renderTable({
as.data.frame( targRaw() )
})
output$debugTable2 = renderTable({
as.data.frame( targetVar_Factor() )
})
output$debugTable3 = renderTable({
as.data.frame( targetVar() )
})
### Statistics summary
cor.mtest <- function(mat, conf.level = 0.95) {
mat <- as.matrix(mat)
n <- ncol(mat)
p.mat <- lowCI.mat <- uppCI.mat <- matrix(NA, n, n)
diag(p.mat) <- 0
diag(lowCI.mat) <- diag(uppCI.mat) <- 1
for (i in 1:(n - 1)) {
for (j in (i + 1):n) {
tmp <- cor.test(mat[, i], mat[, j], conf.level = conf.level)
p.mat[i, j] <- p.mat[j, i] <- tmp$p.value
lowCI.mat[i, j] <- lowCI.mat[j, i] <- tmp$conf.int[1]
uppCI.mat[i, j] <- uppCI.mat[j, i] <- tmp$conf.int[2]
}
}
return(list(p.mat, lowCI.mat, uppCI.mat))
}
output$corrPlot = renderPlot({
res1 <- cor.mtest(indData(), 0.95)
res2 <- cor.mtest(indData(), 0.99)
## mark values that are insignificant at the chosen significance level
M = cor( indData() );
corrplot.mixed(M, p.mat = res1[[1]], sig.level = input$corSigLevel/100)
})
output$corrPlot2 = renderPlot({
chart.Correlation(indData(), method=input$cor2Method, histogram=input$cor2Hist, pch="+")
})
### Distribution fitting
output$distPlot = renderPlot({
fitg = fitdist( indData()[, input$distCol ] , input$distDist);
plot(fitg, demp=input$distDemp )
})
output$distSummary = renderText({
fitg = fitdist( indData()[, input$distCol ] , input$distDist);
capture.output( summary(fitg) );
})
## Make a plot of the straight-up PCA
# Perform PCA
output$distOptions = renderUI({
radioButtons('distCol', 'Column to check:', colnames(indData()), colnames(indData())[1])
})
pca.rows = reactive({
x = indData();
pca.vars = prcomp(~., x , scale = TRUE);
as.data.frame( predict( pca.vars, x ) )
})
# Display
output$pcaRaw = renderPlot({
colors = targColors();
df = cbind(pca.rows(), colors);
p = ggplot(df, mapping=aes_string(x=colnames(df)[input$pca1], y=colnames(df)[input$pca2], colour="colors"))
p = p + scale_color_manual(values = unique( colors ),
labels = unique( targetVar_Factor() ))
return( p + geom_point() )
})
# Run manual SVM
model.svm = reactive({
df = cbind( indData(), targetVar() );
tVar = as.factor(targetVar());
thisGamma = input$gamma;
thisDeg = input$deg;
thisCoef0 = input$coef0;
thisCost = input$cost;
thisK = input$k;
# Each kernel has its own hyperparameters, so might as well just call each one
# TODO: are poly and sigmoid not converging..?
#
switch(input$kernel,
"linear" = svm(tVar ~ ., data = df, kernel = "linear", cost=thisCost, k=thisK),
"radial" = svm(tVar ~ ., data = df, kernel = "radial", gamma=thisGamma, cost=thisCost, k=thisK),
"poly" = svm(tVar ~ ., data = df, kernel = "poly", gamma=thisGamma, deg = input$deg, coef0=thisCoef0, cost=thisCost, k=thisK),
"sigmoid" = svm(tVar ~ ., data = df, kernel = "sigmoid", gamma=thisGamma, coef0=thisCoef0, cost=thisCost, k=thisK)
)
})
# Run auto-tuned SVM
tuned.svm = reactive({
df = indData();
switch(input$kernel,
"linear" = tune.svm(df, as.factor(targetVar()), kernel = "linear"),
"radial" = tune.svm(df, as.factor(targetVar()), kernel = "radial"),
"poly" = tune.svm(df, as.factor(targetVar()), kernel = "poly"),
"sigmoid" = tune.svm(df, as.factor(targetVar()), kernel = "sigmoid")
)
})
# Run auto-tuned SVM on a subset of the PCA space
tuned.svm.pca = reactive({
df = pca.rows()[,1:input$kPCA];
switch(input$kernel,
"linear" = tune.svm(df, as.factor(targetVar()), kernel = "linear"),
"radial" = tune.svm(df, as.factor(targetVar()), kernel = "radial"),
"poly" = tune.svm(df, as.factor(targetVar()), kernel = "poly"),
"sigmoid" = tune.svm(df, as.factor(targetVar()), kernel = "sigmoid")
)
})
# Make plot of PCA along chosen coords
output$pcaPlot <- renderPlot({
# build DF holding data and colors from original PCA
classes = targColors();
df = cbind(pca.rows(), classes) # because ggplot2 prefers data frames
# build DF holding data and colors for just the support vectors
thisIndex = model.svm()$index;
SV_rows = pca.rows()[thisIndex,];
SV_colors = classes[thisIndex];
SV_df = cbind(SV_rows, SV_colors)
# Now visualize, with triangles over support vectors
p = ggplot(df, mapping=aes_string(x=colnames(df)[input$pca1], y=colnames(df)[input$pca2], colour="classes"))
p = p + geom_point()
p = p + geom_point(SV_df, shape=2, mapping=aes_string(x=colnames(SV_df)[input$pca1], y=colnames(SV_df)[input$pca2], colour="SV_colors", size="10"))
p = p + scale_color_manual(values = unique( classes ),
labels = unique( targetVar_Factor() ))
return(p);
})
# Show summary statistics of manually tuned SVM
output$pcaSummary = renderText({
capture.output(summary( model.svm() ))
})
# Make a plot of the tuned SVM performance
output$tunedPlot <- renderPlot({
# build DF holding data and colors from original PCA
classes = targColors();
df = cbind(pca.rows(), classes) # because ggplot2 prefers data frames
# build DF holding data and colors for just the support vectors
thisIndex = tuned.svm()$best.model$index;
SV_rows = pca.rows()[thisIndex,];
SV_colors = classes[thisIndex];
SV_df = cbind(SV_rows, SV_colors)
# Now visualize, with triangles over support vectors
p = ggplot(df, mapping=aes_string(x=colnames(df)[input$pca1], y=colnames(df)[input$pca2], colour="classes"))
p = p + geom_point()
p = p + geom_point(SV_df, shape=2, mapping=aes_string(x=colnames(SV_df)[input$pca1], y=colnames(SV_df)[input$pca2], colour="SV_colors", size="10"))
p = p + scale_color_manual(values = unique( classes ),
labels = unique( targetVar_Factor() ))
return(p);
})
# Show summary statistics of automatically tuned SVM
output$tunedSummary = renderText({
capture.output(summary( tuned.svm()$best.model ))
})
# Show perf of best automatically tuned SVM
output$tunedPerf = renderUI({
list(
"Best performance: ",
      100*(1 - tuned.svm()$best.performance), # convert the proportion to match the "%" label
      "% success rate."
)
})
# Make a plot of the tuned SVM (on PCA data) performance
output$tunedPCAPlot <- renderPlot({
# build DF holding data and colors from original PCA
classes = targColors();
df = cbind(pca.rows(), classes) # because ggplot2 prefers data frames
# build DF holding data and colors for just the support vectors
thisIndex = tuned.svm.pca()$best.model$index;
SV_rows = pca.rows()[thisIndex,];
SV_colors = classes[thisIndex];
SV_df = cbind(SV_rows, SV_colors)
# Now visualize, with triangles over support vectors
p = ggplot(df, mapping=aes_string(x=colnames(df)[input$pca1], y=colnames(df)[input$pca2], colour="classes"))
p = p + geom_point()
p = p + geom_point(SV_df, shape=2, mapping=aes_string(x=colnames(SV_df)[input$pca1], y=colnames(SV_df)[input$pca2], colour="SV_colors", size="10"))
p = p + scale_color_manual(values = unique( classes ),
labels = unique( targetVar_Factor() ))
return(p);
})
# Show summary statistics of automatically tuned SVM
output$tunedPCASummary = renderText({
capture.output(summary( tuned.svm.pca()$best.model ))
})
# Show perf of best automatically tuned SVM
output$tunedPCAPerf = renderUI({
list(
"Best performance: ",
      100*(1 - tuned.svm.pca()$best.performance), # convert the proportion to match the "%" label
      "% success rate."
)
})
# Perform a GLM on the data
output$GLMPlotVarButtons = renderUI({
radioButtons("thisGLMPlotVar", "Select GLM plot variable:", input$theseColNames, input$theseColNames[1]);
})
output$GLMPlot = renderPlot({
Y = targetVar();
df = cbind(Y, indData() );
glm.fit = glm(Y ~., data=df, family=input$GLMFamily );
return( visreg( glm.fit, input$thisGLMPlotVar, type=input$GLMMethod ) );
})
output$GLMDiagPLot = renderPlot({
Y = targetVar();
df = cbind(Y, indData() );
glm.fit = glm(Y ~., data=df, family=input$GLMFamily );
glm.diag = glm.diag( glm.fit );
glm.diag.plots(glm.fit, glm.diag)
})
})
|
/shiny/server.r
|
no_license
|
sanchda/batStats
|
R
| false
| false
| 14,612
|
r
|
# Load the libraries
library('shiny')
library('ggplot2')
library('e1071')
library('Hmisc')
library('corrplot')
library('PerformanceAnalytics')
library('visreg')
library('MASS')
library('fitdistrplus')
library('boot')
# Load the data
# Don't use data.table; this set is tiny!
# Import data
data = read.csv("simplified_export3.csv");
data = data[1:33,];
# Split identifiers from group
identifiers = data[,1];
data$Field.number = NULL;
alldata=data;
# Define server logic
shinyServer(function(input, output) {
### Import the data
# Create a renderUI element to allow user to select their input data based on what is actually there.
output$choose_cols = renderUI({
colNames = colnames(alldata);
# Create checkboxes; have first two ticked by default
checkboxGroupInput("theseColNames", "Select independents:", colNames, colNames[ c(1,2) ]);
})
# Create renderUI element to allow user to select dependent variable
# Provide some interpretative options based on what the variable looks like
# Names of unused columns
freeCols = reactive({
colNames = colnames(alldata);
theseCols = colNames[! (colNames %in% input$theseColNames) ];
if( is.na( theseCols) || is.null( theseCols ) ) {
return( NULL );
} else {
return( theseCols );
}
})
values = reactiveValues(old="Initialize");
output$choose_targ = renderUI({
# Check whether there are any columns left (if not, return null, which tells other reactives to present null output and UI elements to display message accordingly)
# If pass, check whether thisTarg is set to not Null. If so, check to see that it's a valid column. If it's a valid column, don't change anything.
if( is.null( freeCols() ) || is.na( freeCols() ) ) {
X = helpText("There are no dependent columns left. Please make sure at least one column is not selected as an independent variable.")
} else {
if( !is.null( values$old ) ) {
if( values$old %in% freeCols() ) {
X = radioButtons("thisTargName", "Select dependent:", freeCols(), values$old );
} else {
X = radioButtons("thisTargName", "Select dependent:", freeCols(), freeCols()[1] );
}
} else {
X = radioButtons("thisTargName", "Select dependent:", freeCols(), freeCols()[1] );
}
}
isolate({ values$old = input$thisTargName });
return(X);
})
# Define the raw independent and dependent variables. The dependent (target) variable later gets transformed according to user decisions.
indData = reactive({
outputDF = data.frame( matrix(0, ncol = length( input$theseColNames), nrow = nrow( alldata ) ) );
colnames(outputDF) = input$theseColNames;
# Iterate through, populating new DF
for( var in input$theseColNames ) {
X = alldata[, var];
outputDF[, var] = as.numeric(X);
}
return( outputDF )
})
targRaw = reactive({ alldata[, input$thisTargName ] })
targType = reactive({
if( class( targRaw() ) == "NULL" || length( targRaw() ) == 0 || is.null( targRaw() ) ) {
return( NULL );
} else if( class( targRaw() ) == "numeric" ) {
return( "numeric" );
} else if( class( targRaw() ) == "factor" ) {
return( "factor" );
} else if( class( targRaw() ) == "character") {
return( "character" );
} else if( class( targRaw() ) == "integer" ) {
return( "numeric" );
} else {
return( NULL );
}
})
# Populate some additional input in case the target variable is numeric
output$isNumericPanel = renderUI({
if( length(input$numericType) == 0 || is.null( input$numericType) ) {
radioButtons("numericType", "Interpret numeric:", c("continuous", "discrete") , selected = "discrete")
} else if( targType() == "numeric" && input$numericType == 'discrete') {
list(
radioButtons("numericType", "Interpret numeric:", c("continuous", "discrete") , selected = "discrete"),
sliderInput("discN","Discretization:", min = 1, max = 15, value = 3)
)
} else {
radioButtons("numericType", "Interpret numeric:", c("continuous", "discrete") , selected = "discrete")
}
})
numericType = reactive({
if( length( input$numericType ) == 0 ) {
return( "discrete" )
} else {
return( input$numericType )
}
})
discN = reactive({
if( length( input$discN ) == 0 ) {
return( 3 )
} else {
return( input$discN )
}
})
# Define the final target variable
targetVar_Factor = reactive({
# If the raw column is character, convert to factor
if( is.null( targType() ) ) {
return( NULL )
} else if( targType() == "numeric" && numericType() == "discrete" ) {
if( length( unique( targRaw() ) ) <= discN() ) {
return( targRaw() )
} else if( length( unique( targRaw() ) ) <= 5 ) {
return( targRaw() )
} else {
Y= cut(targRaw(), as.vector(ggplot2:::breaks( targRaw(), "n", n=discN())));
Y[ is.na(Y) ] = levels(Y)[1];
return(Y)
}
} else if( targType() == "numeric" && numericType() == "continuous" ) {
return( targRaw() )
} else if( targType() == "character" ) {
return( as.factor( targRaw() ) )
} else if( targType() == "factor") {
return( targRaw() )
} else {
return( NULL )
}
})
targetVar = reactive({
if( class( targetVar_Factor() ) == "factor" ) {
return( as.numeric( targetVar_Factor() ) );
} else {
return( targetVar_Factor() )
}
})
targColors = reactive({
theseColors = rainbow( length( unique( targetVar() ) ) );
return( theseColors[ as.numeric(as.factor(targetVar())) ] );
})
output$debugInfo = renderUI({
list(
input$thisTargName,
freeCols()
)
})
output$debugTable = renderTable({
as.data.frame( targRaw() )
})
output$debugTable2 = renderTable({
as.data.frame( targetVar_Factor() )
})
output$debugTable3 = renderTable({
as.data.frame( targetVar() )
})
### Statistics summary
cor.mtest <- function(mat, conf.level = 0.95) {
mat <- as.matrix(mat)
n <- ncol(mat)
p.mat <- lowCI.mat <- uppCI.mat <- matrix(NA, n, n)
diag(p.mat) <- 0
diag(lowCI.mat) <- diag(uppCI.mat) <- 1
for (i in 1:(n - 1)) {
for (j in (i + 1):n) {
tmp <- cor.test(mat[, i], mat[, j], conf.level = conf.level)
p.mat[i, j] <- p.mat[j, i] <- tmp$p.value
lowCI.mat[i, j] <- lowCI.mat[j, i] <- tmp$conf.int[1]
uppCI.mat[i, j] <- uppCI.mat[j, i] <- tmp$conf.int[2]
}
}
return(list(p.mat, lowCI.mat, uppCI.mat))
}
output$corrPlot = renderPlot({
res1 <- cor.mtest(indData(), 0.95)
res2 <- cor.mtest(indData(), 0.99)
## mark values that are insignificant at the chosen significance level
M = cor( indData() );
corrplot.mixed(M, p.mat = res1[[1]], sig.level = input$corSigLevel/100)
})
output$corrPlot2 = renderPlot({
chart.Correlation(indData(), method=input$cor2Method, histogram=input$cor2Hist, pch="+")
})
### Distribution fitting
output$distPlot = renderPlot({
fitg = fitdist( indData()[, input$distCol ] , input$distDist);
plot(fitg, demp=input$distDemp )
})
output$distSummary = renderText({
fitg = fitdist( indData()[, input$distCol ] , input$distDist);
capture.output( summary(fitg) );
})
## Make a plot of the straight-up PCA
# Perform PCA
output$distOptions = renderUI({
radioButtons('distCol', 'Column to check:', colnames(indData()), colnames(indData())[1])
})
pca.rows = reactive({
x = indData();
pca.vars = prcomp(~., x , scale = TRUE);
as.data.frame( predict( pca.vars, x ) )
})
# Display
output$pcaRaw = renderPlot({
colors = targColors();
df = cbind(pca.rows(), colors);
p = ggplot(df, mapping=aes_string(x=colnames(df)[input$pca1], y=colnames(df)[input$pca2], colour="colors"))
p = p + scale_color_manual(values = unique( colors ),
labels = unique( targetVar_Factor() ))
return( p + geom_point() )
})
# Run manual SVM
model.svm = reactive({
df = cbind( indData(), targetVar() );
tVar = as.factor(targetVar());
thisGamma = input$gamma;
thisDeg = input$deg;
thisCoef0 = input$coef0;
thisCost = input$cost;
thisK = input$k;
# Each kernel has its own hyperparameters, so might as well just call each one
# TODO: are poly and sigmoid not converging..?
#
switch(input$kernel,
"linear" = svm(tVar ~ ., data = df, kernel = "linear", cost=thisCost, k=thisK),
"radial" = svm(tVar ~ ., data = df, kernel = "radial", gamma=thisGamma, cost=thisCost, k=thisK),
"poly" = svm(tVar ~ ., data = df, kernel = "poly", gamma=thisGamma, deg = input$deg, coef0=thisCoef0, cost=thisCost, k=thisK),
"sigmoid" = svm(tVar ~ ., data = df, kernel = "sigmoid", gamma=thisGamma, coef0=thisCoef0, cost=thisCost, k=thisK)
)
})
# Run auto-tuned SVM
tuned.svm = reactive({
df = indData();
switch(input$kernel,
"linear" = tune.svm(df, as.factor(targetVar()), kernel = "linear"),
"radial" = tune.svm(df, as.factor(targetVar()), kernel = "radial"),
"poly" = tune.svm(df, as.factor(targetVar()), kernel = "poly"),
"sigmoid" = tune.svm(df, as.factor(targetVar()), kernel = "sigmoid")
)
})
# Run auto-tuned SVM on a subset of the PCA space
tuned.svm.pca = reactive({
df = pca.rows()[,1:input$kPCA];
switch(input$kernel,
"linear" = tune.svm(df, as.factor(targetVar()), kernel = "linear"),
"radial" = tune.svm(df, as.factor(targetVar()), kernel = "radial"),
"poly" = tune.svm(df, as.factor(targetVar()), kernel = "poly"),
"sigmoid" = tune.svm(df, as.factor(targetVar()), kernel = "sigmoid")
)
})
# Make plot of PCA along chosen coords
output$pcaPlot <- renderPlot({
# build DF holding data and colors from original PCA
classes = targColors();
df = cbind(pca.rows(), classes) # because ggplot2 prefers data frames
# build DF holding data and colors for just the support vectors
thisIndex = model.svm()$index;
SV_rows = pca.rows()[thisIndex,];
SV_colors = classes[thisIndex];
SV_df = cbind(SV_rows, SV_colors)
# Now visualize, with triangles over support vectors
p = ggplot(df, mapping=aes_string(x=colnames(df)[input$pca1], y=colnames(df)[input$pca2], colour="classes"))
p = p + geom_point()
p = p + geom_point(SV_df, shape=2, mapping=aes_string(x=colnames(SV_df)[input$pca1], y=colnames(SV_df)[input$pca2], colour="SV_colors", size="10"))
p = p + scale_color_manual(values = unique( classes ),
labels = unique( targetVar_Factor() ))
return(p);
})
# Show summary statistics of manually tuned SVM
output$pcaSummary = renderText({
capture.output(summary( model.svm() ))
})
# Make a plot of the tuned SVM performance
output$tunedPlot <- renderPlot({
# build DF holding data and colors from original PCA
classes = targColors();
df = cbind(pca.rows(), classes) # because ggplot2 prefers data frames
# build DF holding data and colors for just the support vectors
thisIndex = tuned.svm()$best.model$index;
SV_rows = pca.rows()[thisIndex,];
SV_colors = classes[thisIndex];
SV_df = cbind(SV_rows, SV_colors)
# Now visualize, with triangles over support vectors
p = ggplot(df, mapping=aes_string(x=colnames(df)[input$pca1], y=colnames(df)[input$pca2], colour="classes"))
p = p + geom_point()
p = p + geom_point(SV_df, shape=2, mapping=aes_string(x=colnames(SV_df)[input$pca1], y=colnames(SV_df)[input$pca2], colour="SV_colors", size="10"))
p = p + scale_color_manual(values = unique( classes ),
labels = unique( targetVar_Factor() ))
return(p);
})
# Show summary statistics of automatically tuned SVM
output$tunedSummary = renderText({
capture.output(summary( tuned.svm()$best.model ))
})
# Show perf of best automatically tuned SVM
output$tunedPerf = renderUI({
list(
"Best performance: ",
      100*(1 - tuned.svm()$best.performance), # convert the proportion to match the "%" label
      "% success rate."
)
})
# Make a plot of the tuned SVM (on PCA data) performance
output$tunedPCAPlot <- renderPlot({
# build DF holding data and colors from original PCA
classes = targColors();
df = cbind(pca.rows(), classes) # because ggplot2 prefers data frames
# build DF holding data and colors for just the support vectors
thisIndex = tuned.svm.pca()$best.model$index;
SV_rows = pca.rows()[thisIndex,];
SV_colors = classes[thisIndex];
SV_df = cbind(SV_rows, SV_colors)
# Now visualize, with triangles over support vectors
p = ggplot(df, mapping=aes_string(x=colnames(df)[input$pca1], y=colnames(df)[input$pca2], colour="classes"))
p = p + geom_point()
p = p + geom_point(SV_df, shape=2, mapping=aes_string(x=colnames(SV_df)[input$pca1], y=colnames(SV_df)[input$pca2], colour="SV_colors", size="10"))
p = p + scale_color_manual(values = unique( classes ),
labels = unique( targetVar_Factor() ))
return(p);
})
# Show summary statistics of automatically tuned SVM
output$tunedPCASummary = renderText({
capture.output(summary( tuned.svm.pca()$best.model ))
})
# Show perf of best automatically tuned SVM
output$tunedPCAPerf = renderUI({
list(
"Best performance: ",
      100*(1 - tuned.svm.pca()$best.performance), # convert the proportion to match the "%" label
      "% success rate."
)
})
# Perform a GLM on the data
output$GLMPlotVarButtons = renderUI({
radioButtons("thisGLMPlotVar", "Select GLM plot variable:", input$theseColNames, input$theseColNames[1]);
})
output$GLMPlot = renderPlot({
Y = targetVar();
df = cbind(Y, indData() );
glm.fit = glm(Y ~., data=df, family=input$GLMFamily );
return( visreg( glm.fit, input$thisGLMPlotVar, type=input$GLMMethod ) );
})
output$GLMDiagPLot = renderPlot({
Y = targetVar();
df = cbind(Y, indData() );
glm.fit = glm(Y ~., data=df, family=input$GLMFamily );
glm.diag = glm.diag( glm.fit );
glm.diag.plots(glm.fit, glm.diag)
})
})
|
#' Change R's prompt when running on R for MS Windows via Wine
#'
#' Options that are set:
#' * `prompt`
#'
#' @author Henrik Bengtsson
options(prompt = "R on Wine> ")
|
/Rprofile.d/interactive=TRUE/ui,os=windows,wine=TRUE.R
|
no_license
|
HenrikBengtsson/dotfiles-for-R
|
R
| false
| false
| 168
|
r
|
#' Change R's prompt when running on R for MS Windows via Wine
#'
#' Options that are set:
#' * `prompt`
#'
#' @author Henrik Bengtsson
options(prompt = "R on Wine> ")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.ec2_operations.R
\name{delete_client_vpn_endpoint}
\alias{delete_client_vpn_endpoint}
\title{Deletes the specified Client VPN endpoint}
\usage{
delete_client_vpn_endpoint(ClientVpnEndpointId, DryRun = NULL)
}
\arguments{
\item{ClientVpnEndpointId}{[required] The ID of the Client VPN to be deleted.}
\item{DryRun}{Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is \code{DryRunOperation}. Otherwise, it is \code{UnauthorizedOperation}.}
}
\description{
Deletes the specified Client VPN endpoint. You must disassociate all target networks before you can delete a Client VPN endpoint.
}
\section{Accepted Parameters}{
\preformatted{delete_client_vpn_endpoint(
ClientVpnEndpointId = "string",
DryRun = TRUE|FALSE
)
}
}
|
/service/paws.ec2/man/delete_client_vpn_endpoint.Rd
|
permissive
|
CR-Mercado/paws
|
R
| false
| true
| 948
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.ec2_operations.R
\name{delete_client_vpn_endpoint}
\alias{delete_client_vpn_endpoint}
\title{Deletes the specified Client VPN endpoint}
\usage{
delete_client_vpn_endpoint(ClientVpnEndpointId, DryRun = NULL)
}
\arguments{
\item{ClientVpnEndpointId}{[required] The ID of the Client VPN to be deleted.}
\item{DryRun}{Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is \code{DryRunOperation}. Otherwise, it is \code{UnauthorizedOperation}.}
}
\description{
Deletes the specified Client VPN endpoint. You must disassociate all target networks before you can delete a Client VPN endpoint.
}
\section{Accepted Parameters}{
\preformatted{delete_client_vpn_endpoint(
ClientVpnEndpointId = "string",
DryRun = TRUE|FALSE
)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BSDA-package.R
\docType{data}
\name{Simpson}
\alias{Simpson}
\title{Grade point averages of men and women participating in various sports-an
illustration of Simpson's paradox}
\format{A data frame with 100 observations on the following 15 variables.
\describe{
\item{gpa}{a numeric vector}
\item{spor}{a numeric vector}
\item{gender}{a numeric vector}
\item{gpamale}{a numeric vector}
\item{sptmale}{a numeric vector}
\item{gpafemal}{a numeric vector}
\item{sptfemal}{a numeric vector}
\item{bbgpa}{a numeric vector}
\item{genderbb}{a numeric vector}
\item{sogpa}{a numeric vector}
\item{genderso}{a numeric vector}
\item{tkgpa}{a numeric vector}
\item{gendertk}{a numeric vector}
\item{gradept}{a numeric vector}
\item{gender2}{a numeric vector}
}}
\description{
Data for Example 1.18
}
\examples{
str(Simpson)
attach(Simpson)
par(mfrow=c(1,2))
boxplot(gpa~gender,col=c("blue","pink"),names=c("Male","Female"),
main="GPA versus Gender",xlab="Gender",ylab="Grade Point Average")
boxplot(gradept~gender2,las=2,col=c("blue","pink"),
names=c("M-BBALL","F-BBALL","M-SOCC","F-SOCC","M-TRAC","F-TRAC"),
ylab="Grade Point Average",main="GPA vs Gender by Sports")
par(mfrow=c(1,1))
detach(Simpson)
}
\references{
Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
Duxbury
}
\keyword{datasets}
|
/man/Simpson.Rd
|
no_license
|
johnsonjc6/BSDA
|
R
| false
| true
| 1,394
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BSDA-package.R
\docType{data}
\name{Simpson}
\alias{Simpson}
\title{Grade point averages of men and women participating in various sports-an
illustration of Simpson's paradox}
\format{A data frame with 100 observations on the following 15 variables.
\describe{
\item{gpa}{a numeric vector}
\item{spor}{a numeric vector}
\item{gender}{a numeric vector}
\item{gpamale}{a numeric vector}
\item{sptmale}{a numeric vector}
\item{gpafemal}{a numeric vector}
\item{sptfemal}{a numeric vector}
\item{bbgpa}{a numeric vector}
\item{genderbb}{a numeric vector}
\item{sogpa}{a numeric vector}
\item{genderso}{a numeric vector}
\item{tkgpa}{a numeric vector}
\item{gendertk}{a numeric vector}
\item{gradept}{a numeric vector}
\item{gender2}{a numeric vector}
}}
\description{
Data for Example 1.18
}
\examples{
str(Simpson)
attach(Simpson)
par(mfrow=c(1,2))
boxplot(gpa~gender,col=c("blue","pink"),names=c("Male","Female"),
main="GPA versus Gender",xlab="Gender",ylab="Grade Point Average")
boxplot(gradept~gender2,las=2,col=c("blue","pink"),
names=c("M-BBALL","F-BBALL","M-SOCC","F-SOCC","M-TRAC","F-TRAC"),
ylab="Grade Point Average",main="GPA vs Gender by Sports")
par(mfrow=c(1,1))
detach(Simpson)
}
\references{
Kitchens, L. J. (2003) \emph{Basic Statistics and Data Analysis}.
Duxbury
}
\keyword{datasets}
|
context("lx_selector")
testthat::test_that(desc = "selector string correct",
{
expect_true(lx_selector(id = "A")=="id:A")
expect_true(lx_selector(id = "A",group = "B")=="id:A,group:B")
expect_true(lx_selector(id = "A",group = "B", zones = c(1,2))=="id:A|1|2,group:B|1|2")
})
|
/tests/testthat/test-lx_selector.R
|
no_license
|
cran/lifx
|
R
| false
| false
| 382
|
r
|
context("lx_selector")
testthat::test_that(desc = "selector string correct",
{
expect_true(lx_selector(id = "A")=="id:A")
expect_true(lx_selector(id = "A",group = "B")=="id:A,group:B")
expect_true(lx_selector(id = "A",group = "B", zones = c(1,2))=="id:A|1|2,group:B|1|2")
})
|
source("~/repo/neonectria_barcoding_012220/sum_trees/read_ASV_dat.LULU_tab.r")
library(Hmsc)
library(MASS)
set.seed(6)
#working with only asv_1 and asv_4 which are the dominant Nf and Nd asvs respectively
asv_tab.neonectria = asv_tab[rownames(asv_tab) %in% c("ASV_1", "ASV_4"),] %>% t()
asv_tab.neonectria[asv_tab.neonectria > 0] = 1
full_metadata.sorted = left_join(
data.frame(sample = rownames(asv_tab.neonectria)),
full_metadata,
by = "sample"
)
#This is 117 samples
#Start with simulated data according to tutorial, but use sample and site random levels from real data
n = 117 #modified to match real dat
ns = 2 #modified to match real dat
beta1 = c(-1,1) #modified to match real dat
alpha = rep(0,ns)
beta = cbind(alpha,beta1)
x = cbind(rep(1,n),rnorm(n))
Lf = x%*%t(beta)
xycoords = matrix(runif(2*n),ncol=2)
colnames(xycoords) = c("x-coordinate","y-coordinate")
rownames(xycoords) = 1:n
sigma.spatial = c(2)
alpha.spatial = c(0.35)
Sigma = sigma.spatial^2*exp(-as.matrix(dist(xycoords))/alpha.spatial)
eta1 = mvrnorm(mu=rep(0,n), Sigma=Sigma)
lambda1 = c(1,2)
Lr = eta1%*%t(lambda1)
L = Lf + Lr
y = as.matrix(L + matrix(rnorm(n*ns),ncol=ns))
yprob = 1*((L +matrix(rnorm(n*ns),ncol=ns))>0)
XData = data.frame(x1=x[,2])
rbPal = colorRampPalette(c('cyan','red'))
par(mfrow=c(2,3))
Col = rbPal(10)[as.numeric(cut(x[,2],breaks = 10))]
plot(xycoords[,2],xycoords[,1],pch = 20,col = Col,main=paste('x'), asp=1)
for(s in 1:ns){
Col = rbPal(10)[as.numeric(cut(y[,s],breaks = 10))]
plot(xycoords[,2],xycoords[,1],pch = 20,col = Col,main=paste('Species',s), asp=1)
}
studyDesign = data.frame(sample = as.factor(full_metadata$sample), site = as.factor(full_metadata.sorted$Site), spatial = as.factor(full_metadata$sample))
rownames(xycoords) = full_metadata$sample
rL.sample = HmscRandomLevel(units = as.factor(full_metadata$sample))
rL.site = HmscRandomLevel(units = as.factor(full_metadata$Site))
rL.spatial = HmscRandomLevel(sData = xycoords)
rL.spatial = setPriors(rL.spatial,nfMin=1,nfMax=1)
m.spatial = Hmsc(Y=yprob, XData=XData, XFormula=~x1,
studyDesign=studyDesign, ranLevels=list("sample"=rL.sample, "site" = rL.site, "spatial" = rL.spatial),distr="probit")
nChains = 2
test.run = TRUE
if (test.run){
# with this option, the vignette runs fast but results are not reliable
thin = 1
samples = 10
transient = 5
verbose = 0
} else {
# with this option, the vignette evaluates slow but it reproduces the results of
# the .pdf version
thin = 10
samples = 1000
transient = 1000
verbose = 0
}
m.spatial = sampleMcmc(m.spatial, thin = thin, samples = samples, transient = transient,
nChains = nChains, verbose = verbose,updater=list(GammaEta=FALSE))
#This works
#Now substitute distance matrix at the site level
xycoords = matrix(runif(2*10),ncol=2)
colnames(xycoords) = c("x-coordinate","y-coordinate")
rownames(xycoords) = levels(as.factor(full_metadata.sorted$Site))
studyDesign = data.frame(sample = as.factor(full_metadata$sample), site = as.factor(full_metadata.sorted$Site), spatial = as.factor(full_metadata$Site))
rL.sample = HmscRandomLevel(units = as.factor(full_metadata$sample))
rL.site = HmscRandomLevel(units = as.factor(full_metadata$Site))
rL.spatial = HmscRandomLevel(sData = xycoords)
rL.spatial = setPriors(rL.spatial,nfMin=1,nfMax=1)
m.spatial = Hmsc(Y=yprob, XData=XData, XFormula=~x1,
studyDesign=studyDesign, ranLevels=list("sample"=rL.sample, "site" = rL.site, "spatial" = rL.spatial),distr="probit")
m.spatial = sampleMcmc(m.spatial, thin = thin, samples = samples, transient = transient,
nChains = nChains, verbose = verbose,updater=list(GammaEta=FALSE))
#This works. now substitute real distances
xycoords = matrix(c(site_info$lat, site_info$lon),ncol=2)
colnames(xycoords) = c("x-coordinate","y-coordinate")
rownames(xycoords) = site_info$Site
studyDesign = data.frame(sample = as.factor(full_metadata$sample), site = as.factor(full_metadata.sorted$Site), spatial = as.factor(full_metadata$Site))
rL.sample = HmscRandomLevel(units = as.factor(full_metadata$sample))
rL.site = HmscRandomLevel(units = as.factor(full_metadata$Site))
rL.spatial = HmscRandomLevel(sData = xycoords)
#rL.spatial = setPriors(rL.spatial,nfMin=1,nfMax=1)
m.spatial = Hmsc(Y=yprob, XData=XData, XFormula=~x1,
studyDesign=studyDesign, ranLevels=list("sample"=rL.sample, "site" = rL.site, "spatial" = rL.spatial),distr="probit")
m.spatial = sampleMcmc(m.spatial, thin = thin, samples = samples, transient = transient,
nChains = nChains, verbose = verbose,updater=list(GammaEta=FALSE))
#This works. Now use real species data
asv_tab.neonectria = asv_tab[rownames(asv_tab) %in% c("ASV_1", "ASV_4"),] %>% t()
Y = as.matrix(asv_tab.neonectria)
asv_tab.neonectria[asv_tab.neonectria > 0] = 1
yprob = asv_tab.neonectria
m.spatial = Hmsc(Y=yprob, XData=XData, XFormula=~x1,
studyDesign=studyDesign, ranLevels=list("sample"=rL.sample, "site" = rL.site, "spatial" = rL.spatial),distr="probit")
m.spatial = sampleMcmc(m.spatial, thin = thin, samples = samples, transient = transient,
nChains = nChains, verbose = verbose,updater=list(GammaEta=FALSE))
#This works. Adding real covariates
XData = full_metadata.sorted %>% dplyr::select("HDD4.mean_nongrowing", "freezeThaw.mean_nongrowing", "ppt.mean_nongrowing", "duration_infection", "RaisedCanker", "Wax", "total_seqs")
XData[,colnames(XData) == "total_seqs"] = log(XData[,colnames(XData) == "total_seqs"]) #fixed typo: the object is XData, not Xdata
m.spatial = Hmsc(Y=yprob, XData=XData, XFormula=~x1,
studyDesign=studyDesign, ranLevels=list("sample"=rL.sample, "site" = rL.site, "spatial" = rL.spatial),distr="probit")
m.spatial = sampleMcmc(m.spatial, thin = thin, samples = samples, transient = transient,
nChains = nChains, verbose = verbose,updater=list(GammaEta=FALSE))
#This works. setting priors for total_seqs covariate to force as offset
xycoords = data.frame(lat = jitter(full_metadata.sorted$lat), lon = jitter(full_metadata.sorted$lon))
colnames(xycoords) = c("x-coordinate","y-coordinate")
rownames(xycoords) = 1:n
sigma.spatial = c(2)
alpha.spatial = c(0.35)
Sigma = sigma.spatial^2*exp(-as.matrix(dist(xycoords))/alpha.spatial)
eta1 = mvrnorm(mu=rep(0,n), Sigma=Sigma)
lambda1 = c(1,2)
Lr = eta1%*%t(lambda1)
L = Lf + Lr
y = as.matrix(L + matrix(rnorm(n*ns),ncol=ns))
yprob = 1*((L +matrix(rnorm(n*ns),ncol=ns))>0)
XData = data.frame(x1=x[,2])
rbPal = colorRampPalette(c('cyan','red'))
par(mfrow=c(2,3))
Col = rbPal(10)[as.numeric(cut(x[,2],breaks = 10))]
plot(xycoords[,2],xycoords[,1],pch = 20,col = Col,main=paste('x'), asp=1)
for(s in 1:ns){
Col = rbPal(10)[as.numeric(cut(y[,s],breaks = 10))]
plot(xycoords[,2],xycoords[,1],pch = 20,col = Col,main=paste('Species',s), asp=1)
}
studyDesign = data.frame(sample = as.factor(1:n))
rL.spatial = HmscRandomLevel(sData = xycoords)
rL.spatial = setPriors(rL.spatial,nfMin=1,nfMax=1)
m.spatial = Hmsc(Y=yprob, XData=XData, XFormula=~x1,
studyDesign=studyDesign, ranLevels=list("sample"=rL.spatial),distr="probit")
nChains = 2
test.run = TRUE
if (test.run){
# with this option, the vignette runs fast but results are not reliable
thin = 1
samples = 10
transient = 5
verbose = 0
} else {
# with this option, the vignette evaluates slow but it reproduces the results of
# the .pdf version
thin = 10
samples = 1000
transient = 1000
verbose = 0
}
m.spatial = sampleMcmc(m.spatial, thin = thin, samples = samples, transient = transient,
nChains = nChains, verbose = verbose,updater=list(GammaEta=FALSE))
#This fails. Appears this is truly a problem with the spatial matrix
#If data points are jittered this works
#Now add random effect of site
xycoords = data.frame(lat = jitter(full_metadata.sorted$lat), lon = jitter(full_metadata.sorted$lon))
colnames(xycoords) = c("x-coordinate","y-coordinate")
rownames(xycoords) = 1:n
sigma.spatial = c(2)
alpha.spatial = c(0.35)
Sigma = sigma.spatial^2*exp(-as.matrix(dist(xycoords))/alpha.spatial)
eta1 = mvrnorm(mu=rep(0,n), Sigma=Sigma)
lambda1 = c(1,2)
Lr = eta1%*%t(lambda1)
L = Lf + Lr
y = as.matrix(L + matrix(rnorm(n*ns),ncol=ns))
yprob = 1*((L +matrix(rnorm(n*ns),ncol=ns))>0)
XData = data.frame(x1=x[,2])
rbPal = colorRampPalette(c('cyan','red'))
par(mfrow=c(2,3))
Col = rbPal(10)[as.numeric(cut(x[,2],breaks = 10))]
plot(xycoords[,2],xycoords[,1],pch = 20,col = Col,main=paste('x'), asp=1)
for(s in 1:ns){
Col = rbPal(10)[as.numeric(cut(y[,s],breaks = 10))]
plot(xycoords[,2],xycoords[,1],pch = 20,col = Col,main=paste('Species',s), asp=1)
}
studyDesign = data.frame(sample = as.factor(1:n), site = as.factor(full_metadata.sorted$Site))
rL.spatial = HmscRandomLevel(sData = xycoords)
rL.spatial = setPriors(rL.spatial,nfMin=1,nfMax=1)
rL.site = HmscRandomLevel(units = as.factor(full_metadata.sorted$Site))
m.spatial = Hmsc(Y=yprob, XData=XData, XFormula=~x1,
studyDesign=studyDesign, ranLevels=list("sample"=rL.spatial, site = rL.site),distr="probit")
nChains = 2
test.run = TRUE
if (test.run){
# with this option, the vignette runs fast but results are not reliable
thin = 1
samples = 10
transient = 5
verbose = 0
} else {
# with this option, the vignette evaluates slow but it reproduces the results of
# the .pdf version
thin = 10
samples = 1000
transient = 1000
verbose = 0
}
m.spatial = sampleMcmc(m.spatial, thin = thin, samples = samples, transient = transient,
nChains = nChains, verbose = verbose,updater=list(GammaEta=FALSE))
#Adding site as random level works with jittered spatial points, but not with raw data (i.e. not jittered)
#####################
#Testing according to https://github.com/hmsc-r/HMSC/issues/8
hM = Hmsc(Y=yprob, XData=XData, XFormula=~x1,
studyDesign=studyDesign, ranLevels=list("sample"=rL.spatial),distr="probit")
#run lines 46-197 in computeDataParameters.R
W %>% range #should be positive
isSymmetric(W) #should be true
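#Added illustrative checks (not part of the original workflow): inspect the
#distance matrix built from whatever coordinates are passed as sData; repeated
#coordinate rows (zero off-diagonal distances) are a plausible cause of the
#failure noted above, consistent with jittering making the model run
D = as.matrix(dist(xycoords))
range(D[upper.tri(D)]) #zeros here mean two samples share identical coordinates
isSymmetric(D)
anyDuplicated(xycoords) #> 0 means at least one duplicated coordinate row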
#Try to set up with a random level of site, and distances defined on site
require(Hmsc)
require(vegan)
library(parallel)
library(corrplot)
require(MASS)
set.seed(1)
#vignette_3_multivariate_high
#Getting started with HMSC-R: high-dimensional
#multivariate models (source, pdf)
#vignette_2_multivariate_low
#Getting started with HMSC-R: low-dimensional
#multivariate models (source, pdf)
#vignette_4_spatial Getting started with HMSC-R: spatial models
#(source, pdf)
#vignette_1_univariate Getting started with HMSC-R: univariate models
#(source, pdf)
#vignette_5_performance
#Testing the performance of Hmsc with simulated
#data (source, pdf)
#source("~/repo/neonectria_barcoding_012220/sum_trees/read_ASV_dat.LULU_tab.r")
#working with only asv_1 and asv_4 which are the dominant Nf and Nd asvs respectively
asv_tab.neonectria = asv_tab[rownames(asv_tab) %in% c("ASV_1", "ASV_4"),] %>% t()
asv_tab.neonectria[asv_tab.neonectria > 0] = 1
full_metadata.sorted = left_join(
data.frame(sample = rownames(asv_tab.neonectria)),
full_metadata,
by = "sample"
)
#############################
#Get spatial distance matrix#
#Or use lat lon
xycoords = data.frame(lat = jitter(full_metadata.sorted$lat), lon = jitter(full_metadata.sorted$lon))
rownames(xycoords) = full_metadata.sorted$sample
#rownames(xycoords) = full_metadata.sorted$sample
rL.spatial = HmscRandomLevel(sData = xycoords)
#rL.spatial = setPriors(rL.spatial,nfMin=1,nfMax=1)
############################
#Begin of model specification
#Species matrix
Y = as.matrix(asv_tab.neonectria)
#rownames(Y) = NULL
#covariate data fixed effects
XData = full_metadata.sorted %>% dplyr::select("HDD4.mean_nongrowing", "freezeThaw.mean_nongrowing", "ppt.mean_nongrowing", "duration_infection", "RaisedCanker", "Wax", "total_seqs")
#total_seqs should be supplied as part of the xData as described here https://github.com/hmsc-r/HMSC/issues/10 may also need to adjust priors
#Supply sample designation to study design
studyDesign = data.frame(
spatial = as.factor(full_metadata.sorted$sample)
)
#It's very important to set above to factor and not character, otherwise the preds are not recoverable
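#Added guard (illustrative): fail early if any study design column is not a factor, per the note above
stopifnot(all(sapply(studyDesign, is.factor)))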
#Would include this as a random effect if not setting spatial mat
#rL = HmscRandomLevel(units = studyDesign$sample)
#rL$nfMax = 15
#model specification
m.spatial = Hmsc(Y=Y, XData=XData, XFormula=~.,
studyDesign=studyDesign, ranLevels=list(spatial = rL.spatial),distr="probit")
###########################
#MCMC sampling
#if running fast set test.run = T, but these results will not be reliable
nChains = 2
test.run = T
if (test.run){
# with this option, the vignette runs fast but results are not reliable
thin = 1
samples = 10
transient = 5
verbose = 0
} else {
  # with this option, the vignette evaluates slowly but it reproduces the results of
# the .pdf version
thin = 10
samples = 1000
transient = 1000
verbose = 0
}
m.spatial = sampleMcmc(m.spatial, thin = thin, samples = samples, transient = transient,
nChains = nChains, verbose = verbose,updater=list(GammaEta=FALSE))
######################
#MCMC convergence
#ess == effective sample size
#psrf == potential scale reduction factor (close to 1 indicates good MCMC convergence)
#beta == species niches (as related to covariates)
#gamma == influence of traits on species niches
#omega == residual species association
#rho == phylogenetic signal
mpost = convertToCodaObject(m.spatial)
par(mfrow=c(3,2))
ess.beta = effectiveSize(mpost$Beta)
psrf.beta = gelman.diag(mpost$Beta, multivariate=FALSE)$psrf
hist(ess.beta)
hist(psrf.beta)
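#(optional added check) numeric complement to the histograms, using the common rule
#of thumb that psrf point estimates should be below about 1.1
max(psrf.beta[,1]) < 1.1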
ess.gamma = effectiveSize(mpost$Gamma)
psrf.gamma = gelman.diag(mpost$Gamma, multivariate=FALSE)$psrf
hist(ess.gamma)
hist(psrf.gamma)
ns = 50
sppairs = matrix(sample(x = 1:ns^2, size = 100))
tmp = mpost$Omega[[1]]
for (chain in 1:length(tmp)){
tmp[[chain]] = tmp[[chain]][,sppairs]
}
ess.omega = effectiveSize(tmp)
psrf.omega = gelman.diag(tmp, multivariate=FALSE)$psrf
hist(ess.omega)
hist(psrf.omega)
##########################
#Model fit and partitioning
preds = computePredictedValues(m.spatial)
MF = evaluateModelFit(hM=m.spatial, predY=preds)
hist(MF$TjurR2, xlim = c(0,1), main=paste0("Mean = ", round(mean(MF$TjurR2),2)))
#######################
#Variance partitioning
#the group variable assigns X covariates to different covariate groups
#so first look at the design matrix
head(m.spatial$X)
#For our real data we fit an intercept and several continuous variables, so they can each be assigned separate groups
#If instead we had a categorical variable the levels could be assigned to a single group along with the intercept
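#(added illustration, not used here) if a factor covariate expanded into, say, two dummy
#columns of m.spatial$X, those columns would share one group id with the intercept, e.g.
#for design-matrix columns (intercept, level2, level3, x1, x2):
#group = c(1, 1, 1, 2, 3); groupnames = c("cat_var", "x1", "x2")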
VP = computeVariancePartitioning(m.spatial, group = c(1,2,3,4,5,6,7,8,9), groupnames = c("intercept","HDD4.nongrowing", "freezeThaw.nongrowing", "ppt.nongrowing", "duration_infection", "RaisedCanker", "Wax", "total_seqs","spatial"))
plotVariancePartitioning(m.spatial, VP = VP)
####################
#Plot variance partitioning
#If a squared continuous variable were included, a negative response would indicate an intermediate niche optimum (i.e., abundance goes up initially but then goes down)
postBeta = getPostEstimate(m.spatial, parName = "Beta")
plotBeta(m.spatial, post = postBeta, param = "Support",
plotTree = F, supportLevel = 0.95, split=.4, spNamesNumbers = c(T,F))
#This can also be mapped on a tree with plotTree = T, but then tree must be included in model
############################################
#transform VP and postBeta object for ggplot
#VP contains R2 vals and postBeta contains support (i.e. alpha)
VP.vals = data.frame(VP$vals)
colnames(VP.vals) = c("Nf", "Nd")
VP.vals = VP.vals[1:8,] #this removes the random effect
VP.vals$variable = c("intercept","HDD4.nongrowing", "freezeThaw.nongrowing", "ppt.nongrowing", "duration_infection", "cankers", "wax", "total_seqs")
VP.vals.long = VP.vals %>%
pivot_longer(-variable, names_to = "ASV", values_to = "R2")
#Transform R2 based on positive or negative response
postBeta.mean = data.frame(postBeta$mean)
colnames(postBeta.mean) = c("Nf", "Nd")
postBeta.mean$variable = c("intercept","HDD4.nongrowing", "freezeThaw.nongrowing", "ppt.nongrowing", "duration_infection", "cankers", "wax", "total_seqs")
postBeta.mean.long = postBeta.mean %>%
pivot_longer(-variable, names_to = "ASV", values_to = "mean")
for(i in 1:length(postBeta.mean.long$mean)){
if(postBeta.mean.long$mean[i] < 0){
VP.vals.long$R2[i] = VP.vals.long$R2[i] * -1
}
}
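#(added equivalent, vectorized sketch -- left commented out so the loop above remains the
#single place where the sign flip happens; relies on the same row alignment the loop assumes)
#VP.vals.long$R2 = ifelse(postBeta.mean.long$mean < 0, -VP.vals.long$R2, VP.vals.long$R2)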
postBeta.support = data.frame(postBeta$support)
colnames(postBeta.support) = c("Nf", "Nd")
postBeta.support$variable = c("intercept","HDD4.nongrowing", "freezeThaw.nongrowing", "ppt.nongrowing", "duration_infection", "cankers", "wax", "total_seqs")
postBeta.support.long = postBeta.support %>%
pivot_longer(-variable, names_to = "ASV", values_to = "support")
postBeta.supportNeg = data.frame(postBeta$supportNeg)
colnames(postBeta.supportNeg) = c("Nf", "Nd")
postBeta.supportNeg$variable = c("intercept","HDD4.nongrowing", "freezeThaw.nongrowing", "ppt.nongrowing", "duration_infection", "cankers", "wax", "total_seqs")
postBeta.supportNeg.long = postBeta.supportNeg %>%
pivot_longer(-variable, names_to = "ASV", values_to = "supportNeg")
VP.vals.support = full_join(VP.vals.long, postBeta.support.long) %>%
full_join(., postBeta.supportNeg.long)
VP.vals.support = data.frame(VP.vals.support)
VP.vals.support$P.val = vector(mode = "character", length = length(VP.vals.support$support))
for(i in 1:length(VP.vals.support$P.val)){
if(VP.vals.support$supportNeg[i] > 0.95 || VP.vals.support$support[i] > 0.95){
VP.vals.support$P.val[i] = "P<0.05"
}else{
VP.vals.support$P.val[i] = "n.s."
}
}
require(RColorBrewer)
source("~/ggplot_theme.txt")
VP.vals.support$variable = factor(VP.vals.support$variable, levels = c("intercept","HDD4.nongrowing", "freezeThaw.nongrowing", "ppt.nongrowing", "duration_infection", "cankers", "wax", "total_seqs"))
p1 = ggplot(VP.vals.support, aes(ASV, variable, fill = R2, color = P.val)) +
geom_tile(size = 1, height = 0.975, width = 0.975) +
scale_fill_gradient2(low = "#2c7bb6", high = "#d7191c", mid = "white", midpoint = 0) +
scale_color_manual(values = c("P<0.05" = "black", "n.s." = "white"), ) +
my_gg_theme +
labs(
x = "",
y = "",
fill = "R2",
color = "alpha",
title = "HMSC variance partitioning\nNeonectria presence-absence"
) +
theme(legend.title = element_text(size = 20))
pdf("HMSC/Nf_Nd_variance_partitioning.bin.pdf", width = 8, height = 8)
p1
dev.off()
#scale_fill_continuous(limits = c(-1,1))
####################
####################
#Estimated residual var between spp
OmegaCor = computeAssociations(m.spatial)
supportLevel = 0.55 #posterior support threshold (0.95 would correspond to alpha = 0.05)
toPlot = ((OmegaCor[[1]]$support>supportLevel)
+ (OmegaCor[[1]]$support<(1-supportLevel))>0)*OmegaCor[[1]]$mean
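#(added note) the expression above keeps the posterior mean only for species pairs whose
#support for a positive or negative association exceeds supportLevel (support > 0.55 or
#support < 0.45 here) and sets all other cells to zero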
corrplot(toPlot, method = "color",
col=colorRampPalette(c("blue","white","red"))(500),
tl.cex=.6, tl.col="black",
title=paste("random effect level:", m.spatial$rLNames[1]), mar=c(0,0,1,0))
#################
#Refit model with additional params
#covariate data fixed effects
XData = full_metadata.sorted %>% dplyr::select("HDD4.mean_nongrowing", "freezeThaw.mean_nongrowing", "ppt.mean_nongrowing", "duration_infection", "RaisedCanker", "Wax", "dbh", "TreeCond", "total_seqs")
#########################################################
#This block works through MCMC
rL = HmscRandomLevel(units = full_metadata.sorted$sample)
studyDesign = data.frame(
sample = full_metadata.sorted$sample
)
m.spatial = Hmsc(Y=Y, XData=XData, XFormula=~.,
studyDesign=studyDesign, ranLevels=list(sample = rL),distr="probit")
#########################################################
m.spatial = sampleMcmc(m.spatial, thin = thin, samples = samples, transient = transient,
nChains = nChains, verbose = verbose,updater=list(GammaEta=FALSE))
######################
#MCMC convergence
#ess == effective sample size
#psrf == potential scale reduction factor (close to 1 indicates good MCMC convergence)
#beta == species niches (as related to covariates)
#gamma == influence of traits on species niches
#omega == residual species association
#rho == phylogenetic signal
mpost = convertToCodaObject(m.spatial)
par(mfrow=c(3,2))
ess.beta = effectiveSize(mpost$Beta)
psrf.beta = gelman.diag(mpost$Beta, multivariate=FALSE)$psrf
hist(ess.beta)
hist(psrf.beta)
ess.gamma = effectiveSize(mpost$Gamma)
psrf.gamma = gelman.diag(mpost$Gamma, multivariate=FALSE)$psrf
hist(ess.gamma)
hist(psrf.gamma)
ns = 50
sppairs = matrix(sample(x = 1:ns^2, size = 100))
tmp = mpost$Omega[[1]]
for (chain in 1:length(tmp)){
tmp[[chain]] = tmp[[chain]][,sppairs]
}
ess.omega = effectiveSize(tmp)
psrf.omega = gelman.diag(tmp, multivariate=FALSE)$psrf
hist(ess.omega)
hist(psrf.omega)
##########################
#Model fit and partitioning
preds = computePredictedValues(m.spatial)
MF = evaluateModelFit(hM=m.spatial, predY=preds)
hist(MF$TjurR2, xlim = c(0,1), main=paste0("Mean = ", round(mean(MF$TjurR2),2)))
#######################
#Variance partitioning
#the group variable assigns X covariates to different covariate groups
#so first look at the design matrix
head(m.spatial$X)
#For our real data we fit an intercept and several continuous variables, so they can each be assigned separate groups
#If instead we had a categorical variable the levels could be assigned to a single group along with the intercept
VP = computeVariancePartitioning(m.spatial, group = c(1,2,3,4,5,6,7,8,9,10), groupnames = c("intercept","HDD4.mean_nongrowing", "freezeThaw.mean_nongrowing", "ppt.mean_nongrowing", "duration_infection", "RaisedCanker", "Wax", "dbh", "TreeCond", "total_seqs"))
plotVariancePartitioning(m.spatial, VP = VP)
####################
#Plot variance partitioning
#If a squared continuous variable were included, a negative response would indicate an intermediate niche optimum (i.e., abundance goes up initially but then goes down)
postBeta = getPostEstimate(m.spatial, parName = "Beta")
plotBeta(m.spatial, post = postBeta, param = "Support",
plotTree = F, supportLevel = 0.95, split=.4, spNamesNumbers = c(T,F))
#This can also be mapped on a tree with plotTree = T, but then tree must be included in model
############################################
#transform VP and postBeta object for ggplot
#VP contains R2 vals and postBeta contains support (i.e. alpha)
VP.vals = data.frame(VP$vals)
colnames(VP.vals) = c("Nf", "Nd")
VP.vals = VP.vals[1:10,] #this removes the random effect
VP.vals$variable = c("intercept","HDD4.mean_nongrowing", "freezeThaw.mean_nongrowing", "ppt.mean_nongrowing", "duration_infection", "cankers", "wax", "dbh", "treeCond", "total_seqs")
VP.vals.long = VP.vals %>%
pivot_longer(-variable, names_to = "ASV", values_to = "R2")
#Transform R2 based on positive or negative response
postBeta.mean = data.frame(postBeta$mean)
colnames(postBeta.mean) = c("Nf", "Nd")
postBeta.mean$variable = c("intercept","HDD4.mean_nongrowing", "freezeThaw.mean_nongrowing", "ppt.mean_nongrowing", "duration_infection", "cankers", "wax", "dbh", "treeCond", "total_seqs")
postBeta.mean.long = postBeta.mean %>%
pivot_longer(-variable, names_to = "ASV", values_to = "mean")
for(i in 1:length(postBeta.mean.long$mean)){
if(postBeta.mean.long$mean[i] < 0){
VP.vals.long$R2[i] = VP.vals.long$R2[i] * -1
}
}
postBeta.support = data.frame(postBeta$support)
colnames(postBeta.support) = c("Nf", "Nd")
postBeta.support$variable = c("intercept","HDD4.mean_nongrowing", "freezeThaw.mean_nongrowing", "ppt.mean_nongrowing", "duration_infection", "cankers", "wax", "dbh", "treeCond", "total_seqs")
postBeta.support.long = postBeta.support %>%
pivot_longer(-variable, names_to = "ASV", values_to = "support")
postBeta.supportNeg = data.frame(postBeta$supportNeg)
colnames(postBeta.supportNeg) = c("Nf", "Nd")
postBeta.supportNeg$variable = c("intercept","HDD4.mean_nongrowing", "freezeThaw.mean_nongrowing", "ppt.mean_nongrowing", "duration_infection", "cankers", "wax", "dbh", "treeCond", "total_seqs")
postBeta.supportNeg.long = postBeta.supportNeg %>%
pivot_longer(-variable, names_to = "ASV", values_to = "supportNeg")
VP.vals.support = full_join(VP.vals.long, postBeta.support.long) %>%
full_join(., postBeta.supportNeg.long)
VP.vals.support = data.frame(VP.vals.support)
VP.vals.support$P.val = vector(mode = "character", length = length(VP.vals.support$support))
for(i in 1:length(VP.vals.support$P.val)){
if(VP.vals.support$supportNeg[i] > 0.95 || VP.vals.support$support[i] > 0.95){
VP.vals.support$P.val[i] = "P<0.05"
}else{
VP.vals.support$P.val[i] = "n.s."
}
}
require(RColorBrewer)
source("~/ggplot_theme.txt")
VP.vals.support$variable = factor(VP.vals.support$variable, levels = c("intercept","HDD4.mean_nongrowing", "freezeThaw.mean_nongrowing", "ppt.mean_nongrowing", "duration_infection", "cankers", "wax", "dbh", "treeCond", "total_seqs"))
p1 = ggplot(VP.vals.support, aes(ASV, variable, fill = R2, color = P.val)) +
geom_tile(size = 1, height = 0.975, width = 0.975) +
scale_fill_gradient2(low = "#2c7bb6", high = "#d7191c", mid = "white", midpoint = 0) +
scale_color_manual(values = c("P<0.05" = "black", "n.s." = "white"), ) +
my_gg_theme +
labs(
x = "",
y = "",
fill = "R2",
color = "alpha",
title = "HMSC variance partitioning\nNeonectria presence-absence"
) +
theme(legend.title = element_text(size = 20))
pdf("HMSC/Nf_Nd_variance_partitioning.bin.plus_vars.pdf", width = 8, height = 8)
p1
dev.off()
|
/Hmsc/Nf_Nd_bin_variance_spatial.spatial_test_site_level.r
|
no_license
|
ewmorr/neonectria_barcoding_012220
|
R
| false
| false
| 25,396
|
r
|
source("~/repo/neonectria_barcoding_012220/sum_trees/read_ASV_dat.LULU_tab.r")
library(Hmsc)
library(MASS)
set.seed(6)
#working with only asv_1 and asv_4 which are the dominant Nf and Nd asvs respectively
asv_tab.neonectria = asv_tab[rownames(asv_tab) %in% c("ASV_1", "ASV_4"),] %>% t()
asv_tab.neonectria[asv_tab.neonectria > 0] = 1
full_metadata.sorted = left_join(
data.frame(sample = rownames(asv_tab.neonectria)),
full_metadata,
by = "sample"
)
#This is 117 samples
#Start with simulated data according to tutorial, but use sample and site random levels from real data
n = 117 #modified to match real dat
ns = 2 #modified to match real dat
beta1 = c(-1,1) #modified to match real dat
alpha = rep(0,ns)
beta = cbind(alpha,beta1)
x = cbind(rep(1,n),rnorm(n))
Lf = x%*%t(beta)
xycoords = matrix(runif(2*n),ncol=2)
colnames(xycoords) = c("x-coordinate","y-coordinate")
rownames(xycoords) = 1:n
sigma.spatial = c(2)
alpha.spatial = c(0.35)
Sigma = sigma.spatial^2*exp(-as.matrix(dist(xycoords))/alpha.spatial)
eta1 = mvrnorm(mu=rep(0,n), Sigma=Sigma)
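#(added note) Sigma above is an exponential spatial covariance, sigma.spatial^2 * exp(-d/alpha.spatial),
#and eta1 is one multivariate-normal draw of the spatially structured latent factor;
#lambda1 below gives the species loadings on that factor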
lambda1 = c(1,2)
Lr = eta1%*%t(lambda1)
L = Lf + Lr
y = as.matrix(L + matrix(rnorm(n*ns),ncol=ns))
yprob = 1*((L +matrix(rnorm(n*ns),ncol=ns))>0)
XData = data.frame(x1=x[,2])
rbPal = colorRampPalette(c('cyan','red'))
par(mfrow=c(2,3))
Col = rbPal(10)[as.numeric(cut(x[,2],breaks = 10))]
plot(xycoords[,2],xycoords[,1],pch = 20,col = Col,main=paste('x'), asp=1)
for(s in 1:ns){
Col = rbPal(10)[as.numeric(cut(y[,s],breaks = 10))]
plot(xycoords[,2],xycoords[,1],pch = 20,col = Col,main=paste('Species',s), asp=1)
}
studyDesign = data.frame(sample = as.factor(full_metadata$sample), site = as.factor(full_metadata.sorted$Site), spatial = as.factor(full_metadata$sample))
rownames(xycoords) = full_metadata$sample
rL.sample = HmscRandomLevel(units = as.factor(full_metadata$sample))
rL.site = HmscRandomLevel(units = as.factor(full_metadata$Site))
rL.spatial = HmscRandomLevel(sData = xycoords)
rL.spatial = setPriors(rL.spatial,nfMin=1,nfMax=1)
m.spatial = Hmsc(Y=yprob, XData=XData, XFormula=~x1,
studyDesign=studyDesign, ranLevels=list("sample"=rL.sample, "site" = rL.site, "spatial" = rL.spatial),distr="probit")
nChains = 2
test.run = TRUE
if (test.run){
# with this option, the vignette runs fast but results are not reliable
thin = 1
samples = 10
transient = 5
verbose = 0
} else {
  # with this option, the vignette evaluates slowly but it reproduces the results of
# the .pdf version
thin = 10
samples = 1000
transient = 1000
verbose = 0
}
m.spatial = sampleMcmc(m.spatial, thin = thin, samples = samples, transient = transient,
nChains = nChains, verbose = verbose,updater=list(GammaEta=FALSE))
#This works
#Now substitute distance matrix at the site level
xycoords = matrix(runif(2*10),ncol=2)
colnames(xycoords) = c("x-coordinate","y-coordinate")
rownames(xycoords) = levels(as.factor(full_metadata.sorted$Site))
studyDesign = data.frame(sample = as.factor(full_metadata$sample), site = as.factor(full_metadata.sorted$Site), spatial = as.factor(full_metadata$Site))
rL.sample = HmscRandomLevel(units = as.factor(full_metadata$sample))
rL.site = HmscRandomLevel(units = as.factor(full_metadata$Site))
rL.spatial = HmscRandomLevel(sData = xycoords)
rL.spatial = setPriors(rL.spatial,nfMin=1,nfMax=1)
m.spatial = Hmsc(Y=yprob, XData=XData, XFormula=~x1,
studyDesign=studyDesign, ranLevels=list("sample"=rL.sample, "site" = rL.site, "spatial" = rL.spatial),distr="probit")
m.spatial = sampleMcmc(m.spatial, thin = thin, samples = samples, transient = transient,
nChains = nChains, verbose = verbose,updater=list(GammaEta=FALSE))
#This works. now substitute real distances
xycoords = matrix(c(site_info$lat, site_info$lon),ncol=2)
colnames(xycoords) = c("x-coordinate","y-coordinate")
rownames(xycoords) = site_info$Site
studyDesign = data.frame(sample = as.factor(full_metadata$sample), site = as.factor(full_metadata.sorted$Site), spatial = as.factor(full_metadata$Site))
rL.sample = HmscRandomLevel(units = as.factor(full_metadata$sample))
rL.site = HmscRandomLevel(units = as.factor(full_metadata$Site))
rL.spatial = HmscRandomLevel(sData = xycoords)
#rL.spatial = setPriors(rL.spatial,nfMin=1,nfMax=1)
m.spatial = Hmsc(Y=yprob, XData=XData, XFormula=~x1,
studyDesign=studyDesign, ranLevels=list("sample"=rL.sample, "site" = rL.site, "spatial" = rL.spatial),distr="probit")
m.spatial = sampleMcmc(m.spatial, thin = thin, samples = samples, transient = transient,
nChains = nChains, verbose = verbose,updater=list(GammaEta=FALSE))
#This works. Now use real species data
asv_tab.neonectria = asv_tab[rownames(asv_tab) %in% c("ASV_1", "ASV_4"),] %>% t()
Y = as.matrix(asv_tab.neonectria)
asv_tab.neonectria[asv_tab.neonectria > 0] = 1
yprob = asv_tab.neonectria
m.spatial = Hmsc(Y=yprob, XData=XData, XFormula=~x1,
studyDesign=studyDesign, ranLevels=list("sample"=rL.sample, "site" = rL.site, "spatial" = rL.spatial),distr="probit")
m.spatial = sampleMcmc(m.spatial, thin = thin, samples = samples, transient = transient,
nChains = nChains, verbose = verbose,updater=list(GammaEta=FALSE))
#This works. Adding real covariates
XData = full_metadata.sorted %>% dplyr::select("HDD4.mean_nongrowing", "freezeThaw.mean_nongrowing", "ppt.mean_nongrowing", "duration_infection", "RaisedCanker", "Wax", "total_seqs")
XData[,colnames(XData) == "total_seqs"] = log(XData[,colnames(XData) == "total_seqs"])
m.spatial = Hmsc(Y=yprob, XData=XData, XFormula=~x1,
studyDesign=studyDesign, ranLevels=list("sample"=rL.sample, "site" = rL.site, "spatial" = rL.spatial),distr="probit")
m.spatial = sampleMcmc(m.spatial, thin = thin, samples = samples, transient = transient,
nChains = nChains, verbose = verbose,updater=list(GammaEta=FALSE))
#This works. Setting priors for total_seqs covariate to force as offset
xycoords = data.frame(lat = jitter(full_metadata.sorted$lat), lon = jitter(full_metadata.sorted$lon))
colnames(xycoords) = c("x-coordinate","y-coordinate")
rownames(xycoords) = 1:n
sigma.spatial = c(2)
alpha.spatial = c(0.35)
Sigma = sigma.spatial^2*exp(-as.matrix(dist(xycoords))/alpha.spatial)
eta1 = mvrnorm(mu=rep(0,n), Sigma=Sigma)
lambda1 = c(1,2)
Lr = eta1%*%t(lambda1)
L = Lf + Lr
y = as.matrix(L + matrix(rnorm(n*ns),ncol=ns))
yprob = 1*((L +matrix(rnorm(n*ns),ncol=ns))>0)
XData = data.frame(x1=x[,2])
rbPal = colorRampPalette(c('cyan','red'))
par(mfrow=c(2,3))
Col = rbPal(10)[as.numeric(cut(x[,2],breaks = 10))]
plot(xycoords[,2],xycoords[,1],pch = 20,col = Col,main=paste('x'), asp=1)
for(s in 1:ns){
Col = rbPal(10)[as.numeric(cut(y[,s],breaks = 10))]
plot(xycoords[,2],xycoords[,1],pch = 20,col = Col,main=paste('Species',s), asp=1)
}
studyDesign = data.frame(sample = as.factor(1:n))
rL.spatial = HmscRandomLevel(sData = xycoords)
rL.spatial = setPriors(rL.spatial,nfMin=1,nfMax=1)
m.spatial = Hmsc(Y=yprob, XData=XData, XFormula=~x1,
studyDesign=studyDesign, ranLevels=list("sample"=rL.spatial),distr="probit")
nChains = 2
test.run = TRUE
if (test.run){
# with this option, the vignette runs fast but results are not reliable
thin = 1
samples = 10
transient = 5
verbose = 0
} else {
  # with this option, the vignette evaluates slowly but it reproduces the results of
# the .pdf version
thin = 10
samples = 1000
transient = 1000
verbose = 0
}
m.spatial = sampleMcmc(m.spatial, thin = thin, samples = samples, transient = transient,
nChains = nChains, verbose = verbose,updater=list(GammaEta=FALSE))
#This fails. Appears this is truly a problem with the spatial matrix
#If data points are jittered this works
#Now add random effect of site
xycoords = data.frame(lat = jitter(full_metadata.sorted$lat), lon = jitter(full_metadata.sorted$lon))
colnames(xycoords) = c("x-coordinate","y-coordinate")
rownames(xycoords) = 1:n
sigma.spatial = c(2)
alpha.spatial = c(0.35)
Sigma = sigma.spatial^2*exp(-as.matrix(dist(xycoords))/alpha.spatial)
eta1 = mvrnorm(mu=rep(0,n), Sigma=Sigma)
lambda1 = c(1,2)
Lr = eta1%*%t(lambda1)
L = Lf + Lr
y = as.matrix(L + matrix(rnorm(n*ns),ncol=ns))
yprob = 1*((L +matrix(rnorm(n*ns),ncol=ns))>0)
XData = data.frame(x1=x[,2])
rbPal = colorRampPalette(c('cyan','red'))
par(mfrow=c(2,3))
Col = rbPal(10)[as.numeric(cut(x[,2],breaks = 10))]
plot(xycoords[,2],xycoords[,1],pch = 20,col = Col,main=paste('x'), asp=1)
for(s in 1:ns){
Col = rbPal(10)[as.numeric(cut(y[,s],breaks = 10))]
plot(xycoords[,2],xycoords[,1],pch = 20,col = Col,main=paste('Species',s), asp=1)
}
studyDesign = data.frame(sample = as.factor(1:n), site = as.factor(full_metadata.sorted$Site))
rL.spatial = HmscRandomLevel(sData = xycoords)
rL.spatial = setPriors(rL.spatial,nfMin=1,nfMax=1)
rL.site = HmscRandomLevel(units = as.factor(full_metadata.sorted$Site))
m.spatial = Hmsc(Y=yprob, XData=XData, XFormula=~x1,
studyDesign=studyDesign, ranLevels=list("sample"=rL.spatial, site = rL.site),distr="probit")
nChains = 2
test.run = TRUE
if (test.run){
# with this option, the vignette runs fast but results are not reliable
thin = 1
samples = 10
transient = 5
verbose = 0
} else {
  # with this option, the vignette evaluates slowly but it reproduces the results of
# the .pdf version
thin = 10
samples = 1000
transient = 1000
verbose = 0
}
m.spatial = sampleMcmc(m.spatial, thin = thin, samples = samples, transient = transient,
nChains = nChains, verbose = verbose,updater=list(GammaEta=FALSE))
#Adding site as random level works with jittered spatial points, but not with raw data (i.e. not jittered)
#####################
#Testing according to https://github.com/hmsc-r/HMSC/issues/8
hM = Hmsc(Y=yprob, XData=XData, XFormula=~x1,
studyDesign=studyDesign, ranLevels=list("sample"=rL.spatial),distr="probit")
#run lines 46-197 in computeDataParameters.R
W %>% range #should be positive
isSymmetric(W) #should be true
#Try to set up with a random level of site, and distances defined on site
require(Hmsc)
require(vegan)
library(parallel)
library(corrplot)
require(MASS)
set.seed(1)
#vignette_3_multivariate_high
#Getting started with HMSC-R: high-dimensional
#multivariate models (source, pdf)
#vignette_2_multivariate_low
#Getting started with HMSC-R: low-dimensional
#multivariate models (source, pdf)
#vignette_4_spatial Getting started with HMSC-R: spatial models
#(source, pdf)
#vignette_1_univariate Getting started with HMSC-R: univariate models
#(source, pdf)
#vignette_5_performance
#Testing the performance of Hmsc with simulated
#data (source, pdf)
#source("~/repo/neonectria_barcoding_012220/sum_trees/read_ASV_dat.LULU_tab.r")
#working with only asv_1 and asv_4 which are the dominant Nf and Nd asvs respectively
asv_tab.neonectria = asv_tab[rownames(asv_tab) %in% c("ASV_1", "ASV_4"),] %>% t()
asv_tab.neonectria[asv_tab.neonectria > 0] = 1
full_metadata.sorted = left_join(
data.frame(sample = rownames(asv_tab.neonectria)),
full_metadata,
by = "sample"
)
#############################
#Get spatial distance matrix#
#Or use lat lon
xycoords = data.frame(lat = jitter(full_metadata.sorted$lat), lon = jitter(full_metadata.sorted$lon))
rownames(xycoords) = full_metadata.sorted$sample
#rownames(xycoords) = full_metadata.sorted$sample
rL.spatial = HmscRandomLevel(sData = xycoords)
#rL.spatial = setPriors(rL.spatial,nfMin=1,nfMax=1)
############################
#Begin of model specification
#Species matrix
Y = as.matrix(asv_tab.neonectria)
#rownames(Y) = NULL
#covariate data fixed effects
XData = full_metadata.sorted %>% dplyr::select("HDD4.mean_nongrowing", "freezeThaw.mean_nongrowing", "ppt.mean_nongrowing", "duration_infection", "RaisedCanker", "Wax", "total_seqs")
#total_seqs should be supplied as part of the xData as described here https://github.com/hmsc-r/HMSC/issues/10; priors may also need to be adjusted
#Supply sample designation to study design
studyDesign = data.frame(
spatial = as.factor(full_metadata.sorted$sample)
)
#It's very important to set the above to factor and not character, otherwise the preds are not recoverable
#Would include this as a random effect if not setting spatial mat
#rL = HmscRandomLevel(units = studyDesign$sample)
#rL$nfMax = 15
#model specification
m.spatial = Hmsc(Y=Y, XData=XData, XFormula=~.,
studyDesign=studyDesign, ranLevels=list(spatial = rL.spatial),distr="probit")
###########################
#MCMC sampling
#if running fast set test.run = T, but these results will not be reliable
nChains = 2
test.run = T
if (test.run){
# with this option, the vignette runs fast but results are not reliable
thin = 1
samples = 10
transient = 5
verbose = 0
} else {
  # with this option, the vignette evaluates slowly but it reproduces the results of
# the .pdf version
thin = 10
samples = 1000
transient = 1000
verbose = 0
}
m.spatial = sampleMcmc(m.spatial, thin = thin, samples = samples, transient = transient,
nChains = nChains, verbose = verbose,updater=list(GammaEta=FALSE))
######################
#MCMC convergence
#ess == effective sample size
#psrf == potential scale reduction factor (close to 1 indicates good MCMC convergence)
#beta == species niches (as related to covariates)
#gamma == influence of traits on species niches
#omega == residual species association
#rho == phylogenetic signal
mpost = convertToCodaObject(m.spatial)
par(mfrow=c(3,2))
ess.beta = effectiveSize(mpost$Beta)
psrf.beta = gelman.diag(mpost$Beta, multivariate=FALSE)$psrf
hist(ess.beta)
hist(psrf.beta)
ess.gamma = effectiveSize(mpost$Gamma)
psrf.gamma = gelman.diag(mpost$Gamma, multivariate=FALSE)$psrf
hist(ess.gamma)
hist(psrf.gamma)
ns = 50
sppairs = matrix(sample(x = 1:ns^2, size = 100))
tmp = mpost$Omega[[1]]
for (chain in 1:length(tmp)){
tmp[[chain]] = tmp[[chain]][,sppairs]
}
ess.omega = effectiveSize(tmp)
psrf.omega = gelman.diag(tmp, multivariate=FALSE)$psrf
hist(ess.omega)
hist(psrf.omega)
##########################
#Model fit and partitioning
preds = computePredictedValues(m.spatial)
MF = evaluateModelFit(hM=m.spatial, predY=preds)
hist(MF$TjurR2, xlim = c(0,1), main=paste0("Mean = ", round(mean(MF$TjurR2),2)))
#######################
#Variance partitioning
#the group variable assigns X covariates to different covariate groups
#so first look at the design matrix
head(m.spatial$X)
#For our real data we fit an intercept and several continuous variables, so they can each be assigned separate groups
#If instead we had a categorical variable the levels could be assigned to a single group along with the intercept
VP = computeVariancePartitioning(m.spatial, group = c(1,2,3,4,5,6,7,8,9), groupnames = c("intercept","HDD4.nongrowing", "freezeThaw.nongrowing", "ppt.nongrowing", "duration_infection", "RaisedCanker", "Wax", "total_seqs","spatial"))
plotVariancePartitioning(m.spatial, VP = VP)
####################
#Plot variance partitioning
#If a squared continuous variable were included, a negative response would indicate an intermediate niche optimum (i.e., abundance goes up initially but then goes down)
postBeta = getPostEstimate(m.spatial, parName = "Beta")
plotBeta(m.spatial, post = postBeta, param = "Support",
plotTree = F, supportLevel = 0.95, split=.4, spNamesNumbers = c(T,F))
#This can also be mapped on a tree with plotTree = T, but then tree must be included in model
############################################
#transform VP and postBeta object for ggplot
#VP contains R2 vals and postBeta contains support (i.e. alpha)
VP.vals = data.frame(VP$vals)
colnames(VP.vals) = c("Nf", "Nd")
VP.vals = VP.vals[1:8,] #this removes the random effect
VP.vals$variable = c("intercept","HDD4.nongrowing", "freezeThaw.nongrowing", "ppt.nongrowing", "duration_infection", "cankers", "wax", "total_seqs")
VP.vals.long = VP.vals %>%
pivot_longer(-variable, names_to = "ASV", values_to = "R2")
#Transform R2 based on positive or negative response
postBeta.mean = data.frame(postBeta$mean)
colnames(postBeta.mean) = c("Nf", "Nd")
postBeta.mean$variable = c("intercept","HDD4.nongrowing", "freezeThaw.nongrowing", "ppt.nongrowing", "duration_infection", "cankers", "wax", "total_seqs")
postBeta.mean.long = postBeta.mean %>%
pivot_longer(-variable, names_to = "ASV", values_to = "mean")
for(i in 1:length(postBeta.mean.long$mean)){
if(postBeta.mean.long$mean[i] < 0){
VP.vals.long$R2[i] = VP.vals.long$R2[i] * -1
}
}
postBeta.support = data.frame(postBeta$support)
colnames(postBeta.support) = c("Nf", "Nd")
postBeta.support$variable = c("intercept","HDD4.nongrowing", "freezeThaw.nongrowing", "ppt.nongrowing", "duration_infection", "cankers", "wax", "total_seqs")
postBeta.support.long = postBeta.support %>%
pivot_longer(-variable, names_to = "ASV", values_to = "support")
postBeta.supportNeg = data.frame(postBeta$supportNeg)
colnames(postBeta.supportNeg) = c("Nf", "Nd")
postBeta.supportNeg$variable = c("intercept","HDD4.nongrowing", "freezeThaw.nongrowing", "ppt.nongrowing", "duration_infection", "cankers", "wax", "total_seqs")
postBeta.supportNeg.long = postBeta.supportNeg %>%
pivot_longer(-variable, names_to = "ASV", values_to = "supportNeg")
VP.vals.support = full_join(VP.vals.long, postBeta.support.long) %>%
full_join(., postBeta.supportNeg.long)
VP.vals.support = data.frame(VP.vals.support)
VP.vals.support$P.val = vector(mode = "character", length = length(VP.vals.support$support))
for(i in 1:length(VP.vals.support$P.val)){
if(VP.vals.support$supportNeg[i] > 0.95 || VP.vals.support$support[i] > 0.95){
VP.vals.support$P.val[i] = "P<0.05"
}else{
VP.vals.support$P.val[i] = "n.s."
}
}
require(RColorBrewer)
source("~/ggplot_theme.txt")
VP.vals.support$variable = factor(VP.vals.support$variable, levels = c("intercept","HDD4.nongrowing", "freezeThaw.nongrowing", "ppt.nongrowing", "duration_infection", "cankers", "wax", "total_seqs"))
p1 = ggplot(VP.vals.support, aes(ASV, variable, fill = R2, color = P.val)) +
geom_tile(size = 1, height = 0.975, width = 0.975) +
scale_fill_gradient2(low = "#2c7bb6", high = "#d7191c", mid = "white", midpoint = 0) +
scale_color_manual(values = c("P<0.05" = "black", "n.s." = "white"), ) +
my_gg_theme +
labs(
x = "",
y = "",
fill = "R2",
color = "alpha",
title = "HMSC variance partitioning\nNeonectria presence-absence"
) +
theme(legend.title = element_text(size = 20))
pdf("HMSC/Nf_Nd_variance_partitioning.bin.pdf", width = 8, height = 8)
p1
dev.off()
#scale_fill_continuous(limits = c(-1,1))
####################
####################
#Estimated residual var between spp
OmegaCor = computeAssociations(m.spatial)
supportLevel = 0.55 #posterior support threshold (0.95 would correspond to alpha = 0.05)
toPlot = ((OmegaCor[[1]]$support>supportLevel)
+ (OmegaCor[[1]]$support<(1-supportLevel))>0)*OmegaCor[[1]]$mean
corrplot(toPlot, method = "color",
col=colorRampPalette(c("blue","white","red"))(500),
tl.cex=.6, tl.col="black",
title=paste("random effect level:", m.spatial$rLNames[1]), mar=c(0,0,1,0))
#################
#Refit model with additional params
#covariate data fixed effects
XData = full_metadata.sorted %>% dplyr::select("HDD4.mean_nongrowing", "freezeThaw.mean_nongrowing", "ppt.mean_nongrowing", "duration_infection", "RaisedCanker", "Wax", "dbh", "TreeCond", "total_seqs")
#########################################################
#This block works through MCMC
rL = HmscRandomLevel(units = full_metadata.sorted$sample)
studyDesign = data.frame(
sample = full_metadata.sorted$sample
)
m.spatial = Hmsc(Y=Y, XData=XData, XFormula=~.,
studyDesign=studyDesign, ranLevels=list(sample = rL),distr="probit")
#########################################################
m.spatial = sampleMcmc(m.spatial, thin = thin, samples = samples, transient = transient,
nChains = nChains, verbose = verbose,updater=list(GammaEta=FALSE))
######################
#MCMC convergence
#ess == effective sample size
#psrf == potential scale reduction factor (close to 1 indicates good MCMC convergence)
#beta == species niches (as related to covariates)
#gamma == influence of traits on species niches
#omega == residual species association
#rho == phylogenetic signal
mpost = convertToCodaObject(m.spatial)
par(mfrow=c(3,2))
ess.beta = effectiveSize(mpost$Beta)
psrf.beta = gelman.diag(mpost$Beta, multivariate=FALSE)$psrf
hist(ess.beta)
hist(psrf.beta)
ess.gamma = effectiveSize(mpost$Gamma)
psrf.gamma = gelman.diag(mpost$Gamma, multivariate=FALSE)$psrf
hist(ess.gamma)
hist(psrf.gamma)
ns = 50
sppairs = matrix(sample(x = 1:ns^2, size = 100))
tmp = mpost$Omega[[1]]
for (chain in 1:length(tmp)){
tmp[[chain]] = tmp[[chain]][,sppairs]
}
ess.omega = effectiveSize(tmp)
psrf.omega = gelman.diag(tmp, multivariate=FALSE)$psrf
hist(ess.omega)
hist(psrf.omega)
##########################
#Model fit and partitioning
preds = computePredictedValues(m.spatial)
MF = evaluateModelFit(hM=m.spatial, predY=preds)
hist(MF$TjurR2, xlim = c(0,1), main=paste0("Mean = ", round(mean(MF$TjurR2),2)))
#######################
#Variance partitioning
#the group variable assigns X covariates to different covariate groups
#so first look at the design matrix
head(m.spatial$X)
#For our real data we fit an intercept and several continuous variables, so they can each be assigned separate groups
#If instead we had a categorical variable the levels could be assigned to a single group along with the intercept
VP = computeVariancePartitioning(m.spatial, group = c(1,2,3,4,5,6,7,8,9,10), groupnames = c("intercept","HDD4.mean_nongrowing", "freezeThaw.mean_nongrowing", "ppt.mean_nongrowing", "duration_infection", "RaisedCanker", "Wax", "dbh", "TreeCond", "total_seqs"))
plotVariancePartitioning(m.spatial, VP = VP)
####################
#Plot variance partitioning
#If a squared continuous variable were included, a negative response would indicate an intermediate niche optimum (i.e., abundance goes up initially but then goes down)
postBeta = getPostEstimate(m.spatial, parName = "Beta")
plotBeta(m.spatial, post = postBeta, param = "Support",
plotTree = F, supportLevel = 0.95, split=.4, spNamesNumbers = c(T,F))
#This can also be mapped on a tree with plotTree = T, but then tree must be included in model
############################################
#transform VP and postBeta object for ggplot
#VP contains R2 vals and postBeta contains support (i.e. alpha)
VP.vals = data.frame(VP$vals)
colnames(VP.vals) = c("Nf", "Nd")
VP.vals = VP.vals[1:10,] #this removes the random effect
VP.vals$variable = c("intercept","HDD4.mean_nongrowing", "freezeThaw.mean_nongrowing", "ppt.mean_nongrowing", "duration_infection", "cankers", "wax", "dbh", "treeCond", "total_seqs")
VP.vals.long = VP.vals %>%
pivot_longer(-variable, names_to = "ASV", values_to = "R2")
#Transform R2 based on positive or negative response
postBeta.mean = data.frame(postBeta$mean)
colnames(postBeta.mean) = c("Nf", "Nd")
postBeta.mean$variable = c("intercept","HDD4.mean_nongrowing", "freezeThaw.mean_nongrowing", "ppt.mean_nongrowing", "duration_infection", "cankers", "wax", "dbh", "treeCond", "total_seqs")
postBeta.mean.long = postBeta.mean %>%
pivot_longer(-variable, names_to = "ASV", values_to = "mean")
for(i in 1:length(postBeta.mean.long$mean)){
if(postBeta.mean.long$mean[i] < 0){
VP.vals.long$R2[i] = VP.vals.long$R2[i] * -1
}
}
postBeta.support = data.frame(postBeta$support)
colnames(postBeta.support) = c("Nf", "Nd")
postBeta.support$variable = c("intercept","HDD4.mean_nongrowing", "freezeThaw.mean_nongrowing", "ppt.mean_nongrowing", "duration_infection", "cankers", "wax", "dbh", "treeCond", "total_seqs")
postBeta.support.long = postBeta.support %>%
pivot_longer(-variable, names_to = "ASV", values_to = "support")
postBeta.supportNeg = data.frame(postBeta$supportNeg)
colnames(postBeta.supportNeg) = c("Nf", "Nd")
postBeta.supportNeg$variable = c("intercept","HDD4.mean_nongrowing", "freezeThaw.mean_nongrowing", "ppt.mean_nongrowing", "duration_infection", "cankers", "wax", "dbh", "treeCond", "total_seqs")
postBeta.supportNeg.long = postBeta.supportNeg %>%
pivot_longer(-variable, names_to = "ASV", values_to = "supportNeg")
VP.vals.support = full_join(VP.vals.long, postBeta.support.long) %>%
full_join(., postBeta.supportNeg.long)
VP.vals.support = data.frame(VP.vals.support)
VP.vals.support$P.val = vector(mode = "character", length = length(VP.vals.support$support))
for(i in 1:length(VP.vals.support$P.val)){
if(VP.vals.support$supportNeg[i] > 0.95 || VP.vals.support$support[i] > 0.95){
VP.vals.support$P.val[i] = "P<0.05"
}else{
VP.vals.support$P.val[i] = "n.s."
}
}
require(RColorBrewer)
source("~/ggplot_theme.txt")
VP.vals.support$variable = factor(VP.vals.support$variable, levels = c("intercept","HDD4.mean_nongrowing", "freezeThaw.mean_nongrowing", "ppt.mean_nongrowing", "duration_infection", "cankers", "wax", "dbh", "treeCond", "total_seqs"))
p1 = ggplot(VP.vals.support, aes(ASV, variable, fill = R2, color = P.val)) +
geom_tile(size = 1, height = 0.975, width = 0.975) +
scale_fill_gradient2(low = "#2c7bb6", high = "#d7191c", mid = "white", midpoint = 0) +
scale_color_manual(values = c("P<0.05" = "black", "n.s." = "white"), ) +
my_gg_theme +
labs(
x = "",
y = "",
fill = "R2",
color = "alpha",
title = "HMSC variance partitioning\nNeonectria presence-absence"
) +
theme(legend.title = element_text(size = 20))
pdf("HMSC/Nf_Nd_variance_partitioning.bin.plus_vars.pdf", width = 8, height = 8)
p1
dev.off()
|
\name{predNuPoP}
\alias{predNuPoP}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{R function for nucleosome positioning prediction, occupancy score and nucleosome binding affinity score calculation}
\description{This function invokes Fortran codes to compute the Viterbi prediction of nucleosome positioning, nucleosome occupancy score and nucleosome binding affinity score. A pre-trained linker DNA length distribution for the current species is used in a duration Hidden Markov model.}
\usage{predNuPoP(file,species=7,model=4)}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{file}{a string for the path and name of a DNA sequence file in FASTA format. This sequence file can be located in any directory. It must contain only one sequence of any length. By FASTA format, we require each line to be of the same length (the last line can be shorter; the first line should be '>sequenceName'). The length of each line should be not longer than 10 million bp.}
  \item{species}{an integer from 0 to 11 as the label for a species indexed as follows: 1 = Human; 2 = Mouse; 3 = Rat; 4 = Zebrafish; 5 = D. melanogaster; 6 = C. elegans; 7 = S. cerevisiae; 8 = C. albicans; 9 = S. pombe; 10 = A. thaliana; 11 = Maize; 0 = Other. The default is 7 = S. cerevisiae. If \code{species=0} is specified, NuPoP will identify a species from 1-11 that has the most similar base composition to the input sequence, and then use the models from the selected species for prediction.}
\item{model}{an integer = 4 or 1. NuPoP has two models integrated. One is the first order Markov chain for both nucleosome and linker DNA states. The other is 4th order (default). The latter distinguishes nucleosome/linker in up to 5-mer usage, and thus is slightly more effective in prediction, but runs slower. The time used by 4th order model is about 2.5 times of the 1st order model.}
}
\value{\code{predNuPoP} outputs the prediction results into the current working directory. The output file is named after the input file with an added extension \code{_Prediction1.txt} or \code{_Prediction4.txt}, where 1 or 4 stands for the order of Markov chain models specified. The output file has five columns, \code{Position}, \code{P-start}, \code{Occup}, \code{N/L}, \code{Affinity}:
\item{Position}{position in the input DNA sequence}
\item{P-start}{probability that the current position is the start of a nucleosome}
\item{Occup}{nucleosome occupancy score}
\item{N/L}{nucleosome (1) or linker (0) for each position based on Viterbi prediction}
\item{Affinity}{nucleosome binding affinity score}
}
\examples{
library(NuPoP)
predNuPoP(system.file("extdata", "test.seq", package="NuPoP"),species=7,model=4)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{}
|
/man/predNuPoP.Rd
|
no_license
|
oazarate/NuPoP
|
R
| false
| false
| 2,832
|
rd
|
\name{predNuPoP}
\alias{predNuPoP}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{R function for nucleosome positioning prediction, occupancy score and nucleosome binding affinity score calculation}
\description{This function invokes Fortran codes to compute the Viterbi prediction of nucleosome positioning, nucleosome occupancy score and nucleosome binding affinity score. A pre-trained linker DNA length distribution for the current species is used in a duration Hidden Markov model.}
\usage{predNuPoP(file,species=7,model=4)}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{file}{a string for the path and name of a DNA sequence file in FASTA format. This sequence file can be located in any directory. It must contain only one sequence of any length. By FASTA format, we require each line to be of the same length (the last line can be shorter; the first line should be '>sequenceName'). The length of each line should be not longer than 10 million bp.}
  \item{species}{an integer from 0 to 11 as the label for a species indexed as follows: 1 = Human; 2 = Mouse; 3 = Rat; 4 = Zebrafish; 5 = D. melanogaster; 6 = C. elegans; 7 = S. cerevisiae; 8 = C. albicans; 9 = S. pombe; 10 = A. thaliana; 11 = Maize; 0 = Other. The default is 7 = S. cerevisiae. If \code{species=0} is specified, NuPoP will identify a species from 1-11 that has the most similar base composition to the input sequence, and then use the models from the selected species for prediction.}
\item{model}{an integer = 4 or 1. NuPoP has two models integrated. One is the first order Markov chain for both nucleosome and linker DNA states. The other is 4th order (default). The latter distinguishes nucleosome/linker in up to 5-mer usage, and thus is slightly more effective in prediction, but runs slower. The time used by 4th order model is about 2.5 times of the 1st order model.}
}
\value{\code{predNuPoP} outputs the prediction results into the current working directory. The output file is named after the input file with an added extension \code{_Prediction1.txt} or \code{_Prediction4.txt}, where 1 or 4 stands for the order of Markov chain models specified. The output file has five columns, \code{Position}, \code{P-start}, \code{Occup}, \code{N/L}, \code{Affinity}:
\item{Position}{position in the input DNA sequence}
\item{P-start}{probability that the current position is the start of a nucleosome}
\item{Occup}{nucleosome occupancy score}
\item{N/L}{nucleosome (1) or linker (0) for each position based on Viterbi prediction}
\item{Affinity}{nucleosome binding affinity score}
}
\examples{
library(NuPoP)
predNuPoP(system.file("extdata", "test.seq", package="NuPoP"),species=7,model=4)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{}
|
# Step 0 - Load reshape2 package to use later
library(reshape2)
## Step 1 - Merges the training and the test data sets to create one data set
# download and unzip the dataset
filename <- "getdata_dataset.zip"
if (!file.exists(filename)){
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileURL, filename, method="wininet")
}
if (!file.exists("UCI HAR Dataset")) {
unzip(filename)
}
# load activity labels and features
activityLabels <- read.table("UCI HAR Dataset/activity_labels.txt")
activityLabels[,2] <- as.character(activityLabels[,2])
features <- read.table("UCI HAR Dataset/features.txt")
features[,2] <- as.character(features[,2])
## Step 2 - Extracts only the measurements on the mean and standard deviation for each measurement.
# extract only the mean and standard deviation data
featuresWant <- grep(".*mean.*|.*std.*", features[,2])
featuresWant.names <- features[featuresWant,2]
featuresWant.names = gsub('-mean', 'Mean', featuresWant.names)
featuresWant.names = gsub('-std', 'Std', featuresWant.names)
featuresWant.names <- gsub('[-()]', '', featuresWant.names)
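# (added illustration) the three gsub() calls above turn a raw feature name such as
# "tBodyAcc-mean()-X" into "tBodyAccMeanX"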
# load the train and test sets and assemble each with its subjects and activities
train <- read.table("UCI HAR Dataset/train/X_train.txt")[featuresWant]
trainActivities <- read.table("UCI HAR Dataset/train/Y_train.txt")
trainSubjects <- read.table("UCI HAR Dataset/train/subject_train.txt")
train <- cbind(trainSubjects, trainActivities, train)
test <- read.table("UCI HAR Dataset/test/X_test.txt")[featuresWant]
testActivities <- read.table("UCI HAR Dataset/test/Y_test.txt")
testSubjects <- read.table("UCI HAR Dataset/test/subject_test.txt")
test <- cbind(testSubjects, testActivities, test)
# merge the train and test datasets. Add labels
allData <- rbind(train, test)
colnames(allData) <- c("subject", "activity", featuresWant.names)
## Step 3 - Use descriptive activity names to name the activities in the data set.
## Step 4 - Appropriately label the data set with descriptive activity names.
# transform activities and subjects into 'factors'
allData$activity <- factor(allData$activity, levels = activityLabels[,1], labels = activityLabels[,2])
allData$subject <- as.factor(allData$subject)
## Step 5 - From the data set in step 4, create a second, independent tidy data set with the average of each variable for each activity and each subject.
# create the tidy data set
allDataMelted <- melt(allData, id = c("subject", "activity"))
allDataMean <- dcast(allDataMelted, subject + activity ~ variable, mean)
write.table(allDataMean, "tidy.txt", row.names = FALSE, quote = FALSE)
|
/run_analysis.R
|
no_license
|
jonesth/Getting-and-Cleaning-Data-Course-Project
|
R
| false
| false
| 2,685
|
r
|
# Step 0 - Load reshape2 package to use later
library(reshape2)
## Step 1 - Merges the training and the test data sets to create one data set
# download and unzip the dataset
filename <- "getdata_dataset.zip"
if (!file.exists(filename)){
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileURL, filename, method="wininet")
}
if (!file.exists("UCI HAR Dataset")) {
unzip(filename)
}
# load activity labels and features
activityLabels <- read.table("UCI HAR Dataset/activity_labels.txt")
activityLabels[,2] <- as.character(activityLabels[,2])
features <- read.table("UCI HAR Dataset/features.txt")
features[,2] <- as.character(features[,2])
## Step 2 - Extracts only the measurements on the mean and standard deviation for each measurement.
# extract only the mean and standard deviation data
featuresWant <- grep(".*mean.*|.*std.*", features[,2])
featuresWant.names <- features[featuresWant,2]
featuresWant.names = gsub('-mean', 'Mean', featuresWant.names)
featuresWant.names = gsub('-std', 'Std', featuresWant.names)
featuresWant.names <- gsub('[-()]', '', featuresWant.names)
# load the train and test sets and assemble each with its subjects and activities
train <- read.table("UCI HAR Dataset/train/X_train.txt")[featuresWant]
trainActivities <- read.table("UCI HAR Dataset/train/Y_train.txt")
trainSubjects <- read.table("UCI HAR Dataset/train/subject_train.txt")
train <- cbind(trainSubjects, trainActivities, train)
test <- read.table("UCI HAR Dataset/test/X_test.txt")[featuresWant]
testActivities <- read.table("UCI HAR Dataset/test/Y_test.txt")
testSubjects <- read.table("UCI HAR Dataset/test/subject_test.txt")
test <- cbind(testSubjects, testActivities, test)
# merge the train and test datasets. Add labels
allData <- rbind(train, test)
colnames(allData) <- c("subject", "activity", featuresWant.names)
## Step 3 - Use descriptive activity names to name the activities in the data set.
## Step 4 - Appropriately label the data set with descriptive activity names.
# transform activities and subjects into 'factors'
allData$activity <- factor(allData$activity, levels = activityLabels[,1], labels = activityLabels[,2])
allData$subject <- as.factor(allData$subject)
## Step 5 - From the data set in step 4, create a second, independent tidy data set with the average of each variable for each activity and each subject.
# create the tidy data set
allDataMelted <- melt(allData, id = c("subject", "activity"))
allDataMean <- dcast(allDataMelted, subject + activity ~ variable, mean)
write.table(allDataMean, "tidy.txt", row.names = FALSE, quote = FALSE)
|
# General setup ----
## Function
rm(list=ls(all.names=T))
library(runjags)
source("function_simdata_ver5.R")
## MCMC setting
n.ad <- 100
n.iter <- 1E+4
n.thin <- max(3, ceiling(n.iter/500))
burn <- ceiling(max(10, n.iter/2))
Sample <- ceiling(n.iter/n.thin)
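## (added reference values) with n.iter = 1e4 these settings give n.thin = 20,
## burn = 5000 and Sample = 500 retained draws per chain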
## Parameter set
N <- c(100, 500, 1000)
LEN <- c(500, 1000)
SIGMA <- seq(50, 300, length = 6)
PHI <- c(0.4, 0.8)
SIGMA.PHI <- 1
PARA <- as.matrix(expand.grid(N, LEN, SIGMA, PHI, SIGMA.PHI))
Kernel <- "Gaussian"
colnames(PARA) <- c("N", "LEN", "SIGMA", "PHI", "SIGMA.PHI")
## N replicate and N parameter combinations
Nrep <- 50
Npara <- nrow(PARA)
# Bayesian Inference ----
output <- NULL
## Different sampling designs and model parameters
for(i in 1:Npara){
RE <- NULL
## Replicates under the same sampling designs and model parameters
for(j in 1:Nrep){
print(c(i,j))
sigma <- PARA[i,"SIGMA"]
## Simulated Data
D <- fun_disp(N = PARA[i,"N"], sec_len = PARA[i,"LEN"],
sigma = sigma, family = Kernel,
phi = PARA[i,"PHI"], hetero.phi = T, sigma.phi = PARA[i,"SIGMA.PHI"])
## Data for JAGS
X <- D$X
X0 <- D$X0
Y <- 1 - is.na(D$X)
L <- PARA[i,"LEN"]
## Run JAGS
Djags <- list( X = X, X0 = X0, Y = Y, Nsample = length(X), L = L )
para <- c("sigma", "mu.phi")
inits <- replicate(3, list(tau = 1/(sigma^2), .RNG.name = "base::Mersenne-Twister", .RNG.seed = NA ), simplify = F )
for(k in 1:3) inits[[k]]$.RNG.seed <- k
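    ## (added note) each of the 3 chains starts from the same tau but gets its own
    ## Mersenne-Twister seed (1, 2, 3), so parallel chains are reproducible yet distinct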
m <- read.jagsfile("bayes-model/gaussian/model_disp_obs_gaussian_v1.R")
post <- run.jags(m$model, monitor = para, data = Djags,
n.chains = 3, inits = inits, method = "parallel",
burnin = burn, sample = Sample, adapt = n.ad, thin = n.thin,
n.sims = 3, modules = "glm")
print(post$psrf$psrf[,1])
while(any(post$psrf$psrf[,1] >= 1.1)){
post <- extend.jags(post, burnin = 0, sample = Sample, adapt = n.ad, thin = n.thin,
n.sims = 3, combine = T)
print(post$psrf$psrf[,1])
}
## Output
MCMCiter <- (post$sample/Sample)*n.iter + burn
re <- summary(post)
RE <- rbind(RE, c(PARA[i,],
mean(is.na(X)==0),
mean(is.na(D$x_stay)==0),
re["sigma", 1:3],
re["mu.phi", 1:3],
re["sigma", "psrf"],
re["mu.phi", "psrf"],
MCMCiter, burn, n.thin, post$sample) )
View(RE)
}#j
## Compile final output
output <- rbind(output, RE)
}#i
# Save results ----
colnames(output) <- c("N", "LEN", "SIGMA", "PHI", "SIGMA.PHI",
"Pcap", "Pstay",
"sigma_lower", "sigma_med", "sigma_upper",
"phi_lower", "phi_med", "phi_upper",
"R_hat_sigma", "R_hat_phi",
"MCMCiter", "Burn_in", "N_thin", "N_sample")
filename <- paste0("result/sim_model_disp_obs_hetero_", Kernel, Sys.Date(), ".csv")
write.csv(output, filename)
|
/bayes-model/gaussian/inits06_hetero_disp_obs_gaussian.R
|
permissive
|
aterui/public-proj_disp-model-sim
|
R
| false
| false
| 3,278
|
r
|
# General setup ----
## Function
rm(list=ls(all.names=T))
library(runjags)
source("function_simdata_ver5.R")
## MCMC setting
n.ad <- 100
n.iter <- 1E+4
n.thin <- max(3, ceiling(n.iter/500))
burn <- ceiling(max(10, n.iter/2))
Sample <- ceiling(n.iter/n.thin)
## Parameter set
N <- c(100, 500, 1000)
LEN <- c(500, 1000)
SIGMA <- seq(50, 300, length = 6)
PHI <- c(0.4, 0.8)
SIGMA.PHI <- 1
PARA <- as.matrix(expand.grid(N, LEN, SIGMA, PHI, SIGMA.PHI))
Kernel <- "Gaussian"
colnames(PARA) <- c("N", "LEN", "SIGMA", "PHI", "SIGMA.PHI")
## N replicate and N parameter combinations
Nrep <- 50
Npara <- nrow(PARA)
# Bayesian Inference ----
output <- NULL
## Different sampling designs and model parameters
for(i in 1:Npara){
RE <- NULL
## Replicates under the same sampling designs and model parameters
for(j in 1:Nrep){
print(c(i,j))
sigma <- PARA[i,"SIGMA"]
## Simulated Data
D <- fun_disp(N = PARA[i,"N"], sec_len = PARA[i,"LEN"],
sigma = sigma, family = Kernel,
phi = PARA[i,"PHI"], hetero.phi = T, sigma.phi = PARA[i,"SIGMA.PHI"])
## Data for JAGS
X <- D$X
X0 <- D$X0
Y <- 1 - is.na(D$X)
L <- PARA[i,"LEN"]
## Run JAGS
Djags <- list( X = X, X0 = X0, Y = Y, Nsample = length(X), L = L )
para <- c("sigma", "mu.phi")
inits <- replicate(3, list(tau = 1/(sigma^2), .RNG.name = "base::Mersenne-Twister", .RNG.seed = NA ), simplify = F )
for(k in 1:3) inits[[k]]$.RNG.seed <- k
m <- read.jagsfile("bayes-model/gaussian/model_disp_obs_gaussian_v1.R")
post <- run.jags(m$model, monitor = para, data = Djags,
n.chains = 3, inits = inits, method = "parallel",
burnin = burn, sample = Sample, adapt = n.ad, thin = n.thin,
n.sims = 3, modules = "glm")
print(post$psrf$psrf[,1])
while(any(post$psrf$psrf[,1] >= 1.1)){
post <- extend.jags(post, burnin = 0, sample = Sample, adapt = n.ad, thin = n.thin,
n.sims = 3, combine = T)
print(post$psrf$psrf[,1])
}
## Output
MCMCiter <- (post$sample/Sample)*n.iter + burn
re <- summary(post)
RE <- rbind(RE, c(PARA[i,],
mean(is.na(X)==0),
mean(is.na(D$x_stay)==0),
re["sigma", 1:3],
re["mu.phi", 1:3],
re["sigma", "psrf"],
re["mu.phi", "psrf"],
MCMCiter, burn, n.thin, post$sample) )
View(RE)
}#j
## Compile final output
output <- rbind(output, RE)
}#i
# Save results ----
colnames(output) <- c("N", "LEN", "SIGMA", "PHI", "SIGMA.PHI",
"Pcap", "Pstay",
"sigma_lower", "sigma_med", "sigma_upper",
"phi_lower", "phi_med", "phi_upper",
"R_hat_sigma", "R_hat_phi",
"MCMCiter", "Burn_in", "N_thin", "N_sample")
filename <- paste0("result/sim_model_disp_obs_hetero_", Kernel, Sys.Date(), ".csv")
write.csv(output, filename)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{bwKW}
\alias{bwKW}
\title{Bandwidth selection for KW smoothing}
\usage{
bwKW(g, k = 1, minbw = 0.1)
}
\arguments{
\item{g}{KW fitted object}
\item{k}{multiplicative fudge factor}
\item{minbw}{minimum allowed value of bandwidth}
}
\description{
Bandwidth selection for KW smoothing
}
\author{
R. Koenker
}
\keyword{utility}
|
/man/bwKW.Rd
|
no_license
|
cran/REBayes
|
R
| false
| true
| 416
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/misc.R
\name{bwKW}
\alias{bwKW}
\title{Bandwidth selection for KW smoothing}
\usage{
bwKW(g, k = 1, minbw = 0.1)
}
\arguments{
\item{g}{KW fitted object}
\item{k}{multiplicative fudge factor}
\item{minbw}{minimum allowed value of bandwidth}
}
\description{
Bandwidth selection for KW smoothing
}
\author{
R. Koenker
}
\keyword{utility}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mult-PCA.R
\name{get_pairs}
\alias{get_pairs}
\title{Get paired individuals on a Coe, PCA or LDA object}
\usage{
get_pairs(x, fac, range)
}
\arguments{
\item{x}{any \link{Coe}, \link{PCA} or \link{LDA} object.}
\item{fac}{factor or column name or id corresponding to the pairing factor.}
\item{range}{numeric the range of coefficients for \code{Coe}, or PC (LD) axes on which to return scores.}
}
\value{
a list with components \code{x1} all coefficients/scores corresponding to the
first level of the \code{fac} provided; \code{x2} same thing for the second level;
\code{fac} the corresponding \code{fac}.
}
\description{
If you have paired individuals, i.e. before and after a treatment or for repeated measures,
and if you have coded it into \code{$fac}, this method allows you to retrieve the corresponding PC/LD scores,
or coefficients for \link{Coe} objects.
}
\examples{
data(bot)
bot2 <- bot1 <- coo_scale(coo_center(coo_sample(bot, 60)))
bot1$fac$session <- factor(rep("session1", 40))
# we simulate a measurement error
bot2 <- coo_jitter(bot1, amount=0.01)
bot2$fac$session <- factor(rep("session2", 40))
botc <- combine(bot1, bot2)
botcf <- efourier(botc, 12)
# we are going to plot the PCA with the two measurement sessions and the two types
botcp <- PCA(botcf)
plot(botcp, "type", col=col_summer(2), pch=rep(c(1, 20), each=40), eigen=FALSE)
bot.pairs <- get_pairs(botcp, fac = "session", range=1:2)
segments(bot.pairs$session1[, 1], bot.pairs$session1[, 2],
bot.pairs$session2[, 1], bot.pairs$session2[, 2],
col=col_summer(2)[bot.pairs$fac$type])
}
|
/man/get_pairs.Rd
|
no_license
|
stas-malavin/Momocs
|
R
| false
| true
| 1,661
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mult-PCA.R
\name{get_pairs}
\alias{get_pairs}
\title{Get paired individuals on a Coe, PCA or LDA object}
\usage{
get_pairs(x, fac, range)
}
\arguments{
\item{x}{any \link{Coe}, \link{PCA} or \link{LDA} object.}
\item{fac}{factor or column name or id corresponding to the pairing factor.}
\item{range}{numeric the range of coefficients for \code{Coe}, or PC (LD) axes on which to return scores.}
}
\value{
a list with components \code{x1} all coefficients/scores corresponding to the
first level of the \code{fac} provided; \code{x2} same thing for the second level;
\code{fac} the corresponding \code{fac}.
}
\description{
If you have paired individuals, i.e. before and after a treatment or for repeated measures,
and if you have coded coded it into \code{$fac}, this methods allows you to retrieve the cooresponding PC/LD scores,
or coefficients for \link{Coe} objects.
}
\examples{
data(bot)
bot2 <- bot1 <- coo_scale(coo_center(coo_sample(bot, 60)))
bot1$fac$session <- factor(rep("session1", 40))
# we simulate an measurement error
bot2 <- coo_jitter(bot1, amount=0.01)
bot2$fac$session <- factor(rep("session2", 40))
botc <- combine(bot1, bot2)
botcf <- efourier(botc, 12)
# we gonna plot the PCA with the two measurement sessions and the two types
botcp <- PCA(botcf)
plot(botcp, "type", col=col_summer(2), pch=rep(c(1, 20), each=40), eigen=FALSE)
bot.pairs <- get_pairs(botcp, fac = "session", range=1:2)
segments(bot.pairs$session1[, 1], bot.pairs$session1[, 2],
bot.pairs$session2[, 1], bot.pairs$session2[, 2],
col=col_summer(2)[bot.pairs$fac$type])
}
|
rm(list=ls())
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
library(plyr)      # attach plyr before dplyr/tidyverse so their verbs are not masked
library(tidyverse)
library(dplyr)
# Stack one wide block (label/age column first, one cage per remaining column,
# header rows Cohort/Size/Cage/seg on top) onto the long data frame fly.df
combine = function(fly.df, dat){
  for(i in 2:ncol(dat)){
    dat.sub = data.frame(age    = dat[-c(1,2,3,4), 1] %>% as.numeric(),
                         num    = dat[-c(1,2,3,4), i] %>% as.numeric(),
                         Cohort = dat[1, i],
                         Size   = dat[2, i],
                         Cage   = dat[3, i],
                         seg    = dat[4, i])
    fly.df = fly.df %>% rbind(dat.sub)
  }
  return(fly.df)
}
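# Illustrative only (hypothetical toy block, not the real MALE.txt layout):
# combine() expects the header rows Cohort/Size/Cage/seg in rows 1-4 and
# age/count pairs below, one cage per column, e.g.
#   toy <- data.frame(V1 = c("Cohort", "Size", "Cage", "seg", "1", "2"),
#                     V2 = c("A", "30", "c1", "s1", "10", "8"))
#   combine(data.frame(), toy)
# returns two rows with age = 1:2, num = c(10, 8) plus the cage metadata.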
L = 173  # number of age rows per block; each block adds 4 header rows on top
fly.df = data.frame()
df.male = read.table("MALE.txt", nrows = 21*(L+4))  # 21 blocks of L+4 rows each
col_time = read.table("MALE.txt", header = FALSE, nrows = L + 4)[,1]  # label/age column of the first block
for(i in 1:21){
dat = df.male[(1+(i-1)*(L+4)):(i*(L+4)), ]
  if(dat[1,1] != "Cohort") {
    # blocks after the first lack the leading label/age column; prepend it
    dat = cbind(col_time, dat)
  }
fly.df = fly.df %>% combine(dat)
}
fly_male = fly.df
save(fly_male, file = "fly_male.Rda")
fly.df = data.frame()
df.female = read.table("FEMALE.txt", nrows = 21*(L+4))
col_time = read.table("FEMALE.txt", header = FALSE, nrows = L + 4)[,1]
for(i in 1:21){
dat = df.female[(1+(i-1)*(L+4)):(i*(L+4)), ]
if(dat[1,1] != "Cohort") {
dat = cbind(col_time, dat)
}
fly.df = fly.df %>% combine(dat)
}
fly_female = fly.df
save(fly_female, file = "fly_female.Rda")
|
/preprocess.R
|
no_license
|
hango1996/survival-analysis
|
R
| false
| false
| 1,316
|
r
|
#' Reporting for the coupled EDGE-T Transport Sector Model (REMIND Module edge_esm)
#'
#' Data is loaded from the EDGE-T subfolder in the output folder.
#' The input files can be (re-) generated calling
#' `Rscript EDGETransport.R --reporting`
#' from the output folder.
#'
#' *Warning* The function modifies the "REMIND_generic_<scenario>.mif" file by appending the
#' additional reporting variables and replaces the "_withoutPlus" version.
#'
#' Region subsets are obtained from fulldata.gdx
#'
#' @param output_folder path to the output folder, default is current folder.
#' @param sub_folder subfolder with EDGE-T output files (level_2 for standalone, EDGE-T for coupled runs)
#' @param loadmif shall we try to load a REMIND MIF file from the output folder to append the variables?
#' @param extendedReporting report a larger set of variables
#' @param scenario_title a scenario title string
#' @param model_name a model name string
#' @param gdx path to the GDX file used for the run.
#' @author Johanna Hoppe, Alois Dirnaichner, Marianna Rottoli
#'
#' @importFrom rmndt approx_dt readMIF writeMIF
#' @importFrom gdxdt readgdx
#' @importFrom data.table fread fwrite rbindlist copy CJ
#' @importFrom remind2 toolRegionSubsets
#' @importFrom quitte as.quitte
#' @export
reportEDGETransport2 <- function(output_folder = ".", sub_folder = "EDGE-T/",
loadmif = TRUE , extendedReporting = FALSE,
scenario_title = NULL, model_name = "EDGE-Transport",
gdx = NULL) {
  ## NULL Definitions for codeCheck compliance
RegionCode <- CountryCode <- `.` <- sector <- subsector_L3 <- region <- year <- NULL
subsector_L2 <- subsector_L1 <- aggr_mode <- vehicle_type <- det_veh <- aggr_nonmot <- NULL
demand_F <- demand_EJ <- remind_rep <- V25 <- aggr_veh <- technology <- NULL
ttot <- se_share <- fe_demand <- variable <- value <- demand_VKM <- loadFactor <- NULL
all_enty <- ef <- variable_agg <- model <- scenario <- period <- NULL
Region <- Variable <- co2 <- co2val <- elh2 <- fe <- NULL
int <- se <- sec <- sharesec <- te <- tech <- val <- share <- NULL
eff <- sharebio <- sharesyn <- totseliq <- type <- ven <- NULL
unit <- tot_VOT_price <- tot_price <- logit_type <- capture.output <- weight <- NULL
#pkm or tkm is called km in the reporting. Vehicle km are called vkm
yrs <- c(seq(2005, 2060, 5), seq(2070, 2100, 10))
datapath <- function(fname){
file.path(output_folder, sub_folder, fname)}
reporting <- function(datatable, mode){
aggr_mode_tech <- aggr_LDV <- aggr_LDV_tech <- det_veh_tech <- aggr_bunkers <- aggr_bunkers_tech <- aggr_veh_tech <- capture.output <- NULL
report <- list()
datatable[, sector := ifelse(sector %in% c("trn_pass", "trn_aviation_intl"), "Pass", "Freight")]
datatable <- merge(datatable,Aggrdata,by = c("sector", "subsector_L1", "subsector_L2", "subsector_L3", "vehicle_type", "technology"), all.x = TRUE, allow.cartesian = TRUE)
#How to account for Hybrid Electric in Final Energy?
if (mode == "FE") {
techmap <- data.table(
technology = c("BEV","Electric", "Hybrid Electric", "FCEV", "Hydrogen", "Liquids", "NG"),
remind_rep = c("Electricity", "Electricity", "Liquids", "Hydrogen", "Hydrogen", "Liquids", "Gases"))
} else {
techmap <- data.table(
technology = c("BEV", "Electric", "Hybrid Electric", "FCEV", "Hydrogen", "Liquids","NG"),
remind_rep = c("BEV", "Electric", "Hybrid Electric", "FCEV", "Hydrogen", "Liquids", "Gases"))
}
datatable <- merge(datatable,techmap,by = c("technology"), all.x = TRUE)
datatable[!is.na(aggr_mode) & !is.na(remind_rep), aggr_mode_tech := paste0(aggr_mode, "|", remind_rep)]
datatable[!is.na(aggr_veh) & !is.na(remind_rep), aggr_veh_tech := paste0(aggr_veh, "|", remind_rep)]
datatable[!is.na(aggr_LDV) & !is.na(remind_rep), aggr_LDV_tech := paste0(aggr_LDV, "|", remind_rep)]
datatable[!is.na(det_veh) & !is.na(remind_rep), det_veh_tech := paste0(det_veh, "|", remind_rep)]
datatable[!is.na(aggr_bunkers) & !is.na(remind_rep), aggr_bunkers_tech := paste0(aggr_bunkers, "|", remind_rep)]
unit <- switch(mode,
"FE" = "EJ/yr",
"ES" = "bn km/yr",
"VKM" = "bn vkm/yr")
prefix <- switch(mode,
"FE" = "FE|Transport|",
"ES" = "ES|Transport|",
"VKM" = "ES|Transport|VKM|")
var <- c("Pass","Freight")
Aggr <- c("aggr_mode", "aggr_veh", "aggr_LDV", "det_veh", "nonmot", "aggr_nonmot", "aggr_bunkers", "aggr_mode_tech", "aggr_veh_tech", "aggr_LDV_tech", "det_veh_tech","aggr_bunkers_tech")
for (var0 in var) {
for (Aggr0 in Aggr) {
#Aggregate data
datatable0 <- copy(datatable)
datatable0 <- datatable0[!is.na(get(Aggr0))]
datatable0 <- datatable0[sector == var0, .(value = sum(value, na.rm = T)),
by = c("region", "year", Aggr0)]
if(nrow(datatable0) > 0) {
setnames(datatable0, "year", "period")
datatable0 <- datatable0[, model := model_name][, scenario := scenario_title][, variable := paste0(prefix, get(Aggr0))][, unit := unit][, eval(Aggr0) := NULL]
datatable0 <- approx_dt(datatable0, yrs, xcol = "period", ycol = "value",
idxcols = c("scenario","variable","unit","model","region"),
extrapolate = T)
report <- rbind(report, datatable0)}
}
}
return(report)
}
## Demand emissions
reportingEmi <- function(repFE, gdx){
## load emission factors for fossil fuels
p_ef_dem <- readgdx(gdx, "p_ef_dem")[all_enty %in% c("fepet", "fedie", "fegas")] ## MtCO2/EJ
p_ef_dem[all_enty == "fegas", all_enty := "fegat"]
setnames(p_ef_dem, old = c("value", "all_regi"), new = c("ef", "region"))
## attribute explicitly fuel used to the FE values
emidem = repFE[grepl("Liquids|Gases|Hydrogen|Electricity", variable) & region != "World"] ## EJ
emidem[, all_enty := ifelse(grepl("Liquids", variable), "fedie", NA)]
emidem[, all_enty := ifelse(grepl("LDV.+Liquids", variable), "fepet", all_enty)]
emidem[, all_enty := ifelse(grepl("Gases", variable), "fegat", all_enty)]
emidem[, all_enty := ifelse(grepl("Electricity", variable), "feelt", all_enty)]
emidem[, all_enty := ifelse(grepl("Hydrogen", variable), "feh2t", all_enty)]
## merge with emission factors
emidem = emidem[p_ef_dem, on = c("all_enty","region")]
## calculate emissions and attribute variable and unit names
emidem[, value := value*ef][, c("variable", "unit") := list(gsub("FE", "Emi\\|CO2", variable), "Mt CO2/yr")]
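    ## e.g. 1 EJ of fedie at roughly 74 MtCO2/EJ (illustrative number; the
    ## actual factors come from p_ef_dem in the GDX) yields ~74 Mt CO2/yr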
emi = rbind(copy(emidem)[, c("type", "variable") := list("tailpipe", paste0(variable, "|Tailpipe"))],
copy(emidem)[, c("type", "variable") := list("demand", paste0(variable, "|Demand"))])
prodFe <- readgdx(gdx, "vm_prodFE")[, ttot := as.numeric(ttot)]
setnames(prodFe,
c("period", "region", "se", "all_enty", "te", "fe_demand"))
prodFe[, se_share := fe_demand/sum(fe_demand), by = c("period", "region", "all_enty")]
prodFe <- prodFe[all_enty %in% c("fedie", "fepet", "fegat") & se %in% c("segafos", "seliqfos")][, c("se", "te", "fe_demand") := NULL]
emi <- prodFe[emi, on = c("period", "region", "all_enty")]
    ## in case no fossil fuels are used (e.g. 100% biodiesel), se_share results in NA; set it to 0
emi[is.na(se_share), se_share := 0]
emi <- emi[all_enty %in% c("fedie", "fepet", "fegat") & type == "demand", value := value*se_share]
emi[, c("se_share", "type", "ef", "all_enty") := NULL]
## aggregate removing the fuel dependency
emi[, variable_agg := gsub("\\|Liquids|\\|Electricity|\\|Hydrogen|\\|Gases", "", variable)]
emi = emi[, .(value = sum(value)), by = c("model", "scenario", "region", "unit", "period", "variable_agg")]
setnames(emi, old = "variable_agg", new = "variable")
emi = emi[, .(model, scenario, region, variable, unit, period, value)]
return(emi)
}
reportingVehNum <- function(demand_vkm, annual_mileage){
venum <- copy(demand_vkm)
## merge annual mileage
anmil <- copy(annual_mileage)
anmil[grepl("Subcompact", vehicle_type),
variable := "Pass|Road|LDV|Small"]
anmil[grepl("Mini", vehicle_type),
variable := "Pass|Road|LDV|Mini"]
anmil[vehicle_type == "Compact Car", variable := "Pass|Road|LDV|Medium"]
anmil[grepl("Large Car|Midsize Car", vehicle_type), variable := "Pass|Road|LDV|Large"]
anmil[grepl("SUV", vehicle_type),
variable := "Pass|Road|LDV|SUV"]
anmil[grepl("Van|Multipurpose", vehicle_type),
variable := "Pass|Road|LDV|Van"]
anmil[grepl("Motorcycle|Scooter|Moped", vehicle_type),
variable := "Pass|Road|LDV|Two-Wheelers"]
anmil[grepl("^Truck", vehicle_type),
variable := sprintf("Freight|Road|%s", vehicle_type)]
anmil[grepl("Bus", vehicle_type),
variable := "Pass|Road|Bus"]
anmil <- anmil[,.(region, period = year, variable, annual_mileage)]
anmil <- approx_dt(anmil, unique(demand_vkm$period), xcol = "period", ycol = "annual_mileage", idxcols = c("region", "variable"), extrapolate = T)
anmil<- unique(anmil[, c("period", "region", "variable", "annual_mileage")])
anmil <- anmil[, variable := paste0("ES|Transport|VKM|", variable)]
venum <- merge(demand_vkm, anmil, by = c("variable", "region", "period"))
venum[, ven := value/annual_mileage] # billion vehicle-km -> thousand vehicles
venum <- venum[!is.na(ven)]
venum[, variable := gsub("|VKM", "|VNUM", variable, fixed=TRUE)][, c("value", "annual_mileage") := NULL]
venum[, unit := "tsd veh"]
setnames(venum, "ven", "value")
venum = venum[,.(model, scenario, region, variable, unit, period, value)]
return(venum)
}
reportStockAndSales <- function(annual_mileage){
if(file.exists(file.path(output_folder, "vintcomp.csv"))){
vintages_file <- file.path(output_folder, "vintcomp.csv")
vintgs <- fread(vintages_file)
} else if (file.exists(datapath(fname = "vintcomp.RDS"))){
#vintages_file <- datapath(fname = "vintcomp.RDS")
#vintgs <- readRDS(vintages_file)
return(NULL)
} else {
print("EDGE-T Reporting: No vintages file found.")
return(NULL)
}
year_c <- construction_year <- Stock <- Sales <- vintage_demand_vkm <- fct <- category <- NULL
## backward compat. fix
fct <- 1.
if("variable" %in% colnames(vintgs)){
fct <- 1e-6
setnames(vintgs, "variable", "construction_year")
}
vintgs[, year_c := as.numeric(gsub("C_", "", construction_year))]
## stock is the full stock up to the end of the current year
## sales are the sales of the current year
setnames(vintgs, "full_demand_vkm", "Stock")
vintgs[, Stock := Stock * fct]
vintgs[, Sales := Stock - sum(vintage_demand_vkm), by=.(year, region, vehicle_type, technology)]
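    ## i.e. sales in a given year are the residual of total stock demand over
    ## the surviving earlier vintages (still in vkm here; converted to vehicle
    ## numbers below via the annual mileage)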
vintgs[, c("construction_year", "vintage_demand_vkm", "year_c") := NULL]
vintgs <- unique(vintgs)
vintgs <- data.table::melt(vintgs, measure.vars = c("Stock", "Sales"), variable.name = "category")
## vkm -> v-num
vintgs = merge(vintgs, annual_mileage, by = c("year", "region", "vehicle_type"))
vintgs[, value := value / annual_mileage]
vintgs[, variable := ifelse(
vehicle_type == "Bus_tmp_vehicletype",
sprintf("%s|Transport|Bus|%s", category, technology),
sprintf("%s|Transport|LDV|%s|%s", category, vehicle_type, technology))]
## totals
vintgs <- rbindlist(list(
vintgs,
vintgs[, .(value=sum(value), variable=gsub("(.+)\\|.+$", "\\1", variable)),
by=c("category", "year", "region", "vehicle_type")],
vintgs[grepl("|LDV|", variable, fixed=TRUE),
.(value=sum(value), variable=sprintf("%s|Transport|LDV", category)),
by=c("category", "year", "region")]), fill=TRUE)
vintgs[, c("vehicle_type", "technology", "annual_mileage", "category") := NULL]
vintgs <- unique(vintgs[!is.na(value)])
setnames(vintgs, "year", "period")
vintgs = approx_dt(vintgs, c(2005, 2010, unique(vintgs$period), 2110, 2130, 2150),
xcol = "period", ycol = "value", idxcols = c("region", "variable"), extrapolate = T)
vintgs[period <= 2010|period > 2100, value := 0]
## remove the variable (e.g. vehicle_types) that are not present for this specific region
vintgs[, `:=`(model = model_name, scenario = scenario_title, unit = "Million vehicles")]
return(vintgs)
}
reportTotals <- function(aggrname, datatable, varlist){
vars <- varlist[[aggrname]]
if (length(unique(datatable[variable %in% vars]$variable)) < length(vars)){
print(paste0("Missing variables to aggregate data to ", aggrname))}
datatable <- datatable[variable %in% vars,
.(variable = aggrname,
value = sum(value)),
by = c("model", "scenario", "region", "period","unit")]
return(datatable)
}
## check the regional aggregation
regionSubsetList <- toolRegionSubsets(gdx)
# ADD EU-27 region aggregation if possible
if("EUR" %in% names(regionSubsetList)){
regionSubsetList <- c(regionSubsetList,list(
"EU27"=c("ENC", "EWN", "ECS", "ESC", "ECE", "FRA", "DEU", "ESW")
))
}
Aggrdata <- fread(system.file("extdata", "EDGETdataAggregation.csv", package = "edgeTrpLib"),header = TRUE)
## load input data from last EDGE run
  ## Data manipulation shouldn't be necessary
demand_km <- readRDS(datapath(fname = "demandF_plot_pkm.RDS"))
demand_km[, demand_F := demand_F * 1e-3] ## million -> billion pkm
setnames(demand_km, "demand_F", "value")
demand_ej <- readRDS(datapath(fname = "demandF_plot_EJ.RDS")) ## detailed final energy demand, EJ
setnames(demand_ej, "demand_EJ", "value")
demand_ej[, demand_F := NULL]
load_factor <- readRDS(datapath(fname = "loadFactor.RDS"))
annual_mileage <- readRDS(datapath(fname = "annual_mileage.RDS"))
if (length(annual_mileage)> 4){
#Same is done in lvl2_createoutput
annual_mileage <- unique(annual_mileage[, c("region", "year", "vkm.veh", "vehicle_type")])
setnames(annual_mileage, old = "vkm.veh", new = "annual_mileage")
}
if (length(load_factor)> 4){
load_factor <- load_factor[, c("year","region","vehicle_type","loadFactor","technology")]
demand_vkm <- merge(demand_km, load_factor, by = c("year", "region", "vehicle_type","technology"))
demand_vkm[, value := value/loadFactor] ## billion vkm
} else {
demand_vkm <- merge(demand_km, load_factor, by = c("year", "region", "vehicle_type"))
demand_vkm[, value := value/loadFactor]} ## billion vkm
repFE <- reporting(
demand_ej,
mode = "FE")
repVKM <- reporting(
datatable = demand_vkm,
mode = "VKM")
repES <- reporting(
datatable = demand_km,
mode = "ES")
toMIF <- rbind(
repFE,
repVKM,
repES,
reportingVehNum(repVKM,
annual_mileage),
reportingEmi(repFE = repFE,
gdx = gdx)
)
varsl <- list(
`ES|Transport|Pass|Road` = c("ES|Transport|Pass|Road|LDV", "ES|Transport|Pass|Road|Bus", "ES|Transport|Pass|Road|Non-Motorized"),
`ES|Transport|Pass|Aviation` = c("ES|Transport|Pass|Aviation|International", "ES|Transport|Pass|Aviation|Domestic"),
`ES|Transport|Pass|Rail` = c("ES|Transport|Pass|Rail|HSR", "ES|Transport|Pass|Rail|non-HSR"),
`ES|Transport|Pass` = c("ES|Transport|Pass|Road|LDV", "ES|Transport|Pass|Road|Bus", "ES|Transport|Pass|Road|Non-Motorized","ES|Transport|Pass|Rail|HSR", "ES|Transport|Pass|Rail|non-HSR","ES|Transport|Pass|Aviation|International", "ES|Transport|Pass|Aviation|Domestic"),
`ES|Transport|Freight` = c("ES|Transport|Freight|Road","ES|Transport|Freight|International Shipping","ES|Transport|Freight|Rail", "ES|Transport|Freight|Navigation"),
`ES|Transport` = c("ES|Transport|Freight|Road","ES|Transport|Freight|International Shipping","ES|Transport|Freight|Rail", "ES|Transport|Freight|Navigation","ES|Transport|Pass|Road|LDV", "ES|Transport|Pass|Road|Bus", "ES|Transport|Pass|Road|Non-Motorized","ES|Transport|Pass|Rail|HSR", "ES|Transport|Pass|Rail|non-HSR","ES|Transport|Pass|Aviation|International", "ES|Transport|Pass|Aviation|Domestic"),
`ES|Transport|VKM|Pass|Road` = c("ES|Transport|VKM|Pass|Road|LDV", "ES|Transport|VKM|Pass|Road|Bus"),
    `ES|Transport|VKM|Road` = c("ES|Transport|VKM|Freight|Road", "ES|Transport|VKM|Pass|Road|LDV", "ES|Transport|VKM|Pass|Road|Bus"),
`ES|Transport|VKM|Rail` = c("ES|Transport|VKM|Pass|Rail|HSR", "ES|Transport|VKM|Pass|Rail|non-HSR", "ES|Transport|VKM|Freight|Rail" ),
`FE|Transport|Pass|Road` = c("FE|Transport|Pass|Road|LDV", "FE|Transport|Pass|Road|Bus"),
`FE|Transport|Road` = c("FE|Transport|Freight|Road", "FE|Transport|Pass|Road|LDV", "FE|Transport|Pass|Road|Bus"),
`FE|Transport|Pass|Rail` = c("FE|Transport|Pass|Rail|HSR", "FE|Transport|Pass|Rail|non-HSR"),
`FE|Transport|Rail` = c("FE|Transport|Pass|Rail|HSR", "FE|Transport|Pass|Rail|non-HSR", "FE|Transport|Freight|Rail"),
`FE|Transport|Pass` = c("FE|Transport|Pass|Road|LDV", "FE|Transport|Pass|Road|Bus","FE|Transport|Pass|Rail|HSR", "FE|Transport|Pass|Rail|non-HSR","FE|Transport|Pass|Aviation|International", "FE|Transport|Pass|Aviation|Domestic"),
`FE|Transport|Freight` = c("FE|Transport|Freight|Road","FE|Transport|Freight|International Shipping","FE|Transport|Freight|Rail", "FE|Transport|Freight|Navigation"),
`FE|Transport` = c("FE|Transport|Freight|Road","FE|Transport|Freight|International Shipping","FE|Transport|Freight|Rail", "FE|Transport|Freight|Navigation","FE|Transport|Pass|Road|LDV", "FE|Transport|Pass|Road|Bus","FE|Transport|Pass|Rail|HSR", "FE|Transport|Pass|Rail|non-HSR","FE|Transport|Pass|Aviation|International", "FE|Transport|Pass|Aviation|Domestic"),
`FE|Transport|w/o bunkers` = c("FE|Transport|Freight|w/o bunkers","FE|Transport|Pass|w/o bunkers"),
`FE|Transport|Pass|Liquids` = c("FE|Transport|Pass|Road|LDV|Liquids", "FE|Transport|Pass|Road|Bus|Liquids", "FE|Transport|Pass|Rail|non-HSR|Liquids","FE|Transport|Pass|Aviation|International|Liquids", "FE|Transport|Pass|Aviation|Domestic|Liquids"),
`FE|Transport|Pass|Hydrogen` = c("FE|Transport|Pass|Road|LDV|Hydrogen", "FE|Transport|Pass|Road|Bus|Hydrogen", "FE|Transport|Pass|Aviation|Domestic|Hydrogen"),
`FE|Transport|Pass|Gases` = c("FE|Transport|Pass|Road|LDV|Gases", "FE|Transport|Pass|Road|Bus|Gases"),
`FE|Transport|Pass|Electricity` = c("FE|Transport|Pass|Road|LDV|Electricity", "FE|Transport|Pass|Road|Bus|Electricity","FE|Transport|Pass|Rail|HSR|Electricity", "FE|Transport|Pass|Rail|non-HSR|Electricity"),
`FE|Transport|Freight|Liquids` = c("FE|Transport|Freight|Road|Liquids","FE|Transport|Freight|International Shipping|Liquids","FE|Transport|Freight|Rail|Liquids", "FE|Transport|Freight|Navigation|Liquids"),
`FE|Transport|Freight|Hydrogen` = c("FE|Transport|Freight|Road|Hydrogen"),
`FE|Transport|Freight|Gases` = c("FE|Transport|Freight|Road|Gases"),
`FE|Transport|Freight|Electricity` = c("FE|Transport|Freight|Road|Electricity","FE|Transport|Freight|Rail|Electricity"),
`FE|Transport|Liquids` = c("FE|Transport|Freight|Road|Liquids","FE|Transport|Freight|International Shipping|Liquids","FE|Transport|Freight|Rail|Liquids", "FE|Transport|Freight|Navigation|Liquids","FE|Transport|Pass|Road|LDV|Liquids", "FE|Transport|Pass|Road|Bus|Liquids", "FE|Transport|Pass|Rail|non-HSR|Liquids","FE|Transport|Pass|Aviation|International|Liquids", "FE|Transport|Pass|Aviation|Domestic|Liquids"),
`FE|Transport|Hydrogen` = c("FE|Transport|Freight|Road|Hydrogen","FE|Transport|Pass|Road|LDV|Hydrogen", "FE|Transport|Pass|Road|Bus|Hydrogen", "FE|Transport|Pass|Aviation|Domestic|Hydrogen"),
`FE|Transport|Gases` = c("FE|Transport|Freight|Road|Gases","FE|Transport|Pass|Road|LDV|Gases", "FE|Transport|Pass|Road|Bus|Gases"),
`FE|Transport|Electricity` = c("FE|Transport|Freight|Road|Electricity","FE|Transport|Freight|Rail|Electricity","FE|Transport|Pass|Road|LDV|Electricity", "FE|Transport|Pass|Road|Bus|Electricity","FE|Transport|Pass|Rail|HSR|Electricity", "FE|Transport|Pass|Rail|non-HSR|Electricity"),
`FE|Transport|w/o bunkers|Liquids` = c("FE|Transport|Freight|w/o bunkers|Liquids","FE|Transport|Pass|w/o bunkers|Liquids"),
`FE|Transport|w/o bunkers|Hydrogen` = c("FE|Transport|Freight|w/o bunkers|Hydrogen","FE|Transport|Pass|w/o bunkers|Hydrogen"),
`FE|Transport|w/o bunkers|Gases` = c("FE|Transport|Freight|w/o bunkers|Gases","FE|Transport|Pass|w/o bunkers|Gases"),
`FE|Transport|w/o bunkers|Electricity` = c("FE|Transport|Freight|w/o bunkers|Electricity","FE|Transport|Pass|w/o bunkers|Electricity"),
`Emi|CO2|Transport|Pass|Road|Tailpipe` = c("Emi|CO2|Transport|Pass|Road|LDV|Tailpipe", "Emi|CO2|Transport|Pass|Road|Bus|Tailpipe"),
`Emi|CO2|Transport|Pass|Road|Demand` = c("Emi|CO2|Transport|Pass|Road|LDV|Demand", "Emi|CO2|Transport|Pass|Road|Bus|Demand"),
`Emi|CO2|Transport|Road|Tailpipe` = c("Emi|CO2|Transport|Freight|Road|Tailpipe", "Emi|CO2|Transport|Pass|Road|LDV|Tailpipe", "Emi|CO2|Transport|Pass|Road|Bus|Tailpipe"),
`Emi|CO2|Transport|Rail|Tailpipe` = c("Emi|CO2|Transport|Pass|Rail|non-HSR|Tailpipe", "Emi|CO2|Transport|Freight|Rail|Tailpipe"),
`Emi|CO2|Transport|Road|Demand` = c("Emi|CO2|Transport|Freight|Road|Demand", "Emi|CO2|Transport|Pass|Road|LDV|Demand", "Emi|CO2|Transport|Pass|Road|Bus|Demand"),
`Emi|CO2|Transport|Rail|Demand` = c("Emi|CO2|Transport|Pass|Rail|non-HSR|Demand", "Emi|CO2|Transport|Freight|Rail|Demand"))
names <- names(varsl)
totals <- sapply(names, reportTotals, datatable = toMIF, varlist = varsl, simplify = FALSE, USE.NAMES = TRUE)
totals <- rbindlist(totals, use.names = TRUE)
toMIF <- rbind(toMIF, totals)
toMIF <- rbindlist(list(toMIF, reportStockAndSales(annual_mileage)), use.names=TRUE)
if (!is.null(regionSubsetList)){
toMIF <- rbindlist(list(
toMIF,
toMIF[region %in% regionSubsetList[["EUR"]], .(value = sum(value), region = "EUR"), by = .(model, scenario, variable, unit, period)],
toMIF[region %in% regionSubsetList[["NEU"]], .(value = sum(value), region = "NEU"), by = .(model, scenario, variable, unit, period)],
toMIF[region %in% regionSubsetList[["EU27"]], .(value = sum(value), region = "EU27"), by = .(model, scenario, variable, unit, period)],
toMIF[, .(value = sum(value), region = "World"), by = .(model, scenario, variable, unit, period)]
), use.names=TRUE)
}
if (extendedReporting) {
LogitCostplotdata <- function(priceData, prefData, logitExp, groupValue, Reg_Aggregation){
tot_price <- sw <- logit.exponent <- weight <- NULL
yrs_costs <-c(seq(2005, 2060, 5), seq(2070, 2100, 10))
all_subsectors <- c("technology", "vehicle_type", "subsector_L1", "subsector_L2",
"subsector_L3", "sector")
# change variable names for quitte format
setnames(priceData, c("year"), c("period"))
setnames(prefData, c("year"), c("period"))
prefData <- prefData[period %in% yrs_costs]
priceData<- priceData[period %in% yrs_costs][, -c("share")]
#Filter for logit level according to groupValue. leave out tmp placeholders
priceData <- priceData[!grepl("tmp", get(groupValue))]
prefData <- prefData[!grepl("tmp", get(groupValue))]
# Calculate Inconvenience Cost from share Weight
# Logit Exponent and total price are needed for this
prefData_inco <- merge(prefData, logitExp, all.y = TRUE)
#rename original prefs afterwards
setnames(prefData,c("sw"),c("value"))
#Reduce priceData to total price
price_tot <- priceData[, c("period", "region", "tot_price", all_subsectors[
seq(match(groupValue, all_subsectors),
length(all_subsectors), 1)]), with = FALSE]
prefData_inco <- merge(prefData_inco, price_tot, by = c("period", "region", all_subsectors[
seq(match(groupValue, all_subsectors),
length(all_subsectors), 1)]))
prefData_inco[, value := tot_price * (sw^(1 / logit.exponent) - 1)]
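    # Sketch of the algebra behind the line above (our reading, not taken from
    # the EDGE-T documentation): with logit shares s_i proportional to
    # sw_i * p_i^lambda, a share weight sw is equivalent to an inconvenience
    # cost c on top of the price p satisfying sw * p^lambda = (p + c)^lambda,
    # hence c = p * (sw^(1/lambda) - 1), with lambda the (negative) logit exponent.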
    #Set inconvenience cost to zero for share weights where ES demand is zero anyway
prefData_inco <- prefData_inco[is.infinite(prefData_inco$value), value:=0]
prefData_inco <- prefData_inco[, c("region", "period", all_subsectors[
seq(match(groupValue, all_subsectors),
length(all_subsectors), 1)], "value"), with = FALSE][, variable := "Eq inconvenience cost"]
#Prepare PriceData
priceData <- data.table::melt(priceData[, -c("tot_price")], id.vars = c("region", "period", all_subsectors[
seq(match(groupValue, all_subsectors),
length(all_subsectors), 1)]))
#Regional Aggregation
#Costs are intensive variables and are aggregated with ES weights for each level of the logit
weight_pkm_logitlevel <- weight_pkm[, .(weight = sum(weight)), by = c("region", "period", all_subsectors[
seq(match(groupValue, all_subsectors),
length(all_subsectors), 1)])]
prefData_aggr <- aggregate_dt(prefData[region %in% Reg_Aggregation$region], Reg_Aggregation ,fewcol = "aggr_reg", manycol = "region", yearcol = "period", weights = weight_pkm_logitlevel[region %in% Reg_Aggregation$region & period %in% prefData$period], datacols = c("period", all_subsectors[
seq(match(groupValue, all_subsectors),
length(all_subsectors), 1)]))
setnames(prefData_aggr,"aggr_reg","region")
prefData_inco_aggr <- aggregate_dt(prefData_inco[region %in% Reg_Aggregation$region], Reg_Aggregation , fewcol = "aggr_reg", manycol = "region", yearcol = "period", weights = weight_pkm_logitlevel[region %in% Reg_Aggregation$region], datacols = c("period", "variable", all_subsectors[
seq(match(groupValue, all_subsectors),
length(all_subsectors), 1)]))
setnames(prefData_inco_aggr,"aggr_reg","region")
priceData_aggr <- aggregate_dt(priceData[region %in% Reg_Aggregation$region], Reg_Aggregation , fewcol = "aggr_reg", manycol = "region", yearcol = "period", weights = weight_pkm_logitlevel[region %in% Reg_Aggregation$region], datacols = c("period", all_subsectors[
seq(match(groupValue, all_subsectors),
length(all_subsectors), 1)]))
setnames(priceData_aggr,"aggr_reg","region")
prefData <- rbind(prefData, prefData_aggr)
priceData <- rbind(prefData_inco, prefData_inco_aggr, priceData,priceData_aggr)
if (groupValue=="vehicle_type"){
#Before prices are finally structured, vehicles are aggregated
Aggrdata_veh <- as.data.table(Aggrdata[, c("vehicle_type", "det_veh")])
Aggrdata_veh <- unique(Aggrdata_veh[!is.na(det_veh)])[, det_veh := gsub("Freight\\|Road\\||Pass\\|Road\\|", "", det_veh)]
    #Exclude entries without aggregation
Aggrdata_veh <- Aggrdata_veh[!vehicle_type==det_veh]
priceData <- priceData[, c("region","variable","vehicle_type","period","value")]
weight_pkm_VS1 <- weight_pkm[,.(weight = sum(weight)), by = c("region", "vehicle_type", "period")]
weight_pkm_VS1_aggrreg <- aggregate_dt(weight_pkm_VS1[region %in% Reg_Aggregation$region], Reg_Aggregation ,fewcol = "aggr_reg", manycol = "region", yearcol = "period", valuecol="weight", datacols = c("period", "vehicle_type"))
setnames(weight_pkm_VS1_aggrreg,"aggr_reg","region")
weight_pkm_VS1 <- rbind(weight_pkm_VS1, weight_pkm_VS1_aggrreg)
Prices_veh_aggr <- aggregate_dt(priceData[vehicle_type %in% Aggrdata_veh$vehicle_type], Aggrdata_veh , fewcol = "det_veh", manycol = "vehicle_type", yearcol = "period", weights = weight_pkm_VS1[vehicle_type %in% Aggrdata_veh$vehicle_type], datacols = c("region","variable"))
setnames(Prices_veh_aggr, "det_veh", "vehicle_type")
Prices_veh_aggr[, variable:=paste0("Logit cost|V|", vehicle_type, "|", variable)][, vehicle_type := NULL]
}
if (groupValue=="vehicle_type"){
#Convert original shareweights to quitte format
prefData[, variable := paste0("Shareweight|V|", get(groupValue))]
prefData <- prefData[, .(region, period, scenario, variable, value)]
#Convert costs to quitte format
priceData[, variable := paste0("Logit cost|V|", get(groupValue), "|", variable)]
priceData <- priceData[, .(region, period, scenario, variable, value)]
priceData <- rbind(priceData, Prices_veh_aggr)}
else{
prefData[, variable := paste0("Shareweight|S",gsub("[^123]","",groupValue), "|", get(groupValue))]
prefData <- prefData[, .(region, period, scenario, variable, value)]
#Convert costs to quitte format
priceData[, variable := paste0("Logit cost|S",gsub("[^123]","",groupValue), "|", get(groupValue), "|", variable)]
priceData <- priceData[, .(region, period, scenario, variable, value)]
}
data <- rbind(prefData[, unit := "-"], priceData[, unit := "$2005/km"])
data[, scenario := scenario_title][, model := model_name]
return(data)
}
LogitCostplotdata_FV <- function(priceData, prefData, logitExp, Reg_Aggregation){
tot_price <- sw <- logit.exponent <- weight <- logit_type <- av_veh <- NULL
    #Calculate equivalent inconvenience costs from the share weights
yrs_costs <-c(seq(2005, 2060, 5), seq(2070, 2100, 10))
# change variable names for mip
setnames(priceData, c("year"), c("period"))
setnames(prefData, c("year"), c("period"))
#Exclude active modes as they have no fuel
prefData <- prefData[period %in% yrs_costs & !technology %in% c("Cycle_tmp_technology","Walk_tmp_technology")]
priceData<- priceData[period %in% yrs_costs]
# Calculate Inconvenience Cost from share Weight
priceData_sw <- copy(prefData)
priceData_sw <- priceData_sw[logit_type == "sw"][, logit_type := NULL]
setnames(priceData_sw, "value", "sw")
priceData_sw <- merge(priceData_sw, logitExp, all.x = TRUE)
#This should be removed in refactoring process
priceData_sw[grepl("^Truck", vehicle_type), logit.exponent := -4]
priceData_sw <- priceData_sw[is.na(logit.exponent), logit.exponent := -10]
price_tot <- priceData[, c("period", "region","tot_price", "technology","vehicle_type")]
priceData_sw <- merge(priceData_sw, price_tot, by = c("period", "region", "technology","vehicle_type"),
all.x=TRUE)
priceData_sw[, value := tot_price * (sw^(1 / logit.exponent) - 1)]
    #Set inconvenience cost to zero for share weights where ES demand is zero anyway
priceData_sw <- priceData_sw[is.infinite(priceData_sw$value), value := 0]
#Some total prices are missing
priceData_sw <- priceData_sw[is.na(priceData_sw$value), value := 0]
priceData_sw <- priceData_sw[, c("period", "region", "technology","vehicle_type","value")][, variable := "Eq inconvenience cost"]
priceData_inco_LDV <- prefData[!logit_type == "sw"][, c("period", "region", "technology","vehicle_type","value","logit_type")]
setnames(priceData_inco_LDV, "logit_type", "variable")
#Exclude LDV inco from prefdata
prefData <- prefData[logit_type == "sw"]
prefData <- prefData[, .(region, period, scenario, vehicle_type, technology, value)]
priceData <- data.table::melt(priceData[, -c("tot_price", "share", "subsector_L1", "subsector_L2", "subsector_L3", "sector")], id.vars = c("region", "period", "technology", "vehicle_type"))
priceData <- rbind(priceData, priceData_sw, priceData_inco_LDV)
#Regional Aggregation
#Costs are intensive variables and are aggregated with ES weights for each level of the logit
weight_pkm_FV <- weight_pkm[, .(weight = sum(weight)), by = c("region", "period","vehicle_type", "technology")]
    #TO FIX: Hydrogen and BEV technologies for aviation and two-wheelers are not
    #available everywhere -> insert zero as weight
    weight_pkm_FV <- merge(weight_pkm_FV, priceData, by = c("region", "period", "vehicle_type", "technology"), all = TRUE)
weight_pkm_FV[is.na(weight_pkm_FV$weight), weight := 0]
weight_pkm_FV <- weight_pkm_FV[, c("region", "period","vehicle_type", "technology", "weight")]
weight_pkm_FV <- weight_pkm_FV[period > 1990 & period < 2110]
weight_pkm_FV <- unique(weight_pkm_FV)
weight_pkm_FV_aggrreg <- aggregate_dt(weight_pkm_FV[region %in% Reg_Aggregation$region], Reg_Aggregation ,fewcol = "aggr_reg", manycol = "region", yearcol = "period", valuecol="weight", datacols = c("period", "vehicle_type","technology"))
setnames(weight_pkm_FV_aggrreg,"aggr_reg","region")
weight_pkm_FV <- rbind(weight_pkm_FV, weight_pkm_FV_aggrreg)
priceData_aggrreg <- aggregate_dt(priceData[region %in% Reg_Aggregation$region], Reg_Aggregation, fewcol = "aggr_reg", manycol = "region", yearcol = "period", weights = weight_pkm_FV[region %in% Reg_Aggregation$region], datacols = c("period", "technology", "vehicle_type"))
setnames(priceData_aggrreg,"aggr_reg","region")
priceData <- rbind(priceData, priceData_aggrreg)
prefData_aggrreg <- aggregate_dt(prefData[region %in% Reg_Aggregation$region], Reg_Aggregation, fewcol = "aggr_reg", manycol = "region", yearcol = "period", weights = weight_pkm_FV[region %in% Reg_Aggregation$region], datacols = c("period", "technology", "vehicle_type"))
setnames(prefData_aggrreg,"aggr_reg","region")
prefData <- rbind(prefData, prefData_aggrreg)
#Before prices are finally structured, vehicles are aggregated
#ES pkm are used as weights for data aggregation
Aggrdata_veh <- as.data.table(Aggrdata[, c("vehicle_type", "det_veh")])
#Remove entries that are not aggregated
Aggrdata_veh <- Aggrdata_veh[!vehicle_type == det_veh]
Aggrdata_veh <- unique(Aggrdata_veh[!is.na(det_veh)])[, det_veh := gsub("Freight\\|Road\\||Pass\\|Road\\|", "", det_veh)]
priceData_aggr <- aggregate_dt(priceData[vehicle_type %in% Aggrdata_veh$vehicle_type], Aggrdata_veh , fewcol = "det_veh", manycol = "vehicle_type", yearcol = "period", weights = weight_pkm_FV[vehicle_type %in% Aggrdata_veh$vehicle_type], datacols = c("region", "variable", "technology"))
setnames(priceData_aggr, "det_veh", "vehicle_type")
#Aggregate average vehicle
Aggrdata_avveh <- as.data.table(Aggrdata)
Aggrdata_avveh <- Aggrdata_avveh[subsector_L1 == "trn_pass_road_LDV_4W"]
Aggrdata_avveh <- unique(Aggrdata_avveh[, c("vehicle_type")])
Aggrdata_avveh[, av_veh := "Average veh"]
priceData_av <- aggregate_dt(priceData[vehicle_type %in% Aggrdata_avveh$vehicle_type], Aggrdata_avveh , fewcol = "av_veh", manycol = "vehicle_type", yearcol = "period", weights = weight_pkm_FV[vehicle_type %in% Aggrdata_avveh$vehicle_type], datacols = c("region", "variable","technology"))
setnames(priceData_av, "av_veh", "vehicle_type")
priceData <- rbind(priceData, priceData_aggr, priceData_av)
priceData <- priceData[, variable := paste0("Logit cost|F|", gsub("_tmp_vehicletype", "", vehicle_type), "|", technology, "|", variable)][, c("region", "period", "variable", "value")][, unit := "$2005/km"][, model := model_name][, scenario := scenario_title]
prefData[, variable := paste0("Shareweight|F|", gsub("_tmp_vehicletype", "", vehicle_type), "|", technology)][, unit := "-"][, model := model_name][, scenario := scenario_title]
prefData <- prefData[, c("period", "region", "variable", "unit", "model", "scenario", "value")]
data <- rbind(priceData, prefData)
return(data)
}
# Mapping efficiencies for useful energy
Mapp_UE <- data.table(
technology = c("FCEV", "BEV", "Electric", "Liquids", "Hydrogen"),
UE_efficiency = c(0.36, 0.64, 0.8, 0.23, 0.25))
#ES pkm are used as weights for data aggregation
weight_pkm <- copy(demand_km)
setnames(weight_pkm, c("value","year"), c("weight","period"))
  weight_pkm[, sector := ifelse(sector == "Pass", "trn_pass", "trn_freight")]
  weight_pkm[, sector := ifelse(subsector_L3 == "International Aviation", "trn_aviation_intl", sector)]
  weight_pkm[, sector := ifelse(subsector_L3 == "International Ship", "trn_shipping_intl", sector)]
#Mapping for region Aggregation
RegAggregation <- data.table(
aggr_reg = c("EUR", "EUR", "EUR", "EUR", "EUR", "EUR", "EUR", "EUR", "EUR", "NEU", "NEU"),
region = c("ENC", "EWN", "ECS", "ESC", "ECE", "FRA", "DEU", "UKI", "ESW", "NES", "NEN"))
# #Calculate useful energy
# UE <- toMIF[grepl("FE" & ("FCEV"|"BEV"|"Electric"|"Liquids"|"Hydrogen"), variable)]
# UE[, technology := gsub(!("FCEV"|"BEV"|"Electric"|"Liquids"|"Hydrogen"),"", variable)]
# UE <- merge(UE, Mapp_UE)
# UE[, value:= value*UE_efficiency][, variable := gsub("FE","UE", variable)]
# toMIF <- rbind(toMIF, UE)
#Calculate logit Costs
#Read in additional data if exist
if (file.exists(datapath(fname = "logit_data.RDS"))){
logit_data <- readRDS(datapath(fname = "logit_data.RDS"))
prices <- logit_data$share_list
Pref <- logit_data$pref_data
if (file.exists(datapath(fname = "logit_exp.RDS"))){
logit_exp <- readRDS(datapath(fname = "logit_exp.RDS"))
logit_exp <- logit_exp$logit_output
#Prices S3S
Prices_S3S <- prices$S3S_shares
setkey(Prices_S3S, NULL)
Pref_S3S <- Pref$S3S_final_pref
setkey(Pref_S3S, NULL)
logit_exp_S3S <- logit_exp$logit_exponent_S3S
setkey(logit_exp_S3S, NULL)
#Adjust in model itself in refactoring process
Prices_S3S[subsector_L3 %in% c("Cycle","Walk"), tot_VOT_price := tot_price]
PrefandPrices_S3S <- LogitCostplotdata(priceData = Prices_S3S, prefData = Pref_S3S, logitExp =logit_exp_S3S, groupValue = "subsector_L3", Reg_Aggregation = RegAggregation)
#Prices S2S3
Prices_S2S3 <- prices$S2S3_shares
setkey(Prices_S2S3, NULL)
Pref_S2S3 <- Pref$S2S3_final_pref
setkey(Pref_S2S3, NULL)
logit_exp_S2S3 <- logit_exp$logit_exponent_S2S3
setkey(logit_exp_S2S3, NULL)
PrefandPrices_S2S3 <- LogitCostplotdata(priceData = Prices_S2S3, prefData = Pref_S2S3, logitExp = logit_exp_S2S3, groupValue = "subsector_L2", Reg_Aggregation = RegAggregation)
#Prices S1S2
Prices_S1S2 <- prices$S1S2_shares
setkey(Prices_S1S2, NULL)
Pref_S1S2 <- Pref$S1S2_final_pref
setkey(Pref_S1S2, NULL)
logit_exp_S1S2 <- logit_exp$logit_exponent_S1S2
setkey(logit_exp_S1S2, NULL)
PrefandPrices_S1S2 <- LogitCostplotdata(priceData = Prices_S1S2, prefData = Pref_S1S2, logitExp = logit_exp_S1S2, groupValue = "subsector_L1", Reg_Aggregation = RegAggregation)
#Prices VS1
Prices_VS1 <- prices$VS1_shares
setkey(Prices_VS1, NULL)
Pref_VS1 <- Pref$VS1_final_pref
setkey(Pref_VS1, NULL)
logit_exp_VS1 <- logit_exp$logit_exponent_VS1
setkey(logit_exp_VS1, NULL)
#Add subsector_L2, subsector L3 and sector to Prices_VS1 (for structural conformity)
Prices_VS1 <- merge(Prices_VS1, unique(Pref_VS1[, c("subsector_L2", "subsector_L3", "sector", "vehicle_type")]), by = "vehicle_type", all.x = TRUE)
PrefandPrices_VS1 <- LogitCostplotdata(priceData=Prices_VS1, prefData = Pref_VS1,logitExp = logit_exp_VS1, groupValue = "vehicle_type", Reg_Aggregation = RegAggregation)
#Prices FV
Prices_FV <- prices$FV_shares
setkey(Prices_FV, NULL)
Pref_FV <- Pref$FV_final_pref
setkey(Pref_FV, NULL)
logit_exp_VS1 <- logit_exp$logit_exponent_FV
setkey(logit_exp_VS1, NULL)
Prices_FV <- LogitCostplotdata_FV(priceData=Prices_FV, prefData=Pref_FV, logitExp=logit_exp_VS1, Reg_Aggregation = RegAggregation)
Pref_FV <- Pref_FV[logit_type=="sw"]
#Walking and cycling have no fuel options
Pref_FV <- Pref_FV[!technology %in% c("Cycle_tmp_technology","Walk_tmp_technology")]
Pref_FV[, variable:=paste0("Shareweight|F|",gsub("_tmp_vehicletype","",vehicle_type),"|",technology)][,unit:="-"][,scenario:=scenario_title][,model:=model_name]
Pref_FV <- Pref_FV[,.(region,period,scenario,variable,value,unit,model)]
toMIF <- rbind(toMIF,PrefandPrices_S3S, PrefandPrices_S2S3, PrefandPrices_S1S2, PrefandPrices_VS1, Prices_FV, Pref_FV)}}
#Aggregate data
#Insert POP and GDP
  if (file.exists(datapath(fname = "POP.RDS")) && file.exists(datapath(fname = "GDP.RDS"))){
POP <- readRDS(datapath(fname = "POP.RDS"))
GDP <- readRDS(datapath(fname = "GDP.RDS"))
POP <- POP[year %in% yrs]
GDP <- GDP[year %in% yrs]
POP[, model:= model_name][, scenario:= scenario_title][, variable := "Population"][, unit := "million"]
GDP[, model:= model_name][, scenario:= scenario_title][, variable := "GDP|PPP"]
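    ## scale GDP from million to billion US$2005 (hence the 0.001 factor)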
GDP[, weight := weight*0.001][, unit := "billion US$2005/yr"]
setnames(GDP,c("year","weight"),c("period","value"))
setnames(POP,"year","period")
if (!is.null(regionSubsetList)){
toMIF <- rbindlist(list(
toMIF,
POP[region %in% regionSubsetList[["EUR"]], .(value = sum(value), region = "EUR"), by = .(model, scenario, variable, unit, period)],
POP[region %in% regionSubsetList[["NEU"]], .(value = sum(value), region = "NEU"), by = .(model, scenario, variable, unit, period)],
POP[region %in% regionSubsetList[["EU27"]], .(value = sum(value), region = "EU27"), by = .(model, scenario, variable, unit, period)],
POP[, .(value = sum(value), region = "World"), by = .(model, scenario, variable, unit, period)],
GDP[region %in% regionSubsetList[["EUR"]], .(value = sum(value), region = "EUR"), by = .(model, scenario, variable, unit, period)],
GDP[region %in% regionSubsetList[["NEU"]], .(value = sum(value), region = "NEU"), by = .(model, scenario, variable, unit, period)],
GDP[region %in% regionSubsetList[["EU27"]], .(value = sum(value), region = "EU27"), by = .(model, scenario, variable, unit, period)],
GDP[, .(value = sum(value), region = "World"), by = .(model, scenario, variable, unit, period)]
), use.names=TRUE)
}
toMIF <- rbind(toMIF, POP, GDP)
}
}
#We should finally decide for which yrs the model runs and shows reasonable results
toMIF <- toMIF[period %in% yrs]
## Make sure there are no duplicates!
idx <- anyDuplicated(toMIF, by = c("region", "variable", "period"))
if(idx){
    warning(paste0("Duplicates found in EDGE-T reporting output:\n",
                   paste(capture.output(toMIF[idx]), collapse = "\n")))
}
toMIF <- toMIF[!duplicated(toMIF)]
toMIF <- toMIF[, c("model", "scenario", "region", "variable", "unit", "period", "value")]
return(as.quitte(toMIF))
}
|
/R/reportEDGETransport2.R
|
no_license
|
pik-piam/edgeTrpLib
|
R
| false
| false
| 42,896
|
r
|
#' Reporting for the coupled EDGE-T Transport Sector Model (REMIND Module edge_esm)
#'
#' Data is loaded from the EDGE-T subfolder in the output folder.
#' The input files can be (re-) generated calling
#' `Rscript EDGETransport.R --reporting`
#' from the output folder.
#'
#' *Warning* The function modifies the "REMIND_generic_<scenario>.mif" file by appending the
#' additional reporting variables and replaces the "_withoutPlus" version.
#'
#' Region subsets are obtained from fulldata.gdx
#'
#' @param output_folder path to the output folder, default is current folder.
#' @param sub_folder subfolder with EDGE-T output files (level_2 for standalone, EDGE-T for coupled runs)
#' @param loadmif shall we try to load a REMIND MIF file from the output folder to append the variables?
#' @param extendedReporting report a larger set of variables
#' @param scenario_title a scenario title string
#' @param model_name a model name string
#' @param gdx path to the GDX file used for the run.
#' @author Johanna Hoppe Alois Dirnaichner Marianna Rottoli
#'
#' @importFrom rmndt approx_dt readMIF writeMIF
#' @importFrom gdxdt readgdx
#' @importFrom data.table fread fwrite rbindlist copy CJ
#' @importFrom remind2 toolRegionSubsets
#' @importFrom quitte as.quitte
#' @export
reportEDGETransport2 <- function(output_folder = ".", sub_folder = "EDGE-T/",
loadmif = TRUE , extendedReporting = FALSE,
scenario_title = NULL, model_name = "EDGE-Transport",
gdx = NULL) {
## NULL Definitons for codeCheck compliance
RegionCode <- CountryCode <- `.` <- sector <- subsector_L3 <- region <- year <- NULL
subsector_L2 <- subsector_L1 <- aggr_mode <- vehicle_type <- det_veh <- aggr_nonmot <- NULL
demand_F <- demand_EJ <- remind_rep <- V25 <- aggr_veh <- technology <- NULL
ttot <- se_share <- fe_demand <- variable <- value <- demand_VKM <- loadFactor <- NULL
all_enty <- ef <- variable_agg <- model <- scenario <- period <- NULL
Region <- Variable <- co2 <- co2val <- elh2 <- fe <- NULL
int <- se <- sec <- sharesec <- te <- tech <- val <- share <- NULL
eff <- sharebio <- sharesyn <- totseliq <- type <- ven <- NULL
unit <- tot_VOT_price <- tot_price <- logit_type <- capture.output <- weight <- NULL
#pkm or tkm is called km in the reporting. Vehicle km are called vkm
yrs <- c(seq(2005, 2060, 5), seq(2070, 2100, 10))
datapath <- function(fname){
file.path(output_folder, sub_folder, fname)}
reporting <- function(datatable, mode){
aggr_mode_tech <- aggr_LDV <- aggr_LDV_tech <- det_veh_tech <- aggr_bunkers <- aggr_bunkers_tech <- aggr_veh_tech <- capture.output <- NULL
report <- list()
datatable[, sector := ifelse(sector %in% c("trn_pass", "trn_aviation_intl"), "Pass", "Freight")]
datatable <- merge(datatable,Aggrdata,by = c("sector", "subsector_L1", "subsector_L2", "subsector_L3", "vehicle_type", "technology"), all.x = TRUE, allow.cartesian = TRUE)
#How to account for Hybrid Electric in Final Energy?
if (mode == "FE") {
techmap <- data.table(
technology = c("BEV","Electric", "Hybrid Electric", "FCEV", "Hydrogen", "Liquids", "NG"),
remind_rep = c("Electricity", "Electricity", "Liquids", "Hydrogen", "Hydrogen", "Liquids", "Gases"))
} else {
techmap <- data.table(
technology = c("BEV", "Electric", "Hybrid Electric", "FCEV", "Hydrogen", "Liquids","NG"),
remind_rep = c("BEV", "Electric", "Hybrid Electric", "FCEV", "Hydrogen", "Liquids", "Gases"))
}
datatable <- merge(datatable,techmap,by = c("technology"), all.x = TRUE)
datatable[!is.na(aggr_mode) & !is.na(remind_rep), aggr_mode_tech := paste0(aggr_mode, "|", remind_rep)]
datatable[!is.na(aggr_veh) & !is.na(remind_rep), aggr_veh_tech := paste0(aggr_veh, "|", remind_rep)]
datatable[!is.na(aggr_LDV) & !is.na(remind_rep), aggr_LDV_tech := paste0(aggr_LDV, "|", remind_rep)]
datatable[!is.na(det_veh) & !is.na(remind_rep), det_veh_tech := paste0(det_veh, "|", remind_rep)]
datatable[!is.na(aggr_bunkers) & !is.na(remind_rep), aggr_bunkers_tech := paste0(aggr_bunkers, "|", remind_rep)]
unit <- switch(mode,
"FE" = "EJ/yr",
"ES" = "bn km/yr",
"VKM" = "bn vkm/yr")
prefix <- switch(mode,
"FE" = "FE|Transport|",
"ES" = "ES|Transport|",
"VKM" = "ES|Transport|VKM|")
var <- c("Pass","Freight")
Aggr <- c("aggr_mode", "aggr_veh", "aggr_LDV", "det_veh", "nonmot", "aggr_nonmot", "aggr_bunkers", "aggr_mode_tech", "aggr_veh_tech", "aggr_LDV_tech", "det_veh_tech","aggr_bunkers_tech")
for (var0 in var) {
for (Aggr0 in Aggr) {
#Aggregate data
datatable0 <- copy(datatable)
datatable0 <- datatable0[!is.na(get(Aggr0))]
datatable0 <- datatable0[sector == var0, .(value = sum(value, na.rm = T)),
by = c("region", "year", Aggr0)]
if(nrow(datatable0) > 0) {
setnames(datatable0, "year", "period")
datatable0 <- datatable0[, model := model_name][, scenario := scenario_title][, variable := paste0(prefix, get(Aggr0))][, unit := unit][, eval(Aggr0) := NULL]
datatable0 <- approx_dt(datatable0, yrs, xcol = "period", ycol = "value",
idxcols = c("scenario","variable","unit","model","region"),
extrapolate = T)
report <- rbind(report, datatable0)}
}
}
return(report)
}
## Demand emissions
reportingEmi <- function(repFE, gdx){
## load emission factors for fossil fuels
p_ef_dem <- readgdx(gdx, "p_ef_dem")[all_enty %in% c("fepet", "fedie", "fegas")] ## MtCO2/EJ
p_ef_dem[all_enty == "fegas", all_enty := "fegat"]
setnames(p_ef_dem, old = c("value", "all_regi"), new = c("ef", "region"))
## attribute explicitly fuel used to the FE values
emidem = repFE[grepl("Liquids|Gases|Hydrogen|Electricity", variable) & region != "World"] ## EJ
emidem[, all_enty := ifelse(grepl("Liquids", variable), "fedie", NA)]
emidem[, all_enty := ifelse(grepl("LDV.+Liquids", variable), "fepet", all_enty)]
emidem[, all_enty := ifelse(grepl("Gases", variable), "fegat", all_enty)]
emidem[, all_enty := ifelse(grepl("Electricity", variable), "feelt", all_enty)]
emidem[, all_enty := ifelse(grepl("Hydrogen", variable), "feh2t", all_enty)]
## merge with emission factors
emidem = emidem[p_ef_dem, on = c("all_enty","region")]
## calculate emissions and attribute variable and unit names
emidem[, value := value*ef][, c("variable", "unit") := list(gsub("FE", "Emi\\|CO2", variable), "Mt CO2/yr")]
emi = rbind(copy(emidem)[, c("type", "variable") := list("tailpipe", paste0(variable, "|Tailpipe"))],
copy(emidem)[, c("type", "variable") := list("demand", paste0(variable, "|Demand"))])
prodFe <- readgdx(gdx, "vm_prodFE")[, ttot := as.numeric(ttot)]
setnames(prodFe,
c("period", "region", "se", "all_enty", "te", "fe_demand"))
prodFe[, se_share := fe_demand/sum(fe_demand), by = c("period", "region", "all_enty")]
prodFe <- prodFe[all_enty %in% c("fedie", "fepet", "fegat") & se %in% c("segafos", "seliqfos")][, c("se", "te", "fe_demand") := NULL]
emi <- prodFe[emi, on = c("period", "region", "all_enty")]
## in case no fossil fuels are used (e.g. 100% biodiesel), the value in se_share results NA. set the NA value to 0
emi[is.na(se_share), se_share := 0]
emi <- emi[all_enty %in% c("fedie", "fepet", "fegat") & type == "demand", value := value*se_share]
emi[, c("se_share", "type", "ef", "all_enty") := NULL]
## aggregate removing the fuel dependency
emi[, variable_agg := gsub("\\|Liquids|\\|Electricity|\\|Hydrogen|\\|Gases", "", variable)]
emi = emi[, .(value = sum(value)), by = c("model", "scenario", "region", "unit", "period", "variable_agg")]
setnames(emi, old = "variable_agg", new = "variable")
emi = emi[, .(model, scenario, region, variable, unit, period, value)]
return(emi)
}
reportingVehNum <- function(demand_vkm, annual_mileage){
venum <- copy(demand_vkm)
## merge annual mileage
anmil <- copy(annual_mileage)
anmil[grepl("Subcompact", vehicle_type),
variable := "Pass|Road|LDV|Small"]
anmil[grepl("Mini", vehicle_type),
variable := "Pass|Road|LDV|Mini"]
anmil[vehicle_type == "Compact Car", variable := "Pass|Road|LDV|Medium"]
anmil[grepl("Large Car|Midsize Car", vehicle_type), variable := "Pass|Road|LDV|Large"]
anmil[grepl("SUV", vehicle_type),
variable := "Pass|Road|LDV|SUV"]
anmil[grepl("Van|Multipurpose", vehicle_type),
variable := "Pass|Road|LDV|Van"]
anmil[grepl("Motorcycle|Scooter|Moped", vehicle_type),
variable := "Pass|Road|LDV|Two-Wheelers"]
anmil[grepl("^Truck", vehicle_type),
variable := sprintf("Freight|Road|%s", vehicle_type)]
anmil[grepl("Bus", vehicle_type),
variable := "Pass|Road|Bus"]
anmil <- anmil[,.(region, period = year, variable, annual_mileage)]
anmil <- approx_dt(anmil, unique(demand_vkm$period), xcol = "period", ycol = "annual_mileage", idxcols = c("region", "variable"), extrapolate = T)
anmil<- unique(anmil[, c("period", "region", "variable", "annual_mileage")])
anmil <- anmil[, variable := paste0("ES|Transport|VKM|", variable)]
venum <- merge(demand_vkm, anmil, by = c("variable", "region", "period"))
venum[, ven := value/annual_mileage] # billion vehicle-km -> thousand vehicles
venum <- venum[!is.na(ven)]
venum[, variable := gsub("|VKM", "|VNUM", variable, fixed=TRUE)][, c("value", "annual_mileage") := NULL]
venum[, unit := "tsd veh"]
setnames(venum, "ven", "value")
venum = venum[,.(model, scenario, region, variable, unit, period, value)]
return(venum)
}
reportStockAndSales <- function(annual_mileage){
if(file.exists(file.path(output_folder, "vintcomp.csv"))){
vintages_file <- file.path(output_folder, "vintcomp.csv")
vintgs <- fread(vintages_file)
} else if (file.exists(datapath(fname = "vintcomp.RDS"))){
#vintages_file <- datapath(fname = "vintcomp.RDS")
#vintgs <- readRDS(vintages_file)
return(NULL)
} else {
print("EDGE-T Reporting: No vintages file found.")
return(NULL)
}
year_c <- construction_year <- Stock <- Sales <- vintage_demand_vkm <- fct <- category <- NULL
## backward compat. fix
fct <- 1.
if("variable" %in% colnames(vintgs)){
fct <- 1e-6
setnames(vintgs, "variable", "construction_year")
}
vintgs[, year_c := as.numeric(gsub("C_", "", construction_year))]
## stock is the full stock up to the end of the current year
## sales are the sales of the current year
setnames(vintgs, "full_demand_vkm", "Stock")
vintgs[, Stock := Stock * fct]
vintgs[, Sales := Stock - sum(vintage_demand_vkm), by=.(year, region, vehicle_type, technology)]
vintgs[, c("construction_year", "vintage_demand_vkm", "year_c") := NULL]
vintgs <- unique(vintgs)
vintgs <- data.table::melt(vintgs, measure.vars = c("Stock", "Sales"), variable.name = "category")
## vkm -> v-num
vintgs = merge(vintgs, annual_mileage, by = c("year", "region", "vehicle_type"))
vintgs[, value := value / annual_mileage]
vintgs[, variable := ifelse(
vehicle_type == "Bus_tmp_vehicletype",
sprintf("%s|Transport|Bus|%s", category, technology),
sprintf("%s|Transport|LDV|%s|%s", category, vehicle_type, technology))]
## totals
vintgs <- rbindlist(list(
vintgs,
vintgs[, .(value=sum(value), variable=gsub("(.+)\\|.+$", "\\1", variable)),
by=c("category", "year", "region", "vehicle_type")],
vintgs[grepl("|LDV|", variable, fixed=TRUE),
.(value=sum(value), variable=sprintf("%s|Transport|LDV", category)),
by=c("category", "year", "region")]), fill=TRUE)
vintgs[, c("vehicle_type", "technology", "annual_mileage", "category") := NULL]
vintgs <- unique(vintgs[!is.na(value)])
setnames(vintgs, "year", "period")
vintgs = approx_dt(vintgs, c(2005, 2010, unique(vintgs$period), 2110, 2130, 2150),
xcol = "period", ycol = "value", idxcols = c("region", "variable"), extrapolate = T)
vintgs[period <= 2010|period > 2100, value := 0]
## remove the variable (e.g. vehicle_types) that are not present for this specific region
vintgs[, `:=`(model = model_name, scenario = scenario_title, unit = "Million vehicles")]
return(vintgs)
}
reportTotals <- function(aggrname, datatable, varlist){
vars <- varlist[[aggrname]]
if (length(unique(datatable[variable %in% vars]$variable)) < length(vars)){
print(paste0("Missing variables to aggregate data to ", aggrname))}
datatable <- datatable[variable %in% vars,
.(variable = aggrname,
value = sum(value)),
by = c("model", "scenario", "region", "period","unit")]
return(datatable)
}
## check the regional aggregation
regionSubsetList <- toolRegionSubsets(gdx)
# ADD EU-27 region aggregation if possible
if("EUR" %in% names(regionSubsetList)){
regionSubsetList <- c(regionSubsetList,list(
"EU27"=c("ENC", "EWN", "ECS", "ESC", "ECE", "FRA", "DEU", "ESW")
))
}
Aggrdata <- fread(system.file("extdata", "EDGETdataAggregation.csv", package = "edgeTrpLib"),header = TRUE)
## load input data from last EDGE run
## Data manipulation shouldnt be necessary
demand_km <- readRDS(datapath(fname = "demandF_plot_pkm.RDS"))
demand_km[, demand_F := demand_F * 1e-3] ## million -> billion pkm
setnames(demand_km, "demand_F", "value")
demand_ej <- readRDS(datapath(fname = "demandF_plot_EJ.RDS")) ## detailed final energy demand, EJ
setnames(demand_ej, "demand_EJ", "value")
demand_ej[, demand_F := NULL]
load_factor <- readRDS(datapath(fname = "loadFactor.RDS"))
annual_mileage <- readRDS(datapath(fname = "annual_mileage.RDS"))
if (length(annual_mileage)> 4){
#Same is done in lvl2_createoutput
annual_mileage <- unique(annual_mileage[, c("region", "year", "vkm.veh", "vehicle_type")])
setnames(annual_mileage, old = "vkm.veh", new = "annual_mileage")
}
if (length(load_factor)> 4){
load_factor <- load_factor[, c("year","region","vehicle_type","loadFactor","technology")]
demand_vkm <- merge(demand_km, load_factor, by = c("year", "region", "vehicle_type","technology"))
demand_vkm[, value := value/loadFactor] ## billion vkm
} else {
demand_vkm <- merge(demand_km, load_factor, by = c("year", "region", "vehicle_type"))
demand_vkm[, value := value/loadFactor]} ## billion vkm
repFE <- reporting(
demand_ej,
mode = "FE")
repVKM <- reporting(
datatable = demand_vkm,
mode = "VKM")
repES <- reporting(
datatable = demand_km,
mode = "ES")
toMIF <- rbind(
repFE,
repVKM,
repES,
reportingVehNum(repVKM,
annual_mileage),
reportingEmi(repFE = repFE,
gdx = gdx)
)
varsl <- list(
`ES|Transport|Pass|Road` = c("ES|Transport|Pass|Road|LDV", "ES|Transport|Pass|Road|Bus", "ES|Transport|Pass|Road|Non-Motorized"),
`ES|Transport|Pass|Aviation` = c("ES|Transport|Pass|Aviation|International", "ES|Transport|Pass|Aviation|Domestic"),
`ES|Transport|Pass|Rail` = c("ES|Transport|Pass|Rail|HSR", "ES|Transport|Pass|Rail|non-HSR"),
`ES|Transport|Pass` = c("ES|Transport|Pass|Road|LDV", "ES|Transport|Pass|Road|Bus", "ES|Transport|Pass|Road|Non-Motorized","ES|Transport|Pass|Rail|HSR", "ES|Transport|Pass|Rail|non-HSR","ES|Transport|Pass|Aviation|International", "ES|Transport|Pass|Aviation|Domestic"),
`ES|Transport|Freight` = c("ES|Transport|Freight|Road","ES|Transport|Freight|International Shipping","ES|Transport|Freight|Rail", "ES|Transport|Freight|Navigation"),
`ES|Transport` = c("ES|Transport|Freight|Road","ES|Transport|Freight|International Shipping","ES|Transport|Freight|Rail", "ES|Transport|Freight|Navigation","ES|Transport|Pass|Road|LDV", "ES|Transport|Pass|Road|Bus", "ES|Transport|Pass|Road|Non-Motorized","ES|Transport|Pass|Rail|HSR", "ES|Transport|Pass|Rail|non-HSR","ES|Transport|Pass|Aviation|International", "ES|Transport|Pass|Aviation|Domestic"),
`ES|Transport|VKM|Pass|Road` = c("ES|Transport|VKM|Pass|Road|LDV", "ES|Transport|VKM|Pass|Road|Bus"),
  `ES|Transport|VKM|Road` = c("ES|Transport|VKM|Freight|Road", "ES|Transport|VKM|Pass|Road|LDV", "ES|Transport|VKM|Pass|Road|Bus"),
`ES|Transport|VKM|Rail` = c("ES|Transport|VKM|Pass|Rail|HSR", "ES|Transport|VKM|Pass|Rail|non-HSR", "ES|Transport|VKM|Freight|Rail" ),
`FE|Transport|Pass|Road` = c("FE|Transport|Pass|Road|LDV", "FE|Transport|Pass|Road|Bus"),
`FE|Transport|Road` = c("FE|Transport|Freight|Road", "FE|Transport|Pass|Road|LDV", "FE|Transport|Pass|Road|Bus"),
`FE|Transport|Pass|Rail` = c("FE|Transport|Pass|Rail|HSR", "FE|Transport|Pass|Rail|non-HSR"),
`FE|Transport|Rail` = c("FE|Transport|Pass|Rail|HSR", "FE|Transport|Pass|Rail|non-HSR", "FE|Transport|Freight|Rail"),
`FE|Transport|Pass` = c("FE|Transport|Pass|Road|LDV", "FE|Transport|Pass|Road|Bus","FE|Transport|Pass|Rail|HSR", "FE|Transport|Pass|Rail|non-HSR","FE|Transport|Pass|Aviation|International", "FE|Transport|Pass|Aviation|Domestic"),
`FE|Transport|Freight` = c("FE|Transport|Freight|Road","FE|Transport|Freight|International Shipping","FE|Transport|Freight|Rail", "FE|Transport|Freight|Navigation"),
`FE|Transport` = c("FE|Transport|Freight|Road","FE|Transport|Freight|International Shipping","FE|Transport|Freight|Rail", "FE|Transport|Freight|Navigation","FE|Transport|Pass|Road|LDV", "FE|Transport|Pass|Road|Bus","FE|Transport|Pass|Rail|HSR", "FE|Transport|Pass|Rail|non-HSR","FE|Transport|Pass|Aviation|International", "FE|Transport|Pass|Aviation|Domestic"),
`FE|Transport|w/o bunkers` = c("FE|Transport|Freight|w/o bunkers","FE|Transport|Pass|w/o bunkers"),
`FE|Transport|Pass|Liquids` = c("FE|Transport|Pass|Road|LDV|Liquids", "FE|Transport|Pass|Road|Bus|Liquids", "FE|Transport|Pass|Rail|non-HSR|Liquids","FE|Transport|Pass|Aviation|International|Liquids", "FE|Transport|Pass|Aviation|Domestic|Liquids"),
`FE|Transport|Pass|Hydrogen` = c("FE|Transport|Pass|Road|LDV|Hydrogen", "FE|Transport|Pass|Road|Bus|Hydrogen", "FE|Transport|Pass|Aviation|Domestic|Hydrogen"),
`FE|Transport|Pass|Gases` = c("FE|Transport|Pass|Road|LDV|Gases", "FE|Transport|Pass|Road|Bus|Gases"),
`FE|Transport|Pass|Electricity` = c("FE|Transport|Pass|Road|LDV|Electricity", "FE|Transport|Pass|Road|Bus|Electricity","FE|Transport|Pass|Rail|HSR|Electricity", "FE|Transport|Pass|Rail|non-HSR|Electricity"),
`FE|Transport|Freight|Liquids` = c("FE|Transport|Freight|Road|Liquids","FE|Transport|Freight|International Shipping|Liquids","FE|Transport|Freight|Rail|Liquids", "FE|Transport|Freight|Navigation|Liquids"),
`FE|Transport|Freight|Hydrogen` = c("FE|Transport|Freight|Road|Hydrogen"),
`FE|Transport|Freight|Gases` = c("FE|Transport|Freight|Road|Gases"),
`FE|Transport|Freight|Electricity` = c("FE|Transport|Freight|Road|Electricity","FE|Transport|Freight|Rail|Electricity"),
`FE|Transport|Liquids` = c("FE|Transport|Freight|Road|Liquids","FE|Transport|Freight|International Shipping|Liquids","FE|Transport|Freight|Rail|Liquids", "FE|Transport|Freight|Navigation|Liquids","FE|Transport|Pass|Road|LDV|Liquids", "FE|Transport|Pass|Road|Bus|Liquids", "FE|Transport|Pass|Rail|non-HSR|Liquids","FE|Transport|Pass|Aviation|International|Liquids", "FE|Transport|Pass|Aviation|Domestic|Liquids"),
`FE|Transport|Hydrogen` = c("FE|Transport|Freight|Road|Hydrogen","FE|Transport|Pass|Road|LDV|Hydrogen", "FE|Transport|Pass|Road|Bus|Hydrogen", "FE|Transport|Pass|Aviation|Domestic|Hydrogen"),
`FE|Transport|Gases` = c("FE|Transport|Freight|Road|Gases","FE|Transport|Pass|Road|LDV|Gases", "FE|Transport|Pass|Road|Bus|Gases"),
`FE|Transport|Electricity` = c("FE|Transport|Freight|Road|Electricity","FE|Transport|Freight|Rail|Electricity","FE|Transport|Pass|Road|LDV|Electricity", "FE|Transport|Pass|Road|Bus|Electricity","FE|Transport|Pass|Rail|HSR|Electricity", "FE|Transport|Pass|Rail|non-HSR|Electricity"),
`FE|Transport|w/o bunkers|Liquids` = c("FE|Transport|Freight|w/o bunkers|Liquids","FE|Transport|Pass|w/o bunkers|Liquids"),
`FE|Transport|w/o bunkers|Hydrogen` = c("FE|Transport|Freight|w/o bunkers|Hydrogen","FE|Transport|Pass|w/o bunkers|Hydrogen"),
`FE|Transport|w/o bunkers|Gases` = c("FE|Transport|Freight|w/o bunkers|Gases","FE|Transport|Pass|w/o bunkers|Gases"),
`FE|Transport|w/o bunkers|Electricity` = c("FE|Transport|Freight|w/o bunkers|Electricity","FE|Transport|Pass|w/o bunkers|Electricity"),
`Emi|CO2|Transport|Pass|Road|Tailpipe` = c("Emi|CO2|Transport|Pass|Road|LDV|Tailpipe", "Emi|CO2|Transport|Pass|Road|Bus|Tailpipe"),
`Emi|CO2|Transport|Pass|Road|Demand` = c("Emi|CO2|Transport|Pass|Road|LDV|Demand", "Emi|CO2|Transport|Pass|Road|Bus|Demand"),
`Emi|CO2|Transport|Road|Tailpipe` = c("Emi|CO2|Transport|Freight|Road|Tailpipe", "Emi|CO2|Transport|Pass|Road|LDV|Tailpipe", "Emi|CO2|Transport|Pass|Road|Bus|Tailpipe"),
`Emi|CO2|Transport|Rail|Tailpipe` = c("Emi|CO2|Transport|Pass|Rail|non-HSR|Tailpipe", "Emi|CO2|Transport|Freight|Rail|Tailpipe"),
`Emi|CO2|Transport|Road|Demand` = c("Emi|CO2|Transport|Freight|Road|Demand", "Emi|CO2|Transport|Pass|Road|LDV|Demand", "Emi|CO2|Transport|Pass|Road|Bus|Demand"),
`Emi|CO2|Transport|Rail|Demand` = c("Emi|CO2|Transport|Pass|Rail|non-HSR|Demand", "Emi|CO2|Transport|Freight|Rail|Demand"))
names <- names(varsl)
totals <- sapply(names, reportTotals, datatable = toMIF, varlist = varsl, simplify = FALSE, USE.NAMES = TRUE)
totals <- rbindlist(totals, use.names = TRUE)
toMIF <- rbind(toMIF, totals)
toMIF <- rbindlist(list(toMIF, reportStockAndSales(annual_mileage)), use.names=TRUE)
if (!is.null(regionSubsetList)){
toMIF <- rbindlist(list(
toMIF,
toMIF[region %in% regionSubsetList[["EUR"]], .(value = sum(value), region = "EUR"), by = .(model, scenario, variable, unit, period)],
toMIF[region %in% regionSubsetList[["NEU"]], .(value = sum(value), region = "NEU"), by = .(model, scenario, variable, unit, period)],
toMIF[region %in% regionSubsetList[["EU27"]], .(value = sum(value), region = "EU27"), by = .(model, scenario, variable, unit, period)],
toMIF[, .(value = sum(value), region = "World"), by = .(model, scenario, variable, unit, period)]
), use.names=TRUE)
}
if (extendedReporting) {
LogitCostplotdata <- function(priceData, prefData, logitExp, groupValue, Reg_Aggregation){
tot_price <- sw <- logit.exponent <- weight <- NULL
yrs_costs <-c(seq(2005, 2060, 5), seq(2070, 2100, 10))
all_subsectors <- c("technology", "vehicle_type", "subsector_L1", "subsector_L2",
"subsector_L3", "sector")
# change variable names for quitte format
setnames(priceData, c("year"), c("period"))
setnames(prefData, c("year"), c("period"))
prefData <- prefData[period %in% yrs_costs]
priceData<- priceData[period %in% yrs_costs][, -c("share")]
#Filter for the logit level according to groupValue; leave out tmp placeholders
priceData <- priceData[!grepl("tmp", get(groupValue))]
prefData <- prefData[!grepl("tmp", get(groupValue))]
# Calculate Inconvenience Cost from share Weight
# Logit Exponent and total price are needed for this
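# Background note (assuming the standard multinomial-logit form, shares ~ sw * tot_price^lambda):
# a share weight sw is equivalent to an additive cost delta satisfying
# (tot_price + delta)^lambda = sw * tot_price^lambda, hence
# delta = tot_price * (sw^(1/lambda) - 1), which is exactly the formula applied below.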
prefData_inco <- merge(prefData, logitExp, all.y = TRUE)
#rename original prefs afterwards
setnames(prefData,c("sw"),c("value"))
#Reduce priceData to total price
price_tot <- priceData[, c("period", "region", "tot_price", all_subsectors[
seq(match(groupValue, all_subsectors),
length(all_subsectors), 1)]), with = FALSE]
prefData_inco <- merge(prefData_inco, price_tot, by = c("period", "region", all_subsectors[
seq(match(groupValue, all_subsectors),
length(all_subsectors), 1)]))
prefData_inco[, value := tot_price * (sw^(1 / logit.exponent) - 1)]
#Set inconvenience cost to zero for share weights where ES demand is zero anyway
prefData_inco <- prefData_inco[is.infinite(prefData_inco$value), value:=0]
prefData_inco <- prefData_inco[, c("region", "period", all_subsectors[
seq(match(groupValue, all_subsectors),
length(all_subsectors), 1)], "value"), with = FALSE][, variable := "Eq inconvenience cost"]
#Prepare PriceData
priceData <- data.table::melt(priceData[, -c("tot_price")], id.vars = c("region", "period", all_subsectors[
seq(match(groupValue, all_subsectors),
length(all_subsectors), 1)]))
#Regional Aggregation
#Costs are intensive variables and are aggregated with ES weights for each level of the logit
weight_pkm_logitlevel <- weight_pkm[, .(weight = sum(weight)), by = c("region", "period", all_subsectors[
seq(match(groupValue, all_subsectors),
length(all_subsectors), 1)])]
prefData_aggr <- aggregate_dt(prefData[region %in% Reg_Aggregation$region], Reg_Aggregation ,fewcol = "aggr_reg", manycol = "region", yearcol = "period", weights = weight_pkm_logitlevel[region %in% Reg_Aggregation$region & period %in% prefData$period], datacols = c("period", all_subsectors[
seq(match(groupValue, all_subsectors),
length(all_subsectors), 1)]))
setnames(prefData_aggr,"aggr_reg","region")
prefData_inco_aggr <- aggregate_dt(prefData_inco[region %in% Reg_Aggregation$region], Reg_Aggregation , fewcol = "aggr_reg", manycol = "region", yearcol = "period", weights = weight_pkm_logitlevel[region %in% Reg_Aggregation$region], datacols = c("period", "variable", all_subsectors[
seq(match(groupValue, all_subsectors),
length(all_subsectors), 1)]))
setnames(prefData_inco_aggr,"aggr_reg","region")
priceData_aggr <- aggregate_dt(priceData[region %in% Reg_Aggregation$region], Reg_Aggregation , fewcol = "aggr_reg", manycol = "region", yearcol = "period", weights = weight_pkm_logitlevel[region %in% Reg_Aggregation$region], datacols = c("period", all_subsectors[
seq(match(groupValue, all_subsectors),
length(all_subsectors), 1)]))
setnames(priceData_aggr,"aggr_reg","region")
prefData <- rbind(prefData, prefData_aggr)
priceData <- rbind(prefData_inco, prefData_inco_aggr, priceData,priceData_aggr)
if (groupValue=="vehicle_type"){
#Before prices are finally structured, vehicles are aggregated
Aggrdata_veh <- as.data.table(Aggrdata[, c("vehicle_type", "det_veh")])
Aggrdata_veh <- unique(Aggrdata_veh[!is.na(det_veh)])[, det_veh := gsub("Freight\\|Road\\||Pass\\|Road\\|", "", det_veh)]
#Exclude those without aggregation
Aggrdata_veh <- Aggrdata_veh[!vehicle_type==det_veh]
priceData <- priceData[, c("region","variable","vehicle_type","period","value")]
weight_pkm_VS1 <- weight_pkm[,.(weight = sum(weight)), by = c("region", "vehicle_type", "period")]
weight_pkm_VS1_aggrreg <- aggregate_dt(weight_pkm_VS1[region %in% Reg_Aggregation$region], Reg_Aggregation ,fewcol = "aggr_reg", manycol = "region", yearcol = "period", valuecol="weight", datacols = c("period", "vehicle_type"))
setnames(weight_pkm_VS1_aggrreg,"aggr_reg","region")
weight_pkm_VS1 <- rbind(weight_pkm_VS1, weight_pkm_VS1_aggrreg)
Prices_veh_aggr <- aggregate_dt(priceData[vehicle_type %in% Aggrdata_veh$vehicle_type], Aggrdata_veh , fewcol = "det_veh", manycol = "vehicle_type", yearcol = "period", weights = weight_pkm_VS1[vehicle_type %in% Aggrdata_veh$vehicle_type], datacols = c("region","variable"))
setnames(Prices_veh_aggr, "det_veh", "vehicle_type")
Prices_veh_aggr[, variable:=paste0("Logit cost|V|", vehicle_type, "|", variable)][, vehicle_type := NULL]
}
if (groupValue=="vehicle_type"){
#Convert original shareweights to quitte format
prefData[, variable := paste0("Shareweight|V|", get(groupValue))]
prefData <- prefData[, .(region, period, scenario, variable, value)]
#Convert costs to quitte format
priceData[, variable := paste0("Logit cost|V|", get(groupValue), "|", variable)]
priceData <- priceData[, .(region, period, scenario, variable, value)]
priceData <- rbind(priceData, Prices_veh_aggr)}
else{
prefData[, variable := paste0("Shareweight|S",gsub("[^123]","",groupValue), "|", get(groupValue))]
prefData <- prefData[, .(region, period, scenario, variable, value)]
#Convert costs to quitte format
priceData[, variable := paste0("Logit cost|S",gsub("[^123]","",groupValue), "|", get(groupValue), "|", variable)]
priceData <- priceData[, .(region, period, scenario, variable, value)]
}
data <- rbind(prefData[, unit := "-"], priceData[, unit := "$2005/km"])
data[, scenario := scenario_title][, model := model_name]
return(data)
}
LogitCostplotdata_FV <- function(priceData, prefData, logitExp, Reg_Aggregation){
tot_price <- sw <- logit.exponent <- weight <- logit_type <- av_veh <- NULL
#Calculate equivalent inconvenience costs and restrict to the reporting years
yrs_costs <-c(seq(2005, 2060, 5), seq(2070, 2100, 10))
# change variable names for mip
setnames(priceData, c("year"), c("period"))
setnames(prefData, c("year"), c("period"))
#Exclude active modes as they have no fuel
prefData <- prefData[period %in% yrs_costs & !technology %in% c("Cycle_tmp_technology","Walk_tmp_technology")]
priceData<- priceData[period %in% yrs_costs]
# Calculate Inconvenience Cost from share Weight
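# Same share-weight inversion as in LogitCostplotdata above: delta = tot_price * (sw^(1/lambda) - 1).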
priceData_sw <- copy(prefData)
priceData_sw <- priceData_sw[logit_type == "sw"][, logit_type := NULL]
setnames(priceData_sw, "value", "sw")
priceData_sw <- merge(priceData_sw, logitExp, all.x = TRUE)
#This should be removed in refactoring process
priceData_sw[grepl("^Truck", vehicle_type), logit.exponent := -4]
priceData_sw <- priceData_sw[is.na(logit.exponent), logit.exponent := -10]
price_tot <- priceData[, c("period", "region","tot_price", "technology","vehicle_type")]
priceData_sw <- merge(priceData_sw, price_tot, by = c("period", "region", "technology","vehicle_type"),
all.x=TRUE)
priceData_sw[, value := tot_price * (sw^(1 / logit.exponent) - 1)]
#Set inconvenience cost to zero for share weights where ES demand is zero anyway
priceData_sw <- priceData_sw[is.infinite(priceData_sw$value), value := 0]
#Some total prices are missing
priceData_sw <- priceData_sw[is.na(priceData_sw$value), value := 0]
priceData_sw <- priceData_sw[, c("period", "region", "technology","vehicle_type","value")][, variable := "Eq inconvenience cost"]
priceData_inco_LDV <- prefData[!logit_type == "sw"][, c("period", "region", "technology","vehicle_type","value","logit_type")]
setnames(priceData_inco_LDV, "logit_type", "variable")
#Exclude LDV inconvenience costs from prefData
prefData <- prefData[logit_type == "sw"]
prefData <- prefData[, .(region, period, scenario, vehicle_type, technology, value)]
priceData <- data.table::melt(priceData[, -c("tot_price", "share", "subsector_L1", "subsector_L2", "subsector_L3", "sector")], id.vars = c("region", "period", "technology", "vehicle_type"))
priceData <- rbind(priceData, priceData_sw, priceData_inco_LDV)
#Regional Aggregation
#Costs are intensive variables and are aggregated with ES weights for each level of the logit
weight_pkm_FV <- weight_pkm[, .(weight = sum(weight)), by = c("region", "period","vehicle_type", "technology")]
#TO FIX:
#Hydrogen and BEV technologies for aviation and 2-wheelers are not available everywhere -> insert zero as weight
weight_pkm_FV <- merge(weight_pkm_FV, priceData, by = c("region", "period", "vehicle_type", "technology"), all = TRUE)
weight_pkm_FV[is.na(weight_pkm_FV$weight), weight := 0]
weight_pkm_FV <- weight_pkm_FV[, c("region", "period","vehicle_type", "technology", "weight")]
weight_pkm_FV <- weight_pkm_FV[period > 1990 & period < 2110]
weight_pkm_FV <- unique(weight_pkm_FV)
weight_pkm_FV_aggrreg <- aggregate_dt(weight_pkm_FV[region %in% Reg_Aggregation$region], Reg_Aggregation ,fewcol = "aggr_reg", manycol = "region", yearcol = "period", valuecol="weight", datacols = c("period", "vehicle_type","technology"))
setnames(weight_pkm_FV_aggrreg,"aggr_reg","region")
weight_pkm_FV <- rbind(weight_pkm_FV, weight_pkm_FV_aggrreg)
priceData_aggrreg <- aggregate_dt(priceData[region %in% Reg_Aggregation$region], Reg_Aggregation, fewcol = "aggr_reg", manycol = "region", yearcol = "period", weights = weight_pkm_FV[region %in% Reg_Aggregation$region], datacols = c("period", "technology", "vehicle_type"))
setnames(priceData_aggrreg,"aggr_reg","region")
priceData <- rbind(priceData, priceData_aggrreg)
prefData_aggrreg <- aggregate_dt(prefData[region %in% Reg_Aggregation$region], Reg_Aggregation, fewcol = "aggr_reg", manycol = "region", yearcol = "period", weights = weight_pkm_FV[region %in% Reg_Aggregation$region], datacols = c("period", "technology", "vehicle_type"))
setnames(prefData_aggrreg,"aggr_reg","region")
prefData <- rbind(prefData, prefData_aggrreg)
#Before prices are finally structured, vehicles are aggregated
#ES pkm are used as weights for data aggregation
Aggrdata_veh <- as.data.table(Aggrdata[, c("vehicle_type", "det_veh")])
#Remove entries that are not aggregated
Aggrdata_veh <- Aggrdata_veh[!vehicle_type == det_veh]
Aggrdata_veh <- unique(Aggrdata_veh[!is.na(det_veh)])[, det_veh := gsub("Freight\\|Road\\||Pass\\|Road\\|", "", det_veh)]
priceData_aggr <- aggregate_dt(priceData[vehicle_type %in% Aggrdata_veh$vehicle_type], Aggrdata_veh , fewcol = "det_veh", manycol = "vehicle_type", yearcol = "period", weights = weight_pkm_FV[vehicle_type %in% Aggrdata_veh$vehicle_type], datacols = c("region", "variable", "technology"))
setnames(priceData_aggr, "det_veh", "vehicle_type")
#Aggregate average vehicle
Aggrdata_avveh <- as.data.table(Aggrdata)
Aggrdata_avveh <- Aggrdata_avveh[subsector_L1 == "trn_pass_road_LDV_4W"]
Aggrdata_avveh <- unique(Aggrdata_avveh[, c("vehicle_type")])
Aggrdata_avveh[, av_veh := "Average veh"]
priceData_av <- aggregate_dt(priceData[vehicle_type %in% Aggrdata_avveh$vehicle_type], Aggrdata_avveh , fewcol = "av_veh", manycol = "vehicle_type", yearcol = "period", weights = weight_pkm_FV[vehicle_type %in% Aggrdata_avveh$vehicle_type], datacols = c("region", "variable","technology"))
setnames(priceData_av, "av_veh", "vehicle_type")
priceData <- rbind(priceData, priceData_aggr, priceData_av)
priceData <- priceData[, variable := paste0("Logit cost|F|", gsub("_tmp_vehicletype", "", vehicle_type), "|", technology, "|", variable)][, c("region", "period", "variable", "value")][, unit := "$2005/km"][, model := model_name][, scenario := scenario_title]
prefData[, variable := paste0("Shareweight|F|", gsub("_tmp_vehicletype", "", vehicle_type), "|", technology)][, unit := "-"][, model := model_name][, scenario := scenario_title]
prefData <- prefData[, c("period", "region", "variable", "unit", "model", "scenario", "value")]
data <- rbind(priceData, prefData)
return(data)
}
# Mapping efficiencies for useful energy
Mapp_UE <- data.table(
technology = c("FCEV", "BEV", "Electric", "Liquids", "Hydrogen"),
UE_efficiency = c(0.36, 0.64, 0.8, 0.23, 0.25))
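# Note: these FE-to-UE conversion efficiencies are hard-coded assumptions (roughly
# tank-to-wheel shares of final energy delivered as useful energy); they are only
# used by the commented-out useful-energy block below.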
#ES pkm are used as weights for data aggregation
weight_pkm <- copy(demand_km)
setnames(weight_pkm, c("value","year"), c("weight","period"))
weight_pkm[, sector := ifelse(sector %in% c("Pass"), "trn_pass", "trn_freight")]
weight_pkm[, sector := ifelse(subsector_L3 == "International Aviation", "trn_aviation_intl", sector)]
weight_pkm[, sector := ifelse(subsector_L3 == "International Ship", "trn_shipping_intl", sector)]
#Mapping for region Aggregation
RegAggregation <- data.table(
aggr_reg = c("EUR", "EUR", "EUR", "EUR", "EUR", "EUR", "EUR", "EUR", "EUR", "NEU", "NEU"),
region = c("ENC", "EWN", "ECS", "ESC", "ECE", "FRA", "DEU", "UKI", "ESW", "NES", "NEN"))
# #Calculate useful energy (commented out; kept as a sketch)
# UE <- toMIF[grepl("^FE", variable) & grepl("(FCEV|BEV|Electric|Liquids|Hydrogen)$", variable)]
# UE[, technology := gsub("^.*\\|", "", variable)]
# UE <- merge(UE, Mapp_UE)
# UE[, value := value*UE_efficiency][, variable := gsub("FE", "UE", variable)]
# toMIF <- rbind(toMIF, UE)
#Calculate logit costs
#Read in additional data if it exists
if (file.exists(datapath(fname = "logit_data.RDS"))){
logit_data <- readRDS(datapath(fname = "logit_data.RDS"))
prices <- logit_data$share_list
Pref <- logit_data$pref_data
if (file.exists(datapath(fname = "logit_exp.RDS"))){
logit_exp <- readRDS(datapath(fname = "logit_exp.RDS"))
logit_exp <- logit_exp$logit_output
#Prices S3S
Prices_S3S <- prices$S3S_shares
setkey(Prices_S3S, NULL)
Pref_S3S <- Pref$S3S_final_pref
setkey(Pref_S3S, NULL)
logit_exp_S3S <- logit_exp$logit_exponent_S3S
setkey(logit_exp_S3S, NULL)
#Adjust in model itself in refactoring process
Prices_S3S[subsector_L3 %in% c("Cycle","Walk"), tot_VOT_price := tot_price]
PrefandPrices_S3S <- LogitCostplotdata(priceData = Prices_S3S, prefData = Pref_S3S, logitExp =logit_exp_S3S, groupValue = "subsector_L3", Reg_Aggregation = RegAggregation)
#Prices S2S3
Prices_S2S3 <- prices$S2S3_shares
setkey(Prices_S2S3, NULL)
Pref_S2S3 <- Pref$S2S3_final_pref
setkey(Pref_S2S3, NULL)
logit_exp_S2S3 <- logit_exp$logit_exponent_S2S3
setkey(logit_exp_S2S3, NULL)
PrefandPrices_S2S3 <- LogitCostplotdata(priceData = Prices_S2S3, prefData = Pref_S2S3, logitExp = logit_exp_S2S3, groupValue = "subsector_L2", Reg_Aggregation = RegAggregation)
#Prices S1S2
Prices_S1S2 <- prices$S1S2_shares
setkey(Prices_S1S2, NULL)
Pref_S1S2 <- Pref$S1S2_final_pref
setkey(Pref_S1S2, NULL)
logit_exp_S1S2 <- logit_exp$logit_exponent_S1S2
setkey(logit_exp_S1S2, NULL)
PrefandPrices_S1S2 <- LogitCostplotdata(priceData = Prices_S1S2, prefData = Pref_S1S2, logitExp = logit_exp_S1S2, groupValue = "subsector_L1", Reg_Aggregation = RegAggregation)
#Prices VS1
Prices_VS1 <- prices$VS1_shares
setkey(Prices_VS1, NULL)
Pref_VS1 <- Pref$VS1_final_pref
setkey(Pref_VS1, NULL)
logit_exp_VS1 <- logit_exp$logit_exponent_VS1
setkey(logit_exp_VS1, NULL)
#Add subsector_L2, subsector L3 and sector to Prices_VS1 (for structural conformity)
Prices_VS1 <- merge(Prices_VS1, unique(Pref_VS1[, c("subsector_L2", "subsector_L3", "sector", "vehicle_type")]), by = "vehicle_type", all.x = TRUE)
PrefandPrices_VS1 <- LogitCostplotdata(priceData=Prices_VS1, prefData = Pref_VS1,logitExp = logit_exp_VS1, groupValue = "vehicle_type", Reg_Aggregation = RegAggregation)
#Prices FV
Prices_FV <- prices$FV_shares
setkey(Prices_FV, NULL)
Pref_FV <- Pref$FV_final_pref
setkey(Pref_FV, NULL)
logit_exp_VS1 <- logit_exp$logit_exponent_FV
setkey(logit_exp_VS1, NULL)
Prices_FV <- LogitCostplotdata_FV(priceData=Prices_FV, prefData=Pref_FV, logitExp=logit_exp_VS1, Reg_Aggregation = RegAggregation)
Pref_FV <- Pref_FV[logit_type=="sw"]
#Walking and cycling have no fuel options
Pref_FV <- Pref_FV[!technology %in% c("Cycle_tmp_technology","Walk_tmp_technology")]
Pref_FV[, variable:=paste0("Shareweight|F|",gsub("_tmp_vehicletype","",vehicle_type),"|",technology)][,unit:="-"][,scenario:=scenario_title][,model:=model_name]
Pref_FV <- Pref_FV[,.(region,period,scenario,variable,value,unit,model)]
toMIF <- rbind(toMIF,PrefandPrices_S3S, PrefandPrices_S2S3, PrefandPrices_S1S2, PrefandPrices_VS1, Prices_FV, Pref_FV)}}
#Aggregate data
#Insert POP and GDP
if (file.exists(datapath(fname = "POP.RDS")) & file.exists(datapath(fname = "GDP.RDS"))){
POP <- readRDS(datapath(fname = "POP.RDS"))
GDP <- readRDS(datapath(fname = "GDP.RDS"))
POP <- POP[year %in% yrs]
GDP <- GDP[year %in% yrs]
POP[, model:= model_name][, scenario:= scenario_title][, variable := "Population"][, unit := "million"]
GDP[, model:= model_name][, scenario:= scenario_title][, variable := "GDP|PPP"]
GDP[, weight := weight*0.001][, unit := "billion US$2005/yr"]
setnames(GDP,c("year","weight"),c("period","value"))
setnames(POP,"year","period")
if (!is.null(regionSubsetList)){
toMIF <- rbindlist(list(
toMIF,
POP[region %in% regionSubsetList[["EUR"]], .(value = sum(value), region = "EUR"), by = .(model, scenario, variable, unit, period)],
POP[region %in% regionSubsetList[["NEU"]], .(value = sum(value), region = "NEU"), by = .(model, scenario, variable, unit, period)],
POP[region %in% regionSubsetList[["EU27"]], .(value = sum(value), region = "EU27"), by = .(model, scenario, variable, unit, period)],
POP[, .(value = sum(value), region = "World"), by = .(model, scenario, variable, unit, period)],
GDP[region %in% regionSubsetList[["EUR"]], .(value = sum(value), region = "EUR"), by = .(model, scenario, variable, unit, period)],
GDP[region %in% regionSubsetList[["NEU"]], .(value = sum(value), region = "NEU"), by = .(model, scenario, variable, unit, period)],
GDP[region %in% regionSubsetList[["EU27"]], .(value = sum(value), region = "EU27"), by = .(model, scenario, variable, unit, period)],
GDP[, .(value = sum(value), region = "World"), by = .(model, scenario, variable, unit, period)]
), use.names=TRUE)
}
toMIF <- rbind(toMIF, POP, GDP)
}
}
#We should finally decide for which yrs the model runs and shows reasonable results
toMIF <- toMIF[period %in% yrs]
## Make sure there are no duplicates!
idx <- anyDuplicated(toMIF, by = c("region", "variable", "period"))
if(idx){
  warning(paste0("Duplicates found in EDGE-T reporting output:\n",
                 paste(capture.output(toMIF[idx]), collapse="\n")))
}
toMIF <- toMIF[!duplicated(toMIF)]
toMIF <- toMIF[, c("model", "scenario", "region", "variable", "unit", "period", "value")]
return(as.quitte(toMIF))
}
|
# Read the file and subset the data
dataFile <- "C:/Users/Shashank/Documents/coursera assignments/household_power_consumption.txt"
data <- read.table(dataFile, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
subSetData
#Plot 2
datetime <- strptime(paste(subSetData$Date, subSetData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subSetData$Global_active_power)
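# Note: as.numeric() coerces any "?" placeholders (the dataset's missing-value
# marker) to NA with a warning, so an NA check may be warranted before plotting.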
png("plot2.png", width=480, height=480)
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
/RPlot2.R
|
no_license
|
shashank3767/Exploratory-Data-Analsyis-Project-1
|
R
| false
| false
| 605
|
r
|
#################################################################################
##
## R package rugarch by Alexios Ghalanos Copyright (C) 2008-2015.
## This file is part of the R package rugarch.
##
## The R package rugarch is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## The R package rugarch is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
#################################################################################
.multispecall = function( speclist ){
model = unlist( strsplit(class(speclist[[1]]), "spec") )
if( model == "ARFIMA" ){
ans = .multispecarfima( speclist )
} else{
ans = .multispecgarch( speclist )
}
return( ans )
}
.multispecgarch = function( speclist )
{
# first create a spec which goes through validation process
tp = 1
if( !all(unlist(lapply(speclist, FUN = function(x) is(x, "uGARCHspec"))) ) ){
stop("\nNot a valid list of univariate GARCH specs.")
}
# then check type
n = length(speclist)
for(i in seq_len(n)[-1]){ # seq_len avoids 2:n counting down when n == 1
modelnames1 = rownames( speclist[[i]]@model$pars[speclist[[i]]@model$pars[,3]==1, ] )
modelnames2 = rownames( speclist[[i-1]]@model$pars[speclist[[i-1]]@model$pars[,3]==1, ] )
if(length(modelnames1) != length(modelnames2))
{
tp = 0
break()
} else{
if(!all(modelnames1 == modelnames2))
{
tp = 0
break()
}
}
}
if(tp) type = "equal" else type = "unequal"
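# A speclist is classified "equal" when every spec estimates the same named
# parameter set (compared above via the rownames of the estimated pars), so
# downstream multi- methods can treat the cross-section as structurally homogeneous.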
if(type=="unequal"){
# mcsGARCH and realGARCH cannot be in unequal specification. Either all the same or none.
mod = sapply(speclist, function(x) x@model$modeldesc$vmodel)
if(any(mod=="mcsGARCH")) stop("\nmultispec-->error: cannot have unequal spec containing mcsGARCH model.\n")
if(any(mod=="realGARCH")) stop("\nmultispec-->error: cannot have unequal spec containing realGARCH model.\n")
}
ans = new("uGARCHmultispec",
spec = speclist,
type = type)
return(ans)
}
# a multifit function possibly utilizing parallel execution returning a fitlist
# object
.multifitgarch = function(multispec, data, out.sample = 0, solver = "solnp",
solver.control = list(), fit.control = list(stationarity = 1, fixed.se = 0, scale = 0,
rec.init = "all"), cluster = NULL, ...)
{
n = length(multispec@spec)
if(is.null(data)) stop("\nmultifit GARCH-->error: multifit requires a data object", call. = FALSE)
if(!is.xts(data) & !is.matrix(data) & !is.data.frame(data)) stop("\nmultifit GARCH-->error: multifit only supports xts, matrix or data.frame objects for the data", call. = FALSE)
asset.names = colnames(data)
if(dim(data)[2] != n)
stop("\nmultifit GARCH-->error: speclist length not equal to data length", call. = FALSE)
fitlist = vector(mode = "list", length = n)
if(length(out.sample) == 1 | length(out.sample) < n) out.sample = rep(out.sample, n)
if(multispec@type=="equal"){
mod = multispec@spec[[1]]@model$modeldesc$vmodel
if(mod=="realGARCH"){
realVol = list(...)$realizedVol
if(is.null(realVol)) stop("\nmultifit-->error: realGARCH model requires realizedVol xts matrix.")
if(!is.xts(realVol)) stop("\nmultifit-->error: realizedVol must be an xts matrix.")
if(ncol(realVol)!=n) stop("\nmultifit-->error: realizedVol must have the same number of columns as the data.")
if(nrow(realVol)!=nrow(data)) stop("\nmultifit-->error: realizedVol must have the same number of rows as the data.")
}
if(mod=="mcsGARCH"){
DailyVar = list(...)$DailyVar
if(is.null(DailyVar)) stop("\nmultifit-->error: mcsGARCH model requires DailyVar xts matrix.")
if(!is.xts(DailyVar)) stop("\nmultifit-->error: DailyVar must be an xts matrix.")
if(ncol(DailyVar)!=n) stop("\nmultifit-->error: DailyVar must have the same number of columns as the data.")
}
} else{
mod = "X"
}
##################
# Parallel Execution Prelim Checks
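# Fitting strategy (identical in the parallel and serial branches below): each
# series is first fitted with the requested solver; if that call errors or the
# fit fails to converge, it is retried once with the "gosolnp" solver.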
if( !is.null(cluster) ){
clusterEvalQ(cluster, library(rugarch))
clusterExport(cluster, c("multispec", "data", "out.sample", "solver",
"solver.control", "fit.control"), envir = environment())
if(mod=="realGARCH"){
clusterExport(cluster, "realVol", envir = environment())
fitlist = parLapply(cluster, as.list(1:n), fun = function(i){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = solver,
solver.control = solver.control, fit.control = fit.control,
realizedVol = realVol[,i]), silent=TRUE)
if(inherits(ans, 'try-error')){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = "gosolnp",
fit.control = fit.control,
realizedVol = realVol[,i]), silent=TRUE)
}
if(convergence(ans)==1){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = "gosolnp",
fit.control = fit.control,
realizedVol = realVol[,i]), silent=TRUE)
}
return(ans)
})
} else if(mod=="mcsGARCH"){
clusterExport(cluster, "DailyVar", envir = environment())
fitlist = parLapply(cluster, as.list(1:n), fun = function(i){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = solver,
solver.control = solver.control, fit.control = fit.control,
DailyVar = DailyVar[,i]), silent=TRUE)
if(inherits(ans, 'try-error')){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = "gosolnp",
fit.control = fit.control,
DailyVar = DailyVar[,i]), silent=TRUE)
}
if(convergence(ans)==1){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = "gosolnp",
fit.control = fit.control,
DailyVar = DailyVar[,i]), silent=TRUE)
}
return(ans)
})
} else{
fitlist = parLapply(cluster, as.list(1:n), fun = function(i){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = solver,
solver.control = solver.control, fit.control = fit.control), silent=TRUE)
if(inherits(ans, 'try-error')){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = "gosolnp",
fit.control = fit.control), silent=TRUE)
}
if(convergence(ans)==1){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = "gosolnp",
fit.control = fit.control), silent=TRUE)
}
return(ans)
})
}
} else{
if(mod=="realGARCH"){
fitlist = lapply(as.list(1:n), FUN = function(i){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = solver,
solver.control = solver.control, fit.control = fit.control,
realizedVol = realVol[,i]), silent=TRUE)
if(inherits(ans, 'try-error')){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = "gosolnp",
fit.control = fit.control,
realizedVol = realVol[,i]), silent=TRUE)
}
if(convergence(ans)==1){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = "gosolnp",
fit.control = fit.control,
realizedVol = realVol[,i]), silent=TRUE)
}
return(ans)
})
} else if(mod=="mcsGARCH"){
fitlist = lapply(as.list(1:n), FUN = function(i){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = solver,
solver.control = solver.control, fit.control = fit.control,
DailyVar = DailyVar[,i]), silent=TRUE)
if(inherits(ans, 'try-error')){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = "gosolnp",
fit.control = fit.control,
DailyVar = DailyVar[,i]), silent=TRUE)
}
if(convergence(ans)==1){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = "gosolnp",
fit.control = fit.control,
DailyVar = DailyVar[,i]), silent=TRUE)
}
return(ans)
})
} else{
fitlist = lapply(as.list(1:n), FUN = function(i){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = solver,
solver.control = solver.control, fit.control = fit.control), silent=TRUE)
if(inherits(ans, 'try-error')){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = "gosolnp",
fit.control = fit.control), silent=TRUE)
}
if(convergence(ans)==1){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = "gosolnp",
fit.control = fit.control), silent=TRUE)
}
return(ans)
})
}
}
# converged: print
desc = list()
desc$type = multispec@type
desc$asset.names = asset.names
ans = new("uGARCHmultifit",
fit = fitlist,
desc = desc)
return(ans)
}
.multifiltergarch1 = function(multifitORspec, data = NULL, out.sample = 0,
n.old = NULL, rec.init = 'all', cluster = NULL, ...)
{
fitlist = multifitORspec
n = length(fitlist@fit)
if(is.null(data))
stop("\nmultifilter GARCH-->error: multifilter requires a data object", call. = FALSE)
if(!is.xts(data) & !is.matrix(data) & !is.data.frame(data))
stop("\nmultifilter GARCH-->error: multifilter only supports xts, matrix or data.frame objects for the data", call. = FALSE)
if(dim(data)[2] != n)
stop("\nmultifilter GARCH-->error: fitlist length not equal to data length", call. = FALSE)
asset.names = colnames(data)
filterlist = vector(mode = "list", length = n)
if(length(out.sample) == 1 | length(out.sample) < n) out.sample = rep(out.sample, n)
if(length(rec.init) == 1 | length(rec.init) < n) rec.init = rep(rec.init, n)
mod = fitlist@fit[[1]]@model$modeldesc$vmodel
if(mod=="realGARCH"){
realVol = list(...)$realizedVol
if(is.null(realVol)) stop("\nmultifilter-->error: realGARCH model requires realizedVol xts matrix.")
if(!is.xts(realVol)) stop("\nmultifilter-->error: realizedVol must be an xts matrix.")
if(ncol(realVol)!=n) stop("\nmultifilter-->error: realizedVol must have the same number of columns as the data.")
if(nrow(realVol)!=nrow(data)) stop("\nmultifilter-->error: realizedVol must have the same number of rows as the data.")
}
if(mod=="mcsGARCH"){
DailyVar = list(...)$DailyVar
if(is.null(DailyVar)) stop("\nmultifilter-->error: mcsGARCH model requires DailyVar xts matrix.")
if(!is.xts(DailyVar)) stop("\nmultifilter-->error: DailyVar must be an xts matrix.")
if(ncol(DailyVar)!=n) stop("\nmultifilter-->error: DailyVar must have the same number of columns as the data.")
}
specx = vector(mode = "list", length = n)
for(i in 1:n){
specx[[i]] = getspec(fitlist@fit[[i]])
specx[[i]]@model$fixed.pars = as.list(coef(fitlist@fit[[i]]))
}
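# Each fitted model is converted back into a spec with its estimated coefficients
# held fixed, so that ugarchfilter below reproduces the fitted model on the data
# without re-estimation.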
if( !is.null(cluster) ){
clusterEvalQ(cluster, library(rugarch))
clusterExport(cluster, c("specx", "data", "out.sample", "n.old", "rec.init"), envir = environment())
if(mod=="realGARCH"){
clusterExport(cluster, "realVol", envir = environment())
filterlist = parLapply(cluster, as.list(1:n), fun = function(i){
ugarchfilter(data = data[, i, drop = FALSE], spec = specx[[i]],
out.sample = out.sample[i], n.old = n.old, rec.init = rec.init[i],
realizedVol = realVol[,i])
})
} else if(mod=="mcsGARCH"){
filterlist = parLapply(cluster, as.list(1:n), fun = function(i){
ugarchfilter(data = data[, i, drop = FALSE], spec = specx[[i]],
out.sample = out.sample[i], n.old = n.old, rec.init = rec.init[i],
DailyVar = DailyVar[,i])
})
} else{
filterlist = parLapply(cluster, as.list(1:n), fun = function(i){
ugarchfilter(data = data[, i, drop = FALSE], spec = specx[[i]],
out.sample = out.sample[i], n.old = n.old, rec.init = rec.init[i])
})
}
} else{
if(mod=="realGARCH"){
filterlist = lapply(as.list(1:n), FUN = function(i){
ugarchfilter(data = data[, i, drop = FALSE], spec = specx[[i]],
out.sample = out.sample[i], n.old = n.old, rec.init = rec.init[i],
realizedVol = realVol[,i])
})
} else if(mod=="mcsGARCH"){
filterlist = lapply(as.list(1:n), FUN = function(i){
ugarchfilter(data = data[, i, drop = FALSE], spec = specx[[i]],
out.sample = out.sample[i], n.old = n.old, rec.init = rec.init[i],
DailyVar = DailyVar[,i])
})
} else{
filterlist = lapply(as.list(1:n), FUN = function(i){
ugarchfilter(data = data[, i, drop = FALSE], spec = specx[[i]],
out.sample = out.sample[i], n.old = n.old, rec.init = rec.init[i])
})
}
}
desc = list()
desc$type = "equal"
desc$asset.names = asset.names
ans = new("uGARCHmultifilter",
filter = filterlist,
desc = desc)
return(ans)
}
.multifiltergarch2 = function(multifitORspec, data = NULL, out.sample = 0,
n.old = NULL, rec.init = 'all', cluster = NULL, ...)
{
speclist = multifitORspec
n = length(speclist@spec)
if(is.null(data))
stop("\nmultifilter GARCH-->error: multifilter requires a data object", call. = FALSE)
if(!is.xts(data) & !is.matrix(data) & !is.data.frame(data))
stop("\nmultifilter GARCH-->error: multifilter only supports xts, matrix or data.frame objects for the data", call. = FALSE)
if(dim(data)[2] != n)
stop("\nmultifilter GARCH-->error: multispec length not equal to data length", call. = FALSE)
asset.names = colnames(data)
filterlist = vector(mode = "list", length = n)
if(length(out.sample) == 1 | length(out.sample) < n) out.sample = rep(out.sample, n)
if(length(rec.init) == 1 | length(rec.init) < n) rec.init = rep(rec.init, n)
if(speclist@type=="equal"){
mod = speclist@spec[[1]]@model$modeldesc$vmodel
if(mod=="realGARCH"){
realVol = list(...)$realizedVol
if(is.null(realVol)) stop("\nmultifilter-->error: realGARCH model requires realizedVol xts matrix.")
if(!is.xts(realVol)) stop("\nmultifilter-->error: realizedVol must be an xts matrix.")
if(ncol(realVol)!=n) stop("\nmultifilter-->error: realizedVol must have the same number of columns as the data.")
if(nrow(realVol)!=nrow(data)) stop("\nmultifilter-->error: realizedVol must have the same number of rows as the data.")
}
if(mod=="mcsGARCH"){
DailyVar = list(...)$DailyVar
if(is.null(DailyVar)) stop("\nmultifilter-->error: mcsGARCH model requires DailyVar xts matrix.")
if(!is.xts(DailyVar)) stop("\nmultifilter-->error: DailyVar must be an xts matrix.")
if(ncol(DailyVar)!=n) stop("\nmultifilter-->error: DailyVar must have the same number of columns as the data.")
}
} else{
mod = "X"
}
if( !is.null(cluster) ){
clusterEvalQ(cluster, library(rugarch))
clusterExport(cluster, c("speclist", "data", "out.sample", "n.old", "rec.init"), envir = environment())
if(mod=="realGARCH"){
clusterExport(cluster, "realVol", envir = environment())
filterlist = parLapply(cluster, as.list(1:n), fun = function(i){
ugarchfilter(data = data[, i, drop = FALSE], spec = speclist@spec[[i]],
out.sample = out.sample[i], n.old = n.old, rec.init = rec.init[i],
realizedVol = realVol[,i])
})
} else if(mod=="mcsGARCH"){
clusterExport(cluster, "DailyVar", envir = environment())
filterlist = parLapply(cluster, as.list(1:n), fun = function(i){
ugarchfilter(data = data[, i, drop = FALSE], spec = speclist@spec[[i]],
out.sample = out.sample[i], n.old = n.old, rec.init = rec.init[i],
DailyVar = DailyVar[,i])
})
} else{
filterlist = parLapply(cluster, as.list(1:n), fun = function(i){
ugarchfilter(data = data[, i, drop = FALSE], spec = speclist@spec[[i]],
out.sample = out.sample[i], n.old = n.old, rec.init = rec.init[i])
})
}
} else{
if(mod=="realGARCH"){
filterlist = lapply(as.list(1:n), FUN = function(i){
ugarchfilter(data = data[, i, drop = FALSE], spec = speclist@spec[[i]],
out.sample = out.sample[i], n.old = n.old, rec.init = rec.init[i],
realizedVol = realVol[,i])
})
} else if(mod=="mcsGARCH"){
filterlist = lapply(as.list(1:n), FUN = function(i){
ugarchfilter(data = data[, i, drop = FALSE], spec = speclist@spec[[i]],
out.sample = out.sample[i], n.old = n.old, rec.init = rec.init[i],
DailyVar = DailyVar[,i])
})
} else{
filterlist = lapply(as.list(1:n), FUN = function(i){
ugarchfilter(data = data[, i, drop = FALSE], spec = speclist@spec[[i]],
out.sample = out.sample[i], n.old = n.old, rec.init = rec.init[i])
})
}
}
# converged: print
desc = list()
desc$type = speclist@type
desc$asset.names = asset.names
ans = new("uGARCHmultifilter",
filter = filterlist,
desc = desc)
return(ans)
}
.multiforecastgarch1 = function(multifitORspec, data = NULL, n.ahead = 1,
n.roll = 0, out.sample = 0, external.forecasts = list(mregfor = NULL, vregfor = NULL),
cluster = NULL, ...)
{
# only need to account for mcsGARCH and only partially
multifit = multifitORspec
n = length(multifit@fit)
asset.names = multifit@desc$asset.names
forecastlist = vector(mode = "list", length = n)
mod = multifit@fit[[1]]@model$modeldesc$vmodel
if(mod=="mcsGARCH"){
DailyVar = list(...)$DailyVar
includeD = !is.null(DailyVar)
}
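# DailyVar is optional at the forecast stage for mcsGARCH: when supplied it is
# forwarded column-by-column to each asset's forecast, otherwise NULL is passed.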
if( !is.null(cluster) ){
clusterEvalQ(cluster, library(rugarch))
clusterExport(cluster, c("multifit", "n.ahead", "n.roll", "external.forecasts"), envir = environment())
if(mod=="mcsGARCH"){
if(includeD) clusterExport(cluster, c("includeD","DailyVar"), envir = environment())
forecastlist = parLapply(cluster, as.list(1:n), fun = function(i){
ugarchforecast(fitORspec = multifit@fit[[i]], data = NULL, n.ahead = n.ahead,
n.roll = n.roll, external.forecasts = external.forecasts,
DailyVar = if(includeD) DailyVar[,i] else NULL)
})
} else{
forecastlist = parLapply(cluster, as.list(1:n), fun = function(i){
ugarchforecast(fitORspec = multifit@fit[[i]], data = NULL, n.ahead = n.ahead,
n.roll = n.roll, external.forecasts = external.forecasts)
})
}
} else{
if(mod=="mcsGARCH"){
forecastlist = lapply(as.list(1:n), FUN = function(i){
ugarchforecast(fitORspec = multifit@fit[[i]], data = NULL, n.ahead = n.ahead,
n.roll = n.roll, external.forecasts = external.forecasts,
DailyVar = if(includeD) DailyVar[,i] else NULL)
})
} else{
forecastlist = lapply(as.list(1:n), FUN = function(i){
ugarchforecast(fitORspec = multifit@fit[[i]], data = NULL, n.ahead = n.ahead,
n.roll = n.roll, external.forecasts = external.forecasts)
})
}
}
desc = list()
desc$type = "equal"
desc$asset.names = asset.names
ans = new("uGARCHmultiforecast",
forecast = forecastlist,
desc = desc)
return(ans)
}
.multiforecastgarch2 = function(multifitORspec, data = NULL, n.ahead = 1, n.roll = 0, out.sample = 0,
external.forecasts = list(mregfor = NULL, vregfor = NULL), cluster =NULL, ...)
{
multispec = multifitORspec
n = length(multispec@spec)
if(is.null(data))
stop("\nmultiforecast GARCH-->error: multiforecast with multiple spec requires a data object", call. = FALSE)
if(!is.xts(data) & !is.matrix(data) & !is.data.frame(data))
stop("\nmultiforecast GARCH-->error: multiforecast only supports xts, matrix or data.frame objects for the data", call. = FALSE)
if(dim(data)[2] != n)
stop("\nmultiforecast GARCH-->error: multispec length not equal to data length", call. = FALSE)
asset.names = colnames(data)
forecastlist = vector(mode = "list", length = n)
if(is.null(out.sample)) out.sample = 0
if(length(out.sample) == 1) out.sample = rep(out.sample, n)
if(length(out.sample) !=n )
stop("\nmultiforecast GARCH-->error: out.sample length not equal to data length", call. = FALSE)
if(multispec@type=="equal"){
mod = multispec@spec[[1]]@model$modeldesc$vmodel
if(mod=="realGARCH"){
realVol = list(...)$realizedVol
if(is.null(realVol)) stop("\nmultiforecast GARCH-->error: realGARCH model requires realizedVol xts matrix.")
if(!is.xts(realVol)) stop("\nmultiforecast GARCH-->error: realizedVol must be an xts matrix.")
if(ncol(realVol)!=n) stop("\nmultiforecast GARCH-->error: realizedVol must have the same number of columns as the data.")
if(nrow(realVol)!=nrow(data)) stop("\nmultiforecast GARCH-->error: realizedVol must have the same number of rows as the data.")
}
if(mod=="mcsGARCH"){
stop("\nugarchforecast (and multiforecast) with specification object not available for mcsGARCH model")
}
} else{
mod = "X"
}
if( !is.null(cluster) ){
clusterEvalQ(cluster, library(rugarch))
clusterExport(cluster, c("multispec", "data", "n.ahead", "n.roll",
"out.sample", "external.forecasts"), envir = environment())
if(mod=="realGARCH"){
clusterExport(cluster, "realVol", envir = environment())
forecastlist = parLapply(cluster, as.list(1:n), fun = function(i){
ugarchforecast(fitORspec = multispec@spec[[i]],
data = data[, i, drop = FALSE], n.ahead = n.ahead, n.roll = n.roll,
out.sample = out.sample[i], external.forecasts = external.forecasts,
realizedVol = realVol[,i])
})
} else{
forecastlist = parLapply(cluster, as.list(1:n), fun = function(i){
ugarchforecast(fitORspec = multispec@spec[[i]],
data = data[, i, drop = FALSE], n.ahead = n.ahead, n.roll = n.roll,
out.sample = out.sample[i], external.forecasts = external.forecasts)
})
}
} else{
if(mod=="realGARCH"){
forecastlist = lapply(as.list(1:n), FUN = function(i){
ugarchforecast(fitORspec = multispec@spec[[i]],
data = data[, i, drop = FALSE], n.ahead = n.ahead, n.roll = n.roll,
out.sample = out.sample[i], external.forecasts = external.forecasts,
realizedVol = realVol[,i])
})
} else{
forecastlist = lapply(as.list(1:n), FUN = function(i){
ugarchforecast(fitORspec = multispec@spec[[i]],
data = data[, i, drop = FALSE], n.ahead = n.ahead, n.roll = n.roll,
out.sample = out.sample[i], external.forecasts = external.forecasts, ...)
})
}
}
desc = list()
desc$type = multispec@type
desc$asset.names = asset.names
ans = new("uGARCHmultiforecast",
forecast = forecastlist,
desc = desc)
return(ans)
}
|
/R/rugarch-multi.R
|
no_license
|
Dwj359582058/rugarch
|
R
| false
| false
| 23,940
|
r
|
#################################################################################
##
## R package rugarch by Alexios Ghalanos Copyright (C) 2008-2015.
## This file is part of the R package rugarch.
##
## The R package rugarch is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## The R package rugarch is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
#################################################################################
.multispecall = function( speclist ){
model = unlist( strsplit(class(speclist[[1]]), "spec") )
if( model == "ARFIMA" ){
ans = .multispecarfima( speclist )
} else{
ans = .multispecgarch( speclist )
}
return( ans )
}
.multispecgarch = function( speclist )
{
# first create a spec which goes through validation process
tp = 1
if( !all(unlist(lapply(speclist, FUN = function(x) is(x, "uGARCHspec"))) ) ){
stop("\nNot a valid list of univariate GARCH specs.")
}
# then check type
n = length(speclist)
for(i in 2:n){
modelnames1 = rownames( speclist[[i]]@model$pars[speclist[[i]]@model$pars[,3]==1, ] )
modelnames2 = rownames( speclist[[i-1]]@model$pars[speclist[[i-1]]@model$pars[,3]==1, ] )
if(length(modelnames1) != length(modelnames2))
{
tp = 0
break()
} else{
if(!all(modelnames1 == modelnames2))
{
tp = 0
break()
}
}
}
if(tp) type = "equal" else type = "unequal"
if(type=="unequal"){
# mcsGARCH and realGARCH cannot be in unequal specification. Either all the same or none.
mod = sapply(speclist, function(x) x@model$modeldesc$vmodel)
if(any(mod=="mcsGARCH")) stop("\nmultispec-->error: cannot have unequal spec containing mcsGARCH model.\n")
if(any(mod=="realGARCH")) stop("\nmultispec-->error: cannot have unequal spec containing realGARCH model.\n")
}
ans = new("uGARCHmultispec",
spec = speclist,
type = type)
return(ans)
}
# a multifit function possible utilizing parallel execution returning a fitlist
# object
.multifitgarch = function(multispec, data, out.sample = 0, solver = "solnp",
solver.control = list(), fit.control = list(stationarity = 1, fixed.se = 0, scale = 0,
rec.init = "all"), cluster = NULL, ...)
{
n = length(multispec@spec)
if(is.null(data)) stop("\nmultifit GARCH-->error: multifit requires a data object", call. = FALSE)
if(!is.xts(data) & !is.matrix(data) & !is.data.frame(data)) stop("\nmultifit GARCH-->error: multifit only supports xts, matrix or data.frame objects for the data", call. = FALSE)
asset.names = colnames(data)
if(dim(data)[2] != n)
stop("\nmultifit GARCH-->error: speclist length not equal to data length", call. = FALSE)
fitlist = vector(mode = "list", length = n)
if(length(out.sample) == 1 | length(out.sample) < n) out.sample = rep(out.sample, n)
if(multispec@type=="equal"){
mod = multispec@spec[[1]]@model$modeldesc$vmodel
if(mod=="realGARCH"){
realVol = list(...)$realizedVol
if(is.null(realVol)) stop("\nmultifit-->error: realGARCH model requires realizedVol xts matrix.")
if(!is.xts(realVol)) stop("\nmultifit-->error: realizedVol must be an xts matrix.")
if(ncol(realVol)!=n) stop("\nmultifit-->error: realizedVol must have the same number of columns as the data.")
if(nrow(realVol)!=nrow(data)) stop("\nmultifit-->error: realizedVol must have the same number of rows as the data.")
}
if(mod=="mcsGARCH"){
DailyVar = list(...)$DailyVar
if(is.null(DailyVar)) stop("\nmultifit-->error: mcsGARCH model requires DailyVar xts matrix.")
if(!is.xts(DailyVar)) stop("\nmultifit-->error: DailyVar must be an xts matrix.")
if(ncol(DailyVar)!=n) stop("\nmultifit-->error: DailyVar must have the same number of columns as the data.")
}
} else{
mod = "X"
}
##################
# Parallel Execution Prelim Checks
if( !is.null(cluster) ){
clusterEvalQ(cluster, library(rugarch))
clusterExport(cluster, c("multispec", "data", "out.sample", "solver",
"solver.control", "fit.control"), envir = environment())
if(mod=="realGARCH"){
clusterExport(cluster, "realVol", envir = environment())
fitlist = parLapply(cluster, as.list(1:n), fun = function(i){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = solver,
solver.control = solver.control, fit.control = fit.control,
realizedVol = realVol[,i]), silent=TRUE)
if(inherits(ans, 'try-error')){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = "gosolnp",
fit.control = fit.control,
realizedVol = realVol[,i]), silent=TRUE)
}
if(convergence(ans)==1){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = "gosolnp",
fit.control = fit.control,
realizedVol = realVol[,i]), silent=TRUE)
}
return(ans)
})
} else if(mod=="mcsGARCH"){
clusterExport(cluster, "DailyVar", envir = environment())
fitlist = parLapply(cluster, as.list(1:n), fun = function(i){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = solver,
solver.control = solver.control, fit.control = fit.control,
DailyVar = DailyVar[,i]), silent=TRUE)
if(inherits(ans, 'try-error')){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = "gosolnp",
fit.control = fit.control,
DailyVar = DailyVar[,i]), silent=TRUE)
}
if(convergence(ans)==1){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = "gosolnp",
fit.control = fit.control,
DailyVar = DailyVar[,i]), silent=TRUE)
}
return(ans)
})
} else{
fitlist = parLapply(cluster, as.list(1:n), fun = function(i){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = solver,
solver.control = solver.control, fit.control = fit.control), silent=TRUE)
if(inherits(ans, 'try-error')){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = "gosolnp",
fit.control = fit.control), silent=TRUE)
}
if(convergence(ans)==1){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = "gosolnp",
fit.control = fit.control), silent=TRUE)
}
return(ans)
})
}
} else{
if(mod=="realGARCH"){
fitlist = lapply(as.list(1:n), FUN = function(i){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = solver,
solver.control = solver.control, fit.control = fit.control,
realizedVol = realVol[,i]), silent=TRUE)
if(inherits(ans, 'try-error')){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = "gosolnp",
fit.control = fit.control,
realizedVol = realVol[,i]), silent=TRUE)
}
if(convergence(ans)==1){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = "gosolnp",
fit.control = fit.control,
realizedVol = realVol[,i]), silent=TRUE)
}
return(ans)
})
} else if(mod=="mcsGARCH"){
fitlist = lapply(as.list(1:n), FUN = function(i){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = solver,
solver.control = solver.control, fit.control = fit.control,
DailyVar = DailyVar[,i]), silent=TRUE)
if(inherits(ans, 'try-error')){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = "gosolnp",
fit.control = fit.control,
DailyVar = DailyVar[,i]), silent=TRUE)
}
if(convergence(ans)==1){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = "gosolnp",
fit.control = fit.control,
DailyVar = DailyVar[,i]), silent=TRUE)
}
return(ans)
})
} else{
fitlist = lapply(as.list(1:n), FUN = function(i){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = solver,
solver.control = solver.control, fit.control = fit.control), silent=TRUE)
if(inherits(ans, 'try-error')){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = "gosolnp",
fit.control = fit.control), silent=TRUE)
}
if(convergence(ans)==1){
ans<-try(ugarchfit(spec = multispec@spec[[i]], data = data[, i, drop = FALSE],
out.sample = out.sample[i], solver = "gosolnp",
fit.control = fit.control), silent=TRUE)
}
return(ans)
})
}
}
# converged: print
desc = list()
desc$type = multispec@type
desc$asset.names = asset.names
ans = new("uGARCHmultifit",
fit = fitlist,
desc = desc)
return(ans)
}
.multifiltergarch1 = function(multifitORspec, data = NULL, out.sample = 0,
n.old = NULL, rec.init = 'all', cluster = NULL, ...)
{
fitlist = multifitORspec
n = length(fitlist@fit)
if(is.null(data))
stop("\nmultifilter GARCH-->error: multifilter requires a data object", call. = FALSE)
if(!is.xts(data) & !is.matrix(data) & !is.data.frame(data))
stop("\nmultifilter GARCH-->error: multifilter only supports xts, matrix or data.frame objects for the data", call. = FALSE)
if(dim(data)[2] != n)
stop("\nmultifilter GARCH-->error: fitlist length not equal to data length", call. = FALSE)
asset.names = colnames(data)
filterlist = vector(mode = "list", length = n)
if(length(out.sample) == 1 | length(out.sample) < n) out.sample = rep(out.sample, n)
if(length(rec.init) == 1 | length(rec.init) < n) rec.init = rep(rec.init, n)
mod = fitlist@fit[[1]]@model$modeldesc$vmodel
if(mod=="realGARCH"){
realVol = list(...)$realizedVol
if(is.null(realVol)) stop("\nmultifilter-->error: realGARCH model requires realizedVol xts matrix.")
if(!is.xts(realVol)) stop("\nmultifilter-->error: realizedVol must be an xts matrix.")
if(ncol(realVol)!=n) stop("\nmultifilter-->error: realizedVol must have the same number of columns as the data.")
if(nrow(realVol)!=nrow(data)) stop("\nmultifilter-->error: realizedVol must have the same number of rows as the data.")
}
if(mod=="mcsGARCH"){
DailyVar = list(...)$DailyVar
if(is.null(DailyVar)) stop("\nmultifilter-->error: mcsGARCH model requires DailyVar xts matrix.")
if(!is.xts(DailyVar)) stop("\nmultifilter-->error: DailyVar must be an xts matrix.")
if(ncol(DailyVar)!=n) stop("\nmultifilter-->error: DailyVar must have the same number of columns as the data.")
}
specx = vector(mode = "list", length = n)
for(i in 1:n){
specx[[i]] = getspec(fitlist@fit[[i]])
specx[[i]]@model$fixed.pars = as.list(coef(fitlist@fit[[i]]))
}
if( !is.null(cluster) ){
clusterEvalQ(cluster, library(rugarch))
clusterExport(cluster, c("specx", "data", "out.sample", "n.old", "rec.init"), envir = environment())
if(mod=="realGARCH"){
clusterExport(cluster, "realVol", envir = environment())
filterlist = parLapply(cluster, as.list(1:n), fun = function(i){
ugarchfilter(data = data[, i, drop = FALSE], spec = specx[[i]],
out.sample = out.sample[i], n.old = n.old, rec.init = rec.init[i],
realizedVol = realVol[,i])
})
} else if(mod=="mcsGARCH"){
filterlist = parLapply(cluster, as.list(1:n), fun = function(i){
ugarchfilter(data = data[, i, drop = FALSE], spec = specx[[i]],
out.sample = out.sample[i], n.old = n.old, rec.init = rec.init[i],
DailyVar = DailyVar[,i])
})
} else{
filterlist = parLapply(cluster, as.list(1:n), fun = function(i){
ugarchfilter(data = data[, i, drop = FALSE], spec = specx[[i]],
out.sample = out.sample[i], n.old = n.old, rec.init = rec.init[i])
})
}
} else{
if(mod=="realGARCH"){
filterlist = lapply(as.list(1:n), FUN = function(i){
ugarchfilter(data = data[, i, drop = FALSE], spec = specx[[i]],
out.sample = out.sample[i], n.old = n.old, rec.init = rec.init[i],
realizedVol = realVol[,i])
})
} else if(mod=="mcsGARCH"){
filterlist = lapply(as.list(1:n), FUN = function(i){
ugarchfilter(data = data[, i, drop = FALSE], spec = specx[[i]],
out.sample = out.sample[i], n.old = n.old, rec.init = rec.init[i],
DailyVar = DailyVar[,i])
})
} else{
filterlist = lapply(as.list(1:n), FUN = function(i){
ugarchfilter(data = data[, i, drop = FALSE], spec = specx[[i]],
out.sample = out.sample[i], n.old = n.old, rec.init = rec.init[i])
})
}
}
desc = list()
desc$type = "equal"
desc$asset.names = asset.names
ans = new("uGARCHmultifilter",
filter = filterlist,
desc = desc)
return(ans)
}
.multifiltergarch2 = function(multifitORspec, data = NULL, out.sample = 0,
n.old = NULL, rec.init = 'all', cluster = NULL, ...)
{
speclist = multifitORspec
n = length(speclist@spec)
if(is.null(data))
stop("\nmultifilter GARCH-->error: multifilter requires a data object", call. = FALSE)
if(!is.xts(data) & !is.matrix(data) & !is.data.frame(data))
stop("\nmultifilter GARCH-->error: multifilter only supports xts, matrix or data.frame objects for the data", call. = FALSE)
if(dim(data)[2] != n)
stop("\nmultifilter GARCH-->error: multispec length not equal to data length", call. = FALSE)
asset.names = colnames(data)
filterlist = vector(mode = "list", length = n)
if(length(out.sample) == 1 | length(out.sample) < n) out.sample = rep(out.sample, n)
if(length(rec.init) == 1 | length(rec.init) < n) rec.init = rep(rec.init, n)
if(speclist@type=="equal"){
mod = speclist@spec[[1]]@model$modeldesc$vmodel
if(mod=="realGARCH"){
realVol = list(...)$realizedVol
if(is.null(realVol)) stop("\nmultifilter-->error: realGARCH model requires realizedVol xts matrix.")
if(!is.xts(realVol)) stop("\nmultifilter-->error: realizedVol must be an xts matrix.")
if(ncol(realVol)!=n) stop("\nmultifilter-->error: realizedVol must have the same number of columns as the data.")
if(nrow(realVol)!=nrow(data)) stop("\nmultifilter-->error: realizedVol must have the same number of rows as the data.")
}
if(mod=="mcsGARCH"){
DailyVar = list(...)$DailyVar
if(is.null(DailyVar)) stop("\nmultifilter-->error: mcsGARCH model requires DailyVar xts matrix.")
if(!is.xts(DailyVar)) stop("\nmultifilter-->error: DailyVar must be an xts matrix.")
if(ncol(DailyVar)!=n) stop("\nmultifilter-->error: DailyVar must have the same number of columns as the data.")
}
} else{
mod = "X"
}
if( !is.null(cluster) ){
clusterEvalQ(cluster, library(rugarch))
clusterExport(cluster, c("speclist", "data", "out.sample", "n.old", "rec.init"), envir = environment())
if(mod=="realGARCH"){
clusterExport(cluster, "realVol", envir = environment())
filterlist = parLapply(cluster, as.list(1:n), fun = function(i){
ugarchfilter(data = data[, i, drop = FALSE], spec = speclist@spec[[i]],
out.sample = out.sample[i], n.old = n.old, rec.init = rec.init[i],
realizedVol = realVol[,i])
})
} else if(mod=="mcsGARCH"){
clusterExport(cluster, "DailyVar", envir = environment())
filterlist = parLapply(cluster, as.list(1:n), fun = function(i){
ugarchfilter(data = data[, i, drop = FALSE], spec = speclist@spec[[i]],
out.sample = out.sample[i], n.old = n.old, rec.init = rec.init[i],
DailyVar = DailyVar[,i])
})
} else{
filterlist = parLapply(cluster, as.list(1:n), fun = function(i){
ugarchfilter(data = data[, i, drop = FALSE], spec = speclist@spec[[i]],
out.sample = out.sample[i], n.old = n.old, rec.init = rec.init[i])
})
}
} else{
if(mod=="realGARCH"){
filterlist = lapply(as.list(1:n), FUN = function(i){
ugarchfilter(data = data[, i, drop = FALSE], spec = speclist@spec[[i]],
out.sample = out.sample[i], n.old = n.old, rec.init = rec.init[i],
realizedVol = realVol[,i])
})
} else if(mod=="mcsGARCH"){
filterlist = lapply(as.list(1:n), FUN = function(i){
ugarchfilter(data = data[, i, drop = FALSE], spec = speclist@spec[[i]],
out.sample = out.sample[i], n.old = n.old, rec.init = rec.init[i],
DailyVar = DailyVar[,i])
})
} else{
filterlist = lapply(as.list(1:n), FUN = function(i){
ugarchfilter(data = data[, i, drop = FALSE], spec = speclist@spec[[i]],
out.sample = out.sample[i], n.old = n.old, rec.init = rec.init[i])
})
}
}
# converged: print
desc = list()
desc$type = speclist@type
desc$asset.names = asset.names
ans = new("uGARCHmultifilter",
filter = filterlist,
desc = desc)
return(ans)
}
.multiforecastgarch1 = function(multifitORspec, data = NULL, n.ahead = 1,
n.roll = 0, out.sample = 0, external.forecasts = list(mregfor = NULL, vregfor = NULL),
cluster = NULL, ...)
{
# only need to account for mcsGARCH and only partially
multifit = multifitORspec
n = length(multifit@fit)
asset.names = multifit@desc$asset.names
forecastlist = vector(mode = "list", length = n)
mod = multifit@fit[[1]]@model$modeldesc$vmodel
if(mod=="mcsGARCH"){
DailyVar = list(...)$DailyVar
		includeD = !is.null(DailyVar)
}
if( !is.null(cluster) ){
clusterEvalQ(cluster, library(rugarch))
clusterExport(cluster, c("multifit", "n.ahead", "n.roll", "external.forecasts"), envir = environment())
if(mod=="mcsGARCH"){
if(includeD) clusterExport(cluster, c("includeD","DailyVar"), envir = environment())
forecastlist = parLapply(cluster, as.list(1:n), fun = function(i){
ugarchforecast(fitORspec = multifit@fit[[i]], data = NULL, n.ahead = n.ahead,
n.roll = n.roll, external.forecasts = external.forecasts,
					DailyVar = if(includeD) DailyVar[,i] else NULL) # named argument, so it cannot be matched positionally
})
} else{
forecastlist = parLapply(cluster, as.list(1:n), fun = function(i){
ugarchforecast(fitORspec = multifit@fit[[i]], data = NULL, n.ahead = n.ahead,
n.roll = n.roll, external.forecasts = external.forecasts)
})
}
} else{
if(mod=="mcsGARCH"){
forecastlist = lapply(as.list(1:n), FUN = function(i){
ugarchforecast(fitORspec = multifit@fit[[i]], data = NULL, n.ahead = n.ahead,
n.roll = n.roll, external.forecasts = external.forecasts,
					DailyVar = if(includeD) DailyVar[,i] else NULL) # named argument, so it cannot be matched positionally
})
} else{
forecastlist = lapply(as.list(1:n), FUN = function(i){
ugarchforecast(fitORspec = multifit@fit[[i]], data = NULL, n.ahead = n.ahead,
n.roll = n.roll, external.forecasts = external.forecasts)
})
}
}
desc = list()
desc$type = "equal"
desc$asset.names = asset.names
ans = new("uGARCHmultiforecast",
forecast = forecastlist,
desc = desc)
return(ans)
}
.multiforecastgarch2 = function(multifitORspec, data = NULL, n.ahead = 1, n.roll = 0, out.sample = 0,
external.forecasts = list(mregfor = NULL, vregfor = NULL), cluster =NULL, ...)
{
multispec = multifitORspec
n = length(multispec@spec)
if(is.null(data))
stop("\nmultiforecast GARCH-->error: multiforecast with multiple spec requires a data object", call. = FALSE)
if(!is.xts(data) & !is.matrix(data) & !is.data.frame(data))
stop("\nmultiforecast GARCH-->error: multiforecast only supports xts, matrix or data.frame objects for the data", call. = FALSE)
if(dim(data)[2] != n)
stop("\nmultiforecast GARCH-->error: multispec length not equal to data length", call. = FALSE)
asset.names = colnames(data)
forecastlist = vector(mode = "list", length = n)
if(is.null(out.sample)) out.sample = 0
if(length(out.sample) == 1) out.sample = rep(out.sample, n)
if(length(out.sample) !=n )
stop("\nmultiforecast GARCH-->error: out.sample length not equal to data length", call. = FALSE)
if(multispec@type=="equal"){
mod = multispec@spec[[1]]@model$modeldesc$vmodel
if(mod=="realGARCH"){
realVol = list(...)$realizedVol
if(is.null(realVol)) stop("\nmultiforecast GARCH-->error: realGARCH model requires realizedVol xts matrix.")
if(!is.xts(realVol)) stop("\nmultiforecast GARCH-->error: realizedVol must be an xts matrix.")
if(ncol(realVol)!=n) stop("\nmultiforecast GARCH-->error: realizedVol must have the same number of columns as the data.")
if(nrow(realVol)!=nrow(data)) stop("\nmultiforecast GARCH-->error: realizedVol must have the same number of rows as the data.")
}
if(mod=="mcsGARCH"){
stop("\nugarchforecast (and multiforecast) with specification object not available for mcsGARCH model")
}
} else{
mod = "X"
}
if( !is.null(cluster) ){
clusterEvalQ(cluster, library(rugarch))
clusterExport(cluster, c("multispec", "data", "n.ahead", "n.roll",
"out.sample", "external.forecasts"), envir = environment())
if(mod=="realGARCH"){
clusterExport(cluster, "realVol", envir = environment())
forecastlist = parLapply(cluster, as.list(1:n), fun = function(i){
ugarchforecast(fitORspec = multispec@spec[[i]],
data = data[, i, drop = FALSE], n.ahead = n.ahead, n.roll = n.roll,
out.sample = out.sample[i], external.forecasts = external.forecasts,
realizedVol = realVol[,i])
})
} else{
forecastlist = parLapply(cluster, as.list(1:n), fun = function(i){
ugarchforecast(fitORspec = multispec@spec[[i]],
data = data[, i, drop = FALSE], n.ahead = n.ahead, n.roll = n.roll,
out.sample = out.sample[i], external.forecasts = external.forecasts)
})
}
} else{
if(mod=="realGARCH"){
forecastlist = lapply(as.list(1:n), FUN = function(i){
ugarchforecast(fitORspec = multispec@spec[[i]],
data = data[, i, drop = FALSE], n.ahead = n.ahead, n.roll = n.roll,
out.sample = out.sample[i], external.forecasts = external.forecasts,
realizedVol = realVol[,i])
})
} else{
forecastlist = lapply(as.list(1:n), FUN = function(i){
ugarchforecast(fitORspec = multispec@spec[[i]],
data = data[, i, drop = FALSE], n.ahead = n.ahead, n.roll = n.roll,
out.sample = out.sample[i], external.forecasts = external.forecasts, ...)
})
}
}
desc = list()
desc$type = multispec@type
desc$asset.names = asset.names
ans = new("uGARCHmultiforecast",
forecast = forecastlist,
desc = desc)
return(ans)
}
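# A minimal usage sketch of the exported wrappers that dispatch to the internal
# functions above (hypothetical `rets`, an xts matrix of three return series;
# not taken from the package examples):
# spec <- ugarchspec(variance.model = list(model = "sGARCH", garchOrder = c(1, 1)))
# mspec <- multispec(replicate(3, spec))
# mfit <- multifit(mspec, data = rets) # fits each column, falling back to gosolnp on failure
# mfor <- multiforecast(mfit, n.ahead = 10) # one forecast object per series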
|
"make.keys" <-
function(nvars,keys.list,item.labels=NULL,key.labels=NULL) {
if(!is.null(ncol(nvars))) {item.labels <- colnames(nvars)
nvars <- ncol(nvars)} else {
if(!is.numeric(nvars)) {item.labels <- nvars
nvars <- length(item.labels)} }
nkeys <- length(keys.list)
keys <- matrix(rep(0,nvars*nkeys),ncol=nkeys)
for (i in 1:nkeys) {
if(!is.null(keys.list[[i]])) {
list.i <- unlist(keys.list[[i]])
if((is.character(list.i)) && !is.null(item.labels)) {
neg <- grep("-",list.i)
list.i <- sub("-","",list.i)
list.i <- match(list.i,item.labels)
if(!any(is.na(neg))) list.i[neg] <- -list.i[neg]}
keys[abs(list.i),i] <- sign(list.i )
}
}
if(!is.null(key.labels)) {colnames(keys) <- key.labels} else {colnames(keys) <- names(keys.list)}
if(!is.null(item.labels)) {rownames(keys) <- item.labels}
return(keys)}
#written June 11, 2008
#revised Sept 15, 2013 to allow for symbolic keys
#revised November 21, 2018 to allow null keys
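# A small illustrative call (hypothetical item names, not from the psych examples):
# make.keys(6, list(agree = c("i1", "-i2", "i3"), extra = c(4, 5, -6)),
#           item.labels = paste0("i", 1:6))
# returns a 6 x 2 matrix of -1/0/1 scoring weights with rows i1..i6 and
# columns agree and extra; a "-" prefix (or a negative index) reverses an item.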
#
# "keys2list" <- function(keys,sign=TRUE) {
# keys.list <- list()
# nkeys <- ncol(keys)
# for (i in 1:nkeys) {temp <- rownames(keys)[which(keys[,i] < 0)]
# if(sign && (length(temp) >0)) temp <- paste0("-",temp)
# keys.list[[i]] <- c(rownames(keys)[which(keys[,i] > 0)],temp)
# }
# names(keys.list) <- colnames(keys)
# keys.list}
#Added July 9, 2017
"selectFromKeys" <- function(keys.list) {
select <- sub("-","",unlist(keys.list))
select <- select[!duplicated(select)]
return(select)
}
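# e.g. (hypothetical keys) selectFromKeys(list(a = c("i1", "-i2"), b = c("i2", "i3")))
# returns c("i1", "i2", "i3"): the union of keyed items with signs stripped.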
#Basically, the opposite of make.keys
#Takes a keys matrix and converts it to a list structure (with negative signs appropriately placed)
#9/10/16
#revised 6/10/18 to not change the order of keys
"keys2list" <- function(keys,sign=TRUE) {
keys.list <- list()
nkeys <- ncol(keys)
for (i in 1:nkeys) {temp <- rownames(keys)[which(keys[,i] != 0)]
wk <- which(keys[,i] < 0)
temp[temp %in% names(wk)] <- paste0("-",temp[temp %in% names(wk)])
keys.list[[i]] <- temp
#if(sign && (length(temp) >0)) temp <- paste0("-",temp)
# keys.list[[i]] <- c(rownames(keys)[which(keys[,i] > 0)],temp)
}
names(keys.list) <- colnames(keys)
keys.list}
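# Round-trip sketch (reusing the hypothetical keys matrix from the make.keys
# example above):
# keys <- make.keys(6, list(agree = c("i1", "-i2", "i3"), extra = c(4, 5, -6)),
#                   item.labels = paste0("i", 1:6))
# keys2list(keys) # list(agree = c("i1", "-i2", "i3"), extra = c("i4", "i5", "-i6"))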
|
/R/make.keys.R
|
no_license
|
adamv227/psych
|
R
| false
| false
| 2,291
|
r
|
"make.keys" <-
function(nvars,keys.list,item.labels=NULL,key.labels=NULL) {
if(!is.null(ncol(nvars))) {item.labels <- colnames(nvars)
nvars <- ncol(nvars)} else {
if(!is.numeric(nvars)) {item.labels <- nvars
nvars <- length(item.labels)} }
nkeys <- length(keys.list)
keys <- matrix(rep(0,nvars*nkeys),ncol=nkeys)
for (i in 1:nkeys) {
if(!is.null(keys.list[[i]])) {
list.i <- unlist(keys.list[[i]])
if((is.character(list.i)) && !is.null(item.labels)) {
neg <- grep("-",list.i)
list.i <- sub("-","",list.i)
list.i <- match(list.i,item.labels)
if(!any(is.na(neg))) list.i[neg] <- -list.i[neg]}
keys[abs(list.i),i] <- sign(list.i )
}
}
if(!is.null(key.labels)) {colnames(keys) <- key.labels} else {colnames(keys) <- names(keys.list)}
if(!is.null(item.labels)) {rownames(keys) <- item.labels}
return(keys)}
#written June 11, 2008
#revised Sept 15, 2013 to allow for symbolic keys
#revised November 21, 2018 to allow null keys
#
# "keys2list" <- function(keys,sign=TRUE) {
# keys.list <- list()
# nkeys <- ncol(keys)
# for (i in 1:nkeys) {temp <- rownames(keys)[which(keys[,i] < 0)]
# if(sign && (length(temp) >0)) temp <- paste0("-",temp)
# keys.list[[i]] <- c(rownames(keys)[which(keys[,i] > 0)],temp)
# }
# names(keys.list) <- colnames(keys)
# keys.list}
#Added July 9, 2017
"selectFromKeys" <- function(keys.list) {
select <- sub("-","",unlist(keys.list))
select <- select[!duplicated(select)]
return(select)
}
#Basically, the opposite of make.keys
#Takes a keys matrix and converts it to a list structure (with negative signs appropriately placed)
#9/10/16
#revised 6/10/18 to not change the order of keys
"keys2list" <- function(keys,sign=TRUE) {
keys.list <- list()
nkeys <- ncol(keys)
for (i in 1:nkeys) {temp <- rownames(keys)[which(keys[,i] != 0)]
wk <- which(keys[,i] < 0)
temp[temp %in% names(wk)] <- paste0("-",temp[temp %in% names(wk)])
keys.list[[i]] <- temp
#if(sign && (length(temp) >0)) temp <- paste0("-",temp)
# keys.list[[i]] <- c(rownames(keys)[which(keys[,i] > 0)],temp)
}
names(keys.list) <- colnames(keys)
keys.list}
|
#########################################################
# Discretized serial interval (assuming a shifted gamma #
# distribution (with shift 1) #
#########################################################
#' Discretized Generation Time Distribution Assuming A Shifted Gamma
#' Distribution
#'
#' \code{discr_si} computes the discrete distribution of the serial interval,
#' assuming that the serial interval is shifted Gamma distributed, with shift 1.
#'
#' @param k Positive integer, or vector of positive integers for which the
#' discrete distribution is desired.
#' @param mu A positive real giving the mean of the Gamma distribution.
#' @param sigma A non-negative real giving the standard deviation of the Gamma
#' distribution.
#' @return Gives the discrete probability \eqn{w_k} that the serial interval is
#' equal to \eqn{k}.
#' @details{
#' Assuming that the serial interval is shifted Gamma distributed with mean
#' \eqn{\mu}, standard deviation \eqn{\sigma} and shift \eqn{1},
#' the discrete probability \eqn{w_k} that the serial interval is equal to
#' \eqn{k} is:
#'
#' \deqn{w_k = kF_{\{\mu-1,\sigma\}}(k)+(k-2)F_{\{\mu-1,\sigma\}}
#' (k-2)-2(k-1)F_{\{\mu-1,\sigma\}}(k-1)\\
#' +(\mu-1)(2F_{\{\mu-1+\frac{\sigma^2}{\mu-1},
#' \sigma\sqrt{1+\frac{\sigma^2}{\mu-1}}\}}(k-1)-
#' F_{\{\mu-1+\frac{\sigma^2}{\mu-1},
#' \sigma\sqrt{1+\frac{\sigma^2}{\mu-1}}\}}(k-2)-
#' F_{\{\mu-1+\frac{\sigma^2}{\mu-1},
#' \sigma\sqrt{1+\frac{\sigma^2}{\mu-1}}\}}(k))}
#'
#' where \eqn{F_{\{\mu,\sigma\}}} is the cumulative distribution function of a Gamma
#' distribution with mean \eqn{\mu} and standard deviation \eqn{\sigma}.
#' }
#' @seealso \code{\link{overall_infectivity}}, \code{\link{estimate_R}}
#' @author Anne Cori \email{a.cori@imperial.ac.uk}
#' @references Cori, A. et al. A new framework and software to estimate
#' time-varying reproduction numbers during epidemics (AJE 2013).
# #' @import stats
#' @export
#' @examples
#' ## Computing the discrete serial interval of influenza
#' mean_flu_si <- 2.6
#' sd_flu_si <- 1.5
#' discrete_si_distr <- discr_si(seq(0, 20), mean_flu_si, sd_flu_si)
#' plot(seq(0, 20), discrete_si_distr, type = "h",
#' lwd = 10, lend = 1, xlab = "time (days)", ylab = "frequency")
#' title(main = "Discrete distribution of the serial interval of influenza")
discr_si <- function(k, mu, sigma)
{
if (sigma < 0) {
stop("sigma must be >=0.")
}
if (mu <= 1) {
stop("mu must be >1")
}
if (any(k < 0)) {
stop("all values in k must be >=0.")
}
a <- ((mu - 1) / sigma)^2
b <- sigma^2 / (mu - 1)
cdf_gamma <- function(k, a, b) pgamma(k, shape = a, scale = b)
res <- k * cdf_gamma(k, a, b) +
(k - 2) * cdf_gamma(k - 2, a, b) - 2 * (k - 1) * cdf_gamma(k - 1, a, b)
res <- res + a * b * (2 * cdf_gamma(k - 1, a + 1, b) -
cdf_gamma(k - 2, a + 1, b) - cdf_gamma(k, a + 1, b))
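  # vnapply appears to be an internal package helper (presumably shorthand for
  # vapply(..., FUN.VALUE = numeric(1))); the max(0, e) step clips small
  # negative values arising from floating-point error in the CDF differences.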
res <- vnapply(res, function(e) max(0, e))
return(res)
}
|
/R/discr_si.R
|
no_license
|
Bigger-Physics/EpiEstim
|
R
| false
| false
| 2,973
|
r
|
source("~/r_projects/cancer-mutations/ssm-background-slurm/trinucleotide_bg_enhancer-slurm.R")
print("successfully sourced trinucleotide_bg_enhancer-slurm.R")
slice <- dplyr::slice
rename <- dplyr::rename
## arguments from the bash file ##
args <- commandArgs(trailingOnly = TRUE)
cmd_arg <- as.numeric(args[1])
## functions ##
## absmax() gets the element with maximum absolute value from a vector of numbers
absmax <- function(x) {x[which.max(abs(x))]}
## DNAToBin() transforms DNA to binary number
## can apply R function strtoi() to further transform binary to decimal number
DNAToBin <- function(DNA){
temp <- DNA
temp <- gsub("A", "00", temp)
temp <- gsub("C", "01", temp)
temp <- gsub("G", "10", temp)
temp <- gsub("T", "11", temp)
return(temp)
}
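## Illustration (worked out by hand, not part of the original script):
## DNAToBin("ACGT") gives "00011011", and strtoi("00011011", base = 2) gives 27,
## so each 12-mer maps to a unique 24-bit integer used below to index table_12mer.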
# file paths and file names
qbic.model.path <- "/data/gordanlab/vincentius/qbic/predmodel"
output.path <- "~/r_projects/cancer-mutations/ssm-background-slurm/output"
# import the 12-mer prediction table
setwd(qbic.model.path)
filename_table_12mer <- Sys.glob("*.txt")[cmd_arg]
# wondering if there is a way to use read_table() to correctly read this table in
table_12mer <- read.table(filename_table_12mer, header = T, fill = T,
blank.lines.skip = F)
table_12mer <- as_tibble(table_12mer) %>%
mutate(idx = row_number() - 1)
# proportion of 3 possible mutations for each of the 32 trinucleotides
table_mutation_tri_mut_rate <- table_mutation_tri %>%
group_by(ref) %>%
mutate(prop = count/sum(count)) %>%
ungroup() %>%
inner_join(freq_tri %>% select(ref = trinucleotide, mut_rate)) %>%
mutate(tri_mut_rate = prop*mut_rate) %>%
select(ref_tri = ref,
mut_tri = mut,
tri_mut_rate) %>%
mutate(mut_type = row_number())
enhancers_w_mutation <- which(data_enhancers_mutated$count > 0)
result_list <- list()
for(i in 1:length(enhancers_w_mutation)){
enhancer_ex <- enhancers_w_mutation[i]
print(enhancer_ex)
## Part 1: Generate all possible single mutations for enhancers with mutations ##
# get the location and sequence of the enhancer
data_enhancer_ex <- data_enhancers_fantom %>% slice(enhancer_ex) # the enhancer location
seq_enhancer_ex <- seq_enhancers_fantom[[enhancer_ex]] # the enhancer sequence
seq_enhancer_ex_bg <- getSeq(genome, data_enhancer_ex$chromosome,
data_enhancer_ex$start - 5,
data_enhancer_ex$end + 5) # the enhancer sequence including 5bp context
seq_enhancer_ex_11mer <- DNAStringSet(seq_enhancer_ex_bg,
start = 1:(length(seq_enhancer_ex_bg)-10),
width = 11) # cut the enhancer into 11mers
# generate all possible single mutations
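  # Each candidate mutation is encoded as a 12-mer ("twimer"): the 11-mer
  # reference context followed by the mutated base. DNAToBin() turns that into
  # a 24-bit binary string whose integer value is a 0-based row index into
  # table_12mer (hence the idx+1 lookup further down).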
data_enhancer_all_possible_mutations <- tibble(
chromosome = data_enhancer_ex$chromosome,
pos = seq(data_enhancer_ex$start, data_enhancer_ex$end),
enhancer = enhancer_ex,
seq_mut_bg = as.character(seq_enhancer_ex_11mer)
) %>%
mutate(ref = unlist(strsplit(as.character(seq_enhancer_ex), ""))) %>%
slice(rep(seq(1, length(seq_enhancer_ex)), each = 4)) %>%
mutate(mut = rep(c("A", "C", "G", "T"), length(seq_enhancer_ex))) %>%
filter(ref != mut) %>%
mutate(twimer = paste0(seq_mut_bg, mut)) %>%
mutate(idx = DNAToBin(twimer)) %>%
mutate(idx = strtoi(idx, base = 2)) %>%
select(-seq_mut_bg)
## Part 2: Predict the effect of the synthetic mutations using QBiC ##
# use the 12-mer table to predict the effect
data_enhancer_mutation_prediction <- data_enhancer_all_possible_mutations %>%
mutate(effect = table_12mer$diff[idx+1]) %>%
mutate(ref_tri = substr(twimer, 5, 7)) %>%
mutate(mut_tri = paste0(substr(ref_tri, 1, 1), mut, substr(ref_tri, 3, 3))) %>%
mutate(ref_tri_rev = as.character(reverseComplement(DNAStringSet(ref_tri))),
mut_tri_rev = as.character(reverseComplement(DNAStringSet(mut_tri)))) %>%
mutate(ref_tri = ifelse(ref_tri < ref_tri_rev, ref_tri, ref_tri_rev),
mut_tri = ifelse(ref_tri < ref_tri_rev, mut_tri, mut_tri_rev)) %>%
select(-c(ref_tri_rev, mut_tri_rev)) %>%
inner_join(table_mutation_tri_mut_rate, by = c("ref_tri", "mut_tri")) %>%
mutate(cond_tri_mut_rate = tri_mut_rate/sum(tri_mut_rate))
# mutation effect prediction for the actual mutations in the enhancer
mut_enhancer_effect_prediction <- mut_enhancer %>%
genome_inner_join(data_enhancer_ex,
by = c("chromosome", "start", "end")) %>%
select(chromosome = chromosome.x,
pos = start.x,
icgc_mutation_id,
icgc_donor_id,
ref,
mut) %>%
inner_join(data_enhancer_mutation_prediction,
by = c("chromosome", "pos", "ref", "mut")) %>%
mutate(p_less = 0)
for(j in 1:nrow(mut_enhancer_effect_prediction)){
mut_enhancer_effect_prediction$p_less[j] <- sum(
data_enhancer_mutation_prediction %>%
filter(effect < mut_enhancer_effect_prediction$effect[j]) %>%
pull(cond_tri_mut_rate)
)
}
result <- mut_enhancer_effect_prediction %>%
select(icgc_mutation_id, icgc_donor_id, enhancer, effect, p_less)
result_list[[i]] <- result
}
enhancer_result <- bind_rows(result_list)
setwd(output.path)
write.table(enhancer_result, paste0("enhancer_mutation_result_", filename_table_12mer),
row.names = F)
|
/archive/ssm-background-slurm/analytical_enhancer_fixed_per_donor-slurm-parallel.R
|
no_license
|
jz132/cancer-mutations
|
R
| false
| false
| 5,418
|
r
|
source("~/r_projects/cancer-mutations/ssm-background-slurm/trinucleotide_bg_enhancer-slurm.R")
print("successfully sourced trinucleotide_bg_enhancer-slurm.R")
slice <- dplyr::slice
rename <- dplyr::rename
## arguments from the bash file ##
args <- commandArgs(trailingOnly = TRUE)
cmd_arg <- as.numeric(args[1])
## functions ##
## absmax() gets the element with maximum absolute value from a vector of numbers
absmax <- function(x) {x[which.max(abs(x))]}
## DNAToBin() transforms DNA to binary number
## can apply R function strtoi() to further transform binary to decimal number
DNAToBin <- function(DNA){
temp <- DNA
temp <- gsub("A", "00", temp)
temp <- gsub("C", "01", temp)
temp <- gsub("G", "10", temp)
temp <- gsub("T", "11", temp)
return(temp)
}
# file paths and file names
qbic.model.path <- "/data/gordanlab/vincentius/qbic/predmodel"
output.path <- "~/r_projects/cancer-mutations/ssm-background-slurm/output"
# import the 12-mer prediction table
setwd(qbic.model.path)
filename_table_12mer <- Sys.glob("*.txt")[cmd_arg]
# wondering if there is a way to use read_table() to correctly read this table in
table_12mer <- read.table(filename_table_12mer, header = T, fill = T,
blank.lines.skip = F)
table_12mer <- as_tibble(table_12mer) %>%
mutate(idx = row_number() - 1)
# proportion of 3 possible mutations for each of the 32 trinucleotides
table_mutation_tri_mut_rate <- table_mutation_tri %>%
group_by(ref) %>%
mutate(prop = count/sum(count)) %>%
ungroup() %>%
inner_join(freq_tri %>% select(ref = trinucleotide, mut_rate)) %>%
mutate(tri_mut_rate = prop*mut_rate) %>%
select(ref_tri = ref,
mut_tri = mut,
tri_mut_rate) %>%
mutate(mut_type = row_number())
enhancers_w_mutation <- which(data_enhancers_mutated$count > 0)
result_list <- list()
for(i in 1:length(enhancers_w_mutation)){
enhancer_ex <- enhancers_w_mutation[i]
print(enhancer_ex)
## Part 1: Generate all possible single mutations for enhancers with mutations ##
# get the location and sequence of the enhancer
data_enhancer_ex <- data_enhancers_fantom %>% slice(enhancer_ex) # the enhancer location
seq_enhancer_ex <- seq_enhancers_fantom[[enhancer_ex]] # the enhancer sequence
seq_enhancer_ex_bg <- getSeq(genome, data_enhancer_ex$chromosome,
data_enhancer_ex$start - 5,
data_enhancer_ex$end + 5) # the enhancer sequence including 5bp context
seq_enhancer_ex_11mer <- DNAStringSet(seq_enhancer_ex_bg,
start = 1:(length(seq_enhancer_ex_bg)-10),
width = 11) # cut the enhancer into 11mers
# generate all possible single mutations
data_enhancer_all_possible_mutations <- tibble(
chromosome = data_enhancer_ex$chromosome,
pos = seq(data_enhancer_ex$start, data_enhancer_ex$end),
enhancer = enhancer_ex,
seq_mut_bg = as.character(seq_enhancer_ex_11mer)
) %>%
mutate(ref = unlist(strsplit(as.character(seq_enhancer_ex), ""))) %>%
slice(rep(seq(1, length(seq_enhancer_ex)), each = 4)) %>%
mutate(mut = rep(c("A", "C", "G", "T"), length(seq_enhancer_ex))) %>%
filter(ref != mut) %>%
mutate(twimer = paste0(seq_mut_bg, mut)) %>%
mutate(idx = DNAToBin(twimer)) %>%
mutate(idx = strtoi(idx, base = 2)) %>%
select(-seq_mut_bg)
## Part 2: Predict the effect of the synthetic mutations using QBiC ##
# use the 12-mer table to predict the effect
data_enhancer_mutation_prediction <- data_enhancer_all_possible_mutations %>%
mutate(effect = table_12mer$diff[idx+1]) %>%
mutate(ref_tri = substr(twimer, 5, 7)) %>%
mutate(mut_tri = paste0(substr(ref_tri, 1, 1), mut, substr(ref_tri, 3, 3))) %>%
mutate(ref_tri_rev = as.character(reverseComplement(DNAStringSet(ref_tri))),
mut_tri_rev = as.character(reverseComplement(DNAStringSet(mut_tri)))) %>%
mutate(ref_tri = ifelse(ref_tri < ref_tri_rev, ref_tri, ref_tri_rev),
mut_tri = ifelse(ref_tri < ref_tri_rev, mut_tri, mut_tri_rev)) %>%
select(-c(ref_tri_rev, mut_tri_rev)) %>%
inner_join(table_mutation_tri_mut_rate, by = c("ref_tri", "mut_tri")) %>%
mutate(cond_tri_mut_rate = tri_mut_rate/sum(tri_mut_rate))
# mutation effect prediction for the actual mutations in the enhancer
mut_enhancer_effect_prediction <- mut_enhancer %>%
genome_inner_join(data_enhancer_ex,
by = c("chromosome", "start", "end")) %>%
select(chromosome = chromosome.x,
pos = start.x,
icgc_mutation_id,
icgc_donor_id,
ref,
mut) %>%
inner_join(data_enhancer_mutation_prediction,
by = c("chromosome", "pos", "ref", "mut")) %>%
mutate(p_less = 0)
for(j in 1:nrow(mut_enhancer_effect_prediction)){
mut_enhancer_effect_prediction$p_less[j] <- sum(
data_enhancer_mutation_prediction %>%
filter(effect < mut_enhancer_effect_prediction$effect[j]) %>%
pull(cond_tri_mut_rate)
)
}
result <- mut_enhancer_effect_prediction %>%
select(icgc_mutation_id, icgc_donor_id, enhancer, effect, p_less)
result_list[[i]] <- result
}
enhancer_result <- bind_rows(result_list)
setwd(output.path)
write.table(enhancer_result, paste0("enhancer_mutation_result_", filename_table_12mer),
row.names = F)
|
## This function makes a "special" matrix object that can cache the input matrix and its inverse.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
        setinverse <- function(inverse) m <<- inverse
getinverse <- function() m
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## cacheSolve computes the inverse of the matrix created with makeCacheMatrix. If the inverse has already been calculated, the cached value is returned; otherwise the inverse is computed, stored in the cache, and returned.
cacheSolve <- function(x, ...) {
m <- x$getinverse()
if(!is.null(m)) {
message("hij is binnen")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setinverse(m)
m
}
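## Usage sketch (example matrix chosen purely for illustration):
## cm <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
## cacheSolve(cm) # computes, caches, and returns the inverse
## cacheSolve(cm) # second call prints the message and returns the cached inverse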
|
/cachematrix.R
|
no_license
|
Justusdelange/ProgrammingAssignment2
|
R
| false
| false
| 832
|
r
|
# Objective: create a function to calculate local solar time
# Author: Grant Coble-Neal
t_sol <- function( TimeCorrection, DaylightSavingsTime = FALSE ){
# t_sol: local solar time as measured by a sundial
# TimeCorrection (TC) is a series consisting of each day's time correction between
# the longitude of a given site and the standard meridian (local time zone)
hours = seq( from = 1, to = length(TimeCorrection) ) %% 24
# TCbyHour <- rep(TimeCorrection, each = 24)
t_solar = if(DaylightSavingsTime)
{
hours + (TimeCorrection - 60)/60
} else {
hours + TimeCorrection/60
}
return( t_solar )
# unit of measurement: hour
}
# t_sol(TC(L_st = 120, L_loc = 115.86, (EoT(DayAngle(seq(1:1461), 365.25)))), DaylightSavingsTime = FALSE)
#
# TC = TC(L_st = 120, L_loc = 115.86, (EoT(DayAngle(seq(1:1461), 365.25))))
#
# hours = seq( from = 0, to = length(TC) - 1 ) %% 24
#
# TCbyHour <- rep(TC, each = 24)
#
# hours + TCbyHour/60
|
/R/Local Solar Time function.R
|
no_license
|
cobleg/solarPV
|
R
| false
| false
| 975
|
r
|
testlist <- list(doy = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), latitude = numeric(0), temp = c(-6.16249584696239e+100, 2.61788686270017e+185))
result <- do.call(meteor:::ET0_ThornthwaiteWilmott,testlist)
str(result)
|
/meteor/inst/testfiles/ET0_ThornthwaiteWilmott/AFL_ET0_ThornthwaiteWilmott/ET0_ThornthwaiteWilmott_valgrind_files/1615837486-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 368
|
r
|
# install.packages("igraph")
rm(list = ls())
library(igraph)
library(reshape2)
library(plyr)
this.dir <- dirname(parent.frame(2)$ofile)
setwd(this.dir)
#### new tries ####
load("DFusers.Rda")
vortexVec <- as.vector(unique(DFusers$repo))
testDF <- dcast(DFusers, opener ~ repo, value.var = "merged", fun.aggregate = sum)
X <- c()
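# Build the edge list of a project co-contribution network: for each opener,
# every pair of repos in which they have nonzero merged activity becomes an
# undirected edge.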
edgeVec0 <- ddply(testDF, .(opener), function(x){
y<- as.vector(x[1,])
y[[1]]<- NULL
y[as.list(y) == 0] <- NULL
if(dim(y)[2] > 1){
combinations <- c(combn(colnames(y), m=2))
}else{
combinations <- c("0")
}
data.frame(combis = combinations)
})
edgeVec <- edgeVec0
edgeVec <- edgeVec[edgeVec$combis != 0,]
edgeVec <- as.character(edgeVec$combis)
edgeVec
g3 <- graph(edgeVec, directed=F)
g3$weight <- 1
E(g3)
plot(g3)
edge_attr(g3)
E(g3)$weight <- 1
l <- layout_in_circle(g3)
g3s <- simplify(g3, remove.multiple = T, remove.loops = F, edge.attr.comb=c(weight="sum"))
E(g3s)$width <- E(g3s)$weight*0.8
# pdf("project_network.pdf")
#circle plot
plot(g3s, layout=l, vertex.color="orange", vertex.frame.color="white", vertex.label=V(g3s)$media,
vertex.label.color="black", vertex.label.cex=.7, vertex.label.dist=1.5,
vertex.label.degree = -pi/2)
# dev.off()
# pdf("project_network2.pdf")
#cluster plot
plot(g3s, edge.arrow.size=.2, edge.curved=0,
vertex.color="orange", vertex.frame.color="#555555",
vertex.label=V(g3)$media, vertex.label.color="black", vertex.label.cex=.7)
# dev.off()
#http://www.kateto.net/wp-content/uploads/2016/01/NetSciX_2016_Workshop.pdf for more details
#### compare to Hars & Ou ####
#### function from internet ####
# https://stats.stackexchange.com/questions/30394/how-to-perform-two-sample-t-tests-in-r-by-inputting-sample-statistics-rather-tha
# m1, m2: the sample means
# s1, s2: the sample standard deviations
# n1, n2: the sample sizes
# m0: the null value for the difference in means to be tested for. Default is 0.
# equal.variance: whether or not to assume equal variance. Default is FALSE.
t.test2 <- function(m1,m2,s1,s2,n1,n2,m0=0,equal.variance=FALSE)
{
if( equal.variance==FALSE )
{
se <- sqrt( (s1^2/n1) + (s2^2/n2) )
# welch-satterthwaite df
df <- ( (s1^2/n1 + s2^2/n2)^2 )/( (s1^2/n1)^2/(n1-1) + (s2^2/n2)^2/(n2-1) )
} else
{
# pooled standard deviation, scaled by the sample sizes
se <- sqrt( (1/n1 + 1/n2) * ((n1-1)*s1^2 + (n2-1)*s2^2)/(n1+n2-2) )
df <- n1+n2-2
}
t <- (m1-m2-m0)/se
dat <- c(m1-m2, se, t, 2*pt(-abs(t),df))
names(dat) <- c("Difference of means", "Std Error", "t", "p-value")
return(dat)
}
#### testing ####
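## Welch-type comparison of two proportions (interpretation inferred from the
## code): m1 = 8/41 is the reference proportion from Hars & Ou, and m2 is the
## share of repos that appear as vertices in the co-contribution network above.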
m1 <- 8/41
sd1 <- sqrt(m1*(1-m1))
n1 <- 79
m2 <- length(unique(V(g3s))) / length(unique(DFusers$repo))
sd2 <- sqrt(m2*(1-m2))
n2 <- length(unique(DFusers$merger))
df <- n1 + n2 -2
t.test2(m1,m2,sd1,sd2,n1,n2,m0=0,equal.variance=FALSE)
m2
|
/Step3_3_network.R
|
no_license
|
bvbgh/aimloss
|
R
| false
| false
| 2,853
|
r
|