content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# public functions:
loadPrismaData = function(path, maxLines=-1, fastSally=TRUE, alpha=.05, skipFeatureCorrelation=FALSE) {
    # Top-level loader: read the sally/harry/raw files sharing the prefix
    # 'path', run the statistical preprocessing and tag the result as an
    # S3 "prisma" object.
    loaded <- readPrismaInput(path, maxLines, fastSally)
    processed <- preprocessPrismaData(loaded, alpha, skipFeatureCorrelation)
    processed$path <- path
    class(processed) <- "prisma"
    processed
}
getDuplicateData = function(prismaData) {
    # Expand the deduplicated matrix back to one column per original
    # document by replaying the stored column remapper.
    prismaData$data[, prismaData$remapper]
}
# Build a prisma object directly from a tm text-mining corpus instead of
# sally command-line output. Requires the tm package (>= 0.6); the
# term-document matrix is densified and re-wrapped as a Matrix object
# before the shared preprocessing pipeline runs.
corpusToPrisma = function(corpus, alpha=.05, skipFeatureCorrelation=FALSE) {
    #require(Matrix)
    if (requireNamespace("tm", quietly = TRUE) && packageVersion("tm") >= '0.6') {
        #require(tm)
        # features (terms) are rows, documents are columns -- same layout
        # as the sally readers produce
        tdm = tm::TermDocumentMatrix(corpus)
        data = list(data=Matrix(as.matrix(tdm)))
        data = preprocessPrismaData(data, alpha, skipFeatureCorrelation)
        data$path = "tm-Corpus"
        class(data) = "prisma"
        return(data)
    }
    else {
        stop("Need package tm (>='0.6')")
    }
}
print.prisma = function(x, ...) {
    # S3 print method: report the source path plus the dimensions of the
    # feature x document matrix before and after preprocessing.
    cat("PRISMA data", x$path, "\n")
    cat("Unprocessed data: # features:", nrow(x$unprocessed),
        "# entries:", ncol(x$unprocessed), "\n")
    cat("Processed data: # features:", nrow(x$data),
        "# entries:", ncol(x$data), "\n")
}
# S3 plot method: visualize the processed feature x document matrix via
# image(); dispatch picks the sparse-aware method when data is a Matrix.
plot.prisma = function(x, ...) {
    prismaData=x
    image(prismaData$data)
}
# private functions:
readFSally = function(path, maxLines=-1) {
    # Read a "fsally" file: one line of space-separated ngrams per document,
    # with the LAST line listing the complete ngram alphabet. Returns a
    # sparse feature x document indicator matrix (features = ngrams as rows).
    #require(Matrix)
    f = file(path)
    cat("Reading data...\n")
    data = readLines(f)
    close(f)
    cat("Splitting ngrams...\n")
    ngrams = strsplit(data, " ", fixed=TRUE)
    total = length(data)
    # the final line holds the full alphabet used for index lookups
    allNgrams = ngrams[[total]]
    cat("Calc indices...\n")
    indices = match(unlist(ngrams[-total]), allNgrams)
    cat("Setup matrix...\n")
    N = total-1
    mat = sparseMatrix(indices, rep(1:N, sapply(ngrams[-total], length)),
        x=1,
        dims=c(length(allNgrams), N),
        dimnames=list(allNgrams, paste("line", 1:N, sep="")))
    if (maxLines > 0) {
        # clamp to the number of available documents (1:maxLines would fail
        # for maxLines > N) and keep the matrix shape -- without drop=FALSE a
        # single selected column would degrade to a plain vector
        return(mat[, seq_len(min(maxLines, N)), drop=FALSE])
    }
    else {
        return(mat)
    }
}
# Read a "sally" output file into a sparse feature x document matrix.
# Each line holds a comma-separated "token:hash:count" list followed by an
# origin token; with sep=" " the scan yields alternating list/origin fields.
readSally = function(path, maxLines=-1) {
    #require(Matrix)
    f = file(path)
    data = scan(f, what="char", sep=" ", quote="", quiet=TRUE, comment.char="", skip=1, nlines=maxLines)
    close(f)
    rawngrams = data[c(TRUE, FALSE)]   # odd entries: per-document ngram lists
    origin = data[c(FALSE, TRUE)]      # even entries: document origins (column names)
    # cv is the ":"-split triple of one entry; the value is the third field,
    # named by the second field -- assumes sally's "token:hash:count" layout,
    # TODO(review): confirm field order against the sally output format
    processNgram = function(cv) {
        ret = cv[3]
        names(ret) = cv[2]
        return(ret)
    }
    ngrams = lapply(strsplit(rawngrams, ",", fixed=TRUE), function(obj) sapply(strsplit(obj, ":", fixed=TRUE), processNgram))
    # alphabet of all observed feature names, in first-seen order
    allNgrams = unique(unlist(lapply(ngrams, function(ngram) names(ngram)), use.names=FALSE))
    indices = unlist(lapply(ngrams, function(ngram) match(names(ngram), allNgrams)), use.names=FALSE)
    # generate a matrix in ml-style: rows are the features, cols are the samples
    mat = sparseMatrix(indices, rep(1:length(ngrams), sapply(ngrams, length)), x= as.numeric(unlist(ngrams, use.names=FALSE)), dims=c(length(allNgrams), length(ngrams)), dimnames=list(allNgrams, origin))
    return(mat)
}
readHarry = function(path, maxLines=-1) {
    # Read a harry annotation table: tab-separated with a header row, no
    # quote/comment handling, strings kept as character (as.is). A negative
    # maxLines means "read everything" (read.table's nrows semantics).
    annotation <- read.table(path,
                             header = TRUE,
                             sep = "\t",
                             quote = "",
                             comment.char = "",
                             as.is = TRUE,
                             nrows = maxLines)
    annotation
}
readRaw = function(path, maxLines=-1) {
    # Read the raw payload file line by line; a negative maxLines reads the
    # whole file (readLines' n semantics). The connection is closed on exit.
    con = file(path)
    on.exit(close(con), add = TRUE)
    readLines(con, n = maxLines)
}
readPrismaInput = function(path, maxLines=-1, fastSally=TRUE) {
    # Load the sally ngram matrix plus the optional harry annotation and
    # raw payload files that share the same path prefix.
    if (fastSally) {
        sally = readFSally(sprintf("%s.fsally", path), maxLines)
    }
    else {
        sally = readSally(sprintf("%s.sally", path), maxLines)
    }
    data = list(data=sally)
    hfile = sprintf("%s.harry", path)
    # file.access() returns 0 on success and -1 on failure, so it must be
    # compared against 0: the previous bare truthiness test evaluated to
    # FALSE for readable files, silently skipping the optional inputs.
    if (file.exists(hfile) && file.access(hfile, mode=4) == 0) {
        data$annotation = readHarry(hfile, maxLines)
    }
    rfile = sprintf("%s.rawquoted", path)
    if (file.exists(rfile) && file.access(rfile, mode=4) == 0) {
        data$raw = readRaw(rfile, maxLines)
    }
    return(data)
}
duplicateRemover = function(data) {
    # Collapse documents (columns) with identical binary feature patterns.
    # Returns the deduplicated matrix, a remapper from original columns to
    # representative columns, and the per-pattern multiplicity.
    if (inherits(data, "Matrix")) {
        classes = calcClassForSparseMatrix(data)
    }
    else {
        # dense fallback: the pattern key is the concatenated row indices
        # of the entries equal to 1 (data is assumed binarized upstream)
        classes = sapply(1:ncol(data), function(colIndex) paste(which(data[, colIndex] == 1), collapse=" "))
    }
    classCount = table(classes)
    uniqueClasses = names(classCount)
    # just pick the first data point for each class; a single vectorized
    # match() replaces the former per-element sapply (O(n^2) -> O(n))
    classIndex = match(uniqueClasses, classes)
    data = data[, classIndex]
    # names kept on the remapper to match the previous sapply behavior
    remapper = setNames(match(classes, uniqueClasses), classes)
    return(list(data=data, remapper=remapper, count=classCount))
}
# Compute a duplicate-detection "class" string per column of a sparse
# matrix: the space-joined row indices of its non-zero entries. Columns
# with equal strings have identical sparsity patterns.
# NOTE(review): reads the @i/@p slots directly, so this assumes
# column-compressed (CsparseMatrix/dgCMatrix) storage -- confirm callers
# never pass a triplet (TsparseMatrix) object.
calcClassForSparseMatrix = function(data) {
    i = data@i                 # 0-based row indices of non-zero entries
    dp = c(0, diff(data@p))    # number of non-zeros in each column
    csdp = cumsum(dp)          # cumulative offsets into the i slot
    oneClass = function(index) {
        from = csdp[index]+1
        to = csdp[index+1]
        if (from > to) {
            # zero entry
            return("")
        }
        else {
            return(paste(i[from:to], collapse=" "))
        }
    }
    sapply(1:ncol(data), oneClass)
}
# Shared preprocessing pipeline: binarize + statistically filter the
# feature matrix, group perfectly correlated features, then collapse
# duplicate documents. The raw matrix is kept in $unprocessed.
preprocessPrismaData =function(data, alpha=.05, skipFeatureCorrelation=FALSE) {
    data$unprocessed = data$data
    # test-based feature filtering and (optional) correlation grouping
    processed = filterDataByTestAndCor(data$data, alpha, skipFeatureCorrelation)
    # collapse documents with identical feature patterns
    duplicatesRemoved = duplicateRemover(processed$mat)
    data$data = duplicatesRemoved$data
    data$remapper = duplicatesRemoved$remapper
    data$duplicatecount = as.vector(duplicatesRemoved$count)
    data$group = processed$group
    data$occAlways = processed$always
    data$occNever = processed$never
    return(data)
}
count2freq = function(mat) {
    # Normalize each column (= document) of the feature x document count
    # matrix to relative frequencies. Working on the transpose lines the
    # per-document totals up with rowSums() for a simple division.
    byDoc <- t(mat)
    t(byDoc / rowSums(byDoc))
}
# Convert a count matrix to a 0/1 indicator matrix.
count2bin = function(mat) {
    #require(Matrix)
    if (inherits(mat, "TsparseMatrix")) {
        # NOTE(review): triplet matrices are returned UNCHANGED, i.e. their
        # stored counts are not set to 1, unlike the other two branches --
        # confirm callers never pass a TsparseMatrix with counts > 1.
        ret = mat
    }
    else if (inherits(mat, "CsparseMatrix")) {
        # rebuild with all stored values forced to 1, keeping the sparsity
        # pattern, dimensions and dimnames
        ret = sparseMatrix(mat@i+1, p=mat@p, x=1, dims=mat@Dim, dimnames=mat@Dimnames)
    }
    else {
        # dense fallback: clamp every positive count to 1
        ret = as.matrix(mat)
        ret[ret > 0] = 1
    }
    return(ret)
}
# Partition the features (rows) of a binary matrix into groups of
# perfectly correlated features: in each pass the first still-unassigned
# feature is the pivot, and sparse.cor() marks every remaining feature
# that occurs in exactly the same documents. Returns an integer group id
# per feature.
groupCorrelatedNgrams = function(data) {
    nfeats = nrow(data)
    ndocs = ncol(data)
    toCheck = 1:nfeats       # features not yet assigned to a group
    groups = rep(-1, nfeats)
    groupCount = 1
    # is it possible to calculate correlations on sparse matrices?
    #mat = as.matrix(data)
    mat = data
    while (length(toCheck) > 0) {
        cat("to check:", length(toCheck), "\n")
        if (length(toCheck) == 1) {
            # a single remaining feature forms its own group
            curCor = 1
        }
        else {
            # correlation of each remaining feature against the first one
            curCor = sparse.cor(mat[toCheck, ])
        }
        group = toCheck[curCor == 1]
        groups[group] = groupCount
        groupCount = groupCount + 1
        toCheck = toCheck[curCor != 1]
        #cat(data$str[group], "\n")
    }
    return(groups)
}
sparse.cor <- function(X){
    # For a binary feature x document matrix X: return a 0/1 vector marking
    # which rows are perfectly correlated with row 1, i.e. occur in exactly
    # the same set of documents as the pivot feature.
    docsWithFeature = (X[1, ] != 0)
    onDocs = sum(docsWithFeature)
    ret = rep(0, nrow(X))
    ret[1] = 1   # the pivot feature is always perfectly correlated with itself
    if (onDocs >= 1) {
        # drop=FALSE keeps the sub-matrices two-dimensional so rowSums()
        # works uniformly for 0 or 1 selected columns as well; the previous
        # branching code crashed (non-conformable logical ops) whenever the
        # pivot feature occurred in every document (zero "off" columns).
        onFeatureDocs = rowSums(X[, docsWithFeature, drop = FALSE])
        offFeatureDocs = rowSums(X[, !docsWithFeature, drop = FALSE])
        # correlation is one iff a feature never occurs where the pivot is
        # absent AND occurs in all documents where the pivot is present
        ret[(offFeatureDocs == 0) & (onFeatureDocs == onDocs)] = 1
    }
    return(ret)
}
# Collapse each group of perfectly correlated features into a single row.
# The kept row is the group's first feature; its new row name concatenates
# all member feature names. Returns the reduced matrix plus the
# feature -> group-id mapping.
compressByGroup = function(data) {
    features = rownames(data)
    groups = groupCorrelatedNgrams(data)
    # row indices of each group, keyed by group id
    indByG = split(1:length(groups), groups)
    names(groups) = features
    newDimNames = sapply(indByG, function(g) paste(features[g], collapse=" "))
    # just keep the first feature of the group...
    # since the rest contains the same information (cor=1)
    data = data[sapply(indByG, function(g) g[1]), ]
    rownames(data) = newDimNames
    return(list(data=data, group=groups))
}
# data should be binary and unnormalized!
# hmmm... the "normal" testing weirdness of thinking-negative:
# never = ttestNgrams(data, 0, "greater")
# we would keep these...
# data$str[p.adjust(never, "bonf") < 0.05]
## [1] "\nAcc" "\nHos" " */*" " HTT" " cgi" " www" "*/*\n" ".1\nH" ".com"
## [10] ".foo" ".php" "/1.1" "/sea" "1\nHo" "1.1\n" ": */" ": ww" "Acce"
## [19] "ET c" "GET " "HTTP" "Host" "P/1." "T cg" "TP/1" "TTP/" "ar.c"
## [28] "arch" "bar." "ccep" "cept" "cgi/" "ch.p" "com\n" "earc" "ept:"
## [37] "foob" "gi/s" "h.ph" "hp?s" "i/se" "m\nAc" "obar" "om\nA" "ooba"
## [46] "ost:" "p?s=" "php?" "pt: " "r.co" "rch." "sear" "st: " "t: *"
## [55] "t: w" "w.fo" "ww.f" "www." "&par" "/adm" "=ren" "?act" "acti"
## [64] "admi" "ame&" "ctio" "dmin" "e&pa" "enam" "gi/a" "hp?a" "i/ad"
## [73] "in.p" "ion=" "me&p" "min." "n.ph" "n=re" "name" "on=r" "p?ac"
## [82] "par=" "rena" "tion" " sta" ".htm" "ET s" "T st" "atic" "html"
## [91] "l HT" "ml H" "stat" "tati" "tic/" "tml " "=mov" "move" "n=mo"
## [100] "on=m" "ove&" "ve&p" "=sho" "how&" "n=sh" "on=s" "ow&p" "show"
## [109] "w&pa" "=del" "dele" "elet" "ete&" "lete" "n=de" "on=d" "te&p"
## [118] "G HT"
# always = ttestNgrams(data, 1, "less")
# ...and drop these...
# data$str[p.adjust(always, "bonf") > 0.05]
## [1] "\nAcc" "\nHos" " */*" " HTT" " www" "*/*\n" ".1\nH" ".com" ".foo"
## [10] "/1.1" "1\nHo" "1.1\n" ": */" ": ww" "Acce" "GET " "HTTP" "Host"
## [19] "P/1." "TP/1" "TTP/" "ar.c" "bar." "ccep" "cept" "com\n" "ept:"
## [28] "foob" "m\nAc" "obar" "om\nA" "ooba" "ost:" "pt: " "r.co" "st: "
## [37] "t: *" "t: w" "w.fo" "ww.f" "www."
# So finally just keep these:
# data$str[p.adjust(always, "bonf") < 0.05 & p.adjust(never, "bonf") < 0.05]
ttestNgrams = function(data, mu, alternative=c("greater", "less")) {
    # One-sided normal-approximation test per feature (row) of a binary
    # feature x document matrix: compares the observed document count of
    # each feature against a null occurrence probability near 0 (mu = 0)
    # or near 1 (mu = 1). Returns one p-value per feature.
    #require(Matrix)
    alternative <- match.arg(alternative)
    N = ncol(data)
    # observed number of documents containing each feature
    muNgram = rowMeans(data) * N
    # some sources give 5, others 10 as the factor for when the normal
    # approximation works... we just take the average (7.5) here.
    mu = ifelse(mu == 0, 7.5/N, 1 - (7.5/N))
    theSd = sqrt(N * mu * (1 - mu))   # binomial standard deviation under H0
    M = mu * N                        # expected count under H0
    # pnorm is vectorized, so the former per-element sapply is unnecessary
    pValues = pnorm((muNgram - M) / theSd, lower.tail = (alternative == "less"))
    return(pValues)
}
# Binarize the matrix, keep only features that are statistically neither
# "always on" nor "never on" (Holm-adjusted one-sided tests at level
# alpha), and optionally compress perfectly correlated features. Passing
# alpha = NULL disables the test filtering entirely.
filterDataByTestAndCor = function(data, alpha=0.05, skipFeatureCorrelation=FALSE) {
    data = count2bin(data)
    if (is.null(alpha)) {
        #keep = (alwaysP != 1)
        keep = rep(TRUE, nrow(data))
    }
    else {
        # "never": evidence the feature occurs more often than (almost) never;
        # "always": evidence it occurs less often than (almost) always
        never = ttestNgrams(data, 0, "greater")
        always = ttestNgrams(data, 1, "less")
        alwaysP = p.adjust(always, "holm")
        neverP = p.adjust(never, "holm")
        # keep only features that are significantly variable in both directions
        keep = (alwaysP < alpha & neverP < alpha)
    }
    allStr = rownames(data)
    fdata = data[keep, ]
    if (skipFeatureCorrelation) {
        # trivial grouping: every feature is its own group
        features = rownames(fdata)
        groups = 1:length(features)
        names(groups) = features
        dataAndGroup =list(data=fdata, group=groups)
    }
    else {
        dataAndGroup = compressByGroup(fdata)
    }
    if (is.null(alpha)) {
        #always = allStr[(alwaysP == 1)]
        always = c()
        never = c()
    }
    else {
        # feature names dropped as constant ("always" / "never" occurring)
        always = allStr[(alwaysP >= alpha)]
        never = allStr[(neverP >= alpha)]
    }
    return(list(mat=dataAndGroup$data, group=dataAndGroup$group, always=always, never=never))
}
| /modules/PRISMA/R/prisma.R | permissive | N0nent1ty/pulsar | R | false | false | 11,180 | r | # public functions:
loadPrismaData = function(path, maxLines=-1, fastSally=TRUE, alpha=.05, skipFeatureCorrelation=FALSE) {
data = readPrismaInput(path, maxLines, fastSally)
data = preprocessPrismaData(data, alpha, skipFeatureCorrelation)
data$path = path
class(data) = "prisma"
return(data)
}
getDuplicateData = function(prismaData) {
return(prismaData$data[, prismaData$remapper])
}
corpusToPrisma = function(corpus, alpha=.05, skipFeatureCorrelation=FALSE) {
#require(Matrix)
if (requireNamespace("tm", quietly = TRUE) && packageVersion("tm") >= '0.6') {
#require(tm)
tdm = tm::TermDocumentMatrix(corpus)
data = list(data=Matrix(as.matrix(tdm)))
data = preprocessPrismaData(data, alpha, skipFeatureCorrelation)
data$path = "tm-Corpus"
class(data) = "prisma"
return(data)
}
else {
stop("Need package tm (>='0.6')")
}
}
print.prisma = function(x, ...) {
prismaData=x
cat("PRISMA data", prismaData$path, "\n")
cat("Unprocessed data: # features:", nrow(prismaData$unprocessed),
"# entries:", ncol(prismaData$unprocessed), "\n")
cat("Processed data: # features:", nrow(prismaData$data),
"# entries:", ncol(prismaData$data), "\n")
}
plot.prisma = function(x, ...) {
prismaData=x
image(prismaData$data)
}
# private functions:
readFSally = function(path, maxLines=-1) {
#require(Matrix)
f = file(path)
cat("Reading data...\n")
data = readLines(f)
cat("Splitting ngrams...\n")
ngrams = strsplit(data, " ", fixed=TRUE)
total = length(data)
allNgrams = ngrams[[total]]
close(f)
cat("Calc indices...\n")
indices = match(unlist(ngrams[-total]), allNgrams)
cat("Setup matrix...\n")
N = total-1
mat = sparseMatrix(indices, rep(1:N, sapply(ngrams[-total], length)),
x=1,
dims=c(length(allNgrams), N),
dimnames=list(allNgrams, paste("line", 1:N, sep="")))
if (maxLines > 0) {
return(mat[, 1:maxLines])
}
else {
return(mat)
}
}
readSally = function(path, maxLines=-1) {
#require(Matrix)
f = file(path)
data = scan(f, what="char", sep=" ", quote="", quiet=TRUE, comment.char="", skip=1, nlines=maxLines)
close(f)
rawngrams = data[c(TRUE, FALSE)]
origin = data[c(FALSE, TRUE)]
processNgram = function(cv) {
ret = cv[3]
names(ret) = cv[2]
return(ret)
}
ngrams = lapply(strsplit(rawngrams, ",", fixed=TRUE), function(obj) sapply(strsplit(obj, ":", fixed=TRUE), processNgram))
allNgrams = unique(unlist(lapply(ngrams, function(ngram) names(ngram)), use.names=FALSE))
indices = unlist(lapply(ngrams, function(ngram) match(names(ngram), allNgrams)), use.names=FALSE)
# generate a matrix in ml-style: rows are the features, cols are the samples
mat = sparseMatrix(indices, rep(1:length(ngrams), sapply(ngrams, length)), x= as.numeric(unlist(ngrams, use.names=FALSE)), dims=c(length(allNgrams), length(ngrams)), dimnames=list(allNgrams, origin))
return(mat)
}
readHarry = function(path, maxLines=-1) {
harry = read.table(path, sep="\t", quote="", comment.char="",
as.is=TRUE, header=TRUE, nrows=maxLines)
return(harry)
}
readRaw = function(path, maxLines=-1) {
f = file(path)
raw = readLines(f, n=maxLines)
close(f)
#rawsplit = strsplit(raw, " ", fixed=TRUE)
return(raw)
}
readPrismaInput = function(path, maxLines=-1, fastSally=TRUE) {
    # Load the sally ngram matrix plus the optional harry annotation and
    # raw payload files that share the same path prefix.
    if (fastSally) {
        sally = readFSally(sprintf("%s.fsally", path), maxLines)
    }
    else {
        sally = readSally(sprintf("%s.sally", path), maxLines)
    }
    data = list(data=sally)
    hfile = sprintf("%s.harry", path)
    # file.access() returns 0 on success and -1 on failure, so it must be
    # compared against 0: the previous bare truthiness test evaluated to
    # FALSE for readable files, silently skipping the optional inputs.
    if (file.exists(hfile) && file.access(hfile, mode=4) == 0) {
        data$annotation = readHarry(hfile, maxLines)
    }
    rfile = sprintf("%s.rawquoted", path)
    if (file.exists(rfile) && file.access(rfile, mode=4) == 0) {
        data$raw = readRaw(rfile, maxLines)
    }
    return(data)
}
duplicateRemover = function(data) {
if (inherits(data, "Matrix")) {
classes = calcClassForSparseMatrix(data)
}
else {
classes = sapply(1:ncol(data), function(colIndex) paste(which(data[, colIndex] == 1), collapse=" "))
}
classCount = table(classes)
uniqueClasses = names(classCount)
# just pick the first data point for each class:
classIndex = sapply(uniqueClasses, function(cl) match(cl, classes))
data = data[, classIndex]
remapper = sapply(classes, function(cl) match(cl, uniqueClasses))
return(list(data=data, remapper=remapper, count=classCount))
}
calcClassForSparseMatrix = function(data) {
i = data@i
dp = c(0, diff(data@p))
csdp = cumsum(dp)
oneClass = function(index) {
from = csdp[index]+1
to = csdp[index+1]
if (from > to) {
# zero entry
return("")
}
else {
return(paste(i[from:to], collapse=" "))
}
}
sapply(1:ncol(data), oneClass)
}
preprocessPrismaData =function(data, alpha=.05, skipFeatureCorrelation=FALSE) {
data$unprocessed = data$data
processed = filterDataByTestAndCor(data$data, alpha, skipFeatureCorrelation)
duplicatesRemoved = duplicateRemover(processed$mat)
data$data = duplicatesRemoved$data
data$remapper = duplicatesRemoved$remapper
data$duplicatecount = as.vector(duplicatesRemoved$count)
data$group = processed$group
data$occAlways = processed$always
data$occNever = processed$never
return(data)
}
count2freq = function(mat) {
# use the samples x features view for simpler calculation
mat = t(mat)
return(t(mat / rowSums(mat)))
}
count2bin = function(mat) {
#require(Matrix)
if (inherits(mat, "TsparseMatrix")) {
ret = mat
}
else if (inherits(mat, "CsparseMatrix")) {
ret = sparseMatrix(mat@i+1, p=mat@p, x=1, dims=mat@Dim, dimnames=mat@Dimnames)
}
else {
ret = as.matrix(mat)
ret[ret > 0] = 1
}
return(ret)
}
groupCorrelatedNgrams = function(data) {
nfeats = nrow(data)
ndocs = ncol(data)
toCheck = 1:nfeats
groups = rep(-1, nfeats)
groupCount = 1
# is it possible to calculate correlations on sparse matrices?
#mat = as.matrix(data)
mat = data
while (length(toCheck) > 0) {
cat("to check:", length(toCheck), "\n")
if (length(toCheck) == 1) {
curCor = 1
}
else {
curCor = sparse.cor(mat[toCheck, ])
}
group = toCheck[curCor == 1]
groups[group] = groupCount
groupCount = groupCount + 1
toCheck = toCheck[curCor != 1]
#cat(data$str[group], "\n")
}
return(groups)
}
sparse.cor <- function(X){
    # For a binary feature x document matrix X: return a 0/1 vector marking
    # which rows are perfectly correlated with row 1, i.e. occur in exactly
    # the same set of documents as the pivot feature.
    docsWithFeature = (X[1, ] != 0)
    onDocs = sum(docsWithFeature)
    ret = rep(0, nrow(X))
    ret[1] = 1   # the pivot feature is always perfectly correlated with itself
    if (onDocs >= 1) {
        # drop=FALSE keeps the sub-matrices two-dimensional so rowSums()
        # works uniformly for 0 or 1 selected columns as well; the previous
        # branching code crashed (non-conformable logical ops) whenever the
        # pivot feature occurred in every document (zero "off" columns).
        onFeatureDocs = rowSums(X[, docsWithFeature, drop = FALSE])
        offFeatureDocs = rowSums(X[, !docsWithFeature, drop = FALSE])
        # correlation is one iff a feature never occurs where the pivot is
        # absent AND occurs in all documents where the pivot is present
        ret[(offFeatureDocs == 0) & (onFeatureDocs == onDocs)] = 1
    }
    return(ret)
}
compressByGroup = function(data) {
features = rownames(data)
groups = groupCorrelatedNgrams(data)
indByG = split(1:length(groups), groups)
names(groups) = features
newDimNames = sapply(indByG, function(g) paste(features[g], collapse=" "))
# just keep the first feature of the group...
# since the rest contains the same information (cor=1)
data = data[sapply(indByG, function(g) g[1]), ]
rownames(data) = newDimNames
return(list(data=data, group=groups))
}
# data should be binary and unnormalized!
# hmmm... the "normal" testing weirdness of thinking-negative:
# never = ttestNgrams(data, 0, "greater")
# we would keep these...
# data$str[p.adjust(never, "bonf") < 0.05]
## [1] "\nAcc" "\nHos" " */*" " HTT" " cgi" " www" "*/*\n" ".1\nH" ".com"
## [10] ".foo" ".php" "/1.1" "/sea" "1\nHo" "1.1\n" ": */" ": ww" "Acce"
## [19] "ET c" "GET " "HTTP" "Host" "P/1." "T cg" "TP/1" "TTP/" "ar.c"
## [28] "arch" "bar." "ccep" "cept" "cgi/" "ch.p" "com\n" "earc" "ept:"
## [37] "foob" "gi/s" "h.ph" "hp?s" "i/se" "m\nAc" "obar" "om\nA" "ooba"
## [46] "ost:" "p?s=" "php?" "pt: " "r.co" "rch." "sear" "st: " "t: *"
## [55] "t: w" "w.fo" "ww.f" "www." "&par" "/adm" "=ren" "?act" "acti"
## [64] "admi" "ame&" "ctio" "dmin" "e&pa" "enam" "gi/a" "hp?a" "i/ad"
## [73] "in.p" "ion=" "me&p" "min." "n.ph" "n=re" "name" "on=r" "p?ac"
## [82] "par=" "rena" "tion" " sta" ".htm" "ET s" "T st" "atic" "html"
## [91] "l HT" "ml H" "stat" "tati" "tic/" "tml " "=mov" "move" "n=mo"
## [100] "on=m" "ove&" "ve&p" "=sho" "how&" "n=sh" "on=s" "ow&p" "show"
## [109] "w&pa" "=del" "dele" "elet" "ete&" "lete" "n=de" "on=d" "te&p"
## [118] "G HT"
# always = ttestNgrams(data, 1, "less")
# ...and drop these...
# data$str[p.adjust(always, "bonf") > 0.05]
## [1] "\nAcc" "\nHos" " */*" " HTT" " www" "*/*\n" ".1\nH" ".com" ".foo"
## [10] "/1.1" "1\nHo" "1.1\n" ": */" ": ww" "Acce" "GET " "HTTP" "Host"
## [19] "P/1." "TP/1" "TTP/" "ar.c" "bar." "ccep" "cept" "com\n" "ept:"
## [28] "foob" "m\nAc" "obar" "om\nA" "ooba" "ost:" "pt: " "r.co" "st: "
## [37] "t: *" "t: w" "w.fo" "ww.f" "www."
# So finally just keep these:
# data$str[p.adjust(always, "bonf") < 0.05 & p.adjust(never, "bonf") < 0.05]
ttestNgrams = function(data, mu, alternative=c("greater", "less")) {
#require(Matrix)
alternative <- match.arg(alternative)
N = ncol(data)
nfeats = nrow(data)
muNgram = rowMeans(data) * N
# some sources give 5, other 10 as a factor, of when the normal approx. works...
# we just take the average here.
mu = ifelse(mu == 0, 7.5/N, 1 - (7.5/N))
theVar = sqrt(N * mu * (1 - mu))
M = mu * N
if (alternative == "greater") {
pValues = sapply(muNgram, function(m) pnorm((m - M) / theVar, lower.tail = FALSE))
}
if (alternative == "less") {
pValues = sapply(muNgram, function(m) pnorm((m - M) / theVar, lower.tail = TRUE))
}
return(pValues)
}
filterDataByTestAndCor = function(data, alpha=0.05, skipFeatureCorrelation=FALSE) {
data = count2bin(data)
if (is.null(alpha)) {
#keep = (alwaysP != 1)
keep = rep(TRUE, nrow(data))
}
else {
never = ttestNgrams(data, 0, "greater")
always = ttestNgrams(data, 1, "less")
alwaysP = p.adjust(always, "holm")
neverP = p.adjust(never, "holm")
keep = (alwaysP < alpha & neverP < alpha)
}
allStr = rownames(data)
fdata = data[keep, ]
if (skipFeatureCorrelation) {
features = rownames(fdata)
groups = 1:length(features)
names(groups) = features
dataAndGroup =list(data=fdata, group=groups)
}
else {
dataAndGroup = compressByGroup(fdata)
}
if (is.null(alpha)) {
#always = allStr[(alwaysP == 1)]
always = c()
never = c()
}
else {
always = allStr[(alwaysP >= alpha)]
never = allStr[(neverP >= alpha)]
}
return(list(mat=dataAndGroup$data, group=dataAndGroup$group, always=always, never=never))
}
|
library(clusterProfiler)
library(org.Mm.eg.db)
library(tidyverse)
library(pals)
get_anno <- function(data){
  # Read an annotated peak table (tab-separated, produced by the
  # findPeaks_annotatePeaks.sh step) and normalize the first (peak id)
  # and twelfth (Ensembl id) column names for downstream joins.
  anno <- read.csv(data, header = TRUE, stringsAsFactors = FALSE, sep = "\t")
  colnames(anno)[1] <- "PeakID"
  colnames(anno)[12] <- "ENSEMBL"
  anno
}
komoji2omoji <- function(string){
  # Capitalize the first character of a string ("komoji" = lowercase,
  # "omoji" = uppercase in Japanese); the rest is left untouched.
  first_char <- toupper(substr(string, 1, 1))
  remainder <- substr(string, 2, nchar(string))
  paste0(first_char, remainder)
}
# Draw a heat-map (tile) plot of -log10 adjusted p-values for a manually
# selected subset of enriched GO terms, one column per gene cluster.
# Term labels are capitalized and ordered alphabetically top-to-bottom.
get_custom_plot <- function(ck_simp, selected_terms_by_dotplot){
  ck_simp <- as.data.frame(ck_simp)
  # capitalize the selected terms so they match the capitalized Descriptions
  selected_terms_by_dotplot <- selected_terms_by_dotplot %>% rev() %>% Vectorize(komoji2omoji)()
  ck_simp$Description <- Vectorize(komoji2omoji)(ck_simp$Description)
  ck_simp <- subset(ck_simp, Description %in% selected_terms_by_dotplot)
  # decreasing sort + default y axis direction puts terms in alphabetical
  # order from top to bottom
  ck_simp$Description <- factor(ck_simp$Description, levels = sort(selected_terms_by_dotplot, decreasing = T))
  g <- ggplot(ck_simp, aes(x = Cluster, y = Description, fill = -log10(p.adjust)))+
    geom_tile()+
    theme_bw()+
    guides(size=guide_legend(title="-log10 p.adjust"))+
    # NOTE(review): `F` for FALSE and logical guide suppression are
    # deprecated ggplot2 idioms; `guides(color = "none")` is the modern form
    guides(color = F)+
    theme(axis.title = element_blank())+
    theme(axis.text = element_text(size = 20))+
    theme(legend.text = element_text(size = 12), legend.title = element_text(size = 12))+
    scale_fill_gradientn(colours=colorRampPalette(c("mistyrose", "darkorange1"))(100))+
    theme(panel.grid = element_blank(), strip.background = element_blank(), strip.text = element_blank())
  plot(g)
}
# --- Load inputs: annotated super-enhancer peaks and scaled TPM table ----
WT <- get_anno("Path to your 'WT_SE_annotated.txt' generated by 'findPeaks_annotatePeaks.sh'")
KO <- get_anno("Path to your 'KO_SE_annotated.txt' generated by 'findPeaks_annotatePeaks.sh'")
TPM <- read.csv("Path to your 'Scaled_TPM.csv' generated by 'Calculate_scaled_TPM_of_RNA-seq.R'", stringsAsFactors = F)
colnames(TPM) <- c("ENSEMBL", "WT", "KO")
# Attach expression values to the peaks (one row per Ensembl id) and keep
# genes expressed above TPM 1 in at least one condition
WT_with_TPM <- left_join(WT, TPM, by = "ENSEMBL") %>% distinct(ENSEMBL, .keep_all = TRUE)
WT_with_TPM <- subset(WT_with_TPM, WT > 1 | KO > 1) #Remove low expressing genes
KO_with_TPM <- left_join(KO, TPM, by = "ENSEMBL") %>% distinct(ENSEMBL, .keep_all = TRUE)
KO_with_TPM <- subset(KO_with_TPM, WT > 1 | KO > 1) #Remove low expressing genes
# --- Map gene symbols to Entrez ids for clusterProfiler ------------------
WT_ID <- unique(WT_with_TPM$Gene.Name)
KO_ID <- unique(KO_with_TPM$Gene.Name)
WT_ID <- bitr(WT_ID, fromType="SYMBOL", toType="ENTREZID", OrgDb="org.Mm.eg.db")
KO_ID <- bitr(KO_ID, fromType="SYMBOL", toType="ENTREZID", OrgDb="org.Mm.eg.db")
# --- GO biological-process enrichment, compared between WT and KO --------
cls <- list(WT = WT_ID$ENTREZID, KO = KO_ID$ENTREZID)
ck <- compareCluster(geneCluster = cls , fun = "enrichGO", OrgDb = "org.Mm.eg.db",
                     ont = "BP",
                     pAdjustMethod = "BH",
                     pvalueCutoff = 0.01,
                     qvalueCutoff = 0.05,
                     readable = TRUE)
# Collapse redundant GO terms (semantic similarity > 0.7) before plotting
ck_simp_WT_vs_KO <- clusterProfiler::simplify(ck, cutoff=0.7, by="p.adjust", select_fun=min)
dotplot(ck_simp_WT_vs_KO)
# Terms hand-picked from the dotplot for the final custom heat-map figure
selected_terms_by_dotplot <- c("cell-substrate adhesion",
                               "actin filament organization",
                               "regulation of actin filament-based process",
                               "embryonic organ development",
                               "epithelial tube morphogenesis",
                               "striated muscle tissue development",
                               "positive regulation of cell adhesion",
                               "ossification",
                               "regulation of cellular response to growth factor stimulus",
                               "gland development")
get_custom_plot(ck_simp_WT_vs_KO, selected_terms_by_dotplot) | /GO_analysis_of_SE_associated_genes.R | permissive | okadalabipr/Kanazawa2021 | R | false | false | 3,741 | r | library(clusterProfiler)
library(org.Mm.eg.db)
library(tidyverse)
library(pals)
get_anno <- function(data){
df <- read.csv(data, header = T, stringsAsFactors = F, sep = "\t")
colnames(df)[1] <- "PeakID"
colnames(df)[12] <- "ENSEMBL"
return(df)
}
komoji2omoji <- function(string){
omoji <- toupper(substr(string, 1, 1))
sento_igai <- substr(string, 2, nchar(string))
return(paste0(omoji,sento_igai))
}
get_custom_plot <- function(ck_simp, selected_terms_by_dotplot){
ck_simp <- as.data.frame(ck_simp)
selected_terms_by_dotplot <- selected_terms_by_dotplot %>% rev() %>% Vectorize(komoji2omoji)()
ck_simp$Description <- Vectorize(komoji2omoji)(ck_simp$Description)
ck_simp <- subset(ck_simp, Description %in% selected_terms_by_dotplot)
ck_simp$Description <- factor(ck_simp$Description, levels = sort(selected_terms_by_dotplot, decreasing = T))
g <- ggplot(ck_simp, aes(x = Cluster, y = Description, fill = -log10(p.adjust)))+
geom_tile()+
theme_bw()+
guides(size=guide_legend(title="-log10 p.adjust"))+
guides(color = F)+
theme(axis.title = element_blank())+
theme(axis.text = element_text(size = 20))+
theme(legend.text = element_text(size = 12), legend.title = element_text(size = 12))+
scale_fill_gradientn(colours=colorRampPalette(c("mistyrose", "darkorange1"))(100))+
theme(panel.grid = element_blank(), strip.background = element_blank(), strip.text = element_blank())
plot(g)
}
WT <- get_anno("Path to your 'WT_SE_annotated.txt' generated by 'findPeaks_annotatePeaks.sh'")
KO <- get_anno("Path to your 'KO_SE_annotated.txt' generated by 'findPeaks_annotatePeaks.sh'")
TPM <- read.csv("Path to your 'Scaled_TPM.csv' generated by 'Calculate_scaled_TPM_of_RNA-seq.R'", stringsAsFactors = F)
colnames(TPM) <- c("ENSEMBL", "WT", "KO")
WT_with_TPM <- left_join(WT, TPM, by = "ENSEMBL") %>% distinct(ENSEMBL, .keep_all = TRUE)
WT_with_TPM <- subset(WT_with_TPM, WT > 1 | KO > 1) #Remove low expressing genes
KO_with_TPM <- left_join(KO, TPM, by = "ENSEMBL") %>% distinct(ENSEMBL, .keep_all = TRUE)
KO_with_TPM <- subset(KO_with_TPM, WT > 1 | KO > 1) #Remove low expressing genes
WT_ID <- unique(WT_with_TPM$Gene.Name)
KO_ID <- unique(KO_with_TPM$Gene.Name)
WT_ID <- bitr(WT_ID, fromType="SYMBOL", toType="ENTREZID", OrgDb="org.Mm.eg.db")
KO_ID <- bitr(KO_ID, fromType="SYMBOL", toType="ENTREZID", OrgDb="org.Mm.eg.db")
cls <- list(WT = WT_ID$ENTREZID, KO = KO_ID$ENTREZID)
ck <- compareCluster(geneCluster = cls , fun = "enrichGO", OrgDb = "org.Mm.eg.db",
ont = "BP",
pAdjustMethod = "BH",
pvalueCutoff = 0.01,
qvalueCutoff = 0.05,
readable = TRUE)
ck_simp_WT_vs_KO <- clusterProfiler::simplify(ck, cutoff=0.7, by="p.adjust", select_fun=min)
dotplot(ck_simp_WT_vs_KO)
selected_terms_by_dotplot <- c("cell-substrate adhesion",
"actin filament organization",
"regulation of actin filament-based process",
"embryonic organ development",
"epithelial tube morphogenesis",
"striated muscle tissue development",
"positive regulation of cell adhesion",
"ossification",
"regulation of cellular response to growth factor stimulus",
"gland development")
get_custom_plot(ck_simp_WT_vs_KO, selected_terms_by_dotplot) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{non_inclusion_reasons}
\alias{non_inclusion_reasons}
\title{Metadata on non-inclusion reasons}
\format{An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 9 rows and 3 columns.}
\usage{
non_inclusion_reasons
}
\description{
Metadata on non-inclusion reasons
}
\keyword{datasets}
| /package/man/non_inclusion_reasons.Rd | permissive | dynverse/dynbenchmark | R | false | true | 417 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{non_inclusion_reasons}
\alias{non_inclusion_reasons}
\title{Metadata on non-inclusion reasons}
\format{An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 9 rows and 3 columns.}
\usage{
non_inclusion_reasons
}
\description{
Metadata on non-inclusion reasons
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/const.R
\name{PATH_GET_OPTIONS}
\alias{PATH_GET_OPTIONS}
\title{Build path to the options endpoint.}
\usage{
PATH_GET_OPTIONS(field)
}
\arguments{
\item{field}{the field name for the endpoint giving the available options}
}
\value{
A string: the path to the options endpoint.
}
\description{
Build path to the options endpoint.
}
| /man/PATH_GET_OPTIONS.Rd | permissive | MehdiChelh/eiopaR | R | false | true | 408 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/const.R
\name{PATH_GET_OPTIONS}
\alias{PATH_GET_OPTIONS}
\title{Build path to the options endpoint.}
\usage{
PATH_GET_OPTIONS(field)
}
\arguments{
\item{field}{the field name for the endpoint giving the available options}
}
\value{
A string: the path to the options endpoint.
}
\description{
Build path to the options endpoint.
}
|
## File Name: mlnormal_soft_thresholding.R
## File Version: 0.13
mlnormal_soft_thresholding <- function( x, lambda )
{
    # Soft-thresholding (shrinkage) operator: shrink every entry of x
    # towards zero by lambda, mapping entries with |x| <= lambda to zero.
    res <- sign(x) * pmax( abs(x) - lambda, 0 )
    return(res)
}
| /LAM/R/mlnormal_soft_thresholding.R | no_license | akhikolla/TestedPackages-NoIssues | R | false | false | 228 | r | ## File Name: mlnormal_soft_thresholding.R
## File Version: 0.13
mlnormal_soft_thresholding <- function( x, lambda )
{
x_abs <- abs(x)
x <- ifelse( x_abs > lambda, x - sign(x) * lambda, 0 )
return(x)
}
|
## version: 1.35
## method: get
## path: /tasks
## code: 200
## response: [{"ID":"0kzzo1i0y4jz6027t0k7aezc7","Version":{"Index":71},"CreatedAt":"2016-06-07T21:07:31.171892745Z","UpdatedAt":"2016-06-07T21:07:31.376370513Z","Spec":{"ContainerSpec":{"Image":"redis"},"Resources":{"Limits":{},"Reservations":{}},"RestartPolicy":{"Condition":"any","MaxAttempts":0},"Placement":{}},"ServiceID":"9mnpnzenvg8p8tdbtq4wvbkcz","Slot":1,"NodeID":"60gvrl6tm78dmak4yl7srz94v","Status":{"Timestamp":"2016-06-07T21:07:31.290032978Z","State":"running","Message":"started","ContainerStatus":{"ContainerID":"e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035","PID":677}},"DesiredState":"running","NetworksAttachments":[{"Network":{"ID":"4qvuz4ko70xaltuqbt8956gd1","Version":{"Index":18},"CreatedAt":"2016-06-07T20:31:11.912919752Z","UpdatedAt":"2016-06-07T21:07:29.955277358Z","Spec":{"Name":"ingress","Labels":{"com.docker.swarm.internal":"true"},"DriverConfiguration":{},"IPAMOptions":{"Driver":{},"Configs":[{"Subnet":"10.255.0.0/16","Gateway":"10.255.0.1"}]}},"DriverState":{"Name":"overlay","Options":{"com.docker.network.driver.overlay.vxlanid_list":"256"}},"IPAMOptions":{"Driver":{"Name":"default"},"Configs":[{"Subnet":"10.255.0.0/16","Gateway":"10.255.0.1"}]}},"Addresses":"10.255.0.10/16"}]},{"ID":"1yljwbmlr8er2waf8orvqpwms","Version":{"Index":30},"CreatedAt":"2016-06-07T21:07:30.019104782Z","UpdatedAt":"2016-06-07T21:07:30.231958098Z","Name":"hopeful_cori","Spec":{"ContainerSpec":{"Image":"redis"},"Resources":{"Limits":{},"Reservations":{}},"RestartPolicy":{"Condition":"any","MaxAttempts":0},"Placement":{}},"ServiceID":"9mnpnzenvg8p8tdbtq4wvbkcz","Slot":1,"NodeID":"60gvrl6tm78dmak4yl7srz94v","Status":{"Timestamp":"2016-06-07T21:07:30.202183143Z","State":"shutdown","Message":"shutdown","ContainerStatus":{"ContainerID":"1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213"}},"DesiredState":"shutdown","NetworksAttachments":[{"Network":{"ID":"4qvuz4ko70xaltuqbt8956gd1","Version":{"Index":18},"CreatedAt":"2016-06-07T20:31:11.912919
752Z","UpdatedAt":"2016-06-07T21:07:29.955277358Z","Spec":{"Name":"ingress","Labels":{"com.docker.swarm.internal":"true"},"DriverConfiguration":{},"IPAMOptions":{"Driver":{},"Configs":[{"Subnet":"10.255.0.0/16","Gateway":"10.255.0.1"}]}},"DriverState":{"Name":"overlay","Options":{"com.docker.network.driver.overlay.vxlanid_list":"256"}},"IPAMOptions":{"Driver":{"Name":"default"},"Configs":[{"Subnet":"10.255.0.0/16","Gateway":"10.255.0.1"}]}},"Addresses":"10.255.0.5/16"}]}]
NULL
# Expected R representation of the ContainerSpec section of the mocked
# GET /tasks response above; fields absent from the JSON become NA or
# empty typed vectors, and nested tables use the package's data_frame helper.
container_spec <- list(
  image = "redis",
  labels = NULL,
  command = character(0),
  args = character(0),
  hostname = NA_character_,
  env = character(0),
  dir = NA_character_,
  user = NA_character_,
  groups = character(0),
  privileges = NULL,
  tty = NA,
  open_stdin = NA,
  read_only = NA,
  mounts = data_frame(
    target = character(0),
    source = character(0),
    type = character(0),
    read_only = logical(0),
    consistency = character(0),
    bind_options = I(list()),
    volume_options = I(list()),
    tmpfs_options = I(list())),
  stop_signal = NA_character_,
  stop_grace_period = NA_integer_,
  health_check = NULL,
  hosts = character(0),
  dns_config = NULL,
  secrets = data_frame(
    file = I(list()),
    secret_id = character(0),
    secret_name = character(0)),
  configs = data_frame(
    file = I(list()),
    config_id = character(0),
    config_name = character(0)),
  isolation = NA_character_)
spec <- list(
plugin_spec = NULL,
container_spec = container_spec,
resources = list(
limits = list(
nano_cpus = NA_integer_,
memory_bytes = NA_integer_,
generic_resources = data_frame(
named_resource_spec = I(list()),
discrete_resource_spec = I(list()))),
reservation = NULL),
restart_policy = list(
condition = "any",
delay = NA_integer_,
max_attempts = 0L,
window = NA_integer_),
placement = list(
constraints = character(0),
preferences = data_frame(
spread = I(list())),
platforms = data_frame(architecture = character(0),
os = character(0))),
force_update = NA_integer_,
runtime = NA_character_,
networks = data_frame(
target = character(),
aliases = I(list())),
log_driver = NULL)
status1 <- list(
timestamp = "2016-06-07T21:07:31.290032978Z",
state = "running",
message = "started",
err = NA_character_,
container_status = list(
container_id = "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035",
pid = 677L,
exit_code = NA_integer_))
status2 <- list(
timestamp = "2016-06-07T21:07:30.202183143Z",
state = "shutdown",
message = "shutdown",
err = NA_character_,
container_status = list(
container_id = "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213",
pid = NA_integer_,
exit_code = NA_integer_))
generic_resources <- data_frame(
named_resource_spec = I(list()),
discrete_resource_spec = I(list()))
data_frame(
id = c("0kzzo1i0y4jz6027t0k7aezc7", "1yljwbmlr8er2waf8orvqpwms"),
version = I(list(list(index = 71L), list(index = 30L))),
created_at = c("2016-06-07T21:07:31.171892745Z",
"2016-06-07T21:07:30.019104782Z"),
updated_at = c("2016-06-07T21:07:31.376370513Z",
"2016-06-07T21:07:30.231958098Z"),
name = c(NA_character_, "hopeful_cori"),
labels = I(list(character(0), character(0))),
spec = I(list(spec, spec)),
service_id = c("9mnpnzenvg8p8tdbtq4wvbkcz", "9mnpnzenvg8p8tdbtq4wvbkcz"),
slot = c(1L, 1L),
node_id = c("60gvrl6tm78dmak4yl7srz94v", "60gvrl6tm78dmak4yl7srz94v"),
assigned_generic_resources = I(list(generic_resources, generic_resources)),
status = I(list(status1, status2)),
desired_state = c("running", "shutdown"))
| /tests/testthat/sample_responses/v1.35/task_list.R | no_license | cran/stevedore | R | false | false | 5,797 | r | ## version: 1.35
## method: get
## path: /tasks
## code: 200
## response: [{"ID":"0kzzo1i0y4jz6027t0k7aezc7","Version":{"Index":71},"CreatedAt":"2016-06-07T21:07:31.171892745Z","UpdatedAt":"2016-06-07T21:07:31.376370513Z","Spec":{"ContainerSpec":{"Image":"redis"},"Resources":{"Limits":{},"Reservations":{}},"RestartPolicy":{"Condition":"any","MaxAttempts":0},"Placement":{}},"ServiceID":"9mnpnzenvg8p8tdbtq4wvbkcz","Slot":1,"NodeID":"60gvrl6tm78dmak4yl7srz94v","Status":{"Timestamp":"2016-06-07T21:07:31.290032978Z","State":"running","Message":"started","ContainerStatus":{"ContainerID":"e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035","PID":677}},"DesiredState":"running","NetworksAttachments":[{"Network":{"ID":"4qvuz4ko70xaltuqbt8956gd1","Version":{"Index":18},"CreatedAt":"2016-06-07T20:31:11.912919752Z","UpdatedAt":"2016-06-07T21:07:29.955277358Z","Spec":{"Name":"ingress","Labels":{"com.docker.swarm.internal":"true"},"DriverConfiguration":{},"IPAMOptions":{"Driver":{},"Configs":[{"Subnet":"10.255.0.0/16","Gateway":"10.255.0.1"}]}},"DriverState":{"Name":"overlay","Options":{"com.docker.network.driver.overlay.vxlanid_list":"256"}},"IPAMOptions":{"Driver":{"Name":"default"},"Configs":[{"Subnet":"10.255.0.0/16","Gateway":"10.255.0.1"}]}},"Addresses":"10.255.0.10/16"}]},{"ID":"1yljwbmlr8er2waf8orvqpwms","Version":{"Index":30},"CreatedAt":"2016-06-07T21:07:30.019104782Z","UpdatedAt":"2016-06-07T21:07:30.231958098Z","Name":"hopeful_cori","Spec":{"ContainerSpec":{"Image":"redis"},"Resources":{"Limits":{},"Reservations":{}},"RestartPolicy":{"Condition":"any","MaxAttempts":0},"Placement":{}},"ServiceID":"9mnpnzenvg8p8tdbtq4wvbkcz","Slot":1,"NodeID":"60gvrl6tm78dmak4yl7srz94v","Status":{"Timestamp":"2016-06-07T21:07:30.202183143Z","State":"shutdown","Message":"shutdown","ContainerStatus":{"ContainerID":"1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213"}},"DesiredState":"shutdown","NetworksAttachments":[{"Network":{"ID":"4qvuz4ko70xaltuqbt8956gd1","Version":{"Index":18},"CreatedAt":"2016-06-07T20:31:11.912919
752Z","UpdatedAt":"2016-06-07T21:07:29.955277358Z","Spec":{"Name":"ingress","Labels":{"com.docker.swarm.internal":"true"},"DriverConfiguration":{},"IPAMOptions":{"Driver":{},"Configs":[{"Subnet":"10.255.0.0/16","Gateway":"10.255.0.1"}]}},"DriverState":{"Name":"overlay","Options":{"com.docker.network.driver.overlay.vxlanid_list":"256"}},"IPAMOptions":{"Driver":{"Name":"default"},"Configs":[{"Subnet":"10.255.0.0/16","Gateway":"10.255.0.1"}]}},"Addresses":"10.255.0.5/16"}]}]
NULL
## stevedore test fixture: the R objects that parsing the /tasks sample
## response above is expected to produce.  `data_frame()` is the package's
## internal helper, not tibble::data_frame().
## ContainerSpec shared by both tasks in the sample response:
container_spec <- list(
image = "redis",
labels = NULL,
command = character(0),
args = character(0),
hostname = NA_character_,
env = character(0),
dir = NA_character_,
user = NA_character_,
groups = character(0),
privileges = NULL,
tty = NA,
open_stdin = NA,
read_only = NA,
mounts = data_frame(
target = character(0),
source = character(0),
type = character(0),
read_only = logical(0),
consistency = character(0),
bind_options = I(list()),
volume_options = I(list()),
tmpfs_options = I(list())),
stop_signal = NA_character_,
stop_grace_period = NA_integer_,
health_check = NULL,
hosts = character(0),
dns_config = NULL,
secrets = data_frame(
file = I(list()),
secret_id = character(0),
secret_name = character(0)),
configs = data_frame(
file = I(list()),
config_id = character(0),
config_name = character(0)),
isolation = NA_character_)
## Full task Spec; the expected data frame below reuses this same object
## for both rows (spec = I(list(spec, spec))):
spec <- list(
plugin_spec = NULL,
container_spec = container_spec,
resources = list(
limits = list(
nano_cpus = NA_integer_,
memory_bytes = NA_integer_,
generic_resources = data_frame(
named_resource_spec = I(list()),
discrete_resource_spec = I(list()))),
reservation = NULL),
restart_policy = list(
condition = "any",
delay = NA_integer_,
max_attempts = 0L,
window = NA_integer_),
placement = list(
constraints = character(0),
preferences = data_frame(
spread = I(list())),
platforms = data_frame(architecture = character(0),
os = character(0))),
force_update = NA_integer_,
runtime = NA_character_,
networks = data_frame(
target = character(),
aliases = I(list())),
log_driver = NULL)
## Status of the first task ("running") in the sample response:
status1 <- list(
timestamp = "2016-06-07T21:07:31.290032978Z",
state = "running",
message = "started",
err = NA_character_,
container_status = list(
container_id = "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035",
pid = 677L,
exit_code = NA_integer_))
## Status of the second task ("shutdown"); no PID in the raw response,
## hence NA here:
status2 <- list(
timestamp = "2016-06-07T21:07:30.202183143Z",
state = "shutdown",
message = "shutdown",
err = NA_character_,
container_status = list(
container_id = "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213",
pid = NA_integer_,
exit_code = NA_integer_))
## Empty generic-resources table, shared by both rows:
generic_resources <- data_frame(
named_resource_spec = I(list()),
discrete_resource_spec = I(list()))
## Expected parse result: one row per task in the sample response.
data_frame(
id = c("0kzzo1i0y4jz6027t0k7aezc7", "1yljwbmlr8er2waf8orvqpwms"),
version = I(list(list(index = 71L), list(index = 30L))),
created_at = c("2016-06-07T21:07:31.171892745Z",
"2016-06-07T21:07:30.019104782Z"),
updated_at = c("2016-06-07T21:07:31.376370513Z",
"2016-06-07T21:07:30.231958098Z"),
name = c(NA_character_, "hopeful_cori"),
labels = I(list(character(0), character(0))),
spec = I(list(spec, spec)),
service_id = c("9mnpnzenvg8p8tdbtq4wvbkcz", "9mnpnzenvg8p8tdbtq4wvbkcz"),
slot = c(1L, 1L),
node_id = c("60gvrl6tm78dmak4yl7srz94v", "60gvrl6tm78dmak4yl7srz94v"),
assigned_generic_resources = I(list(generic_resources, generic_resources)),
status = I(list(status1, status2)),
desired_state = c("running", "shutdown"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vlist.R
\name{vlist}
\alias{vlist}
\title{Generation of a sublist}
\usage{
vlist(list, skip = NULL, first = NULL, select = NULL)
}
\arguments{
\item{list}{list you want to print details of}
\item{skip}{Skip first that many list-elements}
\item{first}{Only display first that many list-elements}
\item{select}{Display only selected list-elements}
}
\value{
Selected elements of a list
}
\description{
Internal function that copies a chosen subset of a list's entries into a
new list.
}
\examples{
data(ex_pop)
vlist(ex_pop$breeding[[1]], select=3:10)
}
| /man/vlist.Rd | no_license | cran/MoBPS | R | false | true | 643 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vlist.R
\name{vlist}
\alias{vlist}
\title{Generation of a sublist}
\usage{
vlist(list, skip = NULL, first = NULL, select = NULL)
}
\arguments{
\item{list}{list you want to print details of}
\item{skip}{Skip first that many list-elements}
\item{first}{Only display first that many list-elements}
\item{select}{Display only selected list-elements}
}
\value{
Selected elements of a list
}
\description{
Internal function that copies a chosen subset of a list's entries into a
new list.
}
\examples{
data(ex_pop)
vlist(ex_pop$breeding[[1]], select=3:10)
}
|
\name{getFoo}
\alias{gets}
\alias{gets,Rcpp_PDV-method}
\alias{gets,Rcpp_CPS-method}
\alias{getx}
\alias{getx,Rcpp_PDV-method}
\alias{getx,Rcpp_CPS-method}
\alias{gety}
\alias{gety,Rcpp_PDV-method}
\alias{gety,Rcpp_CPS-method}
\alias{getz}
\alias{getz,Rcpp_PDV-method}
\alias{getz,Rcpp_CPS-method}
\alias{getstate}
\alias{getstate,Rcpp_CPS-method}
\alias{getstatus}
\alias{getstatus,Rcpp_CPS-method}
\alias{getniter}
\alias{getniter,Rcpp_CPS-method}
\alias{getparams}
\alias{getparams,Rcpp_CTRL-method}
\title{
Extractor methods for reference class objects
}
\description{
Returns a member of reference class objects.
}
\usage{
\S4method{getx}{Rcpp_PDV}(object)
\S4method{getx}{Rcpp_CPS}(object)
\S4method{gety}{Rcpp_PDV}(object)
\S4method{gety}{Rcpp_CPS}(object)
\S4method{gets}{Rcpp_PDV}(object)
\S4method{gets}{Rcpp_CPS}(object)
\S4method{getz}{Rcpp_PDV}(object)
\S4method{getz}{Rcpp_CPS}(object)
\S4method{getstate}{Rcpp_CPS}(object)
\S4method{getstatus}{Rcpp_CPS}(object)
\S4method{getniter}{Rcpp_CPS}(object)
\S4method{getparams}{Rcpp_CTRL}(object)
}
\arguments{
\item{object}{An object of either reference-class \code{Rcpp_PDV} or
\code{Rcpp_CPS}, or \code{Rcpp_CTRL}.}
}
\value{
The relevant member object of the class.
}
\keyword{optimize}
| /man/getFoo.Rd | no_license | bpfaff/cccp | R | false | false | 1,290 | rd | \name{getFoo}
\alias{gets}
\alias{gets,Rcpp_PDV-method}
\alias{gets,Rcpp_CPS-method}
\alias{getx}
\alias{getx,Rcpp_PDV-method}
\alias{getx,Rcpp_CPS-method}
\alias{gety}
\alias{gety,Rcpp_PDV-method}
\alias{gety,Rcpp_CPS-method}
\alias{getz}
\alias{getz,Rcpp_PDV-method}
\alias{getz,Rcpp_CPS-method}
\alias{getstate}
\alias{getstate,Rcpp_CPS-method}
\alias{getstatus}
\alias{getstatus,Rcpp_CPS-method}
\alias{getniter}
\alias{getniter,Rcpp_CPS-method}
\alias{getparams}
\alias{getparams,Rcpp_CTRL-method}
\title{
Extractor methods for reference class objects
}
\description{
Returns a member of reference class objects.
}
\usage{
\S4method{getx}{Rcpp_PDV}(object)
\S4method{getx}{Rcpp_CPS}(object)
\S4method{gety}{Rcpp_PDV}(object)
\S4method{gety}{Rcpp_CPS}(object)
\S4method{gets}{Rcpp_PDV}(object)
\S4method{gets}{Rcpp_CPS}(object)
\S4method{getz}{Rcpp_PDV}(object)
\S4method{getz}{Rcpp_CPS}(object)
\S4method{getstate}{Rcpp_CPS}(object)
\S4method{getstatus}{Rcpp_CPS}(object)
\S4method{getniter}{Rcpp_CPS}(object)
\S4method{getparams}{Rcpp_CTRL}(object)
}
\arguments{
\item{object}{An object of either reference-class \code{Rcpp_PDV} or
\code{Rcpp_CPS}, or \code{Rcpp_CTRL}.}
}
\value{
The relevant member object of the class.
}
\keyword{optimize}
|
## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix: wrap a matrix in a list of four accessor closures that
## share one environment holding the matrix and a cached inverse.
##   set(y)          -- replace the stored matrix and drop the stale cache
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse in the cache
##   getinverse()    -- return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # matrix changed, so any cached inverse is stale
  }
  get <- function() x
  setinverse <- function(inverse) cached_inverse <<- inverse
  getinverse <- function() cached_inverse
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## cacheSolve: return the inverse of the matrix wrapped by the special
## "cache matrix" object `x` (built by makeCacheMatrix).  The inverse is
## computed with solve() at most once and stored back into the object, so
## repeated calls return the cached copy.
cacheSolve <- function(x) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
#inversion already done
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
#check if the matrix is square
if(nrow(data)!=ncol(data)){
## Non-square input: solve() would fail, so report and hand the original
## matrix back unchanged (note: this return value is NOT an inverse).
message("matrix not square")
return(data)
}
m <- solve(data)
## Store the freshly computed inverse so later calls hit the cache.
x$setinverse(m)
m
} | /cachematrix.R | no_license | RomainPhilippe/ProgrammingAssignment2 | R | false | false | 875 | r | ## Put comments here that give an overall description of what your
## functions do
## makeCacheMatrix: wrap a matrix in a list of four accessor closures that
## share one environment holding the matrix and a cached inverse.
##   set(y)          -- replace the stored matrix and drop the stale cache
##   get()           -- return the stored matrix
##   setinverse(inv) -- store a computed inverse in the cache
##   getinverse()    -- return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # matrix changed, so any cached inverse is stale
  }
  get <- function() x
  setinverse <- function(inverse) cached_inverse <<- inverse
  getinverse <- function() cached_inverse
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## cacheSolve: return the inverse of the matrix wrapped by the special
## "cache matrix" object `x` (built by makeCacheMatrix).  The inverse is
## computed with solve() at most once and stored back into the object, so
## repeated calls return the cached copy.
cacheSolve <- function(x) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
#inversion already done
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
#check if the matrix is square
if(nrow(data)!=ncol(data)){
## Non-square input: solve() would fail, so report and hand the original
## matrix back unchanged (note: this return value is NOT an inverse).
message("matrix not square")
return(data)
}
m <- solve(data)
## Store the freshly computed inverse so later calls hit the cache.
x$setinverse(m)
m
} |
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22801808222998e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615772818-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 362 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22801808222998e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parse_packet_set_ACTIVITY2.R,
% R/parse_packet_set_CAPSENSE&METADATA.R, R/parse_packet_set_EVENT.R,
% R/parse_packet_set_SENSOR_DATA.R, R/parse_packet_set_SENSOR_SCHEMA.R,
% R/parse_packet_set__generic_&_simple_methods.R
\name{parse_packet_set.ACTIVITY2}
\alias{parse_packet_set.ACTIVITY2}
\alias{capsense_payload}
\alias{parse_packet_set.CAPSENSE}
\alias{parse_packet_set.METADATA}
\alias{parse_packet_set.EVENT}
\alias{parse_packet_set.SENSOR_DATA}
\alias{parse_packet_set.SENSOR_SCHEMA}
\alias{parse_packet_set}
\alias{parse_packet_set.default}
\alias{parse_packet_set.PARAMETERS}
\alias{parse_packet_set.BATTERY}
\title{Parse all packets of a given type}
\usage{
\method{parse_packet_set}{ACTIVITY2}(set, log, tz = "UTC", verbose = FALSE,
info, events, ...)
capsense_payload(payload)
\method{parse_packet_set}{CAPSENSE}(set, log, tz = "UTC", verbose = FALSE,
...)
\method{parse_packet_set}{METADATA}(set, log, tz = "UTC", verbose = FALSE,
payload = NULL, ...)
\method{parse_packet_set}{EVENT}(set, log, tz = "UTC", verbose = FALSE,
info, ...)
\method{parse_packet_set}{SENSOR_DATA}(set, log, tz = "UTC",
verbose = FALSE, parameters, schema, ...)
\method{parse_packet_set}{SENSOR_SCHEMA}(set, log, tz = "UTC",
verbose = FALSE, payload = NULL, ...)
parse_packet_set(set, log, tz = "UTC", verbose = FALSE, ...)
\method{parse_packet_set}{default}(set, log, tz = "UTC", verbose = FALSE,
...)
\method{parse_packet_set}{PARAMETERS}(set, log, tz = "UTC", verbose = FALSE,
...)
\method{parse_packet_set}{BATTERY}(set, log, tz = "UTC", verbose = FALSE,
...)
}
\arguments{
\item{set}{the set of record headers corresponding to each packet}
\item{log}{the raw data from \code{log.bin}}
\item{tz}{character. The timezone to use}
\item{verbose}{logical. Print updates to console?}
\item{info}{the result of \code{\link{parse_info_txt}}}
\item{events}{the result of parsing EVENTS packets
(internal use)}
\item{...}{further arguments passed to methods}
\item{payload}{a raw vector containing packet payload}
\item{parameters}{A PARAMETERS object}
\item{schema}{A SENSOR_SCHEMA object}
}
\description{
Parse all packets of a given type
}
\keyword{internal}
| /man/parse_packet_set.Rd | permissive | Thulasi-vk/AGread | R | false | true | 2,266 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parse_packet_set_ACTIVITY2.R,
% R/parse_packet_set_CAPSENSE&METADATA.R, R/parse_packet_set_EVENT.R,
% R/parse_packet_set_SENSOR_DATA.R, R/parse_packet_set_SENSOR_SCHEMA.R,
% R/parse_packet_set__generic_&_simple_methods.R
\name{parse_packet_set.ACTIVITY2}
\alias{parse_packet_set.ACTIVITY2}
\alias{capsense_payload}
\alias{parse_packet_set.CAPSENSE}
\alias{parse_packet_set.METADATA}
\alias{parse_packet_set.EVENT}
\alias{parse_packet_set.SENSOR_DATA}
\alias{parse_packet_set.SENSOR_SCHEMA}
\alias{parse_packet_set}
\alias{parse_packet_set.default}
\alias{parse_packet_set.PARAMETERS}
\alias{parse_packet_set.BATTERY}
\title{Parse all packets of a given type}
\usage{
\method{parse_packet_set}{ACTIVITY2}(set, log, tz = "UTC", verbose = FALSE,
info, events, ...)
capsense_payload(payload)
\method{parse_packet_set}{CAPSENSE}(set, log, tz = "UTC", verbose = FALSE,
...)
\method{parse_packet_set}{METADATA}(set, log, tz = "UTC", verbose = FALSE,
payload = NULL, ...)
\method{parse_packet_set}{EVENT}(set, log, tz = "UTC", verbose = FALSE,
info, ...)
\method{parse_packet_set}{SENSOR_DATA}(set, log, tz = "UTC",
verbose = FALSE, parameters, schema, ...)
\method{parse_packet_set}{SENSOR_SCHEMA}(set, log, tz = "UTC",
verbose = FALSE, payload = NULL, ...)
parse_packet_set(set, log, tz = "UTC", verbose = FALSE, ...)
\method{parse_packet_set}{default}(set, log, tz = "UTC", verbose = FALSE,
...)
\method{parse_packet_set}{PARAMETERS}(set, log, tz = "UTC", verbose = FALSE,
...)
\method{parse_packet_set}{BATTERY}(set, log, tz = "UTC", verbose = FALSE,
...)
}
\arguments{
\item{set}{the set of record headers corresponding to each packet}
\item{log}{the raw data from \code{log.bin}}
\item{tz}{character. The timezone to use}
\item{verbose}{logical. Print updates to console?}
\item{info}{the result of \code{\link{parse_info_txt}}}
\item{events}{the result of parsing EVENTS packets
(internal use)}
\item{...}{further arguments passed to methods}
\item{payload}{a raw vector containing packet payload}
\item{parameters}{A PARAMETERS object}
\item{schema}{A SENSOR_SCHEMA object}
}
\description{
Parse all packets of a given type
}
\keyword{internal}
|
##### Fitting VBGF for Pristipomoides filamentosus with Mark Recapture Data
#### Written by: Stephen Scherrer with some code modified from Erik Franklin (2017)
#### Written: Feb - March 2018
#### Contact: scherrer@hawaii.edu
#### All Wrongs Preserved
#### Laslett et al 2004 implementation of Fabens Method using tagging data as well as length frequency and direct ageing data
##### Workspace Setup #####
## Clearing workspace
## NOTE(review): rm(list = ls()) only clears the global environment; it is
## tolerable in a standalone analysis script, but it makes this file unsafe
## to source() from an interactive session with other objects loaded.
rm(list = ls())
print('Opakapaka Growth Analysis')
initial_run_time = Sys.time()
print(initial_run_time)
## Setting a Script Timer
script_timer = proc.time()
## Declaring Directory Path
proj_dir = getwd()
# if(!"Okamoto_Mark_Recapture" %in% strsplit(proj_dir, '/')[[1]]){
# proj_dir = file.path(getwd(), "Okamoto_Mark_Recapture")
# }
data_dir = file.path(proj_dir, "data")
src_dir = file.path(proj_dir, "src")
results_dir = file.path(proj_dir, "results")
## Creating a run specific results folder
## (folder name embeds the start time so repeated runs do not collide)
run_results_dir = file.path(results_dir, paste('run', initial_run_time))
dir.create(run_results_dir)
print(paste('proj_dir:', proj_dir))
print(paste('src_dir:', src_dir))
print(paste('run_results_dir:', run_results_dir))
## Installing Principle Dependencies
print('Installing principle dependencies')
# library('notifyR') # ## send_push()
library('doParallel')
# library('beepr')
library('mixtools')
## Sourcing R Scripts provided by Eveson/Laslett
## (presumably these define logl.ssnl.f, logl.oto.f and logl.lf.f used by
## joint.logl.f below -- confirm against the sourced files)
print('Sourcing files')
source(file.path(src_dir, "Laslett Functions/joint_lkhd.r"))
source(file.path(src_dir, "Laslett Functions/growth_functions.r"))
source(file.path(src_dir, "Laslett Functions/tag_lkhd.r"))
## Reading in literature parameter values
print('Loading Data')
lit_vbgc_params = read.csv(file.path(data_dir, "Parameter Estimates.csv"), stringsAsFactors = FALSE)
lit_vbgc_params = lit_vbgc_params[!is.na(lit_vbgc_params$Linf), ]
colnames(lit_vbgc_params) = c('author', 'n', 'linf', 'k', 't0', 'region', 'method')
## Row 21 is dropped here deliberately -- reason not stated; TODO confirm
## which literature estimate this excludes.
lit_vbgc_params = lit_vbgc_params[c(1:20, 22:25), ]
## Assigning cores for parallel processing
registerDoParallel(cores = detectCores()-1)
##### Defining Utility Functions #####
# Modifying our JOINT LIKELIHOOD function to accept more than one set of a given data type:
## Joint negative log-likelihood over up to two tagging datasets, four
## otolith (direct-ageing) datasets and two length-frequency datasets,
## each with its own weight.  A component contributes only when its weight
## is positive, so unused datasets may be left NULL.
joint.logl.f <- function(param, npf, npA, tagdat = NULL, tagdat2 = NULL, otodat = NULL,
                         otodat2 = NULL, otodat3 = NULL, otodat4 = NULL, lfdat = NULL,
                         lfdat2 = NULL, wt.oto = 0, wt.oto2 = 0, wt.oto3 = 0,
                         wt.oto4 = 0, wt.tag = 0, wt.tag2 = 0, wt.lf = 0, wt.lf2 = 0)
{
  ## Weight / likelihood-function / dataset triples.  The functions are held
  ## by name and resolved lazily via match.fun(), so a zero-weight component
  ## is never evaluated (mirroring the guarded calls of the original).
  components <- list(
    list(wt = wt.tag,  fn = "logl.ssnl.f", dat = tagdat),
    list(wt = wt.tag2, fn = "logl.ssnl.f", dat = tagdat2),
    list(wt = wt.oto,  fn = "logl.oto.f",  dat = otodat),
    list(wt = wt.oto2, fn = "logl.oto.f",  dat = otodat2),
    list(wt = wt.oto3, fn = "logl.oto.f",  dat = otodat3),
    list(wt = wt.oto4, fn = "logl.oto.f",  dat = otodat4),
    list(wt = wt.lf,   fn = "logl.lf.f",   dat = lfdat),
    list(wt = wt.lf2,  fn = "logl.lf.f",   dat = lfdat2)
  )
  neglogl <- 0
  for (component in components) {
    if (component$wt > 0) {
      loglik.fn <- match.fun(component$fn)
      neglogl <- neglogl + component$wt * loglik.fn(param, npf, npA, component$dat)
    }
  }
  return(neglogl)
}
## Predicting length at recapture
## Predict length at recapture from length at marking using the Fabens
## (increment) form of the von Bertalanffy growth function:
##     Lr = Lm + (linf - Lm) * (1 - exp(-k * dt))
##   Lm   -- length at marking
##   dt   -- time at liberty
##   linf -- asymptotic length (default presumably a fitted value for
##           P. filamentosus -- confirm against the fitting results)
##   k    -- growth coefficient (same caveat as linf)
##   a    -- unused by the increment form; retained for call compatibility
##           with the alternative age-based parameterization
predict_recapture_length = function(Lm, dt, linf = 65.95546, k = 0.2369113, a = 0){
  growth_increment = (linf - Lm) * (1 - exp(-k * dt))
  return(Lm + growth_increment)
}
calculate_predictive_variance = function(Lm, dt, linf, k, Lr_obs){
  ## Mean squared deviation between predicted and observed recapture lengths,
  ## using predict_recapture_length() for the predictions.
  predicted = predict_recapture_length(Lm = Lm, dt = dt, linf = linf, k = k)
  squared_errors = (predicted - Lr_obs)^2
  return(sum(squared_errors) / length(Lr_obs))
}
std_error = function(x){
  #### Standard error of the mean of x: sqrt(var / n).
  n = length(x)
  return(sqrt(var(x) / n))
}
#### Determining how long it takes to reach a threshold % of Linf under each model
yrs_to_.9_linf = function(linf, k, a0 = 0, threshold = 0.90){
  ## Years for the von Bertalanffy curve to reach `threshold` * Linf:
  ## solving threshold * linf = linf * (1 - exp(-k * (t - a0))) for t gives
  ## t = -log(1 - threshold) / k + a0.
  ## Note that linf cancels algebraically; the original wrote
  ## log(1 - (linf * threshold / linf)), which obscured this and added
  ## needless floating-point round-off. The argument is kept only for
  ## interface compatibility with existing callers.
  t = log(1 - threshold) / (-1 * k) + a0
  return(t)
}
#### Bootstrapping functions
### A function to bootstrap length-frequency data
lf_boot = function(pseudo_data){
  ## Nonparametric bootstrap of pseudo length-frequency records: within each
  ## month bin, resample the fish caught that month with replacement, keeping
  ## the monthly sample sizes fixed.
  ## NOTE(review): seeding from the clock makes every call non-reproducible
  ## by design; confirm this is intended before relying on repeatable runs.
  set.seed(as.integer(Sys.time()))
  ## BUG FIX: the original tested exists('pseudo_data$curr_month_year'),
  ## which looks up an object literally named "pseudo_data$curr_month_year"
  ## and is therefore always FALSE, so an existing curr_month_year column was
  ## always clobbered with month_year. Check the column itself instead.
  if(is.null(pseudo_data$curr_month_year)){
    pseudo_data$curr_month_year = pseudo_data$month_year
  }
  ## For each month bin, resample that month's fish with replacement
  boot_lf_dat = NULL
  for(bin in unique(pseudo_data$curr_month_year)){
    monthly_pseudo_data = pseudo_data[pseudo_data$curr_month_year == bin, ]
    monthly_n_fish = nrow(monthly_pseudo_data)
    resampled_rows = sample(x = seq_len(monthly_n_fish), size = monthly_n_fish, replace = TRUE)
    boot_lf_dat = rbind(boot_lf_dat, monthly_pseudo_data[resampled_rows, ])
  }
  return(boot_lf_dat)
}
## A function for bootstrap sampling with replacement
bootstrap_growth = function(boot_iterations = 10000, tagdat = NULL, tagdat2 = NULL, otodat = NULL, otodat2 = NULL, otodat3 = NULL, otodat4 = NULL, pseudolf = NULL, pseudolf2 = NULL, wt.oto = 1, wt.oto2 = 0, wt.oto3 = 0, wt.oto4 = 0, wt.tag = 1, wt.tag2 = 0, wt.lf = 1, wt.lf2 = 0){
  ## Bootstrap the joint growth-model fit: on each iteration, resample every
  ## supplied data set row-wise with replacement, refit with nlminb, and
  ## collect the parameter vector (NAs when the fit fails to converge).
  ## Relies on globals p0, lb, ub, npf, npA and on joint.logl.f,
  ## length_freq_decomp and lf_boot defined elsewhere in this script.
  ## Returns a data.frame with one row per iteration and 10 named parameters.
  boot_param_ests = foreach(iter = 1:boot_iterations, .combine = rbind) %dopar% {
    ## Resampling data (sample sizes fixed at the originals)
    if(!is.null(tagdat)){boot.tagdat = tagdat[sample(nrow(tagdat), size = nrow(tagdat), replace = TRUE), ]}else{boot.tagdat = NULL}
    ## BUG FIX: the original indexed `tagdat` here instead of `tagdat2`
    if(!is.null(tagdat2)){boot.tagdat2 = tagdat2[sample(nrow(tagdat2), size = nrow(tagdat2), replace = TRUE), ]}else{boot.tagdat2 = NULL}
    if(!is.null(otodat)){boot.otodat = otodat[sample(nrow(otodat), size = nrow(otodat), replace = TRUE), ]}else{boot.otodat = NULL}
    if(!is.null(otodat2)){boot.otodat2 = otodat2[sample(nrow(otodat2), size = nrow(otodat2), replace = TRUE), ]}else{boot.otodat2 = NULL}
    if(!is.null(otodat3)){boot.otodat3 = otodat3[sample(nrow(otodat3), size = nrow(otodat3), replace = TRUE), ]}else{boot.otodat3 = NULL}
    if(!is.null(otodat4)){boot.otodat4 = otodat4[sample(nrow(otodat4), size = nrow(otodat4), replace = TRUE), ]}else{boot.otodat4 = NULL}
    boot.lfdat = NULL
    if(!is.null(pseudolf)){
      ## Retry until the mixture decomposition succeeds (normalmixEM can fail
      ## on a given resample); try() yields a "try-error" object on failure.
      while(!inherits(boot.lfdat, 'data.frame')){
        boot.lfdat = try(length_freq_decomp(lf_boot(pseudolf)), silent = TRUE)
      }
    }
    boot.lfdat2 = NULL
    if(!is.null(pseudolf2)){
      ## BUG FIX: the original bootstrapped `pseudolf` here instead of
      ## `pseudolf2`, and lacked the try() guard used for the first set.
      while(!inherits(boot.lfdat2, 'data.frame')){
        boot.lfdat2 = try(length_freq_decomp(lf_boot(pseudolf2)), silent = TRUE)
      }
    }
    ## Refitting MLE object on the resampled data
    boot.fit.vb = try(nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=boot.tagdat, tagdat2 = boot.tagdat2, otodat=boot.otodat, otodat2 = boot.otodat2, otodat3 = boot.otodat3, otodat4 = boot.otodat4, lfdat=boot.lfdat, lfdat2 = boot.lfdat2, wt.oto=wt.oto, wt.oto2=wt.oto2, wt.oto3=wt.oto3, wt.oto4=wt.oto4, wt.tag=wt.tag, wt.tag2=wt.tag2, wt.lf=wt.lf, wt.lf2=wt.lf2), silent = TRUE)
    ## Writing out parameters if model converged, otherwise writing out NAs
    if(is.list(boot.fit.vb)){
      return(boot.fit.vb$par)
    } else {
      return(rep(NA, 10))
    }
  }
  boot_param_ests = as.data.frame(boot_param_ests)
  colnames(boot_param_ests) = c('mu.L', 'sig.L', 'k', 'mu.A', 'sig.A', 'sig.sci', 'sig.f', 'a0', 'sig.oto', 'sig.lf')
  return(boot_param_ests)
}
## A function for producing summary statistics from the bootstrap parameter distributions
calc_boot_stats = function(boot_param_ests){
  ## Median and percentile (2.5%, 97.5%) bootstrap confidence bounds for each
  ## parameter column. Non-converged iterations (NA in the first column) are
  ## dropped first. Returns the transpose: rows 'Median', '2.5%', '97.5%',
  ## one column per parameter.
  ## NOTE: percentile positions are computed as n * p, so at least 40
  ## retained rows are needed for the 2.5% index to reach 1 (callers enforce
  ## this via the boot_iterations >= 40 guard).
  boot_param_ests = boot_param_ests[!is.na(boot_param_ests[,1]), ]
  two.five.percent = dim(boot_param_ests)[1] * 0.025
  ninetyseven.five.percent = dim(boot_param_ests)[1] * 0.975
  ## Linear interpolation of the sorted values at fractional position q:
  ## x[floor(q)] is weighted by (ceiling(q) - q) and x[ceiling(q)] by
  ## (q - floor(q)), so the value nearer to q gets the larger weight.
  ## BUG FIX: the original had these two weights swapped.
  interp_at = function(sorted_vals, q){
    if(floor(q) != q){
      return(sorted_vals[floor(q)] * (ceiling(q) - q) + sorted_vals[ceiling(q)] * (q - floor(q)))
    } else {
      return(sorted_vals[q])
    }
  }
  boot_stats = data.frame(matrix(NA, nrow = dim(boot_param_ests)[2], ncol = 3), stringsAsFactors = FALSE)
  colnames(boot_stats) = c('Median', '2.5%', '97.5%')
  rownames(boot_stats) = colnames(boot_param_ests)
  for(i in 1:dim(boot_param_ests)[2]){
    sorted_vals = sort(boot_param_ests[ ,i])
    boot_stats[i, 1] = median(boot_param_ests[ ,i], na.rm = TRUE)
    boot_stats[i, 2] = interp_at(sorted_vals, two.five.percent)
    boot_stats[i, 3] = interp_at(sorted_vals, ninetyseven.five.percent)
  }
  return(t(boot_stats))
}
## A function to bring all bootstrapped function results together
bootstrap_growth_params = function(boot_iterations = 10000, filename = NULL, tagdat = NULL, tagdat2 = NULL, otodat = NULL, otodat2 = NULL, otodat3 = NULL, otodat4 = NULL, pseudolf = NULL, pseudolf2 = NULL, wt.oto = 1, wt.oto2 = 0, wt.oto3 = 0, wt.oto4 = 0, wt.tag = 1, wt.tag2 = 0, wt.lf = 1, wt.lf2 = 0){
  ## Driver around bootstrap_growth(): keeps rerunning until at least
  ## `boot_iterations` bootstrap fits have converged, reports the failure
  ## rate, summarizes with calc_boot_stats(), and optionally writes the
  ## summary plus raw draws as CSVs into run_results_dir (a global).
  ## Returns list(raw_boot_data, boot_stats, convergence_failure_rate).
  ## calc_boot_stats() needs >= 40 converged rows (40 * 0.025 = 1) for its
  ## percentile indexing to stay in bounds, hence the guard below.
  if(boot_iterations < 40){
    ## IMPROVED: the original print()ed a message and silently returned NULL;
    ## misuse is now a real error callers can catch.
    stop('boot_iterations must be >= 40', call. = FALSE)
  }
  ## First run initial bootstrapping pass
  booted_param_ests = bootstrap_growth(boot_iterations, tagdat, tagdat2, otodat, otodat2, otodat3, otodat4, pseudolf, pseudolf2, wt.oto, wt.oto2, wt.oto3, wt.oto4, wt.tag, wt.tag2, wt.lf, wt.lf2)
  ## Then rerun to fill in iterations that failed to converge. NA rows are
  ## kept in the table so the overall failure rate can be computed below.
  while(dim(booted_param_ests[!is.na(booted_param_ests$mu.L), ])[1] < boot_iterations){
    booted_param_ests = rbind(booted_param_ests, bootstrap_growth(boot_iterations = length(which(is.na(booted_param_ests$mu.L))), tagdat, tagdat2, otodat, otodat2, otodat3, otodat4, pseudolf, pseudolf2, wt.oto, wt.oto2, wt.oto3, wt.oto4, wt.tag, wt.tag2, wt.lf, wt.lf2))
  }
  ## What percentage failed to converge?
  convergence_failure_rate = (length(which(is.na(booted_param_ests$mu.L))) / dim(booted_param_ests)[1]) * 100
  print(paste('Convergence failure rate =', round(convergence_failure_rate, digits = 2), '%'))
  ## Getting summary stats and writing them out
  boot_stats = calc_boot_stats(booted_param_ests)
  if(!is.null(filename)){
    write.csv(boot_stats, file.path(run_results_dir, paste(filename, '.csv', sep = "")))
    write.csv(booted_param_ests, file.path(run_results_dir, paste(filename, '_raw.csv', sep = "")))
  }
  ## Writing out results
  results = list()
  results$raw_boot_data = booted_param_ests
  results$boot_stats = boot_stats
  results$convergence_failure_rate = convergence_failure_rate
  return(results)
}
##### Loading and Cleaning Data Files #####
#### Mark Recapture Data
## NOTE(review): data_dir is assumed to be defined earlier in this script.
mark_recapture_data = read.csv(file.path(data_dir, 'HO Mstr, temp (version 1).csv'), stringsAsFactors = FALSE)
### Renaming data columns
## One block of columns per recapture event (up to 4 recaptures per fish)
colnames(mark_recapture_data) = c('tag_date', 'location', 'station', 'depth_f', 'species', 'previously_tagged', 'tag_id','fork_length_in', 'remarks', 'recapture_1_date', 'recapture_1_location', 'recapture_1_station', 'recapture_1_depth_f', 'recapture_1_fork_length_in', 'weight_1_lbs', 'days_1_free', 'growth_1_in', 'distance_1_miles','retagged_1',
                                  'recapture_2_date', 'recapture_2_location', 'recapture_2_station', 'recapture_2_depth_f', 'recapture_2_fork_length_in', 'weight_2_lbs', 'days_2_free', 'growth_2_in', 'distance_2_miles', 'retagged_2',
                                  'recapture_3_date', 'recapture_3_location', 'recapture_3_station', 'recapture_3_depth_f', 'recapture_3_fork_length_in', 'weight_3_lbs', 'days_3_free', 'growth_3_in', 'distance_3_miles', 'retagged_3',
                                  'recapture_4_date', 'recapture_4_location', 'recapture_4_station', 'recapture_4_depth_f', 'recapture_4_fork_length_in', 'weight_4_lbs', 'days_4_free', 'growth_4_in', 'distance_4_miles', 'x_retagged')
## How many total fish do we have in the data set?
dim(mark_recapture_data)[1] # 4245!
### Subsetting out only Opakapaka with tag IDs - That is, fish that were marked
mark_recapture_data = mark_recapture_data[mark_recapture_data$species == '1' & mark_recapture_data$tag_id != '', ]
dim(mark_recapture_data)[1] # This gets you to the previously published 4179 tagged paka number from Kobayashi, Okamoto, & Oishi . for some reason doesn't exclude fish marked 'died'
#### Adjusting Data Classes
### Formatting Dates (Converting Characters to POSIXct)
## NOTE(review): assumes ISO "%Y-%m-%d" date strings; entries that do not
## parse silently become NA.
mark_recapture_data$tag_date = as.POSIXct(mark_recapture_data$tag_date, format = "%Y-%m-%d")
mark_recapture_data$recapture_1_date = as.POSIXct(mark_recapture_data$recapture_1_date, format = "%Y-%m-%d")
mark_recapture_data$recapture_2_date = as.POSIXct(mark_recapture_data$recapture_2_date, format = "%Y-%m-%d")
mark_recapture_data$recapture_3_date = as.POSIXct(mark_recapture_data$recapture_3_date, format = "%Y-%m-%d")
mark_recapture_data$recapture_4_date = as.POSIXct(mark_recapture_data$recapture_4_date, format = "%Y-%m-%d")
### Formatting fork lengths (inches -> centimeters)
## Note: There are a couple fork lengths that have ?, *, or have no lengths recorded.
## I have no idea what these are but they're qualifiers and so I'm going to let them go to NA and get dropped from analysis
in_to_cm = 2.54
mark_recapture_data$fork_length_cm = as.numeric(mark_recapture_data$fork_length_in) * in_to_cm
mark_recapture_data$recapture_1_fork_length_cm = as.numeric(mark_recapture_data$recapture_1_fork_length_in) * in_to_cm
mark_recapture_data$recapture_2_fork_length_cm = as.numeric(mark_recapture_data$recapture_2_fork_length_in) * in_to_cm
mark_recapture_data$recapture_3_fork_length_cm = as.numeric(mark_recapture_data$recapture_3_fork_length_in) * in_to_cm
mark_recapture_data$recapture_4_fork_length_cm = as.numeric(mark_recapture_data$recapture_4_fork_length_in) * in_to_cm
#### Now we want to format a table with lm (length at marking), lr (length at recapture), and dt (elapsed time)
### Note: If a fish was recaptured multiple times, there is a single entry for that individual corrosponding to the length at initial marking and the length at final recapture
## NOTE(review): rbind inside a loop is O(n^2); fine at this data size but
## worth preallocating if the data set grows.
paka_growth = data.frame(stringsAsFactors = FALSE)
for(i in 1:length(mark_recapture_data$tag_id)){
  ## Walk recapture events from last (4) to first (1) and keep the latest one
  ## that has a recorded fork length.
  if(!is.na(mark_recapture_data$recapture_4_fork_length_cm[i])){
    paka_growth = rbind(paka_growth, data.frame('tag_id' = mark_recapture_data$tag_id[i], 'Lm' = mark_recapture_data$fork_length_cm[i], 'Lr' = mark_recapture_data$recapture_4_fork_length_cm[i], 'tm' = mark_recapture_data$tag_date[i], 'tr' = mark_recapture_data$recapture_4_date[i], 'n_recaptures' = 4, stringsAsFactors = FALSE))
  }else if(!is.na(mark_recapture_data$recapture_3_fork_length_cm[i])){
    paka_growth = rbind(paka_growth, data.frame('tag_id' = mark_recapture_data$tag_id[i], 'Lm' = mark_recapture_data$fork_length_cm[i], 'Lr' = mark_recapture_data$recapture_3_fork_length_cm[i], 'tm' = mark_recapture_data$tag_date[i], 'tr' = mark_recapture_data$recapture_3_date[i], 'n_recaptures' = 3, stringsAsFactors = FALSE))
  }else if(!is.na(mark_recapture_data$recapture_2_fork_length_cm[i])){
    paka_growth = rbind(paka_growth, data.frame('tag_id' = mark_recapture_data$tag_id[i], 'Lm' = mark_recapture_data$fork_length_cm[i], 'Lr' = mark_recapture_data$recapture_2_fork_length_cm[i], 'tm' = mark_recapture_data$tag_date[i], 'tr' = mark_recapture_data$recapture_2_date[i], 'n_recaptures' = 2, stringsAsFactors = FALSE))
  }else if(!is.na(mark_recapture_data$recapture_1_fork_length_cm[i])){
    paka_growth = rbind(paka_growth, data.frame('tag_id' = mark_recapture_data$tag_id[i], 'Lm' = mark_recapture_data$fork_length_cm[i], 'Lr' = mark_recapture_data$recapture_1_fork_length_cm[i], 'tm' = mark_recapture_data$tag_date[i], 'tr' = mark_recapture_data$recapture_1_date[i], 'n_recaptures' = 1, stringsAsFactors = FALSE))
  }
}
## Time at liberty in days; abs() guards against swapped mark/recapture dates
paka_growth$dt = abs(difftime(paka_growth$tm, paka_growth$tr, units = "days"))
paka_growth$dt = as.numeric(paka_growth$dt) / 365 # Converting to years
### Constructing derived variable dl (change in growth)
paka_growth$dL = paka_growth$Lr - paka_growth$Lm
### Removing any fish that have a NA value for dL or dt (There is a single fish which had no tagging length and 7 fish with no recapture dates)
length(which(is.na(paka_growth$dL))) # 1
length(which(is.na(paka_growth$dt))) # 7
paka_growth = paka_growth[!is.na(paka_growth$dL) & !is.na(paka_growth$dt), ]
## Tally how many fish fall in each recapture-count category
n_recaps = data.frame('recapture events' = unique(paka_growth$n_recaptures), 'n_fish' = 0)
i = 1
for(n_recap in unique(paka_growth$n_recaptures)){
  n_recaps$n_fish[i] = dim(paka_growth[paka_growth$n_recaptures == n_recap, ])[1]
  i = i+1
}
#### Creating a subset data frame that removes recording errors in length and time
# paka_growth = subset(paka_growth, dL > 0)
## Drop fish at liberty for less than 60 days (measurement error dominates)
length(which(paka_growth$dt < 60/365)) #46
paka_growth = subset(paka_growth, dt >= 60/365)
## Matrix layout expected by the tagging likelihood; column 3 and L2measurer
## are zero-filled placeholders.
tagdat = as.matrix(data.frame('L1' = paka_growth$Lm, "L2" = paka_growth$Lr, " " = rep(0, length(paka_growth$Lr)), "dt" = paka_growth$dt, "L2measurer" = rep(0, length(paka_growth$Lr))))
#### Creating Second tagging dataset from PIFG data
pifg20072013 = read.csv(file.path(data_dir, 'PIFG 2007-2013.csv'), stringsAsFactors = FALSE)
pifg20072013$rel_date = as.POSIXct(pifg20072013$rel_date, format = "%m/%d/%Y")
pifg20072013$recap_date = as.POSIXct(pifg20072013$recap_date, format = "%m/%d/%Y")
## NOTE(review): no units= given, so difftime picks units automatically; the
## division by 365 further down assumes the result is in days -- confirm.
pifg20072013$dt = difftime(pifg20072013$recap_date, pifg20072013$rel_date)
### 2014-2015 data
pifg20142015 = read.csv(file.path(data_dir, 'PIFG 2014-2015.csv'), stringsAsFactors = FALSE)
pifg20142015$rel_date = as.POSIXct(pifg20142015$rel_date, format = "%m/%d/%Y")
pifg20142015$recap_date = as.POSIXct(pifg20142015$recap_date, format = "%m/%d/%Y")
## Convert any records measured in inches to centimeters
pifg20142015$rel_FL[pifg20142015$Units == 'in'] = pifg20142015$rel_FL[pifg20142015$Units == 'in'] * in_to_cm
pifg20142015$recap_FL[pifg20142015$Units == 'in'] = pifg20142015$recap_FL[pifg20142015$Units == 'in'] * in_to_cm
## NOTE(review): same unitless difftime assumption as above.
pifg20142015$dt = difftime(pifg20142015$recap_date, pifg20142015$rel_date)
## Same matrix layout as tagdat: L1, L2, placeholder, dt (years), L2measurer
pifg_data = data.frame('L1' = c(pifg20072013$rel_FL, pifg20142015$rel_FL), 'L2' = c(pifg20072013$recap_FL, pifg20142015$recap_FL), " " = rep(0, length(c(pifg20072013$recap_FL, pifg20142015$recap_FL))), 'dt' = c(pifg20072013$dt, pifg20142015$dt) / 365, "L2measurer" = rep(0, length(c(pifg20072013$recap_FL, pifg20142015$recap_FL))))
## Removing any pifg data with time at liberty < 60 days
pifg_data = pifg_data[pifg_data$dt >= 60/365, ]
tagdat2 = pifg_data
#### Otolith Data (Ralston and Miyamoto 1983, DeMartini et al. 1994, Andrews et al. 2012)
otodat = read.csv(file.path(data_dir, "RalstonMiyamotoandDemartiniAndrews.csv"))
colnames(otodat) = c("age", "len", "source")
#### Length Frequency Data
### Extrapolated from Moffitt and Parrish 1996 using earlier version of manuscript (1994)
### Monthly fish length counts were extrapolated from histograms in paper first by adjusting rotation so histograms were 'level', then fitting equally spaced bars for each n across y axis and comparing bar heights
### I ended up with one more record than they do but pretty close! (1048 vs. 1047). number next to each month is the total number of fish for that month estimated from the histograms. * means this number was estimated a second time and matched
## Each frame: count of fish ('val') per 1-cm length bin (6-24 cm) for one sampling month
oct_1989 = data.frame('date' = as.POSIXct('1989-10-01'), 'val' = c(0, 0, 1, 2, 1, 7, 1, 2, 1, 8, 7, 18, 5, 3, 0, 0, 0, 0, 0), len = 6:24) # 56 *
nov_1989 = data.frame('date' = as.POSIXct('1989-11-01'), 'val' = c(0, 0, 1, 4, 11, 7, 6, 4, 4, 3, 11, 12, 5, 1, 0, 0, 0, 0, 0), len = 6:24) # 69 *
jan_1990 = data.frame('date' = as.POSIXct('1990-01-01'), 'val' = c(0, 0, 0, 1, 6, 10, 12, 13, 20, 8, 1, 5, 2, 6, 3, 5, 1, 0, 1), len = 6:24) # 94 *
feb_1990 = data.frame('date' = as.POSIXct('1990-02-01'), 'val' = c(0, 0, 0, 0, 0, 4, 20, 26, 22, 10, 8, 3, 3, 5, 2, 0,0,0,0), len = 6:24) # 103 *
mar_1990 = data.frame('date' = as.POSIXct('1990-03-01'), 'val' = c(0, 0, 0, 0, 1, 1, 20, 14, 27, 14, 8, 4, 0, 0, 0, 0, 0, 0, 0), len = 6:24) # 89 *
apr_1990 = data.frame('date' = as.POSIXct('1990-04-01'), 'val' = c(0, 0, 0, 0, 0, 1, 6, 17, 17, 15, 14, 4, 4, 3, 0, 0, 0, 0, 0), len = 6:24) # 81 *
jun_1990 = data.frame('date' = as.POSIXct('1990-06-01'), 'val' = c(0, 0, 0, 0, 0, 0, 2, 13, 26, 19, 24, 13, 3, 3, 0, 1, 0, 0, 0), len = 6:24) # 104 *
aug_1990 = data.frame('date' = as.POSIXct('1990-08-01'), 'val' = c(0, 0, 0, 0, 0, 0, 1, 2, 6, 23, 26, 28, 9, 8, 2, 0, 0, 0, 0), len = 6:24) # 105 *
sep_1990 = data.frame('date' = as.POSIXct('1990-09-01'), 'val' = c(0, 0, 0, 0, 0, 0, 0, 1, 2, 5, 22, 27, 25, 7, 3, 4, 3, 1, 1), len = 6:24) # 101 *
oct_1990 = data.frame('date' = as.POSIXct('1990-10-01'), 'val' = c(0, 0, 0, 0, 1, 0, 0, 2, 6, 17, 17, 15, 7, 5, 5, 0, 0, 0, 0), len = 6:24) # 75
nov_1990 = data.frame('date' = as.POSIXct('1990-11-01'), 'val' = c(0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 3, 8, 5, 2, 1, 0, 0, 0, 0), len = 6:24) # 26 *
jan_1991 = data.frame('date' = as.POSIXct('1991-01-01'), 'val' = c(0, 0, 0, 0, 0, 0, 0, 3, 2, 0, 12, 32, 30, 8, 3, 0, 0, 0, 0), len = 6:24) # 90 *
feb_1991 = data.frame('date' = as.POSIXct('1991-02-01'), 'val' = c(0, 0, 0, 0, 0, 0, 0, 0, 5, 7, 1, 6, 12, 14, 10, 0, 0, 0, 0), len = 6:24) # 55 *
## Combinging All of this data together into a single dataset
tagging_data = rbind(oct_1989, nov_1989, jan_1990, feb_1990, mar_1990, apr_1990, jun_1990, aug_1990, sep_1990, oct_1990, nov_1990, jan_1991, feb_1991)
## Making up pseudo tagging records. Columns are: date fish was caught, and length of the fish.
## Expand the binned counts into one row per individual fish
pseudo_data = data.frame()
for(i in 1:length(tagging_data$date)){
  if(tagging_data$val[i] != 0){
    pseudo_data = rbind(pseudo_data, data.frame('date' = rep(tagging_data$date[i], times = tagging_data$val[i]), 'len' = rep(tagging_data$len[i], times = tagging_data$val[i])))
  }
}
colnames(pseudo_data) = c('date', 'length')
## Stripping out the month and year from each date object, then creating a new variable that is the month and year smushed together (matches histograms in moffit & parrish)
## NOTE(review): months() returns locale-dependent month names; downstream
## comparisons assume an English locale.
pseudo_data$month = months(pseudo_data$date)
pseudo_data$year = format(pseudo_data$date, "%Y")
pseudo_data$month_year = paste(pseudo_data$month, as.character(pseudo_data$year))
### Age at recruitment to juvenile fishing grounds.
## Peak spawning occurs in July according to Luers, Demartini, Humphreys 2017
## Difference between first sampling trip (Oct) and peak spawning (July) = 3 months
age_at_recruitment = 3/12
## Estimating starting means from data
## One row per sampling month (same order as the unique dates above); NA in
## column 2 means only one cohort is present that month.
start_means = rbind(c(1, 17), c(11, 18), c(13, 20), c(14, 20), c(15, NA), c(15, NA), c(16, NA), c(17, NA), c(16, NA), c(10, 16), c(13, 16), c(13, 18), c(15, 17))
constrain_means = rbind(c(11, 17),c(11, 18), c(14, 19), c(13, 19), c(14, NA), c(14, NA), c(14, NA), c(17, NA), c(15, NA), c(10, 16.5), c(13, 17), c(13, 17), c(15, 19))
## Creating a function that we can use later for bootstrapping
length_freq_decomp = function(pseudo_data, plot = FALSE, fixed_modes = FALSE){
  ## Decompose monthly length-frequency samples into cohort modes: for each
  ## sampling month, estimate the age, mean length, standard error of length,
  ## and number of fish for each cohort present. Oct/Nov/Jan/Feb carry two
  ## cohorts and are split with a two-component normal mixture
  ## (mixtools::normalmixEM); other months are treated as a single cohort.
  ## Relies on globals start_means, constrain_means (row i matches the i-th
  ## unique sampling date) and age_at_recruitment.
  ## NOTE(review): months() is locale-dependent; the English month-name
  ## comparison below assumes an English locale.
  lfdat = data.frame(stringsAsFactors = FALSE)
  for(i in 1:length(unique(pseudo_data$date))){
    curr_month_year = as.character(unique(pseudo_data$date)[i])
    month_data = pseudo_data[pseudo_data$date == curr_month_year, ]
    ## Mean age of recruited fish, assuming birth at peak spawning (1989-07-01).
    ## BUG FIX: difftime's third positional argument is tz, not units, so the
    ## original's bare "days" was silently consumed as a timezone; units is
    ## now passed by name.
    mode.age = as.numeric(difftime(unique(month_data$date), as.POSIXct('1989-07-01'), units = "days")) / 365 # in years
    mode.age = c(mode.age, mode.age + 1) # Assumption is that if two cohorts are present, the second is one year older than the first
    # When we get to the second year of data, YOY for first year becomes 1+ year old, new cohort recruits. Because we based age on difftime for first cohort, we need to remove 1 year from all ages
    if(min(mode.age) > (1 + age_at_recruitment)){
      mode.age = mode.age - 1
    }
    ## During Oct - Feb, two age cohorts present, so decompose two normal distributions from the data (Moffitt and Parrish 1996)
    if (months(unique(month_data$date)) %in% c('October', 'November', 'January', 'February')) {
      decomp = NULL
      ## Retry until normalmixEM converges; try() returns a "try-error" on
      ## failure. (A decomposition that can never succeed would loop forever
      ## -- preexisting behavior, unchanged here.)
      while(!inherits(decomp, 'mixEM')){
        k = 2 # Number of cohorts
        if(fixed_modes == TRUE){
          decomp = try(normalmixEM(month_data$length, mu = start_means[i, ], k = k, arbvar = TRUE, mean.constr = constrain_means[i, ]), silent = TRUE) # arbvar = FALSE would force both cohorts to have the same sigma. See justification for this in Laslett et. al 2004
        } else {
          decomp = try(normalmixEM(month_data$length, mu = start_means[i, ], k = k, arbvar = TRUE), silent = TRUE)
        }
      }
      mode.len = decomp$mu[order(decomp$mu)] # Sometimes things pop out in a weird order. We assume that the smaller size class is the younger cohort
      est.n = c(decomp$lambda * dim(month_data)[1])[order(decomp$mu)]
      mode.se = (decomp$sigma / sqrt(est.n))[order(decomp$mu)]
      lfdat = rbind(lfdat, cbind(mode.age, mode.len, mode.se, est.n, curr_month_year))
    } else {
      k = 1 # Number of cohorts
      est.n = length(month_data$length) # Number of fish in cohort
      ## Pick which of the two candidate ages applies to the single cohort.
      ## BUG FIX: the original compared the month as a *string* ("03" < 7),
      ## which is lexicographic and always TRUE for two-digit months, and its
      ## threshold of 7 contradicted its own "october" comment. New recruits
      ## appear in October, so months before October carry the younger class.
      ## NOTE(review): threshold 10 preserves the original's realized
      ## behavior on these data (single-cohort months are all Mar-Sep) while
      ## matching the documented intent -- confirm against the analysis plan.
      if (as.numeric(format(month_data$date, "%m")[1]) < 10) { # before October (month that new recruits show up)
        mode.age = mode.age[1] # Younger year class: fish are less than 1 year past recruitment
      } else {
        mode.age = mode.age[2] # Older year class (YOY have not recruited yet)
      }
      mode.len = mean(month_data$length)
      mode.se = sd(month_data$length) / sqrt(est.n)
      if(fixed_modes == TRUE){
        mode.len = constrain_means[i, ][which(!is.na(constrain_means[i, ]))]
        mode.se = sqrt(sum((mode.len - month_data$length)^2)/(est.n - 1)) / est.n
      }
      lfdat = rbind(lfdat, cbind(mode.age, mode.len, mode.se, est.n, curr_month_year))
    }
  }
  ## cbind() with the character curr_month_year coerces every column to
  ## character (factors under pre-4.0 R); convert the numeric columns back.
  ## BUG FIX: the original only handled the factor case, so under R >= 4.0
  ## (stringsAsFactors = FALSE) mode.age stayed character and the sort below
  ## was lexicographic rather than numeric.
  if(is.factor(lfdat$mode.age)){
    lfdat$mode.age = as.numeric(levels(lfdat$mode.age)[lfdat$mode.age])
    lfdat$mode.len = as.numeric(levels(lfdat$mode.len)[lfdat$mode.len])
    lfdat$mode.se = as.numeric(levels(lfdat$mode.se)[lfdat$mode.se])
  }
  if(is.character(lfdat$mode.age)){
    lfdat$mode.age = as.numeric(lfdat$mode.age)
    lfdat$mode.len = as.numeric(lfdat$mode.len)
    lfdat$mode.se = as.numeric(lfdat$mode.se)
  }
  ## Sorting lfdat by the mean age. This makes it easier to visually inspect that fish are getting larger as they get older. This can get messed up during bimodal composition.
  lfdat = lfdat[order(lfdat$mode.age), ]
  if(plot){
    plot(x = lfdat$mode.age, y = lfdat$mode.len, pch = 19)
  }
  return(lfdat)
}
### Creating table of fitted components for Gaussian and Gaussian-mixture models
## length_freq_decomp() (defined earlier in this file) collapses monthly length-frequency
## samples into one row per cohort mode: mode.age, mode.len, mode.se, est.n, curr_month_year.
print('lfdat-ing')
lfdat = length_freq_decomp(pseudo_data, plot = TRUE, fixed_modes = TRUE)
##### Model Fitting #####
print('Fitting models')
## Accumulator: one row per fitted model (model label followed by the parameter vector).
results = data.frame(stringsAsFactors = FALSE)
#### Fitting VB model
## growth.ssnl.f is the growth function handle used by the joint likelihood.
growth.ssnl.f<- growthvb.f
npf <- 1 #number of parameters passed to growth.ssnl.f (in this case k)
npA <- 2 #number of parameters in distribution of release ages for tag model
#### Fitting each data stream individually
print('Estimating Model Parameters')
### 1. Mark Recapture Data
#Specifying starting parameters, as well as upper and lower bounds for parameter estimation
## Parameter order: mu.L, sig.L, k, mu.A, sig.A, sig.sci, sig.f, a0, sig.oto, sig.lf
## (parameters with lb == ub == 0 are effectively fixed at zero / unused for this data stream)
p0 <- c( 70, 1, .10, 1, .10, 1, 0, 0, 1, 0)
lb <- c( 50, 0.1, .05, 0.1, .05, 0.1, 0, 0, 0, 0)
ub <- c( 110, 15.0, .50, 1.5, .50, 15.0, 0, 0, 0, 0)
## Tagging-only fit: wt.tag=1, all other data streams weighted to zero.
fit.vb.tagging.all.data <- nlminb(p0,joint.logl.f,lower=lb,upper=ub,npf=npf,npA=npA,tagdat=tagdat,otodat=otodat,lfdat=lfdat, wt.oto=0,wt.tag=1,wt.lf=0)
results = rbind(results, cbind('Model 5 - Mark Recapture - All Data', t(as.vector(fit.vb.tagging.all.data$par))))
### 2. Length at Age Data
## Setting initial params for otolith data
## Parameter order: mu.L, sig.L, k, mu.A, sig.A, sig.sci, sig.f, a0, sig.oto, sig.lf
p0 <- c( 70, 1, .10, 1, .10, 1, 0, 0, 1, 1)
lb <- c( 50, 0.1, .05, 0.1, .05, 0.1, 0, -10, 0.1, 0.1)
ub <- c( 110, 15.0, .50, 1.5, .50, 15.0, 0, 10, 15, 15)
## Otolith-only fit: wt.oto=1, tagging and length-frequency streams weighted to zero.
fit.vb.oto <- nlminb(p0,joint.logl.f,lower=lb,upper=ub,npf=npf,npA=npA,tagdat=tagdat,otodat=otodat,lfdat=lfdat, wt.oto=1,wt.tag=0,wt.lf=0)
results = rbind(results, cbind('Length at Age', t(as.vector(fit.vb.oto$par))))
### 3a. Length Frequency Data - Unconstrained Linf
### First some notes about replicating Results of Moffitt and Parrish 1996 - ELEFAN model they used did not estimate a0. a0 is soaking up some of the observed variability that otherwise goes to K. In the function logl.lf.f within the script file joint_lkhd.r, uncommenting the line a0 = 0 will force this model.
### So which model is appropriate? Lets use AICc to find out
## Unconstrained Fit
## Setting initial params for length frequency data
## Parameter order: mu.L, sig.L, k, mu.A, sig.A, sig.sci, sig.f, a0, sig.oto, sig.lf
p0 <- c( 70, 1, .10, 1, .10, 1, 0, 0, 1, 1)
lb <- c( 50, 0.1, .05, 0.1, .05, 0.1, 0, -10, 0.1, 0.1)
ub <- c( 110, 15.0, .50, 1.5, .50, 15.0, 0, 10, 15, 15)
## Length-frequency-only fit: wt.lf=1, other streams weighted to zero.
fit.vb.lfu <- nlminb(p0,joint.logl.f,lower=lb,upper=ub,npf=npf,npA=npA,tagdat=tagdat,otodat=otodat,lfdat=lfdat, wt.oto=0, wt.tag=0, wt.lf=1)
results = rbind(results, cbind('Length Frequency (Unconstrained)', t(as.vector(fit.vb.lfu$par))))
### 3b. Length Frequency Data - Linf constrained by larger linf from oto/mr data
## Constraining Linf to a constant - In this case, maximum Linf from oto or mark recapture
## NOTE(review): the lic value computed in this if/else is dead code -- it is
## unconditionally overwritten by `lic = 78` below. Also, when results[,2] is a factor,
## max(levels(...)[...]) takes a lexicographic (character) maximum, not a numeric one.
if(is.factor(results[ ,2])){
lic = max(levels(results[ ,2])[results[ ,2]])
} else {
lic = max(results[ ,2]) # Note: second column is mu.L parameter (mean of Linf)
}
lic = 78 # Same as used by Moffitt and Parrish (1996); overrides the computed value above
## Setting initial params for length frequency data
## Parameter order: mu.L, sig.L, k, mu.A, sig.A, sig.sci, sig.f, a0, sig.oto, sig.lf
## mu.L is pinned to lic by setting p0 = lb = ub = lic for that parameter.
p0 <- c( lic, 1, .10, 1, .10, 1, 0, 0, 1, 1)
lb <- c( lic, 0.1, .05, 0.1, .05, 0.1, 0, -10, 0.1, 0.1)
ub <- c( lic, 15.0, .50, 1.5, .50, 15.0, 0, 10, 15, 15)
fit.vb.lfc <- nlminb(p0,joint.logl.f,lower=lb,upper=ub,npf=npf,npA=npA,tagdat=tagdat,otodat=otodat,lfdat=lfdat, wt.oto=0, wt.tag=0, wt.lf=1)
results = rbind(results, cbind('Length Frequency (Constrained)', t(as.vector(fit.vb.lfc$par))))
#### Is it appropriate to try to measure a0 using such limited data?
### Lets use AICc to find out
## AICc = 2k - 2log(L) + ((2k^2 + 2k) / (n-k-1)); the second terms below are NLLs, so they enter with +.
aicc_with_a0_and_sig.lf = 2*3 + 2*(40.02605) + ((2*3^2 + 2*3) / (21 - 3 - 1)) # 87.46386
aicc_without_a0 = 2*2 + 2*(62.30009) + ((2*2^2 + 2*2) / (21 - 2 - 1)) # 129.2668
aicc_without_sig.lf = 2*2 + 2*(359.5163) + ((2*2^2 + 2*2) / (21 - 2 - 1)) # 723.6993 (original inline value 359.5163 was the NLL, not the AICc)
aicc_without_a0_or_sig.lf = 2*1 + 2*(7565.03) + ((2*1^2 + 2*1) / (21 - 1 - 1)) # 15132.27 (original inline value 7565.03 was the NLL, not the AICc)
### Conclusion: Yes, definitely, because AICc with a0 and sig.lf is more than 40 units lower
### Setting initial params for all data
## Parameter order: mu.L, sig.L, k, mu.A, sig.A, sig.sci, sig.f, a0, sig.oto, sig.lf
p0 <- c( 70, 1, .10, 1, .10, 1, 0, 0, 1, 1)
lb <- c( 40, 0.01, .05, 0.1, .05, 0.1, 0, -10, 0.1, 0.1)
ub <- c( 110, 15.0, .50, 1.5, .50, 15.0, 0, 10, 15, 15)
### 6. Model including all Data sources - Equal weighting to each data type
## "Equal weighting" = each data stream's likelihood is divided by its sample size (1/n weights).
fit.vb.equalwt.grouped <- nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat, otodat=otodat, lfdat=lfdat, wt.oto=1/dim(otodat)[1], wt.tag=1/dim(tagdat)[1], wt.lf=1/length(lfdat$curr_month_year))
results = rbind(results, cbind('Model 6 - All Data - Equal Weighting', t(as.vector(fit.vb.equalwt.grouped$par))))
### 7. Model including all Data sources - weighting based on number of sample size
## "Weighted by n" = all weights 1, so larger samples dominate the joint likelihood.
fit.vb.byn.grouped <- nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat, otodat=otodat, lfdat=lfdat, wt.oto=1, wt.tag=1, wt.lf=1)
results = rbind(results, cbind('Model 7 - All Data - Weighted by n', t(as.vector(fit.vb.byn.grouped$par))))
### 8. Model including all Data sources treated individually - with equal weighting
## Otolith data is split into its four literature sources, each with its own 1/n weight.
fit.vb.equalwt.indv <- nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat, tagdat2 = NULL, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], lfdat=lfdat, lfdat2=NULL, wt.oto= 1/dim(otodat[otodat$source == 'demartini', ])[1], wt.oto2= 1/dim(otodat[otodat$source == 'ralston and miyamoto', ])[1], wt.oto3=1/dim(otodat[otodat$source == 'andrews bomb carbon', ])[1], wt.oto4=1/dim(otodat[otodat$source == 'andrews lead radium', ])[1], wt.tag = 1/dim(tagdat)[1], wt.tag2 = 0, wt.lf = 1/length(lfdat$curr_month_year), wt.lf2 = 0)
results = rbind(results, cbind('Model 8 - Separated Data - Equal Weighting', t(as.vector(fit.vb.equalwt.indv$par))))
### 9. Model including all Data sources treated individually - weighting based on number of sample size
fit.vb.byn.indv <- nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat, tagdat2 = NULL, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], lfdat=lfdat, lfdat2=NULL, wt.oto= 1, wt.oto2= 1, wt.oto3=1, wt.oto4=1, wt.tag = 1, wt.tag2 = 0, wt.lf = 1, wt.lf2 = 0)
results = rbind(results, cbind('Model 9 - Separated Data - Weighted by n', t(as.vector(fit.vb.byn.indv$par))))
### 10. Model without Ralston & Miyamoto - Equal weighting (wt.oto2 = 0 drops that source)
## NOTE(review): fit.vb.byn.indv.no.ralston is deliberately reused for models 10 and 11
## (and again below with PIFG); each fit's parameters are captured in `results` before overwrite.
fit.vb.byn.indv.no.ralston <- nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat, tagdat2 = NULL, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], lfdat=lfdat, lfdat2=NULL, wt.oto= 1/dim(otodat[otodat$source == 'demartini', ])[1], wt.oto2= 0, wt.oto3=1/dim(otodat[otodat$source == 'andrews bomb carbon', ])[1], wt.oto4=1/dim(otodat[otodat$source == 'andrews lead radium', ])[1], wt.tag = 1/dim(tagdat)[1], wt.tag2 = 0, wt.lf = 1/length(lfdat$curr_month_year), wt.lf2 = 0)
results = rbind(results, cbind('Model 10 - Separated Data - Equal Weighting - No R&M', t(as.vector(fit.vb.byn.indv.no.ralston$par))))
### 11. Model without Ralston & Miyamoto - weighted by n
fit.vb.byn.indv.no.ralston <- nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat, tagdat2 = NULL, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], lfdat=lfdat, lfdat2=NULL, wt.oto= 1, wt.oto2= 0, wt.oto3=1, wt.oto4=1, wt.tag = 1, wt.tag2 = 0, wt.lf = 1, wt.lf2 = 0)
results = rbind(results, cbind('Model 11 - Separated Data - Weighted by n - No R&M', t(as.vector(fit.vb.byn.indv.no.ralston$par))))
##### NOW WITH PIFG DATA
## Same model structures 6-11, but adding the second tagging data set (tagdat2, PIFG).
### 6. Model including all Data sources - Equal weighting to each data type
fit.vb.equalwt.grouped <- nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat, tagdat2 = tagdat2, otodat=otodat, lfdat=lfdat, wt.oto=1/dim(otodat)[1], wt.tag=1/dim(tagdat)[1], wt.tag2 = 1/dim(tagdat2)[1], wt.lf=1/length(lfdat$curr_month_year))
results = rbind(results, cbind('Model 6 - All Data - Equal Weighting + PIFG', t(as.vector(fit.vb.equalwt.grouped$par))))
### 7. Model including all Data sources - weighting based on number of sample size
fit.vb.byn.grouped <- nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat, tagdat2 = tagdat2, otodat=otodat, lfdat=lfdat, wt.oto=1, wt.tag=1, wt.tag2 = 1, wt.lf=1)
results = rbind(results, cbind('Model 7 - All Data - Weighted by n + PIFG', t(as.vector(fit.vb.byn.grouped$par))))
### 8. Model including all Data sources treated individually - with equal weighting
fit.vb.equalwt.indv <- nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat, tagdat2 = tagdat2, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], lfdat=lfdat, lfdat2=NULL, wt.oto= 1/dim(otodat[otodat$source == 'demartini', ])[1], wt.oto2= 1/dim(otodat[otodat$source == 'ralston and miyamoto', ])[1], wt.oto3=1/dim(otodat[otodat$source == 'andrews bomb carbon', ])[1], wt.oto4=1/dim(otodat[otodat$source == 'andrews lead radium', ])[1], wt.tag = 1/dim(tagdat)[1], wt.tag2 = 1/dim(tagdat2)[1], wt.lf = 1/length(lfdat$curr_month_year), wt.lf2 = 0)
results = rbind(results, cbind('Model 8 - Separated Data - Equal Weighting + PIFG', t(as.vector(fit.vb.equalwt.indv$par))))
### 9. Model including all Data sources treated individually - weighting based on number of sample size
fit.vb.byn.indv <- nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat, tagdat2 = tagdat2, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], lfdat=lfdat, lfdat2=NULL, wt.oto= 1, wt.oto2= 1, wt.oto3=1, wt.oto4=1, wt.tag = 1, wt.tag2 = 1, wt.lf = 1, wt.lf2 = 0)
results = rbind(results, cbind('Model 9 - Separated Data - Weighted by n + PIFG', t(as.vector(fit.vb.byn.indv$par))))
### 10. Model without Ralston & Miyamoto - Equal weighting
fit.vb.byn.indv.no.ralston <- nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat, tagdat2 = tagdat2, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], lfdat=lfdat, lfdat2=NULL, wt.oto= 1/dim(otodat[otodat$source == 'demartini', ])[1], wt.oto2= 0, wt.oto3=1/dim(otodat[otodat$source == 'andrews bomb carbon', ])[1], wt.oto4=1/dim(otodat[otodat$source == 'andrews lead radium', ])[1], wt.tag = 1/dim(tagdat)[1], wt.tag2 = 1/dim(tagdat2)[1], wt.lf = 1/length(lfdat$curr_month_year), wt.lf2 = 0)
results = rbind(results, cbind('Model 10 - Separated Data - Equal Weighting - No R&M + PIFG', t(as.vector(fit.vb.byn.indv.no.ralston$par))))
### 11. Model without Ralston & Miyamoto - weighted by n
fit.vb.byn.indv.no.ralston <- nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat, tagdat2 = tagdat2, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], lfdat=lfdat, lfdat2=NULL, wt.oto= 1, wt.oto2= 0, wt.oto3=1, wt.oto4=1, wt.tag = 1, wt.tag2 = 1, wt.lf = 1, wt.lf2 = 0)
results = rbind(results, cbind('Model 11 - Separated Data - Weighted by n - No R&M + PIFG', t(as.vector(fit.vb.byn.indv.no.ralston$par))))
### Cleaning up results and writing them out to a .csv
colnames(results) = c('model_id', 'mu.L', 'sig.L', 'k', 'mu.A', 'sig.A', 'sig.sci', 'sig.f', 'a0', 'sig.oto', 'sig.lf')
## Derived column: years to reach 90% of Linf for each fitted model.
## NOTE(review): the levels()[...] conversion assumes these columns are factors
## (as produced by rbind-ing character + numeric under old stringsAsFactors defaults).
## If they are character (R >= 4.0 default), levels() returns NULL and this yields
## all-NA values -- confirm against the R version this script targets.
results$`time to 90%` = yrs_to_.9_linf(linf = as.numeric(levels(results$mu.L)[results$mu.L]), k = as.numeric(levels(results$k)[results$k]), a0 = as.numeric(levels(results$a0)[results$a0]))
print(results)
write.csv(results, file = file.path(run_results_dir, 'likelihood_parameter_estimates_with_full_data.csv'))
##### Determining the prefered model structure ####
print('Evaluating Model Structures')
## We will do this by comparing each model's parameters from training data to observations in validation data
## Model scoring metric is as follows: sum((predicted - observed)^2) / n
## Lower scoring metric indicates better model fit
## Training set sizes: two-thirds of each tagging data set (rounded).
n_train = round(dim(tagdat)[1] * (2/3))
n_train2 = round(dim(tagdat2)[1] * (2/3))
## evaluate_models: cross-validate the competing model structures.
##
## For each of `cross_validation_iterations` splits, refits model structures 5-11 on a
## random 2/3 training subset of the tagging data (tagdat + tagdat2) and scores each fit
## on the held-out 1/3 via calculate_predictive_variance(); literature and Bayesian VBGF
## parameter sets are scored on the same validation set for comparison.
##
## Relies on globals: tagdat, tagdat2, n_train, n_train2, otodat, lfdat, npf, npA,
## joint.logl.f, calculate_predictive_variance, lit_vbgc_params, and a registered
## parallel backend for foreach/%dopar%.
##
## Returns (invisibly): a matrix with one row per iteration; columns are the 7 model
## structures, then the literature models, then the 4 Bayesian models.
evaluate_models = function(cross_validation_iterations = 10000){
mod_eval_results = data.frame(stringsAsFactors = FALSE)
## NOTE(review): model_na_results is incremented inside the %dopar% body, but each
## parallel worker gets its own copy and only the returned score vector survives --
## these NA counts are silently discarded and never reach the caller.
model_na_results = rep(0, 7)
## Hawaii-region literature VBGF parameter sets, split into literature vs Bayesian models.
lit_vbgf = lit_vbgc_params[lit_vbgc_params$region %in% c('Hawaii - MHI & NWHI', 'Hawaii - MHI', 'Hawaii - NWHI'), ]
lit_vbgf_for_train = lit_vbgf[!(lit_vbgf$author %in% paste('Bayesian Model',1:4)), ]
bayes_models = lit_vbgf[(lit_vbgf$author %in% paste('Bayesian Model',1:4)), ]
mod_eval_results = foreach(i = 1:cross_validation_iterations, .combine = rbind) %dopar% {
## Random 2/3 train / 1/3 validate split of each tagging data set.
train_index = sample(1:dim(tagdat)[1], size = n_train, replace = FALSE)
tagdat_train = tagdat[train_index, ]
train2_index = sample(1:dim(tagdat2)[1], size = n_train2, replace = FALSE)
tagdat2_train = tagdat2[train2_index, ]
tagdat_validate = rbind(tagdat[-train_index, ], tagdat2[-train2_index, ])
score = rep(NA, 7)
### Setting intial params for all data
# Parameter order: mu.L, sig.L, k, mu.A, sig.A, sig.sci, sig.f, a0, sig.oto, sig.lf
p0 <- c( 70, 1, .10, 1, .10, 1, 0, .3, 1, 1)
lb <- c( 40, 0.01, .05, 0.01, .05, 0.01, 0, -.4, 0.01, 0.01)
ub <- c( 110, 15.0, .50, 1.5, .50, 15.0, 0, -.2, 15, 15)
## Model 5: tagging data only. try() keeps one failed nlminb from killing the iteration.
var5 = NULL
var5 = try(nlminb(p0,joint.logl.f,lower=lb,upper=ub,npf=npf,npA=npA,tagdat=tagdat_train, tagdat2 = tagdat2_train, wt.oto=0,wt.tag=1, wt.tag2 = 1, wt.lf=0)$par, silent = TRUE)
if(is.numeric(var5[1])){
score[1] = calculate_predictive_variance(Lm = tagdat_validate[ ,1], dt = tagdat_validate[ ,4], linf = var5[1], k = var5[3], Lr_obs = tagdat_validate[ ,2])
} else {
model_na_results[1] = model_na_results[1] + 1
}
## Wider bounds for the integrated models (k and a0 bounds relaxed from here on).
ub <- c( 110, 15.0, 10, 1.5, .50, 15.0, 0, 15, 15, 15)
## Model 6: all data, equal (1/n) weighting per stream.
var6 = NULL
var6 = try(nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat_train, tagdat2 = tagdat2_train, otodat=otodat, lfdat=lfdat, wt.oto=1/dim(otodat)[1], wt.tag=1/dim(tagdat_train)[1], wt.tag2=1/dim(tagdat2_train)[1], wt.lf=1/length(lfdat$curr_month_year))$par, silent = TRUE)
if(is.numeric(var6[1])){
score[2] = calculate_predictive_variance(Lm = tagdat_validate[ ,1], dt = tagdat_validate[ ,4], linf = var6[1], k = var6[3], Lr_obs = tagdat_validate[ ,2])
} else {
model_na_results[2] = model_na_results[2] + 1
}
## Model 7: all data, weighted by n (all weights 1).
var7 = NULL
var7 = try(nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat_train, tagdat2 = tagdat2_train, otodat=otodat, lfdat=lfdat, wt.oto=1, wt.tag=1, wt.tag2 = 1, wt.lf=1)$par, silent = TRUE)
if(is.numeric(var7[1])){
score[3] = calculate_predictive_variance(Lm = tagdat_validate[ ,1], dt = tagdat_validate[ ,4], linf = var7[1], k = var7[3], Lr_obs = tagdat_validate[ ,2])
} else {
model_na_results[3] = model_na_results[3] + 1
}
## Model 8: otolith sources separated, equal (1/n) weighting.
var8 = NULL
var8 = try(nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat_train, tagdat2 = tagdat2_train, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], lfdat=lfdat, lfdat2=NULL, wt.oto= 1/dim(otodat[otodat$source == 'demartini', ])[1], wt.oto2= 1/dim(otodat[otodat$source == 'ralston and miyamoto', ])[1], wt.oto3=1/dim(otodat[otodat$source == 'andrews bomb carbon', ])[1], wt.oto4=1/dim(otodat[otodat$source == 'andrews lead radium', ])[1], wt.tag = 1/dim(tagdat_train)[1], wt.tag2 = 1/dim(tagdat2_train)[1], wt.lf = 1/length(lfdat$curr_month_year), wt.lf2 = 0)$par, silent = TRUE)
if(is.numeric(var8[1])){
score[4] = calculate_predictive_variance(Lm = tagdat_validate[ ,1], dt = tagdat_validate[ ,4], linf = var8[1], k = var8[3], Lr_obs = tagdat_validate[ ,2])
} else {
model_na_results[4] = model_na_results[4] + 1
}
## Model 9: otolith sources separated, weighted by n.
var9 = NULL
var9 = try(nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat_train, tagdat2 = tagdat2_train, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], lfdat=lfdat, lfdat2=NULL, wt.oto= 1, wt.oto2= 1, wt.oto3=1, wt.oto4=1, wt.tag = 1, wt.tag2 = 1, wt.lf = 1, wt.lf2 = 0)$par, silent = TRUE)
if(is.numeric(var9[1])){
score[5] = calculate_predictive_variance(Lm = tagdat_validate[ ,1], dt = tagdat_validate[ ,4], linf = var9[1], k = var9[3], Lr_obs = tagdat_validate[ ,2])
} else {
model_na_results[5] = model_na_results[5] + 1
}
## Model 10: as model 8 but Ralston & Miyamoto excluded (wt.oto2 = 0).
var10 = NULL
var10 = try(nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat_train, tagdat2 = tagdat2_train, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], lfdat=lfdat, lfdat2=NULL, wt.oto= 1/dim(otodat[otodat$source == 'demartini', ])[1], wt.oto2= 0, wt.oto3=1/dim(otodat[otodat$source == 'andrews bomb carbon', ])[1], wt.oto4=1/dim(otodat[otodat$source == 'andrews lead radium', ])[1], wt.tag = 1/dim(tagdat_train)[1], wt.tag2=1/dim(tagdat2_train)[1], wt.lf = 1/length(lfdat$curr_month_year), wt.lf2 = 0)$par, silent = TRUE)
if(is.numeric(var10[1])){
score[6] = calculate_predictive_variance(Lm = tagdat_validate[ ,1], dt = tagdat_validate[ ,4], linf = var10[1], k = var10[3], Lr_obs = tagdat_validate[ ,2])
} else {
model_na_results[6] = model_na_results[6] + 1
}
## Model 11: as model 9 but Ralston & Miyamoto excluded (wt.oto2 = 0).
var11 = NULL
var11 = try(nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat_train, tagdat2 = tagdat2_train, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], lfdat = lfdat, lfdat2 = NULL, wt.oto = 1, wt.oto2= 0, wt.oto3=1, wt.oto4=1, wt.tag = 1, wt.tag2 = 1, wt.lf = 1, wt.lf2 = 0)$par, silent = TRUE)
if(is.numeric(var11[1])){
score[7] = calculate_predictive_variance(Lm = tagdat_validate[ ,1], dt = tagdat_validate[ ,4], linf = var11[1], k = var11[3], Lr_obs = tagdat_validate[ ,2])
} else {
model_na_results[7] = model_na_results[7] + 1
}
## Now getting fits from literature data
## NOTE(review): this inner `i` shadows the foreach iteration variable `i`
## (harmless here because the outer `i` is not used again, but confusing).
lit_var_scores = rep(0, length(lit_vbgf_for_train$author))
for(i in 1:length(lit_vbgf_for_train$author)){
lit_var_scores[i] = calculate_predictive_variance(Lm = tagdat_validate[ ,1], dt = tagdat_validate[ ,4], linf = lit_vbgf_for_train$linf[i], k = lit_vbgf_for_train$k[i], Lr_obs = tagdat_validate[ ,2])
}
## Comparing bootstrapped model to Bayes_models
## NOTE(review): this overwrites the author-based bayes_models subset computed above
## with hard-coded rows 21:24 of lit_vbgc_params -- verify the two selections agree.
bayes_models = lit_vbgc_params[21:24, ]
bayes_var_scores = rep(0, length(bayes_models$author))
for(i in 1:length(bayes_models$author)){
bayes_var_scores[i] = calculate_predictive_variance(Lm = tagdat_validate[ ,1], dt = tagdat_validate[ ,4], linf = bayes_models$linf[i], k = bayes_models$k[i], Lr_obs = tagdat_validate[ ,2])
}
## One row per iteration: 7 model scores, literature scores, then Bayesian scores.
return(c(score, lit_var_scores, bayes_var_scores))
}
colnames(mod_eval_results) = c(paste('model', 5:11), lit_vbgf_for_train$author, bayes_models$author)
return(invisible(mod_eval_results))
}
#### What was the preferred model?
## Run the cross-validation defined above; rows = iterations, columns = models.
n_iterations = 10000
mod_eval_results = as.data.frame(evaluate_models(cross_validation_iterations = n_iterations))
## Reshape the 7 likelihood-model columns (models 5-11) to long form, one row per
## (model, iteration) pair, so scores can be compared with a boxplot.
mod_eval_results_lf = as.data.frame(t(mod_eval_results[ ,1:7]))
mod_eval_results_lf$model_id = rownames(mod_eval_results_lf)
mod_eval_results_lf = reshape(mod_eval_results_lf, varying = colnames(mod_eval_results_lf[1:n_iterations]), idvar = 'model_id', direction = "long")
## (removed a redundant self-assignment of mod_eval_results_lf here)
boxplot(mod_eval_results_lf$result ~ mod_eval_results_lf$model_id, ylim = c(0, 15))
#### Declaring the best model - The model that has the lowest mean evaluation result
## First finding best structure for integrative model
## For each CV iteration, record which integrative structure (models 6-11) scored lowest.
integrative_models = mod_eval_results[ ,2:7]
integrative_model_scores = c()
for(i in 1:dim(integrative_models)[1]){
integrative_model_scores = c(integrative_model_scores, names(which.min(integrative_models[i, ])))
}
## Which model was most frequently the best one?
best_integrative_model = names(which.max(table(integrative_model_scores)))
## Now comparing best integrative model to just tagging data
integrative_vs_tagging = mod_eval_results[ ,which(colnames(mod_eval_results) %in% c('model 5', best_integrative_model))]
integrative_vs_tagging_model_scores = c()
for(i in 1:dim(integrative_models)[1]){
integrative_vs_tagging_model_scores = c(integrative_vs_tagging_model_scores, names(which.min(integrative_vs_tagging[i, ])))
}
## Overall winner: whichever of {model 5, best integrative} wins the most iterations.
best_model = names(which.max(table(integrative_vs_tagging_model_scores)))
pdf(file.path(run_results_dir, 'Barplot of tagging vs. best integrative model.pdf'), width = 11, height = 8.5)
barplot(prop.table(table(integrative_vs_tagging_model_scores)))
dev.off()
### Comparing against all lit models for the region
## Column layout of mod_eval_results: 1-7 likelihood models, 8-18 literature, 19-22 Bayesian.
nll_names = colnames(mod_eval_results)[1:7]
lit_names = colnames(mod_eval_results)[8:18]
bayes_names = colnames(mod_eval_results)[19:22]
lit_vs_int_vs_bayes = mod_eval_results[ ,colnames(mod_eval_results) %in% c(best_integrative_model, nll_names, bayes_names)]
lit_vs_int_vs_bayes_scores = c()
for(i in 1:dim(integrative_models)[1]){
lit_vs_int_vs_bayes_scores = c(lit_vs_int_vs_bayes_scores, names(which.min(lit_vs_int_vs_bayes[i, ])))
}
best_model_lit_bayes_integrated = names(which.max(table(lit_vs_int_vs_bayes_scores)))
pdf(file.path(run_results_dir, 'Barplot of lit vs. bayes vs. best integrative model.pdf'), width = 11, height = 8.5)
par(las = 2)
barplot(prop.table(table(lit_vs_int_vs_bayes_scores)))
dev.off()
##### Computing Model Comparison Stats #####
print('Computing Model Comparision Stats')
### Subsetting model structures 6-11
nll_eval_results = mod_eval_results[, 2:7]
## Determining the number of NA iterations (iterations where any structure failed to converge)
na_index = c()
for(i in 1:length(nll_eval_results[ ,1])){
if(any(is.na(nll_eval_results[i, ]))){
na_index = c(na_index, i)
}
}
na_index = unique(na_index)
### How many iterations failed to converge?
print(paste('Iterations failing to converge:', length(na_index)))
## NOTE(review): the NA rows are counted but deliberately NOT removed (line below left
## commented out); downstream stats use na.rm instead.
# nll_eval_results = nll_eval_results[-na_index, ]
## Getting summary stats for NLL models
print('Summary stats of competing model structures')
## Note: range() returns a length-2 vector, so paste() prints two strings (min, max).
print(paste('Range: ', range(nll_eval_results, na.rm = TRUE)))
nll_vec = as.vector(nll_eval_results)
nll_vec = nll_vec[!is.na(nll_vec)]
print(paste('mean:', mean(nll_vec)))
print(paste('standard deviation:', sd(nll_vec)))
### Determining which model performed best over all iterations
best_models = c()
for(i in 1:dim(nll_eval_results)[1]){
best_models = c(best_models, names(which.min(nll_eval_results[i, ])))
}
print('Best Models')
table(best_models)
### Getting stats on the best performing model
print('Summary Stats for prefered integrated model')
best_nll_mod = mod_eval_results[ ,best_model]
print(paste('range:', range(best_nll_mod, na.rm = TRUE)))
print(paste('mean:', mean(best_nll_mod, na.rm = TRUE)))
print(paste('standard deviation:', sd(best_nll_mod, na.rm = TRUE)))
### Getting stats on the model based only on tagging data
print('Summary Stats for Tagging Only Model (Model 5)')
mod_5 = as.vector(mod_eval_results[ ,'model 5'])
print(paste('range:', range(mod_5, na.rm = TRUE)))
print(paste('mean:', mean(mod_5[!is.na(mod_5)])))
print(paste('standard deviation:', sd(mod_5[!is.na(mod_5)])))
### Comparing the preferred model to the tagging data only model
print('Comparing prefered integrative and tagging only models')
## Per-iteration head-to-head: which of the two models had the lower score.
tagging_vs_composite_df = cbind(mod_eval_results$`model 5`, mod_eval_results[ ,best_model])
colnames(tagging_vs_composite_df) = c('model 5', best_model)
tagging_vs_composite = c()
for(i in 1:length(tagging_vs_composite_df[ ,1])){
tagging_vs_composite = c(tagging_vs_composite, colnames(tagging_vs_composite_df)[which.min(tagging_vs_composite_df[i, ])])
}
table(tagging_vs_composite)
### Summary stats on tagging and integrative models
## Positive differences mean the integrative model predicted better than tagging-only.
pred_var_diff_tvc = tagging_vs_composite_df[ ,1] - tagging_vs_composite_df[ ,2]
print(paste('range in predicteve difference:', range(pred_var_diff_tvc, na.rm = TRUE)))
print(paste('mean:', mean(pred_var_diff_tvc, na.rm = TRUE)))
print(paste('standard deviation:', sd( as.vector(pred_var_diff_tvc)[!is.na(as.vector(pred_var_diff_tvc))])))
#### Getting summary stats on all literature models
print('Summary Statistics for Literature Models')
lit = mod_eval_results[, 8:18]
print(paste('range:', range(lit, na.rm = TRUE)))
lit_vec = as.vector(lit)
lit_vec = lit_vec[!is.na(lit_vec)]
print(paste('mean:', mean(lit_vec)))
print(paste('Standard Deviation:', sd(lit_vec)))
## Comparing Literature, MLE, and Bayesian models
print('Comparing Literature, MLE, and Bayesian Models')
model_structure_selection = data.frame()
## NOTE(review): `pref_mod` is not defined anywhere in this chunk -- presumably it is
## the preferred-model column(s) of mod_eval_results set earlier/elsewhere (cf.
## best_nll_mod = mod_eval_results[ ,best_model] above). Confirm it exists before this
## loop runs, otherwise both this line and the min() below will error.
nll_names = colnames(pref_mod)
lit_names = colnames(mod_eval_results)[8:18]
bayes_names = colnames(mod_eval_results)[19:22]
## Per iteration: best score within each camp (MLE ensemble, literature, Bayesian),
## then the overall winner across the three camps.
for(i in 1:length(mod_eval_results[ ,1])){
score_ens = min(pref_mod[i], na.rm = TRUE)
best_ens = best_model
score_lit = min(mod_eval_results[i,8:18], na.rm = TRUE)
best_lit = lit_names[which(mod_eval_results[i,8:18] == score_lit)]
score_bayes = min(mod_eval_results[i,19:22], na.rm = TRUE)
best_bayes = bayes_names[which(mod_eval_results[i,19:22] == score_bayes)]
best_overall = c('MLE', 'Lit', 'Bayes')[which.min(c(score_ens, score_lit, score_bayes))]
best_mod = c(best_ens, best_lit, best_bayes)[which.min(c(score_ens, score_lit, score_bayes))]
write_line = data.frame('best_ll_mod' = best_ens, 'score_ensemble' = score_ens, 'best_lit_mod' = best_lit, 'score_lit' = score_lit, 'best_bayes_mod' = best_bayes, 'score_bayes' = score_bayes, 'best_model' = best_overall, 'best_mod' = best_mod)
model_structure_selection = rbind(model_structure_selection, write_line)
}
## Tally how often each literature model won; the most frequent winner is "best".
lit_eval_results_table = aggregate(model_structure_selection$best_lit_mod, by = list(model_structure_selection$best_lit_mod), FUN = length)
best_lit_mod = lit_eval_results_table$Group.1[which.max(lit_eval_results_table$x)]
### Getting summary stats on the best performing literature model
print('Summary Stats of best performing lit mod')
best_lit = mod_eval_results[ ,as.character(best_lit_mod)]
print(paste('range:', range(best_lit, na.rm = TRUE)))
best_lit_vec = as.vector(best_lit)
best_lit_vec = best_lit_vec[!is.na(best_lit_vec)]
print(paste('mean:', mean(best_lit_vec)))
print(paste('standard deviation:', sd(best_lit_vec)))
## Getting summary stats for the second best performing literature model
print('Summary Stats of second best performing literature model')
second_best_lit_mod = as.character(lit_eval_results_table$Group.1[order(lit_eval_results_table$x, decreasing = TRUE)[2]])
second_best_lit = mod_eval_results[ ,as.character(second_best_lit_mod)]
second_best_lit_vec = as.vector(second_best_lit)
print(paste('range:', range(second_best_lit_vec, na.rm = TRUE)))
print(paste('mean:', mean(second_best_lit_vec, na.rm = TRUE)))
print(paste('standard deviation:', sd(second_best_lit_vec, na.rm = TRUE)))
## Write results out
save.image(file = file.path(run_results_dir, 'workspace_image_preboot.RData'))
##### Bootstrapping tagging only and prefered models #####
print('Bootstrapping model 5 and prefered model')
boot_iterations = 10000
bootstrap_results = list()
### We'll begin by bootstrapping Model 5 (just tagging data)
## Specifying starting parameters, as well as upper and lower bounds for parameter estimation
# mu.L, sig.L, k, mu.A, sig.A, sig.sci, sig.f, a0, sig.oto, sig.lf
p0 <- c( 70, 1, .10, 1.0, .10, 1, 0, 0, 0, 0)
lb <- c( 50, 0.1, .05, 0.1, .05, 0.1, 0, 0, 0, 0)
ub <- c( 110, 15.0, .50, 1.5, .50, 15.0, 0, 0, 0, 0)
print('Booting Model 5')
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = "model 5")
timer5full = proc.time()
bootstrap_results$booted_param_ests_model5 = bootstrap_growth_params(filename = 'bootstrapped_parameter_estimates_model_5', boot_iterations = boot_iterations, wt.oto = 0, wt.lf = 0, wt.tag = 1, tagdat = tagdat)
bootstrap_results$booted_param_ests_model5withPIFG = bootstrap_growth_params(filename = 'bootstrapped_parameter_estimates_model_5', boot_iterations = boot_iterations, wt.oto = 0, wt.lf = 0, wt.tag = 1, tagdat = tagdat, tagdat2 = tagdat2, wt.tag2 = 1)
bootstrap_results$booted_param_ests_model5justPIFG = bootstrap_growth_params(filename = 'bootstrapped_parameter_estimates_model_5', boot_iterations = boot_iterations, wt.oto = 0, wt.lf = 0, wt.tag = 0, tagdat = tagdat, tagdat2 = tagdat2, wt.tag2 = 1)
boot_time = (proc.time() - timer5full)[3] / 60 / 60
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = paste(round(boot_time, digits = 2), "Hours later, bootstrapping model 5 complete!"))
### Now we'll bootstrap the prefered model structure
## Setting intial params for all data
# mu.L, sig.L, k, mu.A, sig.A, sig.sci, sig.f, a0, sig.oto, sig.lf
p0 <- c( 70, 1, .10, 1, .1, 1, 0, 0, 1, 1)
lb <- c( 50, 0.1, .05, 0.1, .05, 0.1, 0, -10, 0.1, 0.1)
ub <- c( 110, 15.0, .50, 1.5, .50, 15.0, 0, 10, 15, 15)
#if (mod_eval_results_table[which.max(mod_eval_results_table[ ,2]), 1] == 'model 6') {
## 6. Model including all Data sources - Equal weighting to each data type
print('Booting Model 6')
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = "model 6")
timer6 = proc.time()
bootstrap_results$booted_param_ests_model6 = bootstrap_growth_params(filename = 'bootstrapped_parameter_estimates_model_6',boot_iterations = boot_iterations, wt.oto = 1/length(otodat$age), wt.lf = 1/length(lfdat$curr_month_year), wt.tag = 1/dim(tagdat)[1], otodat = otodat, tagdat = tagdat, pseudolf = pseudo_data)
boot_time = (proc.time() - timer6)[3] / 60 / 60
# send_push(user = 'uGEHvA4hr36tsrCCtpSv4sUUxVuTqN', message = paste(round(boot_time, digits = 2), "Hours later, bootstrapping model 6 complete!"))
#} else if (mod_eval_results_table[which.max(mod_eval_results_table[ ,2]), 1] == 'model 7') {
## 7. Model including all Data sources - weighting based on number of sample size
print('Booting Model 7')
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = "model 7")
timer7 = proc.time()
bootstrap_results$booted_param_ests_model7 = bootstrap_growth_params(filename = 'bootstrapped_parameter_estimates_model_7_all_data', boot_iterations = boot_iterations,tagdat=tagdat, otodat=otodat, pseudolf=pseudo_data, wt.oto=1, wt.tag=1, wt.lf=1)
boot_time = (proc.time() - timer7)[3] / 60 / 60
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = paste(round(boot_time, digits = 2), "Hours later, bootstrapping model 7 complete!"))
#} else if (mod_eval_results_table[which.max(mod_eval_results_table[ ,2]), 1] == 'model 8') {
## 8. Model including all Data sources treated individually - with equal weighting
print('Booting Model 8')
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = "model 8")
timer8 = proc.time()
# BUG FIX (models 8 and 10): wt.lf previously referenced `pseudolf$curr_month_year`,
# but `pseudolf` is only an argument name inside bootstrap_growth_params, not an
# object in this (calling) scope, so lazy evaluation of wt.lf raised
# "object 'pseudolf' not found". Weight by the global `lfdat` instead, matching model 6.
bootstrap_results$booted_param_ests_model8 = bootstrap_growth_params(filename = 'bootstrapped_parameter_estimates_model_8_all_data', boot_iterations = boot_iterations, tagdat=tagdat, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], pseudolf=pseudo_data, pseudolf2=NULL, wt.oto= 1/dim(otodat[otodat$source == 'demartini', ])[1], wt.oto2= 1/dim(otodat[otodat$source == 'ralston and miyamoto', ])[1], wt.oto3=1/dim(otodat[otodat$source == 'andrews bomb carbon', ])[1], wt.oto4=1/dim(otodat[otodat$source == 'andrews lead radium', ])[1], wt.tag = 1/dim(tagdat)[1], wt.lf = 1/length(lfdat$curr_month_year), wt.lf2 = 0)
boot_time = (proc.time() - timer8)[3] / 60 / 60
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = paste(round(boot_time, digits = 2), "Hours later, bootstrapping model 8 complete!"))
#} else if (mod_eval_results_table[which.max(mod_eval_results_table[ ,2]), 1] == 'model 9') {
## 9. Model including all Data sources treated individually - weighting based on number of sample size
print('Booting Model 9')
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = "model 9")
timer9 = proc.time()
bootstrap_results$booted_param_ests_model9 = bootstrap_growth_params(filename = 'bootstrapped_parameter_estimates_model_9', boot_iterations = boot_iterations, tagdat=tagdat, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], pseudolf=pseudo_data, pseudolf2=NULL, wt.oto= 1, wt.oto2= 1, wt.oto3=1, wt.oto4=1, wt.tag = 1, wt.lf = 1, wt.lf2 = 0)
boot_time = (proc.time() - timer9)[3] / 60 / 60
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = paste(round(boot_time, digits = 2), "Hours later, bootstrapping model 9 complete!"))
#} else if (mod_eval_results_table[which.max(mod_eval_results_table[ ,2]), 1] == 'model 10') {
## 10. Model without Ralston & Miyamoto - Equal weighting (Because Brett said this was shit!)
print('Booting Model 10')
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = "model 10")
timer10 = proc.time()
bootstrap_results$booted_param_ests_model10 = bootstrap_growth_params(filename = 'bootstrapped_parameter_estimates_model_10', boot_iterations = boot_iterations, tagdat=tagdat, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], pseudolf=pseudo_data, pseudolf2 = NULL, wt.oto= 1/dim(otodat[otodat$source == 'demartini', ])[1], wt.oto2= 0, wt.oto3=1/dim(otodat[otodat$source == 'andrews bomb carbon', ])[1], wt.oto4=1/dim(otodat[otodat$source == 'andrews lead radium', ])[1], wt.tag = 1/dim(tagdat)[1], wt.lf = 1/length(lfdat$curr_month_year), wt.lf2 = 0)
boot_time = (proc.time() - timer10)[3] / 60 / 60
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = paste(round(boot_time, digits = 2), "Hours later, bootstrapping model 10 complete!"))
#} else if (mod_eval_results_table[which.max(mod_eval_results_table[ ,2]), 1] == 'model 11') {
### 11. Model without Ralston & Miyamoto - weighted by n (Because Brett said this was shit!)
print('Booting Model 11')
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = "model 11")
timer11 = proc.time()
# BUG FIX: this result was previously stored under booted_param_ests_model12,
# although the comment, print() and output filename all say model 11.
bootstrap_results$booted_param_ests_model11 = bootstrap_growth_params(filename = 'bootstrapped_parameter_estimates_model_11', boot_iterations = boot_iterations, tagdat=tagdat, tagdat2 = tagdat2, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], pseudolf=pseudo_data, pseudolf2=NULL, wt.oto= 1, wt.oto2= 0, wt.oto3=1, wt.oto4=1, wt.tag = 1, wt.tag2 = 1, wt.lf = 1, wt.lf2 = 0)
boot_time = (proc.time() - timer11)[3] / 60 / 60
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = paste(round(boot_time, digits = 2), "Hours later, bootstrapping model 11 complete!"))
#}
save.image(file = file.path(src_dir, 'workspace_image.RData'))
# [dataset-export metadata, commented out so the file parses as R:
#  /Analysis/src/Integrative_Growth_Model_Analysis.R | permissive |
#  stevescherrer/Ch4-Opakapaka-Growth | R | 64,711 bytes]
##### Fitting VBGF for Pristipomoides filamentosus with Mark Recapture Data
#### Written by: Stephen Scherrer with some code modified from Erik Franklin (2017)
#### Written: Feb - March 2018
#### Contact: scherrer@hawaii.edu
#### All Wrongs Preserved
#### Laslett et al 2004 implementation of Fabens Method using tagging data as well as length frequency and direct ageing data
##### Workspace Setup #####
## Clearing workspace
## NOTE(review): rm(list = ls()) wipes the interactive session; acceptable for a
## standalone batch script, but hostile when sourced into an existing session.
rm(list = ls())
print('Opakapaka Growth Analysis')
initial_run_time = Sys.time()  # timestamp used to name the per-run results folder
print(initial_run_time)
## Setting a Script Timer
script_timer = proc.time()
## Declaring Directory Path
## Project layout assumed: <proj_dir>/data, <proj_dir>/src, <proj_dir>/results.
## The script must therefore be launched with the working directory at the
## project root.
proj_dir = getwd()
# if(!"Okamoto_Mark_Recapture" %in% strsplit(proj_dir, '/')[[1]]){
# proj_dir = file.path(getwd(), "Okamoto_Mark_Recapture")
# }
data_dir = file.path(proj_dir, "data")
src_dir = file.path(proj_dir, "src")
results_dir = file.path(proj_dir, "results")
## Creating a run specific results folder (named after the start timestamp)
run_results_dir = file.path(results_dir, paste('run', initial_run_time))
dir.create(run_results_dir)
print(paste('proj_dir:', proj_dir))
print(paste('src_dir:', src_dir))
print(paste('run_results_dir:', run_results_dir))
## Installing Principle Dependencies
print('Installing principle dependencies')
# library('notifyR') # ## send_push()
library('doParallel')  # provides foreach/%dopar% used by bootstrap_growth()
# library('beepr')
library('mixtools')  # normalmixEM() used by length_freq_decomp()
## Sourcing R Scripts provided by Eveson/Laslett
## These define logl.ssnl.f(), logl.oto.f(), logl.lf.f() and growthvb.f
## referenced throughout the model-fitting code below.
print('Sourcing files')
source(file.path(src_dir, "Laslett Functions/joint_lkhd.r"))
source(file.path(src_dir, "Laslett Functions/growth_functions.r"))
source(file.path(src_dir, "Laslett Functions/tag_lkhd.r"))
## Reading in literature parameter values
print('Loading Data')
lit_vbgc_params = read.csv(file.path(data_dir, "Parameter Estimates.csv"), stringsAsFactors = FALSE)
lit_vbgc_params = lit_vbgc_params[!is.na(lit_vbgc_params$Linf), ]
colnames(lit_vbgc_params) = c('author', 'n', 'linf', 'k', 't0', 'region', 'method')
# drop row 21 -- presumably an excluded study; TODO confirm which one
lit_vbgc_params = lit_vbgc_params[c(1:20, 22:25), ]
## Assigning cores for parallel processing
registerDoParallel(cores = detectCores()-1)
##### Defining Utility Functions #####
# Modifying our JOINT LIKELIHOOD function to accept more than one set of a given data type:
# Extended joint negative log-likelihood allowing multiple datasets per data
# type (up to 2 tagging, 4 otolith and 2 length-frequency sources). Each
# source's negative log-likelihood is scaled by its weight and the weighted
# components are summed; sources with weight 0 are never evaluated, so their
# data arguments may be NULL.
joint.logl.f <- function(param, npf, npA, tagdat = NULL, tagdat2 = NULL ,otodat = NULL, otodat2 = NULL, otodat3 = NULL, otodat4 = NULL, lfdat = NULL, lfdat2 = NULL, wt.oto=0, wt.oto2=0, wt.oto3=0, wt.oto4=0, wt.tag=0, wt.tag2=0,wt.lf=0, wt.lf2=0)
{
  ## Weighted likelihood contribution of one data source (0 when unweighted).
  weighted.component <- function(wt, logl.fun, dat) {
    if (wt > 0) {
      return(wt * logl.fun(param, npf, npA, dat))
    }
    return(0)
  }
  neglogl <- weighted.component(wt.tag,  logl.ssnl.f, tagdat) +
    weighted.component(wt.tag2, logl.ssnl.f, tagdat2) +
    weighted.component(wt.oto,  logl.oto.f,  otodat) +
    weighted.component(wt.oto2, logl.oto.f,  otodat2) +
    weighted.component(wt.oto3, logl.oto.f,  otodat3) +
    weighted.component(wt.oto4, logl.oto.f,  otodat4) +
    weighted.component(wt.lf,   logl.lf.f,   lfdat) +
    weighted.component(wt.lf2,  logl.lf.f,   lfdat2)
  return(neglogl)
}
## Predicted length at recapture under von Bertalanffy growth (Fabens' form):
## the fish closes a fraction (1 - exp(-k * dt)) of the gap between its length
## at marking and the asymptotic length.
##   Lm   - length at marking (cm)
##   dt   - time at liberty (years)
##   linf - asymptotic length; k - growth coefficient
##   a    - unused in the current formulation; kept for backward compatibility
predict_recapture_length = function(Lm, dt, linf = 65.95546, k = 0.2369113, a = 0){
  growth_increment = (linf - Lm) * (1 - exp(-k * dt))
  return(Lm + growth_increment)
}
## Mean squared prediction error: average squared difference between the
## VB-predicted recapture lengths and the observed recapture lengths Lr_obs.
calculate_predictive_variance = function(Lm, dt, linf, k, Lr_obs){
  sq_resid = (predict_recapture_length(Lm = Lm, dt = dt, linf = linf, k = k) - Lr_obs)^2
  return(sum(sq_resid) / length(Lr_obs))
}
std_error = function(x){
  #### Standard error of the mean of x, computed as sqrt(var(x) / n)
  n = length(x)
  return(sqrt(var(x) / n))
}
#### Determining how long it takes to reach a threshold % of Linf under each model
## Solves the VB curve L(t) = linf * (1 - exp(-k * (t - a0))) for the age t at
## which L(t) = threshold * linf, i.e. t = a0 - log(1 - threshold) / k.
## Note: linf cancels algebraically. The original computed
## log(1 - linf * threshold / linf), where the multiply/divide pair only added
## floating-point round-off; the `linf` argument is retained so existing
## callers keep working.
yrs_to_.9_linf = function(linf, k, a0 = 0, threshold = 0.90){
  t = log(1 - threshold) / (-1 * k) + a0
  return(t)
}
#### Bootstrapping functions
### A function to bootstrap length-frequency data
## Within each monthly bin (curr_month_year), resample that month's rows with
## replacement, preserving each month's sample size. Returns the resampled
## data frame (row order grouped by month).
## BUG FIX: the original guard used exists('pseudo_data$curr_month_year'),
## which looks for an object literally named "pseudo_data$curr_month_year" and
## never finds one, so curr_month_year was unconditionally overwritten;
## is.null() implements the intended "add the column only if missing".
## NOTE(review): the original also called set.seed(Sys.time()) here; the seed
## truncates to whole seconds, so parallel workers resampling in the same
## second received identical RNG streams (correlated bootstrap replicates).
## Removed -- callers control seeding.
lf_boot = function(pseudo_data){
  if(is.null(pseudo_data$curr_month_year)){
    pseudo_data$curr_month_year = pseudo_data$month_year
  }
  ## Resample each month independently, then stack (avoids rbind-in-loop growth)
  resampled = lapply(unique(pseudo_data$curr_month_year), function(bin){
    bin_rows = pseudo_data[pseudo_data$curr_month_year == bin, ]
    bin_rows[sample(nrow(bin_rows), size = nrow(bin_rows), replace = TRUE), ]
  })
  boot_lf_dat = do.call(rbind, resampled)
  return(boot_lf_dat)
}
## A function for bootstrap sampling with replacement
## Runs `boot_iterations` nonparametric bootstrap replicates of the joint VB
## fit in parallel (%dopar%). Each supplied data source is row-resampled with
## replacement; NULL sources are passed through as NULL. Relies on the globals
## p0, lb, ub, npf and npA for the optimizer setup. Returns a data.frame with
## one row of parameter estimates per replicate; replicates whose fit failed
## are rows of NA (the caller re-runs those).
bootstrap_growth = function(boot_iterations = 10000, tagdat = NULL, tagdat2 = NULL, otodat = NULL, otodat2 = NULL, otodat3 = NULL, otodat4 = NULL, pseudolf = NULL, pseudolf2 = NULL, wt.oto = 1, wt.oto2 = 0, wt.oto3 = 0, wt.oto4 = 0, wt.tag = 1, wt.tag2 = 0, wt.lf = 1, wt.lf2 = 0){
  ## Row-resample one data source with replacement (NULL stays NULL)
  resample_rows = function(dat){
    if(is.null(dat)){
      return(NULL)
    }
    dat[sample(nrow(dat), size = nrow(dat), replace = TRUE), ]
  }
  boot_param_ests = foreach(1:boot_iterations, .combine = rbind) %dopar% {
    boot.tagdat = resample_rows(tagdat)
    # BUG FIX: previously resampled from `tagdat` using `nrow(tagdat2)` rows
    # instead of resampling `tagdat2` itself
    boot.tagdat2 = resample_rows(tagdat2)
    boot.otodat = resample_rows(otodat)
    boot.otodat2 = resample_rows(otodat2)
    boot.otodat3 = resample_rows(otodat3)
    boot.otodat4 = resample_rows(otodat4)
    ## Length-frequency sources: mixture decomposition can fail stochastically,
    ## so retry until a valid data.frame is produced
    boot.lfdat = NULL
    if(!is.null(pseudolf)){
      while(!inherits(boot.lfdat, 'data.frame')){
        boot.lfdat = try(length_freq_decomp(lf_boot(pseudolf)), silent = TRUE)
      }
    }
    boot.lfdat2 = NULL
    if(!is.null(pseudolf2)){
      # BUG FIX: previously decomposed `pseudolf` here (not `pseudolf2`) and
      # lacked the try() wrapper, so a single failed decomposition aborted the
      # whole replicate
      while(!inherits(boot.lfdat2, 'data.frame')){
        boot.lfdat2 = try(length_freq_decomp(lf_boot(pseudolf2)), silent = TRUE)
      }
    }
    ## Refitting MLE Object on the resampled data
    boot.fit.vb = try(nlminb(p0, joint.logl.f, lower = lb, upper = ub, npf = npf, npA = npA, tagdat = boot.tagdat, tagdat2 = boot.tagdat2, otodat = boot.otodat, otodat2 = boot.otodat2, otodat3 = boot.otodat3, otodat4 = boot.otodat4, lfdat = boot.lfdat, lfdat2 = boot.lfdat2, wt.oto = wt.oto, wt.oto2 = wt.oto2, wt.oto3 = wt.oto3, wt.oto4 = wt.oto4, wt.tag = wt.tag, wt.tag2 = wt.tag2, wt.lf = wt.lf, wt.lf2 = wt.lf2), silent = TRUE)
    ## Writing out parameters if model converged, otherwise writing out NAs
    if(is.list(boot.fit.vb)){
      return(boot.fit.vb$par)
    } else {
      return(rep(NA, 10))
    }
  }
  boot_param_ests = as.data.frame(boot_param_ests)
  colnames(boot_param_ests) = c('mu.L', 'sig.L', 'k', 'mu.A', 'sig.A', 'sig.sci', 'sig.f', 'a0', 'sig.oto', 'sig.lf')
  return(boot_param_ests)
}
## A function for producing summary statistics from the bootstrap parameter distributions
## Drops non-converged (NA) replicates, then reports the median and the 2.5% /
## 97.5% percentiles of each parameter column. Returns a 3 x n_params matrix
## (rows: Median, 2.5%, 97.5%; columns: parameter names).
calc_boot_stats = function(boot_param_ests){
  boot_param_ests = boot_param_ests[!is.na(boot_param_ests[,1]), ]
  n = dim(boot_param_ests)[1]
  two.five.percent = n * 0.025
  ninetyseven.five.percent = n * 0.975
  ## Order statistic at fractional position `pos`, linearly interpolated.
  ## BUG FIX: the original applied the fractional weight (pos - floor(pos)) to
  ## the LOWER order statistic and its complement to the upper one -- i.e. the
  ## interpolation weights were swapped. Correct linear interpolation puts the
  ## fractional part's weight on the upper order statistic.
  interp_quantile = function(x, pos){
    x = sort(x)
    if(floor(pos) == pos){
      return(x[pos])
    }
    frac = pos - floor(pos)
    return(x[floor(pos)] * (1 - frac) + x[ceiling(pos)] * frac)
  }
  boot_stats = data.frame(matrix(NA, nrow = dim(boot_param_ests)[2], ncol = 3), stringsAsFactors = FALSE)
  colnames(boot_stats) = c('Median', '2.5%', '97.5%')
  rownames(boot_stats) = colnames(boot_param_ests)
  for(i in 1:dim(boot_param_ests)[2]){
    boot_stats[i, 1] = median(boot_param_ests[ ,i], na.rm = TRUE)
    boot_stats[i, 2] = interp_quantile(boot_param_ests[ ,i], two.five.percent)
    boot_stats[i, 3] = interp_quantile(boot_param_ests[ ,i], ninetyseven.five.percent)
  }
  return(t(boot_stats))
}
## A function to bring all bootstrapped function results together
## Driver around bootstrap_growth() / calc_boot_stats(): runs an initial pass
## of `boot_iterations` replicates, then keeps re-running just the failed
## (NA) replicates until at least `boot_iterations` converged rows exist.
## Writes summary stats and raw replicates to CSV in run_results_dir (a global)
## when `filename` is given. Returns a list with the raw replicates, the
## summary matrix and the overall convergence failure rate (%).
bootstrap_growth_params = function(boot_iterations = 10000, filename = NULL, tagdat = NULL, tagdat2 = NULL, otodat = NULL, otodat2 = NULL, otodat3 = NULL, otodat4 = NULL, pseudolf = NULL, pseudolf2 = NULL, wt.oto = 1, wt.oto2 = 0, wt.oto3 = 0, wt.oto4 = 0, wt.tag = 1, wt.tag2 = 0, wt.lf = 1, wt.lf2 = 0){
  # Lower bound on replicates so the 2.5%/97.5% percentile positions in
  # calc_boot_stats() are at least 1; returns NULL (not an error) if violated
  if(boot_iterations < 40){
    print('Error: boot_iterations must be > 40')
    return()
  }
  ## First run initial bootstrapping pass
  booted_param_ests = bootstrap_growth(boot_iterations, tagdat, tagdat2, otodat, otodat2, otodat3, otodat4, pseudolf, pseudolf2, wt.oto, wt.oto2, wt.oto3, wt.oto4, wt.tag, wt.tag2, wt.lf, wt.lf2)
  ## Then rerun to fill in iterations that failed to converge
  # NA rows are kept (the data frame grows past boot_iterations); calc_boot_stats()
  # drops them again before summarizing. mu.L is NA exactly for failed fits.
  while(dim(booted_param_ests[!is.na(booted_param_ests$mu.L), ])[1] < boot_iterations){
    booted_param_ests = rbind(booted_param_ests, bootstrap_growth(boot_iterations = length(which(is.na(booted_param_ests$mu.L))), tagdat, tagdat2, otodat, otodat2, otodat3, otodat4, pseudolf, pseudolf2, wt.oto, wt.oto2, wt.oto3, wt.oto4, wt.tag, wt.tag2, wt.lf, wt.lf2))
  }
  ## What percentage failed to converge?
  # Denominator is ALL attempts (converged + failed), not boot_iterations
  convergence_failure_rate = (length(which(is.na(booted_param_ests$mu.L))) / dim(booted_param_ests)[1]) * 100
  print(paste('Convergence failure rate =', round(convergence_failure_rate, digits = 2), '%'))
  ## Getting summary stats and writing them out
  boot_stats = calc_boot_stats(booted_param_ests)
  if(!is.null(filename)){
    write.csv(boot_stats, file.path(run_results_dir, paste(filename, '.csv', sep = "")))
    write.csv(booted_param_ests, file.path(run_results_dir, paste(filename, '_raw.csv', sep = "")))
  }
  ## Writing out results
  results = list()
  results$raw_boot_data = booted_param_ests
  results$boot_stats = boot_stats
  results$convergence_failure_rate = convergence_failure_rate
  return(results)
}
##### Loading and Cleaning Data Files #####
#### Mark Recapture Data
mark_recapture_data = read.csv(file.path(data_dir, 'HO Mstr, temp (version 1).csv'), stringsAsFactors = FALSE)
### Renaming data columns
colnames(mark_recapture_data) = c('tag_date', 'location', 'station', 'depth_f', 'species', 'previously_tagged', 'tag_id','fork_length_in', 'remarks', 'recapture_1_date', 'recapture_1_location', 'recapture_1_station', 'recapture_1_depth_f', 'recapture_1_fork_length_in', 'weight_1_lbs', 'days_1_free', 'growth_1_in', 'distance_1_miles','retagged_1',
'recapture_2_date', 'recapture_2_location', 'recapture_2_station', 'recapture_2_depth_f', 'recapture_2_fork_length_in', 'weight_2_lbs', 'days_2_free', 'growth_2_in', 'distance_2_miles', 'retagged_2',
'recapture_3_date', 'recapture_3_location', 'recapture_3_station', 'recapture_3_depth_f', 'recapture_3_fork_length_in', 'weight_3_lbs', 'days_3_free', 'growth_3_in', 'distance_3_miles', 'retagged_3',
'recapture_4_date', 'recapture_4_location', 'recapture_4_station', 'recapture_4_depth_f', 'recapture_4_fork_length_in', 'weight_4_lbs', 'days_4_free', 'growth_4_in', 'distance_4_miles', 'x_retagged')
## How many total fish do we have in the data set?
dim(mark_recapture_data)[1] # 4245!
### Subsetting out only Opakapaka with tag IDs - That is, fish that were marked
# species code '1' = opakapaka (P. filamentosus) -- TODO confirm code table
mark_recapture_data = mark_recapture_data[mark_recapture_data$species == '1' & mark_recapture_data$tag_id != '', ]
dim(mark_recapture_data)[1] # This gets you to the previously published 4179 tagged paka number from Kobayashi, Okamoto, & Oishi . for some reason doesn't exclude fish marked 'died'
#### Adusting Data Classes
### Formatting Dates (Converting Characters to POSIXct)
mark_recapture_data$tag_date = as.POSIXct(mark_recapture_data$tag_date, format = "%Y-%m-%d")
mark_recapture_data$recapture_1_date = as.POSIXct(mark_recapture_data$recapture_1_date, format = "%Y-%m-%d")
mark_recapture_data$recapture_2_date = as.POSIXct(mark_recapture_data$recapture_2_date, format = "%Y-%m-%d")
mark_recapture_data$recapture_3_date = as.POSIXct(mark_recapture_data$recapture_3_date, format = "%Y-%m-%d")
mark_recapture_data$recapture_4_date = as.POSIXct(mark_recapture_data$recapture_4_date, format = "%Y-%m-%d")
### Formatting fork lengths
## Note: There are a couple fork lengths that have ?, *, or have no lengths recorded.
## I have no idea what these are but they're qualifiers and so I'm going to let them go to NA and get dropped from analysis
# as.numeric() on non-numeric strings yields NA (with a coercion warning)
in_to_cm = 2.54
mark_recapture_data$fork_length_cm = as.numeric(mark_recapture_data$fork_length_in) * in_to_cm
mark_recapture_data$recapture_1_fork_length_cm = as.numeric(mark_recapture_data$recapture_1_fork_length_in) * in_to_cm
mark_recapture_data$recapture_2_fork_length_cm = as.numeric(mark_recapture_data$recapture_2_fork_length_in) * in_to_cm
mark_recapture_data$recapture_3_fork_length_cm = as.numeric(mark_recapture_data$recapture_3_fork_length_in) * in_to_cm
mark_recapture_data$recapture_4_fork_length_cm = as.numeric(mark_recapture_data$recapture_4_fork_length_in) * in_to_cm
#### Now we want to format a table with lm (length at marking), lr (length at recapture), and dt (elapsed time)
### Note: If a fish was recaptured multiple times, there is a single entry for that individual corrosponding to the length at initial marking and the length at final recapture
# The if/else-if cascade below picks the LAST recapture event with a recorded length
paka_growth = data.frame(stringsAsFactors = FALSE)
for(i in 1:length(mark_recapture_data$tag_id)){
  if(!is.na(mark_recapture_data$recapture_4_fork_length_cm[i])){
    paka_growth = rbind(paka_growth, data.frame('tag_id' = mark_recapture_data$tag_id[i], 'Lm' = mark_recapture_data$fork_length_cm[i], 'Lr' = mark_recapture_data$recapture_4_fork_length_cm[i], 'tm' = mark_recapture_data$tag_date[i], 'tr' = mark_recapture_data$recapture_4_date[i], 'n_recaptures' = 4, stringsAsFactors = FALSE))
  }else if(!is.na(mark_recapture_data$recapture_3_fork_length_cm[i])){
    paka_growth = rbind(paka_growth, data.frame('tag_id' = mark_recapture_data$tag_id[i], 'Lm' = mark_recapture_data$fork_length_cm[i], 'Lr' = mark_recapture_data$recapture_3_fork_length_cm[i], 'tm' = mark_recapture_data$tag_date[i], 'tr' = mark_recapture_data$recapture_3_date[i], 'n_recaptures' = 3, stringsAsFactors = FALSE))
  }else if(!is.na(mark_recapture_data$recapture_2_fork_length_cm[i])){
    paka_growth = rbind(paka_growth, data.frame('tag_id' = mark_recapture_data$tag_id[i], 'Lm' = mark_recapture_data$fork_length_cm[i], 'Lr' = mark_recapture_data$recapture_2_fork_length_cm[i], 'tm' = mark_recapture_data$tag_date[i], 'tr' = mark_recapture_data$recapture_2_date[i], 'n_recaptures' = 2, stringsAsFactors = FALSE))
  }else if(!is.na(mark_recapture_data$recapture_1_fork_length_cm[i])){
    paka_growth = rbind(paka_growth, data.frame('tag_id' = mark_recapture_data$tag_id[i], 'Lm' = mark_recapture_data$fork_length_cm[i], 'Lr' = mark_recapture_data$recapture_1_fork_length_cm[i], 'tm' = mark_recapture_data$tag_date[i], 'tr' = mark_recapture_data$recapture_1_date[i], 'n_recaptures' = 1, stringsAsFactors = FALSE))
  }
}
# Time at liberty, first in days...
paka_growth$dt = abs(difftime(paka_growth$tm, paka_growth$tr, units = "days")) ## Converting dt from days to years
paka_growth$dt = as.numeric(paka_growth$dt) / 365 # Converting to years
### Constructing derived variable dl (change in growth)
paka_growth$dL = paka_growth$Lr - paka_growth$Lm
### Removing any fish that have a NA value for dL or dt (There is a single fish which had no tagging length and 7 fish with no recapture dates)
length(which(is.na(paka_growth$dL))) # 1
length(which(is.na(paka_growth$dt))) # 7
paka_growth = paka_growth[!is.na(paka_growth$dL) & !is.na(paka_growth$dt), ]
# Tally how many fish had 1, 2, 3 or 4 recapture events
n_recaps = data.frame('recapture events' = unique(paka_growth$n_recaptures), 'n_fish' = 0)
i = 1
for(n_recap in unique(paka_growth$n_recaptures)){
  n_recaps$n_fish[i] = dim(paka_growth[paka_growth$n_recaptures == n_recap, ])[1]
  i = i+1
}
#### Creating a subset data frame that removes recording errors in length and time
# paka_growth = subset(paka_growth, dL > 0)
## Exclude fish at liberty fewer than 60 days (measurement noise dominates growth)
length(which(paka_growth$dt < 60/365)) #46
paka_growth = subset(paka_growth, dt >= 60/365)
## Matrix layout expected by the Laslett tag likelihood: L1, L2, <blank>, dt, L2measurer
tagdat = as.matrix(data.frame('L1' = paka_growth$Lm, "L2" = paka_growth$Lr, " " = rep(0, length(paka_growth$Lr)), "dt" = paka_growth$dt, "L2measurer" = rep(0, length(paka_growth$Lr))))
#### Creating Second tagging dataset from PIFG data
pifg20072013 = read.csv(file.path(data_dir, 'PIFG 2007-2013.csv'), stringsAsFactors = FALSE)
pifg20072013$rel_date = as.POSIXct(pifg20072013$rel_date, format = "%m/%d/%Y")
pifg20072013$recap_date = as.POSIXct(pifg20072013$recap_date, format = "%m/%d/%Y")
# NOTE(review): difftime() without units= resolves to "auto"; for these
# multi-day spans that yields days, but passing units = "days" explicitly
# would be safer -- TODO confirm
pifg20072013$dt = difftime(pifg20072013$recap_date, pifg20072013$rel_date)
### 2014-2015 data
pifg20142015 = read.csv(file.path(data_dir, 'PIFG 2014-2015.csv'), stringsAsFactors = FALSE)
pifg20142015$rel_date = as.POSIXct(pifg20142015$rel_date, format = "%m/%d/%Y")
pifg20142015$recap_date = as.POSIXct(pifg20142015$recap_date, format = "%m/%d/%Y")
# 2014-2015 lengths are mixed units; convert inch records to cm in place
pifg20142015$rel_FL[pifg20142015$Units == 'in'] = pifg20142015$rel_FL[pifg20142015$Units == 'in'] * in_to_cm
pifg20142015$recap_FL[pifg20142015$Units == 'in'] = pifg20142015$recap_FL[pifg20142015$Units == 'in'] * in_to_cm
pifg20142015$dt = difftime(pifg20142015$recap_date, pifg20142015$rel_date)
# Combine both PIFG periods into the tagdat layout (dt converted days -> years)
pifg_data = data.frame('L1' = c(pifg20072013$rel_FL, pifg20142015$rel_FL), 'L2' = c(pifg20072013$recap_FL, pifg20142015$recap_FL), " " = rep(0, length(c(pifg20072013$recap_FL, pifg20142015$recap_FL))), 'dt' = c(pifg20072013$dt, pifg20142015$dt) / 365, "L2measurer" = rep(0, length(c(pifg20072013$recap_FL, pifg20142015$recap_FL))))
## Removing any pifg data with time at liberty < 60 days
pifg_data = pifg_data[pifg_data$dt >= 60/365, ]
tagdat2 = pifg_data
#### Otolith Data (Ralston and Miyamoto 1983, DeMartini et al. 1994, Andrews et al. 2012)
otodat = read.csv(file.path(data_dir, "RalstonMiyamotoandDemartiniAndrews.csv"))
colnames(otodat) = c("age", "len", "source")
#### Length Frequency Data
### Extrapolated from Moffitt and Parrish 1996 using earlier version of manuscript (1994)
### Monthly fish length counts were extrapolated from histograms in paper first by adjusting rotation so histograms were 'level', then fitting equally spaced bars for each n across y axis and comparing bar heights
### I ended up with one more record than they do but pretty close! (1048 vs. 1047). number next to each month is the total number of fish for that month estimated from the histograms. * means this number was estimated a second time and matched
# One data frame per sampling month; 'val' = count of fish per 1-cm bin, 'len' = 6:24 cm
oct_1989 = data.frame('date' = as.POSIXct('1989-10-01'), 'val' = c(0, 0, 1, 2, 1, 7, 1, 2, 1, 8, 7, 18, 5, 3, 0, 0, 0, 0, 0), len = 6:24) # 56 *
nov_1989 = data.frame('date' = as.POSIXct('1989-11-01'), 'val' = c(0, 0, 1, 4, 11, 7, 6, 4, 4, 3, 11, 12, 5, 1, 0, 0, 0, 0, 0), len = 6:24) # 69 *
jan_1990 = data.frame('date' = as.POSIXct('1990-01-01'), 'val' = c(0, 0, 0, 1, 6, 10, 12, 13, 20, 8, 1, 5, 2, 6, 3, 5, 1, 0, 1), len = 6:24) # 94 *
feb_1990 = data.frame('date' = as.POSIXct('1990-02-01'), 'val' = c(0, 0, 0, 0, 0, 4, 20, 26, 22, 10, 8, 3, 3, 5, 2, 0,0,0,0), len = 6:24) # 103 *
mar_1990 = data.frame('date' = as.POSIXct('1990-03-01'), 'val' = c(0, 0, 0, 0, 1, 1, 20, 14, 27, 14, 8, 4, 0, 0, 0, 0, 0, 0, 0), len = 6:24) # 89 *
apr_1990 = data.frame('date' = as.POSIXct('1990-04-01'), 'val' = c(0, 0, 0, 0, 0, 1, 6, 17, 17, 15, 14, 4, 4, 3, 0, 0, 0, 0, 0), len = 6:24) # 81 *
jun_1990 = data.frame('date' = as.POSIXct('1990-06-01'), 'val' = c(0, 0, 0, 0, 0, 0, 2, 13, 26, 19, 24, 13, 3, 3, 0, 1, 0, 0, 0), len = 6:24) # 104 *
aug_1990 = data.frame('date' = as.POSIXct('1990-08-01'), 'val' = c(0, 0, 0, 0, 0, 0, 1, 2, 6, 23, 26, 28, 9, 8, 2, 0, 0, 0, 0), len = 6:24) # 105 *
sep_1990 = data.frame('date' = as.POSIXct('1990-09-01'), 'val' = c(0, 0, 0, 0, 0, 0, 0, 1, 2, 5, 22, 27, 25, 7, 3, 4, 3, 1, 1), len = 6:24) # 101 *
oct_1990 = data.frame('date' = as.POSIXct('1990-10-01'), 'val' = c(0, 0, 0, 0, 1, 0, 0, 2, 6, 17, 17, 15, 7, 5, 5, 0, 0, 0, 0), len = 6:24) # 75
nov_1990 = data.frame('date' = as.POSIXct('1990-11-01'), 'val' = c(0, 0, 0, 0, 0, 1, 1, 2, 0, 3, 3, 8, 5, 2, 1, 0, 0, 0, 0), len = 6:24) # 26 *
jan_1991 = data.frame('date' = as.POSIXct('1991-01-01'), 'val' = c(0, 0, 0, 0, 0, 0, 0, 3, 2, 0, 12, 32, 30, 8, 3, 0, 0, 0, 0), len = 6:24) # 90 *
feb_1991 = data.frame('date' = as.POSIXct('1991-02-01'), 'val' = c(0, 0, 0, 0, 0, 0, 0, 0, 5, 7, 1, 6, 12, 14, 10, 0, 0, 0, 0), len = 6:24) # 55 *
## Combinging All of this data together into a single dataset
tagging_data = rbind(oct_1989, nov_1989, jan_1990, feb_1990, mar_1990, apr_1990, jun_1990, aug_1990, sep_1990, oct_1990, nov_1990, jan_1991, feb_1991)
## Making up pseudo tagging records. Columns are: date fish was caught, and length of the fish.
# Expand binned counts into one row per individual fish
pseudo_data = data.frame()
for(i in 1:length(tagging_data$date)){
  if(tagging_data$val[i] != 0){
    pseudo_data = rbind(pseudo_data, data.frame('date' = rep(tagging_data$date[i], times = tagging_data$val[i]), 'len' = rep(tagging_data$len[i], times = tagging_data$val[i])))
  }
}
colnames(pseudo_data) = c('date', 'length')
## Stripping out the month and year from each date object, then creating a new variable that is the month and year smushed together (matches histograms in moffit & parrish)
# NOTE(review): months() returns locale-dependent month names; this script
# assumes an English locale -- confirm before running elsewhere
pseudo_data$month = months(pseudo_data$date)
pseudo_data$year = format(pseudo_data$date, "%Y")
pseudo_data$month_year = paste(pseudo_data$month, as.character(pseudo_data$year))
### Age at recruitment to juvenile fishing grounds.
## Peak spawning occurs in July according to Luers, Demartini, Humphreys 2017
## Difference between first sampling trip (Oct) and peak spawning (July) = 3 months
age_at_recruitment = 3/12
## Estimating starting means from data
# One row per sampling month (same order as tagging_data); col 1 = younger
# cohort's starting mean length, col 2 = older cohort's (NA when unimodal)
start_means = rbind(c(1, 17), c(11, 18), c(13, 20), c(14, 20), c(15, NA), c(15, NA), c(16, NA), c(17, NA), c(16, NA), c(10, 16), c(13, 16), c(13, 18), c(15, 17))
constrain_means = rbind(c(11, 17),c(11, 18), c(14, 19), c(13, 19), c(14, NA), c(14, NA), c(14, NA), c(17, NA), c(15, NA), c(10, 16.5), c(13, 17), c(13, 17), c(15, 19))
## Creating a function that we can use later for bootstrapping
## Decomposes monthly length-frequency samples into cohort modes for the joint
## likelihood. For each month: estimates the age(s) of the cohort(s) present
## (assuming a July birthdate), then either fits a 2-component normal mixture
## (months where two cohorts overlap) or summarizes the single cohort.
## Returns a data.frame with columns mode.age, mode.len, mode.se, est.n,
## curr_month_year, sorted by mode.age.
length_freq_decomp = function(pseudo_data, plot = FALSE, fixed_modes = FALSE){
  lfdat = data.frame(stringsAsFactors = FALSE)
  for(i in 1:length(unique(pseudo_data$date))){
    curr_month_year = as.character(unique(pseudo_data$date)[i])
    month_data = pseudo_data[pseudo_data$date == curr_month_year, ]
    ## Determing the mean age of fish once they've recruited, assuming that they were born during peak spawning.
    # NOTE(review): the third positional argument of difftime() is tz, not
    # units -- "days" here is being passed as tz and units stays "auto".
    # For these spans "auto" resolves to days, so the /365 is correct, but
    # units = "days" should be made explicit -- TODO confirm
    mode.age = as.numeric(difftime(unique(month_data$date), as.POSIXct('1989-07-01'), "days")) / 365 # in years
    mode.age = c(mode.age, mode.age + 1) # Assumption is that if two cohorts are present, the second is one year older than the first
    # When we get to the second year of data, YOY for first year becomes 1+ year old, new cohort recruits. Because we based age on difftime for first cohort, we need to remove 1 year from all ages
    if(min(mode.age) > (1 + age_at_recruitment)){
      mode.age = mode.age - 1
    }
    ## We need to decompose distributions for each cohort present into age of cohort, mean length of cohort, se of length of cohort, and number of fish falling into each cohort
    ## During Oct - Feb, two age cohorts present, we need to decompose two normal distributions from data (Moffitt and Parrish 1996)
    # NOTE(review): months() is locale-dependent; English names assumed
    if (months(unique(month_data$date)) %in% c('October', 'November', 'January', 'February')) {
      decomp = NULL
      # normalmixEM can fail stochastically; retry until it returns a fit.
      # NOTE(review): no retry cap -- a systematically failing month would loop forever
      while(class(decomp) != 'mixEM'){
        k = 2 # Number of cohorts
        if(fixed_modes == TRUE){
          decomp = try(normalmixEM(month_data$length, mu = start_means[i, ], k = k, arbvar = TRUE, mean.constr = constrain_means[i, ]), silent = TRUE) # abvar = FALSE would both cohorts to have same sigma. See justification for this in Laslett et. al 2004
        } else {
          decomp = try(normalmixEM(month_data$length, mu = start_means[i, ], k = k, arbvar = TRUE), silent = TRUE) # abvar = FALSE would both cohorts to have same sigma. See justification for this in Laslett et. al 2004
        }
      }
      mode.len = decomp$mu[order(decomp$mu)] # Sometimes things pop out in a weird order. We assume that the smaller size class is the younger cohort
      est.n = c(decomp$lambda * dim(month_data)[1])[order(decomp$mu)]
      mode.se = (decomp$sigma / sqrt(est.n))[order(decomp$mu)]
      lfdat = rbind(lfdat, cbind(mode.age, mode.len, mode.se, est.n, curr_month_year))
    } else {
      k = 1 # Number of cohorts
      est.n = length(month_data$length) # Number of fish in cohort
      ## For times when only a single cohort exists, we need to figure out the relative age of that cohort
      # NOTE(review): format(..., "%m") is character ("03".."09") and 7 is
      # numeric, so R coerces 7 to "7" and compares lexicographically; every
      # month reaching this branch ("03","04","06","08","09") compares TRUE,
      # including August/September. For these data taking the younger-cohort
      # age in Aug/Sep is consistent with the ages computed above, so do NOT
      # "fix" this to a numeric comparison without re-deriving the intent.
      if (format(month_data$date, "%m")[1] < 7) { # if month is less than october (month that new recruits show up)
        mode.age = mode.age[1] # Go with the younger year class because fish are less than 1 year old
      } else {
        mode.age = mode.age[2] # Go with the older year class because fish are older than 1 (YOY have not recruited yet)
      }
      mode.len = mean(month_data$length)
      mode.se = sd(month_data$length) / sqrt(est.n)
      if(fixed_modes == TRUE){
        mode.len = constrain_means[i, ][which(!is.na(constrain_means[i, ]))]
        mode.se = sqrt(sum((mode.len - month_data$length)^2)/(est.n - 1)) / est.n
      }
      lfdat = rbind(lfdat, cbind(mode.age, mode.len, mode.se, est.n, curr_month_year))
    }
  }
  ## We may need to reclass our data depending on if we wrote curr_month_year into lfdat at (this turns stuff into factors)
  # cbind() of mixed types + rbind coerces numeric columns to factor; undo that
  if(is.factor(lfdat$mode.age)){
    lfdat$mode.age = as.numeric(levels(lfdat$mode.age)[lfdat$mode.age])
    lfdat$mode.len = as.numeric(levels(lfdat$mode.len)[lfdat$mode.len])
    lfdat$mode.se = as.numeric(levels(lfdat$mode.se)[lfdat$mode.se])
  }
  ## Sorting lfdat by the mean age. This makes it easier to visually inspect that fish are getting larger as they get older. This can get messed up during bimodal composition.
  lfdat = lfdat[order(lfdat$mode.age), ]
  if(plot){
    plot(x = lfdat$mode.age, y = lfdat$mode.len, pch = 19)
  }
  return(lfdat)
}
### Creating table of fitted components for Gaussian and Gaussian mixture models
## Decompose the monthly length-frequency samples into age-class modes
## (mean length, SE, n per cohort); fixed_modes = TRUE constrains the
## component means (see length_freq_decomp above).
print('lfdat-ing')
lfdat = length_freq_decomp(pseudo_data, plot = TRUE, fixed_modes = TRUE)
##### Model Fitting #####
print('Fitting models')
## Accumulator: one row of fitted parameters per model, built up by rbind
## below. NOTE(review): cbind()-ing a label string with numeric parameters
## coerces everything to character (or factor pre-R-4.0), which is why
## downstream code converts columns back via as.numeric(levels(...)) --
## confirm behavior under the running R version.
results = data.frame(stringsAsFactors = FALSE)
#### Fitting VB model
growth.ssnl.f<- growthvb.f
npf <- 1 #number of parameters passed to growth.ssnl.f (in this case k)
npA <- 2 #number of parameters in distribution of release ages for tag model
#### Fitting each data stream individually
print('Estimating Model Parameters')
### 1. Mark Recapture Data
#Specifying starting parameters, as well as upper and lower bounds for parameter estimation
## Parameter order: mu.L, sig.L, k, mu.A, sig.A, sig.sci, sig.f, a0, sig.oto, sig.lf
## Parameters with lb == ub (sig.f, a0, sig.lf here) are pinned; the tagging
## likelihood (wt.tag=1, other weights 0) does not use them.
## NOTE(review): p0[9] = 1 (sig.oto) lies outside [lb[9], ub[9]] = [0, 0];
## confirm how nlminb treats an infeasible start before trusting this fit.
p0 <- c( 70, 1, .10, 1, .10, 1, 0, 0, 1, 0)
lb <- c( 50, 0.1, .05, 0.1, .05, 0.1, 0, 0, 0, 0)
ub <- c( 110, 15.0, .50, 1.5, .50, 15.0, 0, 0, 0, 0)
fit.vb.tagging.all.data <- nlminb(p0,joint.logl.f,lower=lb,upper=ub,npf=npf,npA=npA,tagdat=tagdat,otodat=otodat,lfdat=lfdat, wt.oto=0,wt.tag=1,wt.lf=0)
results = rbind(results, cbind('Model 5 - Mark Recapture - All Data', t(as.vector(fit.vb.tagging.all.data$par))))
### 2. Length at Age Data
## Setting initial params for otolith data (only wt.oto = 1 below)
## mu.L, sig.L, k, mu.A, sig.A, sig.sci, sig.f, a0, sig.oto, sig.lf
p0 <- c( 70, 1, .10, 1, .10, 1, 0, 0, 1, 1)
lb <- c( 50, 0.1, .05, 0.1, .05, 0.1, 0, -10, 0.1, 0.1)
ub <- c( 110, 15.0, .50, 1.5, .50, 15.0, 0, 10, 15, 15)
fit.vb.oto <- nlminb(p0,joint.logl.f,lower=lb,upper=ub,npf=npf,npA=npA,tagdat=tagdat,otodat=otodat,lfdat=lfdat, wt.oto=1,wt.tag=0,wt.lf=0)
results = rbind(results, cbind('Length at Age', t(as.vector(fit.vb.oto$par))))
### 3a. Length Frequency Data - Unconstrained Linf
### First some notes about replicating Results of Moffitt and Parrish 1996 - ELEFAN model they used did not estimate a0. a0 is soaking up some of the observed variability that otherwise goes to K. In the function logl.lf.f within the script file joint_lkhd.r, uncommenting the line a0 = 0 will force this model.
### So which model is appropriate? Lets use AICc to find out
## Unconstrained Fit
## Setting initial params for length frequency data (only wt.lf = 1 below)
## mu.L, sig.L, k, mu.A, sig.A, sig.sci, sig.f, a0, sig.oto, sig.lf
p0 <- c( 70, 1, .10, 1, .10, 1, 0, 0, 1, 1)
lb <- c( 50, 0.1, .05, 0.1, .05, 0.1, 0, -10, 0.1, 0.1)
ub <- c( 110, 15.0, .50, 1.5, .50, 15.0, 0, 10, 15, 15)
fit.vb.lfu <- nlminb(p0,joint.logl.f,lower=lb,upper=ub,npf=npf,npA=npA,tagdat=tagdat,otodat=otodat,lfdat=lfdat, wt.oto=0, wt.tag=0, wt.lf=1)
results = rbind(results, cbind('Length Frequency (Unconstrained)', t(as.vector(fit.vb.lfu$par))))
### 3b. Length Frequency Data - Linf constrained by larger linf from oto/mr data
## Constraining Linf to a constant - In this case, maximum Linf from oto or mark recapture
## NOTE(review): this if/else computes lic from the fits above, but it is
## immediately overwritten by the hard-coded lic = 78 below, making the
## computation dead code. Remove one or the other once the intended source
## of the constraint is confirmed.
if(is.factor(results[ ,2])){
lic = max(levels(results[ ,2])[results[ ,2]])
} else {
lic = max(results[ ,2]) # Note: second column is mu.L parameter (mean of Linf)
}
lic = 78 # Same as used by Moffitt and Parrish (1996)
## Setting initial params for length frequency data
## mu.L, sig.L, k, mu.A, sig.A, sig.sci, sig.f, a0, sig.oto, sig.lf
## lb == ub == lic pins mu.L at the constrained value.
p0 <- c( lic, 1, .10, 1, .10, 1, 0, 0, 1, 1)
lb <- c( lic, 0.1, .05, 0.1, .05, 0.1, 0, -10, 0.1, 0.1)
ub <- c( lic, 15.0, .50, 1.5, .50, 15.0, 0, 10, 15, 15)
fit.vb.lfc <- nlminb(p0,joint.logl.f,lower=lb,upper=ub,npf=npf,npA=npA,tagdat=tagdat,otodat=otodat,lfdat=lfdat, wt.oto=0, wt.tag=0, wt.lf=1)
results = rbind(results, cbind('Length Frequency (Constrained)', t(as.vector(fit.vb.lfc$par))))
#### Is it appropriate to try to measure a0 using such limited data?
### Lets use AICc to find out
## AICc = 2k - 2log(L) + ((2k^2 + 2k) / (n-k-1))
## The middle terms are +2*NLL (negative log-likelihoods from earlier runs);
## n = 21 length-frequency observations.
aicc_with_a0_and_sig.lf = 2*3 + 2*(40.02605) + ((2*3^2 + 2*3) / (21 - 3 - 1)) # 87.46386
aicc_without_a0 = 2*2 + 2*(62.30009) + ((2*2^2 + 2*2) / (21 - 2 - 1)) # 129.2668
aicc_without_sig.lf = 2*2 + 2*(359.5163) + ((2*2^2 + 2*2) / (21 - 2 - 1)) # ~723.70 (the 359.5163 is the NLL, not the AICc)
aicc_without_a0_or_sig.lf = 2*1 + 2*(7565.03) + ((2*1^2 + 2*1) / (21 - 1 - 1)) # ~15132.27 (the 7565.03 is the NLL, not the AICc)
### Conclusion: Yes, definitely, because AICc with a0 and sig.lf is more than 40 units lower
### Setting initial params for all data
## mu.L, sig.L, k, mu.A, sig.A, sig.sci, sig.f, a0, sig.oto, sig.lf
p0 <- c( 70, 1, .10, 1, .10, 1, 0, 0, 1, 1)
lb <- c( 40, 0.01, .05, 0.1, .05, 0.1, 0, -10, 0.1, 0.1)
ub <- c( 110, 15.0, .50, 1.5, .50, 15.0, 0, 10, 15, 15)
### 6. Model including all Data sources - Equal weighting to each data type
## "Equal weighting" = each data stream's likelihood contribution is divided
## by its own sample size, so streams contribute equally regardless of n.
fit.vb.equalwt.grouped <- nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat, otodat=otodat, lfdat=lfdat, wt.oto=1/dim(otodat)[1], wt.tag=1/dim(tagdat)[1], wt.lf=1/length(lfdat$curr_month_year))
results = rbind(results, cbind('Model 6 - All Data - Equal Weighting', t(as.vector(fit.vb.equalwt.grouped$par))))
### 7. Model including all Data sources - weighting based on number of sample size
## Unit weights: each observation counts once, so larger data sets dominate.
fit.vb.byn.grouped <- nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat, otodat=otodat, lfdat=lfdat, wt.oto=1, wt.tag=1, wt.lf=1)
results = rbind(results, cbind('Model 7 - All Data - Weighted by n', t(as.vector(fit.vb.byn.grouped$par))))
### 8. Model including all Data sources treated individually - with equal weighting
## Otolith data split by source study into otodat..otodat4, each weighted by
## the inverse of its own sample size.
fit.vb.equalwt.indv <- nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat, tagdat2 = NULL, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], lfdat=lfdat, lfdat2=NULL, wt.oto= 1/dim(otodat[otodat$source == 'demartini', ])[1], wt.oto2= 1/dim(otodat[otodat$source == 'ralston and miyamoto', ])[1], wt.oto3=1/dim(otodat[otodat$source == 'andrews bomb carbon', ])[1], wt.oto4=1/dim(otodat[otodat$source == 'andrews lead radium', ])[1], wt.tag = 1/dim(tagdat)[1], wt.tag2 = 0, wt.lf = 1/length(lfdat$curr_month_year), wt.lf2 = 0)
results = rbind(results, cbind('Model 8 - Separated Data - Equal Weighting', t(as.vector(fit.vb.equalwt.indv$par))))
### 9. Model including all Data sources treated individually - weighting based on number of sample size
fit.vb.byn.indv <- nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat, tagdat2 = NULL, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], lfdat=lfdat, lfdat2=NULL, wt.oto= 1, wt.oto2= 1, wt.oto3=1, wt.oto4=1, wt.tag = 1, wt.tag2 = 0, wt.lf = 1, wt.lf2 = 0)
results = rbind(results, cbind('Model 9 - Separated Data - Weighted by n', t(as.vector(fit.vb.byn.indv$par))))
### 10. Model without Ralston & Miyamoto - Equal weighting (wt.oto2 = 0 drops that source)
fit.vb.byn.indv.no.ralston <- nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat, tagdat2 = NULL, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], lfdat=lfdat, lfdat2=NULL, wt.oto= 1/dim(otodat[otodat$source == 'demartini', ])[1], wt.oto2= 0, wt.oto3=1/dim(otodat[otodat$source == 'andrews bomb carbon', ])[1], wt.oto4=1/dim(otodat[otodat$source == 'andrews lead radium', ])[1], wt.tag = 1/dim(tagdat)[1], wt.tag2 = 0, wt.lf = 1/length(lfdat$curr_month_year), wt.lf2 = 0)
results = rbind(results, cbind('Model 10 - Separated Data - Equal Weighting - No R&M', t(as.vector(fit.vb.byn.indv.no.ralston$par))))
### 11. Model without Ralston & Miyamoto - weighted by n
## NOTE(review): this reuses the object name fit.vb.byn.indv.no.ralston,
## overwriting the equal-weighting fit from model 10 above (its parameters
## were already captured in `results`, so only the object is lost).
fit.vb.byn.indv.no.ralston <- nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat, tagdat2 = NULL, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], lfdat=lfdat, lfdat2=NULL, wt.oto= 1, wt.oto2= 0, wt.oto3=1, wt.oto4=1, wt.tag = 1, wt.tag2 = 0, wt.lf = 1, wt.lf2 = 0)
results = rbind(results, cbind('Model 11 - Separated Data - Weighted by n - No R&M', t(as.vector(fit.vb.byn.indv.no.ralston$par))))
##### NOW WITH PIFG DATA
## Same model structures 6-11, now also including the second tagging data
## set (tagdat2, PIFG). Fit objects from the non-PIFG runs are overwritten.
### 6. Model including all Data sources - Equal weighting to each data type
fit.vb.equalwt.grouped <- nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat, tagdat2 = tagdat2, otodat=otodat, lfdat=lfdat, wt.oto=1/dim(otodat)[1], wt.tag=1/dim(tagdat)[1], wt.tag2 = 1/dim(tagdat2)[1], wt.lf=1/length(lfdat$curr_month_year))
results = rbind(results, cbind('Model 6 - All Data - Equal Weighting + PIFG', t(as.vector(fit.vb.equalwt.grouped$par))))
### 7. Model including all Data sources - weighting based on number of sample size
fit.vb.byn.grouped <- nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat, tagdat2 = tagdat2, otodat=otodat, lfdat=lfdat, wt.oto=1, wt.tag=1, wt.tag2 = 1, wt.lf=1)
results = rbind(results, cbind('Model 7 - All Data - Weighted by n + PIFG', t(as.vector(fit.vb.byn.grouped$par))))
### 8. Model including all Data sources treated individually - with equal weighting
fit.vb.equalwt.indv <- nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat, tagdat2 = tagdat2, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], lfdat=lfdat, lfdat2=NULL, wt.oto= 1/dim(otodat[otodat$source == 'demartini', ])[1], wt.oto2= 1/dim(otodat[otodat$source == 'ralston and miyamoto', ])[1], wt.oto3=1/dim(otodat[otodat$source == 'andrews bomb carbon', ])[1], wt.oto4=1/dim(otodat[otodat$source == 'andrews lead radium', ])[1], wt.tag = 1/dim(tagdat)[1], wt.tag2 = 1/dim(tagdat2)[1], wt.lf = 1/length(lfdat$curr_month_year), wt.lf2 = 0)
results = rbind(results, cbind('Model 8 - Separated Data - Equal Weighting + PIFG', t(as.vector(fit.vb.equalwt.indv$par))))
### 9. Model including all Data sources treated individually - weighting based on number of sample size
fit.vb.byn.indv <- nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat, tagdat2 = tagdat2, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], lfdat=lfdat, lfdat2=NULL, wt.oto= 1, wt.oto2= 1, wt.oto3=1, wt.oto4=1, wt.tag = 1, wt.tag2 = 1, wt.lf = 1, wt.lf2 = 0)
results = rbind(results, cbind('Model 9 - Separated Data - Weighted by n + PIFG', t(as.vector(fit.vb.byn.indv$par))))
### 10. Model without Ralston & Miyamoto - Equal weighting
fit.vb.byn.indv.no.ralston <- nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat, tagdat2 = tagdat2, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], lfdat=lfdat, lfdat2=NULL, wt.oto= 1/dim(otodat[otodat$source == 'demartini', ])[1], wt.oto2= 0, wt.oto3=1/dim(otodat[otodat$source == 'andrews bomb carbon', ])[1], wt.oto4=1/dim(otodat[otodat$source == 'andrews lead radium', ])[1], wt.tag = 1/dim(tagdat)[1], wt.tag2 = 1/dim(tagdat2)[1], wt.lf = 1/length(lfdat$curr_month_year), wt.lf2 = 0)
results = rbind(results, cbind('Model 10 - Separated Data - Equal Weighting - No R&M + PIFG', t(as.vector(fit.vb.byn.indv.no.ralston$par))))
### 11. Model without Ralston & Miyamoto - weighted by n
fit.vb.byn.indv.no.ralston <- nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat, tagdat2 = tagdat2, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], lfdat=lfdat, lfdat2=NULL, wt.oto= 1, wt.oto2= 0, wt.oto3=1, wt.oto4=1, wt.tag = 1, wt.tag2 = 1, wt.lf = 1, wt.lf2 = 0)
results = rbind(results, cbind('Model 11 - Separated Data - Weighted by n - No R&M + PIFG', t(as.vector(fit.vb.byn.indv.no.ralston$par))))
### Cleaning up results and writing them out to a .csv
colnames(results) = c('model_id', 'mu.L', 'sig.L', 'k', 'mu.A', 'sig.A', 'sig.sci', 'sig.f', 'a0', 'sig.oto', 'sig.lf')
## NOTE(review): as.numeric(levels(...)[...]) assumes these columns are
## factors. Under R >= 4.0, rbind/cbind of character data yields character
## columns, where levels() returns NULL and this would break -- confirm the
## running R version, or use the is.factor() guard seen earlier in the file.
results$`time to 90%` = yrs_to_.9_linf(linf = as.numeric(levels(results$mu.L)[results$mu.L]), k = as.numeric(levels(results$k)[results$k]), a0 = as.numeric(levels(results$a0)[results$a0]))
print(results)
write.csv(results, file = file.path(run_results_dir, 'likelihood_parameter_estimates_with_full_data.csv'))
##### Determining the prefered model structure ####
print('Evaluating Model Structures')
## We will do this by comparing each model's parameters from training data to observations in validation data
## Model scoring metric is as follows: sum((predicted - observed)^2) / n
## Lower scoring metric indicates better model fit
## 2/3 train / 1/3 validation split sizes for the two tagging data sets;
## read as globals by evaluate_models() below.
n_train = round(dim(tagdat)[1] * (2/3))
n_train2 = round(dim(tagdat2)[1] * (2/3))
## Cross-validation comparison of candidate growth-model structures.
## Each iteration: split both tagging data sets 2/3 train / 1/3 validate,
## refit model structures 5-11 on the training split, then score every
## fitted model -- plus literature and Bayesian models -- by predictive
## variance on the held-out recaptures. Returns (invisibly) a data frame
## with one row per iteration and one score column per model.
## Reads globals: tagdat, tagdat2, otodat, lfdat, n_train, n_train2, npf,
## npA, joint.logl.f, calculate_predictive_variance, lit_vbgc_params.
evaluate_models = function(cross_validation_iterations = 10000){
mod_eval_results = data.frame(stringsAsFactors = FALSE)
## NOTE(review): these counters are incremented inside the %dopar% body, so
## the increments happen in worker copies and are discarded -- only the
## score vector returned at the end of each iteration survives. The counts
## accumulated here are never meaningful after the foreach loop.
model_na_results = rep(0, 7)
lit_vbgf = lit_vbgc_params[lit_vbgc_params$region %in% c('Hawaii - MHI & NWHI', 'Hawaii - MHI', 'Hawaii - NWHI'), ]
lit_vbgf_for_train = lit_vbgf[!(lit_vbgf$author %in% paste('Bayesian Model',1:4)), ]
bayes_models = lit_vbgf[(lit_vbgf$author %in% paste('Bayesian Model',1:4)), ]
## Parallel CV loop; rbind combines each iteration's score vector into rows.
mod_eval_results = foreach(i = 1:cross_validation_iterations, .combine = rbind) %dopar% {
train_index = sample(1:dim(tagdat)[1], size = n_train, replace = FALSE)
tagdat_train = tagdat[train_index, ]
train2_index = sample(1:dim(tagdat2)[1], size = n_train2, replace = FALSE)
tagdat2_train = tagdat2[train2_index, ]
## Held-out rows from both tagging data sets form the validation set.
tagdat_validate = rbind(tagdat[-train_index, ], tagdat2[-train2_index, ])
## One slot per model structure 5-11; stays NA if the fit fails.
score = rep(NA, 7)
### Setting initial params for all data
# mu.L, sig.L, k, mu.A, sig.A, sig.sci, sig.f, a0, sig.oto, sig.lf
## NOTE(review): p0[8] = 0.3 (a0) lies outside [lb[8], ub[8]] = [-0.4, -0.2];
## confirm nlminb accepts this start -- a rejection would be silently
## swallowed by the try() wrappers below and counted as a failed fit.
p0 <- c( 70, 1, .10, 1, .10, 1, 0, .3, 1, 1)
lb <- c( 40, 0.01, .05, 0.01, .05, 0.01, 0, -.4, 0.01, 0.01)
ub <- c( 110, 15.0, .50, 1.5, .50, 15.0, 0, -.2, 15, 15)
## Model 5: tagging data only.
var5 = NULL
var5 = try(nlminb(p0,joint.logl.f,lower=lb,upper=ub,npf=npf,npA=npA,tagdat=tagdat_train, tagdat2 = tagdat2_train, wt.oto=0,wt.tag=1, wt.tag2 = 1, wt.lf=0)$par, silent = TRUE)
if(is.numeric(var5[1])){
score[1] = calculate_predictive_variance(Lm = tagdat_validate[ ,1], dt = tagdat_validate[ ,4], linf = var5[1], k = var5[3], Lr_obs = tagdat_validate[ ,2])
} else {
model_na_results[1] = model_na_results[1] + 1
}
## Upper bounds relaxed (k up to 10, a0 up to 15) for the integrated
## models 6-11 fit below.
ub <- c( 110, 15.0, 10, 1.5, .50, 15.0, 0, 15, 15, 15)
## Model 6: all data, equal weighting per data stream.
var6 = NULL
var6 = try(nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat_train, tagdat2 = tagdat2_train, otodat=otodat, lfdat=lfdat, wt.oto=1/dim(otodat)[1], wt.tag=1/dim(tagdat_train)[1], wt.tag2=1/dim(tagdat2_train)[1], wt.lf=1/length(lfdat$curr_month_year))$par, silent = TRUE)
if(is.numeric(var6[1])){
score[2] = calculate_predictive_variance(Lm = tagdat_validate[ ,1], dt = tagdat_validate[ ,4], linf = var6[1], k = var6[3], Lr_obs = tagdat_validate[ ,2])
} else {
model_na_results[2] = model_na_results[2] + 1
}
## Model 7: all data, unit weights (weighted by n).
var7 = NULL
var7 = try(nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat_train, tagdat2 = tagdat2_train, otodat=otodat, lfdat=lfdat, wt.oto=1, wt.tag=1, wt.tag2 = 1, wt.lf=1)$par, silent = TRUE)
if(is.numeric(var7[1])){
score[3] = calculate_predictive_variance(Lm = tagdat_validate[ ,1], dt = tagdat_validate[ ,4], linf = var7[1], k = var7[3], Lr_obs = tagdat_validate[ ,2])
} else {
model_na_results[3] = model_na_results[3] + 1
}
## Model 8: otolith sources separated, equal weighting.
var8 = NULL
var8 = try(nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat_train, tagdat2 = tagdat2_train, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], lfdat=lfdat, lfdat2=NULL, wt.oto= 1/dim(otodat[otodat$source == 'demartini', ])[1], wt.oto2= 1/dim(otodat[otodat$source == 'ralston and miyamoto', ])[1], wt.oto3=1/dim(otodat[otodat$source == 'andrews bomb carbon', ])[1], wt.oto4=1/dim(otodat[otodat$source == 'andrews lead radium', ])[1], wt.tag = 1/dim(tagdat_train)[1], wt.tag2 = 1/dim(tagdat2_train)[1], wt.lf = 1/length(lfdat$curr_month_year), wt.lf2 = 0)$par, silent = TRUE)
if(is.numeric(var8[1])){
score[4] = calculate_predictive_variance(Lm = tagdat_validate[ ,1], dt = tagdat_validate[ ,4], linf = var8[1], k = var8[3], Lr_obs = tagdat_validate[ ,2])
} else {
model_na_results[4] = model_na_results[4] + 1
}
## Model 9: otolith sources separated, unit weights.
var9 = NULL
var9 = try(nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat_train, tagdat2 = tagdat2_train, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], lfdat=lfdat, lfdat2=NULL, wt.oto= 1, wt.oto2= 1, wt.oto3=1, wt.oto4=1, wt.tag = 1, wt.tag2 = 1, wt.lf = 1, wt.lf2 = 0)$par, silent = TRUE)
if(is.numeric(var9[1])){
score[5] = calculate_predictive_variance(Lm = tagdat_validate[ ,1], dt = tagdat_validate[ ,4], linf = var9[1], k = var9[3], Lr_obs = tagdat_validate[ ,2])
} else {
model_na_results[5] = model_na_results[5] + 1
}
## Model 10: as model 8 but Ralston & Miyamoto dropped (wt.oto2 = 0).
var10 = NULL
var10 = try(nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat_train, tagdat2 = tagdat2_train, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], lfdat=lfdat, lfdat2=NULL, wt.oto= 1/dim(otodat[otodat$source == 'demartini', ])[1], wt.oto2= 0, wt.oto3=1/dim(otodat[otodat$source == 'andrews bomb carbon', ])[1], wt.oto4=1/dim(otodat[otodat$source == 'andrews lead radium', ])[1], wt.tag = 1/dim(tagdat_train)[1], wt.tag2=1/dim(tagdat2_train)[1], wt.lf = 1/length(lfdat$curr_month_year), wt.lf2 = 0)$par, silent = TRUE)
if(is.numeric(var10[1])){
score[6] = calculate_predictive_variance(Lm = tagdat_validate[ ,1], dt = tagdat_validate[ ,4], linf = var10[1], k = var10[3], Lr_obs = tagdat_validate[ ,2])
} else {
model_na_results[6] = model_na_results[6] + 1
}
## Model 11: as model 9 but Ralston & Miyamoto dropped.
var11 = NULL
var11 = try(nlminb(p0, joint.logl.f, lower=lb, upper=ub, npf=npf, npA=npA, tagdat=tagdat_train, tagdat2 = tagdat2_train, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], lfdat = lfdat, lfdat2 = NULL, wt.oto = 1, wt.oto2= 0, wt.oto3=1, wt.oto4=1, wt.tag = 1, wt.tag2 = 1, wt.lf = 1, wt.lf2 = 0)$par, silent = TRUE)
if(is.numeric(var11[1])){
score[7] = calculate_predictive_variance(Lm = tagdat_validate[ ,1], dt = tagdat_validate[ ,4], linf = var11[1], k = var11[3], Lr_obs = tagdat_validate[ ,2])
} else {
model_na_results[7] = model_na_results[7] + 1
}
## Now getting fits from literature data
## NOTE(review): the inner loops below reuse `i`, shadowing the foreach
## iteration variable (harmless here since `i` is not used afterwards,
## but a different index name would be clearer).
lit_var_scores = rep(0, length(lit_vbgf_for_train$author))
for(i in 1:length(lit_vbgf_for_train$author)){
lit_var_scores[i] = calculate_predictive_variance(Lm = tagdat_validate[ ,1], dt = tagdat_validate[ ,4], linf = lit_vbgf_for_train$linf[i], k = lit_vbgf_for_train$k[i], Lr_obs = tagdat_validate[ ,2])
}
## Comparing bootstrapped model to Bayes_models
## NOTE(review): this overwrites the author-filtered bayes_models from
## above with hard-coded rows 21:24 of lit_vbgc_params -- fragile if the
## row order of that table ever changes; confirm both selections agree.
bayes_models = lit_vbgc_params[21:24, ]
bayes_var_scores = rep(0, length(bayes_models$author))
for(i in 1:length(bayes_models$author)){
bayes_var_scores[i] = calculate_predictive_variance(Lm = tagdat_validate[ ,1], dt = tagdat_validate[ ,4], linf = bayes_models$linf[i], k = bayes_models$k[i], Lr_obs = tagdat_validate[ ,2])
}
## Per-iteration result: 7 NLL scores, then literature, then Bayesian.
return(c(score, lit_var_scores, bayes_var_scores))
}
## Column layout assumed downstream: 1:7 NLL models, 8:18 literature,
## 19:22 Bayesian (i.e. 11 literature authors + 4 Bayesian models).
colnames(mod_eval_results) = c(paste('model', 5:11), lit_vbgf_for_train$author, bayes_models$author)
return(invisible(mod_eval_results))
}
#### What was the prefered model?
## Run the cross-validation scoring and reshape the per-iteration scores of
## the seven NLL model structures to long format for plotting.
n_iterations = 10000
mod_eval_results = as.data.frame(evaluate_models(cross_validation_iterations = n_iterations))
## Columns 1:7 hold the scores for model structures 5-11; transpose so each
## row is a model and each column one CV iteration.
mod_eval_results_lf = as.data.frame(t(mod_eval_results[ ,1:7]))
mod_eval_results_lf$model_id = rownames(mod_eval_results_lf)
mod_eval_results_lf = reshape(mod_eval_results_lf, varying = colnames(mod_eval_results_lf[1:n_iterations]), idvar = 'model_id', direction = "long")
## (Removed a no-op self-assignment of mod_eval_results_lf that was here.)
boxplot(mod_eval_results_lf$result ~ mod_eval_results_lf$model_id, ylim = c(0, 15))
#### Declaring the best model - The model that has the lowest mean evaluation result
## Helper: for each row of a score table, return the name of the column with
## the minimum score. Rows whose minimum is undefined (all NA) yield NULL
## from names(which.min(...)) and are dropped by unlist(), matching the
## original c()-append loop's skip behavior while avoiding O(n^2) growth.
best_per_iteration = function(score_df) {
  unlist(lapply(seq_len(nrow(score_df)), function(r) names(which.min(score_df[r, ]))))
}
## First finding best structure for integrative model (columns 2:7 of
## mod_eval_results are model structures 6-11).
integrative_models = mod_eval_results[ ,2:7]
integrative_model_scores = best_per_iteration(integrative_models)
## Which model was most frequently the best one?
best_integrative_model = names(which.max(table(integrative_model_scores)))
## Now comparing best integrative model to just tagging data (model 5).
integrative_vs_tagging = mod_eval_results[ ,which(colnames(mod_eval_results) %in% c('model 5', best_integrative_model))]
## Fix: the original loop iterated over nrow(integrative_models) here --
## the same row count, but the wrong variable; score the table actually
## being compared.
integrative_vs_tagging_model_scores = best_per_iteration(integrative_vs_tagging)
best_model = names(which.max(table(integrative_vs_tagging_model_scores)))
## Barplot: how often the tagging-only model vs. the best integrative model
## wins across CV iterations.
pdf(file.path(run_results_dir, 'Barplot of tagging vs. best integrative model.pdf'), width = 11, height = 8.5)
barplot(prop.table(table(integrative_vs_tagging_model_scores)))
dev.off()
### Comparing against all lit models for the region
## Column layout of mod_eval_results: 1:7 = NLL model structures 5-11,
## 8:18 = literature models, 19:22 = Bayesian models.
nll_names = colnames(mod_eval_results)[1:7]
lit_names = colnames(mod_eval_results)[8:18]
bayes_names = colnames(mod_eval_results)[19:22]
## (best_integrative_model is already contained in nll_names, so listing it
## separately is redundant but harmless.)
lit_vs_int_vs_bayes = mod_eval_results[ ,colnames(mod_eval_results) %in% c(best_integrative_model, nll_names, bayes_names)]
## Winner per iteration. Fix: iterate over the rows of the table actually
## being scored (the original looped over nrow(integrative_models) -- same
## row count, wrong variable). unlist() drops NULLs from all-NA rows,
## matching the original c()-append behavior.
lit_vs_int_vs_bayes_scores = unlist(lapply(seq_len(nrow(lit_vs_int_vs_bayes)), function(r) names(which.min(lit_vs_int_vs_bayes[r, ]))))
best_model_lit_bayes_integrated = names(which.max(table(lit_vs_int_vs_bayes_scores)))
pdf(file.path(run_results_dir, 'Barplot of lit vs. bayes vs. best integrative model.pdf'), width = 11, height = 8.5)
par(las = 2)
barplot(prop.table(table(lit_vs_int_vs_bayes_scores)))
dev.off()
##### Computing Model Comparison Stats #####
print('Computing Model Comparision Stats')
### Subsetting model structures 6-11
nll_eval_results = mod_eval_results[, 2:7]
## Iterations where at least one model structure failed to converge (NA
## score). Vectorized replacement for the original row-by-row append loop;
## which() yields unique indices, so the old unique() call is unnecessary.
na_index = which(rowSums(is.na(nll_eval_results)) > 0)
### How many iterations failed to converge?
print(paste('Iterations failing to converge:', length(na_index)))
# nll_eval_results = nll_eval_results[-na_index, ]
## Getting summary stats for NLL models
## (range() returns c(min, max); paste() vectorizes, printing two strings.)
print('Summary stats of competing model structures')
print(paste('Range: ', range(nll_eval_results, na.rm = TRUE)))
## Bug fix: as.vector() on a data.frame returns a list, so the mean()/sd()
## calls below previously returned NA with a warning. unlist() flattens the
## score columns into one numeric vector before filtering NAs.
nll_vec = unlist(nll_eval_results, use.names = FALSE)
nll_vec = nll_vec[!is.na(nll_vec)]
print(paste('mean:', mean(nll_vec)))
print(paste('standard deviation:', sd(nll_vec)))
### Determining which model performed best over all iterations
## Name of the lowest-scoring structure per iteration; all-NA rows yield
## NULL and are dropped by unlist(), matching the original c()-append loop.
best_models = unlist(lapply(seq_len(nrow(nll_eval_results)), function(r) names(which.min(nll_eval_results[r, ]))))
print('Best Models')
table(best_models)
### Getting stats on the best performing model
print('Summary Stats for prefered integrated model')
## Single-column `[ , ]` subset drops to a numeric vector of CV scores.
best_nll_mod = mod_eval_results[ ,best_model]
print(paste('range:', range(best_nll_mod, na.rm = TRUE)))
print(paste('mean:', mean(best_nll_mod, na.rm = TRUE)))
print(paste('standard deviation:', sd(best_nll_mod, na.rm = TRUE)))
### Getting stats on the model based only on tagging data
print('Summary Stats for Tagging Only Model (Model 5)')
mod_5 = as.vector(mod_eval_results[ ,'model 5'])
print(paste('range:', range(mod_5, na.rm = TRUE)))
print(paste('mean:', mean(mod_5[!is.na(mod_5)])))
print(paste('standard deviation:', sd(mod_5[!is.na(mod_5)])))
### Comparing the perfered model to the tagging data only model
print('Comparing prefered integrative and tagging only models')
## cbind of two numeric vectors -> a matrix (not a data.frame).
## NOTE(review): if best_model happens to be 'model 5', both columns here
## are the same vector -- confirm that case cannot occur upstream.
tagging_vs_composite_df = cbind(mod_eval_results$`model 5`, mod_eval_results[ ,best_model])
colnames(tagging_vs_composite_df) = c('model 5', best_model)
## Per-iteration winner between the two models (grown with c(); fine for
## this size, though preallocation would be cleaner).
tagging_vs_composite = c()
for(i in 1:length(tagging_vs_composite_df[ ,1])){
tagging_vs_composite = c(tagging_vs_composite, colnames(tagging_vs_composite_df)[which.min(tagging_vs_composite_df[i, ])])
}
table(tagging_vs_composite)
### Summary stats on tagging and integrative models
## Positive values mean the integrative model predicted better (lower
## predictive variance) than tagging-only for that iteration.
pred_var_diff_tvc = tagging_vs_composite_df[ ,1] - tagging_vs_composite_df[ ,2]
print(paste('range in predicteve difference:', range(pred_var_diff_tvc, na.rm = TRUE)))
print(paste('mean:', mean(pred_var_diff_tvc, na.rm = TRUE)))
print(paste('standard deviation:', sd( as.vector(pred_var_diff_tvc)[!is.na(as.vector(pred_var_diff_tvc))])))
#### Getting summary stats on all literature models
print('Summary Statistics for Literature Models')
## Columns 8:18 of mod_eval_results hold the literature-model scores.
lit = mod_eval_results[, 8:18]
print(paste('range:', range(lit, na.rm = TRUE)))
## Bug fix: as.vector() on a data.frame returns a list, so the mean()/sd()
## calls below previously returned NA with a warning. unlist() flattens the
## columns into a single numeric vector.
lit_vec = unlist(lit, use.names = FALSE)
lit_vec = lit_vec[!is.na(lit_vec)]
print(paste('mean:', mean(lit_vec)))
print(paste('Standard Deviation:', sd(lit_vec)))
## Comparing Literature, MLE, and Bayesian models
print('Comparing Literature, MLE, and Bayesian Models')
## Per-iteration table: the best model within each family (MLE ensemble,
## literature, Bayesian) and the best overall.
model_structure_selection = data.frame()
## NOTE(review): pref_mod is not defined anywhere in this section of the
## script -- presumably created elsewhere (the scores of the preferred
## model, cf. best_nll_mod above). Confirm it exists before this loop runs.
nll_names = colnames(pref_mod)
lit_names = colnames(mod_eval_results)[8:18]
bayes_names = colnames(mod_eval_results)[19:22]
for(i in 1:length(mod_eval_results[ ,1])){
score_ens = min(pref_mod[i], na.rm = TRUE)
best_ens = best_model
score_lit = min(mod_eval_results[i,8:18], na.rm = TRUE)
## NOTE(review): which(... == score_lit) can return more than one name on
## exact ties, which would make write_line (and the rbind below) multi-row
## for that iteration -- confirm ties are impossible or handle them.
best_lit = lit_names[which(mod_eval_results[i,8:18] == score_lit)]
score_bayes = min(mod_eval_results[i,19:22], na.rm = TRUE)
best_bayes = bayes_names[which(mod_eval_results[i,19:22] == score_bayes)]
best_overall = c('MLE', 'Lit', 'Bayes')[which.min(c(score_ens, score_lit, score_bayes))]
best_mod = c(best_ens, best_lit, best_bayes)[which.min(c(score_ens, score_lit, score_bayes))]
write_line = data.frame('best_ll_mod' = best_ens, 'score_ensemble' = score_ens, 'best_lit_mod' = best_lit, 'score_lit' = score_lit, 'best_bayes_mod' = best_bayes, 'score_bayes' = score_bayes, 'best_model' = best_overall, 'best_mod' = best_mod)
model_structure_selection = rbind(model_structure_selection, write_line)
}
## Count how often each literature model was the within-family winner;
## aggregate(..., FUN = length) yields columns Group.1 (model) and x (count).
lit_eval_results_table = aggregate(model_structure_selection$best_lit_mod, by = list(model_structure_selection$best_lit_mod), FUN = length)
best_lit_mod = lit_eval_results_table$Group.1[which.max(lit_eval_results_table$x)]
### Getting summary stats on the best performing literature model
print('Summary Stats of best performing lit mod')
best_lit = mod_eval_results[ ,as.character(best_lit_mod)]
print(paste('range:', range(best_lit, na.rm = TRUE)))
best_lit_vec = as.vector(best_lit)
best_lit_vec = best_lit_vec[!is.na(best_lit_vec)]
print(paste('mean:', mean(best_lit_vec)))
print(paste('standard deviation:', sd(best_lit_vec)))
## Getting summary stats for the second best performing literature model
print('Summary Stats of second best performing literature model')
## order(..., decreasing = TRUE)[2] picks the model with the second-highest
## win count.
second_best_lit_mod = as.character(lit_eval_results_table$Group.1[order(lit_eval_results_table$x, decreasing = TRUE)[2]])
second_best_lit = mod_eval_results[ ,as.character(second_best_lit_mod)]
second_best_lit_vec = as.vector(second_best_lit)
print(paste('range:', range(second_best_lit_vec, na.rm = TRUE)))
print(paste('mean:', mean(second_best_lit_vec, na.rm = TRUE)))
print(paste('standard deviation:', sd(second_best_lit_vec, na.rm = TRUE)))
## Write results out
## Snapshot the full workspace before the (long) bootstrap runs below.
save.image(file = file.path(run_results_dir, 'workspace_image_preboot.RData'))
##### Bootstrapping tagging only and prefered models #####
print('Bootstrapping model 5 and prefered model')
boot_iterations = 10000
bootstrap_results = list()
### We'll begin by bootstrapping Model 5 (just tagging data)
## Specifying starting parameters, as well as upper and lower bounds for parameter estimation
# mu.L, sig.L, k, mu.A, sig.A, sig.sci, sig.f, a0, sig.oto, sig.lf
## sig.f, a0, sig.oto and sig.lf are pinned at 0 (lb == ub == 0) -- the
## tagging-only likelihood does not use them.
p0 <- c( 70, 1, .10, 1.0, .10, 1, 0, 0, 0, 0)
lb <- c( 50, 0.1, .05, 0.1, .05, 0.1, 0, 0, 0, 0)
ub <- c( 110, 15.0, .50, 1.5, .50, 15.0, 0, 0, 0, 0)
print('Booting Model 5')
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = "model 5")
timer5full = proc.time()
## Three model-5 variants: original tagging data only, original + PIFG,
## and PIFG only (wt.tag = 0 silences the original tag data in the third).
## NOTE(review): all three calls pass the same filename
## 'bootstrapped_parameter_estimates_model_5' -- if bootstrap_growth_params
## writes output to that file, the later calls overwrite the earlier ones.
## Confirm and give each variant a distinct filename.
bootstrap_results$booted_param_ests_model5 = bootstrap_growth_params(filename = 'bootstrapped_parameter_estimates_model_5', boot_iterations = boot_iterations, wt.oto = 0, wt.lf = 0, wt.tag = 1, tagdat = tagdat)
bootstrap_results$booted_param_ests_model5withPIFG = bootstrap_growth_params(filename = 'bootstrapped_parameter_estimates_model_5', boot_iterations = boot_iterations, wt.oto = 0, wt.lf = 0, wt.tag = 1, tagdat = tagdat, tagdat2 = tagdat2, wt.tag2 = 1)
bootstrap_results$booted_param_ests_model5justPIFG = bootstrap_growth_params(filename = 'bootstrapped_parameter_estimates_model_5', boot_iterations = boot_iterations, wt.oto = 0, wt.lf = 0, wt.tag = 0, tagdat = tagdat, tagdat2 = tagdat2, wt.tag2 = 1)
## Elapsed wall-clock time in hours.
boot_time = (proc.time() - timer5full)[3] / 60 / 60
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = paste(round(boot_time, digits = 2), "Hours later, bootstrapping model 5 complete!"))
### Now we'll bootstrap the prefered model structure
## Setting initial params for all data
# mu.L, sig.L, k, mu.A, sig.A, sig.sci, sig.f, a0, sig.oto, sig.lf
p0 <- c( 70, 1, .10, 1, .1, 1, 0, 0, 1, 1)
lb <- c( 50, 0.1, .05, 0.1, .05, 0.1, 0, -10, 0.1, 0.1)
ub <- c( 110, 15.0, .50, 1.5, .50, 15.0, 0, 10, 15, 15)
## NOTE(review): the commented-out dispatch on mod_eval_results_table below
## suggests only the preferred structure was meant to be bootstrapped;
## as written, models 6, 7 and 8 are all bootstrapped unconditionally.
#if (mod_eval_results_table[which.max(mod_eval_results_table[ ,2]), 1] == 'model 6') {
## 6. Model including all Data sources - Equal weighting to each data type
print('Booting Model 6')
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = "model 6")
timer6 = proc.time()
bootstrap_results$booted_param_ests_model6 = bootstrap_growth_params(filename = 'bootstrapped_parameter_estimates_model_6',boot_iterations = boot_iterations, wt.oto = 1/length(otodat$age), wt.lf = 1/length(lfdat$curr_month_year), wt.tag = 1/dim(tagdat)[1], otodat = otodat, tagdat = tagdat, pseudolf = pseudo_data)
boot_time = (proc.time() - timer6)[3] / 60 / 60
# send_push(user = 'uGEHvA4hr36tsrCCtpSv4sUUxVuTqN', message = paste(round(boot_time, digits = 2), "Hours later, bootstrapping model 6 complete!"))
#} else if (mod_eval_results_table[which.max(mod_eval_results_table[ ,2]), 1] == 'model 7') {
## 7. Model including all Data sources - weighting based on number of sample size
print('Booting Model 7')
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = "model 7")
timer7 = proc.time()
bootstrap_results$booted_param_ests_model7 = bootstrap_growth_params(filename = 'bootstrapped_parameter_estimates_model_7_all_data', boot_iterations = boot_iterations,tagdat=tagdat, otodat=otodat, pseudolf=pseudo_data, wt.oto=1, wt.tag=1, wt.lf=1)
boot_time = (proc.time() - timer7)[3] / 60 / 60
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = paste(round(boot_time, digits = 2), "Hours later, bootstrapping model 7 complete!"))
#} else if (mod_eval_results_table[which.max(mod_eval_results_table[ ,2]), 1] == 'model 8') {
## 8. Model including all Data sources treated individually - with equal weighting
print('Booting Model 8')
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = "model 8")
timer8 = proc.time()
bootstrap_results$booted_param_ests_model8 = bootstrap_growth_params(filename = 'bootstrapped_parameter_estimates_model_8_all_data', boot_iterations = boot_iterations, tagdat=tagdat, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], pseudolf=pseudo_data, pseudolf2=NULL, wt.oto= 1/dim(otodat[otodat$source == 'demartini', ])[1], wt.oto2= 1/dim(otodat[otodat$source == 'ralston and miyamoto', ])[1], wt.oto3=1/dim(otodat[otodat$source == 'andrews bomb carbon', ])[1], wt.oto4=1/dim(otodat[otodat$source == 'andrews lead radium', ])[1], wt.tag = 1/dim(tagdat)[1], wt.lf = 1/length(pseudolf$curr_month_year), wt.lf2 = 0)
boot_time = (proc.time() - timer8)[3] / 60 / 60
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = paste(round(boot_time, digits = 2), "Hours later, bootstrapping model 8 complete!"))
#} else if (mod_eval_results_table[which.max(mod_eval_results_table[ ,2]), 1] == 'model 9') {
## 9. Model including all Data sources treated individually - weighting based on number of sample size
print('Booting Model 9')
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = "model 9")
timer9 = proc.time()
bootstrap_results$booted_param_ests_model9 = bootstrap_growth_params(filename = 'bootstrapped_parameter_estimates_model_9', boot_iterations = boot_iterations, tagdat=tagdat, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], pseudolf=pseudo_data, pseudolf2=NULL, wt.oto= 1, wt.oto2= 1, wt.oto3=1, wt.oto4=1, wt.tag = 1, wt.lf = 1, wt.lf2 = 0)
boot_time = (proc.time() - timer9)[3] / 60 / 60
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = paste(round(boot_time, digits = 2), "Hours later, bootstrapping model 9 complete!"))
#} else if (mod_eval_results_table[which.max(mod_eval_results_table[ ,2]), 1] == 'model 10') {
## 10. Model without Ralston & Miyamoto - Equal weighting (Because Brett said this was shit!)
print('Booting Model 10')
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = "model 10")
timer10 = proc.time()
bootstrap_results$booted_param_ests_model10 = bootstrap_growth_params(filename = 'bootstrapped_parameter_estimates_model_10', boot_iterations = boot_iterations, tagdat=tagdat, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], pseudolf=pseudo_data, pseudolf2 = NULL, wt.oto= 1/dim(otodat[otodat$source == 'demartini', ])[1], wt.oto2= 0, wt.oto3=1/dim(otodat[otodat$source == 'andrews bomb carbon', ])[1], wt.oto4=1/dim(otodat[otodat$source == 'andrews lead radium', ])[1], wt.tag = 1/dim(tagdat)[1], wt.lf = 1/length(pseudolf$curr_month_year), wt.lf2 = 0)
boot_time = (proc.time() - timer10)[3] / 60 / 60
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = paste(round(boot_time, digits = 2), "Hours later, bootstrapping model 10 complete!"))
#} else if (mod_eval_results_table[which.max(mod_eval_results_table[ ,2]), 1] == 'model 11') {
### 11. Model without Ralston & Miyamoto - weighted by n (Because Brett said this was shit!)
print('Booting Model 11')
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = "model 11")
timer11 = proc.time()
bootstrap_results$booted_param_ests_model12 = bootstrap_growth_params(filename = 'bootstrapped_parameter_estimates_model_11', boot_iterations = boot_iterations, tagdat=tagdat, tagdat2 = tagdat2, otodat=otodat[otodat$source == 'demartini', ], otodat2=otodat[otodat$source == 'ralston and miyamoto', ], otodat3=otodat[otodat$source == 'andrews bomb carbon', ], otodat4=otodat[otodat$source == 'andrews lead radium', ], pseudolf=pseudo_data, pseudolf2=NULL, wt.oto= 1, wt.oto2= 0, wt.oto3=1, wt.oto4=1, wt.tag = 1, wt.tag2 = 1, wt.lf = 1, wt.lf2 = 0)
boot_time = (proc.time() - timer11)[3] / 60 / 60
# send_push(user = 'uGEHvA4hr37tsrCCtpSv4sUUxVuTqN', message = paste(round(boot_time, digits = 2), "Hours later, bootstrapping model 11 complete!"))
#}
save.image(file = file.path(src_dir, 'workspace_image.RData'))
|
###########################
#
# Auxiliary functions for
# adegenet objects
#
# T. Jombart
###########################
##############################
# Method truenames for genind
##############################
## Deprecated accessor for the raw allele table; superseded by tab().
setGeneric("truenames", function(x) standardGeneric("truenames"))

## genind method: emit the deprecation message, then hand back @tab as-is.
setMethod("truenames", signature(x = "genind"), function(x) {
  message("This accessor is now deprecated. Please use 'tab' instead.")
  x@tab
})
##############################
# Method truenames for genpop
##############################
## genpop method: identical contract to the genind method -- deprecation
## message followed by the raw @tab slot.
setMethod("truenames", signature(x = "genpop"), function(x) {
  message("This accessor is now deprecated. Please use 'tab' instead.")
  x@tab
})
###########################
## Generic / methods 'tab'
###########################
#'
#' Access allele counts or frequencies
#'
#' This accessor is used to retrieve a matrix of allele data.
#' By default, a matrix of integers representing allele counts is returned.
#' If \code{freq} is TRUE, then data are standardised as frequencies, so that for any individual and any locus the data sum to 1.
#' The argument \code{NA.method} allows to replace missing data (NAs).
#' This accessor replaces the previous function \code{truenames} as well as the function \code{makefreq}.
#'
#' @export
#'
#' @aliases tab
#'
#' @rdname tab
#'
#' @docType methods
#'
#' @param x a \linkS4class{genind} or \linkS4class{genpop} object.
#' @param freq a logical indicating if data should be transformed into relative frequencies (TRUE); defaults to FALSE.
#' @param NA.method a method to replace NA; asis: leave NAs as is; mean: replace by the mean allele frequencies; zero: replace by zero
#' @param ... further arguments passed to other methods.
#' @return a matrix of integers or numeric
#'
#' @examples
#'
#' data(microbov)
#' head(tab(microbov))
#' head(tab(microbov,freq=TRUE))
#'
#'
## Generic for the 'tab' accessor; genind and genpop methods are defined below.
setGeneric("tab", function(x, ...) standardGeneric("tab"))
## Shared worker behind the genind 'tab' method.
##
## @param x         an object carrying @tab (individuals x alleles matrix)
##                  and @ploidy (numeric, one value per individual) slots.
## @param freq      if TRUE, scale each row by that individual's ploidy so
##                  rows hold relative frequencies; otherwise return counts.
## @param NA.method "asis" leaves NAs untouched; "mean" replaces each NA by
##                  its column mean (computed without NAs); "zero" replaces
##                  NAs by zero.
## @param ...       unused; kept for call compatibility with the methods.
## @return a matrix of allele counts (integers) or frequencies (numeric).
.tabGetter <- function(x, freq=FALSE, NA.method=c("asis","mean","zero"), ...){
  ## handle arguments
  NA.method <- match.arg(NA.method)
  ## get matrix of data; dividing the matrix by x@ploidy recycles down the
  ## columns, i.e. each row/individual is scaled by its own ploidy
  if (!freq){
    out <- x@tab
  } else {
    out <- x@tab/x@ploidy
  }
  ## replace NAs if needed
  if(NA.method=="mean"){
    ## impute each NA with its column mean
    f1 <- function(vec){
      m <- mean(vec,na.rm=TRUE)
      vec[is.na(vec)] <- m
      return(vec)
    }
    out <- apply(out, 2, f1)
  }
  if(NA.method=="zero"){
    ## fixed: the original used ifelse(freq, 0, 0L) -- a scalar misuse of
    ## the vectorized ifelse(); a plain if/else is the idiomatic equivalent
    ## (0L keeps count matrices integer, 0 keeps frequency matrices double).
    out[is.na(out)] <- if (freq) 0 else 0L
  }
  ## return output
  return(out)
}
#' @rdname tab
#' @aliases tab,genind-methods
#' @aliases tab.genind
## 'tab' method for genind: thin wrapper, all logic lives in .tabGetter().
setMethod("tab", signature(x = "genind"),
function (x, freq = FALSE, NA.method = c("asis","mean","zero"), ...){
.tabGetter(x, freq = freq, NA.method = NA.method, ...)
})
#' @rdname tab
#' @aliases tab,genpop-methods
#' @aliases tab.genpop
## 'tab' method for genpop: returns the populations x alleles table, as raw
## counts or, when freq=TRUE, as within-locus frequencies (for each population
## the alleles of one locus sum to 1).  NA handling mirrors the genind method.
setMethod("tab", signature(x="genpop"), function(x, freq=FALSE, NA.method=c("asis","mean","zero"), ...){
## handle arguments
NA.method <- match.arg(NA.method)
# outdim <- dim(x@tab)
## get matrix of data
if(!freq) {
out <- x@tab
} else {
out <- x@tab
## f1 scales a vector of counts to frequencies (NA-safe denominator)
f1 <- function(vec) return(vec/sum(vec,na.rm=TRUE))
## compute frequencies
fac <- x@loc.fac
## no locus factor: treat all columns as one locus
if (is.null(fac)) fac <- rep(1, nLoc(x))
## apply() over rows yields, per population, a list of per-locus vectors
out <- apply(x@tab, 1, tapply, fac, f1)
if (ncol(x@tab) > 1){
## reshape into matrix
## column names recovered from the first population's nested list
col.names <- do.call(c,lapply(out[[1]],names))
row.names <- names(out)
out <- matrix(unlist(out), byrow=TRUE, nrow=nrow(x@tab),
dimnames=list(row.names, col.names))
## reorder columns
out <- out[, colnames(x@tab), drop = FALSE]
} else {
## single-column edge case: rebuild the one-column matrix directly
out <- matrix(out, nrow = length(out), ncol = 1,
dimnames = list(rownames(x@tab), colnames(x@tab)))
}
}
## replace NAs if needed
if(NA.method=="mean"){
## impute each NA with its column mean (computed without NAs)
f1 <- function(vec){
m <- mean(vec, na.rm=TRUE)
vec[is.na(vec)] <- m
return(vec)
}
out <- apply(out, 2, f1)
}
if(NA.method=="zero"){
## NOTE(review): scalar misuse of ifelse(); both branches are zero, so this
## is effectively out[is.na(out)] <- 0 -- a plain if/else would be clearer.
out[is.na(out)] <- ifelse(freq, 0, 0L)
}
# dim(out) <- outdim
## return output
return(out)
})
###########################
# Method seploc for genind
###########################
## Generic for splitting an object by locus.
setGeneric("seploc", function(x, ...) standardGeneric("seploc"))
## seploc, genind method: return one genind (or one allele-count matrix) per
## locus.  Not available for presence/absence (PA) data.
setMethod("seploc", signature(x="genind"), function(x,truenames=TRUE,res.type=c("genind","matrix")){
truenames <- TRUE # this argument will be deprecated
if(x@type=="PA"){
msg <- paste("seploc is not implemented for presence/absence markers")
cat("\n",msg,"\n")
return(invisible())
}
if(!is.genind(x)) stop("x is not a valid genind object")
res.type <- match.arg(res.type)
## make separate tables
## columns of @tab are grouped by locus via @loc.fac: one sub-matrix per locus
kX <- list()
locfac.char <- as.character(x@loc.fac)
for(i in locNames(x)){
kX[[i]] <- x@tab[, i==locfac.char,drop=FALSE]
}
prevcall <- match.call()
if(res.type=="genind"){
## ploidy bug fixed by Zhian N. Kamvar
##kX <- lapply(kX, genind, pop=x@pop, prevcall=prevcall)
kX <- lapply(kX, genind, pop=x@pop, prevcall=prevcall, ploidy=x@ploidy, type=x@type)
## propagate the shared @other slot to every per-locus object
## NOTE(review): seq_along(kX) would be safer than 1:length(kX) should kX
## ever be empty, though a genind always has at least one locus here.
for(i in 1:length(kX)){
kX[[i]]@other <- x@other
}
}
return(kX)
})
###########################
# Method seploc for genpop
###########################
## seploc, genpop method: same contract as the genind method, returning one
## genpop (or one matrix) per locus; not available for PA data.
setMethod("seploc", signature(x="genpop"), function(x,truenames=TRUE,res.type=c("genpop","matrix")){
truenames <- TRUE # this argument will be deprecated
if(x@type=="PA"){
msg <- paste("seploc is not implemented for presence/absence markers")
cat("\n",msg,"\n")
return(invisible())
}
if(!is.genpop(x)) stop("x is not a valid genpop object")
res.type <- match.arg(res.type)
if(res.type=="genpop") { truenames <- TRUE }
## NOTE(review): temp/nloc below are computed but never used afterwards --
## apparently dead code left from an older implementation.
temp <- x@loc.fac
nloc <- length(levels(temp))
levels(temp) <- 1:nloc
## make separate tables
## columns of @tab are grouped by locus via @loc.fac: one sub-matrix per locus
kX <- list()
locfac.char <- as.character(x@loc.fac)
for(i in locNames(x)){
kX[[i]] <- x@tab[,i==locfac.char,drop=FALSE]
}
names(kX) <- locNames(x)
prevcall <- match.call()
if(res.type=="genpop"){
kX <- lapply(kX, genpop, prevcall=prevcall, ploidy=x@ploidy, type=x@type)
## propagate the shared @other slot to every per-locus object
for(i in 1:length(kX)){
kX[[i]]@other <- x@other
}
}
return(kX)
})
###############
# '$' operator
###############
## '$' gives read access to any slot of a genind by name.
setMethod("$", "genind", function(x, name) {
  slot(x, name)
})

## '$<-' writes a slot, validating the object on assignment (check=TRUE),
## and returns the modified object.
setMethod("$<-", "genind", function(x, name, value) {
  slot(x, name, check = TRUE) <- value
  x
})
##################
# Function seppop
##################
## Generic for splitting an object by population.
setGeneric("seppop", function(x, ...) standardGeneric("seppop"))
## genind
## seppop, genind method: split a genind into one object (or one allele
## matrix) per population level.  'pop' may be NULL (use @pop), a
## formula/expression (delegated to setPop<-), or a factor-like vector.
setMethod("seppop", signature(x="genind"), function(x,pop=NULL,truenames=TRUE,res.type=c("genind","matrix"),
drop=FALSE, treatOther=TRUE, quiet=TRUE){
## checkType(x)
truenames <- TRUE # this argument will be deprecated
## misc checks
if(!is.genind(x)) stop("x is not a valid genind object")
if(is.null(pop)) { # pop taken from @pop
if(is.null(x@pop)) stop("pop not provided and x@pop is empty")
pop <- pop(x)
} else if (is.language(pop)){
## a formula such as ~strata_var: let setPop<- build the factor
setPop(x) <- pop
pop <- pop(x)
} else {
pop <- factor(pop)
}
res.type <- match.arg(res.type)
## pop <- x@pop # comment to take pop arg into account
## make a list of genind objects
## subsetting options (drop/treatOther/quiet) are forwarded to '[.genind'
kObj <- lapply(levels(pop), function(lev) x[pop==lev, , drop=drop, treatOther=treatOther, quiet=quiet])
names(kObj) <- levels(pop)
## res is a list of genind
if(res.type=="genind"){ return(kObj) }
## res is list of matrices
res <- lapply(kObj, function(obj) tab(obj))
return(res)
}) # end seppop
## #####################
## # Methods na.replace
## #####################
## setGeneric("na.replace", function(x, ...) standardGeneric("na.replace"))
## ## genind method
## setMethod("na.replace", signature(x="genind"), function(x, method, quiet=FALSE){
## ## checkType(x)
## ## preliminary stuff
## validObject(x)
## if(!any(is.na(x@tab))) {
## if(!quiet) cat("\n Replaced 0 missing values \n")
## return(x)
## }
## method <- tolower(method)
## method <- match.arg(method, c("0","mean"))
## res <- x
## if(method=="0"){
## res@tab[is.na(x@tab)] <- 0
## }
## if(method=="mean"){
## f1 <- function(vec){
## m <- mean(vec,na.rm=TRUE)
## vec[is.na(vec)] <- m
## return(vec)
## }
## res@tab <- apply(x@tab, 2, f1)
## }
## if(!quiet){
## Nna <- sum(is.na(x@tab))
## cat("\n Replaced",Nna,"missing values \n")
## }
## return(res)
## })
## ## genpop method
## setMethod("na.replace", signature(x="genpop"), function(x,method, quiet=FALSE){
## ## checkType(x)
## ## preliminary stuff
## validObject(x)
## if(!any(is.na(x@tab))) {
## if(!quiet) cat("\n Replaced 0 missing values \n")
## return(x)
## }
## method <- tolower(method)
## method <- match.arg(method, c("0","chi2"))
## res <- x
## if(method=="0"){
## res@tab[is.na(x@tab)] <- 0
## }
## if(method=="chi2"){
## ## compute theoretical counts
## ## (same as in a Chi-squared)
## X <- x@tab
## sumPop <- apply(X,1,sum,na.rm=TRUE)
## sumLoc <- apply(X,2,sum,na.rm=TRUE)
## X.theo <- sumPop %o% sumLoc / sum(X,na.rm=TRUE)
## X[is.na(X)] <- X.theo[is.na(X)]
## res@tab <- X
## }
## if(!quiet){
## Nna <- sum(is.na(x@tab))
## cat("\n Replaced",Nna,"missing values \n")
## }
## return(res)
## })
# Function to bind strata from a list of genind objects and return a single
# genind object.
.rbind_strata <- function(myList, res){
strata_list <- lapply(myList, slot, "strata")
null_strata <- vapply(strata_list, is.null, TRUE)
if (!all(null_strata)){
# NULL strata must be converted to data frames.
# Solution: take the first non-empty strata, and create a new one
# with one variable.
if (any(null_strata)){
# Extract the name of the first column of the first full strata
fullname <- names(strata_list[[which(!null_strata)[1]]])[1]
# loop over all the empty strata and replace them with a data
# frame that has the same number of elements as the samples in that
# genlight object.
for (i in which(null_strata)){
replace_strata <- data.frame(rep(NA, nInd(myList[[i]])))
names(replace_strata) <- fullname
strata_list[[i]] <- replace_strata
}
}
strata(res) <- as.data.frame(suppressWarnings(dplyr::bind_rows(strata_list)))
} else {
res@strata <- NULL
}
return(res)
}
#'
#' Pool several genotypes into a single dataset
#'
#' The function \code{repool} allows to merge genotypes from different
#' \linkS4class{genind} objects into a single 'pool' (i.e. a new \linkS4class{genind}).
#' The markers have to be the same for all objects to be merged, but
#' there is no constraint on alleles.\cr
#'
#' This function can be useful, for instance, when hybrids are created
#' using \code{\link{hybridize}}, to merge hybrids with their parent
#' population for further analyses. Note that \code{repool} can also
#' reverse the action of \code{\link{seppop}}.
#'
#' @author Thibaut Jombart \email{t.jombart@@imperial.ac.uk}
#'
#' @seealso \code{\link{seploc}}, \code{\link{seppop}}
#'
#' @examples
#' \dontrun{
#' ## use the cattle breeds dataset
#' data(microbov)
#' temp <- seppop(microbov)
#' names(temp)
#' ## hybridize salers and zebu -- nasty cattle
#' zebler <- hybridize(temp$Salers, temp$Zebu, n=40)
#' zebler
#' ## now merge zebler with other cattle breeds
#' nastyCattle <- repool(microbov, zebler)
#' nastyCattle
#' }
#'
#' @export
#'
#' @param ... a list of \linkS4class{genind} objects, or a series of \linkS4class{genind} objects separated by commas
#' @param list a logical indicating whether a list of objects with matched alleles shall be returned (TRUE), or a single \linkS4class{genind} object (FALSE, default).
#'
#'
#'
## Merge several genind objects sharing the same loci into one genind, or,
## with list=TRUE, into a list of genind objects with matched alleles.
## Interface unchanged: repool(..., list=FALSE).
repool <- function(..., list=FALSE){
  ## PRELIMINARY CHECKS
  x <- list(...)
  old.names <- names(x)
  if(is.list(x[[1]])) x <- x[[1]] ## if ... is a list, keep this list for x
  if(!inherits(x,"list")) stop("x must be a list")
  ## typo fixed in the message below ("x is does not" -> "x does not")
  if(!all(sapply(x,is.genind))) stop("x does not contain only valid genind objects")
  temp <- sapply(x,function(e) locNames(e))
  ## every locus name must occur once per object
  if(!all(table(temp)==length(x))) stop("markers are not the same for all objects")

  ## MAKE A LIST OF RAW DATA FRAMES (one per object, alleles coded "a/b")
  listTab <- lapply(x,genind2df,usepop=FALSE,sep="/")
  newPloidy <- unlist(lapply(x,ploidy))

  ## SET POPS IF MISSING, KEEPING THE OLD ONES FOR list=TRUE RESTORATION
  old.pop <- lapply(x, pop)
  for(i in seq_along(x)){
    if(is.null(pop(x[[i]]))){
      pop(x[[i]]) <- rep(paste("unknown",i,sep="_"), nInd(x[[i]]))
    }
  }
  new.pop <- lapply(x, pop)

  ## MERGE RAW DATASETS
  ## reorder columns like in the first dataset, then bind all rows;
  ## do.call(rbind, .) also fixes the original crash when a single object
  ## was supplied (the old 'for(i in 2:length(x))' iterated i = 2, 1 and
  ## indexed past the end of listTab)
  markNames <- colnames(listTab[[1]])
  listTab <- lapply(listTab, function(tab) tab[,markNames,drop=FALSE]) # resorting of the tabs
  tab <- do.call(rbind, listTab)

  ## BUILD A SINGLE GENIND
  res <- df2genind(tab, ploidy=newPloidy, type=x[[1]]@type, sep="/")
  pop(res) <- unlist(new.pop)
  res <- .rbind_strata(x, res)
  res@hierarchy <- NULL
  res$call <- match.call()

  ## IF A LIST OF GENIND IS TO BE RETURNED
  if(list){
    ## SEPARATE DATASETS: tag individuals by source object, then split
    old.n <- sapply(x, nInd)
    new.pop <- rep(seq_along(x), old.n)
    pop(res) <- new.pop
    res <- seppop(res)
    ## RESTORE OLD OTHER AND POP of each input object
    old.other <- lapply(x, other)
    for(i in seq_along(res)){
      other(res[[i]]) <- old.other[[i]]
      pop(res[[i]]) <- old.pop[[i]]
    }
    ## SET OBJECT NAMES
    names(res) <- old.names
  }
  ## RETURN
  return(res)
} # end repool
#############
# selpopsize
#############
## Generic: keep only the individuals belonging to populations with at
## least nMin members.
setGeneric("selPopSize", function(x, ...) standardGeneric("selPopSize"))
## genind method ##
setMethod("selPopSize", signature(x = "genind"), function(x, pop = NULL, nMin = 10) {
  if (!is.genind(x)) stop("x is not a valid genind object")
  ## resolve the population factor: explicit argument wins, @pop otherwise
  if (is.null(pop)) {
    if (is.null(x@pop)) stop("pop not provided and x@pop is empty")
    pop <- pop(x)
  } else {
    pop <- factor(pop)
  }
  ## individuals belonging to sufficiently large populations
  popSizes <- table(pop)
  bigEnough <- names(popSizes)[popSizes >= nMin]
  keep <- pop %in% bigEnough
  ## subset and re-attach the matching pop factor
  res <- x[keep]
  pop(res) <- pop[keep]
  return(res)
}) # end selPopSize
#########
# isPoly
#########
## Generic testing which loci (or alleles) are polymorphic.
setGeneric("isPoly", function(x, ...) standardGeneric("isPoly"))
## genind method ##
## isPoly, genind method: returns a logical vector, one entry per locus
## (by="locus") or per allele (by="allele"), TRUE when polymorphic.
setMethod("isPoly", signature(x="genind"), function(x, by=c("locus","allele"), thres=1/100){
## misc checks
## checkType(x)
if(!is.genind(x)) stop("x is not a valid genind object")
by <- match.arg(by)
## main computations ##
## PA case ##
## PA markers: column means are frequencies; polymorphic when the frequency
## lies at least 'thres' away from both 0 and 1
if(x@type=="PA") {
allNb <- apply(x@tab, 2, mean, na.rm=TRUE) # allele frequencies
toKeep <- (allNb >= thres) & (allNb <= (1-thres))
return(toKeep)
}
## codom case ##
allNb <- apply(x@tab, 2, sum, na.rm=TRUE) # allele absolute frequencies
if(by=="locus"){
## a locus is polymorphic when at least two of its alleles reach the
## relative frequency 'thres' (f1 normalizes within the locus first)
f1 <- function(vec){
if(sum(vec) < 1e-10) return(FALSE)
vec <- vec/sum(vec, na.rm=TRUE)
if(sum(vec >= thres) >= 2) return(TRUE)
return(FALSE)
}
toKeep <- tapply(allNb, x@loc.fac, f1)
} else { # i.e. if mode==allele
## NOTE(review): 'allNb' holds absolute counts here, not frequencies, so
## thres=1/100 compares a count against 0.01 -- effectively "count > 0".
## Verify whether relative frequencies were intended before changing it.
toKeep <- (allNb >= thres)
}
return(toKeep)
}) # end isPoly
## ## genpop method ##
## setMethod("isPoly", signature(x="genpop"), function(x, by=c("locus","allele"), thres=1/100){
## ## misc checks
## checkType(x)
## if(!is.genpop(x)) stop("x is not a valid genind object")
## by <- match.arg(by)
## ## main computations ##
## ## ## PA case ##
## ## if(x@type=="PA") {
## ## allNb <- apply(x@tab, 2, mean, na.rm=TRUE) # allele frequencies
## ## toKeep <- (allNb >= thres) & (allNb <= (1-thres))
## ## return(toKeep)
## ## }
## ## codom case ##
## allNb <- apply(x@tab, 2, sum, na.rm=TRUE) # alleles absolute frequencies
## if(by=="locus"){
## f1 <- function(vec){
## if(sum(vec) < 1e-10) return(FALSE)
## vec <- vec/sum(vec, na.rm=TRUE)
## if(sum(vec >= thres) >= 2) return(TRUE)
## return(FALSE)
## }
## toKeep <- tapply(allNb, x@loc.fac, f1)
## } else { # i.e. if mode==allele
## toKeep <- allNb >= thres
## }
## return(toKeep)
## }) # end isPoly
| /R/handling.R | no_license | libor-m/adegenet | R | false | false | 17,469 | r | ###########################
#
# Auxiliary functions for
# adegenet objects
#
# T. Jombart
###########################
##############################
# Method truenames for genind
##############################
## Deprecated accessor for the raw allele table; superseded by tab().
setGeneric("truenames", function(x) standardGeneric("truenames"))

## genind method: emit the deprecation message, then hand back @tab as-is.
setMethod("truenames", signature(x = "genind"), function(x) {
  message("This accessor is now deprecated. Please use 'tab' instead.")
  x@tab
})
##############################
# Method truenames for genpop
##############################
## genpop method: identical contract to the genind method -- deprecation
## message followed by the raw @tab slot.
setMethod("truenames", signature(x = "genpop"), function(x) {
  message("This accessor is now deprecated. Please use 'tab' instead.")
  x@tab
})
###########################
## Generic / methods 'tab'
###########################
#'
#' Access allele counts or frequencies
#'
#' This accessor is used to retrieve a matrix of allele data.
#' By default, a matrix of integers representing allele counts is returned.
#' If \code{freq} is TRUE, then data are standardised as frequencies, so that for any individual and any locus the data sum to 1.
#' The argument \code{NA.method} allows to replace missing data (NAs).
#' This accessor replaces the previous function \code{truenames} as well as the function \code{makefreq}.
#'
#' @export
#'
#' @aliases tab
#'
#' @rdname tab
#'
#' @docType methods
#'
#' @param x a \linkS4class{genind} or \linkS4class{genpop} object.
#' @param freq a logical indicating if data should be transformed into relative frequencies (TRUE); defaults to FALSE.
#' @param NA.method a method to replace NA; asis: leave NAs as is; mean: replace by the mean allele frequencies; zero: replace by zero
#' @param ... further arguments passed to other methods.
#' @return a matrix of integers or numeric
#'
#' @examples
#'
#' data(microbov)
#' head(tab(microbov))
#' head(tab(microbov,freq=TRUE))
#'
#'
## Generic for the 'tab' accessor; genind and genpop methods are defined below.
setGeneric("tab", function(x, ...) standardGeneric("tab"))
## Shared worker behind the genind 'tab' method.
##
## @param x         an object carrying @tab (individuals x alleles matrix)
##                  and @ploidy (numeric, one value per individual) slots.
## @param freq      if TRUE, scale each row by that individual's ploidy so
##                  rows hold relative frequencies; otherwise return counts.
## @param NA.method "asis" leaves NAs untouched; "mean" replaces each NA by
##                  its column mean (computed without NAs); "zero" replaces
##                  NAs by zero.
## @param ...       unused; kept for call compatibility with the methods.
## @return a matrix of allele counts (integers) or frequencies (numeric).
.tabGetter <- function(x, freq=FALSE, NA.method=c("asis","mean","zero"), ...){
  ## handle arguments
  NA.method <- match.arg(NA.method)
  ## get matrix of data; dividing the matrix by x@ploidy recycles down the
  ## columns, i.e. each row/individual is scaled by its own ploidy
  if (!freq){
    out <- x@tab
  } else {
    out <- x@tab/x@ploidy
  }
  ## replace NAs if needed
  if(NA.method=="mean"){
    ## impute each NA with its column mean
    f1 <- function(vec){
      m <- mean(vec,na.rm=TRUE)
      vec[is.na(vec)] <- m
      return(vec)
    }
    out <- apply(out, 2, f1)
  }
  if(NA.method=="zero"){
    ## fixed: the original used ifelse(freq, 0, 0L) -- a scalar misuse of
    ## the vectorized ifelse(); a plain if/else is the idiomatic equivalent
    ## (0L keeps count matrices integer, 0 keeps frequency matrices double).
    out[is.na(out)] <- if (freq) 0 else 0L
  }
  ## return output
  return(out)
}
#' @rdname tab
#' @aliases tab,genind-methods
#' @aliases tab.genind
## 'tab' method for genind: thin wrapper, all logic lives in .tabGetter().
setMethod("tab", signature(x = "genind"),
function (x, freq = FALSE, NA.method = c("asis","mean","zero"), ...){
.tabGetter(x, freq = freq, NA.method = NA.method, ...)
})
#' @rdname tab
#' @aliases tab,genpop-methods
#' @aliases tab.genpop
## 'tab' method for genpop: returns the populations x alleles table, as raw
## counts or, when freq=TRUE, as within-locus frequencies (for each population
## the alleles of one locus sum to 1).  NA handling mirrors the genind method.
setMethod("tab", signature(x="genpop"), function(x, freq=FALSE, NA.method=c("asis","mean","zero"), ...){
## handle arguments
NA.method <- match.arg(NA.method)
# outdim <- dim(x@tab)
## get matrix of data
if(!freq) {
out <- x@tab
} else {
out <- x@tab
## f1 scales a vector of counts to frequencies (NA-safe denominator)
f1 <- function(vec) return(vec/sum(vec,na.rm=TRUE))
## compute frequencies
fac <- x@loc.fac
## no locus factor: treat all columns as one locus
if (is.null(fac)) fac <- rep(1, nLoc(x))
## apply() over rows yields, per population, a list of per-locus vectors
out <- apply(x@tab, 1, tapply, fac, f1)
if (ncol(x@tab) > 1){
## reshape into matrix
## column names recovered from the first population's nested list
col.names <- do.call(c,lapply(out[[1]],names))
row.names <- names(out)
out <- matrix(unlist(out), byrow=TRUE, nrow=nrow(x@tab),
dimnames=list(row.names, col.names))
## reorder columns
out <- out[, colnames(x@tab), drop = FALSE]
} else {
## single-column edge case: rebuild the one-column matrix directly
out <- matrix(out, nrow = length(out), ncol = 1,
dimnames = list(rownames(x@tab), colnames(x@tab)))
}
}
## replace NAs if needed
if(NA.method=="mean"){
## impute each NA with its column mean (computed without NAs)
f1 <- function(vec){
m <- mean(vec, na.rm=TRUE)
vec[is.na(vec)] <- m
return(vec)
}
out <- apply(out, 2, f1)
}
if(NA.method=="zero"){
## NOTE(review): scalar misuse of ifelse(); both branches are zero, so this
## is effectively out[is.na(out)] <- 0 -- a plain if/else would be clearer.
out[is.na(out)] <- ifelse(freq, 0, 0L)
}
# dim(out) <- outdim
## return output
return(out)
})
###########################
# Method seploc for genind
###########################
## Generic for splitting an object by locus.
setGeneric("seploc", function(x, ...) standardGeneric("seploc"))
## seploc, genind method: return one genind (or one allele-count matrix) per
## locus.  Not available for presence/absence (PA) data.
setMethod("seploc", signature(x="genind"), function(x,truenames=TRUE,res.type=c("genind","matrix")){
truenames <- TRUE # this argument will be deprecated
if(x@type=="PA"){
msg <- paste("seploc is not implemented for presence/absence markers")
cat("\n",msg,"\n")
return(invisible())
}
if(!is.genind(x)) stop("x is not a valid genind object")
res.type <- match.arg(res.type)
## make separate tables
## columns of @tab are grouped by locus via @loc.fac: one sub-matrix per locus
kX <- list()
locfac.char <- as.character(x@loc.fac)
for(i in locNames(x)){
kX[[i]] <- x@tab[, i==locfac.char,drop=FALSE]
}
prevcall <- match.call()
if(res.type=="genind"){
## ploidy bug fixed by Zhian N. Kamvar
##kX <- lapply(kX, genind, pop=x@pop, prevcall=prevcall)
kX <- lapply(kX, genind, pop=x@pop, prevcall=prevcall, ploidy=x@ploidy, type=x@type)
## propagate the shared @other slot to every per-locus object
## NOTE(review): seq_along(kX) would be safer than 1:length(kX) should kX
## ever be empty, though a genind always has at least one locus here.
for(i in 1:length(kX)){
kX[[i]]@other <- x@other
}
}
return(kX)
})
###########################
# Method seploc for genpop
###########################
## seploc, genpop method: same contract as the genind method, returning one
## genpop (or one matrix) per locus; not available for PA data.
setMethod("seploc", signature(x="genpop"), function(x,truenames=TRUE,res.type=c("genpop","matrix")){
truenames <- TRUE # this argument will be deprecated
if(x@type=="PA"){
msg <- paste("seploc is not implemented for presence/absence markers")
cat("\n",msg,"\n")
return(invisible())
}
if(!is.genpop(x)) stop("x is not a valid genpop object")
res.type <- match.arg(res.type)
if(res.type=="genpop") { truenames <- TRUE }
## NOTE(review): temp/nloc below are computed but never used afterwards --
## apparently dead code left from an older implementation.
temp <- x@loc.fac
nloc <- length(levels(temp))
levels(temp) <- 1:nloc
## make separate tables
## columns of @tab are grouped by locus via @loc.fac: one sub-matrix per locus
kX <- list()
locfac.char <- as.character(x@loc.fac)
for(i in locNames(x)){
kX[[i]] <- x@tab[,i==locfac.char,drop=FALSE]
}
names(kX) <- locNames(x)
prevcall <- match.call()
if(res.type=="genpop"){
kX <- lapply(kX, genpop, prevcall=prevcall, ploidy=x@ploidy, type=x@type)
## propagate the shared @other slot to every per-locus object
for(i in 1:length(kX)){
kX[[i]]@other <- x@other
}
}
return(kX)
})
###############
# '$' operator
###############
## '$' gives read access to any slot of a genind by name.
setMethod("$", "genind", function(x, name) {
  slot(x, name)
})

## '$<-' writes a slot, validating the object on assignment (check=TRUE),
## and returns the modified object.
setMethod("$<-", "genind", function(x, name, value) {
  slot(x, name, check = TRUE) <- value
  x
})
##################
# Function seppop
##################
## Generic for splitting an object by population.
setGeneric("seppop", function(x, ...) standardGeneric("seppop"))
## genind
## seppop, genind method: split a genind into one object (or one allele
## matrix) per population level.  'pop' may be NULL (use @pop), a
## formula/expression (delegated to setPop<-), or a factor-like vector.
setMethod("seppop", signature(x="genind"), function(x,pop=NULL,truenames=TRUE,res.type=c("genind","matrix"),
drop=FALSE, treatOther=TRUE, quiet=TRUE){
## checkType(x)
truenames <- TRUE # this argument will be deprecated
## misc checks
if(!is.genind(x)) stop("x is not a valid genind object")
if(is.null(pop)) { # pop taken from @pop
if(is.null(x@pop)) stop("pop not provided and x@pop is empty")
pop <- pop(x)
} else if (is.language(pop)){
## a formula such as ~strata_var: let setPop<- build the factor
setPop(x) <- pop
pop <- pop(x)
} else {
pop <- factor(pop)
}
res.type <- match.arg(res.type)
## pop <- x@pop # comment to take pop arg into account
## make a list of genind objects
## subsetting options (drop/treatOther/quiet) are forwarded to '[.genind'
kObj <- lapply(levels(pop), function(lev) x[pop==lev, , drop=drop, treatOther=treatOther, quiet=quiet])
names(kObj) <- levels(pop)
## res is a list of genind
if(res.type=="genind"){ return(kObj) }
## res is list of matrices
res <- lapply(kObj, function(obj) tab(obj))
return(res)
}) # end seppop
## #####################
## # Methods na.replace
## #####################
## setGeneric("na.replace", function(x, ...) standardGeneric("na.replace"))
## ## genind method
## setMethod("na.replace", signature(x="genind"), function(x, method, quiet=FALSE){
## ## checkType(x)
## ## preliminary stuff
## validObject(x)
## if(!any(is.na(x@tab))) {
## if(!quiet) cat("\n Replaced 0 missing values \n")
## return(x)
## }
## method <- tolower(method)
## method <- match.arg(method, c("0","mean"))
## res <- x
## if(method=="0"){
## res@tab[is.na(x@tab)] <- 0
## }
## if(method=="mean"){
## f1 <- function(vec){
## m <- mean(vec,na.rm=TRUE)
## vec[is.na(vec)] <- m
## return(vec)
## }
## res@tab <- apply(x@tab, 2, f1)
## }
## if(!quiet){
## Nna <- sum(is.na(x@tab))
## cat("\n Replaced",Nna,"missing values \n")
## }
## return(res)
## })
## ## genpop method
## setMethod("na.replace", signature(x="genpop"), function(x,method, quiet=FALSE){
## ## checkType(x)
## ## preliminary stuff
## validObject(x)
## if(!any(is.na(x@tab))) {
## if(!quiet) cat("\n Replaced 0 missing values \n")
## return(x)
## }
## method <- tolower(method)
## method <- match.arg(method, c("0","chi2"))
## res <- x
## if(method=="0"){
## res@tab[is.na(x@tab)] <- 0
## }
## if(method=="chi2"){
## ## compute theoretical counts
## ## (same as in a Chi-squared)
## X <- x@tab
## sumPop <- apply(X,1,sum,na.rm=TRUE)
## sumLoc <- apply(X,2,sum,na.rm=TRUE)
## X.theo <- sumPop %o% sumLoc / sum(X,na.rm=TRUE)
## X[is.na(X)] <- X.theo[is.na(X)]
## res@tab <- X
## }
## if(!quiet){
## Nna <- sum(is.na(x@tab))
## cat("\n Replaced",Nna,"missing values \n")
## }
## return(res)
## })
# Function to bind strata from a list of genind objects and return a single
# genind object.
## Combine the @strata slots of a list of genind/genlight objects and attach
## the row-bound result to 'res'.  Objects lacking strata contribute NA rows.
##
## @param myList list of objects carrying a @strata slot (possibly NULL).
## @param res    object whose strata is to be set; returned modified.
## @return 'res' with combined strata, or with @strata NULL when no input
##   object had any strata.
.rbind_strata <- function(myList, res){
  all_strata <- lapply(myList, slot, "strata")
  missing_strata <- vapply(all_strata, is.null, TRUE)
  if (all(missing_strata)) {
    ## nothing to combine: the result explicitly carries no strata
    res@strata <- NULL
    return(res)
  }
  if (any(missing_strata)) {
    ## bind_rows() needs data frames throughout, so every NULL entry is
    ## replaced by a one-column frame of NAs, named after the first column
    ## of the first populated strata, with one row per individual.
    first_col <- names(all_strata[[which(!missing_strata)[1]]])[1]
    for (idx in which(missing_strata)) {
      filler <- data.frame(rep(NA, nInd(myList[[idx]])))
      names(filler) <- first_col
      all_strata[[idx]] <- filler
    }
  }
  strata(res) <- as.data.frame(suppressWarnings(dplyr::bind_rows(all_strata)))
  res
}
#'
#' Pool several genotypes into a single dataset
#'
#' The function \code{repool} allows to merge genotypes from different
#' \linkS4class{genind} objects into a single 'pool' (i.e. a new \linkS4class{genind}).
#' The markers have to be the same for all objects to be merged, but
#' there is no constraint on alleles.\cr
#'
#' This function can be useful, for instance, when hybrids are created
#' using \code{\link{hybridize}}, to merge hybrids with their parent
#' population for further analyses. Note that \code{repool} can also
#' reverse the action of \code{\link{seppop}}.
#'
#' @author Thibaut Jombart \email{t.jombart@@imperial.ac.uk}
#'
#' @seealso \code{\link{seploc}}, \code{\link{seppop}}
#'
#' @examples
#' \dontrun{
#' ## use the cattle breeds dataset
#' data(microbov)
#' temp <- seppop(microbov)
#' names(temp)
#' ## hybridize salers and zebu -- nasty cattle
#' zebler <- hybridize(temp$Salers, temp$Zebu, n=40)
#' zebler
#' ## now merge zebler with other cattle breeds
#' nastyCattle <- repool(microbov, zebler)
#' nastyCattle
#' }
#'
#' @export
#'
#' @param ... a list of \linkS4class{genind} objects, or a series of \linkS4class{genind} objects separated by commas
#' @param list a logical indicating whether a list of objects with matched alleles shall be returned (TRUE), or a single \linkS4class{genind} object (FALSE, default).
#'
#'
#'
repool <- function(..., list=FALSE){
    ## PRELIMINARY STUFF
    x <- list(...)
    old.names <- names(x)
    ## if ... is a single list of objects, use that list directly
    if(is.list(x[[1]])) x <- x[[1]]
    if(!inherits(x,"list")) stop("x must be a list")
    if(!all(sapply(x,is.genind))) stop("x does not contain only valid genind objects")
    ## every marker must occur in every object
    temp <- sapply(x,function(e) locNames(e))
    if(!all(table(temp)==length(x))) stop("markers are not the same for all objects")

    ## MAKE A LIST OF DATA FRAMES (one per genind object)
    listTab <- lapply(x, genind2df, usepop=FALSE, sep="/")
    newPloidy <- unlist(lapply(x, ploidy))

    ## SET POPS IF MISSING; STORE OLD POP
    old.pop <- lapply(x, pop)
    for(i in seq_along(x)){
        if(is.null(pop(x[[i]]))){
            pop(x[[i]]) <- rep(paste("unknown",i,sep="_"), nInd(x[[i]]))
        }
    }
    new.pop <- lapply(x, pop)

    ## MERGE RAW DATASETS
    ## reorder columns like in the first dataset
    markNames <- colnames(listTab[[1]])
    listTab <- lapply(listTab, function(tab) tab[,markNames,drop=FALSE])
    ## bind all tabs by rows; do.call() also handles the single-object
    ## case correctly (the old `for(i in 2:length(x))` loop indexed out
    ## of bounds when only one object was supplied)
    tab <- do.call(rbind, listTab)

    ## GET SINGLE GENIND
    res <- df2genind(tab, ploidy=newPloidy, type=x[[1]]@type, sep="/")
    pop(res) <- unlist(new.pop)
    res <- .rbind_strata(x, res)
    res@hierarchy <- NULL
    res$call <- match.call()

    ## IF A LIST OF GENIND IS TO BE RETURNED
    if(list){
        ## SEPARATE DATASETS: tag individuals by source object, then split
        old.n <- sapply(x, nInd)
        new.pop <- rep(seq_along(x), old.n)
        pop(res) <- new.pop
        res <- seppop(res)
        ## RESTORE OLD OTHER AND POP
        old.other <- lapply(x, other)
        for(i in seq_along(res)){
            other(res[[i]]) <- old.other[[i]]
            pop(res[[i]]) <- old.pop[[i]]
        }
        ## SET OBJECT NAMES
        names(res) <- old.names
    }
    ## RETURN
    return(res)
} # end repool
#############
# selpopsize
#############
setGeneric("selPopSize", function(x, ...) standardGeneric("selPopSize"))

## genind method ##
## Keep only individuals belonging to populations with at least nMin members.
setMethod("selPopSize", signature(x="genind"), function(x,pop=NULL,nMin=10){
    if(!is.genind(x)) stop("x is not a valid genind object")

    ## population factor: taken from x@pop unless supplied explicitly
    if(is.null(pop)) {
        if(is.null(x@pop)) stop("pop not provided and x@pop is empty")
        pop <- pop(x)
    } else{
        pop <- factor(pop)
    }

    ## flag individuals whose population is large enough
    pop.sizes <- table(pop)
    big.enough <- names(pop.sizes)[pop.sizes >= nMin]
    keep <- pop %in% big.enough

    ## subset the object, carrying the matching population factor along
    out <- x[keep]
    pop(out) <- pop[keep]
    return(out)
}) # end selPopSize
#########
# isPoly
#########
setGeneric("isPoly", function(x, ...) standardGeneric("isPoly"))

## genind method ##
## Flag polymorphic loci (by="locus") or individual alleles (by="allele").
setMethod("isPoly", signature(x="genind"), function(x, by=c("locus","allele"), thres=1/100){
    ## misc checks
    ## checkType(x)
    if(!is.genind(x)) stop("x is not a valid genind object")
    by <- match.arg(by)

    ## main computations ##
    ## PA case: columns of @tab are presence/absence, so the column mean is
    ## the frequency of the "present" state; keep loci away from 0 and 1.
    if(x@type=="PA") {
        allNb <- apply(x@tab, 2, mean, na.rm=TRUE) # allele frequencies
        toKeep <- (allNb >= thres) & (allNb <= (1-thres))
        return(toKeep)
    }

    ## codom case ##
    allNb <- apply(x@tab, 2, sum, na.rm=TRUE) # allele absolute frequencies
    if(by=="locus"){
        ## f1 operates on the counts of one locus: rescale to relative
        ## frequencies, then require at least two alleles above `thres`.
        f1 <- function(vec){
            if(sum(vec) < 1e-10) return(FALSE) # all-zero locus: monomorphic
            vec <- vec/sum(vec, na.rm=TRUE)
            if(sum(vec >= thres) >= 2) return(TRUE)
            return(FALSE)
        }
        toKeep <- tapply(allNb, x@loc.fac, f1)
    } else { # i.e. if mode==allele
        ## NOTE(review): here `thres` is compared against *absolute* counts,
        ## not relative frequencies as in the branches above; with the
        ## default thres=1/100 this flags any allele observed at least once.
        ## Confirm this asymmetry is intentional.
        toKeep <- (allNb >= thres)
    }
    return(toKeep)
}) # end isPoly
## ## genpop method ##
## setMethod("isPoly", signature(x="genpop"), function(x, by=c("locus","allele"), thres=1/100){
## ## misc checks
## checkType(x)
## if(!is.genpop(x)) stop("x is not a valid genind object")
## by <- match.arg(by)
## ## main computations ##
## ## ## PA case ##
## ## if(x@type=="PA") {
## ## allNb <- apply(x@tab, 2, mean, na.rm=TRUE) # allele frequencies
## ## toKeep <- (allNb >= thres) & (allNb <= (1-thres))
## ## return(toKeep)
## ## }
## ## codom case ##
## allNb <- apply(x@tab, 2, sum, na.rm=TRUE) # alleles absolute frequencies
## if(by=="locus"){
## f1 <- function(vec){
## if(sum(vec) < 1e-10) return(FALSE)
## vec <- vec/sum(vec, na.rm=TRUE)
## if(sum(vec >= thres) >= 2) return(TRUE)
## return(FALSE)
## }
## toKeep <- tapply(allNb, x@loc.fac, f1)
## } else { # i.e. if mode==allele
## toKeep <- allNb >= thres
## }
## return(toKeep)
## }) # end isPoly
|
## run_analysis: builds a tidy summary of the UCI HAR smartphone dataset.
## Assumes the "UCI HAR Dataset" directory sits in the working directory.

##Merge the training and the test sets to create one data set
#training sets
x_train<-read.table('./UCI HAR Dataset/train/X_train.txt')
y_train<-read.table('./UCI HAR Dataset/train/y_train.txt')
subject_train<-read.table('./UCI HAR Dataset/train/subject_train.txt')
train<-cbind(subject_train,y_train,x_train)
#test sets
x_test<-read.table('./UCI HAR Dataset/test/X_test.txt')
y_test<-read.table('./UCI HAR Dataset/test/y_test.txt')
subject_test<-read.table('./UCI HAR Dataset/test/subject_test.txt')
test<-cbind(subject_test,y_test,x_test)
#merge them
activity<-rbind(train,test)
##Extract only the mean and standard deviation for each measurement
features<-read.table('./UCI HAR Dataset/features.txt')
## fixed=TRUE makes 'mean()' / 'std()' literal matches, so feature names
## merely containing "mean" without the parentheses are excluded.
extractInd<-sort(c(grep('mean()',features$V2,fixed=TRUE),grep('std()',features$V2,fixed=TRUE)))
## +2 shifts past the subject and activity columns added by cbind above
activity<-activity[,c(1,2,extractInd+2)]
##Use descriptive activity names to name the activities in the data set
actlabel<-read.table('./UCI HAR Dataset/activity_labels.txt')
activity[,2]<-factor(activity[,2],labels=actlabel$V2)
##Appropriately label the data set with descriptive variable names
varname<-as.character(features$V2[extractInd])
## NOTE(review): hard-codes corrected names for six variables by position;
## assumes the extracted column order is stable -- confirm against features.txt.
varname[61:66]<-c('fBodyAccJerkMag-mean()','fBodyAccJerkMag-std()','fBodyGyroMag-mean()','fBodyGyroMag-std()','fBodyGyroJerkMag-mean()','fBodyGyroJerkMag-std()')
colnames(activity)<-c('subject','activity',varname)
##Create a tidy data set with the average of each variable for each activity and each subject
tidyact<-activity
tidyact$activity<-as.numeric(tidyact$activity)
## mean of every column within each subject x activity combination
tidyact<-apply(tidyact,2,function(elt) as.numeric(tapply(elt,interaction(activity$subject,activity$activity),mean)))
tidyact<-as.data.frame(tidyact)
tidyact$activity<-factor(tidyact$activity,labels=actlabel$V2)
write.table(tidyact,'tidyact.txt',row.names=FALSE)
| /run_analysis.R | no_license | shuai114/CleanDataProject | R | false | false | 1,782 | r | ##Merge the training and the test sets to create one data set
## (Verbatim duplicate of the run_analysis.R script above; its first
## comment line sits on the preceding metadata row. Code unchanged.)
#training sets
x_train<-read.table('./UCI HAR Dataset/train/X_train.txt')
y_train<-read.table('./UCI HAR Dataset/train/y_train.txt')
subject_train<-read.table('./UCI HAR Dataset/train/subject_train.txt')
train<-cbind(subject_train,y_train,x_train)
#test sets
x_test<-read.table('./UCI HAR Dataset/test/X_test.txt')
y_test<-read.table('./UCI HAR Dataset/test/y_test.txt')
subject_test<-read.table('./UCI HAR Dataset/test/subject_test.txt')
test<-cbind(subject_test,y_test,x_test)
#merge them
activity<-rbind(train,test)
##Extract only the mean and standard deviation for each measurement
features<-read.table('./UCI HAR Dataset/features.txt')
extractInd<-sort(c(grep('mean()',features$V2,fixed=TRUE),grep('std()',features$V2,fixed=TRUE)))
activity<-activity[,c(1,2,extractInd+2)]
##Use descriptive activity names to name the activities in the data set
actlabel<-read.table('./UCI HAR Dataset/activity_labels.txt')
activity[,2]<-factor(activity[,2],labels=actlabel$V2)
##Appropriately label the data set with descriptive variable names
varname<-as.character(features$V2[extractInd])
varname[61:66]<-c('fBodyAccJerkMag-mean()','fBodyAccJerkMag-std()','fBodyGyroMag-mean()','fBodyGyroMag-std()','fBodyGyroJerkMag-mean()','fBodyGyroJerkMag-std()')
colnames(activity)<-c('subject','activity',varname)
##Create a tidy data set with the average of each variable for each activity and each subject
tidyact<-activity
tidyact$activity<-as.numeric(tidyact$activity)
tidyact<-apply(tidyact,2,function(elt) as.numeric(tapply(elt,interaction(activity$subject,activity$activity),mean)))
tidyact<-as.data.frame(tidyact)
tidyact$activity<-factor(tidyact$activity,labels=actlabel$V2)
write.table(tidyact,'tidyact.txt',row.names=FALSE)
|
## plot4.R: draws a 2x2 panel of household power-consumption time series for
## 2007-02-01/02 and writes it to plot4.png.

## Load the full dataset ("?" marks missing values in this file)
dataComplete <- read.csv("household_power_consumption.txt", header=T, sep=';',
                         na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F,
                         comment.char="", quote='\"')

## Convert dates to allow subsetting (the original script repeated this
## conversion twice; once is enough -- the duplicate line was removed)
dataComplete$Date <- as.Date(dataComplete$Date, format="%d/%m/%Y")

## Subset the data to the two days of interest, then free the full table
data <- subset(dataComplete, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(dataComplete)

## Merge date and time into one POSIXct field for the x axis
data$Datetime <- as.POSIXct(paste(as.Date(data$Date), data$Time))

## Open png device
png(filename="plot4.png", height=480, width=480)

# Generate plot 4: four panels drawn left-to-right, top-to-bottom
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(data, {
    plot(Global_active_power~Datetime, type="l",
         ylab="Global Active Power", xlab="")
    plot(Voltage~Datetime, type="l",
         ylab="Voltage", xlab="datetime")
    plot(Sub_metering_1~Datetime, type="l",
         ylab="Energy sub metering", xlab="")
    lines(Sub_metering_2~Datetime,col='Red')
    lines(Sub_metering_3~Datetime,col='Blue')
    legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",
           legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
    plot(Global_reactive_power~Datetime, type="l",
         ylab="Global_reactive_power",xlab="datetime")
})
## Close png file
dev.off() | /plot4.R | no_license | RebeccahM/ExData_Plotting1 | R | false | false | 1,420 | r | ## Load the full dataset
## (Verbatim duplicate of the plot4.R script above; its first comment line
## sits on the preceding metadata row. Code unchanged.)
dataComplete <- read.csv("household_power_consumption.txt", header=T, sep=';',
                         na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F,
                         comment.char="", quote='\"')
## Convert dates to allow subsetting
dataComplete$Date <- as.Date(dataComplete$Date, format="%d/%m/%Y")
## Subset the data
## NOTE(review): the next line repeats the conversion above; redundant but
## harmless, since as.Date() on a Date column returns it unchanged.
dataComplete$Date <- as.Date(dataComplete$Date, format="%d/%m/%Y")
data <- subset(dataComplete, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(dataComplete)
## Merge date and time into one field
data$Datetime <- as.POSIXct(paste(as.Date(data$Date), data$Time))
## Open png file
png(filename="plot4.png", height=480, width=480)
# Generate plot 4
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(data, {
    plot(Global_active_power~Datetime, type="l",
         ylab="Global Active Power", xlab="")
    plot(Voltage~Datetime, type="l",
         ylab="Voltage", xlab="datetime")
    plot(Sub_metering_1~Datetime, type="l",
         ylab="Energy sub metering", xlab="")
    lines(Sub_metering_2~Datetime,col='Red')
    lines(Sub_metering_3~Datetime,col='Blue')
    legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",
           legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
    plot(Global_reactive_power~Datetime, type="l",
         ylab="Global_reactive_power",xlab="datetime")
})
## Close png file
dev.off()
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simple_rs.R
\name{simple_rs_probabilities}
\alias{simple_rs_probabilities}
\title{Inclusion Probabilities: Simple Random Sampling}
\usage{
simple_rs_probabilities(
N,
prob = NULL,
prob_unit = NULL,
check_inputs = TRUE,
simple = TRUE
)
}
\arguments{
\item{N}{The number of units. N must be a positive integer. (required)}
\item{prob}{The probability that each unit is sampled. Must be a real number between 0 and 1 inclusive, and must be of length 1. (optional)}
\item{prob_unit}{A vector of unit-level sampling probabilities. Each entry must be a real number between 0 and 1 inclusive, and the vector must be of length N. (optional)}
\item{check_inputs}{logical. Defaults to TRUE.}
\item{simple}{logical. internal use only.}
}
\value{
A vector length N indicating the probability of being sampled.
}
\description{
Inclusion Probabilities: Simple Random Sampling
}
\examples{
probs <- simple_rs_probabilities(N = 100)
table(probs)
probs <- simple_rs_probabilities(N = 100, prob = 0.3)
table(probs)
}
| /man/simple_rs_probabilities.Rd | no_license | cran/randomizr | R | false | true | 1,063 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simple_rs.R
\name{simple_rs_probabilities}
\alias{simple_rs_probabilities}
\title{Inclusion Probabilities: Simple Random Sampling}
\usage{
simple_rs_probabilities(
N,
prob = NULL,
prob_unit = NULL,
check_inputs = TRUE,
simple = TRUE
)
}
\arguments{
\item{N}{The number of units. N must be a positive integer. (required)}
\item{prob}{The probability that each unit is sampled. Must be a real number between 0 and 1 inclusive, and must be of length 1. (optional)}
\item{prob_unit}{A vector of unit-level sampling probabilities. Each entry must be a real number between 0 and 1 inclusive, and the vector must be of length N. (optional)}
\item{check_inputs}{logical. Defaults to TRUE.}
\item{simple}{logical. internal use only.}
}
\value{
A vector length N indicating the probability of being sampled.
}
\description{
Inclusion Probabilities: Simple Random Sampling
}
\examples{
probs <- simple_rs_probabilities(N = 100)
table(probs)
probs <- simple_rs_probabilities(N = 100, prob = 0.3)
table(probs)
}
|
## Load rankhospital(), which performs the actual ranking work.
source("rankhospital.R")

## best(state, outcome): the best hospital in `state` for the given
## `outcome`, obtained by delegating to rankhospital(). NOTE(review): this
## relies on rankhospital()'s default rank argument selecting the
## top-ranked hospital -- confirm in rankhospital.R.
best <- function(state, outcome){
  rankhospital(state, outcome)
}
| /02_R-Programming/best.R | no_license | olistroemer/datasciencecoursera | R | false | false | 93 | r | source("rankhospital.R")
## (Verbatim duplicate of best() defined just above; code unchanged.)
best <- function(state, outcome){
  rankhospital(state, outcome)
}
|
## Extracted example from the help page of event::cprocess
## (counting-process plot).
library(event)
### Name: cprocess
### Title: Plot Counting Process Data
### Aliases: cprocess
### Keywords: hplot

### ** Examples

## 20 gamma(shape=2, scale=4) event times; no set.seed(), so the plot
## differs between runs.
times <- rgamma(20,2,scale=4)
cprocess(times)
| /data/genthat_extracted_code/event/examples/cprocess.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 183 | r | library(event)
## (Verbatim duplicate of the cprocess example above; its library() line
## sits on the preceding metadata row. Code unchanged.)
### Name: cprocess
### Title: Plot Counting Process Data
### Aliases: cprocess
### Keywords: hplot
### ** Examples
times <- rgamma(20,2,scale=4)
cprocess(times)
|
##' Compare gene clusters functional profile
##' Given a list of gene set, this function will compute profiles of each gene
##' cluster.
##'
##'
##' @param geneClusters a list of entrez gene id.
##' @param fun One of "groupGO", "enrichGO", "enrichKEGG", "enrichDO" or "enrichPathway" .
##' @param ... Other arguments.
##' @return A \code{clusterProfResult} instance.
##' @importFrom methods new
##' @importFrom plyr llply
##' @importFrom plyr ldply
##' @importFrom plyr rename
##' @export
##' @author Guangchuang Yu \url{http://ygc.name}
##' @seealso \code{\link{compareClusterResult-class}}, \code{\link{groupGO}}
##' \code{\link{enrichGO}}
##' @keywords manip
##' @examples
##'
##' data(gcSample)
##' xx <- compareCluster(gcSample, fun="enrichKEGG", organism="human", pvalueCutoff=0.05)
##' #summary(xx)
##' #plot(xx, type="dot", caption="KEGG Enrichment Comparison")
##'
compareCluster <- function(geneClusters, fun="enrichGO", ...) {
    ## Resolve `fun`: match.fun() accepts either the name of a function
    ## (the documented usage) or a function object, and avoids the
    ## eval(parse()) anti-pattern used previously.
    fun <- match.fun(fun)
    ## Apply the enrichment/profiling function to every gene cluster and
    ## keep the summary data frame of each successful result (NULL for
    ## results of any other class).
    clProf <- llply(geneClusters,
                    .fun=function(i) {
                        x <- fun(i, ...)
                        if (inherits(x, c("enrichResult", "groupGOResult"))) {
                            summary(x)
                        }
                    }
                    )
    ## Stack the per-cluster tables; the list names become the .id column,
    ## which is renamed to "Cluster".
    clProf.df <- ldply(clProf, rbind)
    clProf.df <- rename(clProf.df, c(.id="Cluster"))
    new("compareClusterResult",
        compareClusterResult = clProf.df,
        geneClusters = geneClusters,
        fun = fun
        )
}
##' Class "compareClusterResult"
##' This class represents the comparison result of gene clusters by GO
##' categories at specific level or GO enrichment analysis.
##'
##'
##' @name compareClusterResult-class
##' @aliases compareClusterResult-class show,compareClusterResult-method
##' summary,compareClusterResult-method plot,compareClusterResult-method
##' @docType class
##' @slot compareClusterResult cluster comparing result
##' @slot geneClusters a list of genes
##' @slot fun one of groupGO, enrichGO and enrichKEGG
##' @exportClass compareClusterResult
##' @author Guangchuang Yu \url{http://ygc.name}
##' @exportClass compareClusterResult
##' @seealso \code{\linkS4class{groupGOResult}}
##' \code{\linkS4class{enrichResult}} \code{\link{compareCluster}}
##' @keywords classes
## S4 container produced by compareCluster().
setClass("compareClusterResult",
         representation = representation(
             compareClusterResult = "data.frame", # stacked per-cluster result table
             geneClusters = "list",               # the input gene ID clusters
             fun = "function"                     # enrichment function that was applied
             )
         )
##' show method for \code{compareClusterResult} instance
##'
##'
##' @name show
##' @docType methods
##' @rdname show-methods
##'
##' @title show method
##' @param object A \code{compareClusterResult} instance.
##' @return message
##' @importFrom methods show
##' @author Guangchuang Yu \url{http://ygc.name}
## Compact show method: reports how many gene clusters were compared.
## The commented-out per-analysis-type message dispatch that used to live
## here was dead code and has been removed; the printed output is unchanged.
setMethod("show", signature(object="compareClusterResult"),
          function (object){
              geneClusterLen <- length(object@geneClusters)
              cat ("Result of Comparing", geneClusterLen, "gene clusters", "\n")
          }
          )
##' summary method for \code{compareClusterResult} instance
##'
##'
##' @name summary
##' @docType methods
##' @rdname summary-methods
##'
##' @title summary method
##' @param object A \code{compareClusterResult} instance.
##' @return A data frame
##' @importFrom stats4 summary
##' @exportMethod summary
##' @author Guangchuang Yu \url{http://ygc.name}
setMethod("summary", signature(object="compareClusterResult"),
          function(object) {
              ## Expose the stacked cluster-comparison data frame.
              object@compareClusterResult
          }
          )
##' @rdname plot-methods
##' @aliases plot,compareClusterResult,ANY-method
##' @importFrom plyr ddply
##' @importFrom plyr mdply
##' @importFrom plyr .
## Plot a cluster comparison. by="percentage" rescales counts within each
## category and appends the per-category total to its label; the actual
## drawing is delegated to plotting.clusterProfile().
setMethod("plot", signature(x="compareClusterResult"),
          function(x,
                   type="dot",
                   title="",
                   font.size=12,
                   showCategory=5,
                   by="percentage") {
              clProf.df <- summary(x)
              ## get top `showCategory` (default 5) categories of each gene
              ## cluster; showCategory=NULL keeps everything.
              if (is.null(showCategory)) {
                  result <- clProf.df
              } else {
                  Cluster <- NULL # to satisfy codetools
                  result <- ddply(.data = clProf.df,
                                  .variables = .(Cluster),
                                  .fun = function(df, N) {
                                      if (length(df$Count) > N) {
                                          idx <- order(df$Count, decreasing=T)[1:N]
                                          return(df[idx,])
                                      } else {
                                          return(df)
                                      }
                                  },
                                  N=showCategory
                                  )
              }
              ## remove zero count
              result$Description <- as.character(result$Description) ## un-factor
              GOlevel <- result[,c(2,3)] ## GO ID and Term (by column position)
              GOlevel <- unique(GOlevel)
              result <- result[result$Count != 0, ]
              result$Description <- factor(result$Description,
                                           levels=rev(GOlevel[,2]))
              if (by=="percentage") {
                  Description <- Count <- NULL # to satisfy codetools
                  result <- ddply(result,
                                  .(Description),
                                  transform,
                                  Percentage = Count/sum(Count),
                                  Total = sum(Count))
                  ## label GO Description with gene counts.
                  ## NOTE(review): `x` is reassigned here and shadows the
                  ## method argument; past this point it is no longer the
                  ## compareClusterResult object.
                  x <- mdply(result[, c("Description", "Total")], paste, sep=" (")
                  y <- sapply(x[,3], paste, ")", sep="")
                  result$Description <- y
                  ## restore the original order of GO Description
                  xx <- result[,c(2,3)]
                  xx <- unique(xx)
                  rownames(xx) <- xx[,1]
                  Termlevel <- xx[as.character(GOlevel[,1]),2]
                  ##drop the *Total* column
                  result <- result[, colnames(result) != "Total"]
                  result$Description <- factor(result$Description,
                                               levels=rev(Termlevel))
              } else if (by == "count") {
                  ## counts are plotted as-is; nothing to transform
              } else {
              }
              p <- plotting.clusterProfile(result, type, by, title, font.size)
              return(p)
          }
          )
| /2X/2.13/clusterProfiler/R/compareCluster.R | no_license | GuangchuangYu/bioc-release | R | false | false | 8,000 | r | ##' Compare gene clusters functional profile
##' Given a list of gene set, this function will compute profiles of each gene
##' cluster.
##'
##'
##' @param geneClusters a list of entrez gene id.
##' @param fun One of "groupGO", "enrichGO", "enrichKEGG", "enrichDO" or "enrichPathway" .
##' @param ... Other arguments.
##' @return A \code{clusterProfResult} instance.
##' @importFrom methods new
##' @importFrom plyr llply
##' @importFrom plyr ldply
##' @importFrom plyr rename
##' @export
##' @author Guangchuang Yu \url{http://ygc.name}
##' @seealso \code{\link{compareClusterResult-class}}, \code{\link{groupGO}}
##' \code{\link{enrichGO}}
##' @keywords manip
##' @examples
##'
##' data(gcSample)
##' xx <- compareCluster(gcSample, fun="enrichKEGG", organism="human", pvalueCutoff=0.05)
##' #summary(xx)
##' #plot(xx, type="dot", caption="KEGG Enrichment Comparison")
##'
## (Verbatim duplicate of compareCluster() defined earlier in this file.)
## NOTE(review): eval(parse(text=fun)) would more safely be match.fun(fun),
## and the class(x)== comparisons would more safely be inherits(); both are
## left byte-identical here.
compareCluster <- function(geneClusters, fun="enrichGO", ...) {
    fun <- eval(parse(text=fun))
    clProf <- llply(geneClusters,
                    .fun=function(i) {
                        x=fun(i, ...)
                        if (class(x) == "enrichResult" || class(x) == "groupGOResult") {
                            summary(x)
                        }
                    }
                    )
    clProf.df <- ldply(clProf, rbind)
    ##colnames(clProf.df)[1] <- "Cluster"
    clProf.df <- rename(clProf.df, c(.id="Cluster"))
    new("compareClusterResult",
        compareClusterResult = clProf.df,
        geneClusters = geneClusters,
        fun = fun
        )
}
##' Class "compareClusterResult"
##' This class represents the comparison result of gene clusters by GO
##' categories at specific level or GO enrichment analysis.
##'
##'
##' @name compareClusterResult-class
##' @aliases compareClusterResult-class show,compareClusterResult-method
##' summary,compareClusterResult-method plot,compareClusterResult-method
##' @docType class
##' @slot compareClusterResult cluster comparing result
##' @slot geneClusters a list of genes
##' @slot fun one of groupGO, enrichGO and enrichKEGG
##' @exportClass compareClusterResult
##' @author Guangchuang Yu \url{http://ygc.name}
##' @exportClass compareClusterResult
##' @seealso \code{\linkS4class{groupGOResult}}
##' \code{\linkS4class{enrichResult}} \code{\link{compareCluster}}
##' @keywords classes
## (Verbatim duplicate of the compareClusterResult class definition that
## appears earlier in this file; code unchanged.)
setClass("compareClusterResult",
         representation = representation(
             compareClusterResult = "data.frame",
             geneClusters = "list",
             fun = "function"
             )
         )
##' show method for \code{compareClusterResult} instance
##'
##'
##' @name show
##' @docType methods
##' @rdname show-methods
##'
##' @title show method
##' @param object A \code{compareClusterResult} instance.
##' @return message
##' @importFrom methods show
##' @author Guangchuang Yu \url{http://ygc.name}
## (Verbatim duplicate of the show,compareClusterResult method defined
## earlier in this file, including its dead commented-out dispatch block.)
setMethod("show", signature(object="compareClusterResult"),
          function (object){
              geneClusterLen <- length(object@geneClusters)
              #fun <- object@fun
              #fun <- as.character(substitute(fun))
              #if (fun == "enrichKEGG") {
              # analysis <- "KEGG Enrichment Analysis"
              # } else if (fun == "groupGO") {
              # analysis <- "GO Profiling Analysis"
              # } else if (fun == "enrichGO") {
              # analysis <- "GO Enrichment Analysis"
              # } else if (fun == "enrichDO") {
              # analysis <- "DO Enrichment Analysis"
              # } else {
              # analysis <- "User specify Analysis"
              # }
              # cat ("Compare", geneClusterLen, "gene clusters using", analysis, "\n")
              cat ("Result of Comparing", geneClusterLen, "gene clusters", "\n")
          }
          )
##' summary method for \code{compareClusterResult} instance
##'
##'
##' @name summary
##' @docType methods
##' @rdname summary-methods
##'
##' @title summary method
##' @param object A \code{compareClusterResult} instance.
##' @return A data frame
##' @importFrom stats4 summary
##' @exportMethod summary
##' @author Guangchuang Yu \url{http://ygc.name}
## (Verbatim duplicate of the summary,compareClusterResult method defined
## earlier in this file.) Returns the stacked comparison data frame.
setMethod("summary", signature(object="compareClusterResult"),
          function(object) {
              return(object@compareClusterResult)
          }
          )
##' @rdname plot-methods
##' @aliases plot,compareClusterResult,ANY-method
##' @importFrom plyr ddply
##' @importFrom plyr mdply
##' @importFrom plyr .
## (Verbatim duplicate of the plot,compareClusterResult method defined
## earlier in this file; code unchanged.)
setMethod("plot", signature(x="compareClusterResult"),
          function(x,
                   type="dot",
                   title="",
                   font.size=12,
                   showCategory=5,
                   by="percentage") {
              clProf.df <- summary(x)
              ## get top 5 (default) categories of each gene cluster.
              if (is.null(showCategory)) {
                  result <- clProf.df
              } else {
                  Cluster <- NULL # to satisfy codetools
                  result <- ddply(.data = clProf.df,
                                  .variables = .(Cluster),
                                  .fun = function(df, N) {
                                      if (length(df$Count) > N) {
                                          idx <- order(df$Count, decreasing=T)[1:N]
                                          return(df[idx,])
                                      } else {
                                          return(df)
                                      }
                                  },
                                  N=showCategory
                                  )
              }
              ## remove zero count
              result$Description <- as.character(result$Description) ## un-factor
              GOlevel <- result[,c(2,3)] ## GO ID and Term
              GOlevel <- unique(GOlevel)
              result <- result[result$Count != 0, ]
              result$Description <- factor(result$Description,
                                           levels=rev(GOlevel[,2]))
              if (by=="percentage") {
                  Description <- Count <- NULL # to satisfy codetools
                  result <- ddply(result,
                                  .(Description),
                                  transform,
                                  Percentage = Count/sum(Count),
                                  Total = sum(Count))
                  ## label GO Description with gene counts.
                  x <- mdply(result[, c("Description", "Total")], paste, sep=" (")
                  y <- sapply(x[,3], paste, ")", sep="")
                  result$Description <- y
                  ## restore the original order of GO Description
                  xx <- result[,c(2,3)]
                  xx <- unique(xx)
                  rownames(xx) <- xx[,1]
                  Termlevel <- xx[as.character(GOlevel[,1]),2]
                  ##drop the *Total* column
                  result <- result[, colnames(result) != "Total"]
                  result$Description <- factor(result$Description,
                                               levels=rev(Termlevel))
              } else if (by == "count") {
              } else {
              }
              p <- plotting.clusterProfile(result, type, by, title, font.size)
              return(p)
          }
          )
|
/MLib/helpMe.r | no_license | markusMiksa/ETFportfolio | R | false | false | 2,428 | r | ||
# libraries ---------------------------------------------------------------
library(tidyverse)
library(brolgar)
library(extrafont)
library(directlabels)
# data ----------------------------------------------------------------------
## NOTE(review): absolute, user-specific path -- this script only runs on
## the author's machine; consider a relative path.
college_dat <-
  read_csv("/Users/shortessay/Downloads/Data_10-7-2019.csv")
colnames(college_dat)
# clean data ------------------------------------------------------------------
## Share of Black/African American fall undergraduates per institution and
## year: divide the group headcount by the grand total for 2010-2017, then
## reshape to long form (one row per institution-year).
college_pct <-
  college_dat %>%
  mutate(`2017` = `Black or African American total (EF2017A All students Undergraduate total)`/`Grand total (EF2017A All students Undergraduate total)`,
         `2016` = `Black or African American total (EF2016A_RV All students Undergraduate total)`/`Grand total (EF2016A_RV All students Undergraduate total)`,
         `2015` = `Black or African American total (EF2015A_RV All students Undergraduate total)`/`Grand total (EF2015A_RV All students Undergraduate total)`,
         `2014` = `Black or African American total (EF2014A_RV All students Undergraduate total)`/`Grand total (EF2014A_RV All students Undergraduate total)`,
         `2013` = `Black or African American total (EF2013A_RV All students Undergraduate total)`/`Grand total (EF2013A_RV All students Undergraduate total)`,
         `2012` = `Black or African American total (EF2012A_RV All students Undergraduate total)`/`Grand total (EF2012A_RV All students Undergraduate total)`,
         `2011` = `Black or African American total (EF2011A_RV All students Undergraduate total)`/`Grand total (EF2011A_RV All students Undergraduate total)`,
         `2010` = `Black or African American total (EF2010A_RV All students Undergraduate total)`/`Grand total (EF2010A_RV All students Undergraduate total)`,
         ## Normalize two institution names (presumably long official
         ## names in the raw file -- confirm against the CSV).
         `Institution Name` = case_when(str_detect(`Institution Name`, "Columbia") ~ "Columbia University",
                                        TRUE ~ `Institution Name`),
         `Institution Name` = case_when(str_detect(`Institution Name`, "Massachusetts") ~ "MIT",
                                        TRUE ~ `Institution Name`)) %>%
  select(`Institution Name`, `2017`:`2010`) %>%
  ## gather() is superseded by pivot_longer() but kept as-is here
  gather(year, percentage, -1) %>%
  mutate(year = as.numeric(year))
college_ts <- # create a time series (tsibble keyed by institution)
  college_pct %>%
  mutate(key = group_indices(., `Institution Name`)) %>%
  as_tsibble(index = year, key = key)
# brolgar -----------------------------------------------------------------
## Exploratory longitudinal summaries (printed, not stored): five-number
## summary and monotonicity features of `percentage` for each series.
college_ts %>%
  features(percentage,
           feat_five_num)
college_ts %>%
  features(percentage,
           feat_monotonic)
# graphs ------------------------------------------------------------------
label_wrap_gen <- function(width = 120) {
  ## Returns a facet labeller: wraps each label value to `width` characters
  ## and joins the wrapped pieces with newlines.
  function(variable, value) {
    wrapped <- strwrap(as.character(value), width = width, simplify = FALSE)
    lapply(wrapped, function(pieces) paste(pieces, collapse = "\n"))
  }
}
## Faceted line chart: one panel per institution, y = share of Black/
## African American undergraduates. label_wrap_gen(width = .1) wraps the
## facet strip labels at well under one word, i.e. one word per line.
college_pct %>%
  ggplot(aes(x = year, y = percentage, color = `Institution Name`, group = `Institution Name`)) +
  geom_line() +
  scale_y_continuous(labels = scales::percent_format(), limits = c(0, .1)) +
  facet_grid(~ `Institution Name`,
             labeller=label_wrap_gen(width = .1)) +
  theme_minimal() +
  theme(text = element_text(family = "Rockwell"),
        plot.title = element_text(size = 18),
        legend.position = "none",
        axis.text.x = element_text(angle = 45,hjust = 1)) +
  scale_color_manual(values = c("#293333", "#4D5340", "#FF6600", "#8FB350", "#00CCCC",
                                "#FFAD01", "#B31A01", "#60B332", "#B3A87E", "#09015C", "#9F5BB3")) +
  labs(title = "Percentage of Fall Undergraduates that are Black/African American",
       caption = "IPEDS Data Center Session Guest_60394508853")
## Same data as a single spaghetti plot: direct labels at the line ends
## (directlabels::geom_dl) instead of facets or a legend.
college_pct %>%
  ggplot(aes(x = as.numeric(year), y = percentage, group = `Institution Name`, color = `Institution Name`)) +
  geom_line() +
  scale_x_continuous(expand = c(0.25, 0)) +
  scale_y_continuous(labels = scales::percent_format(), limits = c(0, .1)) +
  theme_minimal() +
  theme(axis.title.x = element_blank(),
        axis.title.y = element_blank(),
        text = element_text(family = "Rockwell"),
        plot.title = element_text(size = 18),
        legend.position = "none") +
  geom_dl(aes(label = `Institution Name`), method = list(dl.combine("last.bumpup"), cex = 0.8)) +
  scale_color_manual(values = c("#293333", "#4D5340", "#FF6600", "#8FB350", "#00CCCC",
                                "#FFAD01", "#B31A01", "#60B332", "#B3A87E", "#09015C", "#9F5BB3")) +
  labs(title = "Percentage of Fall Undergraduates that are Black/African American",
       caption = "IPEDS Data Center Session Guest_60394508853")
# percent change ----------------------------------------------------------
## Percent change in the share between 2010 and 2017 for each institution.
## Rows are ordered by year within institution, so lag() pairs the 2017 row
## with its 2010 row; pct_change is NA on each 2010 row.
college_pct_change <-
  college_pct %>%
  filter(year %in% c("2010", "2017")) %>%
  group_by(`Institution Name`) %>%
  arrange(year, .by_group = TRUE) %>%
  mutate(pct_change = (percentage/lag(percentage) - 1) * 100)
| /college-demographics/ivyplus.R | no_license | ivelasq/data-visualization-portfolio | R | false | false | 4,919 | r |
## (Verbatim duplicate of the ivyplus.R script above; code unchanged.)
# libraries ---------------------------------------------------------------
library(tidyverse)
library(brolgar)
library(extrafont)
library(directlabels)
# data ----------------------------------------------------------------------
college_dat <-
  read_csv("/Users/shortessay/Downloads/Data_10-7-2019.csv")
colnames(college_dat)
# clean data ------------------------------------------------------------------
college_pct <-
  college_dat %>%
  mutate(`2017` = `Black or African American total (EF2017A All students Undergraduate total)`/`Grand total (EF2017A All students Undergraduate total)`,
         `2016` = `Black or African American total (EF2016A_RV All students Undergraduate total)`/`Grand total (EF2016A_RV All students Undergraduate total)`,
         `2015` = `Black or African American total (EF2015A_RV All students Undergraduate total)`/`Grand total (EF2015A_RV All students Undergraduate total)`,
         `2014` = `Black or African American total (EF2014A_RV All students Undergraduate total)`/`Grand total (EF2014A_RV All students Undergraduate total)`,
         `2013` = `Black or African American total (EF2013A_RV All students Undergraduate total)`/`Grand total (EF2013A_RV All students Undergraduate total)`,
         `2012` = `Black or African American total (EF2012A_RV All students Undergraduate total)`/`Grand total (EF2012A_RV All students Undergraduate total)`,
         `2011` = `Black or African American total (EF2011A_RV All students Undergraduate total)`/`Grand total (EF2011A_RV All students Undergraduate total)`,
         `2010` = `Black or African American total (EF2010A_RV All students Undergraduate total)`/`Grand total (EF2010A_RV All students Undergraduate total)`,
         `Institution Name` = case_when(str_detect(`Institution Name`, "Columbia") ~ "Columbia University",
                                        TRUE ~ `Institution Name`),
         `Institution Name` = case_when(str_detect(`Institution Name`, "Massachusetts") ~ "MIT",
                                        TRUE ~ `Institution Name`)) %>%
  select(`Institution Name`, `2017`:`2010`) %>%
  gather(year, percentage, -1) %>%
  mutate(year = as.numeric(year))
college_ts <- # create a time series
  college_pct %>%
  mutate(key = group_indices(., `Institution Name`)) %>%
  as_tsibble(index = year, key = key)
# brolgar -----------------------------------------------------------------
college_ts %>%
  features(percentage,
           feat_five_num)
college_ts %>%
  features(percentage,
           feat_monotonic)
# graphs ------------------------------------------------------------------
label_wrap_gen <- function(width = 120) {
  function(variable, value) {
    lapply(strwrap(as.character(value), width = width, simplify = FALSE),
           paste, collapse="\n")
  }
}
college_pct %>%
  ggplot(aes(x = year, y = percentage, color = `Institution Name`, group = `Institution Name`)) +
  geom_line() +
  scale_y_continuous(labels = scales::percent_format(), limits = c(0, .1)) +
  facet_grid(~ `Institution Name`,
             labeller=label_wrap_gen(width = .1)) +
  theme_minimal() +
  theme(text = element_text(family = "Rockwell"),
        plot.title = element_text(size = 18),
        legend.position = "none",
        axis.text.x = element_text(angle = 45,hjust = 1)) +
  scale_color_manual(values = c("#293333", "#4D5340", "#FF6600", "#8FB350", "#00CCCC",
                                "#FFAD01", "#B31A01", "#60B332", "#B3A87E", "#09015C", "#9F5BB3")) +
  labs(title = "Percentage of Fall Undergraduates that are Black/African American",
       caption = "IPEDS Data Center Session Guest_60394508853")
college_pct %>%
  ggplot(aes(x = as.numeric(year), y = percentage, group = `Institution Name`, color = `Institution Name`)) +
  geom_line() +
  scale_x_continuous(expand = c(0.25, 0)) +
  scale_y_continuous(labels = scales::percent_format(), limits = c(0, .1)) +
  theme_minimal() +
  theme(axis.title.x = element_blank(),
        axis.title.y = element_blank(),
        text = element_text(family = "Rockwell"),
        plot.title = element_text(size = 18),
        legend.position = "none") +
  geom_dl(aes(label = `Institution Name`), method = list(dl.combine("last.bumpup"), cex = 0.8)) +
  scale_color_manual(values = c("#293333", "#4D5340", "#FF6600", "#8FB350", "#00CCCC",
                                "#FFAD01", "#B31A01", "#60B332", "#B3A87E", "#09015C", "#9F5BB3")) +
  labs(title = "Percentage of Fall Undergraduates that are Black/African American",
       caption = "IPEDS Data Center Session Guest_60394508853")
# percent change ----------------------------------------------------------
college_pct_change <-
  college_pct %>%
  filter(year %in% c("2010", "2017")) %>%
  group_by(`Institution Name`) %>%
  arrange(year, .by_group = TRUE) %>%
  mutate(pct_change = (percentage/lag(percentage) - 1) * 100)
|
\encoding{utf-8}
\name{hyp2f1}
\alias{hyp2f1}
\title{Compute the Gaussian hypergeometric function with complex arguments}
\usage{
hyp2f1(a, b, c, z,
algorithm = c("michel.stoitsov", "forrey"))
}
\arguments{
\item{a}{complex parameter}
\item{b}{complex parameter}
\item{c}{complex parameter}
\item{z}{complex variable}
\item{algorithm}{either \dQuote{michel.stoitsov}
(default) or \dQuote{forrey} (see the details)}
}
\value{
The complex value of the Gaussian hypergeometric
function.
}
\description{
Two different algorithms can be used.
}
\details{
The first, default, algorithm uses Fortran code in
\dQuote{hyp_2F1.f90} from N. L. J. Michel and M. V.
Stoitsov, which is available at
\url{http://cpc.cs.qub.ac.uk/summaries/AEAE}. The
corresponding background reference is N. L. J. Michel and
M. V. Stoitsov (2008): Fast computation of the Gauss
hypergeometric function with all its parameters complex
with application to the Pöschl-Teller-Ginocchio potential
wave functions, Computer Physics Communications
178:535-551.
The second algorithm uses Fortran code in \dQuote{chyp.f}
from R. C. Forrey, which is available at
\url{http://physics.bk.psu.edu/codes/chyp.f}. The
corresponding background reference is R. C. Forrey
(1997): Computing the hypergeometric function, Journal of
Computational Physics 137:79-100.
}
\examples{
## library(appell)
## compare the results of both algorithms
## for random test data.
## todo: add better tests trying to replicate published results?
nTest <- 100L
set.seed(38)
a <- complex(real=rnorm(nTest),
imaginary=rnorm(nTest))
b <- complex(real=rnorm(nTest),
imaginary=rnorm(nTest))
c <- complex(real=rnorm(nTest),
imaginary=rnorm(nTest))
z <- complex(real=rnorm(nTest),
imaginary=rnorm(nTest))
tableHyp2f1 <- matrix(nrow=nTest,
ncol=2L,
dimnames=
list(NULL,
c("forrey", "michel.stoitsov")))
for(i in seq_len(nTest))
{
tableHyp2f1[i, "forrey"] <- hyp2f1(a[i], b[i], c[i], z[i],
algorithm="forrey")
tableHyp2f1[i, "michel.stoitsov"] <- hyp2f1(a[i], b[i], c[i], z[i],
algorithm="michel.stoitsov")
}
tableHyp2f1
abs(tableHyp2f1[, "forrey"] - tableHyp2f1[, "michel.stoitsov"])
## so very small differences,
## at least in this range of function parameters.
}
\author{
Daniel Sabanes Bove
\email{daniel.sabanesbove@ifspm.uzh.ch}
}
\keyword{math}
| /man/hyp2f1.Rd | no_license | mbedward/appell | R | false | false | 2,597 | rd | \encoding{utf-8}
\name{hyp2f1}
\alias{hyp2f1}
\title{Compute the Gaussian hypergeometric function with complex arguments}
\usage{
hyp2f1(a, b, c, z,
algorithm = c("michel.stoitsov", "forrey"))
}
\arguments{
\item{a}{complex parameter}
\item{b}{complex parameter}
\item{c}{complex parameter}
\item{z}{complex variable}
\item{algorithm}{either \dQuote{michel.stoitsov}
(default) or \dQuote{forrey} (see the details)}
}
\value{
The complex value of the Gaussian hypergeometric
function.
}
\description{
Two different algorithms can be used.
}
\details{
The first, default, algorithm uses Fortran code in
\dQuote{hyp_2F1.f90} from N. L. J. Michel and M. V.
Stoitsov, which is available at
\url{http://cpc.cs.qub.ac.uk/summaries/AEAE}. The
corresponding background reference is N. L. J. Michel and
M. V. Stoitsov (2008): Fast computation of the Gauss
hypergeometric function with all its parameters complex
with application to the Pöschl-Teller-Ginocchio potential
wave functions, Computer Physics Communications
178:535-551.
The second algorithm uses Fortran code in \dQuote{chyp.f}
from R. C. Forrey, which is available at
\url{http://physics.bk.psu.edu/codes/chyp.f}. The
corresponding background reference is R. C. Forrey
(1997): Computing the hypergeometric function, Journal of
Computational Physics 137:79-100.
}
\examples{
## library(appell)
## compare the results of both algorithms
## for random test data.
## todo: add better tests trying to replicate published results?
nTest <- 100L
set.seed(38)
a <- complex(real=rnorm(nTest),
imaginary=rnorm(nTest))
b <- complex(real=rnorm(nTest),
imaginary=rnorm(nTest))
c <- complex(real=rnorm(nTest),
imaginary=rnorm(nTest))
z <- complex(real=rnorm(nTest),
imaginary=rnorm(nTest))
tableHyp2f1 <- matrix(nrow=nTest,
ncol=2L,
dimnames=
list(NULL,
c("forrey", "michel.stoitsov")))
for(i in seq_len(nTest))
{
tableHyp2f1[i, "forrey"] <- hyp2f1(a[i], b[i], c[i], z[i],
algorithm="forrey")
tableHyp2f1[i, "michel.stoitsov"] <- hyp2f1(a[i], b[i], c[i], z[i],
algorithm="michel.stoitsov")
}
tableHyp2f1
abs(tableHyp2f1[, "forrey"] - tableHyp2f1[, "michel.stoitsov"])
## so very small differences,
## at least in this range of function parameters.
}
\author{
Daniel Sabanes Bove
\email{daniel.sabanesbove@ifspm.uzh.ch}
}
\keyword{math}
|
#========== SETUP DATA FRAMES =================
# COLLECT AIRPORT DATA (OpenFlights airports.dat has no header row)
airports <- read.csv("https://raw.githubusercontent.com/jpatokal/openflights/master/data/airports.dat", header = FALSE)
colnames(airports) <- c("ID", "name", "city", "country", "IATA_FAA", "ICAO", "lat", "lon", "altitude", "timezone", "DST")
sort(unique(airports$country))
us_airports <- subset(airports, country=='United States')
# COLLECT ROUTE DATA
routes <- read.csv("https://raw.githubusercontent.com/jpatokal/openflights/master/data/routes.dat", header=F)
colnames(routes) <- c("airline", "airlineID", "sourceAirport", "sourceAirportID", "destinationAirport", "destinationAirportID", "codeshare", "stops", "equipment")
# MERGE LAT LON from flights
# First merge attaches destination-airport columns; the second merge attaches
# source-airport columns. After the second merge the duplicated column names
# get suffixes: *.x = destination airport fields, *.y = source airport fields.
routesD <- merge(routes, us_airports, by.x = "destinationAirportID", by.y="ID")
routesAll <- merge(routesD, us_airports, by.x = "sourceAirportID", by.y="ID")
#========== START BASE MAP VISUALIZATION =========================
library(maps)
library(mapproj)
map("world", col="#F1F1F1", fill=TRUE, bg="#FFFFFF", lwd=0.05, mar=rep(0,4))
map("state", col="#000000", fill = FALSE, add = TRUE)
# miles-based scale bar anchored near the bottom-left of the plot region
map.scale(
  grconvertX(0.01, "npc"),
  grconvertY(0.07, "npc"),
  metric = FALSE,
  relwidth = .18,
  ratio = FALSE
)
# semi-transparent filled circles at every US airport location
points(
  mapproject(us_airports$lon, us_airports$lat),
  col = "#4C4D8B",
  bg = rgb(
    red = 132,
    green = 134,
    blue = 242,
    alpha = 125,
    max = 255
  ),
  pch = 21,
  cex = 1
)
#============ US BASEMAP ===============================
# bounding box covering the continental US plus Alaska/Hawaii
xlim <- c(-171.738281, -56.601563)
ylim <- c(12.039321, 71.856229)
map("world", col="#f2f2f2", fill=TRUE, bg="light blue", lwd=0.05, xlim=xlim, ylim=ylim)
map("state", col="#000000", fill = FALSE, add = TRUE)
map.scale(grconvertX(0.01, "npc"), grconvertY(0.07, "npc"), metric = FALSE, relwidth = .18, ratio = FALSE)
#============ PLOT POINTS ==============================
help(points)
points(mapproject(us_airports$lon, us_airports$lat), col="#000000", pch=19, cex= .5 )
points(mapproject(us_airports$lon, us_airports$lat), col="#4C4D8B", bg=rgb( red=132, green=134, blue=242, alpha = 125, max=255 ), pch=21, cex= 1 )
#============ PLOT GREAT CIRCLES ========================
laflights <- routesAll[routesAll$sourceAirport=='LAX',]
sfoflights <- routesAll[routesAll$sourceAirport=='SFO',]
map("world", col="#f2f2f2", fill=TRUE, bg="light blue", lwd=0.05, xlim=xlim, ylim=ylim)
#PLOT LINES WITH NO COLOR SCALE
library(geosphere)
help(gcIntermediate)
# one great-circle arc per LAX route: source (*.y) -> destination (*.x),
# interpolated with 100 intermediate points plus the endpoints
for (j in 1:nrow(laflights)) {
  inter <- gcIntermediate(c(laflights$lon.y[j], laflights$lat.y[j]), c(laflights$lon.x[j], laflights$lat.x[j]), n=100, addStartEnd=TRUE)
  lines(inter, col="red", lwd=0.8)
}
help(gcIntermediate)
for (j in 1:nrow(sfoflights)) {
  inter <- gcIntermediate(c(sfoflights$lon.y[j], sfoflights$lat.y[j]), c(sfoflights$lon.x[j], sfoflights$lat.x[j]), n=100, addStartEnd=TRUE)
  lines(inter, col="blue", lwd=0.8)
}
#============== PUT IT ALL TOGETHER ========================
# PLOT BASE MAP
map("world", col="#f2f2f2", fill=TRUE, bg="light blue", lwd=0.05, xlim=xlim, ylim=ylim)
# PLOT FLIGHTS FROM LAX
for (j in 1:nrow(laflights)) {
  inter <- gcIntermediate(c(laflights$lon.y[j], laflights$lat.y[j]), c(laflights$lon.x[j], laflights$lat.x[j]), n=100, addStartEnd=TRUE)
  lines(inter, col="gray", lwd=0.8)
}
# PLOT DESTINATION MARK (*.x columns = destination airport coordinates)
points(mapproject(laflights$lon.x, laflights$lat.x), col="#4C4D8B", bg=rgb( red=132, green=134, blue=242, alpha = 125, max=255 ), pch=21, cex= 1 )
# PLOT ORIGIN MARK (*.y columns = source airport coordinates, i.e. LAX)
points(mapproject(laflights$lon.y, laflights$lat.y), col="#ff9147", bg="#FFED47", pch=25, cex= 2 )
# ADD DESTINATION TEXT
text(laflights$lon.x, laflights$lat.x, laflights$destinationAirport, cex=.5, pos=4, col="#000000")
# ADD SCALE
map.scale(grconvertX(0.01, "npc"), grconvertY(0.07, "npc"), metric = FALSE, relwidth = .18, ratio = FALSE)
# Add Marks Legend
legend(
  grconvertX(0.01, "npc"),
  grconvertY(0.15, "npc"),
  c('Origin', 'Destination'),
  pch=c(25, 21),
  col=c("#ff9147", "#4C4D8B"),
  pt.bg=c("#FFED47",rgb( red=132, green=134, blue=242, alpha = 125, max=255 ) ),
  cex = 1,
  bty = "n",
  ncol=2,
  pt.cex = 1.5
)
| /R/maps tutorial.r | no_license | ashokmvbc/MSBITraining | R | false | false | 4,189 | r | #========== SETUP DATA FRAMES =================
# COLLECT AIRPORT DATA
airports <- read.csv("https://raw.githubusercontent.com/jpatokal/openflights/master/data/airports.dat", header = FALSE)
colnames(airports) <- c("ID", "name", "city", "country", "IATA_FAA", "ICAO", "lat", "lon", "altitude", "timezone", "DST")
sort(unique(airports$country))
us_airports <- subset(airports, country=='United States')
# COLLECT ROUTE DATA
routes <- read.csv("https://raw.githubusercontent.com/jpatokal/openflights/master/data/routes.dat", header=F)
colnames(routes) <- c("airline", "airlineID", "sourceAirport", "sourceAirportID", "destinationAirport", "destinationAirportID", "codeshare", "stops", "equipment")
# MERGE LAT LON from flights
routesD <- merge(routes, us_airports, by.x = "destinationAirportID", by.y="ID")
routesAll <- merge(routesD, us_airports, by.x = "sourceAirportID", by.y="ID")
#========== START BASE MAP VISUALIZATION =========================
library(maps)
library(mapproj)
map("world", col="#F1F1F1", fill=TRUE, bg="#FFFFFF", lwd=0.05, mar=rep(0,4))
map("state", col="#000000", fill = FALSE, add = TRUE)
map.scale(
grconvertX(0.01, "npc"),
grconvertY(0.07, "npc"),
metric = FALSE,
relwidth = .18,
ratio = FALSE
)
points(
mapproject(us_airports$lon, us_airports$lat),
col = "#4C4D8B",
bg = rgb(
red = 132,
green = 134,
blue = 242,
alpha = 125,
max = 255
),
pch = 21,
cex = 1
)
#============ US BASEMAP ===============================
xlim <- c(-171.738281, -56.601563)
ylim <- c(12.039321, 71.856229)
map("world", col="#f2f2f2", fill=TRUE, bg="light blue", lwd=0.05, xlim=xlim, ylim=ylim)
map("state", col="#000000", fill = FALSE, add = TRUE)
map.scale(grconvertX(0.01, "npc"), grconvertY(0.07, "npc"), metric = FALSE, relwidth = .18, ratio = FALSE)
#============ PLOT POINTS ==============================
help(points)
points(mapproject(us_airports$lon, us_airports$lat), col="#000000", pch=19, cex= .5 )
points(mapproject(us_airports$lon, us_airports$lat), col="#4C4D8B", bg=rgb( red=132, green=134, blue=242, alpha = 125, max=255 ), pch=21, cex= 1 )
#============ PLOT GREAT CIRCLES ========================
laflights <- routesAll[routesAll$sourceAirport=='LAX',]
sfoflights <- routesAll[routesAll$sourceAirport=='SFO',]
map("world", col="#f2f2f2", fill=TRUE, bg="light blue", lwd=0.05, xlim=xlim, ylim=ylim)
#PLOT LINES WITH NO COLOR SCALE
library(geosphere)
help(gcIntermediate)
for (j in 1:nrow(laflights)) {
inter <- gcIntermediate(c(laflights$lon.y[j], laflights$lat.y[j]), c(laflights$lon.x[j], laflights$lat.x[j]), n=100, addStartEnd=TRUE)
lines(inter, col="red", lwd=0.8)
}
help(gcIntermediate)
for (j in 1:nrow(sfoflights)) {
inter <- gcIntermediate(c(sfoflights$lon.y[j], sfoflights$lat.y[j]), c(sfoflights$lon.x[j], sfoflights$lat.x[j]), n=100, addStartEnd=TRUE)
lines(inter, col="blue", lwd=0.8)
}
#============== PUT IT ALL TOGETHER ========================
# PLOT BASE MAP
map("world", col="#f2f2f2", fill=TRUE, bg="light blue", lwd=0.05, xlim=xlim, ylim=ylim)
# PLOT FLIGHTS FROM LAX
for (j in 1:nrow(laflights)) {
inter <- gcIntermediate(c(laflights$lon.y[j], laflights$lat.y[j]), c(laflights$lon.x[j], laflights$lat.x[j]), n=100, addStartEnd=TRUE)
lines(inter, col="gray", lwd=0.8)
}
# PLOT DESTINATION MARK
points(mapproject(laflights$lon.x, laflights$lat.x), col="#4C4D8B", bg=rgb( red=132, green=134, blue=242, alpha = 125, max=255 ), pch=21, cex= 1 )
# PLOT ORIGIN MARK
points(mapproject(laflights$lon.y, laflights$lat.y), col="#ff9147", bg="#FFED47", pch=25, cex= 2 )
# ADD DESTINATION TEXT
text(laflights$lon.x, laflights$lat.x, laflights$destinationAirport, cex=.5, pos=4, col="#000000")
# ADD SCALE
map.scale(grconvertX(0.01, "npc"), grconvertY(0.07, "npc"), metric = FALSE, relwidth = .18, ratio = FALSE)
# Add Marks Legend
legend(
grconvertX(0.01, "npc"),
grconvertY(0.15, "npc"),
c('Origin', 'Destination'),
pch=c(25, 21),
col=c("#ff9147", "#4C4D8B"),
pt.bg=c("#FFED47",rgb( red=132, green=134, blue=242, alpha = 125, max=255 ) ),
cex = 1,
bty = "n",
ncol=2,
pt.cex = 1.5
)
|
# Auto-generated fuzz-test harness (AFL/RcppDeepState-style): invokes the
# compiled CNull routine with a fixed, machine-produced argument list and
# prints the structure of whatever comes back. Not meant for hand editing.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170322e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
#' Reload/maintain the Reactome caches shipped with TxDbLite
#'
#' This function should only ever be used to reload/maintain Reactome caches.
#' All the input files are as downloaded from the Reactome/Ensembl website,
#' but gzipped. It reads the Ensembl-to-Reactome mapping and the pathway-name
#' listing from the package's extdata, splits the mapping per supported
#' organism, and writes xz-compressed .rda caches under TxDbLitePath/data.
#' FIXME: add loading of interaction maps for all of the supported organisms.
#'
#' @param TxDbLitePath where the TxDbLite source build root can be found
#'
#' @return invisibly, a list of the cache and pathway name listings
#'   (elements \code{cache} and \code{pathways}).
#'
#' @export
loadReactome <- function(TxDbLitePath="~/Dropbox/TxDbLite") {
  # gzipped Reactome dump files installed with the package
  filePath <- system.file("extdata", "reactome", package="TxDbLite")
  # keep only the first two columns: Ensembl ID and Reactome term
  ensemblToReactome <- read.csv(paste0(filePath, "/Ensembl2Reactome.txt.gz"),
                                sep="\t", header=FALSE,
                                stringsAsFactors=FALSE)[, 1:2]
  names(ensemblToReactome) <- c("ID", "term")
  # organism abbreviations recognized for Reactome (package helper)
  reactomeOrganisms <- getSupportedAbbreviations("reactome")
  # split rows by organism; strpop presumably extracts the 2nd "-"-delimited
  # field of the term (e.g. "HSA" from "R-HSA-...") -- helper defined
  # elsewhere in TxDbLite, TODO confirm
  reactomeCache <- split(ensemblToReactome[, 1:2],
                         sapply(ensemblToReactome$term,
                                strpop, "-", 2))[reactomeOrganisms]
  # within each organism: map every Ensembl ID to its vector of terms
  reactomeCache <- lapply(reactomeCache, function(x) split(x$term, x$ID))
  save(reactomeCache,
       file=paste0(TxDbLitePath, "/data/reactomeCache.rda"),
       compress="xz")
  reactomePathways <- read.csv(paste0(filePath, "/ReactomePathways.txt.gz"),
                               sep="\t", header=FALSE, stringsAsFactors=FALSE)
  # drop repeated pathway IDs, then index pathway names by pathway ID
  reactomePathways <- reactomePathways[!duplicated(reactomePathways[,1]),]
  reactomePathways <- split(reactomePathways[,2], reactomePathways[,1])
  save(reactomePathways,
       file=paste0(TxDbLitePath, "/data/reactomePathways.rda"),
       compress="xz")
  res <- list(cache=reactomeCache,
              pathways=reactomePathways)
  invisible(res)
}
| /R/loadReactome.R | no_license | RamsinghLab/TxDbLite | R | false | false | 1,729 | r | #' this function should only ever be used to reload/maintain Reactome caches
#' all the files are as downloaded from the Reactome/Ensembl website but gzipped
#' FIXME: add loading of interaction maps for all of the supported organisms.
#'
#' @param TxDbLitePath where the TxDbLite source build root can be found
#'
#' @return invisibly, a list of the cache and pathway name listings.
#'
#' @export
loadReactome <- function(TxDbLitePath="~/Dropbox/TxDbLite") {
filePath <- system.file("extdata", "reactome", package="TxDbLite")
ensemblToReactome <- read.csv(paste0(filePath, "/Ensembl2Reactome.txt.gz"),
sep="\t", header=FALSE,
stringsAsFactors=FALSE)[, 1:2]
names(ensemblToReactome) <- c("ID", "term")
reactomeOrganisms <- getSupportedAbbreviations("reactome")
reactomeCache <- split(ensemblToReactome[, 1:2],
sapply(ensemblToReactome$term,
strpop, "-", 2))[reactomeOrganisms]
reactomeCache <- lapply(reactomeCache, function(x) split(x$term, x$ID))
save(reactomeCache,
file=paste0(TxDbLitePath, "/data/reactomeCache.rda"),
compress="xz")
reactomePathways <- read.csv(paste0(filePath, "/ReactomePathways.txt.gz"),
sep="\t", header=FALSE, stringsAsFactors=FALSE)
reactomePathways <- reactomePathways[!duplicated(reactomePathways[,1]),]
reactomePathways <- split(reactomePathways[,2], reactomePathways[,1])
save(reactomePathways,
file=paste0(TxDbLitePath, "/data/reactomePathways.rda"),
compress="xz")
res <- list(cache=reactomeCache,
pathways=reactomePathways)
invisible(res)
}
|
# Copyright Mark Niemann-Ross, 2017
# Author: Mark Niemann-Ross. mark.niemannross@gmail.com
# LinkedIn: https://www.linkedin.com/in/markniemannross/
# Github: https://github.com/mnr
# More Learning: http://niemannross.com/link/mnratlil
# Description: Heatmap example for Tidyverse
# Appreciation to Martin Hadley - https://www.linkedin.com/in/martinjohnhadley/
# data from https://www.transtats.bts.gov/DL_SelectFields.asp?Table_ID=236
# OriginState
# DepDelayMinutes: Difference in minutes between scheduled and actual departure time. Early departures set to 0.
# ArrDelayMinutes: Difference in minutes between scheduled and actual arrival time. Early arrivals set to 0.
# Distance between airports (miles)
# install.packages("tidyverse")
library(tidyverse)
# Pipeline: read the on-time performance extract, reshape wide -> long (one
# row per state/statistic pair), average each statistic per origin state,
# then draw a state-by-statistic heat map.
read_csv("../40797218_T_ONTIME.csv") %>%
  # divide by 100 so mean distance shares one fill scale with mean delays
  mutate(DISTANCE = DISTANCE / 100) %>%
  gather(key = stat,
         value = value,
         -ORIGIN_STATE_ABR,
         na.rm = TRUE) %>%
  mutate(value = as.numeric(value)) %>%
  group_by(ORIGIN_STATE_ABR, stat) %>%
  summarise(mean_value = mean(value, na.rm = TRUE)) %>%
  ungroup() %>%
  # reverse factor order so states read top-to-bottom on the y axis
  mutate(ORIGIN_STATE_ABR = fct_rev(ORIGIN_STATE_ABR)) %>%
  ggplot(aes(x = stat, y = ORIGIN_STATE_ABR)) +
  geom_tile(aes(fill = mean_value)) +
  scale_fill_continuous(low = "green", high = "red")
| /02_04 tidyverse comparison/heatmap_Tidyverse.R | no_license | anhnguyendepocen/R-Programming-in-Data-Science-Setup-and-Start | R | false | false | 1,294 | r | # Copyright Mark Niemann-Ross, 2017
# Author: Mark Niemann-Ross. mark.niemannross@gmail.com
# LinkedIn: https://www.linkedin.com/in/markniemannross/
# Github: https://github.com/mnr
# More Learning: http://niemannross.com/link/mnratlil
# Description: Heatmap example for Tidyverse
# Appreciation to Martin Hadley - https://www.linkedin.com/in/martinjohnhadley/
# data from https://www.transtats.bts.gov/DL_SelectFields.asp?Table_ID=236
# OriginState
# DepDelayMinutes: Difference in minutes between scheduled and actual departure time. Early departures set to 0.
# ArrDelayMinutes: Difference in minutes between scheduled and actual arrival time. Early arrivals set to 0.
# Distance between airports (miles)
# install.packages("tidyverse")
library(tidyverse)
read_csv("../40797218_T_ONTIME.csv") %>%
mutate(DISTANCE = DISTANCE / 100) %>%
gather(key = stat,
value = value,
-ORIGIN_STATE_ABR,
na.rm = TRUE) %>%
mutate(value = as.numeric(value)) %>%
group_by(ORIGIN_STATE_ABR, stat) %>%
summarise(mean_value = mean(value, na.rm = TRUE)) %>%
ungroup() %>%
mutate(ORIGIN_STATE_ABR = fct_rev(ORIGIN_STATE_ABR)) %>%
ggplot(aes(x = stat, y = ORIGIN_STATE_ABR)) +
geom_tile(aes(fill = mean_value)) +
scale_fill_continuous(low = "green", high = "red")
|
# Exercise 7: using dplyr on external data
# Load the `dplyr` library
# Use the `read.csv()` function to read in the included data set. Remember to
# save it as a variable.
# View the data frame you loaded, and get some basic information about the
# number of rows/columns.
# Note the "X" preceding some of the column titles as well as the "*" following
# the names of teams that made it to the playoffs that year.
# Add a column that gives the turnovers to steals ratio (TOV / STL) for each team
# Sort the teams from lowest turnover/steal ratio to highest
# Which team has the lowest turnover/steal ratio?
# Using the pipe operator, create a new column of assists per game (AST / G)
# AND sort the data.frame by this new column in descending order.
# Create a data frame called `good_offense` of teams that scored more than
# 8700 points (PTS) in the season
# Create a data frame called `good_defense` of teams that had more than
# 470 blocks (BLK)
# Create a data frame called `offense_stats` that only shows offensive
# rebounds (ORB), field-goal % (FG.), and assists (AST) along with the team name.
# Create a data frame called `defense_stats` that only shows defensive
# rebounds (DRB), steals (STL), and blocks (BLK) along with the team name.
# Create a function called `better_shooters` that takes in two teams and returns
# a data frame of the team with the better field-goal percentage. Include the
# team name, field-goal percentage, and total points in your resulting data frame
# Call the function on two teams to compare them (remember the `*` if needed)
| /chapter-11-exercises/exercise-7/exercise.R | permissive | sparkoparko/book-exercises | R | false | false | 1,601 | r | # Exercise 7: using dplyr on external data
# Load the `dplyr` library
# Use the `read.csv()` function to read in the included data set. Remember to
# save it as a variable.
# View the data frame you loaded, and get some basic information about the
# number of rows/columns.
# Note the "X" preceding some of the column titles as well as the "*" following
# the names of teams that made it to the playoffs that year.
# Add a column that gives the turnovers to steals ratio (TOV / STL) for each team
# Sort the teams from lowest turnover/steal ratio to highest
# Which team has the lowest turnover/steal ratio?
# Using the pipe operator, create a new column of assists per game (AST / G)
# AND sort the data.frame by this new column in descending order.
# Create a data frame called `good_offense` of teams that scored more than
# 8700 points (PTS) in the season
# Create a data frame called `good_defense` of teams that had more than
# 470 blocks (BLK)
# Create a data frame called `offense_stats` that only shows offensive
# rebounds (ORB), field-goal % (FG.), and assists (AST) along with the team name.
# Create a data frame called `defense_stats` that only shows defensive
# rebounds (DRB), steals (STL), and blocks (BLK) along with the team name.
# Create a function called `better_shooters` that takes in two teams and returns
# a data frame of the team with the better field-goal percentage. Include the
# team name, field-goal percentage, and total points in your resulting data frame
# Call the function on two teams to compare them (remember the `*` if needed)
|
#The two functions below are used in conjuction to calculate/caches inverse of matrices
#The makeCacheMatrix function takes in an argument, which is the matrix whose inverse will be caculated.
#The default value is an empty matrix.
#It will return a list of 4 functions, set, get, setmean and getmean. Set function sets the new matrix
#to be calculated; Get function returns the matrix; Setmean function "saves" the inverse value; Get
#function returns the inverse
# Build a matrix wrapper that can cache its inverse. Returns a list of four
# accessor closures sharing the enclosing environment:
#   set(y)        - replace the stored matrix and drop any cached inverse
#   get()         - retrieve the stored matrix
#   setinv(value) - store a computed inverse
#   getinv()      - retrieve the cached inverse (NULL until one is stored)
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # replacing the matrix invalidates the cache
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() {
      x
    },
    setinv = function(inverse) {
      cached_inverse <<- inverse
    },
    getinv = function() {
      cached_inverse
    }
  )
}
#cacheSolve will be used after makeCacheSolve function.
#It takes in an argument x, which will be the list returned at the end of the makeCacheMatrix function
#And then it sees if the matrix has already been calculated or not. If yes, it returns the cached value
#If not, it caculates the inverse and cache it
# Return the inverse of the matrix held in a makeCacheMatrix() wrapper.
# On a cache hit the stored inverse is returned directly (after a status
# message); otherwise the inverse is computed with solve(), stored via
# setinv() for next time, and returned. Extra arguments are forwarded to
# solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    # cache miss: compute, remember, and return the fresh inverse
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("getting cached data")
  }
  cached
}
| /cachematrix.R | no_license | irene0204/ProgrammingAssignment2 | R | false | false | 1,310 | r | #The two functions below are used in conjuction to calculate/caches inverse of matrices
#The makeCacheMatrix function takes in an argument, which is the matrix whose inverse will be caculated.
#The default value is an empty matrix.
#It will return a list of 4 functions, set, get, setmean and getmean. Set function sets the new matrix
#to be calculated; Get function returns the matrix; Setmean function "saves" the inverse value; Get
#function returns the inverse
makeCacheMatrix<-function (x=matrix()){
inv<-NULL
set<-function(y){
x<<-y
inv<<-NULL
}
get<-function ()x
setinv<-function (inverse) inv<<-inverse
getinv<-function() inv
list(set=set,get=get,setinv=setinv,getinv=getinv)
}
#cacheSolve will be used after makeCacheSolve function.
#It takes in an argument x, which will be the list returned at the end of the makeCacheMatrix function
#And then it sees if the matrix has already been calculated or not. If yes, it returns the cached value
#If not, it caculates the inverse and cache it
cacheSolve<-function (x,...){
#check to see if it is new matrix, if yes, calculate the inverse and chaces it.
inv1<-x$getinv()
if (!is.null(inv1)){
message("getting cached data")
return (inv1)
}
data<-x$get()
inv1<-solve(data,...)
x$setinv(inv1)
inv1
}
|
library(GMSE);
context("Landscape initialisation");
# Build a deterministic 10 x 10 landscape with three layers: cell values are
# drawn with mean 1, sd 0 and clamped to [1, 1], so every cell is exactly 1
# and the value expectations below are exact.
land <- make_landscape(model = "IBM",
                       rows = 10,
                       cols = 10,
                       cell_types = 1,
                       cell_val_mn = 1,
                       cell_val_sd = 0,
                       cell_val_max = 1,
                       cell_val_min = 1,
                       layers = 3,
                       ownership = 1,
                       owner_pr = NULL
);
# rows x cols x layers must be carried through to the array dimensions
test_that("Landscape dimensions are initialised accurately", {
    expect_equal(dim(land), c(10, 10, 3));
})
test_that("Landscape values are initialised accurately", {
    expect_equal(max(land), 1);
    expect_equal(min(land), 1);
})
# age_land() should restore layer 2 of a perturbed landscape (land + 1)
# back to the original layer-2 values
test_that("Landscape values are reset when needed", {
    expect_equal(age_land(land+1, land, 2)[,,2], land[,,2]);
})
context("Landscape initialisation");
land <- make_landscape(model = "IBM",
rows = 10,
cols = 10,
cell_types = 1,
cell_val_mn = 1,
cell_val_sd = 0,
cell_val_max = 1,
cell_val_min = 1,
layers = 3,
ownership = 1,
owner_pr = NULL
);
test_that("Landscape dimensions are initialised accurately", {
expect_equal(dim(land), c(10, 10, 3));
})
test_that("Landscape values are initialised accurately", {
expect_equal(max(land), 1);
expect_equal(min(land), 1);
})
test_that("Landscape values are reset when needed", {
expect_equal(age_land(land+1, land, 2)[,,2], land[,,2]);
}) |
setwd("D:/HCL/LikeMe")
#Loading the required packages.
library(formattable)
library(data.table)
library(shiny)
library(shinydashboard)
library(quanteda, irlba)
library(ggplot2)
#library(e1071)
#library(lattice)
library(zoo)
library(lubridate)
#library(fiftystater)
library(forecast)
library(rvest)
library(tibble)
library(randomForest)
library(tseries)
#library(maps)
#library(mapproj)
#library(tmap)
#library(maptools)
library(dplyr)
library(openxlsx)
#library(xml2)
library(sp)
library(plotly)
library(radarchart)
library(fmsb)
library(DT)
library(stringr)
library(caret)
#Reading the required csv files.
demand <- data.frame( fread("demand.csv", stringsAsFactors = FALSE))
demand.dump <-data.frame( fread("dump2.csv", stringsAsFactors = FALSE))
demand.upload <- demand.dump
demand.upload$V1 <- NULL
demand.dump$quarter <- quarter(dmy(demand.dump$Approval.Date))
demand.dump$year <- year(dmy(demand.dump$Approval.Date))
demand.dump$month <- month(dmy(demand.dump$Approval.Date))
maxdate <- max(dmy(demand.dump$Approval.Date))
datasetexp<-data.frame(fread("excel1.csv", stringsAsFactors = FALSE))
colors <- c('#4AC6B7', '#2457C5', '#DF0B0B',"#24C547", '#E71BB6')
indiadistance<-data.frame( fread("indaiusa Distance1.csv"))
demandda<-demand.dump
alternatives<-data.frame(fread("alternatives.csv"))
rowman<-data.frame(fread("ronnames chan1.csv"))
dd<-data.frame(fread("consolidated_skills1.csv", stringsAsFactors = FALSE))
cons <- data.frame(fread("Consolidated.csv", stringsAsFactors = F))
#Initial data preparation for skill radar.
row.names(indiadistance)<-rowman$actual
colnames(indiadistance)<-rowman$actual
customer<-as.data.frame(unique(demandda$Customer))
names(customer)<-"customer"
colnames(dd)<-rowman$actual
dd1<-dd
dd1$customer<-demandda$Customer
skill<-colnames(dd)
tdd<- t(dd)
tdddataframe<-data.frame(tdd,stringsAsFactors=FALSE)
tdd1<-tdddataframe
cons$date <- dmy(cons$Req.Date)
cons$week <- quarter(cons$date)
cons$year <- year(cons$date)
dem <- cons
dd_skills <- data.frame(fread("list_of_skills.csv", stringsAsFactors = F)) # We have identtified and removed some generic keywords from the identified keywords list
dd_skills <- subset(dd_skills, dd_skills$Pbb==0) # removing the generic keywords
##################################################Newmancodes##########################################
#####Like Me Fuctions for first module: Skill Radar
#Function for automatic filtering of the UI input with respect to customer
# List the skills relevant to a given customer.
#
# Filters the document-term table dd1 (skill columns plus a trailing
# "customer" column, appended at setup) to the rows for `customer`, and
# returns the names of the skill columns with at least one non-zero count.
# When `customer` is "", returns a blank choice followed by every skill
# column, for populating the unfiltered UI dropdown.
list_customer <- function(customer) {
  if (customer == "") {
    return(c("", as.character(unique(colnames(dd1)))))
  }
  # Bug fix: the original used dd1[, -1], which dropped the FIRST SKILL
  # column (the "customer" column is the LAST column of dd1) and left the
  # character "customer" column in the frame -- since "name" != 0 is TRUE,
  # "customer" then leaked into the returned skill list. Drop the customer
  # column by name instead.
  skill_cols <- setdiff(colnames(dd1), "customer")
  f <- as.data.frame(dd1[dd1$customer == customer, skill_cols])
  # drop = FALSE keeps a data.frame (and thus valid colnames) even when
  # only one skill column survives the filter
  d <- f[, colSums(f != 0) > 0, drop = FALSE]
  colnames(d)  # returns the skill list that corresponds to the input customer
}
#Function for automatic filtering of the UI input with respect to skill
# Customers whose job descriptions mention the given skill (non-zero count
# in dd1's skill column); with no skill selected, every customer in the
# demand dump. A leading "" keeps the dropdown clearable.
act_customer <- function(skill) {
  matches <- if (skill == "") {
    demandda$Customer
  } else {
    dd1$customer[dd1[, skill] > 0]
  }
  # returns the customer list that corresponds to the input skill
  c("", as.character(unique(matches)))
}
#Function for automatic filtering of the UI input with respect to skill
# Skill buckets of the job descriptions that mention the given skill
# (non-zero count in dd1's skill column); with no skill selected, every
# skill bucket in the demand dump. A leading "" keeps the dropdown clearable.
act_skill <- function(skill) {
  matches <- if (skill == "") {
    demandda$Skill.Bucket
  } else {
    demandda$Skill.Bucket[dd1[, skill] > 0]
  }
  # returns the skill bucket list that corresponds to the input skill
  c("", as.character(unique(matches)))
}
#Function for automatic filtering of the UI input with respect to skill
# Personal sub-areas (locations) of the job descriptions that mention the
# given skill; with no skill selected, every sub-area in the demand dump.
# A leading "" keeps the dropdown clearable.
act_location <- function(skill) {
  matches <- if (skill == "") {
    demandda$Personal.SubArea
  } else {
    demandda$Personal.SubArea[dd1[, skill] > 0]
  }
  # returns the subarea list that corresponds to the input skill
  c("", as.character(unique(matches)))
}
#Function for automatic filtering of the UI input with respect to customer
# Skill buckets observed for the given customer in the demand dump; with no
# customer selected, a blank choice followed by every skill bucket. (Note:
# the customer-specific branch intentionally has no leading "", matching
# the original behavior.)
list_skillbucket <- function(customer) {
  if (customer == "") {
    return(c("", as.character(unique(demandda$Skill.Bucket))))
  }
  # returns the skill bucket list that corresponds to the input customer
  unique(demandda$Skill.Bucket[demandda$Customer == customer])
}
#Function for automatic filtering of the UI input with respect to customer
# Personal sub-areas (locations) observed for the given customer in the
# demand dump; with no customer selected, a blank choice followed by every
# sub-area. (Note: the customer-specific branch intentionally has no
# leading "", matching the original behavior.)
list_location <- function(customer) {
  if (customer == "") {
    return(c("", as.character(unique(demandda$Personal.SubArea))))
  }
  # returns the subarea list that corresponds to the input customer
  unique(demandda$Personal.SubArea[demandda$Customer == customer])
}
#Function for Skill radar computes distance by Pearson corelation and makes out the radar
newman<-function(input, n, skillbucket, subarea,customer,raio, yea){
# Only if Skill is mentioned by the user compute the closest skills
if (input!=""){
#Receiving all the user input and filtering the job descriptions
A<-1:nrow(demandda)
if (customer!=""){
A<-which(demandda$Customer == customer)}
B<-1:nrow(demandda)
if (subarea!=""){
B<-which(demandda$Personal.SubArea == subarea)}
C<-1:nrow(demandda)
if (skillbucket!="") {
C<-which(demandda$Skill.Bucket==skillbucket)}
X<-1:nrow(demandda)
if (yea!="") {
X<-which(demandda$year==as.numeric(yea))}
D<-intersect(A,B)
E<-intersect(D,C)
E<-intersect(E,X)
if (length(E)==0){
return(list(data.frame("none"=""), "", "No JD"))
}
tdddataframe<-as.data.frame(tdddataframe[,E]) #Final filtered table
row.names(tdddataframe)<-skill
#Adding an additional empty column
no<-length(tdddataframe)+1
tdddataframe[,no]<-0
d<- tdddataframe[input,]
#TO know how the total frequency of the word in all the job decsriptions
coun<-d[, colSums(d == 0)== 0]
freq<- length(coun)
if (freq==0){
return(list(data.frame("none"=""), "", "No JD"))
}
#Computing distance using Pearson's Correlation.
dista <- function(x) ((1-cor(t(x)))/2)
jd<-length(tdddataframe)-1
#if no filters are applied to the dataframe then use the correlation matrix uploaded
if (jd==31049){
#print("using India Distance")
distmatrix<-indiadistance
}
#Compution of the Pearson correlation between the skills for the filtered matrix
else {
d1 <- dista(tdddataframe)
distmatrix<-as.data.frame(d1)
}
#Seperate out the disatnce of input
Skills_new<-as.data.frame(distmatrix[,input])
str(Skills_new)
names(Skills_new)<-"dist"
Skills_new$skills<-skill
Skills_new<-Skills_new[is.element(Skills_new$skills,dd_skills$Skills),]
#apply the threshold
data1<-Skills_new$skills[(Skills_new$dist<=0.5)]
data2<-head( (Skills_new[order(Skills_new$dist, decreasing=FALSE),]),n)
data2<- data2[data2$skills!=input,]
data<-intersect(data1,data2$skills)
data2<-data2[is.element(data2$skills,data),]
data2<- data2[order(data2$dist, decreasing=FALSE),]
data2$dist<-as.numeric(lapply(data2$dist, function(x) 1-x)) #distance computation by perason colrrelation
#Preparation of the table for displaying in Radar format
d<-max(data2$dist)+0.02
data2$max<-d
f<-min(data2$dist)-0.02
data2$min<-f
data3<-data2[c(4,3,1)]
tra<-data.frame(t(data3))
names(tra)<- data2$skills
return(list(tra, jd, freq))
}
# Only if Skill is not mentioned by the user instead uses only customer/skill bucket/area
else {
#Receiving all the user input and filtering the job descriptions
A<-1:nrow(demandda)
if (customer!=""){
A<-which(demandda$Customer == customer)}
B<-1:nrow(demandda)
if (subarea!=""){
B<-which(demandda$Personal.SubArea == subarea)}
C<-1:nrow(demandda)
if (skillbucket!="") {
C<-which(demandda$Skill.Bucket==skillbucket)}
X<-1:nrow(demandda)
if (yea!="") {
X<-which(demandda$year==as.numeric(yea))}
D<-intersect(A,B)
E<-intersect(D,C)
E<-intersect(E,X)
if (length(E)==0){
return(list(data.frame("none"=""), "", "No JD"))
}
tdddataframe<-as.data.frame(tdddataframe[,E])
row.names(tdddataframe)<-skill
#Adding a row for a reference with all 1s
addition<-nrow(tdddataframe)+1
tdddataframe[addition,]<-1
#Adding an additional empty column to allow for the computation of statdard deviation
no<-length(tdddataframe)+1
tdddataframe[,no]<-0
freq<- length(tdddataframe)-1
if (freq==0){
return(list(data.frame("none"="no Skills"), "", "No JD"))
}
#Computing distance using Pearson's Correlation.
dista <- function(x) ((1-cor(t(x)))/2)
jd<-length(tdddataframe)-1
#if no filters are applied to the dataframe then use the correlation matrix uploaded
if (jd==31049){
#print("using India Distance")
distmatrix<-indiadistance
}
else {
d1 <- dista(tdddataframe)
distmatrix<-as.data.frame(d1) #computing Perason's correlation
}
#Separate out the distance of the reference vector to the skills
Skills_new<-as.data.frame(distmatrix[,addition])
str(Skills_new)
names(Skills_new)<-"dist"
Skills_new$skills<-row.names(tdddataframe)
Skills_new<-Skills_new[is.element(Skills_new$skills,dd_skills$Skills),]
#apply the threshold
data1<-Skills_new$skills[which(Skills_new$dist<=0.5)]
data2<-head( (Skills_new[order(Skills_new$dist, decreasing=FALSE),]),n)
data2<- data2[data2$skills!=addition,]
data<-intersect(data1,data2$skills)
data2<-data2[is.element(data2$skills,data),]
data2<- data2[order(data2$dist, decreasing=FALSE),]
data2$dist<-as.numeric(lapply(data2$dist, function(x) 1-x))#distance computation
#preparation of the table for the radar output
d<-max(data2$dist)+0.02
data2$max<-d
f<-min(data2$dist)-0.02
data2$min<-f
data3<-data2[c(4,3,1)]
tra<-data.frame(t(data3))
names(tra)<- data2$skills
return(list(tra, jd, freq))
}
}
#retrieve the alternative skills
# Look up the alternate names recorded for a skill in the global
# `alternatives` table. The "none" placeholder maps to an empty string.
alter <- function(name) {
  if (name == "none") {
    return("")
  }
  alternatives$alternate[alternatives$Skillname == name]
}
#retrieve the definition
# Look up the definition recorded for a skill in the global `alternatives`
# table. The "none" placeholder maps to an empty string.
defin <- function(name) {
  if (name == "none") {
    return("")
  }
  alternatives$definition[alternatives$Skillname == name]
}
##############################################Contextual Search################################
#Function to search for resumes based on skills and job descriptions.
# Contextual resume search: scores candidate profiles against a skill list
# and a job description using cosine similarity (skill score) and
# tf-idf + cosine similarity (context / functional / systems scores), and
# returns the top-5 candidates or an error frame.
#
# Args (as used below; confirm against the caller):
#   skill1    - comma-separated skills (or the seed for the lookup paths)
#   job1      - job description text ("" / whitespace skips context scoring)
#   exp1      - experience filter; "No Preference" disables it
#   stype1    - "eser" = external.csv candidates, "iser" = internal.csv
#   sk.ill    - "I have already entered the skills", or a skill expanded
#               via the sibling newman() function
#   num1      - count forwarded to newman()
#   clack     - NOTE(review): never referenced in this body; dead parameter?
#   functional, systems - extra requirement texts ("" skips their scores)
#   jobboard1 - "yes" derives skills from the Dice job board via jobboard()
#
# NOTE(review): heavy side effects - setwd() to hard-coded paths, CSV
# reads/writes, print(), library(lsa) inside the body - and depends on
# data.table::fread, quanteda token functions, dplyr::left_join and the
# sibling functions newman() and jobboard() defined elsewhere in this file.
likeme <- function(skill1, job1, exp1, stype1, sk.ill, num1,clack, functional, systems, jobboard1){
setwd("D:/HCL/LikeMe")
#loading the skill set data and the stopwords data.
skills <- data.frame(fread("skillClustering.csv", header = TRUE, stringsAsFactors = FALSE))
stp <-data.frame( fread("stopwords.csv", header = TRUE, stringsAsFactors = FALSE))
#reading the candidate profiles from internal and external databases as per the user input
if(stype1 == "eser"){
candidates <- data.frame(fread("external.csv", stringsAsFactors = FALSE))
original <- data.frame( fread("external.csv", stringsAsFactors = FALSE))
candidates <- candidates[,c(1,3,4,5,6,7,8,9)]
original <- original[,c(1,3,4,5,6,7,8,9)]
if(sk.ill == "I have already entered the skills"){
candidates$requirement <- candidates$Profile#Add the skills
}else{
candidates$requirement <- paste("",candidates$Profile )
}
}else if(stype1 == "iser"){
#candidates <- data.frame( fread("internal.csv", stringsAsFactors = FALSE))
#original <- data.frame( fread("internal.csv", stringsAsFactors = FALSE) )
candidates <- read.csv("internal.csv", stringsAsFactors = FALSE)
original <- read.csv("internal.csv", stringsAsFactors = FALSE)
colnames(candidates)[1] <- "V1"
colnames(original)[1] <- "V1"
candidates <- candidates[,c(1,3,4,5,6,7,8,9)]
original <- original[,c(1,3,4,5,6,7,8,9)]
if(sk.ill == "I have already entered the skills"){
candidates$requirement <- candidates$Profile#Add the skills
}else{
candidates$requirement <- paste("",candidates$Profile )
}
}
#Candidate experience search preference.
if(exp1 == "No Preference"){
candidates <- candidates
original <- original
}else{
candidates <- subset(candidates, candidates$experience == exp1)
original <- subset(original, original$experience == exp1)
}
#Search preference based on skill.
# Build a synthetic "requirement" row holding the target skills/profile;
# it is prepended to candidates so every real row is scored against it.
if(jobboard1=="no"){
if(sk.ill == "I have already entered the skills"){
new_requirement <- data.frame(V1 = nrow(candidates)+1,File_Name = "",Mobile.Number = 9999999999,Email = "",Profile = job1, Education = "",Skills = skill1, TProfile = "")
new_requirement$requirement <- paste(new_requirement$Skills, new_requirement$Profile)
}else{
skill1 <- paste(colnames(data.frame(newman(sk.ill, num1, "","","","","")[1], check.names = F)),collapse = ",")
new_requirement <- data.frame(V1 = nrow(candidates)+1,File_Name = "",Mobile.Number = 999999999,Email = "", Profile = job1, Education = "",Skills = skill1, TProfile = "")
new_requirement$requirement <- paste(new_requirement$Skills, new_requirement$Profile)
print(new_requirement$requirement)
}
}else{
skill1 <- jobboard(skill1," "," ")$closely_related_skill_Dice_Insights[1]
new_requirement <- data.frame(V1 = nrow(candidates)+1,File_Name = "",Mobile.Number = 9999999999,Email = "",Profile = job1, Education = "",Skills = skill1, TProfile = "")
new_requirement$requirement <- paste(new_requirement$Skills, new_requirement$Profile)
}
print(new_requirement)
candidates <- rbind(new_requirement, candidates)
#functions for tf idf computation
term.frequency <- function(row) {
row / sum(row)
}
inverse.doc.freq <- function(col) {
corpus.size <- length(col)
doc.count <- length(which(col > 0))
log10(corpus.size / doc.count)
}
tf.idf <- function(x, idf) {
x * idf
}
# Row 1 of candidates is now the synthetic requirement; its TProfile gets
# the skill list so it participates in the skill-score matrix.
candidates$TProfile <- as.character(candidates$TProfile)
candidates$TProfile[1] <- skill1
tokens2 <- tokens(as.character(new_requirement$Skills), what = "word", remove_numbers = TRUE, remove_punct = TRUE)
tokens2 <- tokens_tolower(tokens2)
tokens2 <- tokens_select(tokens2, stp$TEXT, selection = "remove")
tokens2 <- as.character(tokens2)
#tokenisation of the profiles
# Skill score: blank/empty skill input short-circuits to all-zero scores.
if(grepl("^\\s*$", new_requirement$Skills) | length(tokens2) == 0){
score1 <- data.frame(File = candidates$File_Name,Mobile.Number = candidates$Mobile.Number,Email = candidates$Email, Score = rep(0,nrow(candidates)))
}else{
tokens <- tokens(as.character(new_requirement$Skills), what = "word", remove_numbers = TRUE, remove_punct = TRUE)
tokens <- tokens_tolower(tokens)
tokens <- tokens_select(tokens, stp$TEXT, selection = "remove")
train.tokens.dfm <- dfm(tokens, tolower = FALSE)
tokens <- tokens_wordstem(tokens, language = "english")
tokens <- tokens_ngrams(tokens, n = 1)
if(length(tokens)==1){
print(1)
}
#Tokenizing the skills.
skills.tokens <- tokens(skills$value, what = "word", remove_numbers = TRUE, remove_punct = TRUE)
skills.tokens <- tokens_tolower(skills.tokens)
skills.tokens <- tokens_select(skills.tokens, stp$TEXT, selection = "remove")
skills.tokens <- tokens_ngrams(skills.tokens, n = 1:5)
skills.tokens <- tokens_select(tokens, unlist(as.list(skills.tokens)), selection = "keep")
skills.tokens <- tokens_select(skills.tokens, stopwords(), selection = "remove")
tokens.set <- append(tokens, skills.tokens)
tokens1 <- tokens(as.character(candidates$TProfile), what = "word", remove_numbers = TRUE, remove_punct = TRUE)
tokens1 <- tokens_tolower(tokens1)
tokens1 <- tokens_select(tokens1, stopwords(), selection = "remove")
tokens1 <- tokens_ngrams(tokens1, n = 1)
#tokens1 <- tryCatch(tokens_select(tokens1, unlist(as.list(tokens)), selection = "keep"), error = function(e){"no skill entered"})
tokens1 <- tokens_select(tokens1, unlist(as.list(tokens)), selection = "keep")
#print(tokens1)
tokens.dfm <- dfm(tokens1, tolower = FALSE)
tokens.matrix <- as.matrix(tokens.dfm)
# Binarize: presence/absence of a skill term, not its frequency.
tokens.matrix[tokens.matrix>0]<-1
tokens.df <- as.data.frame(tokens.matrix)
tokens <- as.matrix(tokens.df)
#Creating the tokenized matrix.
tokens <- t(tokens)
write.csv(data.frame(tokens),"score222.csv")
#Scoring the candidate based on skill.
# NOTE(review): library() inside a function attaches lsa globally.
library(lsa)
start.time <- Sys.time()
if(nrow(candidates)>1){
#Finding Cosine Similarity for skill scoring.
cos <- cosine(tokens)
cos <- as.data.frame(cos)
# cos$text1 is the similarity of every document to row 1 (the requirement).
score1 <- data.frame(File = candidates$File_Name,Mobile.Number = candidates$Mobile.Number,Email = candidates$Email, score = cos$text1)
score1 <- score1[order(score1$score, decreasing = TRUE),]
names <- data.frame(File = original$File_Name, Email = original$Email, Mobile.Number = original$Email, Skill = original$Skills)
score1 <- left_join(score1, names, by = "File")
colnames(score1) <- c("File","Mobile.Number", "Email", "Score", "em"," em1","Skill")
if(nrow(score1)==0){
score1 <- data.frame(NO = character(), MATCHING = character(), PROFILE = character(), FOUND = character())
}
}else{
score1 <- data.frame(NO = character(), MATCHING = character(), PROFILE = character(), FOUND = character())
}
total.time <- Sys.time() - start.time
total.time
#write.csv(score1,"score222.csv")
score1$Score[is.nan(score1$Score)] <- 0
score1 <- score1[order(score1$Email, decreasing = TRUE),]
}
#Check whether job description is available or not.
# Context score: tf-idf of the job-description terms, skill terms removed.
if(grepl("^\\s*$", job1)){
score2 <- data.frame(File = candidates$File_Name,Mobile.Number = candidates$Mobile.Number,Email = candidates$Email, Score = rep(0,nrow(candidates)))
}else{
tokens1 <- tokens(candidates$requirement, what = "word", remove_numbers = TRUE, remove_punct = TRUE)
tokens1 <- tokens_tolower(tokens1)
tokens1 <- tokens_select(tokens1, stopwords(), selection = "remove")
new.tokens <- tokens(as.character(new_requirement$Profile), what = "word", remove_numbers = TRUE, remove_punct = TRUE)
new.tokens <- tokens_tolower(new.tokens)
new.tokens <- tokens_select(new.tokens, stopwords(), selection = "remove")
new.tokens <- tokens_ngrams(new.tokens, n = 1:5)
tokens1 <- tokens_ngrams(tokens1, n = 1:5)
tokens1 <- tokens_select(tokens1, unlist(as.list(new.tokens)), selection = "keep")
new.tokens1 <- tokens(as.character(new_requirement$Skills), what = "word", remove_numbers = TRUE, remove_punct = TRUE)
new.tokens1 <- tokens_tolower(new.tokens1)
new.tokens1 <- tokens_select(new.tokens1, stopwords(), selection = "remove")
new.tokens1 <- tokens_ngrams(new.tokens1, n = 1:5)
tokens1 <- tokens_select(tokens1, unlist(as.list(new.tokens1)), selection = "remove")
tokens.dfm <- dfm(tokens1, tolower = FALSE)
tokens.matrix <- as.matrix(tokens.dfm)
tokens.df <- as.data.frame(tokens.matrix)
tokens.df <- apply(tokens.matrix, 1, term.frequency)
tokens.idf <- apply(tokens.matrix, 2, inverse.doc.freq)
#Creating a tf-idf matrix
if(length(tokens.idf)>1){
tokens.tfidf <- apply(tokens.df, 2, tf.idf, idf = tokens.idf)
}else{
tokens.tfidf <- tokens.df*tokens.idf
}
tokens.tfidf <- t(tokens.tfidf)
# Zero out rows with NA/NaN (e.g. all-zero documents) before cosine.
incomplete.cases <- which(!complete.cases(tokens.tfidf))
tokens.tfidf[incomplete.cases,] <- rep(0.0, ncol(tokens.tfidf))
tokens.df <- as.data.frame(tokens.tfidf)
tokens <- as.matrix(tokens.df)
tokens <- t(tokens)
#Scoring the candidate based on context.
library(lsa)
start.time <- Sys.time()
if(nrow(candidates)>1){
#Finding cosine similarity
cos <- cosine(tokens)
cos <- as.data.frame(cos)
score2 <- data.frame(File = candidates$File_Name,Mobile.Number = candidates$Mobile.Number,Email = candidates$Email, score = cos$text1)
score2 <- score2[order(score2$score, decreasing = TRUE),]
names <- data.frame(File = original$File_Name,Email = original$Email, Mobile.Number = original$Email, Skill = original$Skills)
score2 <- left_join(score2, names, by = "File")
colnames(score2) <- c("File","Mobile.Number", "Email", "Score", "em"," em1","Skill")
if(nrow(score2)==0){
score2 <- data.frame(NO = character(), MATCHING = character(), PROFILE = character(), FOUND = character())
}
}else{
score2 <- data.frame(NO = character(), MATCHING = character(), PROFILE = character(), FOUND = character())
}
total.time <- Sys.time() - start.time
total.time
score2$Score[is.nan(score2$Score)] <- 0
score2 <- score2[order(score2$Email, decreasing = TRUE),]
}
# Combine skill and context scores. NOTE(review): this pairs rows by
# position and relies on both frames being sorted by Email above - confirm
# the ordering assumption holds for all branches.
score1$scores <- score2$Score
score1$cumulative <- score1$Score+score1$scores
# Helper: tf-idf + cosine score of each candidate profile against one
# free-text requirement (used for the functional and systems scores).
scoring <- function(candidates, context){
candidates$Profile <- as.character(candidates$Profile)
candidates$Profile[1] <- context
tokens1 <- tokens(candidates$Profile, what = "word", remove_numbers = TRUE, remove_punct = TRUE)
tokens1 <- tokens_tolower(tokens1)
tokens1 <- tokens_select(tokens1, stopwords(), selection = "remove")
new.tokens <- tokens(as.character(context), what = "word", remove_numbers = TRUE, remove_punct = TRUE)
new.tokens <- tokens_tolower(new.tokens)
new.tokens <- tokens_select(new.tokens, stopwords(), selection = "remove")
new.tokens <- tokens_ngrams(new.tokens, n = 1:5)
tokens1 <- tokens_ngrams(tokens1, n = 1:5)
tokens1 <- tokens_select(tokens1, unlist(as.list(new.tokens)), selection = "keep")
tokens.dfm <- dfm(tokens1, tolower = FALSE)
tokens.matrix <- as.matrix(tokens.dfm)
tokens.df <- as.data.frame(tokens.matrix)
tokens.df <- apply(tokens.matrix, 1, term.frequency)
tokens.idf <- apply(tokens.matrix, 2, inverse.doc.freq)
tokens.tfidf <- apply(tokens.df, 2, tf.idf, idf = tokens.idf)
tokens.tfidf <- t(tokens.tfidf)
incomplete.cases <- which(!complete.cases(tokens.tfidf))
tokens.tfidf[incomplete.cases,] <- rep(0.0, ncol(tokens.tfidf))
tokens.df <- as.data.frame(tokens.tfidf)
tokens <- as.matrix(tokens.df)
tokens <- t(tokens)
#Scoring the candidates based on functional and system requirements.
if(nrow(candidates)>1){
#Finding Cosine Similarity
cos <- cosine(tokens)
cos <- as.data.frame(cos)
score <- data.frame(File = candidates$File_Name,Mobile.Number = candidates$Mobile.Number,Email = candidates$Email, score = cos$text1)
score <- score[order(score$score, decreasing = TRUE),]
names <- data.frame(File = original$File_Name,Email = original$Email, Mobile.Number = original$Email, Skill = original$Skills)
score <- score[,c(1,4)]
if(nrow(score)==0){
score <- data.frame(NO = character(), MATCHING = character(), PROFILE = character(), FOUND = character())
}
}else{
score <- data.frame(NO = character(), MATCHING = character(), PROFILE = character(), FOUND = character())
}
return(score)
}
if(grepl("^\\s*$", functional)){
functional_score <- data.frame(File = score1$File, score = rep(0,nrow(score1)))
}else{
functional_score <- scoring(candidates, functional)
}
if(grepl("^\\s*$", systems)){
systems_score <- data.frame(File = score1$File, score = rep(0,nrow(score1)))
}else{
systems_score <- scoring(candidates, systems)
}
score1 <- left_join(score1,functional_score,by = 'File')
score1 <- left_join(score1,systems_score,by = 'File')
#Creating a scored table and sorting candidates based on their cumulative scores.
score1$cscores <- score1$score.x+score1$score.y
score1$cumulative <- score1$cumulative+score1$cscores
score1 <- score1[order(score1$cumulative, decreasing = TRUE),]
score1 <- subset(score1, score1$File!="")
# Keep only candidates whose skill score clears the 0.5 threshold.
score1 <- subset(score1, score1$Score>0.5)
score1$Mob <- NULL
score1$Skill<-NULL
if(ncol(score1)==9){
colnames(score1) <- c("File","Mobile Number","Email","Skill Score (Out of 1)",
"Context Score (Out of 1)",
"Cumulative Score (Out of 5)",
"Functional Score (Out of 1)",
"Systems Score (Out of 1)",
"FSC Score (Out of 3)")
}else{
colnames(score1) <- c("File","Mobile Number","Email","Skill Score (Out of 1)",
"Skill","em","Context Score (Out of 1)",
"Cumulative Score (Out of 5)",
"Functional Score (Out of 1)",
"Systems Score (Out of 1)",
"FSC Score (Out of 3)")
}
score1$Skill<-NULL
score1$em<-NULL
# Keep the top 5 candidates and round the displayed scores.
score1 <- score1[1:5,]
score1$`Skill Score (Out of 1)` <- round(score1$`Skill Score (Out of 1)`, digits = 2)
score1$`Context Score (Out of 1)` <- round(as.numeric(score1$`Context Score (Out of 1)`), digits = 2)
score1$`Cumulative Score (Out of 5)` <- round(as.numeric(score1$`Cumulative Score (Out of 5)`), digits = 2)
score1$`Functional Score (Out of 1)`<- round(as.numeric(score1$`Functional Score (Out of 1)`),digits = 2)
score1$`Systems Score (Out of 1)`<- round(as.numeric(score1$`Systems Score (Out of 1)`),digits = 2)
score1$`FSC Score (Out of 3)`<- round(as.numeric(score1$`FSC Score (Out of 3)`),digits = 2)
# Drop score columns whose corresponding input was left blank.
if(grepl("^\\s*$", job1)){
score1$`Context Score (Out of 1)`<-NULL
}
if(grepl("^\\s*$", functional)){
score1$`Functional Score (Out of 1)`<-NULL
}
if(grepl("^\\s*$", systems)){
score1$`Systems Score (Out of 1)`<-NULL
}
if(grepl("^\\s*$", functional) & grepl("^\\s*$", systems) ){
score1$`FSC Score (Out of 3)`<- NULL
}
if(grepl("^\\s*$", functional) & grepl("^\\s*$", systems) & grepl("^\\s*$", job1)){
score1$`Cumulative Score (Out of 5)`<- NULL
}
# For the surviving candidates, list requested skills absent from each
# candidate's tokenized skill profile.
if(nrow(score1)>0){
tokens <- tokens(as.character(new_requirement$Skills), what = "word", remove_numbers = TRUE, remove_punct = TRUE)
tokens <- tokens_tolower(tokens)
tokens <- tokens_select(tokens, stp$TEXT, selection = "remove")
train.tokens.dfm <- dfm(tokens, tolower = FALSE)
tokens <- tokens_wordstem(tokens, language = "english")
tokens <- tokens_ngrams(tokens, n = 1)
tokens1 <- tokens(as.character(candidates$TProfile), what = "word", remove_numbers = TRUE, remove_punct = TRUE)
tokens1 <- tokens_tolower(tokens1)
tokens1 <- tokens_select(tokens1, stopwords(), selection = "remove")
tokens1 <- tokens_ngrams(tokens1, n = 1)
skilltokens <- list()
aaa <- character(0)
for(i in 1:nrow(candidates)){
if(!identical(aaa,unlist(tokens1[i]))){
skilltokens[i] <- paste(tokens_select(tokens, unlist(as.list(tokens1[i])), selection = "remove"),collapse = ",")
}else{
skilltokens[i]<-""
}
}
score3 <- data.frame(File = candidates$File_Name, Skills.not.present = unlist(skilltokens))
score1 <- left_join(score1, score3, by = "File")
}
# score1[1,1] is NA when no candidate survived the filters above.
if(!is.na(score1[1,1])){
return(score1)
}else if(jobboard1=="yes"){
return(data.frame(Error = "No Alternative skills found on Dice or job Description entered."))
}else{
return(data.frame(Error = "No Skill or job Description entered."))
}
}
#######trycatchfunction#####################################################
#' Fetch and parse a URL, degrading to a message string on failure.
#'
#' @param url URL to fetch with rvest::read_html().
#' @return An xml_document on success; otherwise a character message:
#'   "MSG: Check the internet connection" when the error mentions "host"
#'   (typical of DNS/connection failures), "MSG: Not Available in the
#'   database" for any other error, and "MSG: none" when only a warning
#'   was raised.
readUrl <- function(url) {
  out <- tryCatch(
    {
      read_html(url)
    },
    error = function(cond) {
      d <- as.character(cond)
      # Fix: grep() returns an integer vector, so isTRUE(grep("host", d))
      # was always FALSE and the connectivity branch was unreachable.
      # grepl() yields the intended logical test.
      if (any(grepl("host", d, fixed = TRUE))) {
        return("MSG: Check the internet connection")
      } else {
        return("MSG: Not Available in the database")
      }
    },
    warning = function(cond) {
      return("MSG: none")
    }
  )
  return(out)
}
######################################Like me - Job board search#################################################
#Function to search the job board for alternative skills.
#' Scrape Dice Insights for skills closely related to the given keywords.
#'
#' @param skill1,skill2,skill3 Skill keywords; empty strings are skipped.
#' @return data.frame with columns `keywords` and
#'   `closely_related_skill_Dice_Insights` (the scraped related-skills
#'   text, or an availability/connectivity message).
#' NOTE(review): depends on readUrl() defined in this file and on rvest
#' (`html_node`, `html_text`) with the magrittr pipe being attached.
jobboard <- function(skill1, skill2, skill3) {
  # Keep only the non-empty keywords.
  l <- {}
  l <- append(l, skill1)
  l <- append(l, skill2)
  l <- append(l, skill3)
  l <- l[l != ""]
  len <- length(l)
  # One row per keyword; result columns are filled in below.
  a_dummy <- data.frame(l)
  names(a_dummy) <- "keywords"
  a_dummy$no_of_searches <- 0
  # Fix: this column was initialised with a doubled "_Dice_Insights"
  # suffix, so the per-row writes below used to create a second,
  # NA-padded column instead of filling this one.
  a_dummy$closely_related_skill_Dice_Insights <- 0
  a_dummy$link <- 0
  # Fix: seq_len() so an all-empty input does not iterate over 1:0.
  for (i in seq_len(len)) {
    # URL-encode the keyword for dice.com, with a few special cases.
    d <- gsub(" ", "+", l[i], fixed = TRUE)
    if (d == "Cascading+Style+Sheets+(CSS)") {
      # Fix: the original read a_dummy$l[i], but that column was renamed
      # to "keywords" above, so a_dummy$l was NULL; use the vector itself.
      d <- gsub("(.*? )", "", l[i])
    }
    if (d == "c") { d <- "C+C%2B%2B" }
    if (d == "c++") { d <- "C+C%2B%2B" }
    if (d == "vc++") { d <- "vc%2B%2B" }
    if (d == "embedded") { d <- "embedded+system" }
    if (d == "c#") { d <- "c%23" }
    # Fetch the "closest skills" page; readUrl() returns an xml_document
    # on success or an "MSG: ..." string on failure.
    url2 <- paste("https://www.dice.com/skills/", d, ".html", sep = "")
    movie2 <- readUrl(url2)
    # Fix: isTRUE(grep(...)) is always FALSE (grep returns an integer),
    # so the connectivity branch was unreachable. Also only inspect
    # character results so page HTML containing "internet" is not
    # misclassified as a connection failure.
    if (is.character(movie2) && any(grepl("internet", movie2, fixed = TRUE))) {
      g1 <- "Check internet"
    } else if (inherits(movie2, "xml_document")) {
      g1 <- movie2 %>% html_node(".col-md-7") %>% html_text()
    } else {
      g1 <- "Not avaialble in the database"
    }
    # Strip tabs/newlines and the "Related Skills" heading from the text.
    s1 <- gsub("\\\t", "", g1)
    s1 <- gsub("\\\n", " ", s1)
    s1 <- gsub("\\Related Skills", "", s1)
    a_dummy$closely_related_skill_Dice_Insights[i] <- s1
    a_dummy$link[i] <- url2
  }
  ddd <- a_dummy[, c("keywords", "closely_related_skill_Dice_Insights")]
  return(ddd)
}
############################################Customer Forecast#################################################
#Function used to forecast the demand for customer
#' Top customers by demand for a quarter, with a next-quarter forecast.
#'
#' Ranks customers by aggregated initial demand for quarter `b` of year `c`
#' (optionally restricted to skill bucket `a`), keeps the top 10, and adds
#' a 12-week ARIMA forecast of demand per customer.
#'
#' @param a Skill bucket name, or "All" for no skill filter.
#' @param b Quarter number (1-4) to report on.
#' @param c Year to report on.
#' @param country "India" selects INDIA rows of `demand.dump`, else USA.
#' @return data.frame with Customer, Demand and
#'   `Forecast for the Next Quarter` for the top-10 customers.
#' NOTE(review): depends on the global `demand.dump`, on lubridate,
#' data.table, dplyr and forecast, and on hard-coded Windows paths via
#' setwd() side effects.
cust.forecast <- function(a, b, c, country){
  # Regional slice of the demand dump.
  if(country == "India"){
    setwd("D:/HCL/LikeMe")
    demand <- subset(demand.dump, demand.dump$country == "INDIA")
  }else{
    setwd("D:/HCL/LikeMe")
    demand <- subset(demand.dump, demand.dump$country == "USA")
  }
  # Fix: the original stored Sys.time() in `cust.forecast` (shadowing this
  # function) and ended with print(Sys.time() - cust.forecast.time) AFTER
  # return() - unreachable code referencing an undefined variable. The
  # dead timing code has been removed.
  master.demand <- demand
  demand.area <- master.demand
  demand.area$quarter <- quarter(dmy(demand.area$Approval.Date))
  demand.area$year <- year(dmy(demand.area$Approval.Date))
  demand.area$month <- month(dmy(demand.area$Approval.Date))
  # Aggregate initial demand to quarter/year/customer (and skill bucket
  # when one was selected).
  if(a != "All"){
    demand.area <- aggregate(demand.area$InitialDemand, by = list(demand.area$quarter, demand.area$year, demand.area$Skill.Bucket, demand.area$Customer), FUN = sum)
    colnames(demand.area) <- c("Quarter", "Year", "Skill", "Customer", "Demand")
  }else{
    demand.area <- aggregate(demand.area$InitialDemand, by = list(demand.area$quarter, demand.area$year, demand.area$Customer), FUN = sum)
    colnames(demand.area) <- c("Quarter", "Year", "Customer", "Demand")
  }
  demand.area$time <- paste("Q", demand.area$Quarter, "-", demand.area$Year)
  # Restrict to the requested quarter/year (and skill).
  if(a != "All"){
    Total <- subset(demand.area, demand.area$Skill == a & Year == c & Quarter == b)
  }else{
    Total <- subset(demand.area, Year == c & Quarter == b)
  }
  Total$Demand[is.na(Total$Demand)] <- 0
  Total <- data.frame(Customer = Total$Customer, Demand = Total$Demand)
  Total <- Total[order(Total$Demand, decreasing = TRUE),]
  Total <- subset(Total, Total$Demand != 0)
  # NOTE(review): with fewer than 10 customers this pads with NA rows;
  # confirm whether head(Total, 10) is the intended behaviour.
  Total <- Total[1:10,]
  # 12-week-ahead ARIMA forecast of weekly demand for one customer.
  forecasting <- function(cust){
    setwd("D:/HCL/LikeMe/Demand")
    demand <- data.frame(fread("dump.csv", stringsAsFactors = FALSE))
    demand$date <- dmy(demand$Req.Date)
    demand$quarter <- quarter(demand$date)
    demand$month <- month(demand$date)
    demand$year <- year(demand$date)
    demand$week <- week(demand$date)
    dates <- demand
    if(a != "All"){
      demand <- demand %>% filter(demand$Skill.Bucket == a)
    }
    # Fix: removed an unused top-3 `location.demand` aggregation that was
    # computed here and never read.
    demand <- demand %>% filter(tolower(demand$Customer) == tolower(cust))
    if(nrow(demand) == 0){
      return(0)
    }else{
      demand <- aggregate(demand$InitialDemand, by = list(demand$week, demand$year), FUN = sum)
      colnames(demand) <- c("Week", "Year", "Demand")
      setwd("D:/HCL/LikeMe")
      template <- data.frame(fread("template2015.csv"))
      colnames(template) <- c("Year", "Week")
      demand <- merge(template, demand, all = TRUE)
      demand$Demand[is.na(demand$Demand)] <- 0
      # Truncate the weekly series at the end of the last complete quarter,
      # then clean outliers before fitting.
      # NOTE(review): the Q1 branch sets frequency = n rather than 52 like
      # the other branches - looks like a bug; confirm before changing.
      if(month(max(dates$date)) %in% c(1, 2, 3)){
        n <- length(unique(dates$year)) - 1
        n <- n * 52
        demand.ts <- tsclean(ts(demand[1:n,]$Demand, frequency = n))
      }
      if(month(max(dates$date)) %in% c(4, 5, 6)){
        n <- length(unique(dates$year)) - 1
        n <- (n * 52) + 13
        demand.ts <- tsclean(ts(demand[1:n,]$Demand, frequency = 52))
      }
      if(month(max(dates$date)) %in% c(7, 8, 9)){
        n <- length(unique(dates$year)) - 1
        n <- (n * 52) + 26
        demand.ts <- tsclean(ts(demand[1:n,]$Demand, frequency = 52))
      }
      if(month(max(dates$date)) %in% c(10, 11, 12)){
        n <- length(unique(dates$year)) - 1
        n <- (n * 52) + 38
        demand.ts <- tsclean(ts(demand[1:n,]$Demand, frequency = 52))
      }
      return(round(sum(forecast(auto.arima(demand.ts), h = 12)$mean[1:12])))
    }
  }
  toplocation <- Total$Customer
  toplocation <- lapply(toplocation, function(x) forecasting(x))
  Total$'Forecast for the Next Quarter' <- unlist(toplocation)
  return(Total)
}
############################################Combination Forecast############################################
#Function to forecast the demand for the different combinations of location and customers.
#' Forecast demand for combinations of top-5 locations and top-5 customers.
#'
#' Ranks locations and customers by aggregated initial demand for quarter
#' `b` of year `c` (optionally restricted to skill bucket `a`), then runs
#' a 12-week ARIMA forecast for every location x customer pair.
#'
#' @param a Skill bucket, or "All" (no combination forecast is produced
#'   when "All" - see the final else branch).
#' @param b Quarter number used for the ranking.
#' @param c Year used for the ranking.
#' @param country Currently unused: the regional filtering block was
#'   commented out upstream, and the data always comes from "dump.csv".
#' @return data.frame of Skill/Location/Customer/Forecast rows, or a
#'   one-column message frame when a == "All".
combopred <- function(a, b, c, country){
  # NOTE(review): reads dump.csv from the current working directory (the
  # original setwd call is commented out) - confirm the expected cwd.
  master.demand <- data.frame(fread("dump.csv"))
  demand.area <- master.demand
  demand.area$date <- dmy(demand.area$Approval.Date)
  demand.area$quarter <- quarter(dmy(demand.area$Approval.Date))
  demand.area$year <- year(dmy(demand.area$Approval.Date))
  demand.area$month <- month(dmy(demand.area$Approval.Date))
  demand.area$week <- week(dmy(demand.area$Approval.Date))
  dem <- demand.area
  # Aggregate demand per quarter/year/skill for locations and customers.
  # Fix: the original if/else on `a` contained two byte-identical
  # branches; collapsed into one.
  demand.location <- aggregate(demand.area$InitialDemand, by = list(demand.area$quarter, demand.area$year, demand.area$Skill.Bucket, demand.area$Personal.SubArea), FUN = sum)
  colnames(demand.location) <- c("Quarter", "Year", "Skill", "Location", "Demand")
  demand.customer <- aggregate(demand.area$InitialDemand, by = list(demand.area$quarter, demand.area$year, demand.area$Skill.Bucket, demand.area$Customer), FUN = sum)
  colnames(demand.customer) <- c("Quarter", "Year", "Skill", "Customer", "Demand")
  # Fix: `time` was built from demand.area$Quarter / $Year, which do not
  # exist there (those columns are lower-case), so paste() dropped the
  # NULLs and produced a constant string; use each aggregate's own columns.
  demand.location$time <- paste("Q", demand.location$Quarter, "-", demand.location$Year)
  demand.customer$time <- paste("Q", demand.customer$Quarter, "-", demand.customer$Year)
  # Top-5 locations and customers for the requested period.
  if(a != "All"){
    Total.location <- subset(demand.location, Year == c & Quarter == b & demand.location$Skill == a)
    Total.location <- Total.location[order(Total.location$Demand, decreasing = TRUE),]
    Total.location <- Total.location$Location[1:5]
    Total.customer <- subset(demand.customer, Year == c & Quarter == b & demand.customer$Skill == a)
    Total.customer <- Total.customer[order(Total.customer$Demand, decreasing = TRUE),]
    Total.customer <- Total.customer$Customer[1:5]
  }else{
    Total.location <- subset(demand.location, Year == c & Quarter == b)
    Total.location <- Total.location[order(Total.location$Demand, decreasing = TRUE),]
    Total.location <- Total.location$Location[1:5]
    Total.customer <- subset(demand.customer, Year == c & Quarter == b)
    Total.customer <- Total.customer[order(Total.customer$Demand, decreasing = TRUE),]
    Total.customer <- Total.customer$Customer[1:5]
  }
  grid <- expand.grid(Total.location, Total.customer)
  colnames(grid) <- c("Location", "Customer")
  # 12-week ARIMA forecast for one location/customer pair.
  combination.forecasting <- function(Locat, Custo){
    demand <- dem
    dates <- dem
    if(a != "All"){
      demand <- subset(demand, demand$Skill.Bucket == a)
    }
    demand <- subset(demand, demand$Personal.SubArea == Locat)
    demand <- subset(demand, demand$Customer == Custo)
    if(nrow(demand) == 0){
      return("No Such Combination")
    }else{
      demand <- aggregate(demand$InitialDemand, by = list(demand$week, demand$year), FUN = sum)
      colnames(demand) <- c("Week", "Year", "Demand")
      setwd("D:/HCL/LikeMe")
      template <- data.frame(fread("template2015.csv"))
      colnames(template) <- c("Year", "Week")
      demand <- merge(template, demand, all = TRUE)
      demand$Demand[is.na(demand$Demand)] <- 0
      # Truncate the weekly series at the last complete quarter.
      # NOTE(review): the Q1 branch uses frequency = n, unlike the other
      # branches' frequency = 52 - looks suspicious; confirm intent.
      if(month(max(dates$date)) %in% c(1, 2, 3)){
        n <- length(unique(dates$year)) - 1
        n <- n * 52
        demand.ts <- tsclean(ts(demand[1:n,]$Demand, frequency = n))
      }
      if(month(max(dates$date)) %in% c(4, 5, 6)){
        n <- length(unique(dates$year)) - 1
        n <- (n * 52) + 13
        demand.ts <- tsclean(ts(demand[1:n,]$Demand, frequency = 52))
      }
      if(month(max(dates$date)) %in% c(7, 8, 9)){
        n <- length(unique(dates$year)) - 1
        n <- (n * 52) + 26
        demand.ts <- tsclean(ts(demand[1:n,]$Demand, frequency = 52))
      }
      if(month(max(dates$date)) %in% c(10, 11, 12)){
        n <- length(unique(dates$year)) - 1
        n <- (n * 52) + 38
        demand.ts <- tsclean(ts(demand[1:n,]$Demand, frequency = 52))
      }
      # Fix: the original fitted auto.arima twice (once for the negative
      # check, once for the return); compute the prediction once.
      prediction <- round(sum(forecast(auto.arima(demand.ts), h = 12)$mean[1:12]))
      if(prediction < 0){
        return("Not Predictable")
      }else{
        return(prediction)
      }
    }
  }
  if(a != "All"){
    Total <- data.frame(Skill = rep(a, nrow(grid)), grid, Forecast = mapply(combination.forecasting, grid$Location, grid$Customer))
    Total <- subset(Total, Total$Forecast != "No Such Combination")
    Total <- subset(Total, Total$Customer != "Others")
  }else{
    Total <- data.frame(No_Skill_Selected = "No Skill Selected so the predictions cannot be made for Customer and Location combinations if a Skill was not selected")
  }
  return(Total)
}
###########################################DSM+################################################################
#Function to forecast the overall demand
forecaster <- function(skill.input, country){
if(country=="India"){
#setwd("D:/HCL/LikeMe")
#demand <- data.frame( fread("dump2.csv", header = TRUE, stringsAsFactors = FALSE))
demand <- subset(demand.dump, demand.dump$country=="INDIA")
#setwd("D:/HCL/LikeMe/Demand")
#write.csv(demand,"demand.csv")
#write.csv(demand, "dump.csv")
}else{
#setwd("D:/HCL/LikeMe")
#demand <- data.frame( fread("dump2.csv", header = TRUE, stringsAsFactors = FALSE))
demand <- subset(demand.dump, demand.dump$country=="USA")
#setwd("D:/HCL/LikeMe/Demand")
#write.csv(demand,"demand.csv")
#write.csv(demand, "dump.csv")
}
#setwd("D:/HCL/LikeMe/Demand")
#Read the demand data file from the folder.
#master <- data.frame(fread("demand.csv", header = TRUE, stringsAsFactors = FALSE))
master<-demand
#Create a variable called Total Fulfilled.
master$Total.Fulfilled <- master$Internal_Filled+master$External_Joined
#Create a variable called Unfulfilled Overdue.
master$Unfulfilled.Overdue <- master$InitialDemand-(master$Internal_Filled+master$External_Joined+master$DroppedPos)
#Select columns that is needed for analysis and import them.
master <- master[,c("V1", "ReqNo", "Joining.Level.2","Customer","Segment",
"Req.Date","Skill.Bucket","Primary.Skill.Area","Requisition.Source",
"Internal_Filled","External_Joined","Total.Fulfilled",
"Unfulfilled.Overdue","Vacancy","DroppedPos","InitialDemand","vAdditionalRemarks","Personal.SubArea")]
#Remove observations from the data that do not have any requisition date.
master <- master[complete.cases(master$Req.Date),]
#Modifying the column names.
colnames(master) <- c("data.src","srn","l2","customer","segment","date","skill","sr.skill","req.sor",
"int.ful","ext.ful","tot.ful","un.od","net.demand","demand.drop",
"overall.demand","job.desc","Location")
#Changing the classes of the variables.
master$date <- dmy(master$date)
master$data.src <- factor(master$data.src)
master$l2 <- factor(master$l2)
master$segment <- factor(master$segment)
master$skill <- factor(master$skill)
master$req.sor <- factor(master$req.sor)
master1 <- master
master1$month <- month(master1$date)
master1$year <- year(master1$date)
#Removing duplicates.
master <- master[!duplicated(master),]
#Uncomment the following lines of code when the first new demand file is placed or uploaded.
#new.demand <- read.csv("newdemand.csv", stringsAsFactors = F)
#colnames(new.demand) <- c("data.src","srn","l2","customer","segment","date","skill","sr.skill","req.sor",
# "int.ful","ext.ful","tot.ful","un.od","net.demand","demand.drop",
# "overall.demand","job.desc","Location")
#new.demand <- new.demand[complete.cases(new.demand$Req.Date),]
#colnames(new.demand) <- c("data.src","srn","l2","customer","segment","date","skill","sr.skill","req.sor",
# "int.ful","ext.ful","tot.ful","un.od","net.demand","demand.drop",
# "overall.demand","job.desc","Location")
#new.demand$date <- dmy(new.demand$date)
#new.demand$data.src <- factor(new.demand$data.src)
#new.demand$l2 <- factor(new.demand$l2)
#new.demand$segment <- factor(new.demand$segment)
#new.demand$skill <- factor(new.demand$skill)
#new.demand$req.sor <- factor(new.demand$req.sor)
#Comment the next line when new demand is placed in the folder or uploaded.
new.demand <- master
master.length <- nrow(master)
new.length <- nrow(new.demand)
master.demand <- rbind(master, new.demand)
master.demand <- master.demand[!duplicated(master.demand),]
master.demand$requirement <- paste(master.demand$sr.skill,master.demand$job.desc)
#Use the package "quanteda" to work with the text data.
#tokenize the requirements.
full.tokens <- tokens(master.demand$requirement, what = "word", remove_numbers = TRUE, remove_punct = TRUE)
#Lower case the tokens.
full.tokens <- tokens_tolower(full.tokens)
#Removing stop words.
full.tokens <- tokens_select(full.tokens, stopwords(), selection = "remove")
#performing stemming on the requirement text.
full.tokens <- tokens_wordstem(full.tokens, language = "english")
#Create bag of words.
full.tokens.dfm <- dfm(full.tokens, tolower = FALSE)
#Transform to matrix.
full.tokens.matrix <- as.matrix(full.tokens.dfm)
#Convert to dataframe.
full.tokens.df <- data.frame(full.tokens.matrix)
#Binding the skill bucket as the class label
full.tokens.df$class.label <- master.demand$skill
skills.list <- skill.input
#Check the whether there is any new demand that has been added. If present,
#1. Bucket the demand or,
#2. Forecast the demand directly
if(nrow(master.demand) > nrow(master)){
#Split and bucket the new demand.
#train <- full.tokens.df[1:master.length,]
#Separate the new demand.
test <- full.tokens.df[master.length+1:nrow(full.tokens.df),]
#Load the model that was created.
load("C:/Users/varun/Desktop/jije.RData")
#Train Random Forest
#rf.train <- randomForest(class.label~.-req.no-l2.name, data = train)
#Predict the buckets using the model that was created.
rf.predict <- predict(model, test)
#Add the predictions to the test dataset.
test$class.label <- rf.predict
#Bind the train and test.
train.test <- rbind(train,test)
#Add the skills back to the master demand.
master.demand$skill <- train.test$class.label
#Creating "month" and "year"
master.demand$week <- week(master.demand$date)
master.demand$month <- month(master.demand$date)
master.demand$year <- year(master.demand$date)
master.demand$mon_year <- as.yearmon(master.demand$date)
master.demand$quarter <- quarter(master.demand$date)
#Subset data after 2016 and subset the demands A & C.
demand.2016 <- subset(master.demand, year>2015)
demand.2016 <- subset(demand.2016, segment == "A" | segment == "C")
#Creating a skill list.
net.results = data.frame( month = character() , Skill = character(), Seasonal_Naive = numeric())
ovr.results = data.frame( month = character() , Skill = character(), Seasonal_Naive = numeric())
tot.results = data.frame( month = character() , Skill = character(), Seasonal_Naive = numeric())
for(i in 1:1){
#Function used for predicting the demand.
prediction <- function(ovrdemand.agg, ext.agg,int.agg, totful.agg){
colnames(ovrdemand.agg) <- c("month","year","demand")
colnames(ext.agg) <- c("month","year","demand")
colnames(int.agg) <- c("month","year","demand")
colnames(totful.agg) <- c("month","year","demand")
#Finding the last month and year.
ovrdemand.agg <- ovrdemand.agg[-c(nrow(ovrdemand.agg))]
ext.agg <- ext.agg[-c(nrow(ext.agg))]
int.agg <- int.agg[-c(nrow(int.agg))]
totful.agg <- totful.agg[-c(nrow(totful.agg))]
#Convert data to time series.
ovr.demandseries <- ts(ovrdemand.agg$demand, frequency = 52)
ext.demandseries <- ts(ext.agg$demand, frequency = 52)
int.demandseries <- ts(int.agg$demand, frequency = 52)
tot.demandseries <- ts(totful.agg$demand, frequency = 52)
order <-data.frame( fread("order.csv"))
if(skills.list!="All"){
order <- subset(order, order$skill == skills.list)
}else{
order<-order
}
#Forecast using the auto.arima function
ovr.forecast <- forecast(auto.arima(ovr.demandseries), h = 12)
final.results <- data.frame(month = c("Month 1", "Month 2","Month 3"),
overall = c(sum(ovr.forecast$mean[1:4]),sum(ovr.forecast$mean[5:8]),sum(ovr.forecast$mean[9:12])))
return(final.results)
}
#subset the demand by skill.
skill.demand <- subset(demand.2016, demand.2016$skill == skills.list)
#Aggregate the demand.
ovrdemand.agg <- aggregate(skill.demand$overall.demand, by = list(skill.demand$week, skill.demand$year), FUN = sum)
ext.agg <- aggregate(skill.demand$ext.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
int.agg <- aggregate(skill.demand$int.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
totful.agg <- aggregate(skill.demand$tot.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
ovrdemand.agg <- ovrdemand.agg[1:52,]
ext.agg <- ext.agg[1:52,]
int.agg <- int.agg[1:52,]
totful.agg <- totful.agg[1:52,]
#Predict for JFM
jfm <- prediction(ovrdemand.agg, ext.agg, int.agg, totful.agg)
ovrdemand.agg <- aggregate(skill.demand$overall.demand, by = list(skill.demand$week, skill.demand$year), FUN = sum)
ext.agg <- aggregate(skill.demand$ext.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
int.agg <- aggregate(skill.demand$int.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
totful.agg <- aggregate(skill.demand$tot.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
ovrdemand.agg <- ovrdemand.agg[1:64,]
ext.agg <- ext.agg[1:64,]
int.agg <- int.agg[1:64,]
totful.agg <- totful.agg[1:64,]
#Predict for AMJ
amj <- prediction(ovrdemand.agg, ext.agg, int.agg, totful.agg)
ovrdemand.agg <- aggregate(skill.demand$overall.demand, by = list(skill.demand$week, skill.demand$year), FUN = sum)
ext.agg <- aggregate(skill.demand$ext.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
int.agg <- aggregate(skill.demand$int.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
totful.agg <- aggregate(skill.demand$tot.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
ovrdemand.agg <- ovrdemand.agg[1:76,]
ext.agg <- ext.agg[1:76,]
int.agg <- int.agg[1:76,]
totful.agg <- totful.agg[1:76,]
#Predict the JAS
jas <- prediction(ovrdemand.agg, ext.agg, int.agg, totful.agg)
jfm <- rbind(jfm,amj)
jfm <- rbind(jfm,jas)
}
}else{
#Creating "month" and "year"
master.demand$week <- week(master.demand$date)
master.demand$month <- month(master.demand$date)
master.demand$year <- year(master.demand$date)
master.demand$mon_year <- as.yearmon(master.demand$date)
master.demand$quarter <- quarter(master.demand$date)
#write.csv(master.demand, "master.csv")
#Subset data after 2016 and subset the demands A & C.
demand.2016 <- subset(master.demand, year>2015)
demand.2016 <- subset(demand.2016, segment == "A" | segment == "C")
#Creating a skill list.
net.results = data.frame( month = character() , Skill = character(), Seasonal_Naive = numeric())
ovr.results = data.frame( month = character() , Skill = character(), Seasonal_Naive = numeric())
tot.results = data.frame( month = character() , Skill = character(), Seasonal_Naive = numeric())
for(i in 1:1){
prediction <- function(ovrdemand.agg, ext.agg,int.agg, totful.agg){
#Finding the last month and year.
ovrdemand.agg <- ovrdemand.agg[-c(nrow(ovrdemand.agg))]
ext.agg <- ext.agg[-c(nrow(ext.agg))]
int.agg <- int.agg[-c(nrow(int.agg))]
totful.agg <- totful.agg[-c(nrow(totful.agg))]
#Convert data to time series.
ovr.demandseries <- tsclean(ts(ovrdemand.agg$demand, frequency = 52))
ext.demandseries <- tsclean(ts(ext.agg$demand, frequency = 52))
int.demandseries <- tsclean(ts(int.agg$demand, frequency = 52))
tot.demandseries <- tsclean(ts(totful.agg$demand, frequency = 52))
order <-data.frame( fread("order.csv"))
order <- subset(order, order$skill == as.character(skills.list))
#Forecast using auto.arima
ovr.forecast <- forecast(auto.arima(ovr.demandseries), h = 12)
final.results <- data.frame(month = c("Month 1"),
overall = c(sum(ovr.forecast$mean[1:12])))
return(final.results)
}
if(skills.list!="All"){
skill.demand <- subset(demand.2016, demand.2016$skill == as.character(skills.list))
}else{
skill.demand <- demand.2016
}
#Aggregate the demand.
ovrdemand.agg <- aggregate(skill.demand$overall.demand, by = list(skill.demand$week, skill.demand$year), FUN = sum)
ext.agg <- aggregate(skill.demand$ext.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
int.agg <- aggregate(skill.demand$int.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
totful.agg <- aggregate(skill.demand$tot.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
colnames(ovrdemand.agg) <- c("week","year","demand")
colnames(ext.agg) <- c("week","year","demand")
colnames(int.agg) <- c("week","year","demand")
colnames(totful.agg) <- c("week","year","demand")
setwd("D:/HCL/LikeMe")
template <- data.frame( fread("template2015.csv"))
colnames(template) <- c("year", "week")
ovrdemand.agg <- merge(template, ovrdemand.agg, all = TRUE)
ovrdemand.agg$demand[is.na(ovrdemand.agg$demand)] <- 0
ovrdemand.agg <- ovrdemand.agg[1:52,]
ext.agg <- ext.agg[1:52,]
int.agg <- int.agg[1:52,]
totful.agg <- totful.agg[1:52,]
#Prediction in JFM
jfm <- prediction(ovrdemand.agg, ext.agg, int.agg, totful.agg)
ovrdemand.agg <- aggregate(skill.demand$overall.demand, by = list(skill.demand$week, skill.demand$year), FUN = sum)
ext.agg <- aggregate(skill.demand$ext.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
int.agg <- aggregate(skill.demand$int.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
totful.agg <- aggregate(skill.demand$tot.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
colnames(ovrdemand.agg) <- c("week","year","demand")
colnames(ext.agg) <- c("week","year","demand")
colnames(int.agg) <- c("week","year","demand")
colnames(totful.agg) <- c("week","year","demand")
template <- data.frame( fread("template2015.csv"))
colnames(template) <- c("year", "week")
ovrdemand.agg <- merge(template, ovrdemand.agg, all = TRUE)
ovrdemand.agg$demand[is.na(ovrdemand.agg$demand)] <- 0
ovrdemand.agg <- ovrdemand.agg[1:64,]
ext.agg <- ext.agg[1:64,]
int.agg <- int.agg[1:64,]
totful.agg <- totful.agg[1:64,]
#Prediction for April, May and June.
amj <- prediction(ovrdemand.agg, ext.agg, int.agg, totful.agg)
ovrdemand.agg <- aggregate(skill.demand$overall.demand, by = list(skill.demand$week, skill.demand$year), FUN = sum)
ext.agg <- aggregate(skill.demand$ext.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
int.agg <- aggregate(skill.demand$int.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
totful.agg <- aggregate(skill.demand$tot.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
colnames(ovrdemand.agg) <- c("week","year","demand")
colnames(ext.agg) <- c("week","year","demand")
colnames(int.agg) <- c("week","year","demand")
colnames(totful.agg) <- c("week","year","demand")
template <- data.frame( fread("template2015.csv"))
colnames(template) <- c("year", "week")
ovrdemand.agg <- merge(template, ovrdemand.agg, all = TRUE)
ovrdemand.agg$demand[is.na(ovrdemand.agg$demand)] <- 0
#Logic to create forcast for the next quarter based on the dates in the data.
if(month(max(master.demand$date)) %in% c(1,2,3)){
n <- length(unique(master.demand$year))-1
n <- n*52
ovrdemand.agg <- ovrdemand.agg[1:n,]
ext.agg <- ext.agg[1:n,]
int.agg <- int.agg[1:n,]
totful.agg <- totful.agg[1:n,]
}
if(month(max(master.demand$date)) %in% c(4,5,6)){
n <- length(unique(master.demand$year))-1
n <- (n*52)+(13)
ovrdemand.agg <- ovrdemand.agg[1:n,]
ext.agg <- ext.agg[1:n,]
int.agg <- int.agg[1:n,]
totful.agg <- totful.agg[1:n,]
}
if(month(max(master.demand$date)) %in% c(7,8,9)){
n <- length(unique(master.demand$year))-1
n <- (n*52)+(26)
ovrdemand.agg <- ovrdemand.agg[1:n,]
ext.agg <- ext.agg[1:n,]
int.agg <- int.agg[1:n,]
totful.agg <- totful.agg[1:n,]
}
if(month(max(master.demand$date)) %in% c(10,11,12)){
n <- length(unique(master.demand$year))-1
n <- (n*52)+(38)
ovrdemand.agg <- ovrdemand.agg[1:n,]
ext.agg <- ext.agg[1:n,]
int.agg <- int.agg[1:n,]
totful.agg <- totful.agg[1:n,]
}
#Prediction for July, August and September.
jas <- prediction(ovrdemand.agg, ext.agg, int.agg, totful.agg)
jfm <- rbind(jfm,amj)
jfm <- rbind(jfm,jas)
}
}
#Condidtion to check whether the prediction is for a skill or the complete data.
if(skills.list!="All"){
skill.demand <- subset(master1, master1$skill == as.character(skills.list))
}else{
skill.demand <- master1
}
#Subset the demand for years greater than 2015.
skill.demand <- subset(skill.demand, skill.demand$year >2015)
skill.demand$quarter <- quarter(skill.demand$date)
#Subset the demand for the segments A and C.
skill.demand <- subset(skill.demand, skill.demand$segment == "A" | skill.demand$segment == "C")
#Merge all the results together into one dataframe.
if(nrow(skill.demand)!= 0){
#Aggregate the overall demand, external fulfillment, internal fulfillment and total fulfillment.
ovrdemand.agg <- aggregate(skill.demand$overall.demand, by = list(skill.demand$quarter, skill.demand$year), FUN = sum)
ext.agg <- aggregate(skill.demand$ext.ful, by = list(skill.demand$quarter, skill.demand$year), FUN = sum)
int.agg <- aggregate(skill.demand$int.ful, by = list(skill.demand$quarter, skill.demand$year), FUN = sum)
totful.agg <- aggregate(skill.demand$tot.ful, by = list(skill.demand$quarter, skill.demand$year), FUN = sum)
#Bind all the aggregations together.
ovrdemand.agg <- cbind(ovrdemand.agg, ext.agg$x)
ovrdemand.agg <- cbind(ovrdemand.agg, int.agg$x)
ovrdemand.agg <- cbind(ovrdemand.agg, totful.agg$x)
colnames(ovrdemand.agg) <- c("quarter","year","overall","external","internal","total")
} else{
ovrdemand.agg <- data.frame(month = rep("month", 5), year = rep("2017",5), overall = rep("0",5), external = rep("0",5), internal = rep("0",5), total = rep("0",5))
}
#write.csv(ovrdemand.agg, "original.csv")
jfm$year <- rep(2017,3)
colnames(jfm) <- c("Quarter","Demand","Year")
jfm$Demand <- round(jfm$Demand)
setwd("D:/HCL/LikeMe")
qy <- data.frame( fread("quarteryear.csv"))
ovrdemand.agg <- merge(qy,ovrdemand.agg, all=TRUE)
ovrdemand.agg <- ovrdemand.agg[order(ovrdemand.agg$quarter),]
ovrdemand.agg <- ovrdemand.agg[order(ovrdemand.agg$year),]
if(month(max(master.demand$date)) %in% c(1,2,3)){
jfm$Quarter<- "Q1 - JFM"
}else if(month(max(master.demand$date)) %in% c(4,5,6)){
jfm$Quarter <- "Q2 - AMJ"
}else if(month(max(master.demand$date)) %in% c(7,8,9)){
jfm$Quarter <- "Q3 - JAS"
}else if(month(max(master.demand$date)) %in% c(10,11,12)){
jfm$Quarter <- "Q4 - OND"
}
return(jfm)
}
#Create the data used for plotting the demand map.
# maptable(a, b, c, country)
#   a       - skill bucket name, or "All" for every skill
#   b       - quarter number (1-4) used to subset the demand
#   c       - year used to subset the demand
#   country - "India" selects the INDIA rows of demand.dump; anything else USA
# Returns a 50-row data frame (State, Demand): initial demand aggregated by
# US state (Personal.SubArea), zero-filled for states with no demand, used as
# the data behind the map plot.
# NOTE(review): depends on the global demand.dump and on csv files under
# D:/HCL/LikeMe, and calls setwd() as a side effect — confirm this is intended.
maptable <- function(a,b,c, country){
# Pick the country slice of the global demand dump.
if(country=="India"){
#setwd("D:/HCL/LikeMe")
#demand <- data.frame( fread("dump2.csv", header = TRUE, stringsAsFactors = FALSE))
demand <- subset(demand.dump, demand.dump$country=="INDIA")
#setwd("D:/HCL/LikeMe/Demand")
#write.csv(demand,"demand.csv")
#write.csv(demand, "dump.csv")
}else{
#setwd("D:/HCL/LikeMe")
#demand <- data.frame( fread("dump2.csv", header = TRUE, stringsAsFactors = FALSE))
demand <- subset(demand.dump, demand.dump$country=="USA")
#setwd("D:/HCL/LikeMe/Demand")
#write.csv(demand,"demand.csv")
#write.csv(demand, "dump.csv")
}
#setwd("D:/HCL/LikeMe/Demand")
#master.demand <-data.frame( fread("dump.csv"))
master.demand <-demand
demand.area <- master.demand
# Calendar fields derived from the day-month-year approval date.
demand.area$quarter <- quarter(dmy(demand.area$Approval.Date))
demand.area$year <- year(dmy(demand.area$Approval.Date))
# Aggregate initial demand by quarter/year/location, additionally by skill
# bucket when a specific skill was requested.
if(a!="All"){
demand.area <- aggregate(demand.area$InitialDemand, by = list(demand.area$quarter,demand.area$year,demand.area$Skill.Bucket, demand.area$Personal.SubArea), FUN = sum)
colnames(demand.area) <- c("Quarter", "Year", "Skill", "Location", "Demand")
}else{
demand.area <- aggregate(demand.area$InitialDemand, by = list(demand.area$quarter,demand.area$year, demand.area$Personal.SubArea), FUN = sum)
colnames(demand.area) <- c("Quarter", "Year", "Location", "Demand")
}
demand.area$time <- paste("Q",demand.area$Quarter,"-",demand.area$Year)
# US county map data; used here only for its list of state/region names.
all_states <- map_data("county")
# Renaming the columns.
colnames(all_states) <- c("long","lat", "group", "order", "Location", "subregion")
# Lower-case the locations so they match map_data's region names.
demand.area$Location <- tolower(demand.area$Location)
# Keep only the requested skill/quarter/year slice.
if(a!="All"){
Total <- subset(demand.area, demand.area$Skill == a & Year == c & Quarter ==b)
}else{
Total <- subset(demand.area, Year == c & Quarter ==b)
}
#Total <- merge(all_states, demand.area,all = TRUE)
Total <- Total[Total$Location!="district of columbia",]
setwd("D:/HCL/LikeMe")
states <- data.frame( fread("states.csv"))
colnames(states) <- c("Column1", "long", "lat", "order", "hole", "piece", "Location", "group")
# Outer-join against the full state list so every state appears, then
# zero-fill states that had no demand in this slice.
st <- data.frame(Location = unique(map_data('county')$region))
Total <- merge(st, Total, all = TRUE)
Total$Demand[is.na(Total$Demand)] <- 0
Total <- merge(states, Total, all = TRUE)
Total$Demand[is.na(Total$Demand)] <- 0
Total <- data.frame(State = Total$Location, Demand = Total$Demand)
Total <- subset(Total, Total$State != "district of columbia")
Total <- subset(Total, tolower(Total$State) %in% tolower(unique(states$Location)))
# Demand for all the states has been calculated.
# NOTE(review): hard-coded to 50 rows — assumes exactly the 50 US states
# survive the filters above; verify if states.csv ever changes.
Total <- Total[1:50,]
#print("stop maptable")
return(Total)
}
#Function to create a heat map
# maps(a, b, c, country)
# Heat-map table: the top five US states by initial demand for skill bucket
# `a` ("All" = every skill) in quarter `b` of year `c`, plus an ARIMA-based
# next-quarter demand forecast for each of those states.
# NOTE(review): depends on the global demand.dump and on csv files under
# D:/HCL/LikeMe, and calls setwd() as a side effect — confirm this is intended.
maps <- function(a,b,c, country){
# Select the country slice of the global demand dump.
if(country=="India"){
#setwd("D:/HCL/LikeMe")
#demand <- data.frame( fread("dump2.csv", header = TRUE, stringsAsFactors = FALSE))
demand <- subset(demand.dump, demand.dump$country=="INDIA")
#setwd("D:/HCL/LikeMe/Demand")
#write.csv(demand,"demand.csv")
#write.csv(demand, "dump.csv")
}else{
#setwd("D:/HCL/LikeMe")
#demand <- data.frame( fread("dump2.csv", header = TRUE, stringsAsFactors = FALSE))
demand <- subset(demand.dump, demand.dump$country=="USA")
#setwd("D:/HCL/LikeMe/Demand")
#write.csv(demand,"demand.csv")
#write.csv(demand, "dump.csv")
}
#setwd("D:/HCL/LikeMe/Demand")
#master.demand <-data.frame( fread("dump.csv"))
master.demand<-demand
#print("Start Maps")
demand.area <- master.demand
# Calendar fields derived from the day-month-year approval date.
demand.area$quarter <- quarter(dmy(demand.area$Approval.Date))
demand.area$year <- year(dmy(demand.area$Approval.Date))
demand.area$month <- month(dmy(demand.area$Approval.Date))
# Aggregate initial demand per quarter/year/location (and skill when given).
if(a!="All"){
demand.area <- aggregate(demand.area$InitialDemand, by = list(demand.area$quarter,demand.area$year,demand.area$Skill.Bucket, demand.area$Personal.SubArea), FUN = sum)
colnames(demand.area) <- c("Quarter", "Year", "Skill", "Location", "Demand")
}else{
demand.area <- aggregate(demand.area$InitialDemand, by = list(demand.area$quarter,demand.area$year, demand.area$Personal.SubArea), FUN = sum)
colnames(demand.area) <- c("Quarter", "Year", "Location", "Demand")
}
demand.area$time <- paste("Q",demand.area$Quarter,"-",demand.area$Year)
# US county map data; used here only for its list of state/region names.
all_states <- map_data("county")
colnames(all_states) <- c("long","lat", "group", "order", "Location", "subregion")
demand.area$Location <- tolower(demand.area$Location)
# Keep only the requested skill/quarter/year slice.
if(a!="All"){
Total <- subset(demand.area, demand.area$Skill == a & Year == c & Quarter ==b)
}else{
Total <- subset(demand.area, Year == c & Quarter ==b)
}
Total <- Total[Total$Location!="district of columbia",]
setwd("D:/HCL/LikeMe")
states <- data.frame( fread("states.csv"))
colnames(states) <- c("Column1", "long", "lat", "order", "hole", "piece", "Location", "group")
# Outer-join against the full state list so every state appears, then
# zero-fill the states that had no demand in this slice.
st <- data.frame(Location = unique(map_data('county')$region))
Total <- merge(st, Total, all = TRUE)
Total$Demand[is.na(Total$Demand)] <- 0
Total <- merge(states, Total, all = TRUE)
Total$Demand[is.na(Total$Demand)] <- 0
Total <- data.frame(State = Total$Location, Demand = Total$Demand)
Total <- subset(Total, Total$State != "district of columbia")
# Keep the five states with the highest non-zero demand.
Total <- Total[order(Total$Demand,decreasing = TRUE),]
Total <- subset(Total, Total$Demand!=0)
Total <- Total[1:5,]
# Forecast next-quarter demand for one state (loca) from the weekly demand
# history in dump.csv, via tsclean + auto.arima, summed over 12 weekly steps.
# Captures `a` (the skill filter) from the enclosing scope.
forecasting <- function(loca){
setwd("D:/HCL/LikeMe/Demand")
#print(loca)
demand <- data.frame( fread("dump.csv",stringsAsFactors = F))
demand$date <- dmy(demand$Req.Date)
demand$quarter <- quarter(demand$date)
demand$month <- month(demand$date)
demand$year <- year(demand$date)
demand$week <- week(demand$date)
dates <- demand
if(a!="All"){
demand <- demand %>% filter(demand$Skill.Bucket == a)
}
location.demand <- aggregate(demand$InitialDemand, by=list(demand$Personal.SubArea), FUN = sum)
location.demand <- location.demand[order(location.demand$x, decreasing = T),]
location.demand <- location.demand[1:3,]$Group.1
demand <- demand %>% filter(tolower(demand$Personal.SubArea) == tolower(loca))
if(nrow(demand)==0){
return("No forecast available.")
}else{
# Weekly totals, aligned to the full week/year template so that weeks with
# no requisitions count as zero demand.
demand <- aggregate(demand$InitialDemand, by = list(demand$week, demand$year), FUN = sum)
colnames(demand) <- c("Week","Year","Demand")
setwd("D:/HCL/LikeMe")
template <- data.frame( fread("template2015.csv"))
colnames(template) <- c("Year", "Week")
demand <- merge(template, demand, all = TRUE)
demand$Demand[is.na(demand$Demand)] <- 0
# Truncate the series at the end of the last complete quarter:
# n = (complete years * 52 weeks) + weeks elapsed in the current year.
if(month(max(dates$date)) %in% c(1,2,3)){
n <- length(unique(dates$year))-1
n <- n*52
# NOTE(review): frequency = n here while every other branch uses 52 —
# looks like a bug; confirm before changing.
demand.ts <- tsclean(ts(demand[1:n,]$Demand,frequency = n))
}
if(month(max(dates$date)) %in% c(4,5,6)){
n <- length(unique(dates$year))-1
n <- (n*52)+13
demand.ts <- tsclean(ts(demand[1:n,]$Demand,frequency = 52))
}
if(month(max(dates$date)) %in% c(7,8,9)){
n <- length(unique(dates$year))-1
n <- (n*52)+26
demand.ts <- tsclean(ts(demand[1:n,]$Demand,frequency = 52))
}
if(month(max(dates$date)) %in% c(10,11,12)){
n <- length(unique(dates$year))-1
n <- (n*52)+38
demand.ts <- tsclean(ts(demand[1:n,]$Demand,frequency = 52))
}
# Sum of the 12 forecast weeks = next-quarter forecast.
return(round(sum(forecast(auto.arima(demand.ts),h=12)$mean[1:12])))
}
}
# Forecast each of the top-five states and attach the result column.
toplocation <- Total$State
toplocation <- lapply(toplocation,function(x)forecasting(x))
Total$'Forecast for Next Quarter' <- unlist(toplocation)
Total <- subset(Total, !is.na(Total$Demand))
return(Total)
}
##############################################Customer Trend####################################
# customer(cid, year, quarter, number)
# Top-`number` customers by overall demand for a given quarter/year, with the
# selected customer (cid) pinned to the first row and the remaining customers
# ranked by demand, descending.
# Returns a data frame with columns Quarter, Year, Customer, Demand.
# NOTE(review): reads master.csv from a hard-coded path and changes the
# working directory as a side effect — consider passing the path in instead.
customer <- function(cid, year, quarter, number){
  setwd("D:\\HCL\\LikeMe")
  demand <- data.frame( fread("master.csv"))
  # Aggregate overall demand per (quarter, year, customer).
  customer.demand <- aggregate(demand$overall.demand,
                               by = list(demand$quarter, demand$year, demand$customer),
                               FUN = sum)
  # BUG FIX: the original compared Group.3 against `customer` — with the
  # parameter named `cid`, that resolved to this function object itself and
  # errored at runtime. Compare against the `cid` argument instead.
  customer.select <- filter(customer.demand,
                            customer.demand$Group.1 == quarter, customer.demand$Group.2 == year, customer.demand$Group.3 == cid)
  customer.notselect <- filter(customer.demand,
                               customer.demand$Group.1 == quarter, customer.demand$Group.2 == year, customer.demand$Group.3 != cid)
  # Rank every other customer by demand, descending, below the selected one.
  customer.notselect <- customer.notselect[order(customer.notselect$x, decreasing = TRUE),]
  customer.together <- rbind(customer.select, customer.notselect)
  colnames(customer.together) <- c("Quarter", "Year", "Customer", "Demand")
  return(customer.together[1:number,])
}
##############################################Skill Vs Customer#########################################
# custskill1(c, d, e)
# Top-10 customers by overall demand for skill bucket `c` ("All" = every
# skill) in year `d`, quarter `e`. Returns a data frame with columns
# Customer, Segement, Demand (column name kept as-is for caller compatibility).
custskill1 <- function(c, d, e){
  setwd("D:/HCL/Likeme/Demand")
  cm <- data.frame( fread("demand.csv", header = TRUE, stringsAsFactors = FALSE))
  # Derived fulfilment measures.
  cm$Total.Fulfilled <- cm$Internal_Filled + cm$External_Joined
  cm$Unfulfilled.Overdue <- cm$InitialDemand -
    (cm$Internal_Filled + cm$External_Joined + cm$DroppedPos)
  # Keep only the analysis columns, drop rows without a requisition date,
  # then switch to the short internal column names.
  keep.cols <- c("V1", "ReqNo", "Joining.Level.2", "Customer", "Segment",
                 "Req.Date", "Skill.Bucket", "Primary.Skill.Area", "Requisition.Source",
                 "Internal_Filled", "External_Joined", "Total.Fulfilled",
                 "Unfulfilled.Overdue", "Vacancy", "DroppedPos", "InitialDemand",
                 "vAdditionalRemarks", "Personal.SubArea")
  cm <- cm[, keep.cols]
  cm <- cm[complete.cases(cm$Req.Date), ]
  colnames(cm) <- c("data.src", "srn", "l2", "customer", "segment", "date",
                    "skill", "sr.skill", "req.sor", "int.ful", "ext.ful",
                    "tot.ful", "un.od", "net.demand", "demand.drop",
                    "overall.demand", "job.desc", "Location")
  # Parse dates (day-month-year) and convert the categorical columns.
  cm$date <- dmy(cm$date)
  for (fac in c("data.src", "l2", "segment", "skill", "req.sor")) {
    cm[[fac]] <- factor(cm[[fac]])
  }
  cm$quarter <- as.numeric(quarter(cm$date))
  cm$year <- year(cm$date)
  # Restrict to the requested year/quarter (and skill unless "All").
  slice <- if (c != "All") {
    subset(cm, cm$year == d & cm$quarter == e & cm$skill == c)
  } else {
    subset(cm, cm$year == d & cm$quarter == e)
  }
  # Total demand per customer/segment, largest first, top ten.
  agg <- aggregate(slice$overall.demand, by = list(slice$customer, slice$segment), FUN = sum)
  colnames(agg) <- c("Customer", "Segement", "Demand")
  agg <- agg[order(agg$Demand, decreasing = TRUE), ]
  agg <- agg[1:10, ]
  # Indexing 1:10 pads with NA rows when fewer than ten customers exist;
  # drop those padding rows (no-op when there are none).
  agg <- agg[!is.na(agg$Demand), ]
  return(agg)
}
###############################################Dashboard tabs#####################################
# tabs(f, g, h)
# Dashboard summary: overall / fulfilled / dropped / unfulfilled-overdue
# demand plus the corresponding percentages, for year `g` and quarter `h`,
# optionally restricted to skill bucket `f` ("All" = no skill restriction).
tabs <- function(f,g,h){
  setwd("D:/HCL/LikeMe/Demand")
  dm <- data.frame( fread("demand.csv", header = TRUE, stringsAsFactors = FALSE))
  # Derived fulfilment measures.
  dm$Total.Fulfilled <- dm$Internal_Filled + dm$External_Joined
  dm$Unfulfilled.Overdue <- dm$InitialDemand -
    (dm$Internal_Filled + dm$External_Joined + dm$DroppedPos)
  # Keep only the analysis columns, drop rows without a requisition date,
  # then switch to the short internal column names.
  dm <- dm[, c("V1", "ReqNo", "Joining.Level.2", "Customer", "Segment",
               "Req.Date", "Skill.Bucket", "Primary.Skill.Area", "Requisition.Source",
               "Internal_Filled", "External_Joined", "Total.Fulfilled",
               "Unfulfilled.Overdue", "Vacancy", "DroppedPos", "InitialDemand",
               "vAdditionalRemarks", "Personal.SubArea")]
  dm <- dm[complete.cases(dm$Req.Date), ]
  colnames(dm) <- c("data.src", "srn", "l2", "customer", "segment", "date",
                    "skill", "sr.skill", "req.sor", "int.ful", "ext.ful",
                    "tot.ful", "un.od", "net.demand", "demand.drop",
                    "overall.demand", "job.desc", "Location")
  # Parse dates (day-month-year) and convert the categorical columns.
  dm$date <- dmy(dm$date)
  for (fac in c("data.src", "l2", "segment", "skill", "req.sor")) {
    dm[[fac]] <- factor(dm[[fac]])
  }
  dm$quarter <- quarter(dm$date)
  dm$year <- year(dm$date)
  # Filter to the requested slice and pick the grouping: by skill when one
  # was requested, otherwise by year/quarter.
  if (f != "All") {
    sel <- subset(dm, dm$year == g & dm$quarter == h & dm$skill == f)
    grp <- list(sel$skill)
  } else {
    sel <- subset(dm, dm$year == g & dm$quarter == h)
    grp <- list(sel$year, sel$quarter)
  }
  ovr.demand <- aggregate(sel$overall.demand, by = grp, FUN = sum)
  ful.demand <- aggregate(sel$tot.ful, by = grp, FUN = sum)
  drop.demand <- aggregate(sel$demand.drop, by = grp, FUN = sum)
  unful.demand <- aggregate(sel$un.od, by = grp, FUN = sum)
  out <- data.frame(Overall = ovr.demand$x, Ful = ful.demand$x,
                    drop = drop.demand$x, un.ud = unful.demand$x)
  # Percentages of the overall demand, rounded to whole numbers.
  out$ful.per <- round((out$Ful / out$Overall) * 100)
  out$drop.per <- round((out$drop / out$Overall) * 100)
  out$od.per <- round((out$un.ud / out$Overall) * 100)
  return(out)
}
########################################################Popularity module#####
# Popular(country, customer, n, buky, expe, band, quat)
# Skill-popularity table: total demand and rank per skill for 2016 vs 2017,
# restricted by any non-empty filter argument (customer, country, skill
# bucket, experience, band, quarter). Returns at most `n` rows with columns:
# Skills, Demand in 2017, Rank in 2017, Demand in 2016, Rank in 2016, Delta
# (positive Delta = the skill moved up in rank from 2016 to 2017).
# Depends on globals: dd (requisition x skill counts), dd_skills (skill
# names), demand.dump (requisition metadata).
# Fixes vs original: removed stray debug `str(Dat)` console output, removed
# the unused `taba` intermediate, and made the merge key explicit ("Skill")
# instead of relying on the auto-generated `rownames.Dat.` column name.
Popular <- function(country,customer, n, buky, expe, band, quat){
  # Build the per-requisition skill-demand matrix: 0/1 skill indicators
  # scaled by that requisition's InitialDemand.
  dd <- dd[, dd_skills$Skills]
  dd <- as.matrix(dd)
  dd[dd > 0] <- 1
  dd <- dd * demand.dump$InitialDemand
  dd <- as.data.frame(dd)
  # Row filter: start with all rows, intersect in each non-empty criterion.
  rows <- seq_len(nrow(demand.dump))
  if (customer != "") rows <- intersect(rows, which(demand.dump$Customer == customer))
  if (country != "") rows <- intersect(rows, which(demand.dump$country == country))
  if (buky != "") rows <- intersect(rows, which(demand.dump$Skill.Bucket == buky))
  if (expe != "") rows <- intersect(rows, which(demand.dump$Experience == expe))
  if (band != "") rows <- intersect(rows, which(demand.dump$Band == band))
  if (quat != "") rows <- intersect(rows, which(demand.dump$quarter == quat))
  combi <- cbind(demand.dump[rows, ], dd[rows, ])
  # Total demand per skill in 2016, ranked descending.
  combi_2016 <- combi[combi$year == 2016, ]
  d2016 <- data.frame(Instances_2016 = colSums(combi_2016[, dd_skills$Skills]))
  d2016$Skill <- rownames(d2016)
  d2016 <- d2016[order(d2016$Instances_2016, decreasing = TRUE), ]
  d2016$Rank_2016 <- seq.int(nrow(d2016))
  # Same for 2017.
  combi_2017 <- combi[combi$year == 2017, ]
  d2017 <- data.frame(Instances_2017 = colSums(combi_2017[, dd_skills$Skills]))
  d2017$Skill <- rownames(d2017)
  d2017 <- d2017[order(d2017$Instances_2017, decreasing = TRUE), ]
  d2017$Rank_2017 <- seq.int(nrow(d2017))
  # Join the two years on the skill name; order by 2017 demand, descending.
  both <- merge(d2017, d2016)
  both$Delta <- both$Rank_2016 - both$Rank_2017
  both <- both[order(both$Instances_2017, decreasing = TRUE), ]
  tabb <- head(both, n)
  names(tabb) <- c("Skills","Demand in 2017", "Rank in 2017", "Demand in 2016", "Rank in 2016", "Delta")
  # Drop skills with no demand in either year.
  tabb <- subset(tabb, !(tabb$'Demand in 2017'==0 & tabb$'Demand in 2016'==0))
  return(tabb)
}
############################################Recommendation System#########################################
# Build a recommendation table for the 10 oldest open demands in one skill bucket.
#
# Args:
#   j: skill bucket name; matched exactly against demand$Skill.Bucket.
#
# Returns a data.frame with one row per demand (SR NO, Skill Bucket, Customer,
# Open Days, Job Description, Internal Candidates, External Candidates).
#
# Side effects: changes the working directory several times (here and inside
# the nested helper) and reads demand.csv / skillClustering.csv /
# stopwords.csv / external.csv from fixed D:\ paths.
# Assumes data.table (fread), dplyr (select, left_join), quanteda (tokens_*)
# and lsa (cosine) are available — TODO confirm they are loaded at startup.
candidate_recommendation <- function(j){
  setwd("D:/HCL/Demand Forecast")
  demand <-data.frame( fread("demand.csv", stringsAsFactors = FALSE) )
  demand$date <- as.Date(demand$Req.Date, "%m/%d/%Y")
  # Age of each demand in days relative to today.
  demand$open.days <- as.Date(Sys.Date(), "%m/%d/%Y")-demand$date
  demand <- subset(demand, demand$Skill.Bucket == j)
  demand <- subset(demand, demand$Data.Source == "Due or Overdue demands at the end of the month")
  # Oldest first; keep one row per SR number, then the 10 oldest demands.
  demand <- demand[order(demand$open.days, decreasing = TRUE),]
  demand <- demand[!duplicated(demand$SR.No),]
  demand <- demand[1:10,]
  # Free-text requirement used as the query document for matching.
  demand$rqrmt <- paste(demand$SR.Skill, demand$Requirement)
  # Nested helper: score external candidate resumes against one requirement
  # string via TF-IDF + cosine similarity and return the top 5 names.
  recommendations <- function(rqrmt){
    setwd("D:/HCL/LikeMe")
    skills <- data.frame( fread("skillClustering.csv", header = TRUE, stringsAsFactors = FALSE))
    stp <-data.frame( fread("stopwords.csv", header = TRUE, stringsAsFactors = FALSE))
    setwd("D:/HCL/LikeMe/Resumes/External")
    candidates <- data.frame( fread("external.csv", stringsAsFactors = FALSE))
    original <- data.frame( fread("external.csv", stringsAsFactors = FALSE))
    candidates$requirement <- paste(candidates$Skills, candidates$TProfile)
    candidates <- select(candidates,File_Name, Skills, TProfile, requirement)#, Customer.Flag, experience.flag, designation.flag, l2.flag, Employee.Code)
    #print("Adding Requirement")
    # The query is injected as a fake candidate (File_Name "999999") so it
    # lands in row 1 of the DFM; scores are then read from cos$text1.
    new_requirement <- data.frame(File_Name = "999999",Skills = "sndmnvs",TProfile = "sajshdb", requirement = rqrmt)
    new_requirement <- select(new_requirement,File_Name, Skills,TProfile, requirement)#, Customer.Flag, experience.flag, designation.flag, l2.flag, Employee.Code)
    candidates <- rbind(new_requirement, candidates)
    # TF-IDF building blocks applied over the document-feature matrix.
    term.frequency <- function(row) {
      row / sum(row)
    }
    inverse.doc.freq <- function(col) {
      corpus.size <- length(col)
      doc.count <- length(which(col > 0))
      log10(corpus.size / doc.count)
    }
    tf.idf <- function(x, idf) {
      x * idf
    }
    # Tokenize the requirement text: lowercase, drop custom stopwords,
    # stem, and expand to 1..5-grams.
    tokens <- tokens(as.character(new_requirement$requirement), what = "word", remove_numbers = TRUE, remove_punct = TRUE)
    tokens <- tokens_tolower(tokens)
    tokens <- tokens_select(tokens, stp$TEXT, selection = "remove")
    train.tokens.dfm <- dfm(tokens, tolower = FALSE)
    tokens <- tokens_wordstem(tokens, language = "english")
    tokens <- tokens_ngrams(tokens, n = 1:5)
    # Build the skill vocabulary from the clustering file, then keep only
    # requirement tokens that appear in that vocabulary.
    skills.tokens <- tokens(skills$value, what = "word", remove_numbers = TRUE, remove_punct = TRUE)
    skills.tokens <- tokens_tolower(skills.tokens)
    skills.tokens <- tokens_select(skills.tokens, stp$TEXT, selection = "remove")
    skills.tokens <- tokens_ngrams(skills.tokens, n = 1:5)
    skills.tokens <- tokens_select(tokens, unlist(as.list(skills.tokens)), selection = "keep")
    skills.tokens <- tokens_select(skills.tokens, stopwords(), selection = "remove")
    # Combined token set used for the second (context) scoring pass below.
    tokens.set <- append(tokens, skills.tokens)
    # Pass 1: candidate documents restricted to the skill vocabulary.
    tokens1 <- tokens(as.character(candidates$requirement), what = "word", remove_numbers = TRUE, remove_punct = TRUE)
    tokens1 <- tokens_tolower(tokens1)
    tokens1 <- tokens_select(tokens1, stopwords(), selection = "remove")
    tokens1 <- tokens_ngrams(tokens1, n = 1:5)
    tokens1 <- tokens_select(tokens1, unlist(as.list(skills.tokens)), selection = "keep")
    tokens.dfm <- dfm(tokens1, tolower = FALSE)
    tokens.matrix <- as.matrix(tokens.dfm)
    tokens.df <- as.data.frame(tokens.matrix)
    # TF (per document), IDF (per term), then TF-IDF; transpose back to
    # documents-in-rows.
    tokens.df <- apply(tokens.matrix, 1, term.frequency)
    tokens.idf <- apply(tokens.matrix, 2, inverse.doc.freq)
    tokens.tfidf <- apply(tokens.df, 2, tf.idf, idf = tokens.idf)
    tokens.tfidf <- t(tokens.tfidf)
    # Documents with no surviving tokens produce NaN rows; zero them out.
    incomplete.cases <- which(!complete.cases(tokens.tfidf))
    tokens.tfidf[incomplete.cases,] <- rep(0.0, ncol(tokens.tfidf))
    tokens.df <- as.data.frame(tokens.tfidf)
    tokens <- as.matrix(tokens.df)
    tokens <- t(tokens)
    library(lsa)
    #print("Scoring")
    start.time <- Sys.time()
    if(nrow(candidates)>1){
      # cos$text1 = similarity of every document to the injected query doc.
      cos <- cosine(tokens)
      cos <- as.data.frame(cos)
      score1 <- data.frame(File_Name = candidates$File_Name, score = cos$text1)
      score1 <- score1[order(score1$score, decreasing = TRUE),]
      names <- data.frame(File_Name = original$File_Name, Name = original$Full_Name, skill = original$Skills, experience = original$Years.Exp, previous.employer = original$TProfile)
      score1 <- left_join(score1, names, by = "File_Name")
      colnames(score1) <- c("File Name", "Score", "Candidate Name", "Skills"," Experience", "Current Employer")
      if(nrow(score1)==0){
        score1 <- data.frame(NO = character(), MATCHING = character(), PROFILE = character(), FOUND = character())
      }
    }else{
      score1 <- data.frame(NO = character(), MATCHING = character(), PROFILE = character(), FOUND = character())
    }
    total.time <- Sys.time() - start.time
    total.time
    # Sort by name so pass-1 and pass-2 rows line up for the cbind below.
    score1 <- score1[order(score1$`Candidate Name`, decreasing = TRUE),]
    # Pass 2: same pipeline but restricted to the combined requirement+skill
    # token set (context match rather than pure skill match).
    tokens1 <- tokens(as.character(candidates$requirement), what = "word", remove_numbers = TRUE, remove_punct = TRUE)
    tokens1 <- tokens_tolower(tokens1)
    tokens1 <- tokens_select(tokens1, stopwords(), selection = "remove")
    tokens1 <- tokens_ngrams(tokens1, n = 1:5)
    tokens1 <- tokens_select(tokens1, unlist(as.list(tokens.set)), selection = "keep")
    tokens.dfm <- dfm(tokens1, tolower = FALSE)
    tokens.matrix <- as.matrix(tokens.dfm)
    tokens.df <- as.data.frame(tokens.matrix)
    tokens.df <- apply(tokens.matrix, 1, term.frequency)
    tokens.idf <- apply(tokens.matrix, 2, inverse.doc.freq)
    tokens.tfidf <- apply(tokens.df, 2, tf.idf, idf = tokens.idf)
    tokens.tfidf <- t(tokens.tfidf)
    incomplete.cases <- which(!complete.cases(tokens.tfidf))
    tokens.tfidf[incomplete.cases,] <- rep(0.0, ncol(tokens.tfidf))
    tokens.df <- as.data.frame(tokens.tfidf)
    tokens <- as.matrix(tokens.df)
    tokens <- t(tokens)
    library(lsa)
    start.time <- Sys.time()
    if(nrow(candidates)>1){
      cos <- cosine(tokens)
      cos <- as.data.frame(cos)
      score2 <- data.frame(File_Name = candidates$File_Name, score = cos$text1)
      score2 <- score2[order(score2$score, decreasing = TRUE),]
      names <- data.frame(File_Name = original$File_Name, Name = original$Full_Name, skill = original$Skills, experience = original$Years.Exp, previous.employer = original$TProfile)
      score2 <- left_join(score2, names, by = "File_Name")
      colnames(score2) <- c("Employee Code", "Score", "Candidate Name", "Skills"," Experience", "Current Employer")
      if(nrow(score2)==0){
        score2 <- data.frame(NO = character(), MATCHING = character(), PROFILE = character(), FOUND = character())
      }
    }else{
      score2 <- data.frame(NO = character(), MATCHING = character(), PROFILE = character(), FOUND = character())
    }
    total.time <- Sys.time() - start.time
    total.time
    score2 <- score2[order(score2$`Candidate Name`, decreasing = TRUE),]
    # Combine both passes; rank by the summed score and drop perfect-score
    # rows (>=1.0 in both passes, i.e. the injected query itself / dupes).
    score1$scores <- score2$Score
    score1$cumulative <- score1$Score+score1$scores
    score1 <- score1[order(score1$cumulative, decreasing = TRUE),]
    score1 <- subset(score1, score1$Score<1.0 & score1$scores<1.0)
    return(score1$`Candidate Name`[1:5])
  }
  # NOTE(review): int.names and ext.names invoke the SAME helper, which only
  # reads external.csv — internal candidates therefore duplicate the external
  # list. Looks like a copy/paste slip; confirm whether an internal-resume
  # variant was intended here.
  demand$int.names <- lapply(demand$rqrmt,function (x) unlist( recommendations(x)))
  demand$int.names <- vapply(demand$int.names, paste, collapse = ", ", character(1L))
  demand$ext.names <- lapply(demand$rqrmt,function (x) unlist( recommendations(x)))
  demand$ext.names <- vapply(demand$ext.names, paste, collapse = ", ", character(1L))
  demand <- demand[,c("SR.No","Skill.Bucket","Customer.Name","open.days","Requirement","int.names","ext.names")]
  colnames(demand) <- c("SR NO", "Skill Bucket","Customer","Open Days", "Job Description","Internal Candidates","External Candidates")
  return(demand)
}
################################################Clue#############################################
#Meaning and alternate skills pulled from the alternate-keyword table.
# Look up the reference path for a technology keyword in the global `tech`
# table (case-insensitive match on the Titile column).
# Returns the matching path(s) as character, or the string "NA" when the
# keyword is unknown.
clue <- function(skillword) {
  hits <- tech$path[tolower(tech$Titile) == tolower(skillword)]
  if (length(hits) == 0) {
    return("NA")
  }
  as.character(hits)
}
#Function to create the UI using the Shiny Dashboard Template.UI#######################
# Shiny dashboard UI definition for the Recruitment Analytics app.
# Structure: header + sidebar menu (Like-Me, DSM+, Skill Popularity) + a
# tabItems body with one tabItem per sidebar entry. References globals
# built earlier in the file (demand, demand.dump, rowman, datasetexp,
# demandda, maxdate) — this expression must be evaluated after those exist.
ui <- dashboardPage(#skin = "blue",
  #Header for the App, The sidebar and the menu items.
  dashboardHeader(title = "Recruitment Analytics"),
  dashboardSidebar(
    sidebarMenu(
      menuItem("About", tabName = "about"),
      menuItem("Like - Me", menuSubItem("Skill Radar", tabName = "skill", icon = icon("puzzle-piece")),
               menuSubItem("Job Board Search", tabName = "search3", icon = icon("search")),
               #menuSubItem("Content Based Search", tabName = "search1", icon = icon("search")),
               menuSubItem("Context Based Search", tabName = "search2", icon = icon("search-plus")),
               menuSubItem("Candidate Radar", tabName = "reco", icon = icon("search-plus")),icon = icon("id-card")
      ),
      menuItem("DSM +",
               #menuSubItem("Demand Forecast", tabName = "demand", icon = icon("line-chart")),
               #menuSubItem("Location based Demand", tabName = "location"),
               menuSubItem("Skill based Insights", tabName = "customer"),
               icon = icon("bar-chart")),
      menuItem("Skill Popularity",
               #menuSubItem("Demand Forecast", tabName = "demand", icon = icon("line-chart")),
               #menuSubItem("Location based Demand", tabName = "location"),
               menuSubItem("Hottest Skills 2017", tabName = "Pop"),
               icon = icon("bar-chart"))
    )
  ),
  #Dashboard Body with all the UI elements for different modules.
  # Inline CSS: white content area, HCL-blue (#003da5) header/sidebar accents.
  dashboardBody(tags$head(tags$style(HTML('.content{
                                          background-color: white;
                                          }
                                          .skin-blue .main-header .navbar{
                                          background-color:#003da5}
                                          .skin-blue .main-header .logo{
                                          background-color:#003da5
                                          }
                                          .skin-blue .sidebar-menu > li.active > a, .skin-blue .sidebar-menu > li:hover > a{
                                          border-left-color:#003da5
                                          }
                                          h1{
                                          font-family:"Cambria"
                                          }'))),
                tabItems(
                  # Candidate Radar: recommendations for the oldest open demands
                  # (served by output$recoresults via candidate_recommendation).
                  tabItem(tabName = "reco",
                          tags$h1("Candidate Radar"),
                          fluidRow(
                            box(
                              title = "Get recommendations for the oldest Job Descriptions that are open.",
                              status = "danger",
                              solidHeader = TRUE,
                              collapsible = TRUE,
                              selectInput("recoskill","Select Skill Bucket",choices = sort(unique(demand$Skill.Bucket))),
                              actionButton(inputId = "recogo",label = "Recommend",color="red")
                            ),
                            mainPanel( DT::dataTableOutput("recoresults"))
                          )
                  ),
                  # Job Board Search: up to three keywords -> alternative skills.
                  tabItem(tabName = "search3",
                          tags$h1("Job Board Search"),
                          fluidRow(
                            box(
                              title = "Search the web for alternative skills.",
                              status = "danger",
                              solidHeader = TRUE,
                              collapsible = TRUE,
                              textInput("kill1","Keyword 1",""),
                              textInput("kill2","Keyword 2",""),
                              textInput("kill3","Keyword 3",""),
                              actionButton(inputId = "go6",label = "generate Keywords",color="red")
                            ),
                            mainPanel( DT::dataTableOutput("results2"))
                          )),
                  # Demand forecast tab (not currently linked in the sidebar).
                  tabItem(tabName = "demand",
                          tags$h1("Forecast Demand"),
                          fluidRow(
                            box(
                              title = "Demand Forecast Input",
                              status = "danger",
                              solidHeader = TRUE,
                              collapsible = TRUE,
                              selectInput("1skill","Select Skill",choices = sort(unique(demand$Skill.Bucket))),
                              actionButton(inputId = "go2", label = "Forecast Demand")
                            ),
                            box(
                              title = "Actual Vs Forecast Plot",
                              status = "danger",
                              solidHeader = TRUE,
                              collapsible = TRUE,
                              collapsed = TRUE,
                              plotOutput("coolplot")),
                            mainPanel(DT::dataTableOutput("results"))
                          )),
                  # Location-based demand tab (not currently linked in the sidebar).
                  tabItem(tabName = "location",
                          tags$h1("Location based Demand"),
                          fluidRow(
                            box(
                              title = "Select Skill, Year and Quarter.",
                              status = "danger",
                              solidHeader = TRUE,
                              collapsible = TRUE,
                              selectInput("skill1","Select Skill",choices = sort(unique(demand$Skill.Bucket))),
                              selectInput("year","Select Year",choices = c(2014,2015,2016,2017)),
                              selectInput("quarter","Select Quarter",choices = c(1,2,3,4)),
                              actionButton(inputId = "go3", label = "Get Demand", color = "red")
                            ),
                            box(
                              title = "Demand based on Location in the US",
                              status = "danger",
                              solidHeader = TRUE,
                              collapsible = TRUE,
                              plotOutput("map1")),
                            box(
                              title = "Demand Statistics based on Location",
                              status = "danger",
                              solidHeader = TRUE,
                              collapsible = TRUE,
                              collapsed = TRUE,
                              DT::dataTableOutput("maptable1"))
                          )
                  ),
                  # Static About page.
                  tabItem(tabName = "about",
                          tags$h1("HCL's Recruitment Analytics Tool"),
                          tags$h3("A project undertaken to enhance recruiting and insert analytics for futureproofing Talent acquisition "),
                          tags$br(),
                          tags$h1("Like - Me:"),
                          tags$h3("Creating sourcing queries and striving to get a" ,tags$em("Content and Context"), "based results .
                                  "),
                          tags$br(),
                          tags$h1("DSM+"),
                          tags$h3("Forecasting demand for On time fulfillment and create supply for",tags$em("heterogeneous"), "demand.")
                  ),
                  # Skill Radar: dynamic inputs rendered server-side (uiOutput).
                  tabItem(tabName = "skill",
                          tags$h1("Skill Radar"),
                          tags$h3("Data : 31049 Job descriptions (Jan 2016 to Aug 2017)"),
                          tags$h4("Results available for 582 Customers,33 Skill buckets,
                                  65 different locations,2835 Technological keywords and all their combinations "),
                          fluidRow(
                            box(
                              title = "Input for skill radar",
                              status = "danger",
                              solidHeader = TRUE,
                              collapsible = TRUE,
                              uiOutput("series"),
                              uiOutput("varun1"),
                              uiOutput("Box3"),
                              uiOutput("Box4"),
                              uiOutput("Box5"),
                              uiOutput("Box6"),
                              uiOutput("Box7"),
                              uiOutput("Box111"),
                              valueBoxOutput("frequency")
                            ),
                            box(
                              title = "Skill Radar",
                              status = "danger",
                              solidHeader = TRUE,
                              collapsible = TRUE,
                              plotOutput("skills")
                            ),
                            box(
                              title = "Boolean Strings",
                              status = "danger",
                              solidHeader = TRUE,
                              collapsible = TRUE,
                              dataTableOutput("skills3")
                            ),
                            # box(
                            #   title = "Customer Radar",
                            #   status = "danger",
                            #   solidHeader = TRUE,
                            #   collapsible = TRUE,
                            #   plotlyOutput("skills2")
                            # ),
                            mainPanel( dataTableOutput("links"))
                          )),
                  ################################################UI Pouarity ##########################
                  # Hottest Skills tab: filter controls feeding output$Table.
                  tabItem(tabName = "Pop",
                          tags$h1("Hot SKills"),
                          fluidRow(
                            box(
                              title = "Customer name",
                              status = "danger",
                              solidHeader = TRUE,
                              collapsible = TRUE,
                              radioButtons("Coun","Select the Region ", c("India" = "INDIA", "USA" = "USA")),
                              selectInput("cus","Select Customer",choices = c("", sort(unique(demand.dump$Customer)))),
                              sliderInput(inputId = "num", label = "Choose a number", value = 20, min=1, max = 100),
                              selectInput("quat","Select Quarter", choices =c("", sort(unique(demand.dump$quarter)))),
                              actionButton(inputId = "Pop2", label = "Go")
                            ),
                            box(
                              title = "Customer name",
                              status = "danger",
                              solidHeader = TRUE,
                              collapsible = TRUE,
                              selectInput("buky","Select SkillBucket", choices =c("", sort(unique(demand.dump$Skill.Bucket)))),
                              selectInput("band","Select Band", choices =c("", sort(unique(demand.dump$Band)))),
                              selectInput("expe","Select Experiece", choices =c("", sort(unique(demand.dump$Experience))))
                            ),
                            mainPanel( formattableOutput("Table"))
                          )),
                  # Context Based Search: free-text + structured filters -> output$score.
                  tabItem(tabName = "search2",
                          tags$h1("Context Based Search"),
                          fluidRow(
                            box(
                              title = "Search for Candidates based on Skills and Context",
                              status = "danger",
                              solidHeader = TRUE,
                              collapsible = TRUE,
                              radioButtons("stype","Where do you want to search?", c("Outside HCL" = "eser", "Inside HCL" = "iser")),
                              radioButtons("jobboard","Do you want to include similar skills?", c("Yes" = "yes", "No" = "no")),
                              textAreaInput("ski.ll", "Enter Skills*"),
                              tags$h3("OR"),
                              selectInput("sk.ill", "Select the Primary Skill*", choices = c("I have already entered the skills",as.character(unique(rowman$actual)))),
                              sliderInput(inputId = "num1", label = "Select the maximum number of skills to be used", value = 6, min=1, max = 50),
                              textAreaInput("job", "Job Description"),
                              textAreaInput("functional", "What are the functional requirements?"),
                              textAreaInput("systems", "What are the system requirements?"),
                              #textAreaInput("composition", "What are the composition requirements?"),
                              selectInput("exp", "Years of experience", choices = c("No Preference",unique(datasetexp$experience)[c(1:6,8)])),
                              selectInput("clack","Which customer are you hiring for?",choices = c(" ",unique(demandda$Customer))),
                              actionButton(inputId = "go", label = "Find Profiles")
                            ),
                            mainPanel( DT::dataTableOutput("score"))
                          )
                  ),
                  # Demand Dashboard (DSM+ "Skill based Insights"): KPI value boxes,
                  # forecast controls, demand upload, maps and forecast tables.
                  tabItem(
                    tabName = "customer",
                    tags$h1("Demand Dashboard"),
                    tags$h3(paste("The data available for forecast is from 2016-01-01 to", maxdate)),
                    fluidRow(
                      fluidRow(
                        box(
                          title = "Actuals",
                          #title = paste("Actuals for ",input$forecast.ss[1]," for the year ",input$forecast.yy[1],"and quarter ",input$forecast.qq[1]),
                          status = "danger",
                          solidHeader = TRUE,
                          collapsible = TRUE,
                          valueBoxOutput("overall",width = 3),
                          valueBoxOutput("fulfillment", width = 3),
                          valueBoxOutput("drop", width = 3),
                          valueBoxOutput("od", width = 3)
                        ),
                        box(
                          title = "Forecasts",
                          status = "danger",
                          solidHeader = TRUE,
                          collapsible = TRUE,
                          valueBoxOutput("frcst", width = 6),
                          valueBoxOutput("revenue", width = 6)
                        )
                      ),
                      box(
                        title = "Select Skill bucket, year and quarter.",
                        status = "danger",
                        solidHeader = TRUE,
                        collapsible = TRUE,
                        radioButtons("custloc","Select the Region for Forecast.", c("India" = "India", "USA" = "USA")),
                        uiOutput("forecast.skill"),
                        uiOutput("forecast.year"),
                        uiOutput("forecast.quarter"),
                        actionButton(inputId = "cust", label = "Go", color = "red")
                      ),
                      box(
                        title = "Upload new Demand Data",
                        status = "danger",
                        solidHeader = TRUE,
                        collapsible = TRUE,
                        titlePanel("Appending New Demand"),
                        fileInput("file1", "Upload New Demand Data",
                                  multiple = TRUE,
                                  accept = c("text/csv",
                                             "text/comma-separated-values,text/plain",
                                             ".csv"))
                      ),
                      mainPanel(
                        DT::dataTableOutput("contents")
                      ),
                      box(
                        title = "Top Customers",
                        status = "danger",
                        solidHeader = TRUE,
                        collapsible = TRUE,
                        plotlyOutput("custplot"))
                    ),
                    fluidRow(
                      box(title = "Demand Heat Map",
                          status = "danger",
                          solidHeader = TRUE,
                          collapsible = TRUE,
                          plotlyOutput("plot")
                      ),
                      box(
                        title = "Initial Demand Report for Various Locations.",
                        status = "danger",
                        solidHeader = TRUE,
                        collapsible = TRUE,
                        collapsed = FALSE,
                        DT::dataTableOutput("maptable")
                      )
                    ),
                    fluidRow(
                      box(title = "Fulfillment for different Location",
                          status = "danger",
                          solidHeader = TRUE,
                          collapsible = TRUE,
                          plotlyOutput("ful.loc")
                      ),
                      box(
                        title = "Fulfillment Percentage for different Customer",
                        status = "danger",
                        solidHeader = TRUE,
                        collapsible = TRUE,
                        collapsed = FALSE,
                        plotlyOutput("ful.cust")
                      )
                    ),
                    fluidRow(
                      box(title = "Forecast for Combination of Top Skills and Top Customers for the selected skills",
                          status = "danger",
                          solidHeader = TRUE,
                          collapsible = TRUE,
                          DT::dataTableOutput("combforecast")
                      ),
                      box(
                        title = "Forecast for the top customers for the selected skill",
                        status = "danger",
                        solidHeader = TRUE,
                        collapsible = TRUE,
                        collapsed = FALSE,
                        DT::dataTableOutput("custforecast")
                      )
                    )
                  ),
                  # Popularity dashboard (gainers/losers) — tabName "popularity"
                  # is not referenced by any sidebar menuItem above; reachable
                  # only if linked elsewhere — TODO confirm it is still needed.
                  tabItem(
                    tabName = "popularity",
                    tags$h1("Popularity Dashboard"),
                    fluidRow(
                      box(
                        title = "Select the country and customer",
                        status = "danger",
                        solidHeader = TRUE,
                        collapsible = TRUE,
                        radioButtons("poploc","Select region", c("INDIA" = "INDIA", "USA" = "USA")),
                        uiOutput("varun"),
                        uiOutput("skill.varun"),
                        actionButton(inputId = "popularity", label = "GO", color = "red")
                      )
                    ),
                    fluidRow(
                      box(
                        title = "Top 10 gainers.",
                        status = "danger",
                        solidHeader = TRUE,
                        collapsible = FALSE,
                        DT::dataTableOutput("top10gainers")
                      ),
                      box(
                        title = "Top 10 losers.",
                        status = "danger",
                        solidHeader = TRUE,
                        collapsible = FALSE,
                        DT::dataTableOutput("top10losers")
                      )
                    ),
                    fluidRow(
                      plotlyOutput("pop.plot")
                    ),
                    fluidRow(
                      DT::dataTableOutput("pop.table")
                    )
                  )
                ),
                # Footer logo linking to the HCL site.
                tags$a(tags$img(src = "http://www.oneindia.com/img/2015/05/25-1432549894-hcl-logo.jpg", height = 200, width = 400), href= "https://www.hcltech.com/geo-presence/united-states")
  ))
#############################Fulfillment Percentage##############################
#Function to generate the data for the fulfillment graph based on customer.
# Fulfillment percentage per customer for one skill bucket (or "All").
# Reads the demand dump from disk (side effect: changes the working
# directory), computes filled/initial demand per (Customer, SubArea) pair,
# keeps only above-average-volume pairs, then averages fulfillment per
# customer. Returns a data.frame (Group.1 = customer, x = mean %), sorted
# descending.
fulfillment.customer <- function(skill){
  setwd("D:/HCL/LikeMe/Demand")
  dump_df <- data.frame( fread("dump.csv", stringsAsFactors = FALSE))
  dump_df$filled <- dump_df$External_Joined + dump_df$Internal_Filled
  dump_df <- subset(dump_df, dump_df$Skill.Bucket != "#N/A")
  dump_df$quarter <- quarter(dmy(dump_df$Approval.Date))
  dump_df$month   <- month(dmy(dump_df$Approval.Date))
  dump_df$year    <- year(dmy(dump_df$Approval.Date))
  # "All" means no skill-bucket filter.
  selected <- if (skill != "All") {
    subset(dump_df, dump_df$Skill.Bucket == skill)
  } else {
    dump_df
  }
  grouping <- list(selected$Customer, selected$Personal.SubArea)
  initial_agg <- aggregate(selected$InitialDemand, by = grouping, FUN = sum)
  filled_agg  <- aggregate(selected$filled,        by = grouping, FUN = sum)
  # Both aggregates share grouping and order, so rows align one-to-one.
  initial_agg$fulfillment <- (filled_agg$x / initial_agg$x) * 100
  initial_agg <- initial_agg[order(initial_agg$fulfillment, decreasing = TRUE), ]
  # Drop low-volume pairs so tiny demands don't skew the averages.
  if (nrow(initial_agg) > 1) {
    initial_agg <- subset(initial_agg, initial_agg$x > mean(initial_agg$x))
  }
  by_customer <- aggregate(initial_agg$fulfillment,
                           by = list(initial_agg$Group.1), FUN = mean)
  by_customer[order(by_customer$x, decreasing = TRUE), ]
}
#Function to generate the data for the fulfillment graphs based on location.
# Fulfillment percentage per location for one skill bucket (or "All").
# Identical pipeline to fulfillment.customer except the final average is
# taken per Personal.SubArea (Group.2) instead of per Customer. Returns a
# data.frame (Group.1 = location, x = mean %), sorted descending.
fulfillment.location <- function(skill){
  setwd("D:/HCL/LikeMe/Demand")
  dump_df <- data.frame( fread("dump.csv", stringsAsFactors = FALSE))
  dump_df$filled <- dump_df$External_Joined + dump_df$Internal_Filled
  dump_df <- subset(dump_df, dump_df$Skill.Bucket != "#N/A")
  dump_df$quarter <- quarter(dmy(dump_df$Approval.Date))
  dump_df$month   <- month(dmy(dump_df$Approval.Date))
  dump_df$year    <- year(dmy(dump_df$Approval.Date))
  # "All" means no skill-bucket filter.
  selected <- if (skill != "All") {
    subset(dump_df, dump_df$Skill.Bucket == skill)
  } else {
    dump_df
  }
  grouping <- list(selected$Customer, selected$Personal.SubArea)
  initial_agg <- aggregate(selected$InitialDemand, by = grouping, FUN = sum)
  filled_agg  <- aggregate(selected$filled,        by = grouping, FUN = sum)
  # Both aggregates share grouping and order, so rows align one-to-one.
  initial_agg$fulfillment <- (filled_agg$x / initial_agg$x) * 100
  initial_agg <- initial_agg[order(initial_agg$fulfillment, decreasing = TRUE), ]
  # Drop low-volume pairs so tiny demands don't skew the averages.
  if (nrow(initial_agg) > 1) {
    initial_agg <- subset(initial_agg, initial_agg$x > mean(initial_agg$x))
  }
  by_location <- aggregate(initial_agg$fulfillment,
                           by = list(initial_agg$Group.2), FUN = mean)
  by_location[order(by_location$x, decreasing = TRUE), ]
}
############################################POPULARITY#######################################################
#Function to create the data behind the popularity dashboard.
# Compute quarter-over-quarter skill-popularity statistics for one
# country / customer / skill bucket from the global `dem` demand table.
#
# Args:
#   ctry:        country code matched against dem$country.
#   cust:        customer name matched against dem$Customer.
#   skillbucket: skill bucket matched against dem$Skill.Bucket.
#
# Returns list(weeks, Gainers.Losers, Top10.gainers, Top10.losers):
#   weeks          - per year/quarter demand totals for the top-5 skills
#   Gainers.Losers - summary table of gainer/loser categories
#   Top10.gainers / Top10.losers - % change tables for the top-10 skills
#
# Assumes columns 137:2972 of `dem` are per-skill indicator columns — TODO
# confirm this index range whenever the dump schema changes. Also note the
# `week` column is compared against quarter numbers (cq/pq), so `week`
# presumably stores quarters — verify against the data-prep code.
popularity <- function(ctry,cust, skillbucket){
  cons <- dem
  # fread mangles "C++"/"C#" into "C.."/"C."; restore the real names.
  colnames(cons)[which(names(cons) == "C..")] <- "C++"
  colnames(cons)[which(names(cons) == "C.")] <- "C#"
  # Clip the indicator columns to 0/1, then weight by initial demand.
  cons[,137:2972] <- as.data.frame(lapply(cons[,137:2972], function(x){replace(x, x>1,1)}))
  cons[,137:2972] <- cons[,137:2972]*cons$InitialDemand
  cons <- subset(cons,cons$country==ctry)
  cons <- cons %>% filter(cons$Customer==cust)
  cons <- cons %>% filter(cons$Skill.Bucket==skillbucket)
  max.year <- cons %>% filter(cons$year == max(cons$year))
  min.year <- cons %>% filter(cons$year == min(cons$year))
  # Compare the last two completed quarters (wrap Q1 back to Q4/Q3).
  cq <- quarter(Sys.Date())
  if(cq==1){
    cq = 4
    pq = 3
  }else{
    cq = cq-1
    pq = cq-1
  }
  max.year <- max.year %>% filter(max.year$week == cq)
  min.year <- min.year %>% filter(min.year$week == pq)
  # Total demand per skill in each of the two periods, aligned by skill name.
  max.year <- data.frame(colSums(max.year[,137:2972]))
  max.year$skills <- row.names(max.year)
  colnames(max.year) <- c("Value","skills")
  max.year <- max.year[order(max.year$skills, decreasing = T),]
  min.year <- data.frame(colSums(min.year[,137:2972]))
  min.year$skills <- row.names(min.year)
  colnames(min.year) <- c("Value","skills")
  min.year <- min.year[order(min.year$skills, decreasing = T),]
  skilllist <- cbind(max.year,min.year$Value)
  skilllist$PercentageChange <- ((skilllist$Value- skilllist$`min.year$Value`)/skilllist$`min.year$Value`)*100
  External2 <- skilllist
  # Overall totals identify the top-10 skills (and top-5 for the trend plot).
  col.sums <- data.frame(colSums(cons[,137:2972]))
  col.sums$skills <- row.names(col.sums)
  colnames(col.sums) <- c("Value","skills")
  col.sums <- col.sums[order(col.sums$Value, decreasing = T),]
  topskills <- col.sums$skills[1:10]
  #col.sums <- head(col.sums$skills,20)
  col.sums <- col.sums$skills[1:5]
  skill.aggregate <- aggregate(cons[,c(col.sums)], by = list(cons$week, cons$year), FUN = sum)
  totalweeks <- ((max(cons$year)-min(cons$year))+1)*52
  # Build a complete (Year, Quarter) grid so quarters with no demand still
  # appear (as zeros after the merge below).
  weeks <- data.frame(Week = rep(1:4,((max(cons$year)-min(cons$year))+1)))
  years <- data.frame(Year = rep(min(cons$year), 4))
  for(i in 2:((max(cons$year)-min(cons$year))+1)){
    # BUGFIX: was `min(cons$year)+1` for every iteration, which labels all
    # later years as min+1 once the data spans more than two years.
    years <- rbind(years, data.frame(Year = rep(min(cons$year) + (i - 1), 4)))
  }
  weeks <- cbind(years,weeks)
  colnames(skill.aggregate) <- c("Week","Year", col.sums)
  weeks <- merge(weeks, skill.aggregate, all = TRUE)
  colnames(weeks) <- c("Year","Week", col.sums)
  weeks[is.na(weeks)] <- 0
  weeks <- weeks[1:6,]
  weeks$year.quarter<- paste(weeks$Year," - " ,weeks$Week)
  # Bucket skills by how their popularity moved between the two quarters.
  More.100 <- subset(External2,External2$PercentageChange>=100 )
  Stable <- subset(External2,External2$PercentageChange==0)
  No.Popularity <- subset(External2,(External2$PercentageChange)*(-1) >=100)
  Top10 <- subset(External2, External2$skills %in% topskills)
  Top10.gainers <- subset(Top10,Top10$PercentageChange >0)
  Top10.losers <- subset(Top10,Top10$PercentageChange < 0 )
  Gainers.Losers <- data.frame(Category = c("More than 100% popularity gain","No Loss No Gain",
                                            "Forgotten Skills", "Highest gain in the top 10 list",
                                            "Highest loss in the top 10 list"))
  Gainers.Losers$Skills <- c(paste(subset(External2,External2$PercentageChange>=100 )$skills, collapse=", "),
                             paste(subset(External2,External2$PercentageChange==0)$skills, collapse=", "),
                             paste(subset(External2,(External2$PercentageChange)*(-1) >=100)$skills, collapse=", "),
                             paste(subset(Top10,Top10$PercentageChange >0)$skills, collapse=", "),
                             paste(subset(Top10,Top10$PercentageChange < 0 )$skills, collapse=", "))
  # Keep only the rounded PercentageChange column for display tables.
  More.100[,c(1,2,3)] <- NULL
  More.100$PercentageChange[is.infinite(More.100$PercentageChange)] <- 100
  More.100$PercentageChange <- round(More.100$PercentageChange)
  Stable[,c(1,2,3)] <- NULL
  Stable$PercentageChange <- round(Stable$PercentageChange)
  No.Popularity[,c(1,2,3)] <- NULL
  No.Popularity$PercentageChange <- round(No.Popularity$PercentageChange)
  Top10.gainers[,c(1,2,3)] <- NULL
  Top10.gainers$PercentageChange <- round(Top10.gainers$PercentageChange)
  if(nrow(Top10.gainers)>0){
    Top10.gainers$PercentageChange <- paste(Top10.gainers$PercentageChange,"%")
  }
  Top10.losers[,c(1,2,3)] <- NULL
  Top10.losers$PercentageChange <- round(Top10.losers$PercentageChange)
  if(nrow(Top10.losers)>0){
    Top10.losers$PercentageChange <- paste(Top10.losers$PercentageChange,"%")
  }
  # Division by a zero baseline yields Inf/-Inf changes; drop those rows.
  Top10.gainers <- subset(Top10.gainers, Top10.gainers$PercentageChange!="Inf %")
  Top10.gainers <- subset(Top10.gainers, Top10.gainers$PercentageChange!="-Inf %")
  Top10.losers <- subset(Top10.losers, Top10.losers$PercentageChange!="Inf %")
  Top10.losers <- subset(Top10.losers, Top10.losers$PercentageChange!="-Inf %")
  # When the data already covers the current year, show six quarters;
  # otherwise trim the trend table to four rows.
  if(!year(Sys.Date())>max(cons$year)){
    return(list(weeks,Gainers.Losers, Top10.gainers, Top10.losers))
  }else{
    weeks = weeks[1:4,]
    return(list(weeks,Gainers.Losers, Top10.gainers, Top10.losers))
  }
}
#Function for creating a local server.
server <- function(input, output, session) {
output$contents <- renderTable({
# input$file1 will be NULL initially. After the user selects
# and uploads a file, head of that data file by default,
# or all rows if selected, will be shown.
req(input$file1)
print(1)
df <- read.csv(input$file1$datapath)
df$X <- NULL
print(2)
#demand <- rbind(demand.upload, df)
print("uploaded")
setwd("D:\\HCL\\LikeMe")
write.csv(demand, "newdemand.csv")
print("added")
return(df)
})
output$series<-renderUI({
radioButtons("radio","Start Deep Dive with either,", c("Skill" = "Skill","Customer" = "Customer"))
})
output$varun1 <- renderUI({
if (is.null(input$radio))
return()
switch(input$radio,"Customer" = selectInput("custa", "Select the Customer",
choices = c("",as.character( unique(demandda$Customer))),
selected = "option2"),
"Skill" = selectInput("skilla", "Select Skill",
choices = c("",as.character(unique(colnames(dd)))),
selected = "option2"
)
)
})
output$Box3 = renderUI({
if ((input$radio=="Skill"))
return(selectInput("custa", "Select Customer", choices= act_customer(input$skilla)))
selectInput("skilla",
"Select Skill",
choices = c("", list_customer(input$custa))
)})
output$Box4 = renderUI({
if ((input$radio=="Skill"))
return(selectInput("bucks", "Select Skill Bucket", choices =c("", act_skill(input$skilla))))
selectInput("bucks","Select Skill Bucket",choices = c( "",list_skillbucket(input$custa)))
})
output$Box5 = renderUI({
if ((input$radio=="Skill"))
return(selectInput("subarea", "Select Location", choices =c("", act_location(input$skilla))))
selectInput("subarea","Select Location",choices = c("",list_location(input$custa)))
})
output$Box6 = renderUI(
sliderInput(inputId = "num", label = "Choose a number", value = 20, min=1, max = 50)
)
output$Box7 = renderUI(
actionButton(inputId = "go4", label = "Radar", color = "red") )
data <- eventReactive(input$go, {likeme(input$ski.ll[1], input$job[1], input$exp[1], input$stype[1], input$sk.ill[1], input$num1[1], input$clack[1],input$functional[1],
input$systems[1], input$jobboard[1])})
output$Box111= renderUI(selectInput("years", "Select Year", choices =c("","2016", "2017") ))
output$score <- DT::renderDataTable({
data()
})
#Creating reactive functions for various buttons included in the UI.
data1 <- eventReactive(input$cust, {forecaster(input$forecast.ss[1],input$custloc[1])})
data2 <- eventReactive(input$cust, {maps(input$forecast.ss[1],input$forecast.qq[1],input$forecast.yy[1],input$custloc[1])})
data3 <- eventReactive(input$cust, {maptable(input$forecast.ss[1],input$forecast.qq[1],input$forecast.yy[1],input$custloc[1])})
data4 <- eventReactive(input$go4, {newman(input$skilla[1], input$num, input$bucks, input$subarea, input$custa, input$radio, input$years)})
data5 <- eventReactive(input$go5,{manji(input$skills1,input$Experience, input$Customer, input$Job_family,input$Designation,input$Skill_category, input$L2, input$L3, input$Band, input$Sub_band, input$Personal_subarea)})
data6 <- eventReactive(input$go6,{jobboard(input$kill1,input$kill2, input$kill3)})
data7 <- eventReactive(input$cust,{custskill1(input$forecast.ss, input$forecast.yy, input$forecast.qq)})
data8 <- eventReactive(input$cust,{tabs(input$forecast.ss, input$forecast.yy, input$forecast.qq)})
recodata <- eventReactive(input$recogo, {candidate_recommendation(input$recoskill)})
data9 <- eventReactive(input$go4,{customer(input$skilla[1])})
data10 <- eventReactive(input$cust,{fulfillment.customer(input$forecast.ss[1])})
data11 <- eventReactive(input$cust,{fulfillment.location(input$forecast.ss[1])})
data.popularity <- eventReactive(input$popularity,{popularity(input$poploc,input$dynamic,input$dyna)})
data.combforecast <- eventReactive(input$cust,{combopred(input$forecast.ss[1],input$forecast.qq[1],input$forecast.yy[1],input$custloc[1])})
data.custforecast <- eventReactive(input$cust,{cust.forecast(input$forecast.ss[1],input$forecast.qq[1],input$forecast.yy[1],input$custloc[1])})
data.Pop <- eventReactive(input$Pop2,{Popular(input$Coun, input$cus,input$num, input$buky, input$expe, input$band, input$quat)})
#Functions to generate tables and graphs.
##############################################Table output Poularity #################
output$Table<- renderFormattable(
formattable(data.table(data.Pop()) , list(Delta = formatter(
"span",
style = x ~ style(color = ifelse(x < 0 , "red", ifelse(x>0,"green","gray"))),
x ~ icontext(ifelse(x < 0, "arrow-down", ifelse(x>0,"arrow-up","minus")), x))))
)
output$custforecast <- DT::renderDataTable({
data.custforecast()
})
output$combforecast <- DT::renderDataTable({
data.combforecast()
})
output$recoresults <- DT::renderDataTable({
recodata()
})
output$varun <- renderUI({
if (is.null(input$poploc))
return()
switch(input$poploc,
"INDIA" = selectInput("dynamic", "Select the Customer",
choices = unique(subset(demand.dump, demand.dump$country=="INDIA")$Customer),
selected = "option2"
),
"USA" = selectInput("dynamic", "Select the customer",
choices = unique(subset(demand.dump, demand.dump$country=="USA")$Customer),
selected = "option2"
)
)
})
output$forecast.skill <- renderUI({
selectInput("forecast.ss", "Select Skill Bucket",
choices = unique(subset(demand.dump, tolower(demand.dump$country)==tolower(input$custloc))$Skill.Bucket),
selected = "option3"
)
})
output$forecast.year <- renderUI({
selectInput("forecast.yy", "Select Year",
choices = unique(subset(demand.dump, tolower(demand.dump$country)==tolower(input$custloc) & tolower(demand.dump$Skill.Bucket)==tolower(input$forecast.ss))$year),
selected = "option3"
)
})
output$forecast.quarter <- renderUI({
selectInput("forecast.qq", "Select Quarter",
choices = unique(subset(demand.dump, tolower(demand.dump$country)==tolower(input$custloc) & tolower(demand.dump$Skill.Bucket)==tolower(input$forecast.ss) & demand.dump$year == input$forecast.yy)$quarter),
selected = "option3"
)
})
output$skill.varun <- renderUI({
selectInput("dyna", "Select Skill Bucket",
choices = unique(subset(subset(demand.dump, demand.dump$country==input$poploc),
subset(demand.dump, demand.dump$country==input$poploc)$Customer==input$dynamic)$Skill.Bucket),
selected = "option3"
)
})
# Generating the graphs for the popularity statistics.
# Line chart: popularity of the top 5 skills over year-quarter.
# Assumes data.popularity()[1] holds a year.quarter column plus the five
# skill series in columns 3..7 -- TODO confirm against data.popularity().
output$pop.plot <- renderPlotly({
External1 <- data.frame(data.popularity()[1])
# Freeze factor levels so the x axis keeps the data's chronological order.
External1$year.quarter <- factor(External1$year.quarter, levels = External1[["year.quarter"]])
plot_ly(External1, x = ~year.quarter, y = ~External1[,3], name = colnames(External1)[3], type = 'scatter', mode = 'lines',
line = list(color = 'rgb(155, 9, 9)', width = 4)) %>%
add_trace(y = ~External1[,4], name = colnames(External1)[4], line = list(color = 'rgb(5, 14, 109)', width = 4)) %>%
add_trace(y = ~External1[,5], name = colnames(External1)[5], line = list(color = 'rgb(20, 109, 4)', width = 4)) %>%
add_trace(y = ~External1[,6], name = colnames(External1)[6], line = list(color = 'rgb(244, 244, 97)', width = 4)) %>%
add_trace(y = ~External1[,7], name = colnames(External1)[7], line = list(color = 'rgb(93, 7, 158)', width = 4)) %>%
layout(title = "The Popularity of top 5 skills over time",
xaxis = list(title = "Year - Quarter"),
yaxis = list (title = "Popularity in Numbers"))
})
# Popularity table (second element of data.popularity()).
output$pop.table <- DT::renderDataTable({
data.frame(data.popularity()[2])
})
# Top-10 skills that lost popularity (third element).
output$top10losers <- DT::renderDataTable({
data.frame(data.popularity()[3])
})
# Top-10 skills that gained popularity (fourth element).
output$top10gainers <- DT::renderDataTable({
data.frame(data.popularity()[4])
})
# Plot to display the statistics of demand on the US map.
output$plot <- renderPlotly({
g <- list(
scope = 'usa',
projection = list(type = 'albers usa'),
lakecolor = toRGB('white')
)
# Choropleth keyed on the built-in state.abb vector; assumes data3() rows
# are ordered identically to state.abb -- TODO confirm upstream.
plot_ly(z = data3()$Demand, text = data3()$State, locations = state.abb,
type = 'choropleth', locationmode = 'USA-states') %>%
layout(geo = g)
})
# Bar chart of fulfilment by location; data11() appears to be an
# aggregate() result (Group.1 / x columns) -- verify against its definition.
output$ful.loc <- renderPlotly({
plot_ly(
x = data11()$Group.1,
y = data11()$x,
name = "",
type = "bar"
)
})
# Plot to display statistics about the customer. Currently displayed.
# Same aggregate()-shaped input as above, but from data10().
output$ful.cust <- renderPlotly({
plot_ly(
x = data10()$Group.1,
y = data10()$x,
name = "",
type = "bar"
)
})
# Displays a table with skills separated with commas.
# One row per radar skill: name, its definition (defin) and its
# alternatives (alter). The embedded JS renderer truncates column 3 at
# 400 characters, exposing the full text via a hover tooltip.
output$links<- DT::renderDataTable({
#data4()
datatable((data.frame(Skill = colnames(data.frame(data4()[1], check.names =FALSE )),
Definition = unlist(lapply(colnames(data.frame(data4()[1], check.names = FALSE)), function (x) {defin(x)})),
Alternatives= unlist(lapply(colnames(data.frame(data4()[1], check.names = FALSE)), function (x) {alter(x)})))), options = list(columnDefs = list(list(
targets = 3,
render = JS(
"function(data, type, row, meta) {",
"return type === 'display' && data.length > 400 ?",
"'<span title=\"' + data + '\">' + data.substr(0, 400) + '...</span>' : data;",
"}")
))), callback = JS('table.page(3).draw(false);'))
})
# Value boxes summarising the currently selected quarter/year, all sourced
# from the reactive data8() (counts) and data1() (forecast series).

# Overall demand.
output$overall <- renderValueBox({
  stats <- data8()
  valueBox(paste0(stats$Overall), "Overall Demand",
           icon = icon("group"), color = "yellow")
})
# Fulfillment percentage.
output$fulfillment <- renderValueBox({
  stats <- data8()
  valueBox(paste0(stats$ful.per, "%"), "Fulfillment",
           icon = icon("thumbs-up"), color = "olive")
})
# Drop percentage.
output$drop <- renderValueBox({
  stats <- data8()
  valueBox(paste0(stats$drop.per, "%"), "Drop",
           icon = icon("thumbs-down"), color = "red")
})
# Unfulfilled-overdue percentage.
output$od <- renderValueBox({
  stats <- data8()
  valueBox(paste0(stats$od.per, "%"), "Unfulfilled Overdue",
           icon = icon("list"), color = "orange")
})
# Forecast for the next quarter (third row of data1()).
output$frcst <- renderValueBox({
  valueBox(data1()[3, ]$Demand, paste0("Next Quarter Forecast"),
           icon = icon("line-chart"), color = "blue")
})
# Revenue estimate: forecast head-count * $65/hr * 2080 hrs/yr.
output$revenue <- renderValueBox({
  fc <- data1()
  valueBox(paste0("$", fc[3, ]$Demand * 65 * 2080),
           paste0(fc$quarter[nrow(fc)], "-", fc$year[nrow(fc)], "Revenue"),
           icon = icon("dollar"), color = "green")
})
# Displays the bubble plot of demand for the top-10 customers (data7()).
# Bubble size scales with Demand; colour encodes the Segement column
# (column name as spelled in data7()).
output$custplot <- renderPlotly(
{
plot_ly(data7(), x = ~Customer, y = ~Demand, type = 'scatter',color = ~Segement,
size = ~Demand,
mode = 'markers',colors = colors,
marker = list(symbol = "circle", sizemode = 'diameter',
line = list(width = 3, color = '#FFFFFF'))) %>%
layout(title = paste(""),
xaxis = list(title = '',
gridcolor = 'rgb(255, 255, 255)',
zerolinewidth = 1,
ticklen = 5,
gridwidth = 2,
showticklabels = FALSE),
yaxis = list(title = '',
gridcolor = 'rgb(255, 255, 255)',
zerolinewidth = 1,
ticklen = 5,
# BUG FIX: was misspelled `gridwith`, which plotly silently ignored,
# leaving the y axis without the intended grid-line width.
gridwidth = 2),
paper_bgcolor = 'rgb(243, 243, 243)',
plot_bgcolor = 'rgb(243, 243, 243)')
}
)
# Forecast-vs-actual demand line chart for the selected skill.
# data1() supplies year/quarter plus Demand.Forecast and Actual.Demand.
output$coolplot <- renderPlot({
ggplot(data1(), aes(x = paste(year,"-",quarter), y = Demand.Forecast, group = 1))+
geom_line(aes(color = "green"))+
geom_line(aes(y = Actual.Demand,color = "red"))+
theme(text = element_text(size=10),axis.text.x = element_text(angle=90, hjust=1))+
scale_size_manual(values = c(0.1, 1))+
xlab("Year - Quarter") + ylab("Demand in Numbers") + scale_fill_discrete(name="Type of Demand",
breaks=c("Forecast", "Actual"),labels=c("Forecast", "Actual"))+ggtitle(paste("Forecast for",input$skill[1]))
})
# Raw table behind the forecast chart.
output$results <- DT::renderDataTable({
data1()
})
# Spatial demand plot; data2() is assumed to be an sp object with a
# `value` column (spplot requirement) -- TODO confirm.
output$map <- renderPlot({
spplot(data2()['value'], title = paste("Demand throughout the US for",input$skill[1], "in Quarter",input$quarter[1],"of", input$year[1]))
})
# Table companion to the map.
output$maptable <- DT::renderDataTable({
data2()
})
# Skill radar chart built from the first element of data4().
output$skills <- renderPlot({
radarchart(data.frame(data4()[1], check.names = FALSE),pcol = "red")
})
############################################## indicator ########################
# Bar chart of totals per customer (data9()); bars are kept in the
# data's own order via categoryorder/categoryarray. Skipped until the
# skill input (input$skilla) is non-empty.
output$skills2 <- renderPlotly({
if ((input$skilla==""))
return()
plot_ly(data=data9(),x = as.factor(data9()$custo),y = data9()$total, type = "bar")%>%layout(xaxis = list(categoryorder = "array",
categoryarray = (data9()$custo)))
})
# One-cell table: comma-joined list of the radar's skill names.
output$skills3 <- renderDataTable({
datatable( data.frame(Boolean=paste(colnames(data.frame(data4()[1], check.names = FALSE)),collapse = ",")))
})
# Value box shown next to the radar: number of JDs behind it
# (third element of data4()).
output$frequency <- renderValueBox({
valueBox(
paste0(unlist(data4()[3])), "Job Description(s) used to generate the Skill Radar", icon = icon("list"),
color = "purple"
)
})
# Result tables for two further reactives.
output$results1 <- DT::renderDataTable({
data5()
})
output$results2 <- DT::renderDataTable({
data6()
})
}
shinyApp(ui = ui, server = server)
| /LikeMe_3_0 with Datatable with minimal excel reads_updated_2.r | no_license | maddy6/temp | R | false | false | 134,676 | r | setwd("D:/HCL/LikeMe")
#Loading the required packages.
library(formattable)
library(data.table)
library(shiny)
library(shinydashboard)
library(quanteda, irlba)
library(ggplot2)
#library(e1071)
#library(lattice)
library(zoo)
library(lubridate)
#library(fiftystater)
library(forecast)
library(rvest)
library(tibble)
library(randomForest)
library(tseries)
#library(maps)
#library(mapproj)
#library(tmap)
#library(maptools)
library(dplyr)
library(openxlsx)
#library(xml2)
library(sp)
library(plotly)
library(radarchart)
library(fmsb)
library(DT)
library(stringr)
library(caret)
# Reading the required csv files.
# NOTE(review): all paths are relative to the setwd() above; consider
# absolute paths or here::here() for portability.
demand <- data.frame( fread("demand.csv", stringsAsFactors = FALSE))
demand.dump <-data.frame( fread("dump2.csv", stringsAsFactors = FALSE))
demand.upload <- demand.dump
demand.upload$V1 <- NULL
# Derive quarter/year/month from the (day-month-year) approval date.
demand.dump$quarter <- quarter(dmy(demand.dump$Approval.Date))
demand.dump$year <- year(dmy(demand.dump$Approval.Date))
demand.dump$month <- month(dmy(demand.dump$Approval.Date))
maxdate <- max(dmy(demand.dump$Approval.Date))
datasetexp<-data.frame(fread("excel1.csv", stringsAsFactors = FALSE))
# Palette shared by the plotly charts.
colors <- c('#4AC6B7', '#2457C5', '#DF0B0B',"#24C547", '#E71BB6')
# Precomputed skill-to-skill distance matrix (used by newman()).
indiadistance<-data.frame( fread("indaiusa Distance1.csv"))
demandda<-demand.dump
alternatives<-data.frame(fread("alternatives.csv"))
rowman<-data.frame(fread("ronnames chan1.csv"))
dd<-data.frame(fread("consolidated_skills1.csv", stringsAsFactors = FALSE))
cons <- data.frame(fread("Consolidated.csv", stringsAsFactors = F))
# Initial data preparation for the skill radar: align row/column names of
# the distance matrix and the skill-occurrence matrix with rowman$actual.
row.names(indiadistance)<-rowman$actual
colnames(indiadistance)<-rowman$actual
customer<-as.data.frame(unique(demandda$Customer))
names(customer)<-"customer"
colnames(dd)<-rowman$actual
dd1<-dd
# dd1: skill-occurrence matrix plus a customer column (rows assumed to be
# aligned 1:1 with demandda rows -- TODO confirm).
dd1$customer<-demandda$Customer
skill<-colnames(dd)
# tdddataframe: transposed matrix, one row per skill, one column per JD.
tdd<- t(dd)
tdddataframe<-data.frame(tdd,stringsAsFactors=FALSE)
tdd1<-tdddataframe
cons$date <- dmy(cons$Req.Date)
cons$week <- quarter(cons$date)
cons$year <- year(cons$date)
dem <- cons
dd_skills <- data.frame(fread("list_of_skills.csv", stringsAsFactors = F)) # We have identtified and removed some generic keywords from the identified keywords list
dd_skills <- subset(dd_skills, dd_skills$Pbb==0) # removing the generic keywords
##################################################Newmancodes##########################################
#####Like Me Fuctions for first module: Skill Radar
# Return the vector of skill names relevant to a given customer; with an
# empty customer, return "" followed by every skill column in dd1.
list_customer<- function (customer){
  if (customer == "") {
    return(c("", as.character(unique(colnames(dd1)))))
  }
  # Keep only the skill columns that occur at least once for this customer.
  rows <- as.data.frame(dd1[dd1$customer == customer, -1])
  present <- rows[, colSums(rows != 0) > 0]
  colnames(present)
}
# Customers whose job descriptions mention the given skill at least once;
# with an empty skill, "" followed by every customer in demandda.
act_customer<- function (skill){
  if (skill == "") {
    return(c("", as.character(unique((demandda$Customer)))))
  }
  matched <- dd1$customer[dd1[, skill] > 0]
  c("", as.character(unique(matched)))
}
# Skill buckets whose JDs mention the given skill; with an empty skill,
# "" followed by every skill bucket in demandda.
act_skill<- function (skill){
  if (skill == "") {
    return(c("", as.character(unique((demandda$Skill.Bucket)))))
  }
  matched <- demandda$Skill.Bucket[dd1[, skill] > 0]
  c("", as.character(unique(matched)))
}
# Sub-areas whose JDs mention the given skill; with an empty skill,
# "" followed by every sub-area in demandda.
act_location<- function (skill){
  if (skill == "") {
    return(c("", as.character(unique((demandda$Personal.SubArea)))))
  }
  matched <- demandda$Personal.SubArea[dd1[, skill] > 0]
  c("", as.character(unique(matched)))
}
# Skill buckets for a given customer; with an empty customer, "" followed
# by every skill bucket. (Note: the non-empty branch intentionally has no
# leading "" entry, matching the original behaviour.)
list_skillbucket<- function (customer){
  if (customer == "") {
    return(c("", as.character(unique(demandda$Skill.Bucket))))
  }
  unique(demandda$Skill.Bucket[demandda$Customer == customer])
}
# Sub-areas for a given customer; with an empty customer, "" followed by
# every sub-area. (Non-empty branch has no leading "" entry, as before.)
list_location<- function (customer){
  if (customer == "") {
    return(c("", as.character(unique(demandda$Personal.SubArea))))
  }
  unique(demandda$Personal.SubArea[demandda$Customer == customer])
}
# Skill-radar engine: computes skill-to-skill distances via Pearson
# correlation and returns a table shaped for radar display.
#
# Returns list(radar_table, n_JDs_used, input_skill_frequency); when the
# filters match nothing it returns list(data.frame("none"=""), "", "No JD").
# Relies on globals: demandda, tdddataframe, skill, indiadistance, dd_skills.
# NOTE(review): parameter `raio` is never referenced in this body.
newman<-function(input, n, skillbucket, subarea,customer,raio, yea){
# Only if a skill is mentioned by the user, compute the closest skills.
if (input!=""){
# Receive all the user input and filter the job descriptions: each filter
# yields a row-index set; empty filter text means "all rows".
A<-1:nrow(demandda)
if (customer!=""){
A<-which(demandda$Customer == customer)}
B<-1:nrow(demandda)
if (subarea!=""){
B<-which(demandda$Personal.SubArea == subarea)}
C<-1:nrow(demandda)
if (skillbucket!="") {
C<-which(demandda$Skill.Bucket==skillbucket)}
X<-1:nrow(demandda)
if (yea!="") {
X<-which(demandda$year==as.numeric(yea))}
D<-intersect(A,B)
E<-intersect(D,C)
E<-intersect(E,X)
if (length(E)==0){
return(list(data.frame("none"=""), "", "No JD"))
}
tdddataframe<-as.data.frame(tdddataframe[,E]) #Final filtered table (skills x JDs)
row.names(tdddataframe)<-skill
# Add an additional empty column (keeps cor() well-defined for 1-JD cases).
no<-length(tdddataframe)+1
tdddataframe[,no]<-0
d<- tdddataframe[input,]
# Frequency of the input skill across all filtered job descriptions.
coun<-d[, colSums(d == 0)== 0]
freq<- length(coun)
if (freq==0){
return(list(data.frame("none"=""), "", "No JD"))
}
# Distance = (1 - Pearson correlation) / 2, i.e. in [0, 1].
dista <- function(x) ((1-cor(t(x)))/2)
jd<-length(tdddataframe)-1
# If no filters were applied, reuse the precomputed correlation matrix.
# 31049 is the JD count of the full corpus -- TODO keep in sync with CSVs.
if (jd==31049){
#print("using India Distance")
distmatrix<-indiadistance
}
# Otherwise compute Pearson correlation on the filtered matrix.
else {
d1 <- dista(tdddataframe)
distmatrix<-as.data.frame(d1)
}
# Separate out the distances from the input skill to every other skill.
Skills_new<-as.data.frame(distmatrix[,input])
str(Skills_new)
names(Skills_new)<-"dist"
Skills_new$skills<-skill
Skills_new<-Skills_new[is.element(Skills_new$skills,dd_skills$Skills),]
# Apply the 0.5 distance threshold, keep the n closest, drop the input itself.
data1<-Skills_new$skills[(Skills_new$dist<=0.5)]
data2<-head( (Skills_new[order(Skills_new$dist, decreasing=FALSE),]),n)
data2<- data2[data2$skills!=input,]
data<-intersect(data1,data2$skills)
data2<-data2[is.element(data2$skills,data),]
data2<- data2[order(data2$dist, decreasing=FALSE),]
data2$dist<-as.numeric(lapply(data2$dist, function(x) 1-x)) # flip to similarity (1 - distance)
# Prepare the table for the radar display: rows = max, min, value.
d<-max(data2$dist)+0.02
data2$max<-d
f<-min(data2$dist)-0.02
data2$min<-f
data3<-data2[c(4,3,1)]
tra<-data.frame(t(data3))
names(tra)<- data2$skills
return(list(tra, jd, freq))
}
# No skill given: rank skills against a synthetic all-ones reference JD,
# using only the customer / skill-bucket / sub-area / year filters.
else {
# Same filtering scheme as above.
A<-1:nrow(demandda)
if (customer!=""){
A<-which(demandda$Customer == customer)}
B<-1:nrow(demandda)
if (subarea!=""){
B<-which(demandda$Personal.SubArea == subarea)}
C<-1:nrow(demandda)
if (skillbucket!="") {
C<-which(demandda$Skill.Bucket==skillbucket)}
X<-1:nrow(demandda)
if (yea!="") {
X<-which(demandda$year==as.numeric(yea))}
D<-intersect(A,B)
E<-intersect(D,C)
E<-intersect(E,X)
if (length(E)==0){
return(list(data.frame("none"=""), "", "No JD"))
}
tdddataframe<-as.data.frame(tdddataframe[,E])
row.names(tdddataframe)<-skill
# Add a reference row of all 1s; its correlation to each skill ranks them.
addition<-nrow(tdddataframe)+1
tdddataframe[addition,]<-1
# Add an additional empty column so standard deviations are computable.
no<-length(tdddataframe)+1
tdddataframe[,no]<-0
freq<- length(tdddataframe)-1
if (freq==0){
return(list(data.frame("none"="no Skills"), "", "No JD"))
}
# Distance = (1 - Pearson correlation) / 2.
dista <- function(x) ((1-cor(t(x)))/2)
jd<-length(tdddataframe)-1
# Reuse the precomputed matrix when the corpus is unfiltered (see above).
if (jd==31049){
#print("using India Distance")
distmatrix<-indiadistance
}
else {
d1 <- dista(tdddataframe)
distmatrix<-as.data.frame(d1) # Pearson correlation of the filtered matrix
}
# Separate out the distances from the reference vector to the skills.
Skills_new<-as.data.frame(distmatrix[,addition])
str(Skills_new)
names(Skills_new)<-"dist"
Skills_new$skills<-row.names(tdddataframe)
Skills_new<-Skills_new[is.element(Skills_new$skills,dd_skills$Skills),]
# Apply the threshold / top-n selection as in the skill branch.
data1<-Skills_new$skills[which(Skills_new$dist<=0.5)]
data2<-head( (Skills_new[order(Skills_new$dist, decreasing=FALSE),]),n)
data2<- data2[data2$skills!=addition,]
data<-intersect(data1,data2$skills)
data2<-data2[is.element(data2$skills,data),]
data2<- data2[order(data2$dist, decreasing=FALSE),]
data2$dist<-as.numeric(lapply(data2$dist, function(x) 1-x)) # flip to similarity
# Prepare the table for the radar output.
d<-max(data2$dist)+0.02
data2$max<-d
f<-min(data2$dist)-0.02
data2$min<-f
data3<-data2[c(4,3,1)]
tra<-data.frame(t(data3))
names(tra)<- data2$skills
return(list(tra, jd, freq))
}
}
# Look up the alternative skills for a skill name in the `alternatives`
# table; the placeholder name "none" maps to the empty string.
alter<-function (name){
  if (name == "none") {
    ""
  } else {
    alternatives$alternate[alternatives$Skillname == name]
  }
}
# Look up the definition of a skill name in the `alternatives` table;
# the placeholder name "none" maps to the empty string.
defin<-function (name){
  if (name == "none") {
    ""
  } else {
    alternatives$definition[alternatives$Skillname == name]
  }
}
############################################ Contextual Search ################################
# Rank candidate resumes against a requirement built from skills and a job
# description, using cosine similarity over tokenised text (quanteda) and
# tf-idf for the contextual parts.
#
# skill1      comma-separated skills (or ignored when sk.ill/jobboard1 apply)
# job1        free-text job description
# exp1        experience filter, or "No Preference"
# stype1      "eser" = external resume DB, "iser" = internal
# sk.ill      either "I have already entered the skills" or a skill name
#             fed to newman() to expand into related skills
# num1        n passed to newman()
# clack       NOTE(review): never referenced in this body
# functional  free text for the functional-fit score
# systems     free text for the systems-fit score
# jobboard1   "yes" -> expand skill1 via the Dice job board (jobboard())
#
# Returns a data.frame of the top 5 candidates with per-dimension scores,
# or a one-column Error data.frame when nothing can be scored.
likeme <- function(skill1, job1, exp1, stype1, sk.ill, num1,clack, functional, systems, jobboard1){
setwd("D:/HCL/LikeMe")
# Load the skill set data and the stopwords data.
skills <- data.frame(fread("skillClustering.csv", header = TRUE, stringsAsFactors = FALSE))
stp <-data.frame( fread("stopwords.csv", header = TRUE, stringsAsFactors = FALSE))
# Read candidate profiles from the internal or external database per input.
if(stype1 == "eser"){
candidates <- data.frame(fread("external.csv", stringsAsFactors = FALSE))
original <- data.frame( fread("external.csv", stringsAsFactors = FALSE))
candidates <- candidates[,c(1,3,4,5,6,7,8,9)]
original <- original[,c(1,3,4,5,6,7,8,9)]
if(sk.ill == "I have already entered the skills"){
candidates$requirement <- candidates$Profile#Add the skills
}else{
candidates$requirement <- paste("",candidates$Profile )
}
}else if(stype1 == "iser"){
#candidates <- data.frame( fread("internal.csv", stringsAsFactors = FALSE))
#original <- data.frame( fread("internal.csv", stringsAsFactors = FALSE) )
candidates <- read.csv("internal.csv", stringsAsFactors = FALSE)
original <- read.csv("internal.csv", stringsAsFactors = FALSE)
colnames(candidates)[1] <- "V1"
colnames(original)[1] <- "V1"
candidates <- candidates[,c(1,3,4,5,6,7,8,9)]
original <- original[,c(1,3,4,5,6,7,8,9)]
if(sk.ill == "I have already entered the skills"){
candidates$requirement <- candidates$Profile#Add the skills
}else{
candidates$requirement <- paste("",candidates$Profile )
}
}
# Candidate experience search preference.
if(exp1 == "No Preference"){
candidates <- candidates
original <- original
}else{
candidates <- subset(candidates, candidates$experience == exp1)
original <- subset(original, original$experience == exp1)
}
# Build the synthetic "requirement" row that is scored against every
# candidate; skills may come from the user, newman(), or the job board.
if(jobboard1=="no"){
if(sk.ill == "I have already entered the skills"){
new_requirement <- data.frame(V1 = nrow(candidates)+1,File_Name = "",Mobile.Number = 9999999999,Email = "",Profile = job1, Education = "",Skills = skill1, TProfile = "")
new_requirement$requirement <- paste(new_requirement$Skills, new_requirement$Profile)
}else{
skill1 <- paste(colnames(data.frame(newman(sk.ill, num1, "","","","","")[1], check.names = F)),collapse = ",")
new_requirement <- data.frame(V1 = nrow(candidates)+1,File_Name = "",Mobile.Number = 999999999,Email = "", Profile = job1, Education = "",Skills = skill1, TProfile = "")
new_requirement$requirement <- paste(new_requirement$Skills, new_requirement$Profile)
print(new_requirement$requirement)
}
}else{
skill1 <- jobboard(skill1," "," ")$closely_related_skill_Dice_Insights[1]
new_requirement <- data.frame(V1 = nrow(candidates)+1,File_Name = "",Mobile.Number = 9999999999,Email = "",Profile = job1, Education = "",Skills = skill1, TProfile = "")
new_requirement$requirement <- paste(new_requirement$Skills, new_requirement$Profile)
}
print(new_requirement)
candidates <- rbind(new_requirement, candidates)
# Helper functions for tf-idf computation.
term.frequency <- function(row) {
row / sum(row)
}
inverse.doc.freq <- function(col) {
corpus.size <- length(col)
doc.count <- length(which(col > 0))
log10(corpus.size / doc.count)
}
tf.idf <- function(x, idf) {
x * idf
}
candidates$TProfile <- as.character(candidates$TProfile)
candidates$TProfile[1] <- skill1
tokens2 <- tokens(as.character(new_requirement$Skills), what = "word", remove_numbers = TRUE, remove_punct = TRUE)
tokens2 <- tokens_tolower(tokens2)
tokens2 <- tokens_select(tokens2, stp$TEXT, selection = "remove")
tokens2 <- as.character(tokens2)
# --- Skill score: binary presence/absence vectors, cosine similarity ---
# If the requirement has no usable skill tokens, everyone scores 0.
if(grepl("^\\s*$", new_requirement$Skills) | length(tokens2) == 0){
score1 <- data.frame(File = candidates$File_Name,Mobile.Number = candidates$Mobile.Number,Email = candidates$Email, Score = rep(0,nrow(candidates)))
}else{
tokens <- tokens(as.character(new_requirement$Skills), what = "word", remove_numbers = TRUE, remove_punct = TRUE)
tokens <- tokens_tolower(tokens)
tokens <- tokens_select(tokens, stp$TEXT, selection = "remove")
train.tokens.dfm <- dfm(tokens, tolower = FALSE)
tokens <- tokens_wordstem(tokens, language = "english")
tokens <- tokens_ngrams(tokens, n = 1)
if(length(tokens)==1){
print(1)
}
# Tokenizing the skills dictionary.
skills.tokens <- tokens(skills$value, what = "word", remove_numbers = TRUE, remove_punct = TRUE)
skills.tokens <- tokens_tolower(skills.tokens)
skills.tokens <- tokens_select(skills.tokens, stp$TEXT, selection = "remove")
skills.tokens <- tokens_ngrams(skills.tokens, n = 1:5)
skills.tokens <- tokens_select(tokens, unlist(as.list(skills.tokens)), selection = "keep")
skills.tokens <- tokens_select(skills.tokens, stopwords(), selection = "remove")
tokens.set <- append(tokens, skills.tokens)
tokens1 <- tokens(as.character(candidates$TProfile), what = "word", remove_numbers = TRUE, remove_punct = TRUE)
tokens1 <- tokens_tolower(tokens1)
tokens1 <- tokens_select(tokens1, stopwords(), selection = "remove")
tokens1 <- tokens_ngrams(tokens1, n = 1)
#tokens1 <- tryCatch(tokens_select(tokens1, unlist(as.list(tokens)), selection = "keep"), error = function(e){"no skill entered"})
tokens1 <- tokens_select(tokens1, unlist(as.list(tokens)), selection = "keep")
#print(tokens1)
tokens.dfm <- dfm(tokens1, tolower = FALSE)
tokens.matrix <- as.matrix(tokens.dfm)
# Binarise counts: a skill either matches or it does not.
tokens.matrix[tokens.matrix>0]<-1
tokens.df <- as.data.frame(tokens.matrix)
tokens <- as.matrix(tokens.df)
# Creating the tokenized matrix (documents as columns for cosine()).
tokens <- t(tokens)
write.csv(data.frame(tokens),"score222.csv")
# Scoring the candidate based on skill.
library(lsa)
start.time <- Sys.time()
if(nrow(candidates)>1){
# Cosine similarity against the requirement row ("text1").
cos <- cosine(tokens)
cos <- as.data.frame(cos)
score1 <- data.frame(File = candidates$File_Name,Mobile.Number = candidates$Mobile.Number,Email = candidates$Email, score = cos$text1)
score1 <- score1[order(score1$score, decreasing = TRUE),]
names <- data.frame(File = original$File_Name, Email = original$Email, Mobile.Number = original$Email, Skill = original$Skills)
score1 <- left_join(score1, names, by = "File")
colnames(score1) <- c("File","Mobile.Number", "Email", "Score", "em"," em1","Skill")
if(nrow(score1)==0){
score1 <- data.frame(NO = character(), MATCHING = character(), PROFILE = character(), FOUND = character())
}
}else{
score1 <- data.frame(NO = character(), MATCHING = character(), PROFILE = character(), FOUND = character())
}
total.time <- Sys.time() - start.time
total.time
#write.csv(score1,"score222.csv")
score1$Score[is.nan(score1$Score)] <- 0
score1 <- score1[order(score1$Email, decreasing = TRUE),]
}
# --- Context score: tf-idf over the job-description text ---
# Check whether a job description is available or not.
if(grepl("^\\s*$", job1)){
score2 <- data.frame(File = candidates$File_Name,Mobile.Number = candidates$Mobile.Number,Email = candidates$Email, Score = rep(0,nrow(candidates)))
}else{
tokens1 <- tokens(candidates$requirement, what = "word", remove_numbers = TRUE, remove_punct = TRUE)
tokens1 <- tokens_tolower(tokens1)
tokens1 <- tokens_select(tokens1, stopwords(), selection = "remove")
new.tokens <- tokens(as.character(new_requirement$Profile), what = "word", remove_numbers = TRUE, remove_punct = TRUE)
new.tokens <- tokens_tolower(new.tokens)
new.tokens <- tokens_select(new.tokens, stopwords(), selection = "remove")
new.tokens <- tokens_ngrams(new.tokens, n = 1:5)
tokens1 <- tokens_ngrams(tokens1, n = 1:5)
tokens1 <- tokens_select(tokens1, unlist(as.list(new.tokens)), selection = "keep")
new.tokens1 <- tokens(as.character(new_requirement$Skills), what = "word", remove_numbers = TRUE, remove_punct = TRUE)
new.tokens1 <- tokens_tolower(new.tokens1)
new.tokens1 <- tokens_select(new.tokens1, stopwords(), selection = "remove")
new.tokens1 <- tokens_ngrams(new.tokens1, n = 1:5)
# Drop skill tokens so the context score does not double-count skills.
tokens1 <- tokens_select(tokens1, unlist(as.list(new.tokens1)), selection = "remove")
tokens.dfm <- dfm(tokens1, tolower = FALSE)
tokens.matrix <- as.matrix(tokens.dfm)
tokens.df <- as.data.frame(tokens.matrix)
tokens.df <- apply(tokens.matrix, 1, term.frequency)
tokens.idf <- apply(tokens.matrix, 2, inverse.doc.freq)
# Creating a tf-idf matrix.
if(length(tokens.idf)>1){
tokens.tfidf <- apply(tokens.df, 2, tf.idf, idf = tokens.idf)
}else{
tokens.tfidf <- tokens.df*tokens.idf
}
tokens.tfidf <- t(tokens.tfidf)
# Zero-fill rows that produced NaN (e.g. empty documents).
incomplete.cases <- which(!complete.cases(tokens.tfidf))
tokens.tfidf[incomplete.cases,] <- rep(0.0, ncol(tokens.tfidf))
tokens.df <- as.data.frame(tokens.tfidf)
tokens <- as.matrix(tokens.df)
tokens <- t(tokens)
# Scoring the candidate based on context.
library(lsa)
start.time <- Sys.time()
if(nrow(candidates)>1){
# Cosine similarity on the tf-idf vectors.
cos <- cosine(tokens)
cos <- as.data.frame(cos)
score2 <- data.frame(File = candidates$File_Name,Mobile.Number = candidates$Mobile.Number,Email = candidates$Email, score = cos$text1)
score2 <- score2[order(score2$score, decreasing = TRUE),]
names <- data.frame(File = original$File_Name,Email = original$Email, Mobile.Number = original$Email, Skill = original$Skills)
score2 <- left_join(score2, names, by = "File")
colnames(score2) <- c("File","Mobile.Number", "Email", "Score", "em"," em1","Skill")
if(nrow(score2)==0){
score2 <- data.frame(NO = character(), MATCHING = character(), PROFILE = character(), FOUND = character())
}
}else{
score2 <- data.frame(NO = character(), MATCHING = character(), PROFILE = character(), FOUND = character())
}
total.time <- Sys.time() - start.time
total.time
score2$Score[is.nan(score2$Score)] <- 0
score2 <- score2[order(score2$Email, decreasing = TRUE),]
}
# Merge skill + context scores (rows aligned by the identical Email sort).
score1$scores <- score2$Score
score1$cumulative <- score1$Score+score1$scores
# Generic tf-idf/cosine scorer reused for the functional and systems text.
scoring <- function(candidates, context){
candidates$Profile <- as.character(candidates$Profile)
candidates$Profile[1] <- context
tokens1 <- tokens(candidates$Profile, what = "word", remove_numbers = TRUE, remove_punct = TRUE)
tokens1 <- tokens_tolower(tokens1)
tokens1 <- tokens_select(tokens1, stopwords(), selection = "remove")
new.tokens <- tokens(as.character(context), what = "word", remove_numbers = TRUE, remove_punct = TRUE)
new.tokens <- tokens_tolower(new.tokens)
new.tokens <- tokens_select(new.tokens, stopwords(), selection = "remove")
new.tokens <- tokens_ngrams(new.tokens, n = 1:5)
tokens1 <- tokens_ngrams(tokens1, n = 1:5)
tokens1 <- tokens_select(tokens1, unlist(as.list(new.tokens)), selection = "keep")
tokens.dfm <- dfm(tokens1, tolower = FALSE)
tokens.matrix <- as.matrix(tokens.dfm)
tokens.df <- as.data.frame(tokens.matrix)
tokens.df <- apply(tokens.matrix, 1, term.frequency)
tokens.idf <- apply(tokens.matrix, 2, inverse.doc.freq)
tokens.tfidf <- apply(tokens.df, 2, tf.idf, idf = tokens.idf)
tokens.tfidf <- t(tokens.tfidf)
incomplete.cases <- which(!complete.cases(tokens.tfidf))
tokens.tfidf[incomplete.cases,] <- rep(0.0, ncol(tokens.tfidf))
tokens.df <- as.data.frame(tokens.tfidf)
tokens <- as.matrix(tokens.df)
tokens <- t(tokens)
# Score the candidates against the functional/systems requirement text.
if(nrow(candidates)>1){
# Cosine similarity against the requirement row ("text1").
cos <- cosine(tokens)
cos <- as.data.frame(cos)
score <- data.frame(File = candidates$File_Name,Mobile.Number = candidates$Mobile.Number,Email = candidates$Email, score = cos$text1)
score <- score[order(score$score, decreasing = TRUE),]
names <- data.frame(File = original$File_Name,Email = original$Email, Mobile.Number = original$Email, Skill = original$Skills)
score <- score[,c(1,4)]
if(nrow(score)==0){
score <- data.frame(NO = character(), MATCHING = character(), PROFILE = character(), FOUND = character())
}
}else{
score <- data.frame(NO = character(), MATCHING = character(), PROFILE = character(), FOUND = character())
}
return(score)
}
if(grepl("^\\s*$", functional)){
functional_score <- data.frame(File = score1$File, score = rep(0,nrow(score1)))
}else{
functional_score <- scoring(candidates, functional)
}
if(grepl("^\\s*$", systems)){
systems_score <- data.frame(File = score1$File, score = rep(0,nrow(score1)))
}else{
systems_score <- scoring(candidates, systems)
}
score1 <- left_join(score1,functional_score,by = 'File')
score1 <- left_join(score1,systems_score,by = 'File')
# Combine all dimensions and sort candidates by cumulative score.
score1$cscores <- score1$score.x+score1$score.y
score1$cumulative <- score1$cumulative+score1$cscores
score1 <- score1[order(score1$cumulative, decreasing = TRUE),]
score1 <- subset(score1, score1$File!="")
# Hard cut-off: keep only candidates whose skill score exceeds 0.5.
score1 <- subset(score1, score1$Score>0.5)
score1$Mob <- NULL
score1$Skill<-NULL
if(ncol(score1)==9){
colnames(score1) <- c("File","Mobile Number","Email","Skill Score (Out of 1)",
"Context Score (Out of 1)",
"Cumulative Score (Out of 5)",
"Functional Score (Out of 1)",
"Systems Score (Out of 1)",
"FSC Score (Out of 3)")
}else{
colnames(score1) <- c("File","Mobile Number","Email","Skill Score (Out of 1)",
"Skill","em","Context Score (Out of 1)",
"Cumulative Score (Out of 5)",
"Functional Score (Out of 1)",
"Systems Score (Out of 1)",
"FSC Score (Out of 3)")
}
score1$Skill<-NULL
score1$em<-NULL
# Keep only the top 5 candidates and round all scores for display.
score1 <- score1[1:5,]
score1$`Skill Score (Out of 1)` <- round(score1$`Skill Score (Out of 1)`, digits = 2)
score1$`Context Score (Out of 1)` <- round(as.numeric(score1$`Context Score (Out of 1)`), digits = 2)
score1$`Cumulative Score (Out of 5)` <- round(as.numeric(score1$`Cumulative Score (Out of 5)`), digits = 2)
score1$`Functional Score (Out of 1)`<- round(as.numeric(score1$`Functional Score (Out of 1)`),digits = 2)
score1$`Systems Score (Out of 1)`<- round(as.numeric(score1$`Systems Score (Out of 1)`),digits = 2)
score1$`FSC Score (Out of 3)`<- round(as.numeric(score1$`FSC Score (Out of 3)`),digits = 2)
# Drop score columns whose corresponding input text was blank.
if(grepl("^\\s*$", job1)){
score1$`Context Score (Out of 1)`<-NULL
}
if(grepl("^\\s*$", functional)){
score1$`Functional Score (Out of 1)`<-NULL
}
if(grepl("^\\s*$", systems)){
score1$`Systems Score (Out of 1)`<-NULL
}
if(grepl("^\\s*$", functional) & grepl("^\\s*$", systems) ){
score1$`FSC Score (Out of 3)`<- NULL
}
if(grepl("^\\s*$", functional) & grepl("^\\s*$", systems) & grepl("^\\s*$", job1)){
score1$`Cumulative Score (Out of 5)`<- NULL
}
# Annotate each kept candidate with the requested skills they are missing.
if(nrow(score1)>0){
tokens <- tokens(as.character(new_requirement$Skills), what = "word", remove_numbers = TRUE, remove_punct = TRUE)
tokens <- tokens_tolower(tokens)
tokens <- tokens_select(tokens, stp$TEXT, selection = "remove")
train.tokens.dfm <- dfm(tokens, tolower = FALSE)
tokens <- tokens_wordstem(tokens, language = "english")
tokens <- tokens_ngrams(tokens, n = 1)
tokens1 <- tokens(as.character(candidates$TProfile), what = "word", remove_numbers = TRUE, remove_punct = TRUE)
tokens1 <- tokens_tolower(tokens1)
tokens1 <- tokens_select(tokens1, stopwords(), selection = "remove")
tokens1 <- tokens_ngrams(tokens1, n = 1)
skilltokens <- list()
aaa <- character(0)
for(i in 1:nrow(candidates)){
if(!identical(aaa,unlist(tokens1[i]))){
skilltokens[i] <- paste(tokens_select(tokens, unlist(as.list(tokens1[i])), selection = "remove"),collapse = ",")
}else{
skilltokens[i]<-""
}
}
score3 <- data.frame(File = candidates$File_Name, Skills.not.present = unlist(skilltokens))
score1 <- left_join(score1, score3, by = "File")
}
if(!is.na(score1[1,1])){
return(score1)
}else if(jobboard1=="yes"){
return(data.frame(Error = "No Alternative skills found on Dice or job Description entered."))
}else{
return(data.frame(Error = "No Skill or job Description entered."))
}
}
####### tryCatch wrapper for HTML fetches #####################################
# Fetch a URL with rvest::read_html(), converting failures into diagnostic
# strings instead of raising.
#
# @param url Character scalar: the page to download.
# @return An xml_document on success; otherwise one of the "MSG: ..."
#   strings ("MSG: Check the internet connection", "MSG: Not Available in
#   the database", or "MSG: none" for warnings).
readUrl <- function(url) {
  out <- tryCatch(
    {
      read_html(url)
    },
    error=function(cond) {
      d<-as.character(cond)
      # BUG FIX: the original tested isTRUE(grep("host", d)); grep() returns
      # an integer vector, so isTRUE() was always FALSE and the connectivity
      # branch was unreachable. grepl() yields the intended logical.
      if (any(grepl("host", d, fixed = TRUE))){
        return ("MSG: Check the internet connection")
      }
      else {
        return("MSG: Not Available in the database")
      }
    },
    warning=function(cond) {
      return("MSG: none")
    }
  )
  return(out)
}
######################################Like me - Job board search#################################################
#Function to search the job board for alternative skills.
#' Search the Dice "skills" pages for skills closely related to the inputs.
#'
#' For each non-empty input skill, fetches
#' https://www.dice.com/skills/<skill>.html and scrapes the
#' "Related Skills" panel.
#'
#' @param skill1,skill2,skill3 Character skill names; empty strings are
#'   ignored.
#' @return A data frame with columns `keywords` and
#'   `closely_related_skill_Dice_Insights`.
jobboard <- function(skill1, skill2, skill3) {
  # Collect the non-empty inputs.
  l <- c(skill1, skill2, skill3)
  l <- l[l != ""]
  len <- length(l)
  # Result frame: one row per keyword.
  a_dummy <- data.frame(l)
  names(a_dummy) <- "keywords"
  a_dummy$no_of_searches <- 0
  # BUG FIX: the original initialised a column named
  # "closely_related_skill_Dice_Insights_Dice_Insights" (duplicated
  # suffix) while the loop below fills — and the return selects —
  # "closely_related_skill_Dice_Insights". Initialise the column that is
  # actually used.
  a_dummy$closely_related_skill_Dice_Insights <- 0
  a_dummy$link <- 0
  # Scrape each skill's page from Dice Insights.
  for (i in seq_len(len)) {
    d <- gsub(" ", "+", l[i], fixed = TRUE)
    if (d == "Cascading+Style+Sheets+(CSS)") {
      # BUG FIX: the original read a_dummy$l[i]; that column was renamed
      # to "keywords" above, so a_dummy$l is NULL and the comparison
      # below would error on a zero-length value. Use the input vector.
      d <- gsub("(.*? )", "", l[i])
    }
    # URL-encode the handful of skills whose names break the URL scheme.
    if (d == "c") { d <- "C+C%2B%2B" }
    if (d == "c++") { d <- "C+C%2B%2B" }
    if (d == "vc++") { d <- "vc%2B%2B" }
    if (d == "embedded") { d <- "embedded+system" }
    if (d == "c#") { d <- "c%23" }
    # Closest-skill module: fetch the per-skill page.
    url2 <- paste("https://www.dice.com/skills/", d, ".html", sep = "")
    movie2 <- readUrl(url2)
    # BUG FIX: isTRUE(grep(...)) is always FALSE (grep returns an integer
    # vector). readUrl() returns a character message on failure, so test
    # that message explicitly with grepl(); guard with is.character() so
    # a parsed document is never string-matched.
    if (is.character(movie2) && grepl("internet", movie2)) {
      g1 <- "Check internet"
    } else if (inherits(movie2, "xml_document")) {
      g1 <- movie2 %>% html_node(".col-md-7") %>% html_text()
    } else {
      g1 <- "Not avaialble in the database"
    }
    # Strip whitespace and the "Related Skills" heading from the panel.
    s1 <- gsub("\\\t", "", g1)
    s1 <- gsub("\\\n", " ", s1)
    s1 <- gsub("\\Related Skills", "", s1)
    a_dummy$closely_related_skill_Dice_Insights[i] <- s1
    a_dummy$link[i] <- url2
  }
  ddd <- a_dummy[, c("keywords", "closely_related_skill_Dice_Insights")]
  return(ddd)
}
############################################Customer Forecast#################################################
#Function used to forecast the demand for customer
#' Forecast next-quarter demand for the top customers.
#'
#' Subsets the global `demand.dump` by country, aggregates demand per
#' customer for the requested quarter/year (optionally restricted to one
#' skill bucket), keeps the top 10 customers, and runs a per-customer
#' ARIMA forecast over the weekly demand series read from dump.csv.
#'
#' @param a Skill bucket name, or "All" for every skill.
#' @param b Quarter number (1-4).
#' @param c Year.
#' @param country "India" selects INDIA rows; anything else selects USA.
#' @return Data frame of top customers with Demand and a
#'   "Forecast for the Next Quarter" column.
cust.forecast <- function(a, b, c, country) {
  # BUG FIX: the start time was stored in `cust.forecast`, shadowing the
  # function itself, while the final print referenced the undefined
  # `cust.forecast.time` (and sat after return(), so it was unreachable).
  # Use one consistent name; the dead print is removed.
  cust.forecast.time <- Sys.time()
  # Selecting data based on the region selected.
  # NOTE(review): setwd() is a global side effect with a machine-specific
  # path, kept here because the rest of the script relies on it.
  if (country == "India") {
    setwd("D:/HCL/LikeMe")
    demand <- subset(demand.dump, demand.dump$country == "INDIA")
  } else {
    setwd("D:/HCL/LikeMe")
    demand <- subset(demand.dump, demand.dump$country == "USA")
  }
  master.demand <- demand
  demand.area <- master.demand
  # Derive calendar fields from the approval date (day-month-year format).
  demand.area$quarter <- quarter(dmy(demand.area$Approval.Date))
  demand.area$year <- year(dmy(demand.area$Approval.Date))
  demand.area$month <- month(dmy(demand.area$Approval.Date))
  # Aggregate demand per quarter/year/customer (and skill when requested).
  if (a != "All") {
    demand.area <- aggregate(demand.area$InitialDemand, by = list(demand.area$quarter, demand.area$year, demand.area$Skill.Bucket, demand.area$Customer), FUN = sum)
    colnames(demand.area) <- c("Quarter", "Year", "Skill", "Customer", "Demand")
  } else {
    demand.area <- aggregate(demand.area$InitialDemand, by = list(demand.area$quarter, demand.area$year, demand.area$Customer), FUN = sum)
    colnames(demand.area) <- c("Quarter", "Year", "Customer", "Demand")
  }
  demand.area$time <- paste("Q", demand.area$Quarter, "-", demand.area$Year)
  # Keep only the requested quarter/year (and skill).
  if (a != "All") {
    Total <- subset(demand.area, demand.area$Skill == a & Year == c & Quarter == b)
  } else {
    Total <- subset(demand.area, Year == c & Quarter == b)
  }
  Total$Demand[is.na(Total$Demand)] <- 0
  Total <- data.frame(Customer = Total$Customer, Demand = Total$Demand)
  Total <- Total[order(Total$Demand, decreasing = TRUE), ]
  Total <- subset(Total, Total$Demand != 0)
  # Top 10 customers by demand.
  Total <- Total[1:10, ]
  # Per-customer forecast of the next 12 weeks of demand (summed).
  forecasting <- function(cust) {
    setwd("D:/HCL/LikeMe/Demand")
    demand <- data.frame(fread("dump.csv", stringsAsFactors = F))
    demand$date <- dmy(demand$Req.Date)
    demand$quarter <- quarter(demand$date)
    demand$month <- month(demand$date)
    demand$year <- year(demand$date)
    demand$week <- week(demand$date)
    dates <- demand
    if (a != "All") {
      demand <- demand %>% filter(demand$Skill.Bucket == a)
    }
    location.demand <- aggregate(demand$InitialDemand, by = list(demand$Customer), FUN = sum)
    location.demand <- location.demand[order(location.demand$x, decreasing = T), ]
    location.demand <- location.demand[1:3, ]$Group.1
    demand <- demand %>% filter(tolower(demand$Customer) == tolower(cust))
    if (nrow(demand) == 0) {
      # No history for this customer: forecast zero.
      return(0)
    } else {
      # Weekly demand series, padded to the full week template so gaps
      # become explicit zeros.
      demand <- aggregate(demand$InitialDemand, by = list(demand$week, demand$year), FUN = sum)
      colnames(demand) <- c("Week", "Year", "Demand")
      setwd("D:/HCL/LikeMe")
      template <- data.frame(fread("template2015.csv"))
      colnames(template) <- c("Year", "Week")
      demand <- merge(template, demand, all = TRUE)
      demand$Demand[is.na(demand$Demand)] <- 0
      # Truncate the series at the end of the last complete quarter.
      if (month(max(dates$date)) %in% c(1, 2, 3)) {
        n <- length(unique(dates$year)) - 1
        n <- n * 52
        # BUG FIX: this branch used frequency = n while every sibling
        # branch uses the weekly frequency 52; use 52 consistently.
        demand.ts <- tsclean(ts(demand[1:n, ]$Demand, frequency = 52))
      }
      if (month(max(dates$date)) %in% c(4, 5, 6)) {
        n <- length(unique(dates$year)) - 1
        n <- (n * 52) + 13
        demand.ts <- tsclean(ts(demand[1:n, ]$Demand, frequency = 52))
      }
      if (month(max(dates$date)) %in% c(7, 8, 9)) {
        n <- length(unique(dates$year)) - 1
        n <- (n * 52) + 26
        demand.ts <- tsclean(ts(demand[1:n, ]$Demand, frequency = 52))
      }
      if (month(max(dates$date)) %in% c(10, 11, 12)) {
        n <- length(unique(dates$year)) - 1
        n <- (n * 52) + 38
        demand.ts <- tsclean(ts(demand[1:n, ]$Demand, frequency = 52))
      }
      # Sum of the next 12 forecast weeks, rounded to whole positions.
      return(round(sum(forecast(auto.arima(demand.ts), h = 12)$mean[1:12])))
    }
  }
  toplocation <- Total$Customer
  toplocation <- lapply(toplocation, function(x) forecasting(x))
  Total$'Forecast for the Next Quarter' <- unlist(toplocation)
  return(Total)
}
############################################Combination Forecast############################################
#Function to forecast the demand for the different combinations of location and customers.
#' Forecast demand for combinations of top locations and top customers.
#'
#' Reads the demand dump from the current working directory, finds the
#' five highest-demand locations and customers for the requested
#' quarter/year (optionally restricted to one skill bucket), then runs an
#' ARIMA forecast for every location x customer pair.
#'
#' @param a Skill bucket name, or "All".
#' @param b Quarter number (1-4).
#' @param c Year.
#' @param country Currently unused; kept for interface compatibility with
#'   the sibling forecasting functions (the original's country branch was
#'   commented out).
#' @return Data frame of Skill/Location/Customer/Forecast rows, or a
#'   one-column message frame when no skill was selected.
combopred <- function(a, b, c, country) {
  # NOTE(review): reads "dump.csv" relative to the current working
  # directory, which an earlier call may have pointed at
  # D:/HCL/LikeMe/Demand — confirm call ordering.
  master.demand <- data.frame(fread("dump.csv"))
  demand.area <- master.demand
  # Derive calendar fields from the approval date.
  demand.area$date <- dmy(demand.area$Approval.Date)
  demand.area$quarter <- quarter(dmy(demand.area$Approval.Date))
  demand.area$year <- year(dmy(demand.area$Approval.Date))
  demand.area$month <- month(dmy(demand.area$Approval.Date))
  demand.area$week <- week(dmy(demand.area$Approval.Date))
  dem <- demand.area
  # Aggregate demand per location and per customer. (The original's
  # if/else on `a` had two byte-identical branches; collapsed to one.)
  demand.location <- aggregate(demand.area$InitialDemand, by = list(demand.area$quarter, demand.area$year, demand.area$Skill.Bucket, demand.area$Personal.SubArea), FUN = sum)
  colnames(demand.location) <- c("Quarter", "Year", "Skill", "Location", "Demand")
  demand.customer <- aggregate(demand.area$InitialDemand, by = list(demand.area$quarter, demand.area$year, demand.area$Skill.Bucket, demand.area$Customer), FUN = sum)
  colnames(demand.customer) <- c("Quarter", "Year", "Skill", "Customer", "Demand")
  # BUG FIX: the original built `time` from demand.area$Quarter/$Year —
  # columns that do not exist on demand.area (its names are lowercase) —
  # so `time` was a recycled "Q - " constant. Use each frame's own columns.
  demand.location$time <- paste("Q", demand.location$Quarter, "-", demand.location$Year)
  demand.customer$time <- paste("Q", demand.customer$Quarter, "-", demand.customer$Year)
  # Pick the top five locations and customers for the requested period.
  if (a != "All") {
    Total.location <- subset(demand.location, Year == c & Quarter == b & demand.location$Skill == a)
    Total.location <- Total.location[order(Total.location$Demand, decreasing = T), ]
    Total.location <- Total.location$Location[1:5]
    Total.customer <- subset(demand.customer, Year == c & Quarter == b & demand.customer$Skill == a)
    Total.customer <- Total.customer[order(Total.customer$Demand, decreasing = T), ]
    Total.customer <- Total.customer$Customer[1:5]
  } else {
    Total.location <- subset(demand.location, Year == c & Quarter == b)
    Total.location <- Total.location[order(Total.location$Demand, decreasing = T), ]
    Total.location <- Total.location$Location[1:5]
    Total.customer <- subset(demand.customer, Year == c & Quarter == b)
    Total.customer <- Total.customer[order(Total.customer$Demand, decreasing = T), ]
    Total.customer <- Total.customer$Customer[1:5]
  }
  # Every location x customer pairing.
  grid <- expand.grid(Total.location, Total.customer)
  colnames(grid) <- c("Location", "Customer")
  # Forecast next quarter's demand for one location/customer pair.
  combination.forecasting <- function(Locat, Custo) {
    demand <- dem
    dates <- dem
    if (a != "All") {
      demand <- subset(demand, demand$Skill.Bucket == a)
    }
    demand <- subset(demand, demand$Personal.SubArea == Locat)
    demand <- subset(demand, demand$Customer == Custo)
    if (nrow(demand) == 0) {
      return("No Such Combination")
    } else {
      # Weekly series padded to the full week template.
      demand <- aggregate(demand$InitialDemand, by = list(demand$week, demand$year), FUN = sum)
      colnames(demand) <- c("Week", "Year", "Demand")
      setwd("D:/HCL/LikeMe")
      template <- data.frame(fread("template2015.csv"))
      colnames(template) <- c("Year", "Week")
      demand <- merge(template, demand, all = TRUE)
      demand$Demand[is.na(demand$Demand)] <- 0
      # Truncate the series at the end of the last complete quarter.
      if (month(max(dates$date)) %in% c(1, 2, 3)) {
        n <- length(unique(dates$year)) - 1
        n <- n * 52
        # BUG FIX: this branch used frequency = n while its siblings use
        # the weekly frequency 52; use 52 consistently.
        demand.ts <- tsclean(ts(demand[1:n, ]$Demand, frequency = 52))
      }
      if (month(max(dates$date)) %in% c(4, 5, 6)) {
        n <- length(unique(dates$year)) - 1
        n <- (n * 52) + 13
        demand.ts <- tsclean(ts(demand[1:n, ]$Demand, frequency = 52))
      }
      if (month(max(dates$date)) %in% c(7, 8, 9)) {
        n <- length(unique(dates$year)) - 1
        n <- (n * 52) + 26
        demand.ts <- tsclean(ts(demand[1:n, ]$Demand, frequency = 52))
      }
      if (month(max(dates$date)) %in% c(10, 11, 12)) {
        n <- length(unique(dates$year)) - 1
        n <- (n * 52) + 38
        demand.ts <- tsclean(ts(demand[1:n, ]$Demand, frequency = 52))
      }
      # Fit and forecast once; the original refit auto.arima a second
      # time just to return the same value it had tested.
      fc <- round(sum(forecast(auto.arima(demand.ts), h = 12)$mean[1:12]))
      if (fc < 0) {
        return("Not Predictable")
      } else {
        return(fc)
      }
    }
  }
  if (a != "All") {
    Total <- data.frame(Skill = rep(a, nrow(grid)), grid, Forecast = mapply(combination.forecasting, grid$Location, grid$Customer))
    Total <- subset(Total, Total$Forecast != "No Such Combination")
    Total <- subset(Total, Total$Customer != "Others")
  } else {
    Total <- data.frame(No_Skill_Selected = "No Skill Selected so the predictions cannot be made for Customer and Location combinations if a Skill was not selected")
  }
  return(Total)
}
###########################################DSM+################################################################
#Function to forecast the overall demand
#' Forecast overall demand (DSM+).
#'
#' Works off the global `demand.dump`: subsets by country, engineers
#' fulfillment columns, tokenizes the requirement text with quanteda,
#' optionally buckets newly added demand with a pre-trained model, then
#' produces quarterly ARIMA forecasts per skill bucket.
#'
#' @param skill.input Skill bucket name, or "All".
#' @param country "India" selects INDIA rows; anything else selects USA.
#' @return Data frame `jfm` with columns Quarter, Demand, Year.
forecaster <- function(skill.input, country){
if(country=="India"){
#setwd("D:/HCL/LikeMe")
#demand <- data.frame( fread("dump2.csv", header = TRUE, stringsAsFactors = FALSE))
demand <- subset(demand.dump, demand.dump$country=="INDIA")
#setwd("D:/HCL/LikeMe/Demand")
#write.csv(demand,"demand.csv")
#write.csv(demand, "dump.csv")
}else{
#setwd("D:/HCL/LikeMe")
#demand <- data.frame( fread("dump2.csv", header = TRUE, stringsAsFactors = FALSE))
demand <- subset(demand.dump, demand.dump$country=="USA")
#setwd("D:/HCL/LikeMe/Demand")
#write.csv(demand,"demand.csv")
#write.csv(demand, "dump.csv")
}
#setwd("D:/HCL/LikeMe/Demand")
#Read the demand data file from the folder.
#master <- data.frame(fread("demand.csv", header = TRUE, stringsAsFactors = FALSE))
master<-demand
#Create a variable called Total Fulfilled.
master$Total.Fulfilled <- master$Internal_Filled+master$External_Joined
#Create a variable called Unfulfilled Overdue.
master$Unfulfilled.Overdue <- master$InitialDemand-(master$Internal_Filled+master$External_Joined+master$DroppedPos)
#Select columns that is needed for analysis and import them.
master <- master[,c("V1", "ReqNo", "Joining.Level.2","Customer","Segment",
"Req.Date","Skill.Bucket","Primary.Skill.Area","Requisition.Source",
"Internal_Filled","External_Joined","Total.Fulfilled",
"Unfulfilled.Overdue","Vacancy","DroppedPos","InitialDemand","vAdditionalRemarks","Personal.SubArea")]
#Remove observations from the data that do not have any requisition date.
master <- master[complete.cases(master$Req.Date),]
#Modifying the column names.
colnames(master) <- c("data.src","srn","l2","customer","segment","date","skill","sr.skill","req.sor",
"int.ful","ext.ful","tot.ful","un.od","net.demand","demand.drop",
"overall.demand","job.desc","Location")
#Changing the classes of the variables.
master$date <- dmy(master$date)
master$data.src <- factor(master$data.src)
master$l2 <- factor(master$l2)
master$segment <- factor(master$segment)
master$skill <- factor(master$skill)
master$req.sor <- factor(master$req.sor)
#Keep a pre-dedup copy with month/year for the tail-end aggregation below.
master1 <- master
master1$month <- month(master1$date)
master1$year <- year(master1$date)
#Removing duplicates.
master <- master[!duplicated(master),]
#Uncomment the following lines of code when the first new demand file is placed or uploaded.
#new.demand <- read.csv("newdemand.csv", stringsAsFactors = F)
#colnames(new.demand) <- c("data.src","srn","l2","customer","segment","date","skill","sr.skill","req.sor",
#                          "int.ful","ext.ful","tot.ful","un.od","net.demand","demand.drop",
#                          "overall.demand","job.desc","Location")
#new.demand <- new.demand[complete.cases(new.demand$Req.Date),]
#colnames(new.demand) <- c("data.src","srn","l2","customer","segment","date","skill","sr.skill","req.sor",
#                          "int.ful","ext.ful","tot.ful","un.od","net.demand","demand.drop",
#                          "overall.demand","job.desc","Location")
#new.demand$date <- dmy(new.demand$date)
#new.demand$data.src <- factor(new.demand$data.src)
#new.demand$l2 <- factor(new.demand$l2)
#new.demand$segment <- factor(new.demand$segment)
#new.demand$skill <- factor(new.demand$skill)
#new.demand$req.sor <- factor(new.demand$req.sor)
#Comment the next line when new demand is placed in the folder or uploaded.
#With new.demand == master, rbind + dedup below leaves master.demand
#identical to master, so the "new demand" branch is normally skipped.
new.demand <- master
master.length <- nrow(master)
new.length <- nrow(new.demand)
master.demand <- rbind(master, new.demand)
master.demand <- master.demand[!duplicated(master.demand),]
master.demand$requirement <- paste(master.demand$sr.skill,master.demand$job.desc)
#Use the package "quanteda" to work with the text data.
#tokenize the requirements.
full.tokens <- tokens(master.demand$requirement, what = "word", remove_numbers = TRUE, remove_punct = TRUE)
#Lower case the tokens.
full.tokens <- tokens_tolower(full.tokens)
#Removing stop words.
full.tokens <- tokens_select(full.tokens, stopwords(), selection = "remove")
#performing stemming on the requirement text.
full.tokens <- tokens_wordstem(full.tokens, language = "english")
#Create bag of words.
full.tokens.dfm <- dfm(full.tokens, tolower = FALSE)
#Transform to matrix.
full.tokens.matrix <- as.matrix(full.tokens.dfm)
#Convert to dataframe.
full.tokens.df <- data.frame(full.tokens.matrix)
#Binding the skill bucket as the class label
full.tokens.df$class.label <- master.demand$skill
skills.list <- skill.input
#Check the whether there is any new demand that has been added. If present,
#1. Bucket the demand or,
#2. Forecast the demand directly
if(nrow(master.demand) > nrow(master)){
#Split and bucket the new demand.
#train <- full.tokens.df[1:master.length,]
#Separate the new demand.
#NOTE(review): operator precedence bug — master.length+1:nrow(...) is
#master.length + (1:nrow(...)), which indexes past the end of the frame;
#the intent is presumably (master.length+1):nrow(full.tokens.df).
test <- full.tokens.df[master.length+1:nrow(full.tokens.df),]
#Load the model that was created.
#NOTE(review): hard-coded, user-specific path; the loaded .RData is
#expected to provide `model` (and possibly `train`).
load("C:/Users/varun/Desktop/jije.RData")
#Train Random Forest
#rf.train <- randomForest(class.label~.-req.no-l2.name, data = train)
#Predict the buckets using the model that was created.
rf.predict <- predict(model, test)
#Add the predictions to the test dataset.
test$class.label <- rf.predict
#Bind the train and test.
#NOTE(review): `train` is not defined in this function (its creation is
#commented out above); this only works if the loaded .RData supplies it.
train.test <- rbind(train,test)
#Add the skills back to the master demand.
master.demand$skill <- train.test$class.label
#Creating "month" and "year"
master.demand$week <- week(master.demand$date)
master.demand$month <- month(master.demand$date)
master.demand$year <- year(master.demand$date)
master.demand$mon_year <- as.yearmon(master.demand$date)
master.demand$quarter <- quarter(master.demand$date)
#Subset data after 2016 and subset the demands A & C.
demand.2016 <- subset(master.demand, year>2015)
demand.2016 <- subset(demand.2016, segment == "A" | segment == "C")
#Creating a skill list.
net.results = data.frame( month = character() , Skill = character(), Seasonal_Naive = numeric())
ovr.results = data.frame( month = character() , Skill = character(), Seasonal_Naive = numeric())
tot.results = data.frame( month = character() , Skill = character(), Seasonal_Naive = numeric())
for(i in 1:1){
#Function used for predicting the demand.
#Drops the last (partial) row, fits auto.arima on the weekly demand
#series and returns three 4-week sums for the next 12 forecast weeks.
prediction <- function(ovrdemand.agg, ext.agg,int.agg, totful.agg){
colnames(ovrdemand.agg) <- c("month","year","demand")
colnames(ext.agg) <- c("month","year","demand")
colnames(int.agg) <- c("month","year","demand")
colnames(totful.agg) <- c("month","year","demand")
#Finding the last month and year.
ovrdemand.agg <- ovrdemand.agg[-c(nrow(ovrdemand.agg))]
ext.agg <- ext.agg[-c(nrow(ext.agg))]
int.agg <- int.agg[-c(nrow(int.agg))]
totful.agg <- totful.agg[-c(nrow(totful.agg))]
#Convert data to time series.
ovr.demandseries <- ts(ovrdemand.agg$demand, frequency = 52)
ext.demandseries <- ts(ext.agg$demand, frequency = 52)
int.demandseries <- ts(int.agg$demand, frequency = 52)
tot.demandseries <- ts(totful.agg$demand, frequency = 52)
order <-data.frame( fread("order.csv"))
if(skills.list!="All"){
order <- subset(order, order$skill == skills.list)
}else{
order<-order
}
#Forecast using the auto.arima function
ovr.forecast <- forecast(auto.arima(ovr.demandseries), h = 12)
final.results <- data.frame(month = c("Month 1", "Month 2","Month 3"),
overall = c(sum(ovr.forecast$mean[1:4]),sum(ovr.forecast$mean[5:8]),sum(ovr.forecast$mean[9:12])))
return(final.results)
}
#subset the demand by skill.
skill.demand <- subset(demand.2016, demand.2016$skill == skills.list)
#Aggregate the demand.
ovrdemand.agg <- aggregate(skill.demand$overall.demand, by = list(skill.demand$week, skill.demand$year), FUN = sum)
ext.agg <- aggregate(skill.demand$ext.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
int.agg <- aggregate(skill.demand$int.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
totful.agg <- aggregate(skill.demand$tot.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
#First 52 weeks of history for the first quarterly prediction.
ovrdemand.agg <- ovrdemand.agg[1:52,]
ext.agg <- ext.agg[1:52,]
int.agg <- int.agg[1:52,]
totful.agg <- totful.agg[1:52,]
#Predict for JFM
jfm <- prediction(ovrdemand.agg, ext.agg, int.agg, totful.agg)
ovrdemand.agg <- aggregate(skill.demand$overall.demand, by = list(skill.demand$week, skill.demand$year), FUN = sum)
ext.agg <- aggregate(skill.demand$ext.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
int.agg <- aggregate(skill.demand$int.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
totful.agg <- aggregate(skill.demand$tot.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
#Extend the window by one quarter (13 weeks) for the next prediction.
ovrdemand.agg <- ovrdemand.agg[1:64,]
ext.agg <- ext.agg[1:64,]
int.agg <- int.agg[1:64,]
totful.agg <- totful.agg[1:64,]
#Predict for AMJ
amj <- prediction(ovrdemand.agg, ext.agg, int.agg, totful.agg)
ovrdemand.agg <- aggregate(skill.demand$overall.demand, by = list(skill.demand$week, skill.demand$year), FUN = sum)
ext.agg <- aggregate(skill.demand$ext.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
int.agg <- aggregate(skill.demand$int.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
totful.agg <- aggregate(skill.demand$tot.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
ovrdemand.agg <- ovrdemand.agg[1:76,]
ext.agg <- ext.agg[1:76,]
int.agg <- int.agg[1:76,]
totful.agg <- totful.agg[1:76,]
#Predict the JAS
jas <- prediction(ovrdemand.agg, ext.agg, int.agg, totful.agg)
#Stack the three quarterly predictions into one frame.
jfm <- rbind(jfm,amj)
jfm <- rbind(jfm,jas)
}
}else{
#Creating "month" and "year"
master.demand$week <- week(master.demand$date)
master.demand$month <- month(master.demand$date)
master.demand$year <- year(master.demand$date)
master.demand$mon_year <- as.yearmon(master.demand$date)
master.demand$quarter <- quarter(master.demand$date)
#write.csv(master.demand, "master.csv")
#Subset data after 2016 and subset the demands A & C.
demand.2016 <- subset(master.demand, year>2015)
demand.2016 <- subset(demand.2016, segment == "A" | segment == "C")
#Creating a skill list.
net.results = data.frame( month = character() , Skill = character(), Seasonal_Naive = numeric())
ovr.results = data.frame( month = character() , Skill = character(), Seasonal_Naive = numeric())
tot.results = data.frame( month = character() , Skill = character(), Seasonal_Naive = numeric())
for(i in 1:1){
#As above, but on tsclean()-ed series and returning a single 12-week sum.
prediction <- function(ovrdemand.agg, ext.agg,int.agg, totful.agg){
#Finding the last month and year.
ovrdemand.agg <- ovrdemand.agg[-c(nrow(ovrdemand.agg))]
ext.agg <- ext.agg[-c(nrow(ext.agg))]
int.agg <- int.agg[-c(nrow(int.agg))]
totful.agg <- totful.agg[-c(nrow(totful.agg))]
#Convert data to time series.
ovr.demandseries <- tsclean(ts(ovrdemand.agg$demand, frequency = 52))
ext.demandseries <- tsclean(ts(ext.agg$demand, frequency = 52))
int.demandseries <- tsclean(ts(int.agg$demand, frequency = 52))
tot.demandseries <- tsclean(ts(totful.agg$demand, frequency = 52))
order <-data.frame( fread("order.csv"))
order <- subset(order, order$skill == as.character(skills.list))
#Forecast using auto.arima
ovr.forecast <- forecast(auto.arima(ovr.demandseries), h = 12)
final.results <- data.frame(month = c("Month 1"),
overall = c(sum(ovr.forecast$mean[1:12])))
return(final.results)
}
if(skills.list!="All"){
skill.demand <- subset(demand.2016, demand.2016$skill == as.character(skills.list))
}else{
skill.demand <- demand.2016
}
#Aggregate the demand.
ovrdemand.agg <- aggregate(skill.demand$overall.demand, by = list(skill.demand$week, skill.demand$year), FUN = sum)
ext.agg <- aggregate(skill.demand$ext.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
int.agg <- aggregate(skill.demand$int.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
totful.agg <- aggregate(skill.demand$tot.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
colnames(ovrdemand.agg) <- c("week","year","demand")
colnames(ext.agg) <- c("week","year","demand")
colnames(int.agg) <- c("week","year","demand")
colnames(totful.agg) <- c("week","year","demand")
setwd("D:/HCL/LikeMe")
#Pad the overall series to the full week template; gaps become zeros.
template <- data.frame( fread("template2015.csv"))
colnames(template) <- c("year", "week")
ovrdemand.agg <- merge(template, ovrdemand.agg, all = TRUE)
ovrdemand.agg$demand[is.na(ovrdemand.agg$demand)] <- 0
ovrdemand.agg <- ovrdemand.agg[1:52,]
ext.agg <- ext.agg[1:52,]
int.agg <- int.agg[1:52,]
totful.agg <- totful.agg[1:52,]
#Prediction in JFM
jfm <- prediction(ovrdemand.agg, ext.agg, int.agg, totful.agg)
ovrdemand.agg <- aggregate(skill.demand$overall.demand, by = list(skill.demand$week, skill.demand$year), FUN = sum)
ext.agg <- aggregate(skill.demand$ext.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
int.agg <- aggregate(skill.demand$int.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
totful.agg <- aggregate(skill.demand$tot.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
colnames(ovrdemand.agg) <- c("week","year","demand")
colnames(ext.agg) <- c("week","year","demand")
colnames(int.agg) <- c("week","year","demand")
colnames(totful.agg) <- c("week","year","demand")
template <- data.frame( fread("template2015.csv"))
colnames(template) <- c("year", "week")
ovrdemand.agg <- merge(template, ovrdemand.agg, all = TRUE)
ovrdemand.agg$demand[is.na(ovrdemand.agg$demand)] <- 0
ovrdemand.agg <- ovrdemand.agg[1:64,]
ext.agg <- ext.agg[1:64,]
int.agg <- int.agg[1:64,]
totful.agg <- totful.agg[1:64,]
#Prediction for April, May and June.
amj <- prediction(ovrdemand.agg, ext.agg, int.agg, totful.agg)
ovrdemand.agg <- aggregate(skill.demand$overall.demand, by = list(skill.demand$week, skill.demand$year), FUN = sum)
ext.agg <- aggregate(skill.demand$ext.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
int.agg <- aggregate(skill.demand$int.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
totful.agg <- aggregate(skill.demand$tot.ful, by = list(skill.demand$week, skill.demand$year), FUN = sum)
colnames(ovrdemand.agg) <- c("week","year","demand")
colnames(ext.agg) <- c("week","year","demand")
colnames(int.agg) <- c("week","year","demand")
colnames(totful.agg) <- c("week","year","demand")
template <- data.frame( fread("template2015.csv"))
colnames(template) <- c("year", "week")
ovrdemand.agg <- merge(template, ovrdemand.agg, all = TRUE)
ovrdemand.agg$demand[is.na(ovrdemand.agg$demand)] <- 0
#Logic to create forcast for the next quarter based on the dates in the data.
#Truncate the weekly series at the end of the last complete quarter.
if(month(max(master.demand$date)) %in% c(1,2,3)){
n <- length(unique(master.demand$year))-1
n <- n*52
ovrdemand.agg <- ovrdemand.agg[1:n,]
ext.agg <- ext.agg[1:n,]
int.agg <- int.agg[1:n,]
totful.agg <- totful.agg[1:n,]
}
if(month(max(master.demand$date)) %in% c(4,5,6)){
n <- length(unique(master.demand$year))-1
n <- (n*52)+(13)
ovrdemand.agg <- ovrdemand.agg[1:n,]
ext.agg <- ext.agg[1:n,]
int.agg <- int.agg[1:n,]
totful.agg <- totful.agg[1:n,]
}
if(month(max(master.demand$date)) %in% c(7,8,9)){
n <- length(unique(master.demand$year))-1
n <- (n*52)+(26)
ovrdemand.agg <- ovrdemand.agg[1:n,]
ext.agg <- ext.agg[1:n,]
int.agg <- int.agg[1:n,]
totful.agg <- totful.agg[1:n,]
}
if(month(max(master.demand$date)) %in% c(10,11,12)){
n <- length(unique(master.demand$year))-1
n <- (n*52)+(38)
ovrdemand.agg <- ovrdemand.agg[1:n,]
ext.agg <- ext.agg[1:n,]
int.agg <- int.agg[1:n,]
totful.agg <- totful.agg[1:n,]
}
#Prediction for July, August and September.
jas <- prediction(ovrdemand.agg, ext.agg, int.agg, totful.agg)
#Stack the three quarterly predictions into one frame.
jfm <- rbind(jfm,amj)
jfm <- rbind(jfm,jas)
}
}
#Condition to check whether the prediction is for a skill or the complete data.
if(skills.list!="All"){
skill.demand <- subset(master1, master1$skill == as.character(skills.list))
}else{
skill.demand <- master1
}
#Subset the demand for years greater than 2015.
skill.demand <- subset(skill.demand, skill.demand$year >2015)
skill.demand$quarter <- quarter(skill.demand$date)
#Subset the demand for the segments A and C.
skill.demand <- subset(skill.demand, skill.demand$segment == "A" | skill.demand$segment == "C")
#Merge all the results together into one dataframe.
if(nrow(skill.demand)!= 0){
#Aggregate the overall demand, external fulfillment, internal fulfillment and total fulfillment.
ovrdemand.agg <- aggregate(skill.demand$overall.demand, by = list(skill.demand$quarter, skill.demand$year), FUN = sum)
ext.agg <- aggregate(skill.demand$ext.ful, by = list(skill.demand$quarter, skill.demand$year), FUN = sum)
int.agg <- aggregate(skill.demand$int.ful, by = list(skill.demand$quarter, skill.demand$year), FUN = sum)
totful.agg <- aggregate(skill.demand$tot.ful, by = list(skill.demand$quarter, skill.demand$year), FUN = sum)
#Bind all the aggregations together.
ovrdemand.agg <- cbind(ovrdemand.agg, ext.agg$x)
ovrdemand.agg <- cbind(ovrdemand.agg, int.agg$x)
ovrdemand.agg <- cbind(ovrdemand.agg, totful.agg$x)
colnames(ovrdemand.agg) <- c("quarter","year","overall","external","internal","total")
} else{
ovrdemand.agg <- data.frame(month = rep("month", 5), year = rep("2017",5), overall = rep("0",5), external = rep("0",5), internal = rep("0",5), total = rep("0",5))
}
#write.csv(ovrdemand.agg, "original.csv")
#NOTE(review): the forecast year is hard-coded to 2017 — confirm this
#should not be derived from max(master.demand$date) instead.
jfm$year <- rep(2017,3)
colnames(jfm) <- c("Quarter","Demand","Year")
jfm$Demand <- round(jfm$Demand)
setwd("D:/HCL/LikeMe")
qy <- data.frame( fread("quarteryear.csv"))
ovrdemand.agg <- merge(qy,ovrdemand.agg, all=TRUE)
ovrdemand.agg <- ovrdemand.agg[order(ovrdemand.agg$quarter),]
ovrdemand.agg <- ovrdemand.agg[order(ovrdemand.agg$year),]
#Label the forecast rows with the quarter following the data's last month.
if(month(max(master.demand$date)) %in% c(1,2,3)){
jfm$Quarter<- "Q1 - JFM"
}else if(month(max(master.demand$date)) %in% c(4,5,6)){
jfm$Quarter <- "Q2 - AMJ"
}else if(month(max(master.demand$date)) %in% c(7,8,9)){
jfm$Quarter <- "Q3 - JAS"
}else if(month(max(master.demand$date)) %in% c(10,11,12)){
jfm$Quarter <- "Q4 - OND"
}
return(jfm)
}
# Create the data used for plotting demand on maps.
#' Build the per-state demand table used for plotting maps.
#'
#' Subsets the global `demand.dump` by country, aggregates demand per
#' quarter/year/location (optionally per skill bucket), and joins the
#' result onto the US county/state polygon tables so every state gets a
#' Demand value (zero when absent).
#'
#' @param a Skill bucket name, or "All".
#' @param b Quarter number (1-4).
#' @param c Year.
#' @param country "India" selects INDIA rows; anything else selects USA.
#'   NOTE(review): even the India branch is joined onto US county data
#'   below — confirm whether that is intended.
#' @return Data frame with columns State and Demand, 50 rows.
maptable <- function(a,b,c, country){
if(country=="India"){
#setwd("D:/HCL/LikeMe")
#demand <- data.frame( fread("dump2.csv", header = TRUE, stringsAsFactors = FALSE))
demand <- subset(demand.dump, demand.dump$country=="INDIA")
#setwd("D:/HCL/LikeMe/Demand")
#write.csv(demand,"demand.csv")
#write.csv(demand, "dump.csv")
}else{
#setwd("D:/HCL/LikeMe")
#demand <- data.frame( fread("dump2.csv", header = TRUE, stringsAsFactors = FALSE))
demand <- subset(demand.dump, demand.dump$country=="USA")
#setwd("D:/HCL/LikeMe/Demand")
#write.csv(demand,"demand.csv")
#write.csv(demand, "dump.csv")
}
#Set the working directory to the Demand folder.
#setwd("D:/HCL/LikeMe/Demand")
#master.demand <-data.frame( fread("dump.csv"))
master.demand <-demand
demand.area <- master.demand
#Derive calendar fields from the approval date (day-month-year format).
demand.area$quarter <- quarter(dmy(demand.area$Approval.Date))
demand.area$year <- year(dmy(demand.area$Approval.Date))
if(a!="All"){
demand.area <- aggregate(demand.area$InitialDemand, by = list(demand.area$quarter,demand.area$year,demand.area$Skill.Bucket, demand.area$Personal.SubArea), FUN = sum)
colnames(demand.area) <- c("Quarter", "Year", "Skill", "Location", "Demand")
}else{
demand.area <- aggregate(demand.area$InitialDemand, by = list(demand.area$quarter,demand.area$year, demand.area$Personal.SubArea), FUN = sum)
colnames(demand.area) <- c("Quarter", "Year", "Location", "Demand")
}
demand.area$time <- paste("Q",demand.area$Quarter,"-",demand.area$Year)
#Getting the list of states in the United States of America.
all_states <- map_data("county")
#Renaming the columns
colnames(all_states) <- c("long","lat", "group", "order", "Location", "subregion")
#Converting the location to lower case
demand.area$Location <- tolower(demand.area$Location)
#Keep only the requested quarter/year (Year/Quarter are columns; b and c
#are function arguments — subset() resolves both via NSE).
if(a!="All"){
Total <- subset(demand.area, demand.area$Skill == a & Year == c & Quarter ==b)
}else{
Total <- subset(demand.area, Year == c & Quarter ==b)
}
#Total <- merge(all_states, demand.area,all = TRUE)
Total <- Total[Total$Location!="district of columbia",]
setwd("D:/HCL/LikeMe")
states <- data.frame( fread("states.csv"))
colnames(states) <- c("Column1", "long", "lat", "order", "hole", "piece", "Location", "group")
#Outer-join onto the full state list so absent states appear with NA,
#then zero-fill the missing demand.
st <- data.frame(Location = unique(map_data('county')$region))
Total <- merge(st, Total, all = TRUE)
Total$Demand[is.na(Total$Demand)] <- 0
Total <- merge(states, Total, all = TRUE)
Total$Demand[is.na(Total$Demand)] <- 0
Total <- data.frame(State = Total$Location, Demand = Total$Demand)
Total <- subset(Total, Total$State != "district of columbia")
Total <- subset(Total, tolower(Total$State) %in% tolower(unique(states$Location)))
#Demand for all the states have been calculated.
Total <- Total[1:50,]
#print("stop maptable")
return(Total)
}
#Function to create a heat map
maps <- function(a,b,c, country){
# Builds the data behind the location heat map and the top-locations table.
#   a       - skill bucket name, or "All" for every skill
#   b       - quarter (1-4);  c - year
#   country - "India" selects Indian rows; anything else selects USA rows
# Returns the top-5 locations by initial demand, with a next-quarter ARIMA
# forecast column appended.
# NOTE(review): depends on the global demand.dump and hard-coded Windows
# paths, and calls setwd() (a process-wide side effect) — confirm intended.
if(country=="India"){
#setwd("D:/HCL/LikeMe")
#demand <- data.frame( fread("dump2.csv", header = TRUE, stringsAsFactors = FALSE))
demand <- subset(demand.dump, demand.dump$country=="INDIA")
#setwd("D:/HCL/LikeMe/Demand")
#write.csv(demand,"demand.csv")
#write.csv(demand, "dump.csv")
}else{
#setwd("D:/HCL/LikeMe")
#demand <- data.frame( fread("dump2.csv", header = TRUE, stringsAsFactors = FALSE))
demand <- subset(demand.dump, demand.dump$country=="USA")
#setwd("D:/HCL/LikeMe/Demand")
#write.csv(demand,"demand.csv")
#write.csv(demand, "dump.csv")
}
#setwd("D:/HCL/LikeMe/Demand")
#master.demand <-data.frame( fread("dump.csv"))
master.demand<-demand
#print("Start Maps")
demand.area <- master.demand
# Calendar fields derived from the approval date (lubridate helpers).
demand.area$quarter <- quarter(dmy(demand.area$Approval.Date))
demand.area$year <- year(dmy(demand.area$Approval.Date))
demand.area$month <- month(dmy(demand.area$Approval.Date))
# Aggregate initial demand per quarter/year/location (plus skill when given).
if(a!="All"){
demand.area <- aggregate(demand.area$InitialDemand, by = list(demand.area$quarter,demand.area$year,demand.area$Skill.Bucket, demand.area$Personal.SubArea), FUN = sum)
colnames(demand.area) <- c("Quarter", "Year", "Skill", "Location", "Demand")
}else{
demand.area <- aggregate(demand.area$InitialDemand, by = list(demand.area$quarter,demand.area$year, demand.area$Personal.SubArea), FUN = sum)
colnames(demand.area) <- c("Quarter", "Year", "Location", "Demand")
}
demand.area$time <- paste("Q",demand.area$Quarter,"-",demand.area$Year)
# County outlines from the maps package; rename so region becomes Location.
all_states <- map_data("county")
colnames(all_states) <- c("long","lat", "group", "order", "Location", "subregion")
demand.area$Location <- tolower(demand.area$Location)
# Restrict to the requested skill/year/quarter.
if(a!="All"){
Total <- subset(demand.area, demand.area$Skill == a & Year == c & Quarter ==b)
}else{
Total <- subset(demand.area, Year == c & Quarter ==b)
}
Total <- Total[Total$Location!="district of columbia",]
setwd("D:/HCL/LikeMe")
states <- data.frame( fread("states.csv"))
colnames(states) <- c("Column1", "long", "lat", "order", "hole", "piece", "Location", "group")
# Outer-join against the full location list so locations with no demand
# are present with Demand = 0.
st <- data.frame(Location = unique(map_data('county')$region))
Total <- merge(st, Total, all = TRUE)
Total$Demand[is.na(Total$Demand)] <- 0
Total <- merge(states, Total, all = TRUE)
Total$Demand[is.na(Total$Demand)] <- 0
Total <- data.frame(State = Total$Location, Demand = Total$Demand)
Total <- subset(Total, Total$State != "district of columbia")
# Keep the five locations with the highest non-zero demand.
Total <- Total[order(Total$Demand,decreasing = TRUE),]
Total <- subset(Total, Total$Demand!=0)
Total <- Total[1:5,]
# Forecasts next-quarter demand for one location from the weekly history in
# dump.csv via auto.arima; returns a message when the location has no rows.
# Closes over `a` (the skill filter) from the enclosing call.
forecasting <- function(loca){
setwd("D:/HCL/LikeMe/Demand")
#print(loca)
demand <- data.frame( fread("dump.csv",stringsAsFactors = F))
demand$date <- dmy(demand$Req.Date)
demand$quarter <- quarter(demand$date)
demand$month <- month(demand$date)
demand$year <- year(demand$date)
demand$week <- week(demand$date)
dates <- demand
if(a!="All"){
demand <- demand %>% filter(demand$Skill.Bucket == a)
}
# NOTE(review): location.demand is computed but never used afterwards.
location.demand <- aggregate(demand$InitialDemand, by=list(demand$Personal.SubArea), FUN = sum)
location.demand <- location.demand[order(location.demand$x, decreasing = T),]
location.demand <- location.demand[1:3,]$Group.1
demand <- demand %>% filter(tolower(demand$Personal.SubArea) == tolower(loca))
if(nrow(demand)==0){
return("No forecast available.")
}else{
# Weekly demand series padded onto a full week-of-year template so the
# time series has no gaps.
demand <- aggregate(demand$InitialDemand, by = list(demand$week, demand$year), FUN = sum)
colnames(demand) <- c("Week","Year","Demand")
setwd("D:/HCL/LikeMe")
template <- data.frame( fread("template2015.csv"))
colnames(template) <- c("Year", "Week")
demand <- merge(template, demand, all = TRUE)
demand$Demand[is.na(demand$Demand)] <- 0
# Truncate the series at the end of the last complete quarter, based on
# the month of the newest observation.
if(month(max(dates$date)) %in% c(1,2,3)){
n <- length(unique(dates$year))-1
n <- n*52
demand.ts <- tsclean(ts(demand[1:n,]$Demand,frequency = n))
}
if(month(max(dates$date)) %in% c(4,5,6)){
n <- length(unique(dates$year))-1
n <- (n*52)+13
demand.ts <- tsclean(ts(demand[1:n,]$Demand,frequency = 52))
}
if(month(max(dates$date)) %in% c(7,8,9)){
n <- length(unique(dates$year))-1
n <- (n*52)+26
demand.ts <- tsclean(ts(demand[1:n,]$Demand,frequency = 52))
}
if(month(max(dates$date)) %in% c(10,11,12)){
n <- length(unique(dates$year))-1
n <- (n*52)+38
demand.ts <- tsclean(ts(demand[1:n,]$Demand,frequency = 52))
}
# Sum of the next 12 forecast weeks ~ next-quarter demand.
return(round(sum(forecast(auto.arima(demand.ts),h=12)$mean[1:12])))
}
}
# Run the forecast for each of the top locations.
toplocation <- Total$State
toplocation <- lapply(toplocation,function(x)forecasting(x))
Total$'Forecast for Next Quarter' <- unlist(toplocation)
Total <- subset(Total, !is.na(Total$Demand))
return(Total)
}
##############################################Customer Trend####################################
customer <- function(cid, year, quarter, number){
# Returns the top `number` customers by overall demand for the given
# quarter/year, with the selected customer (`cid`) pinned to the first row.
#   cid     - customer identifier to highlight
#   year    - calendar year;  quarter - quarter number (1-4)
#   number  - number of rows to return
# Side effect: changes the process working directory (hard-coded path).
setwd("D:\\HCL\\LikeMe")
demand <- data.frame( fread("master.csv"))
# Total demand per (quarter, year, customer).
customer.demand <- aggregate(demand$overall.demand,
by = list(demand$quarter, demand$year, demand$customer),
FUN = sum)
# BUG FIX: the original compared Group.3 against the undefined name
# `customer` (which resolves to this function itself) instead of the
# `cid` argument, so the selected customer was never matched.
customer.select <- filter(customer.demand,
customer.demand$Group.1 == quarter, customer.demand$Group.2 == year, customer.demand$Group.3 == cid)
customer.notselect <- filter(customer.demand,
customer.demand$Group.1 == quarter, customer.demand$Group.2 == year, customer.demand$Group.3 != cid)
# Everyone else ranked by demand; selected customer stays on top.
customer.notselect <- customer.notselect[order(customer.notselect$x, decreasing = TRUE),]
customer.together <- rbind(customer.select, customer.notselect)
colnames(customer.together) <- c("Quarter", "Year", "Customer", "Demand")
return(customer.together[1:number,])
}
##############################################Skill Vs Customer#########################################
custskill1 <- function(c, d, e){
# Top-10 customers (with segment) by overall demand for skill bucket `c`
# ("All" = no skill filter), year `d`, quarter `e`.
# Reads demand.csv from a hard-coded path; changes the working directory.
setwd("D:/HCL/Likeme/Demand")
cust <- data.frame( fread("demand.csv", header = TRUE, stringsAsFactors = FALSE))
# Derived fulfilment measures.
cust$Total.Fulfilled <- cust$Internal_Filled + cust$External_Joined
cust$Unfulfilled.Overdue <- cust$InitialDemand - (cust$Internal_Filled + cust$External_Joined + cust$DroppedPos)
# Keep only the analysis columns, in a fixed order.
keep <- c("V1", "ReqNo", "Joining.Level.2", "Customer", "Segment",
"Req.Date", "Skill.Bucket", "Primary.Skill.Area", "Requisition.Source",
"Internal_Filled", "External_Joined", "Total.Fulfilled",
"Unfulfilled.Overdue", "Vacancy", "DroppedPos", "InitialDemand",
"vAdditionalRemarks", "Personal.SubArea")
cust <- cust[, keep]
# Drop rows without a requisition date, then rename to short labels.
cust <- cust[complete.cases(cust$Req.Date), ]
colnames(cust) <- c("data.src", "srn", "l2", "customer", "segment", "date",
"skill", "sr.skill", "req.sor", "int.ful", "ext.ful",
"tot.ful", "un.od", "net.demand", "demand.drop",
"overall.demand", "job.desc", "Location")
cust$date <- dmy(cust$date)
# Categorical columns become factors.
for (col in c("data.src", "l2", "segment", "skill", "req.sor")) {
cust[[col]] <- factor(cust[[col]])
}
cust$quarter <- as.numeric(quarter(cust$date))
cust$year <- year(cust$date)
# Restrict to the requested year/quarter (and skill, unless "All").
if (c != "All") {
sel <- subset(cust, cust$year == d & cust$quarter == e & cust$skill == c)
} else {
sel <- subset(cust, cust$year == d & cust$quarter == e)
}
# Demand per customer/segment, ranked, top 10 ("Segement" kept as-is:
# downstream consumers use that column name).
by.cust <- aggregate(sel$overall.demand, by = list(sel$customer, sel$segment), FUN = sum)
colnames(by.cust) <- c("Customer", "Segement", "Demand")
by.cust <- by.cust[order(by.cust$Demand, decreasing = TRUE), ]
by.cust <- by.cust[1:10, ]
# Fewer than 10 groups leaves NA padding rows; drop them.
if (sum(is.na(by.cust$Demand)) > 0) {
by.cust <- by.cust[!is.na(by.cust$Demand), ]
}
return(by.cust)
}
###############################################Dashboard tabs#####################################
tabs <- function(f,g,h){
# Computes the dashboard headline numbers for skill bucket `f` ("All" = no
# skill filter), year `g`, quarter `h`: overall, fulfilled, dropped and
# unfulfilled-overdue demand, plus their percentages of overall demand.
# Reads demand.csv from a hard-coded path; changes the working directory.
setwd("D:/HCL/LikeMe/Demand")
master <- data.frame( fread("demand.csv", header = TRUE, stringsAsFactors = FALSE))
# master<-demand
# Derived fulfilment measures.
master$Total.Fulfilled <- master$Internal_Filled+master$External_Joined
master$Unfulfilled.Overdue <- master$InitialDemand-(master$Internal_Filled+master$External_Joined+master$DroppedPos)
# Keep only the analysis columns, in a fixed order.
master <- master[,c("V1", "ReqNo", "Joining.Level.2","Customer","Segment",
"Req.Date","Skill.Bucket","Primary.Skill.Area","Requisition.Source",
"Internal_Filled","External_Joined","Total.Fulfilled",
"Unfulfilled.Overdue","Vacancy","DroppedPos","InitialDemand","vAdditionalRemarks","Personal.SubArea")]
# Drop rows without a requisition date, then rename to short labels.
master <- master[complete.cases(master$Req.Date),]
colnames(master) <- c("data.src","srn","l2","customer","segment","date","skill","sr.skill","req.sor",
"int.ful","ext.ful","tot.ful","un.od","net.demand","demand.drop",
"overall.demand","job.desc","Location")
master$date <- dmy(master$date)
master$data.src <- factor(master$data.src)
master$l2 <- factor(master$l2)
master$segment <- factor(master$segment)
master$skill <- factor(master$skill)
master$req.sor <- factor(master$req.sor)
master$quarter <- quarter(master$date)
master$year <- year(master$date)
# Aggregate the four measures: grouped by skill for a specific bucket,
# or by year/quarter when f == "All".
if(f!="All"){
fil.year <- subset(master, master$year == g & master$quarter == h & master$skill == f)
ovr.demand <- aggregate(fil.year$overall.demand, by = list(fil.year$skill), FUN = sum)
ful.demand <- aggregate(fil.year$tot.ful, by = list(fil.year$skill), FUN = sum)
drop.demand <- aggregate(fil.year$demand.drop, by = list(fil.year$skill), FUN = sum)
unful.demand <- aggregate(fil.year$un.od, by = list(fil.year$skill), FUN = sum)
}else{
fil.year <- subset(master, master$year == g & master$quarter == h)
ovr.demand <- aggregate(fil.year$overall.demand, by = list(fil.year$year,fil.year$quarter), FUN = sum)
ful.demand <- aggregate(fil.year$tot.ful, by = list(fil.year$year,fil.year$quarter), FUN = sum)
drop.demand <- aggregate(fil.year$demand.drop, by = list(fil.year$year,fil.year$quarter), FUN = sum)
unful.demand <- aggregate(fil.year$un.od, by = list(fil.year$year,fil.year$quarter), FUN = sum)
}
# Summary table with percentage columns ("demaned" is a historical typo
# kept to avoid touching code in a documentation pass).
table.demaned <- data.frame(Overall = ovr.demand$x, Ful = ful.demand$x, drop = drop.demand$x, un.ud =unful.demand$x )
table.demaned$ful.per <- round((table.demaned$Ful/table.demaned$Overall)*100)
table.demaned$drop.per <- round((table.demaned$drop/table.demaned$Overall)*100)
table.demaned$od.per <- round((table.demaned$un.ud/table.demaned$Overall)*100)
return(table.demaned)
}
########################################################Popularity module#####
Popular <- function(country,customer, n, buky, expe, band, quat){
# Ranks skills by demand-weighted mentions ("instances") in 2016 vs 2017
# and reports the rank movement (Delta = 2016 rank - 2017 rank).
#   country/customer/buky/expe/band/quat - row filters on demand.dump;
#                                          "" means "no filter"
#   n - number of top skills to return
# Relies on globals: dd (document x skill counts), dd_skills, demand.dump.
# Improvements over the original: removed the leftover debug `str(Dat)`
# (its twin was already commented out), the unused `taba` local, the
# redundant dd1 round-trip, and dead commented-out code.
# Restrict to the known skill columns, binarise, weight by initial demand.
dd <- dd[,dd_skills$Skills]
dd <- as.matrix(dd)
dd[dd>0] <- 1
dd <- dd*demand.dump$InitialDemand
dd <- as.data.frame(dd)
# Row index sets for each optional filter; default = all rows.
A <- 1:nrow(demand.dump)
if (customer!=""){
A <- which(demand.dump$Customer == customer)}
B <- 1:nrow(demand.dump)
if (country!=""){
B <- which(demand.dump$country == country)}
C <- 1:nrow(demand.dump)
if (buky!="") {
C <- which(demand.dump$Skill.Bucket==buky)}
X <- 1:nrow(demand.dump)
if (expe!="") {
X <- which(demand.dump$Experience==expe)}
Y <- 1:nrow(demand.dump)
if (band!="") {
Y <- which(demand.dump$Band==band)}
Z <- 1:nrow(demand.dump)
if (quat!=""){
Z <- which(demand.dump$quarter==quat)
}
# Rows that satisfy every active filter.
G <- intersect(intersect(intersect(intersect(intersect(A,B),C),X),Y),Z)
demand.dump_USA_Micro <- demand.dump[G,]
dd_USA_customer <- dd[G,]
combi <- cbind(demand.dump_USA_Micro,dd_USA_customer)
# 2016 ranking: total weighted instances per skill, ranked descending.
combi_2016 <- combi[combi$year==2016,]
Dat <- data.frame(Instances_2016= colSums(combi_2016[,dd_skills$Skills]))
Dat1 <- data.frame(Dat, rownames(Dat))
Dat12 <- Dat1[order( Dat1$Instances_2016, decreasing =TRUE),]
Dat12$Rank_2016 <- seq.int(nrow(Dat12))
# 2017 ranking, same construction.
combi_2017 <- combi[combi$year==2017,]
Dat <- data.frame(Instances_2017= colSums(combi_2017[,dd_skills$Skills]))
Dat1 <- data.frame(Dat, rownames(Dat))
Dat123 <- Dat1[order( Dat1$Instances_2017, decreasing =TRUE),]
Dat123$Rank_2017 <- seq.int(nrow(Dat123))
# Join the two years on the skill-name column and compute rank movement.
Dat123_new <- merge(Dat123,Dat12)
Dat123_new$Delta <- Dat123_new$Rank_2016 - Dat123_new$Rank_2017
Dat123_new <- Dat123_new[order( Dat123_new$Instances_2017, decreasing =TRUE),]
tabb <- head(Dat123_new,n)
names(tabb) <- c("Skills","Demand in 2017", "Rank in 2017", "Demand in 2016", "Rank in 2016", "Delta")
# Drop skills with no demand in either year.
tabb <- subset(tabb, !(tabb$'Demand in 2017'==0 & tabb$'Demand in 2016'==0))
return(tabb)
}
############################################Recommendation System#########################################
candidate_recommendation <- function(j){
# For the 10 oldest open ("Due or Overdue") requisitions in skill bucket
# `j`, recommends the five best-matching candidates per requisition using
# TF-IDF weighted n-grams and cosine similarity (quanteda + lsa).
# NOTE(review): both the "Internal" and "External" columns come from the
# same recommendations() call over external.csv — confirm this is intended.
setwd("D:/HCL/Demand Forecast")
demand <-data.frame( fread("demand.csv", stringsAsFactors = FALSE) )
demand$date <- as.Date(demand$Req.Date, "%m/%d/%Y")
demand$open.days <- as.Date(Sys.Date(), "%m/%d/%Y")-demand$date
# Oldest 10 distinct due/overdue requisitions for the bucket.
demand <- subset(demand, demand$Skill.Bucket == j)
demand <- subset(demand, demand$Data.Source == "Due or Overdue demands at the end of the month")
demand <- demand[order(demand$open.days, decreasing = TRUE),]
demand <- demand[!duplicated(demand$SR.No),]
demand <- demand[1:10,]
demand$rqrmt <- paste(demand$SR.Skill, demand$Requirement)
# Scores every candidate resume against one requirement string and returns
# the top-5 candidate names.
recommendations <- function(rqrmt){
setwd("D:/HCL/LikeMe")
skills <- data.frame( fread("skillClustering.csv", header = TRUE, stringsAsFactors = FALSE))
stp <-data.frame( fread("stopwords.csv", header = TRUE, stringsAsFactors = FALSE))
setwd("D:/HCL/LikeMe/Resumes/External")
candidates <- data.frame( fread("external.csv", stringsAsFactors = FALSE))
original <- data.frame( fread("external.csv", stringsAsFactors = FALSE))
candidates$requirement <- paste(candidates$Skills, candidates$TProfile)
candidates <- select(candidates,File_Name, Skills, TProfile, requirement)#, Customer.Flag, experience.flag, designation.flag, l2.flag, Employee.Code)
#print("Adding Requirement")
# The requirement is prepended as a pseudo-candidate so its similarity to
# each real candidate can be read off the first cosine column ("text1").
new_requirement <- data.frame(File_Name = "999999",Skills = "sndmnvs",TProfile = "sajshdb", requirement = rqrmt)
new_requirement <- select(new_requirement,File_Name, Skills,TProfile, requirement)#, Customer.Flag, experience.flag, designation.flag, l2.flag, Employee.Code)
candidates <- rbind(new_requirement, candidates)
# TF-IDF helpers: row-normalised term frequency, log10 inverse document
# frequency, and their product.
term.frequency <- function(row) {
row / sum(row)
}
inverse.doc.freq <- function(col) {
corpus.size <- length(col)
doc.count <- length(which(col > 0))
log10(corpus.size / doc.count)
}
tf.idf <- function(x, idf) {
x * idf
}
# Tokenise the requirement: lowercase, drop custom stopwords, stem, 1-5-grams.
tokens <- tokens(as.character(new_requirement$requirement), what = "word", remove_numbers = TRUE, remove_punct = TRUE)
tokens <- tokens_tolower(tokens)
tokens <- tokens_select(tokens, stp$TEXT, selection = "remove")
train.tokens.dfm <- dfm(tokens, tolower = FALSE)
tokens <- tokens_wordstem(tokens, language = "english")
tokens <- tokens_ngrams(tokens, n = 1:5)
# Known skill vocabulary, restricted to terms present in the requirement.
skills.tokens <- tokens(skills$value, what = "word", remove_numbers = TRUE, remove_punct = TRUE)
skills.tokens <- tokens_tolower(skills.tokens)
skills.tokens <- tokens_select(skills.tokens, stp$TEXT, selection = "remove")
skills.tokens <- tokens_ngrams(skills.tokens, n = 1:5)
skills.tokens <- tokens_select(tokens, unlist(as.list(skills.tokens)), selection = "keep")
skills.tokens <- tokens_select(skills.tokens, stopwords(), selection = "remove")
tokens.set <- append(tokens, skills.tokens)
# Candidate documents, restricted to the skill vocabulary.
tokens1 <- tokens(as.character(candidates$requirement), what = "word", remove_numbers = TRUE, remove_punct = TRUE)
tokens1 <- tokens_tolower(tokens1)
tokens1 <- tokens_select(tokens1, stopwords(), selection = "remove")
tokens1 <- tokens_ngrams(tokens1, n = 1:5)
tokens1 <- tokens_select(tokens1, unlist(as.list(skills.tokens)), selection = "keep")
tokens.dfm <- dfm(tokens1, tolower = FALSE)
tokens.matrix <- as.matrix(tokens.dfm)
tokens.df <- as.data.frame(tokens.matrix)
# TF-IDF weighting; rows with no terms (NaN from 0/0) become all-zero.
tokens.df <- apply(tokens.matrix, 1, term.frequency)
tokens.idf <- apply(tokens.matrix, 2, inverse.doc.freq)
tokens.tfidf <- apply(tokens.df, 2, tf.idf, idf = tokens.idf)
tokens.tfidf <- t(tokens.tfidf)
incomplete.cases <- which(!complete.cases(tokens.tfidf))
tokens.tfidf[incomplete.cases,] <- rep(0.0, ncol(tokens.tfidf))
tokens.df <- as.data.frame(tokens.tfidf)
tokens <- as.matrix(tokens.df)
tokens <- t(tokens)
library(lsa)
#print("Scoring")
# Score pass 1: cosine similarity on the skill-restricted vocabulary.
start.time <- Sys.time()
if(nrow(candidates)>1){
cos <- cosine(tokens)
cos <- as.data.frame(cos)
score1 <- data.frame(File_Name = candidates$File_Name, score = cos$text1)
score1 <- score1[order(score1$score, decreasing = TRUE),]
names <- data.frame(File_Name = original$File_Name, Name = original$Full_Name, skill = original$Skills, experience = original$Years.Exp, previous.employer = original$TProfile)
score1 <- left_join(score1, names, by = "File_Name")
colnames(score1) <- c("File Name", "Score", "Candidate Name", "Skills"," Experience", "Current Employer")
if(nrow(score1)==0){
score1 <- data.frame(NO = character(), MATCHING = character(), PROFILE = character(), FOUND = character())
}
}else{
score1 <- data.frame(NO = character(), MATCHING = character(), PROFILE = character(), FOUND = character())
}
total.time <- Sys.time() - start.time
total.time
score1 <- score1[order(score1$`Candidate Name`, decreasing = TRUE),]
# Score pass 2: same pipeline over the combined requirement+skill
# vocabulary (tokens.set instead of skills.tokens).
tokens1 <- tokens(as.character(candidates$requirement), what = "word", remove_numbers = TRUE, remove_punct = TRUE)
tokens1 <- tokens_tolower(tokens1)
tokens1 <- tokens_select(tokens1, stopwords(), selection = "remove")
tokens1 <- tokens_ngrams(tokens1, n = 1:5)
tokens1 <- tokens_select(tokens1, unlist(as.list(tokens.set)), selection = "keep")
tokens.dfm <- dfm(tokens1, tolower = FALSE)
tokens.matrix <- as.matrix(tokens.dfm)
tokens.df <- as.data.frame(tokens.matrix)
tokens.df <- apply(tokens.matrix, 1, term.frequency)
tokens.idf <- apply(tokens.matrix, 2, inverse.doc.freq)
tokens.tfidf <- apply(tokens.df, 2, tf.idf, idf = tokens.idf)
tokens.tfidf <- t(tokens.tfidf)
incomplete.cases <- which(!complete.cases(tokens.tfidf))
tokens.tfidf[incomplete.cases,] <- rep(0.0, ncol(tokens.tfidf))
tokens.df <- as.data.frame(tokens.tfidf)
tokens <- as.matrix(tokens.df)
tokens <- t(tokens)
library(lsa)
start.time <- Sys.time()
if(nrow(candidates)>1){
cos <- cosine(tokens)
cos <- as.data.frame(cos)
score2 <- data.frame(File_Name = candidates$File_Name, score = cos$text1)
score2 <- score2[order(score2$score, decreasing = TRUE),]
names <- data.frame(File_Name = original$File_Name, Name = original$Full_Name, skill = original$Skills, experience = original$Years.Exp, previous.employer = original$TProfile)
score2 <- left_join(score2, names, by = "File_Name")
colnames(score2) <- c("Employee Code", "Score", "Candidate Name", "Skills"," Experience", "Current Employer")
if(nrow(score2)==0){
score2 <- data.frame(NO = character(), MATCHING = character(), PROFILE = character(), FOUND = character())
}
}else{
score2 <- data.frame(NO = character(), MATCHING = character(), PROFILE = character(), FOUND = character())
}
total.time <- Sys.time() - start.time
total.time
score2 <- score2[order(score2$`Candidate Name`, decreasing = TRUE),]
# Combine both passes; drop degenerate perfect (>= 1.0) matches, which
# include the pseudo-candidate matching itself.
score1$scores <- score2$Score
score1$cumulative <- score1$Score+score1$scores
score1 <- score1[order(score1$cumulative, decreasing = TRUE),]
score1 <- subset(score1, score1$Score<1.0 & score1$scores<1.0)
return(score1$`Candidate Name`[1:5])
}
# Recommendations per requisition, flattened to comma-separated strings.
demand$int.names <- lapply(demand$rqrmt,function (x) unlist( recommendations(x)))
demand$int.names <- vapply(demand$int.names, paste, collapse = ", ", character(1L))
demand$ext.names <- lapply(demand$rqrmt,function (x) unlist( recommendations(x)))
demand$ext.names <- vapply(demand$ext.names, paste, collapse = ", ", character(1L))
demand <- demand[,c("SR.No","Skill.Bucket","Customer.Name","open.days","Requirement","int.names","ext.names")]
colnames(demand) <- c("SR NO", "Skill Bucket","Customer","Open Days", "Job Description","Internal Candidates","External Candidates")
return(demand)
}
################################################Clue#############################################
#Meaning and alternate skills pulled from the alternate keywords
clue<- function(skillword){
# Case-insensitive lookup of `skillword` in the global `tech` table
# (column "Titile" is the title column, sic). Returns the matching path(s)
# as a character vector, or the literal string "NA" when nothing matches.
# Improvement: the filter mask is computed once instead of twice.
hits <- as.character(tech$path[tolower(tech$Titile) == tolower(skillword)])
if (length(hits) == 0) {
return("NA")
}
return(hits)
}
#Builds the app UI using the shinydashboard template.#######################
# Shiny UI definition (shinydashboard): header, sidebar menu, and one
# tabItem per module. Tab names here must match the menuSubItem tabNames
# and the output ids used in the server functions above.
ui <- dashboardPage(#skin = "blue",
#Header for the App, The sidebar and the menu items.
dashboardHeader(title = "Recruitment Analytics"),
dashboardSidebar(
sidebarMenu(
menuItem("About", tabName = "about"),
menuItem("Like - Me", menuSubItem("Skill Radar", tabName = "skill", icon = icon("puzzle-piece")),
menuSubItem("Job Board Search", tabName = "search3", icon = icon("search")),
#menuSubItem("Content Based Search", tabName = "search1", icon = icon("search")),
menuSubItem("Context Based Search", tabName = "search2", icon = icon("search-plus")),
menuSubItem("Candidate Radar", tabName = "reco", icon = icon("search-plus")),icon = icon("id-card")
),
menuItem("DSM +",
#menuSubItem("Demand Forecast", tabName = "demand", icon = icon("line-chart")),
#menuSubItem("Location based Demand", tabName = "location"),
menuSubItem("Skill based Insights", tabName = "customer"),
icon = icon("bar-chart")),
menuItem("Skill Popularity",
#menuSubItem("Demand Forecast", tabName = "demand", icon = icon("line-chart")),
#menuSubItem("Location based Demand", tabName = "location"),
menuSubItem("Hottest Skills 2017", tabName = "Pop"),
icon = icon("bar-chart"))
)
),
#Dashboard Body with all the UI elements for different modules.
# Inline CSS: white content background and HCL-blue (#003da5) header/sidebar.
dashboardBody(tags$head(tags$style(HTML('.content{
background-color: white;
}
.skin-blue .main-header .navbar{
background-color:#003da5}
.skin-blue .main-header .logo{
background-color:#003da5
}
.skin-blue .sidebar-menu > li.active > a, .skin-blue .sidebar-menu > li:hover > a{
border-left-color:#003da5
}
h1{
font-family:"Cambria"
}'))),
tabItems(
# --- Candidate Radar: recommendations for the oldest open JDs ---
tabItem(tabName = "reco",
tags$h1("Candidate Radar"),
fluidRow(
box(
title = "Get recommendations for the oldest Job Descriptions that are open.",
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
selectInput("recoskill","Select Skill Bucket",choices = sort(unique(demand$Skill.Bucket))),
actionButton(inputId = "recogo",label = "Recommend",color="red")
),
mainPanel( DT::dataTableOutput("recoresults"))
)
),
# --- Job Board Search: alternative-skill keyword search ---
tabItem(tabName = "search3",
tags$h1("Job Board Search"),
fluidRow(
box(
title = "Search the web for alternative skills.",
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
textInput("kill1","Keyword 1",""),
textInput("kill2","Keyword 2",""),
textInput("kill3","Keyword 3",""),
actionButton(inputId = "go6",label = "generate Keywords",color="red")
),
mainPanel( DT::dataTableOutput("results2"))
)),
# --- Demand Forecast tab (menu entry currently commented out) ---
tabItem(tabName = "demand",
tags$h1("Forecast Demand"),
fluidRow(
box(
title = "Demand Forecast Input",
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
selectInput("1skill","Select Skill",choices = sort(unique(demand$Skill.Bucket))),
actionButton(inputId = "go2", label = "Forecast Demand")
),
box(
title = "Actual Vs Forecast Plot",
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
collapsed = TRUE,
plotOutput("coolplot")),
mainPanel(DT::dataTableOutput("results"))
)),
# --- Location-based Demand tab (menu entry currently commented out) ---
tabItem(tabName = "location",
tags$h1("Location based Demand"),
fluidRow(
box(
title = "Select Skill, Year and Quarter.",
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
selectInput("skill1","Select Skill",choices = sort(unique(demand$Skill.Bucket))),
selectInput("year","Select Year",choices = c(2014,2015,2016,2017)),
selectInput("quarter","Select Quarter",choices = c(1,2,3,4)),
actionButton(inputId = "go3", label = "Get Demand", color = "red")
),
box(
title = "Demand based on Location in the US",
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
plotOutput("map1")),
box(
title = "Demand Statistics based on Location",
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
collapsed = TRUE,
DT::dataTableOutput("maptable1"))
)
),
# --- About tab ---
tabItem(tabName = "about",
tags$h1("HCL's Recruitment Analytics Tool"),
tags$h3("A project undertaken to enhance recruiting and insert analytics for futureproofing Talent acquisition "),
tags$br(),
tags$h1("Like - Me:"),
tags$h3("Creating sourcing queries and striving to get a" ,tags$em("Content and Context"), "based results .
"),
tags$br(),
tags$h1("DSM+"),
tags$h3("Forecasting demand for On time fulfillment and create supply for",tags$em("heterogeneous"), "demand.")
),
# --- Skill Radar tab ---
tabItem(tabName = "skill",
tags$h1("Skill Radar"),
tags$h3("Data : 31049 Job descriptions (Jan 2016 to Aug 2017)"),
tags$h4("Results available for 582 Customers,33 Skill buckets,
65 different locations,2835 Technological keywords and all their combinations "),
fluidRow(
box(
title = "Input for skill radar",
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
uiOutput("series"),
uiOutput("varun1"),
uiOutput("Box3"),
uiOutput("Box4"),
uiOutput("Box5"),
uiOutput("Box6"),
uiOutput("Box7"),
uiOutput("Box111"),
valueBoxOutput("frequency")
),
box(
title = "Skill Radar",
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
plotOutput("skills")
),
box(
title = "Boolean Strings",
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
dataTableOutput("skills3")
),
# box(
# title = "Customer Radar",
# status = "danger",
# solidHeader = TRUE,
# collapsible = TRUE,
# plotlyOutput("skills2")
# ),
mainPanel( dataTableOutput("links"))
)),
################################################UI Pouarity ##########################
# --- Hottest Skills (popularity) tab ---
tabItem(tabName = "Pop",
tags$h1("Hot SKills"),
fluidRow(
box(
title = "Customer name",
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
radioButtons("Coun","Select the Region ", c("India" = "INDIA", "USA" = "USA")),
selectInput("cus","Select Customer",choices = c("", sort(unique(demand.dump$Customer)))),
sliderInput(inputId = "num", label = "Choose a number", value = 20, min=1, max = 100),
selectInput("quat","Select Quarter", choices =c("", sort(unique(demand.dump$quarter)))),
actionButton(inputId = "Pop2", label = "Go")
),
box(
title = "Customer name",
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
selectInput("buky","Select SkillBucket", choices =c("", sort(unique(demand.dump$Skill.Bucket)))),
selectInput("band","Select Band", choices =c("", sort(unique(demand.dump$Band)))),
selectInput("expe","Select Experiece", choices =c("", sort(unique(demand.dump$Experience))))
),
mainPanel( formattableOutput("Table"))
)),
# --- Context Based Search tab ---
tabItem(tabName = "search2",
tags$h1("Context Based Search"),
fluidRow(
box(
title = "Search for Candidates based on Skills and Context",
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
radioButtons("stype","Where do you want to search?", c("Outside HCL" = "eser", "Inside HCL" = "iser")),
radioButtons("jobboard","Do you want to include similar skills?", c("Yes" = "yes", "No" = "no")),
textAreaInput("ski.ll", "Enter Skills*"),
tags$h3("OR"),
selectInput("sk.ill", "Select the Primary Skill*", choices = c("I have already entered the skills",as.character(unique(rowman$actual)))),
sliderInput(inputId = "num1", label = "Select the maximum number of skills to be used", value = 6, min=1, max = 50),
textAreaInput("job", "Job Description"),
textAreaInput("functional", "What are the functional requirements?"),
textAreaInput("systems", "What are the system requirements?"),
#textAreaInput("composition", "What are the composition requirements?"),
selectInput("exp", "Years of experience", choices = c("No Preference",unique(datasetexp$experience)[c(1:6,8)])),
selectInput("clack","Which customer are you hiring for?",choices = c(" ",unique(demandda$Customer))),
actionButton(inputId = "go", label = "Find Profiles")
),
mainPanel( DT::dataTableOutput("score"))
)
),
# --- Demand Dashboard (DSM+) tab ---
tabItem(
tabName = "customer",
tags$h1("Demand Dashboard"),
tags$h3(paste("The data available for forecast is from 2016-01-01 to", maxdate)),
fluidRow(
fluidRow(
box(
title = "Actuals",
#title = paste("Actuals for ",input$forecast.ss[1]," for the year ",input$forecast.yy[1],"and quarter ",input$forecast.qq[1]),
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
valueBoxOutput("overall",width = 3),
valueBoxOutput("fulfillment", width = 3),
valueBoxOutput("drop", width = 3),
valueBoxOutput("od", width = 3)
),
box(
title = "Forecasts",
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
valueBoxOutput("frcst", width = 6),
valueBoxOutput("revenue", width = 6)
)
),
box(
title = "Select Skill bucket, year and quarter.",
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
radioButtons("custloc","Select the Region for Forecast.", c("India" = "India", "USA" = "USA")),
uiOutput("forecast.skill"),
uiOutput("forecast.year"),
uiOutput("forecast.quarter"),
actionButton(inputId = "cust", label = "Go", color = "red")
),
box(
title = "Upload new Demand Data",
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
titlePanel("Appending New Demand"),
fileInput("file1", "Upload New Demand Data",
multiple = TRUE,
accept = c("text/csv",
"text/comma-separated-values,text/plain",
".csv"))
),
mainPanel(
DT::dataTableOutput("contents")
),
box(
title = "Top Customers",
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
plotlyOutput("custplot"))
),
fluidRow(
box(title = "Demand Heat Map",
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
plotlyOutput("plot")
),
box(
title = "Initial Demand Report for Various Locations.",
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
collapsed = FALSE,
DT::dataTableOutput("maptable")
)
),
fluidRow(
box(title = "Fulfillment for different Location",
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
plotlyOutput("ful.loc")
),
box(
title = "Fulfillment Percentage for different Customer",
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
collapsed = FALSE,
plotlyOutput("ful.cust")
)
),
fluidRow(
box(title = "Forecast for Combination of Top Skills and Top Customers for the selected skills",
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
DT::dataTableOutput("combforecast")
),
box(
title = "Forecast for the top customers for the selected skill",
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
collapsed = FALSE,
DT::dataTableOutput("custforecast")
)
)
),
# --- Popularity Dashboard tab (no menu entry points here currently) ---
tabItem(
tabName = "popularity",
tags$h1("Popularity Dashboard"),
fluidRow(
box(
title = "Select the country and customer",
status = "danger",
solidHeader = TRUE,
collapsible = TRUE,
radioButtons("poploc","Select region", c("INDIA" = "INDIA", "USA" = "USA")),
uiOutput("varun"),
uiOutput("skill.varun"),
actionButton(inputId = "popularity", label = "GO", color = "red")
)
),
fluidRow(
box(
title = "Top 10 gainers.",
status = "danger",
solidHeader = TRUE,
collapsible = FALSE,
DT::dataTableOutput("top10gainers")
),
box(
title = "Top 10 losers.",
status = "danger",
solidHeader = TRUE,
collapsible = FALSE,
DT::dataTableOutput("top10losers")
)
),
fluidRow(
plotlyOutput("pop.plot")
),
fluidRow(
DT::dataTableOutput("pop.table")
)
)
),
# Footer: HCL logo linking to the corporate site.
tags$a(tags$img(src = "http://www.oneindia.com/img/2015/05/25-1432549894-hcl-logo.jpg", height = 200, width = 400), href= "https://www.hcltech.com/geo-presence/united-states")
))
#############################Fulfillment Percentage##############################
#Function to generate the data for the fulfillment graph based on customer.
fulfillment.customer <- function(skill){
# Mean fulfilment percentage per customer for skill bucket `skill`
# ("All" = every bucket), sorted best-first.
# Reads dump.csv from a hard-coded path; changes the working directory.
setwd("D:/HCL/LikeMe/Demand")
dump <- data.frame( fread("dump.csv", stringsAsFactors = FALSE))
dump$filled <- dump$External_Joined + dump$Internal_Filled
dump <- subset(dump, dump$Skill.Bucket != "#N/A")
# Calendar fields derived from the approval date.
dump$quarter <- quarter(dmy(dump$Approval.Date))
dump$month <- month(dmy(dump$Approval.Date))
dump$year <- year(dmy(dump$Approval.Date))
# Optional skill-bucket filter.
if (skill != "All") {
picked <- subset(dump, dump$Skill.Bucket == skill)
} else {
picked <- dump
}
# Initial vs filled demand per (customer, location) pair.
initial <- aggregate(picked$InitialDemand, by = list(picked$Customer, picked$Personal.SubArea), FUN = sum)
filled <- aggregate(picked$filled, by = list(picked$Customer, picked$Personal.SubArea), FUN = sum)
initial$fulfillment <- (filled$x / initial$x) * 100
initial <- initial[order(initial$fulfillment, decreasing = TRUE), ]
# Keep only pairs with above-average initial demand (when comparable).
if (nrow(initial) > 1) {
initial <- subset(initial, initial$x > mean(initial$x))
}
# Average fulfilment per customer (Group.1), best first.
per.customer <- aggregate(initial$fulfillment, by = list(initial$Group.1), FUN = mean)
per.customer <- per.customer[order(per.customer$x, decreasing = TRUE), ]
return(per.customer)
}
#Location-level fulfillment: mean fulfillment percentage per personal
#sub-area for a given skill bucket ("All" keeps every bucket), sorted
#best-first.  Mirrors fulfillment.customer() but averages over Group.2.
fulfillment.location <- function(skill){
  setwd("D:/HCL/LikeMe/Demand")
  dump <- data.frame(fread("dump.csv", stringsAsFactors = FALSE))
  # A position counts as filled whether it was joined externally or filled
  # internally; rows without a valid skill bucket are discarded.
  dump$filled <- dump$External_Joined + dump$Internal_Filled
  dump <- subset(dump, dump$Skill.Bucket != "#N/A")
  approval <- dmy(dump$Approval.Date)
  dump$quarter <- quarter(approval)
  dump$month <- month(approval)
  dump$year <- year(approval)
  if (skill != "All") {
    scoped <- subset(dump, dump$Skill.Bucket == skill)
  } else {
    scoped <- dump
  }
  # Demand vs. fill counts per (customer, sub-area) pair; in aggregate()'s
  # output Group.1 = Customer and Group.2 = Personal.SubArea.
  demanded <- aggregate(scoped$InitialDemand,
                        by = list(scoped$Customer, scoped$Personal.SubArea), FUN = sum)
  supplied <- aggregate(scoped$filled,
                        by = list(scoped$Customer, scoped$Personal.SubArea), FUN = sum)
  demanded$fulfillment <- (supplied$x / demanded$x) * 100
  demanded <- demanded[order(demanded$fulfillment, decreasing = TRUE), ]
  # Ignore low-volume pairs (initial demand at or below the mean) whenever
  # there is more than one row to compare.
  if (nrow(demanded) > 1) {
    demanded <- subset(demanded, demanded$x > mean(demanded$x))
  }
  per.location <- aggregate(demanded$fulfillment, by = list(demanded$Group.2), FUN = mean)
  per.location[order(per.location$x, decreasing = TRUE), ]
}
############################################POPULARITY#######################################################
#Builds the popularity dashboard data for one (country, customer, skill
#bucket) slice of the global demand table `dem`.  Returns a 4-element list:
#  [[1]] per-period totals for the top-5 skills (plot data),
#  [[2]] a category -> comma-separated-skills summary table,
#  [[3]] top-10 gainers, [[4]] top-10 losers (percentage-change tables).
popularity <- function(ctry,cust, skillbucket){
  # Work on a copy of the global `dem`; restore skill column names that R's
  # make.names() mangled on import (C++ -> "C..", C# -> "C.").
  cons <- dem
  colnames(cons)[which(names(cons) == "C..")] <- "C++"
  colnames(cons)[which(names(cons) == "C.")] <- "C#"
  #colnames(cons)[which(names(cons) == "C..")] <- "C++"
  # Columns 137:2972 are per-skill indicator columns: clip counts to 0/1,
  # then weight each row's skills by that row's initial demand.
  cons[,137:2972] <- as.data.frame(lapply(cons[,137:2972], function(x){replace(x, x>1,1)}))
  cons[,137:2972] <- cons[,137:2972]*cons$InitialDemand
  # Restrict to the requested country / customer / skill bucket.
  cons <- subset(cons,cons$country==ctry)
  cons <- cons %>% filter(cons$Customer==cust)
  cons <- cons %>% filter(cons$Skill.Bucket==skillbucket)
  max.year <- cons %>% filter(cons$year == max(cons$year))
  min.year <- cons %>% filter(cons$year == min(cons$year))
  # cq/pq: current and previous comparison periods derived from today's
  # quarter (Q1 wraps back to Q4/Q3).  NOTE(review): in the else branch pq is
  # computed AFTER cq has been decremented, so pq == quarter - 2; confirm
  # that is intended rather than quarter - 1.
  cq <- quarter(Sys.Date())
  if(cq==1){
    cq = 4
    pq = 3
  }else{
    cq = cq-1
    pq = cq-1
  }
  # NOTE(review): cq/pq (quarter numbers) are matched against the `week`
  # column, which therefore appears to hold quarter values -- verify against
  # the code that builds `dem`.
  max.year <- max.year %>% filter(max.year$week == cq)
  min.year <- min.year %>% filter(min.year$week == pq)
  # Total demand per skill in each of the two comparison periods, aligned by
  # sorting both tables on the skill name.
  max.year <- data.frame(colSums(max.year[,137:2972]))
  max.year$skills <- row.names(max.year)
  colnames(max.year) <- c("Value","skills")
  max.year <- max.year[order(max.year$skills, decreasing = T),]
  min.year <- data.frame(colSums(min.year[,137:2972]))
  min.year$skills <- row.names(min.year)
  colnames(min.year) <- c("Value","skills")
  min.year <- min.year[order(min.year$skills, decreasing = T),]
  # Side-by-side period values and the percentage change per skill.
  skilllist <- cbind(max.year,min.year$Value)
  skilllist$PercentageChange <- ((skilllist$Value- skilllist$`min.year$Value`)/skilllist$`min.year$Value`)*100
  External2 <- skilllist
  # Rank skills by total demand over the whole slice; the top 10 feed the
  # gainer/loser tables and the top 5 feed the trend plot.
  col.sums <- data.frame(colSums(cons[,137:2972]))
  col.sums$skills <- row.names(col.sums)
  colnames(col.sums) <- c("Value","skills")
  col.sums <- col.sums[order(col.sums$Value, decreasing = T),]
  topskills <- col.sums$skills[1:10]
  #col.sums <- head(col.sums$skills,20)
  col.sums <- col.sums$skills[1:5]
  skill.aggregate <- aggregate(cons[,c(col.sums)], by = list(cons$week, cons$year), FUN = sum)
  totalweeks <- ((max(cons$year)-min(cons$year))+1)*52
  # Build a (Year, Week) scaffold with 4 periods per year of data so that
  # periods with no demand still appear (as zeros) after the merge.
  weeks <- data.frame(Week = rep(1:4,((max(cons$year)-min(cons$year))+1)))
  years <- data.frame(Year = rep(min(cons$year), 4))
  for(i in 2:((max(cons$year)-min(cons$year))+1)){
    # NOTE(review): this appends min(cons$year)+1 regardless of i, so with
    # three or more years of data every later block repeats the second year;
    # likely intended to be min(cons$year)+i-1.  Left as-is.
    years <- rbind(years, data.frame(Year = rep(min(cons$year)+1,4)))
  }
  weeks <- cbind(years,weeks)
  colnames(skill.aggregate) <- c("Week","Year", col.sums)
  weeks <- merge(weeks, skill.aggregate, all = TRUE)
  colnames(weeks) <- c("Year","Week", col.sums)
  weeks[is.na(weeks)] <- 0
  year.today <- year(Sys.Date())
  week.today <- quarter(Sys.Date())
  # Keep at most 6 periods for the plot; label each as "Year - Week".
  weeks <- weeks[1:6,]
  weeks$year.quarter<- paste(weeks$Year," - " ,weeks$Week)
  # Bucket skills by their percentage change between the two periods.
  More.100 <- subset(External2,External2$PercentageChange>=100 )
  Stable <- subset(External2,External2$PercentageChange==0)
  No.Popularity <- subset(External2,(External2$PercentageChange)*(-1) >=100)
  Top10 <- subset(External2, External2$skills %in% topskills)
  Top10.gainers <- subset(Top10,Top10$PercentageChange >0)
  Top10.losers <- subset(Top10,Top10$PercentageChange < 0 )
  Gainers.Losers <- data.frame(Category = c("More than 100% popularity gain","No Loss No Gain",
                                            "Forgotten Skills", "Highest gain in the top 10 list",
                                            "Highest loss in the top 10 list"))
  Gainers.Losers$Skills <- c(paste(subset(External2,External2$PercentageChange>=100 )$skills, collapse=", "),
                             paste(subset(External2,External2$PercentageChange==0)$skills, collapse=", "),
                             paste(subset(External2,(External2$PercentageChange)*(-1) >=100)$skills, collapse=", "),
                             paste(subset(Top10,Top10$PercentageChange >0)$skills, collapse=", "),
                             paste(subset(Top10,Top10$PercentageChange < 0 )$skills, collapse=", "))
  # Drop the first three columns (Value, skills, min-period value) so only
  # the rounded percentage change remains; the skill names survive as row
  # names.  Infinite gains (zero baseline) are capped at 100 for More.100.
  More.100[,c(1,2,3)] <- NULL
  More.100$PercentageChange[is.infinite(More.100$PercentageChange)] <- 100
  More.100$PercentageChange <- round(More.100$PercentageChange)
  Stable[,c(1,2,3)] <- NULL
  Stable$PercentageChange <- round(Stable$PercentageChange)
  No.Popularity[,c(1,2,3)] <- NULL
  No.Popularity$PercentageChange <- round(No.Popularity$PercentageChange)
  Top10.gainers[,c(1,2,3)] <- NULL
  Top10.gainers$PercentageChange <- round(Top10.gainers$PercentageChange)
  if(nrow(Top10.gainers)>0){
    Top10.gainers$PercentageChange <- paste(Top10.gainers$PercentageChange,"%")
  }
  Top10.losers[,c(1,2,3)] <- NULL
  Top10.losers$PercentageChange <- round(Top10.losers$PercentageChange)
  if(nrow(Top10.losers)>0){
    Top10.losers$PercentageChange <- paste(Top10.losers$PercentageChange,"%")
  }
  # Remove skills whose change is infinite (division by a zero baseline).
  Top10.gainers <- subset(Top10.gainers, Top10.gainers$PercentageChange!="Inf %")
  Top10.gainers <- subset(Top10.gainers, Top10.gainers$PercentageChange!="-Inf %")
  Top10.losers <- subset(Top10.losers, Top10.losers$PercentageChange!="Inf %")
  Top10.losers <- subset(Top10.losers, Top10.losers$PercentageChange!="-Inf %")
  # Trim the plot data to 4 periods when the data already reaches past the
  # current year; otherwise return all 6.
  if(!year(Sys.Date())>max(cons$year)){
    return(list(weeks,Gainers.Losers, Top10.gainers, Top10.losers))
  }else{
    weeks = weeks[1:4,]
    return(list(weeks,Gainers.Losers, Top10.gainers, Top10.losers))
  }
}
#Shiny server: wires the reactive data sources (eventReactive blocks keyed to
#the UI action buttons) to the table/plot/value-box renderers.  Relies on
#many globals defined earlier in the file: `dem`, `demand.dump`, `demandda`,
#`dd`, and helper functions such as likeme(), forecaster(), newman(), etc.
server <- function(input, output, session) {
  # Upload handler.  NOTE(review): it writes the global `demand` -- not the
  # uploaded `df` -- to newdemand.csv; confirm that is intended.
  output$contents <- renderTable({
    # input$file1 will be NULL initially. After the user selects
    # and uploads a file, head of that data file by default,
    # or all rows if selected, will be shown.
    req(input$file1)
    print(1)
    df <- read.csv(input$file1$datapath)
    df$X <- NULL
    print(2)
    #demand <- rbind(demand.upload, df)
    print("uploaded")
    setwd("D:\\HCL\\LikeMe")
    write.csv(demand, "newdemand.csv")
    print("added")
    return(df)
  })
  # Deep-dive entry point: user starts from either a Skill or a Customer.
  output$series<-renderUI({
    radioButtons("radio","Start Deep Dive with either,", c("Skill" = "Skill","Customer" = "Customer"))
  })
  # First selector depends on which deep-dive mode was chosen.
  output$varun1 <- renderUI({
    if (is.null(input$radio))
      return()
    switch(input$radio,"Customer" = selectInput("custa", "Select the Customer",
                                                choices = c("",as.character( unique(demandda$Customer))),
                                                selected = "option2"),
           "Skill" = selectInput("skilla", "Select Skill",
                                 choices = c("",as.character(unique(colnames(dd)))),
                                 selected = "option2"
           )
    )
  })
  # Boxes 3-5: cascading selectors (customer / skill bucket / location)
  # whose choice lists flip depending on the deep-dive mode.
  output$Box3 = renderUI({
    if ((input$radio=="Skill"))
      return(selectInput("custa", "Select Customer", choices= act_customer(input$skilla)))
    selectInput("skilla",
                "Select Skill",
                choices = c("", list_customer(input$custa))
    )})
  output$Box4 = renderUI({
    if ((input$radio=="Skill"))
      return(selectInput("bucks", "Select Skill Bucket", choices =c("", act_skill(input$skilla))))
    selectInput("bucks","Select Skill Bucket",choices = c( "",list_skillbucket(input$custa)))
  })
  output$Box5 = renderUI({
    if ((input$radio=="Skill"))
      return(selectInput("subarea", "Select Location", choices =c("", act_location(input$skilla))))
    selectInput("subarea","Select Location",choices = c("",list_location(input$custa)))
  })
  output$Box6 = renderUI(
    sliderInput(inputId = "num", label = "Choose a number", value = 20, min=1, max = 50)
  )
  output$Box7 = renderUI(
    actionButton(inputId = "go4", label = "Radar", color = "red") )
  # NOTE(review): `data` shadows base::data() inside this scope.
  data <- eventReactive(input$go, {likeme(input$ski.ll[1], input$job[1], input$exp[1], input$stype[1], input$sk.ill[1], input$num1[1], input$clack[1],input$functional[1],
                                          input$systems[1], input$jobboard[1])})
  output$Box111= renderUI(selectInput("years", "Select Year", choices =c("","2016", "2017") ))
  output$score <- DT::renderDataTable({
    data()
  })
  #Creating reactive functions for various buttons included in the UI.
  data1 <- eventReactive(input$cust, {forecaster(input$forecast.ss[1],input$custloc[1])})
  data2 <- eventReactive(input$cust, {maps(input$forecast.ss[1],input$forecast.qq[1],input$forecast.yy[1],input$custloc[1])})
  data3 <- eventReactive(input$cust, {maptable(input$forecast.ss[1],input$forecast.qq[1],input$forecast.yy[1],input$custloc[1])})
  data4 <- eventReactive(input$go4, {newman(input$skilla[1], input$num, input$bucks, input$subarea, input$custa, input$radio, input$years)})
  data5 <- eventReactive(input$go5,{manji(input$skills1,input$Experience, input$Customer, input$Job_family,input$Designation,input$Skill_category, input$L2, input$L3, input$Band, input$Sub_band, input$Personal_subarea)})
  data6 <- eventReactive(input$go6,{jobboard(input$kill1,input$kill2, input$kill3)})
  data7 <- eventReactive(input$cust,{custskill1(input$forecast.ss, input$forecast.yy, input$forecast.qq)})
  data8 <- eventReactive(input$cust,{tabs(input$forecast.ss, input$forecast.yy, input$forecast.qq)})
  recodata <- eventReactive(input$recogo, {candidate_recommendation(input$recoskill)})
  data9 <- eventReactive(input$go4,{customer(input$skilla[1])})
  data10 <- eventReactive(input$cust,{fulfillment.customer(input$forecast.ss[1])})
  data11 <- eventReactive(input$cust,{fulfillment.location(input$forecast.ss[1])})
  data.popularity <- eventReactive(input$popularity,{popularity(input$poploc,input$dynamic,input$dyna)})
  data.combforecast <- eventReactive(input$cust,{combopred(input$forecast.ss[1],input$forecast.qq[1],input$forecast.yy[1],input$custloc[1])})
  data.custforecast <- eventReactive(input$cust,{cust.forecast(input$forecast.ss[1],input$forecast.qq[1],input$forecast.yy[1],input$custloc[1])})
  data.Pop <- eventReactive(input$Pop2,{Popular(input$Coun, input$cus,input$num, input$buky, input$expe, input$band, input$quat)})
  #Functions to generate tables and graphs.
  ##############################################Table output Poularity #################
  # Popularity table with red/green/gray arrow formatting on the Delta column.
  output$Table<- renderFormattable(
    formattable(data.table(data.Pop()) , list(Delta = formatter(
      "span",
      style = x ~ style(color = ifelse(x < 0 , "red", ifelse(x>0,"green","gray"))),
      x ~ icontext(ifelse(x < 0, "arrow-down", ifelse(x>0,"arrow-up","minus")), x))))
  )
  output$custforecast <- DT::renderDataTable({
    data.custforecast()
  })
  output$combforecast <- DT::renderDataTable({
    data.combforecast()
  })
  output$recoresults <- DT::renderDataTable({
    recodata()
  })
  # Customer selector restricted to the chosen popularity country.
  output$varun <- renderUI({
    if (is.null(input$poploc))
      return()
    switch(input$poploc,
           "INDIA" = selectInput("dynamic", "Select the Customer",
                                 choices = unique(subset(demand.dump, demand.dump$country=="INDIA")$Customer),
                                 selected = "option2"
           ),
           "USA" = selectInput("dynamic", "Select the customer",
                               choices = unique(subset(demand.dump, demand.dump$country=="USA")$Customer),
                               selected = "option2"
           )
    )
  })
  # Cascading forecast selectors: skill bucket -> year -> quarter, each
  # filtered by the selections above it.
  output$forecast.skill <- renderUI({
    selectInput("forecast.ss", "Select Skill Bucket",
                choices = unique(subset(demand.dump, tolower(demand.dump$country)==tolower(input$custloc))$Skill.Bucket),
                selected = "option3"
    )
  })
  output$forecast.year <- renderUI({
    selectInput("forecast.yy", "Select Year",
                choices = unique(subset(demand.dump, tolower(demand.dump$country)==tolower(input$custloc) & tolower(demand.dump$Skill.Bucket)==tolower(input$forecast.ss))$year),
                selected = "option3"
    )
  })
  output$forecast.quarter <- renderUI({
    selectInput("forecast.qq", "Select Quarter",
                choices = unique(subset(demand.dump, tolower(demand.dump$country)==tolower(input$custloc) & tolower(demand.dump$Skill.Bucket)==tolower(input$forecast.ss) & demand.dump$year == input$forecast.yy)$quarter),
                selected = "option3"
    )
  })
  output$skill.varun <- renderUI({
    selectInput("dyna", "Select Skill Bucket",
                choices = unique(subset(subset(demand.dump, demand.dump$country==input$poploc),
                                        subset(demand.dump, demand.dump$country==input$poploc)$Customer==input$dynamic)$Skill.Bucket),
                selected = "option3"
    )
  })
  #Generating the graphs for the popularity statistics.
  # Columns 3-7 of popularity()'s first list element are the top-5 skills.
  output$pop.plot <- renderPlotly({
    External1 <- data.frame(data.popularity()[1])
    External1$year.quarter <- factor(External1$year.quarter, levels = External1[["year.quarter"]])
    plot_ly(External1, x = ~year.quarter, y = ~External1[,3], name = colnames(External1)[3], type = 'scatter', mode = 'lines',
            line = list(color = 'rgb(155, 9, 9)', width = 4)) %>%
      add_trace(y = ~External1[,4], name = colnames(External1)[4], line = list(color = 'rgb(5, 14, 109)', width = 4)) %>%
      add_trace(y = ~External1[,5], name = colnames(External1)[5], line = list(color = 'rgb(20, 109, 4)', width = 4)) %>%
      add_trace(y = ~External1[,6], name = colnames(External1)[6], line = list(color = 'rgb(244, 244, 97)', width = 4)) %>%
      add_trace(y = ~External1[,7], name = colnames(External1)[7], line = list(color = 'rgb(93, 7, 158)', width = 4)) %>%
      layout(title = "The Popularity of top 5 skills over time",
             xaxis = list(title = "Year - Quarter"),
             yaxis = list (title = "Popularity in Numbers"))
  })
  output$pop.table <- DT::renderDataTable({
    data.frame(data.popularity()[2])
  })
  output$top10losers <- DT::renderDataTable({
    data.frame(data.popularity()[3])
  })
  output$top10gainers <- DT::renderDataTable({
    data.frame(data.popularity()[4])
  })
  #Plot to display the statistics of demand on the US map.
  output$plot <- renderPlotly({
    g <- list(
      scope = 'usa',
      projection = list(type = 'albers usa'),
      lakecolor = toRGB('white')
    )
    plot_ly(z = data3()$Demand, text = data3()$State, locations = state.abb,
            type = 'choropleth', locationmode = 'USA-states') %>%
      layout(geo = g)
  })
  # Group.1 is aggregate()'s name for the grouping column in the fulfillment
  # results (location for data11, customer for data10).
  output$ful.loc <- renderPlotly({
    plot_ly(
      x = data11()$Group.1,
      y = data11()$x,
      name = "",
      type = "bar"
    )
  })
  #Plot to display statistics about the customer. Currently displayed.
  output$ful.cust <- renderPlotly({
    plot_ly(
      x = data10()$Group.1,
      y = data10()$x,
      name = "",
      type = "bar"
    )
  })
  #Displays a table with skills separated with commas.
  # Long "Alternatives" cells are truncated to 400 chars client-side via a
  # DataTables render callback.
  output$links<- DT::renderDataTable({
    #data4()
    datatable((data.frame(Skill = colnames(data.frame(data4()[1], check.names =FALSE )),
                          Definition = unlist(lapply(colnames(data.frame(data4()[1], check.names = FALSE)), function (x) {defin(x)})),
                          Alternatives= unlist(lapply(colnames(data.frame(data4()[1], check.names = FALSE)), function (x) {alter(x)})))), options = list(columnDefs = list(list(
                            targets = 3,
                            render = JS(
                              "function(data, type, row, meta) {",
                              "return type === 'display' && data.length > 400 ?",
                              "'<span title=\"' + data + '\">' + data.substr(0, 400) + '...</span>' : data;",
                              "}")
                          ))), callback = JS('table.page(3).draw(false);'))
  })
  #Displays a box with the Overall demand for the quarter and year selected.
  output$overall <- renderValueBox({
    valueBox(
      paste0(data8()$Overall), "Overall Demand", icon = icon("group"),
      color = "yellow"
    )
  })
  #Displays a box with the Fulfillment percentage for the quarter and year selected.
  output$fulfillment <- renderValueBox({
    valueBox(
      paste0(data8()$ful.per, "%"), "Fulfillment", icon = icon("thumbs-up"),
      color = "olive"
    )
  })
  #Displays a box with the Drop percentage for the quarter and year selected.
  output$drop <- renderValueBox({
    valueBox(
      paste0(data8()$drop.per, "%"), "Drop", icon = icon("thumbs-down"),
      color = "red"
    )
  })
  #Displays a box with the Unfulfilled Overdue percentage.
  output$od <- renderValueBox({
    valueBox(
      paste0(data8()$od.per, "%"), "Unfulfilled Overdue", icon = icon("list"),
      color = "orange"
    )
  })
  #Displays a box with the Forecast for the next quarter.
  output$frcst <- renderValueBox({
    valueBox(
      data1()[3,]$Demand, paste0("Next Quarter Forecast"), icon = icon("line-chart"),
      color = "blue"
    )
  })
  #Displays a box with the revenue.
  # Revenue estimate = forecast headcount * $65/hr * 2080 hrs/yr.
  output$revenue <- renderValueBox({
    valueBox(
      paste0("$",data1()[3,]$Demand*65*2080), paste0(data1()$quarter[nrow(data1())],"-",data1()$year[nrow(data1())],"Revenue"), icon = icon("dollar"),
      color = "green"
    )
  })
  #Displays the plot the demand for top10 customers.
  output$custplot <- renderPlotly(
    {
      plot_ly(data7(), x = ~Customer, y = ~Demand, type = 'scatter',color = ~Segement,
              size = ~Demand,
              mode = 'markers',colors = colors,
              marker = list(symbol = "circle", sizemode = 'diameter',
                            line = list(width = 3, color = '#FFFFFF'))) %>%
        layout(title = paste(""),
               xaxis = list(title = '',
                            gridcolor = 'rgb(255, 255, 255)',
                            zerolinewidth = 1,
                            ticklen = 5,
                            gridwidth = 2,
                            showticklabels = FALSE),
               yaxis = list(title = '',
                            gridcolor = 'rgb(255, 255, 255)',
                            zerolinewidth = 1,
                            ticklen = 5,
                            gridwith = 2),
               paper_bgcolor = 'rgb(243, 243, 243)',
               plot_bgcolor = 'rgb(243, 243, 243)')
    }
  )
  # Forecast vs. actual demand line chart.
  output$coolplot <- renderPlot({
    ggplot(data1(), aes(x = paste(year,"-",quarter), y = Demand.Forecast, group = 1))+
      geom_line(aes(color = "green"))+
      geom_line(aes(y = Actual.Demand,color = "red"))+
      theme(text = element_text(size=10),axis.text.x = element_text(angle=90, hjust=1))+
      scale_size_manual(values = c(0.1, 1))+
      xlab("Year - Quarter") + ylab("Demand in Numbers") + scale_fill_discrete(name="Type of Demand",
                                                                              breaks=c("Forecast", "Actual"),labels=c("Forecast", "Actual"))+ggtitle(paste("Forecast for",input$skill[1]))
  })
  output$results <- DT::renderDataTable({
    data1()
  })
  output$map <- renderPlot({
    spplot(data2()['value'], title = paste("Demand throughout the US for",input$skill[1], "in Quarter",input$quarter[1],"of", input$year[1]))
  })
  output$maptable <- DT::renderDataTable({
    data2()
  })
  output$skills <- renderPlot({
    radarchart(data.frame(data4()[1], check.names = FALSE),pcol = "red")
  })
  ##############################################indicator########################
  output$skills2 <- renderPlotly({
    if ((input$skilla==""))
      return()
    plot_ly(data=data9(),x = as.factor(data9()$custo),y = data9()$total, type = "bar")%>%layout(xaxis = list(categoryorder = "array",
                                                                                                            categoryarray = (data9()$custo)))
  })
  output$skills3 <- renderDataTable({
    datatable( data.frame(Boolean=paste(colnames(data.frame(data4()[1], check.names = FALSE)),collapse = ",")))
  })
  #newmanvalue box like e radar
  output$frequency <- renderValueBox({
    valueBox(
      paste0(unlist(data4()[3])), "Job Description(s) used to generate the Skill Radar", icon = icon("list"),
      color = "purple"
    )
  })
  output$results1 <- DT::renderDataTable({
    data5()
  })
  output$results2 <- DT::renderDataTable({
    data6()
  })
}
shinyApp(ui = ui, server = server)
|
Kruskal-Wallis rank sum test
data: ARRAY and categs
Kruskal-Wallis chi-squared = 261.56, df = 13, p-value < 2.2e-16
HHCOR2LPNORM HHCOR2MINMAX HHCORandomLPNORM HHCORandomMINMAX HHLA HypE MOEAD MOEADD MOMBI2 NSGAII NSGAIII SPEA2 SPEA2SDE
HHCOR2MINMAX 0.00070 - - - - - - - - - - - -
HHCORandomLPNORM 0.52682 0.68851 - - - - - - - - - - -
HHCORandomMINMAX 0.09555 0.98945 0.99995 - - - - - - - - - -
HHLA 0.99953 0.03243 0.98536 0.65349 - - - - - - - - -
HypE 0.97938 0.12671 0.99975 0.91207 1.00000 - - - - - - - -
MOEAD 4.3e-10 0.43742 0.00040 0.01162 2.5e-07 3.6e-06 - - - - - - -
MOEADD 0.01649 0.99996 0.98746 1.00000 0.27041 0.58552 0.07251 - - - - - -
MOMBI2 1.6e-05 0.99997 0.18344 0.71033 0.00171 0.01072 0.91959 0.96025 - - - - -
NSGAII 0.08992 2.5e-12 3.5e-06 2.8e-08 0.00295 0.00040 1.6e-13 7.2e-10 1.5e-13 - - - -
NSGAIII 0.99430 1.1e-06 0.02424 0.00103 0.62626 0.30266 2.0e-13 7.6e-05 9.5e-09 0.81514 - - -
SPEA2 0.49762 7.6e-10 0.00021 3.2e-06 0.05138 0.01072 1.5e-13 1.2e-07 2.8e-12 0.99996 0.99742 - -
SPEA2SDE 6.4e-08 0.91347 0.00967 0.12803 1.8e-05 0.00018 0.99997 0.42064 0.99966 1.4e-13 1.2e-11 1.3e-13 -
ThetaDEA 0.99999 2.7e-05 0.13753 0.01072 0.93785 0.71033 3.3e-12 0.00115 3.6e-07 0.41234 1.00000 0.91207 7.8e-10
| /MaFMethodology/R/semsde/IGD/5/kruskaloutput.R | no_license | fritsche/hhcopreliminaryresults | R | false | false | 2,221 | r |
Kruskal-Wallis rank sum test
data: ARRAY and categs
Kruskal-Wallis chi-squared = 261.56, df = 13, p-value < 2.2e-16
HHCOR2LPNORM HHCOR2MINMAX HHCORandomLPNORM HHCORandomMINMAX HHLA HypE MOEAD MOEADD MOMBI2 NSGAII NSGAIII SPEA2 SPEA2SDE
HHCOR2MINMAX 0.00070 - - - - - - - - - - - -
HHCORandomLPNORM 0.52682 0.68851 - - - - - - - - - - -
HHCORandomMINMAX 0.09555 0.98945 0.99995 - - - - - - - - - -
HHLA 0.99953 0.03243 0.98536 0.65349 - - - - - - - - -
HypE 0.97938 0.12671 0.99975 0.91207 1.00000 - - - - - - - -
MOEAD 4.3e-10 0.43742 0.00040 0.01162 2.5e-07 3.6e-06 - - - - - - -
MOEADD 0.01649 0.99996 0.98746 1.00000 0.27041 0.58552 0.07251 - - - - - -
MOMBI2 1.6e-05 0.99997 0.18344 0.71033 0.00171 0.01072 0.91959 0.96025 - - - - -
NSGAII 0.08992 2.5e-12 3.5e-06 2.8e-08 0.00295 0.00040 1.6e-13 7.2e-10 1.5e-13 - - - -
NSGAIII 0.99430 1.1e-06 0.02424 0.00103 0.62626 0.30266 2.0e-13 7.6e-05 9.5e-09 0.81514 - - -
SPEA2 0.49762 7.6e-10 0.00021 3.2e-06 0.05138 0.01072 1.5e-13 1.2e-07 2.8e-12 0.99996 0.99742 - -
SPEA2SDE 6.4e-08 0.91347 0.00967 0.12803 1.8e-05 0.00018 0.99997 0.42064 0.99966 1.4e-13 1.2e-11 1.3e-13 -
ThetaDEA 0.99999 2.7e-05 0.13753 0.01072 0.93785 0.71033 3.3e-12 0.00115 3.6e-07 0.41234 1.00000 0.91207 7.8e-10
|
# Package load hook: copy backported implementations of anyNA(), isTRUE()
# and lengths() into this package's namespace so the code also runs on R
# versions that predate those base functions.
.onLoad <- function(libname, pkgname) {
  backports::import(pkgname, c("anyNA", "isTRUE", "lengths"))
}
| /R/onLoad.R | permissive | rossellhayes/incase | R | false | false | 104 | r | .onLoad <- function(libname, pkgname) {
backports::import(pkgname, c("anyNA", "isTRUE", "lengths"))
}
|
call(a =
5,
b)
call(a =
5,
b
)
c(
a =
1,
b = # comment here
2
)
| /tests/testthat/indention_operators/eq_sub_complex_tokens-in.R | permissive | r-lib/styler | R | false | false | 143 | r | call(a =
5,
b)
call(a =
5,
b
)
c(
a =
1,
b = # comment here
2
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotFlowHist.R
\name{plot.FlowHist}
\alias{plot.FlowHist}
\title{Plot histograms for FlowHist objects}
\usage{
\method{plot}{FlowHist}(x, init = FALSE, nls = TRUE, comps = TRUE,
main = fhFile(x), ...)
}
\arguments{
\item{x}{a \code{\link{FlowHist}} object}
\item{init}{boolean; if TRUE, plot the regression model using the
initial parameter estimates over the raw data.}
\item{nls}{boolean; if TRUE, plot the fitted regression model over the
raw data (i.e., using the final parameter values)}
\item{comps}{boolean; if TRUE, plot the individual model components
over the raw data.}
\item{main}{character; the plot title. Defaults to the filename of the
\code{\link{FlowHist}} object.}
\item{...}{additional arguments passed on to plot()}
}
\value{
Not applicable
}
\description{
Plot histograms for FlowHist objects
}
\author{
Tyler Smith
}
| /man/plot.FlowHist.Rd | no_license | plantarum/flowPloidy-old | R | false | true | 925 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotFlowHist.R
\name{plot.FlowHist}
\alias{plot.FlowHist}
\title{Plot histograms for FlowHist objects}
\usage{
\method{plot}{FlowHist}(x, init = FALSE, nls = TRUE, comps = TRUE,
main = fhFile(x), ...)
}
\arguments{
\item{x}{a \code{\link{FlowHist}} object}
\item{init}{boolean; if TRUE, plot the regression model using the
initial parameter estimates over the raw data.}
\item{nls}{boolean; if TRUE, plot the fitted regression model over the
raw data (i.e., using the final parameter values)}
\item{comps}{boolean; if TRUE, plot the individual model components
over the raw data.}
\item{main}{character; the plot title. Defaults to the filename of the
\code{\link{FlowHist}} object.}
\item{...}{additional arguments passed on to plot()}
}
\value{
Not applicable
}
\description{
Plot histograms for FlowHist objects
}
\author{
Tyler Smith
}
|
# Decision Tree - Classification
#We want to predict, for a combination of input variables, whether a person
#is likely to survive (Titanic passenger data) or not.
#import data from online site
path = 'https://raw.githubusercontent.com/DUanalytics/datasets/master/csv/titanic_train.csv'
titanic <- read.csv(path)
head(titanic)
names(titanic)
data = titanic[,c(2,3,5,6,7)] #keep Survived, Pclass, Sex, Age, SibSp only
head(data)
dim(data)
#load libraries
library(rpart)
library(rpart.plot)
str(data)
#Decision Tree
names(data)
table(data$Survived)
str(data)
#data$Pclass = factor(data$Pclass)
fit <- rpart(Survived ~ ., data = data, method = 'class')
fit
rpart.plot(fit, extra = 104, cex=.8,nn=T) #plot with class probabilities (104) and node numbers
head(data)
printcp(fit) #select complexity parameter from the cp table
prunetree2 = prune(fit, cp=.018) #prune at the chosen cp to reduce overfitting
rpart.plot(prunetree2, cex=.8,nn=T, extra=104)
prunetree2
nrow(data)
table(data$Survived)
# predict for Female, pclass=3, siblings=2, what is the chance of survival
#Predict class category or probabilities
library(dplyr)
(testdata = sample_n(data,2)) #two random rows as a quick test set
predict(prunetree2, newdata=testdata, type='class')
predict(prunetree2, newdata=testdata, type='prob')
str(data)
#Hand-built single case: 5-year-old boy, 2nd class, 2 siblings/spouses.
testdata2 = data.frame(Pclass=2, Sex=factor('male'), Age=5, SibSp=2)
testdata2
predict(prunetree2, newdata = testdata2, type='class')
predict(prunetree2, newdata = testdata2, type='prob')
#Use decision trees for predicting
#customer is likely to buy a product or not with probabilities
#customer is likely to default on payment or not with probabilities
#Student is likely to get selected, cricket team likely to win etc
#Imp steps
#select columns for prediction
#load libraries, create model y ~ x1 + x2
#prune the tree with cp value
#plot the graph
#predict for new cases
#rpart, CART, classification model
#regression decision = predict numerical value eg sales
| /Decision tree.R | no_license | raghavsavaji/iimbg | R | false | false | 1,766 | r | # Decision Tree - Classification
# CART classification tree on the Titanic data: for a combination of input
# variables, is a passenger likely to survive?
# Import the data from an online source.
path = 'https://raw.githubusercontent.com/DUanalytics/datasets/master/csv/titanic_train.csv'
titanic <- read.csv(path)
head(titanic)
names(titanic)
data = titanic[,c(2,3,5,6,7)] # keep Survived, Pclass, Sex, Age, SibSp only
head(data)
dim(data)
# Load the tree-fitting and tree-plotting libraries.
library(rpart)
library(rpart.plot)
str(data)
# Fit the decision tree (method = 'class' -> classification, not regression).
names(data)
table(data$Survived)
str(data)
#data$Pclass = factor(data$Pclass)
fit <- rpart(Survived ~ ., data = data, method = 'class')
fit
rpart.plot(fit, extra = 104, cex=.8,nn=T) # plot the unpruned tree
head(data)
printcp(fit) # inspect the complexity-parameter table to choose a cp for pruning
prunetree2 = prune(fit, cp=.018)
rpart.plot(prunetree2, cex=.8,nn=T, extra=104)
prunetree2
nrow(data)
table(data$Survived)
# Predict: e.g. for a female, pclass=3, 2 siblings -- what is the chance of survival?
# Predict either the class label or the class probabilities.
library(dplyr)
(testdata = sample_n(data,2)) # two random rows as a quick sanity-check test set
predict(prunetree2, newdata=testdata, type='class')
predict(prunetree2, newdata=testdata, type='prob')
str(data)
testdata2 = data.frame(Pclass=2, Sex=factor('male'), Age=5, SibSp=2)
testdata2
predict(prunetree2, newdata = testdata2, type='class')
predict(prunetree2, newdata = testdata2, type='prob')
# Typical uses of classification trees:
# - is a customer likely to buy a product / default on a payment (with probabilities)
# - is a student likely to be selected, is a cricket team likely to win, etc.
# Important steps:
# - select columns for prediction
# - load libraries, create model y ~ x1 + x2
# - prune the tree with a cp value
# - plot the graph
# - predict for new cases
# rpart, CART, classification model
# regression tree = predict a numerical value, e.g. sales
|
# Global setup for the regex-explainer Shiny app (evaluated once per session).
options(shiny.trace=FALSE)
library(shiny)
library(shinyBS)
library(data.table)
library(stringr)
library(purrr)
library(tidyr)
library(htmltools)
source("helper_functions.R")
source("regexplainer.R")
# Pick one of the two donation-button snippets at random for this session.
buy_me_stuff_button_html <- sample(
  c('www/buy_me_coffee_button.html', 'www/buy_me_beer_button.html'),
  size = 1
)
# Wrap the fallible helpers so a failure yields NULL (with a message printed)
# instead of crashing the app.
safe_slashes                <- purrr::possibly(half_slashes, otherwise = NULL, quiet = FALSE)
safe_highlight_test_str     <- purrr::possibly(highlight_test_str, otherwise = NULL, quiet = FALSE)
safe_html_format_match_list <- purrr::possibly(html_format_match_list, otherwise = NULL, quiet = FALSE)
safe_get_match_list         <- purrr::possibly(get_match_list, otherwise = NULL, quiet = FALSE)
safe_regexplain             <- purrr::possibly(regexplain, otherwise = NULL, quiet = FALSE)
# Palette name used for match highlighting (spelling kept: referenced elsewhere).
highlight_color_pallete <- "Set3"
#' Multiple gsub: sequentially replace several patterns in a text vector.
#'
#' Port of qdap::mgsub. Patterns are applied one after another with gsub(),
#' so a later pattern can match text produced by an earlier replacement.
#'
#' @param pattern Character vector of patterns to replace.
#' @param replacement Character vector of replacements; length 1 is recycled
#'   across all patterns.
#' @param text.var Character vector to operate on.
#' @param leadspace,trailspace Pad replacements with a leading/trailing space.
#'   NOTE(review): this branch calls spaste(), which is not defined in this
#'   file (it comes from qdap) -- it will error unless qdap is attached.
#' @param fixed Treat patterns as literal strings (passed to gsub()).
#' @param trim Strip leading/trailing whitespace and collapse internal runs
#'   of whitespace in the result.
#' @param order.pattern When fixed, apply longer patterns first so a short
#'   pattern cannot clobber part of a longer one.
#' @param ... Further arguments passed to gsub().
#' @return The text vector with all replacements applied.
mgsub <- function (pattern, replacement, text.var, leadspace = FALSE, trailspace = FALSE,
    fixed = TRUE, trim = TRUE, order.pattern = fixed, ...) {
  # Fix: top-level assignment was `=`; scalar condition below used `|`.
  if (leadspace || trailspace)
    replacement <- spaste(replacement, trailing = trailspace, leading = leadspace)
  if (fixed && order.pattern) {
    # Longest patterns first; keep replacements paired with their patterns.
    ord <- rev(order(nchar(pattern)))
    pattern <- pattern[ord]
    if (length(replacement) != 1)
      replacement <- replacement[ord]
  }
  if (length(replacement) == 1)
    replacement <- rep(replacement, length(pattern))
  for (i in seq_along(pattern)) {
    text.var <- gsub(pattern[i], replacement[i], text.var, fixed = fixed, ...)
  }
  if (trim)
    text.var <-
      gsub("\\s+", " ", gsub("^\\s+|\\s+$", "", text.var, perl = TRUE), perl = TRUE)
  text.var
}
| /inst/app/global.R | no_license | liao961120/regexShiny | R | false | false | 1,704 | r | # rm(list=ls())
# Global setup for the regex-explainer Shiny app (evaluated once per session).
options(shiny.trace=FALSE)
library(shiny)
library(shinyBS)
library(data.table)
library(stringr)
library(purrr)
library(tidyr)
library(htmltools)
source("helper_functions.R")
source("regexplainer.R")
# Pick one of the two donation-button snippets at random for this session.
buy_me_stuff_button_html <- sample(
  c('www/buy_me_coffee_button.html', 'www/buy_me_beer_button.html'),
  size = 1
)
# Wrap the fallible helpers so a failure yields NULL (with a message printed)
# instead of crashing the app.
safe_slashes                <- purrr::possibly(half_slashes, otherwise = NULL, quiet = FALSE)
safe_highlight_test_str     <- purrr::possibly(highlight_test_str, otherwise = NULL, quiet = FALSE)
safe_html_format_match_list <- purrr::possibly(html_format_match_list, otherwise = NULL, quiet = FALSE)
safe_get_match_list         <- purrr::possibly(get_match_list, otherwise = NULL, quiet = FALSE)
safe_regexplain             <- purrr::possibly(regexplain, otherwise = NULL, quiet = FALSE)
# Palette name used for match highlighting (spelling kept: referenced elsewhere).
highlight_color_pallete <- "Set3"
# Multiple gsub (port of qdap::mgsub): apply several pattern -> replacement
# substitutions to a text vector, one after another. With fixed patterns,
# longer patterns are substituted first so they are not clobbered by shorter
# ones; a length-1 replacement is recycled. NOTE: the lead/trail-space branch
# relies on spaste(), which comes from qdap and is not defined in this file.
mgsub <- function (pattern, replacement, text.var, leadspace = FALSE, trailspace = FALSE,
    fixed = TRUE, trim = TRUE, order.pattern = fixed, ...) {
  if (leadspace | trailspace) {
    replacement <- spaste(replacement, trailing = trailspace, leading = leadspace)
  }
  if (fixed && order.pattern) {
    by_length <- rev(order(nchar(pattern)))
    pattern <- pattern[by_length]
    if (length(replacement) != 1) {
      replacement <- replacement[by_length]
    }
  }
  if (length(replacement) == 1) {
    replacement <- rep(replacement, length(pattern))
  }
  for (k in seq_along(pattern)) {
    text.var <- gsub(pattern[k], replacement[k], text.var, fixed = fixed, ...)
  }
  if (trim) {
    # Strip the ends first, then collapse internal whitespace runs.
    text.var <- gsub("^\\s+|\\s+$", "", text.var, perl = TRUE)
    text.var <- gsub("\\s+", " ", text.var, perl = TRUE)
  }
  text.var
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gamelift_operations.R
\name{gamelift_update_runtime_configuration}
\alias{gamelift_update_runtime_configuration}
\title{Updates the current runtime configuration for the specified fleet, which
tells Amazon GameLift how to launch server processes on instances in the
fleet}
\usage{
gamelift_update_runtime_configuration(FleetId, RuntimeConfiguration)
}
\arguments{
\item{FleetId}{[required] A unique identifier for a fleet to update runtime configuration for. You
can use either the fleet ID or ARN value.}
\item{RuntimeConfiguration}{[required] Instructions for launching server processes on each instance in the
fleet. Server processes run either a custom game build executable or a
Realtime Servers script. The runtime configuration lists the types of
server processes to run on an instance and includes the following
configuration settings: the server executable or launch script file,
launch parameters, and the number of processes to run concurrently on
each instance. A CreateFleet request must include a runtime
configuration with at least one server process configuration.}
}
\description{
Updates the current runtime configuration for the specified fleet, which
tells Amazon GameLift how to launch server processes on instances in the
fleet. You can update a fleet's runtime configuration at any time after
the fleet is created; it does not need to be in an \code{ACTIVE} status.
To update runtime configuration, specify the fleet ID and provide a
\code{RuntimeConfiguration} object with an updated set of server process
configurations.
Each instance in an Amazon GameLift fleet checks regularly for an updated
runtime configuration and changes how it launches server processes to
comply with the latest version. Existing server processes are not
affected by the update; runtime configuration changes are applied
gradually as existing processes shut down and new processes are launched
during Amazon GameLift's normal process recycling activity.
\strong{Learn more}
\href{https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html}{Setting up GameLift Fleets}
\strong{Related operations}
\itemize{
\item CreateFleet
\item ListFleets
\item DeleteFleet
\item DescribeFleetAttributes
\item Update fleets:
\itemize{
\item UpdateFleetAttributes
\item UpdateFleetCapacity
\item UpdateFleetPortSettings
\item UpdateRuntimeConfiguration
}
\item StartFleetActions or StopFleetActions
}
}
\section{Request syntax}{
\preformatted{svc$update_runtime_configuration(
FleetId = "string",
RuntimeConfiguration = list(
ServerProcesses = list(
list(
LaunchPath = "string",
Parameters = "string",
ConcurrentExecutions = 123
)
),
MaxConcurrentGameSessionActivations = 123,
GameSessionActivationTimeoutSeconds = 123
)
)
}
}
\keyword{internal}
| /paws/man/gamelift_update_runtime_configuration.Rd | permissive | sanchezvivi/paws | R | false | true | 2,891 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gamelift_operations.R
\name{gamelift_update_runtime_configuration}
\alias{gamelift_update_runtime_configuration}
\title{Updates the current runtime configuration for the specified fleet, which
tells Amazon GameLift how to launch server processes on instances in the
fleet}
\usage{
gamelift_update_runtime_configuration(FleetId, RuntimeConfiguration)
}
\arguments{
\item{FleetId}{[required] A unique identifier for a fleet to update runtime configuration for. You
can use either the fleet ID or ARN value.}
\item{RuntimeConfiguration}{[required] Instructions for launching server processes on each instance in the
fleet. Server processes run either a custom game build executable or a
Realtime Servers script. The runtime configuration lists the types of
server processes to run on an instance and includes the following
configuration settings: the server executable or launch script file,
launch parameters, and the number of processes to run concurrently on
each instance. A CreateFleet request must include a runtime
configuration with at least one server process configuration.}
}
\description{
Updates the current runtime configuration for the specified fleet, which
tells Amazon GameLift how to launch server processes on instances in the
fleet. You can update a fleet's runtime configuration at any time after
the fleet is created; it does not need to be in an \code{ACTIVE} status.
To update runtime configuration, specify the fleet ID and provide a
\code{RuntimeConfiguration} object with an updated set of server process
configurations.
Each instance in an Amazon GameLift fleet checks regularly for an updated
runtime configuration and changes how it launches server processes to
comply with the latest version. Existing server processes are not
affected by the update; runtime configuration changes are applied
gradually as existing processes shut down and new processes are launched
during Amazon GameLift's normal process recycling activity.
\strong{Learn more}
\href{https://docs.aws.amazon.com/gamelift/latest/developerguide/fleets-intro.html}{Setting up GameLift Fleets}
\strong{Related operations}
\itemize{
\item CreateFleet
\item ListFleets
\item DeleteFleet
\item DescribeFleetAttributes
\item Update fleets:
\itemize{
\item UpdateFleetAttributes
\item UpdateFleetCapacity
\item UpdateFleetPortSettings
\item UpdateRuntimeConfiguration
}
\item StartFleetActions or StopFleetActions
}
}
\section{Request syntax}{
\preformatted{svc$update_runtime_configuration(
FleetId = "string",
RuntimeConfiguration = list(
ServerProcesses = list(
list(
LaunchPath = "string",
Parameters = "string",
ConcurrentExecutions = 123
)
),
MaxConcurrentGameSessionActivations = 123,
GameSessionActivationTimeoutSeconds = 123
)
)
}
}
\keyword{internal}
|
# Check genotype (SNP control probe) consistency for placenta samples with
# ewastools, to flag possible sample contamination or mislabelling.
# `RGsetChara` is the *name* (a character string) of an RGChannelSet object
# in the calling environment; it is fetched with get().
CheckGenotype <- function(RGsetChara){
  registerDoParallel(cores=10)
  # Fetch the RGChannelSet object from its name.
  RGset <- get(RGsetChara)
  # Per-sample metadata.
  phenoData <- pData(RGset)
  # Convert phenoData to a data.table, keeping sample IDs as a column.
  phenoData.dt <- phenoData %>% as.data.frame() %>% rownames_to_column() %>% setDT()
  # Read idat files and compute raw (non-normalised) betas.
  # NOTE(review): assumes phenoData has a `filenames` column of idat paths -- confirm.
  sampleDirs <- phenoData$filenames
  meth = read_idats(idat_files = sampleDirs,quiet=FALSE)
  beta = dont_normalize(meth)
  # Detection p-values per probe/sample.
  detP <- minfi::detectionP(RGset)
  # Betas for the SNP ("rs") control probes only.
  snps = meth$manifest[probe_type=='rs',index]
  snps = beta[snps,]
  genotypes = call_genotypes(snps,learn=TRUE)
  if(ncol(RGset)==1){
    # Single-sample case: compute the average outlier log-odds manually.
    # NOTE(review): this replicates what snp_outliers() does for a matrix -- confirm.
    if (!"outliers" %in% names(genotypes))
      stop("Invalid argument")
    log_odds = genotypes$outliers/(1 - genotypes$outliers)
    log_odds = mean(log2(log_odds), na.rm = TRUE)
    phenoData.dt$log_odds = log_odds
  }else{
    phenoData.dt$log_odds = snp_outliers(genotypes)
  }
  # Samples with average log odds above -4 are inspected as possible outliers.
  table(phenoData.dt$log_odds > -4)
  phenoData.dt[phenoData.dt$log_odds > -4, ] %>% arrange(desc(log_odds))
  # Mixture-model plot of SNP-probe intensities; probes outside the assumed
  # distribution can indicate technical variance (e.g. tissue contamination).
  mxm_(genotypes)
  # Plot mean detection p-values and per-sample log odds.
  # NOTE(review): `pal` is not defined in this file -- it must exist in the
  # calling environment.
  par(mfrow=c(2,1))
  barplot(colMeans(detP), col=pal[6], las=2,
          cex.names=0.8, ylim=c(0,0.01),
          xlab = "",
          ylab = "Mean detection p-values",
          axisnames = FALSE)
  abline(h=0.01,col="red")
  barplot(phenoData.dt$log_odds,
          xlab = "placental tissue samples(n=53)",
          ylab = "Average log odds")
  abline(h=-4,col="red")
  rm(meth)
  # Return detection p-values and the annotated phenoData (with log_odds).
  return(list(detP=detP, phenoData.dt=as_tibble(phenoData.dt)))
}
| /R_Functions/GEOdataAnalysis_CheckGenotypeFUN.R | permissive | QianhuiWan/MethylationArray_Placenta | R | false | false | 1,982 | r |
# Check genotype (SNP control probe) consistency for placenta samples with
# ewastools, to flag possible sample contamination or mislabelling.
# `RGsetChara` is the *name* (a character string) of an RGChannelSet object
# in the calling environment; it is fetched with get().
CheckGenotype <- function(RGsetChara){
  registerDoParallel(cores=10)
  # Fetch the RGChannelSet object from its name.
  RGset <- get(RGsetChara)
  # Per-sample metadata.
  phenoData <- pData(RGset)
  # Convert phenoData to a data.table, keeping sample IDs as a column.
  phenoData.dt <- phenoData %>% as.data.frame() %>% rownames_to_column() %>% setDT()
  # Read idat files and compute raw (non-normalised) betas.
  # NOTE(review): assumes phenoData has a `filenames` column of idat paths -- confirm.
  sampleDirs <- phenoData$filenames
  meth = read_idats(idat_files = sampleDirs,quiet=FALSE)
  beta = dont_normalize(meth)
  # Detection p-values per probe/sample.
  detP <- minfi::detectionP(RGset)
  # Betas for the SNP ("rs") control probes only.
  snps = meth$manifest[probe_type=='rs',index]
  snps = beta[snps,]
  genotypes = call_genotypes(snps,learn=TRUE)
  if(ncol(RGset)==1){
    # Single-sample case: compute the average outlier log-odds manually.
    # NOTE(review): this replicates what snp_outliers() does for a matrix -- confirm.
    if (!"outliers" %in% names(genotypes))
      stop("Invalid argument")
    log_odds = genotypes$outliers/(1 - genotypes$outliers)
    log_odds = mean(log2(log_odds), na.rm = TRUE)
    phenoData.dt$log_odds = log_odds
  }else{
    phenoData.dt$log_odds = snp_outliers(genotypes)
  }
  # Samples with average log odds above -4 are inspected as possible outliers.
  table(phenoData.dt$log_odds > -4)
  phenoData.dt[phenoData.dt$log_odds > -4, ] %>% arrange(desc(log_odds))
  # Mixture-model plot of SNP-probe intensities; probes outside the assumed
  # distribution can indicate technical variance (e.g. tissue contamination).
  mxm_(genotypes)
  # Plot mean detection p-values and per-sample log odds.
  # NOTE(review): `pal` is not defined in this file -- it must exist in the
  # calling environment.
  par(mfrow=c(2,1))
  barplot(colMeans(detP), col=pal[6], las=2,
          cex.names=0.8, ylim=c(0,0.01),
          xlab = "",
          ylab = "Mean detection p-values",
          axisnames = FALSE)
  abline(h=0.01,col="red")
  barplot(phenoData.dt$log_odds,
          xlab = "placental tissue samples(n=53)",
          ylab = "Average log odds")
  abline(h=-4,col="red")
  rm(meth)
  # Return detection p-values and the annotated phenoData (with log_odds).
  return(list(detP=detP, phenoData.dt=as_tibble(phenoData.dt)))
}
|
### Get the parameters
# Command-line interface: peak BED file, raw peak x cell count table,
# output directory and sample ID.
parser = argparse::ArgumentParser(description="Run cicero")
parser$add_argument('-P','--peak', help='the peak file')
parser$add_argument('-C','--count', help='peak raw count file')
parser$add_argument('-O','--out', help='outpath')
parser$add_argument('-S','--sample', help='sample ID')
args = parser$parse_args()
###
library("cicero")
library("reshape2")
# Build the long-format (Peak, Cell, Count) table expected by make_atac_cds():
# peak IDs (BED column 4) are mapped to "chr_start_end" positions built from
# BED columns 1-3, then joined onto the count matrix and melted.
peak=read.table(args$peak)
count=read.table(args$count,header = T)
count$peak_ID=row.names(count)
peak=peak[,c(1:4)]
peak$peak_pos=paste(peak$V1,peak$V2,peak$V3,sep = "_")
peak=peak[,c(4,5)]
colnames(peak)=c("peak_ID","peak_pos")
merge=merge(peak,count,by="peak_ID")
merge=merge[,-1]
melt=melt(merge,id.vars ="peak_pos")
colnames(melt)=c("Peak","Cell","Count")
cicero_data=melt
# Cicero/monocle pipeline: binarised CDS, aggregate nearby peaks, estimate
# size factors and dispersions, tSNE embedding, then cluster the cells.
input_cds <- make_atac_cds(cicero_data, binarize = TRUE)
agg_cds <- aggregate_nearby_peaks(input_cds)
agg_cds <- detectGenes(agg_cds)
agg_cds <- estimateSizeFactors(agg_cds)
agg_cds <- estimateDispersions(agg_cds)
agg_cds <- reduceDimension(agg_cds,
                           max_components = 2,
                           norm_method = 'log',
                           num_dim = 6,
                           reduction_method = 'tSNE',
                           verbose = T)
agg_cds <- clusterCells(agg_cds, verbose = F)
# Save the per-sample cluster plot.
pdf(paste0(args$out,"/","Cicero_cluster_",args$sample,".pdf"))
plot_cell_clusters(agg_cds, color_by = 'as.factor(Cluster)')
dev.off()
| /scripts/run_cicero.R | permissive | liuyang0681/scATACworkflow | R | false | false | 1,430 | r | ### Get the parameters
# Command-line interface: peak BED file, raw peak x cell count table,
# output directory and sample ID.
parser = argparse::ArgumentParser(description="Run cicero")
parser$add_argument('-P','--peak', help='the peak file')
parser$add_argument('-C','--count', help='peak raw count file')
parser$add_argument('-O','--out', help='outpath')
parser$add_argument('-S','--sample', help='sample ID')
args = parser$parse_args()
###
library("cicero")
library("reshape2")
# Build the long-format (Peak, Cell, Count) table expected by make_atac_cds():
# peak IDs (BED column 4) are mapped to "chr_start_end" positions built from
# BED columns 1-3, then joined onto the count matrix and melted.
peak=read.table(args$peak)
count=read.table(args$count,header = T)
count$peak_ID=row.names(count)
peak=peak[,c(1:4)]
peak$peak_pos=paste(peak$V1,peak$V2,peak$V3,sep = "_")
peak=peak[,c(4,5)]
colnames(peak)=c("peak_ID","peak_pos")
merge=merge(peak,count,by="peak_ID")
merge=merge[,-1]
melt=melt(merge,id.vars ="peak_pos")
colnames(melt)=c("Peak","Cell","Count")
cicero_data=melt
# Cicero/monocle pipeline: binarised CDS, aggregate nearby peaks, estimate
# size factors and dispersions, tSNE embedding, then cluster the cells.
input_cds <- make_atac_cds(cicero_data, binarize = TRUE)
agg_cds <- aggregate_nearby_peaks(input_cds)
agg_cds <- detectGenes(agg_cds)
agg_cds <- estimateSizeFactors(agg_cds)
agg_cds <- estimateDispersions(agg_cds)
agg_cds <- reduceDimension(agg_cds,
                           max_components = 2,
                           norm_method = 'log',
                           num_dim = 6,
                           reduction_method = 'tSNE',
                           verbose = T)
agg_cds <- clusterCells(agg_cds, verbose = F)
# Save the per-sample cluster plot.
pdf(paste0(args$out,"/","Cicero_cluster_",args$sample,".pdf"))
plot_cell_clusters(agg_cds, color_by = 'as.factor(Cluster)')
dev.off()
|
library(shiny)
library(shinydashboard)
# Dashboard UI: two tabs -- "Mon logement" collects dwelling characteristics,
# "Tableau de bord" shows temperature targets and consumption estimates
# (the *Box outputs are rendered server-side).
shinyUI( dashboardPage(
  skin = "green",
  dashboardHeader(
    title = "Ecodomia"
  ),
  dashboardSidebar(
    # Menu items of the sidebar menu panel
    sidebarMenu(
      menuItem("Mon logement", tabName = "logement", icon = icon("home")),
      menuItem("Tableau de bord", tabName = "dashboard", icon = icon("dashboard"))
    )
  ),
  dashboardBody(
    # Generates HTML head tag to include CSS and Fonts
    tags$head(
      tags$link(rel = "stylesheet", type = "text/css", href = "custom.css"),
      tags$link(href='https://fonts.googleapis.com/css?family=Raleway:400,700', rel='stylesheet', type='text/css')
    ),
    tabItems(
      tabItem(tabName = "logement", # BEGIN FIRST TAB
              # Dwelling inputs: location, size, rooms, ceiling height, floors.
              fluidRow(
                box(title = "Localisation", solidHeader = TRUE, status = "success",
                    numericInput("postcode", label = "Code postal", value = 75015)
                )
              ),
              fluidRow(
                box(title = "Taille du logement", solidHeader = TRUE, status = "success",
                    numericInput("surface", label = "Surface", value = 50)
                ),
                box(title = "Nombre de pièces", solidHeader = TRUE, status = "success",
                    numericInput("rooms",label = "", value = 3)
                )
              ),
              fluidRow(
                box(title = "Hauteur de plafond", solidHeader = TRUE, status = "success",
                    numericInput("height",label = "", value = 3)
                ),
                box(title = "Nombre d'étages", solidHeader = TRUE, status = "success",
                    numericInput("floors",label = "", value = 5)
                )
              ),
              fluidRow(
                # NOTE(review): the TRUE/FALSE choice values are coerced to the
                # strings "TRUE"/"FALSE" in the browser; server code must
                # compare against those strings, not logicals -- confirm.
                box(title = "Date de construction", solidHeader = TRUE, status = "success",
                    radioButtons("construction",
                                 label = "",
                                 choices = c("Avant 1990" = TRUE,
                                             "Après 1990" = FALSE))
                ),
                box(title = "Type de chauffage", solidHeader = TRUE, status = "success",
                    radioButtons("heating",
                                 label = "",
                                 choices = c("Electricite" = "electricite",
                                             "Gaz" = "gaz",
                                             "Autre" = "autre"))
                )
              )
      ), # END FIRST TAB
      tabItem(tabName = "dashboard", # BEGIN SECOND TAB
              # Temperature range slider plus server-rendered info/value boxes.
              fluidRow(
                column(width = 10, offset = 1,
                       box(width = 12 ,
                           fluidRow( column(width = 12, sliderInput("sliderMin", "Choisisez votre minimum et votre objectif de temperature:",
                                                                    min = 15, max = 21, value = c(17,20)))),
                           fluidRow(infoBoxOutput("progressBox" , width = 6),
                                    infoBoxOutput("approvalBox" , width = 6)
                           )
                       )
                )
              ),
              fluidRow(
                column(width = 6 , offset = 3,
                       valueBoxOutput("estimateBox", width = '100%')
                )
              ),
              fluidRow(
                column(width = 10, offset = 1,
                       box(
                         checkboxInput("peak", label = tagList(shiny::icon("area-chart"), "Eviter les pics de consommation"), value = TRUE),
                         valueBoxOutput("peakBox" , width = '100%')
                       ),
                       box(
                         checkboxInput("green", label = tagList(shiny::icon("leaf"),"Favoriser l'energie verte"), value = TRUE),
                         valueBoxOutput("energieBox" , width = '100%')
                       )
                ))
      ) # END SECOND TAB
    ) # END TABS
  ) # END BODY
) # END PAGE
) # END ShinyUI | /ui.R | no_license | dstoiko/hackathon-datapower | R | false | false | 4,411 | r | library(shiny)
library(shinydashboard)
# Dashboard UI: two tabs -- "Mon logement" collects dwelling characteristics,
# "Tableau de bord" shows temperature targets and consumption estimates
# (the *Box outputs are rendered server-side).
shinyUI( dashboardPage(
  skin = "green",
  dashboardHeader(
    title = "Ecodomia"
  ),
  dashboardSidebar(
    # Menu items of the sidebar menu panel
    sidebarMenu(
      menuItem("Mon logement", tabName = "logement", icon = icon("home")),
      menuItem("Tableau de bord", tabName = "dashboard", icon = icon("dashboard"))
    )
  ),
  dashboardBody(
    # Generates HTML head tag to include CSS and Fonts
    tags$head(
      tags$link(rel = "stylesheet", type = "text/css", href = "custom.css"),
      tags$link(href='https://fonts.googleapis.com/css?family=Raleway:400,700', rel='stylesheet', type='text/css')
    ),
    tabItems(
      tabItem(tabName = "logement", # BEGIN FIRST TAB
              # Dwelling inputs: location, size, rooms, ceiling height, floors.
              fluidRow(
                box(title = "Localisation", solidHeader = TRUE, status = "success",
                    numericInput("postcode", label = "Code postal", value = 75015)
                )
              ),
              fluidRow(
                box(title = "Taille du logement", solidHeader = TRUE, status = "success",
                    numericInput("surface", label = "Surface", value = 50)
                ),
                box(title = "Nombre de pièces", solidHeader = TRUE, status = "success",
                    numericInput("rooms",label = "", value = 3)
                )
              ),
              fluidRow(
                box(title = "Hauteur de plafond", solidHeader = TRUE, status = "success",
                    numericInput("height",label = "", value = 3)
                ),
                box(title = "Nombre d'étages", solidHeader = TRUE, status = "success",
                    numericInput("floors",label = "", value = 5)
                )
              ),
              fluidRow(
                # NOTE(review): the TRUE/FALSE choice values are coerced to the
                # strings "TRUE"/"FALSE" in the browser; server code must
                # compare against those strings, not logicals -- confirm.
                box(title = "Date de construction", solidHeader = TRUE, status = "success",
                    radioButtons("construction",
                                 label = "",
                                 choices = c("Avant 1990" = TRUE,
                                             "Après 1990" = FALSE))
                ),
                box(title = "Type de chauffage", solidHeader = TRUE, status = "success",
                    radioButtons("heating",
                                 label = "",
                                 choices = c("Electricite" = "electricite",
                                             "Gaz" = "gaz",
                                             "Autre" = "autre"))
                )
              )
      ), # END FIRST TAB
      tabItem(tabName = "dashboard", # BEGIN SECOND TAB
              # Temperature range slider plus server-rendered info/value boxes.
              fluidRow(
                column(width = 10, offset = 1,
                       box(width = 12 ,
                           fluidRow( column(width = 12, sliderInput("sliderMin", "Choisisez votre minimum et votre objectif de temperature:",
                                                                    min = 15, max = 21, value = c(17,20)))),
                           fluidRow(infoBoxOutput("progressBox" , width = 6),
                                    infoBoxOutput("approvalBox" , width = 6)
                           )
                       )
                )
              ),
              fluidRow(
                column(width = 6 , offset = 3,
                       valueBoxOutput("estimateBox", width = '100%')
                )
              ),
              fluidRow(
                column(width = 10, offset = 1,
                       box(
                         checkboxInput("peak", label = tagList(shiny::icon("area-chart"), "Eviter les pics de consommation"), value = TRUE),
                         valueBoxOutput("peakBox" , width = '100%')
                       ),
                       box(
                         checkboxInput("green", label = tagList(shiny::icon("leaf"),"Favoriser l'energie verte"), value = TRUE),
                         valueBoxOutput("energieBox" , width = '100%')
                       )
                ))
      ) # END SECOND TAB
    ) # END TABS
  ) # END BODY
) # END PAGE
) # END ShinyUI |
# Analysis of house elf data
library(dplyr)
# Import the house elf ear-length/DNA measurements (path relative to the
# project root).
dat <- read.csv("data/houseelf_earlength_dna_data_1.csv")
#' Classify a weight as "large" or "small".
#'
#' @param weight Numeric body weight.
#' @param threshold Weights strictly greater than this value are "large".
#' @return "large" if weight > threshold, otherwise "small".
get_size_class <- function(weight, threshold) {
  # Fix: `return()` originally sat on the same line as the closing brace
  # (`} return(size_class)`), which is a syntax error in R.
  if (weight > threshold) {
    size_class <- "large"
  } else {
    size_class <- "small"
  }
  size_class
}
#' Add a size_class column ("large"/"small") to house-elf data.
#'
#' Rows with any missing values are dropped, then each elf is classified
#' by weight via get_size_class() with a threshold of 50.
#'
#' @param df Data frame with a numeric `weight` column.
#' @return The filtered data frame with an added `size_class` column.
add_size_class <- function(df){
  data_w_size_class <-
    df %>%
    na.omit() %>%
    rowwise() %>%  # fix: was misspelled `rowise()`, which does not exist
    mutate(size_class = get_size_class(weight, 50)) %>%
    ungroup()      # drop the row-wise grouping before returning
  return(data_w_size_class)
}
| /houseself-analysis.r | no_license | sjgraves/class_example_timeseries | R | false | false | 485 | r | # Analysis of house elf data
library(dplyr)
# Import the house elf ear-length/DNA measurements (path relative to the
# project root).
dat <- read.csv("data/houseelf_earlength_dna_data_1.csv")
#' Classify a weight as "large" or "small".
#'
#' @param weight Numeric body weight.
#' @param threshold Weights strictly greater than this value are "large".
#' @return "large" if weight > threshold, otherwise "small".
get_size_class <- function(weight, threshold) {
  # Fix: `return()` originally sat on the same line as the closing brace
  # (`} return(size_class)`), which is a syntax error in R.
  if (weight > threshold) {
    size_class <- "large"
  } else {
    size_class <- "small"
  }
  size_class
}
#' Add a size_class column ("large"/"small") to house-elf data.
#'
#' Rows with any missing values are dropped, then each elf is classified
#' by weight via get_size_class() with a threshold of 50.
#'
#' @param df Data frame with a numeric `weight` column.
#' @return The filtered data frame with an added `size_class` column.
add_size_class <- function(df){
  data_w_size_class <-
    df %>%
    na.omit() %>%
    rowwise() %>%  # fix: was misspelled `rowise()`, which does not exist
    mutate(size_class = get_size_class(weight, 50)) %>%
    ungroup()      # drop the row-wise grouping before returning
  return(data_w_size_class)
}
|
# knotR worker script: optimise the projection of knot 8_21 stored in 8_21.svg.
library(knotR)
filename <- "8_21.svg"
a <- reader(filename)
#knotplot2(a,node=TRUE)
# Symmetry constraint for this knot (see knotR::symmetry_object);
# xver=4 -- TODO confirm which strands are mirrored.
sym821 <- symmetry_object(a,Mver=NULL,xver=4)
a <- symmetrize(a,sym821)
#knotplot2(a)
#knotplot2(a,text=TRUE,lwd=1,circ=FALSE)
# Crossing matrix: each row pairs two strand indices at a crossing
# (presumably over/under order -- confirm against knotR docs).
ou821 <- matrix(c(
09,02,
03,12,
15,06,
07,23,
18,08,
13,19,
22,14,
23,18
),ncol=2,byrow=TRUE)
#knotplot(a,ou821)
# Numerically optimise the knot projection subject to the symmetry and
# crossing constraints.
jj <-
    knotoptim(filename,
              symobj = sym821,
              ou = ou821,
              prob = 0,
              iterlim=1000,print.level=2,hessian=FALSE
#              control=list(trace=100,maxit=1000), # these arguments for optim()
#              useNLM=FALSE
              )
# Write the optimised diagram back to the SVG and dump the fitted object
# to 8_21.S for later reuse.
write_svg(jj, filename,safe=FALSE)
dput(jj,file=sub('.svg','.S',filename))
| /inst/8_21_worker.R | no_license | cran/knotR | R | false | false | 748 | r | library(knotR)
# knotR worker script: optimise the projection of knot 8_21 stored in 8_21.svg.
filename <- "8_21.svg"
a <- reader(filename)
#knotplot2(a,node=TRUE)
# Symmetry constraint for this knot (see knotR::symmetry_object);
# xver=4 -- TODO confirm which strands are mirrored.
sym821 <- symmetry_object(a,Mver=NULL,xver=4)
a <- symmetrize(a,sym821)
#knotplot2(a)
#knotplot2(a,text=TRUE,lwd=1,circ=FALSE)
# Crossing matrix: each row pairs two strand indices at a crossing
# (presumably over/under order -- confirm against knotR docs).
ou821 <- matrix(c(
09,02,
03,12,
15,06,
07,23,
18,08,
13,19,
22,14,
23,18
),ncol=2,byrow=TRUE)
#knotplot(a,ou821)
# Numerically optimise the knot projection subject to the symmetry and
# crossing constraints.
jj <-
    knotoptim(filename,
              symobj = sym821,
              ou = ou821,
              prob = 0,
              iterlim=1000,print.level=2,hessian=FALSE
#              control=list(trace=100,maxit=1000), # these arguments for optim()
#              useNLM=FALSE
              )
# Write the optimised diagram back to the SVG and dump the fitted object
# to 8_21.S for later reuse.
write_svg(jj, filename,safe=FALSE)
dput(jj,file=sub('.svg','.S',filename))
|
# UCI HAR ("Getting and Cleaning Data" course project): build a tidy data set
# of per-subject, per-activity feature averages and write it to tidyData.txt.
setwd('/Users/kindaixin/Dropbox-work/Dropbox/coursera/03 Getting and Cleaning Data/Project');
#1. Load the test and training sets
subjectTrain <- read.table("./UCI HAR Dataset/train/subject_train.txt")
xTrain <- read.table("./UCI HAR Dataset/train/X_train.txt")
yTrain <- read.table("./UCI HAR Dataset/train/Y_train.txt")
subjectTest <- read.table("./UCI HAR Dataset/test/subject_test.txt")
xTest <- read.table("./UCI HAR Dataset/test/X_test.txt")
yTest <- read.table("./UCI HAR Dataset/test/Y_test.txt")
#2. Merge train and test into one data set (subject, activity, features)
subjectCombined <-rbind(subjectTrain,subjectTest)
xCombinded <- rbind(xTrain,xTest)
yCombinded <- rbind(yTrain,yTest)
library(plyr)
dataMerged <- cbind(subjectCombined,yCombinded,xCombinded)
#3. Set meaningful column names; "-mean()"/"-std()" become "Mean"/"StandardDeviation"
features <- read.table("./UCI HAR Dataset/features.txt")
features[,2] <- gsub("-mean[(][)]","Mean",features[,2] )
features[,2] <- gsub("-std[(][)]","StandardDeviation",features[,2] )
colnames(dataMerged) <- c("Subject","ActivityId",as.character(features$V2))
#4. Join with the activity labels to get descriptive activity names
activities_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")
colnames(activities_labels) <- c("ActivityId","Activity")
dataAll <- merge(dataMerged,activities_labels,by.x="ActivityId",by.y="ActivityId")
#5. Rearrange the columns to display Subject, ActivityId then Activity and finally all features
dataReordered <- dataAll[c(2,1,564,3:563)]
#6. Keep the mean/std feature columns
# NOTE(review): grep("Mean") only matches the renamed *Mean* features;
# "StandardDeviation" columns are dropped -- confirm this is intended.
colToAdd <- grep("Mean",names(dataReordered))
dataFiltered <- dataReordered[,c(1:3,colToAdd)]
#7. Average each selected feature for each activity and each subject
dataMean <- aggregate(dataFiltered, by = list(Activity = dataFiltered$Activity, Subject = dataFiltered$Subject), mean)
# Drop the duplicated grouping columns before writing
dataFinal <- dataMean[c(1:2,6:45)]
#8. Write the tidy data set to file
write.table(dataFinal,file="./tidyData.txt",row.names=FALSE)
| /run_analysis.R | no_license | jaxxie/Coursera-Getting-and-Cleaning-Data-Course-Project | R | false | false | 1,916 | r | setwd('/Users/kindaixin/Dropbox-work/Dropbox/coursera/03 Getting and Cleaning Data/Project');
# UCI HAR ("Getting and Cleaning Data" course project): build a tidy data set
# of per-subject, per-activity feature averages and write it to tidyData.txt.
#1. Load the test and training sets
subjectTrain <- read.table("./UCI HAR Dataset/train/subject_train.txt")
xTrain <- read.table("./UCI HAR Dataset/train/X_train.txt")
yTrain <- read.table("./UCI HAR Dataset/train/Y_train.txt")
subjectTest <- read.table("./UCI HAR Dataset/test/subject_test.txt")
xTest <- read.table("./UCI HAR Dataset/test/X_test.txt")
yTest <- read.table("./UCI HAR Dataset/test/Y_test.txt")
#2. Merge train and test into one data set (subject, activity, features)
subjectCombined <-rbind(subjectTrain,subjectTest)
xCombinded <- rbind(xTrain,xTest)
yCombinded <- rbind(yTrain,yTest)
library(plyr)
dataMerged <- cbind(subjectCombined,yCombinded,xCombinded)
#3. Set meaningful column names; "-mean()"/"-std()" become "Mean"/"StandardDeviation"
features <- read.table("./UCI HAR Dataset/features.txt")
features[,2] <- gsub("-mean[(][)]","Mean",features[,2] )
features[,2] <- gsub("-std[(][)]","StandardDeviation",features[,2] )
colnames(dataMerged) <- c("Subject","ActivityId",as.character(features$V2))
#4. Join with the activity labels to get descriptive activity names
activities_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")
colnames(activities_labels) <- c("ActivityId","Activity")
dataAll <- merge(dataMerged,activities_labels,by.x="ActivityId",by.y="ActivityId")
#5. Rearrange the columns to display Subject, ActivityId then Activity and finally all features
dataReordered <- dataAll[c(2,1,564,3:563)]
#6. Keep the mean/std feature columns
# NOTE(review): grep("Mean") only matches the renamed *Mean* features;
# "StandardDeviation" columns are dropped -- confirm this is intended.
colToAdd <- grep("Mean",names(dataReordered))
dataFiltered <- dataReordered[,c(1:3,colToAdd)]
#7. Average each selected feature for each activity and each subject
dataMean <- aggregate(dataFiltered, by = list(Activity = dataFiltered$Activity, Subject = dataFiltered$Subject), mean)
# Drop the duplicated grouping columns before writing
dataFinal <- dataMean[c(1:2,6:45)]
#8. Write the tidy data set to file
write.table(dataFinal,file="./tidyData.txt",row.names=FALSE)
|
###########################################################
# Functions for parallelization and fitting of GLM models
###########################################################
#' .fitGLM
#'
#' Fit one negative-binomial GLM per (pattern, gene) pair. Parallelizes
#' across patterns with furrr/future; within each pattern, models for all
#' genes are fit by applying fit_helper() over the rows of the counts matrix.
#'
#' @param countsMatrix Matrix of gene expression counts (genes x cells/samples).
#' @param annotDF A dataframe with annotations for each cell or sample in the gene expression dataset.
#' @param model_formula_str A string specifying the model to fit, in the format "~ var1 + var2".
#' @param projected_patterns A matrix of projected pattern weights used as an explanatory variable; colnames are patterns, rownames are cells. Must have viable column names.
#' @param cores Integer number of workers passed to future::plan(multicore).
#' @param geneDF Optional dataframe of per-gene annotations, bound onto each per-pattern result.
#'
#' @return A named list (one element per pattern) of tibbles; each tibble has
#'   one row per gene with `model` and `model_summary` columns.
#' @import monocle3
#' @import MASS
#' @import Matrix
#' @import RhpcBLASctl
#' @import future
#' @import furrr
.fitGLM <- function(countsMatrix, annotDF, model_formula_str, projected_patterns, cores, geneDF= NULL){
  pattern_names <- colnames(projected_patterns)
  genes <- rownames(countsMatrix)
  message(paste0("Fitting models for ", length(pattern_names), " patterns and ", length(genes), " genes"))
  # Fix: was warnings(), which prints previously accumulated warnings instead
  # of signalling a new one; also hoist the zero-expression count so it is
  # computed once instead of twice.
  n_zero <- sum(Matrix::rowSums(countsMatrix) == 0)
  if(n_zero > 0){
    warning(paste0(n_zero, " genes have zero expression and will not be successfully fit. It is recommended to remove them before running."))
  }
  # Pin BLAS/OpenMP to one thread to limit conflicts with furrr multicore workers.
  RhpcBLASctl::omp_set_num_threads(1)
  RhpcBLASctl::blas_set_num_threads(1)
  # Open multicore operations (future parallelization across patterns).
  plan(multicore, workers = cores)
  full_glm_models <- furrr::future_map(pattern_names, function(pattern_name){
    message(paste0(date(),": working on pattern ",pattern_name))
    thisPatt <- projected_patterns[,pattern_name]
    # Fit one pattern across all genes (one model per row of countsMatrix).
    glm_models <- apply(countsMatrix,1,fit_helper,thisPattern=thisPatt,model_formula_string=model_formula_str,annotDF=annotDF)
    # Transpose the list-of-lists into a tibble: one row per gene.
    glm_models_df <- tibble::as_tibble(purrr::transpose(glm_models))
    # Attach per-gene annotations when provided.
    if(!is.null(geneDF)){
      # NOTE(review): assumes geneDF rows align with rownames(countsMatrix) -- confirm.
      glm_models_df <- dplyr::bind_cols(geneDF, glm_models_df)
    }else{
      glm_models_df <- glm_models_df %>%
        dplyr::mutate(gene_id = names(glm_models))
    }
    # Returns the tibble holding the full model per gene (memory intensive);
    # see extractCoefficients() for a summarized, lighter-weight view.
    return(glm_models_df)
  })
  # Close multicore operations.
  plan(sequential)
  names(full_glm_models) <- pattern_names
  return(full_glm_models)
}
#' fit_helper
#'
#' Fit a negative-binomial GLM for a single gene. Intended to be called via
#' `apply(countsMatrix, 1, fit_helper, ...)`, so `thisGene` is the counts
#' vector for one gene across all cells/samples.
#'
#' The counts are merged onto the annotations by row name (`by = 0`); because
#' merge() converts the unnamed vector argument with `as.data.frame(y)`, the
#' resulting response column is literally named "y", which is what the
#' formula construction below relies on.
#'
#' @param thisGene Numeric vector of counts for one gene (one row of the counts matrix), named by cell/sample.
#' @param thisPattern Numeric vector of projected pattern weights, one per cell/sample; exposed to the formula as `patternWeights`.
#' @param model_formula_string Model RHS as a string beginning with "~", e.g. "~ patternWeights".
#' @param annotDF Dataframe of per-cell/sample annotations; its rownames must match the names of `thisGene`.
#'
#' @return A list with elements `model` (the glm.nb fit) and `model_summary`;
#'   on fitting error both elements are NA so callers can keep iterating.
#' @importFrom stats as.formula
#' @import MASS
#' @import dplyr
#' @export
#'
# #' @examples
fit_helper <- function(thisGene, thisPattern, model_formula_string, annotDF){
  # Assemble per-sample annotations, gene counts ("y") and pattern weights.
  datFrame <- as.data.frame(merge(annotDF,thisGene,by=0,sort=FALSE))
  datFrame$patternWeights <- thisPattern
  # Prepend the response to the user-supplied RHS, e.g. "y~ patternWeights".
  model_formula_str <- paste("y", model_formula_string, sep = "")
  model_formula = stats::as.formula(model_formula_str)
  # Fit; on error fall back to NA placeholders instead of aborting the apply().
  tryCatch({
    FM_fit <- glm.nb(model_formula,
                     data = datFrame, link = log, epsilon = 1e-3,
                     control = glm.control(maxit = 50))
    FM_summary = summary(FM_fit)
    df = list(model = FM_fit, model_summary = FM_summary)
  }, error = function(e){
    error_df <- list(model = NA, model_summary = NA)
    return(error_df) # NA placeholders; don't stop iterating over genes
  })
}
#' fitGLMpd
#' Uses a GLM to identify the set of genes used in a pattern for a given cell type or condition
#' @param countsMatrix matrix of gene expression counts. genes x cells / samples.
#' @param annotDF A dataframe with annotations for each cell or sample in the gene expression dataset
#' @param model_formula_str A string specifying the model to fit, in the format "~ var1 + var2"
#' @param projected_patterns A matrix of projected pattern weights to use as an explanatory variable in the model fit for each gene. colnames are patterns, rownames are cells. Must have viable column names.
#' @param cores Integer defining the number of cores to use for parallelization of model fitting across genes.
#' @param exp_family The regression family to use (default: 'negbinomial')
#' @param clean_model Boolean. Passed through to monocle3::fit_model
#' @param verbose Boolean. Verbose output for model fitting.
#' @param result Return full or summarized results. Summarized results are stripped of memory-intensive models and return coefficients from extractCoefficients().
#'
#' @return Returns a list of fitted models (output similar to monocle3::fit_model)
#' @import monocle3
#' @import SummarizedExperiment
#' @export
#'
# #' @examples
setGeneric("fitGLMpd", function(object, ..., verbose=TRUE) standardGeneric("fitGLMpd"),
signature = "object")
# Method for cell_data_set: pulls the counts from the named assay, per-cell
# annotations from colData and, unless supplied, per-gene annotations from
# rowData, then delegates to the .fitGLM workhorse.
setMethod("fitGLMpd",signature(object="cell_data_set"), function(object, model_formula_str, projected_patterns,cores=1, count_assay = "counts", geneDF = NULL){ #,exp_family="negbinomial",cores,clean_model=T,verbose=T, result = "full_model"){
#TODO: also accept SingleCellExperiment (and SummarizedExperiment?)
cds <- object
# genes x cells counts matrix taken from the requested assay
countsMatrix <- assay(cds, count_assay)
annotDF <- colData(cds) %>% as.data.frame()
if(is.null(geneDF)){
geneDF <- rowData(cds) %>% as.data.frame()
}
.fitGLM(countsMatrix, annotDF, model_formula_str, projected_patterns,cores, geneDF)
})
# Method for a plain counts matrix: caller supplies annotDF directly; no
# geneDF is forwarded, so .fitGLM falls back to labelling rows by gene_id.
setMethod("fitGLMpd",signature(object="matrix"), function(object, annotDF, model_formula_str, projected_patterns,cores=1){ #,exp_family="negbinomial",cores,clean_model=T,verbose=T, result = "full_model"){
countsMatrix <- object
.fitGLM(countsMatrix, annotDF, model_formula_str, projected_patterns,cores)
})
###########################################################
# Functions for organizing model coefficients by genes
###########################################################
#' extractCoefficients
#' Extract coefficients from model fitting object. Each object of list requires a column exactly named "gene_id".
#' @param glm_models Named List (over patterns) of objects returned from "full_result" of fit_models_cds. Organizes each pattern by gene.
#' @param genes gene ids that exactly match the count matrix which was provided to fitGLMpd()
#' @return Returns a multileveled list of fitted model coefficients. Hierarchy of organization: pattern (list), gene (list), coefficients (data.frame)
#' @import monocle3
#' @import dplyr
#' @import purrr
#' @export
#'
#TODO: assert genes length is the same as glm_models. Allow for "gene_id" to be set manually to allow other columns to match genes on.
#TODO: is there a way to get gene names from models instead of providing... limit misnaming error ... also assumes orders are the same as is
#Generates a multi-leveled list.
#First level: pattern (named list)
#Second level: gene (named list)
#Third level: beta coefficients, columns are: coefficient name, coefficient
extractCoefficients <- function(glm_models, genes){
  extracted_coefficients <- lapply(glm_models, function(glm_model){
    # Drop full models for memory efficiency; keep only the coefficient table.
    summarized_glm_model <- glm_model %>%
      monocle3::coefficient_table() %>%
      dplyr::select(-c(model, model_summary)) %>%
      as.data.frame()
    # Organize into one coefficient data.frame per gene.
    # (purrr::map — the original called the unexported purrr:::map.)
    summarized_glm_model <- purrr::map(genes, function(gene){
      summarized_glm_model %>% dplyr::filter(gene_id == gene)
    })
    # BUG FIX: was `names(...) <- genes_of_interest`, an undefined variable;
    # the intended names are the `genes` argument itself.
    names(summarized_glm_model) <- genes
    return(summarized_glm_model)
  })
  return(extracted_coefficients)
}
#' orderCoefficientsByGene
#' Order model coefficient estimates by genes, such as for visualization by heatmap
#' @param pattern_coefficient_list List (over patterns) of objects returned from extractCoefficients(), or "summarized_result" option of fit_models_cds().
#' @param model_terms_to_keep optional. limit returned dataframe to coefficients of interest.
#' @param filter_significance optional. Numeric value at which to filter q-value significance.
#' Genes with significant coefficients for one or more terms are returned. If NULL (default), all genes returned.
#' @param string optional. all model terms that contain this string will be used for significance filtering.
#' @return Returns a list (one element per pattern) of tibbles nested by measure (estimate, p_value, ...).
#' @import dplyr
#' @import tibble
#' @importFrom tidyr nest
#' @export
#'
#Pass extracted coefficients to organize into a data frame with each gene as a row ----
#TODO: Can make this more elegant with dplyr instead of lapply
#TODO: filter out failed genes
orderCoefficientsByGene <- function(pattern_coefficient_list, model_terms_to_keep = NULL,
                                    filter_significance = NULL, string = NULL){
  # BUG FIX: filter_significance and string are documented as optional but had
  # no defaults, so omitting them raised "argument missing" at the is.null()
  # checks below. NULL defaults make the documented behavior work.
  pattern_full_names <- names(pattern_coefficient_list)
  # loop thru each pattern
  params_list <- lapply(pattern_full_names, function(pattern_full_name){
    genes <- names(pattern_coefficient_list[[pattern_full_name]])
    # loop thru each gene
    param_df <- lapply(genes, function(gene){
      # Pre-empt failed fits by creating a mini df with just the gene name;
      # failed genes get parameters filled with NA during bind_rows() below.
      param <- data.frame("gene_id" = gene)
      # Extract all parameters of interest: "term" is the parameter name,
      # "estimate" the estimated beta value.
      try(param <- pattern_coefficient_list[[pattern_full_name]][[gene]] %>%
            dplyr::select(any_of(c("term","estimate","std_err","p_value","q_value","test_val"))) %>%
            tibble::column_to_rownames(var = "term") %>%
            t() %>%
            as.data.frame() %>%
            tibble::rownames_to_column(var = "measure") %>%
            dplyr::mutate("gene_id" = gene)
      )
      # if provided, restrict parameters to only these
      if(!is.null(model_terms_to_keep)){
        param <- param %>%
          dplyr::select(any_of(model_terms_to_keep))
      }
      # Check whether the minimum q-value among terms matching `string` is
      # significant; if none are, return NULL to drop the gene entirely.
      if(!is.null(filter_significance)){
        # default to non-significant when extraction fails (e.g. failed fit)
        min_q <- 1
        try(min_q <- param %>%
              dplyr::filter(measure == "q_value") %>%
              dplyr::select(matches(string)) %>%
              min())
        if(min_q > filter_significance){
          # not significant
          return(NULL)
        }
      }
      return(param)
    })
    # For each pattern build a df with tibbles nested per measure; allows
    # downstream filtering on q_value etc.
    tbl <- dplyr::bind_rows(param_df) %>%
      dplyr::group_by(measure) %>%
      tidyr::nest() %>%
      dplyr::ungroup()
    return(tbl)
  })
  names(params_list) <- pattern_full_names
  # list of dataframes, one df per pattern
  return(params_list)
}
#' organizeEstimates
#' Generate matrices of coefficients of interest for later plotting with heatmaps.
#' @param coefficients_list List (over patterns) of objects returned from orderCoefficientsByGene()
#' @param terms_exact Character vector. Names of terms to plot which exactly match the terms of the model.
#' @param terms_match Character vector. Beginning string(s) used to match terms to plot. Necessary for interaction terms.
#' @param feature which feature to pull from the model. Default ["estimate"] is beta. Options include "q_value"...
#' @param gene_name name of column which contains gene names. Default ["gene_id"]
#' @param transpose boolean. Should matrices be transposed. Default [FALSE] returns genes as rows.
#' @return Returns a list (per pattern) of lists of matrices (per term).
#' @import dplyr
#' @import tibble
#' @import purrr
#' @importFrom tidyr unnest
#' @export
#'
## coefficients_list should be output from orderCoefficientsByGene(): a list per
## pattern containing a tibble with estimate, std error, p-value...
## with transpose = FALSE (default), rownames are genes, colnames are model parameters
organizeEstimates <- function(coefficients_list, terms_exact, terms_match, feature = "estimate", gene_name = "gene_id", transpose = FALSE){
  param_list <- purrr::map(names(coefficients_list), function(pattern_name){
    # contains tibbles for each measure, e.g. estimate, std error, p-value
    gene_coef_df <- coefficients_list[[pattern_name]] %>%
      dplyr::filter(measure == feature) %>%
      dplyr::select(data) %>%
      tidyr::unnest(cols = data)
    if(!is.null(rownames(gene_coef_df))){
      message("Warning: existing rownames may be removed")
      rownames(gene_coef_df) <- NULL
    }
    # remove genes that did not fit any model (i.e. rows that are all NA)
    rm.ind <- which(apply(gene_coef_df, 1, function(x) all(is.na(x))))
    if(length(rm.ind) > 0) {
      message(paste0("Removing ", length(rm.ind), " genes that did not have any successful fits"))
      gene_coef_df <- gene_coef_df[-c(rm.ind),]
    }
    # organize coefficients for these parameters, matching parameter name exactly
    exact_parameters <- purrr::map(terms_exact, function(term){
      exact <- gene_coef_df %>%
        dplyr::select(c(all_of(term), all_of(gene_name))) %>%
        tibble::column_to_rownames(var = gene_name) %>%
        as.matrix()
      # drop = FALSE keeps a single-column selection a matrix, per the
      # documented "list of matrices" contract
      exact <- exact[, order(colnames(exact)), drop = FALSE]
      if(transpose){
        exact <- t(exact)
      }
      # BUG FIX: return the matrix explicitly. Previously the trailing
      # `if (transpose) {...}` was the last expression, so the function
      # returned NULL whenever transpose was FALSE (the default).
      exact
    })
    names(exact_parameters) <- terms_exact
    # organize coefficients for these parameters, matching name by starting string
    matched_parameters <- purrr::map(terms_match, function(term){
      matched <- gene_coef_df %>%
        dplyr::select(c(starts_with(term), all_of(gene_name))) %>%
        tibble::column_to_rownames(var = gene_name) %>%
        as.matrix()
      matched <- matched[, order(colnames(matched)), drop = FALSE]
      if(transpose){
        matched <- t(matched)
      }
      # BUG FIX: same explicit return as above.
      matched
    })
    names(matched_parameters) <- terms_match
    # for each pattern, return a list with one matrix per provided parameter
    c(exact_parameters, matched_parameters)
  })
  names(param_list) <- names(coefficients_list)
  return(param_list)
}
# countsMatrix: genes x samples matrix with gene counts
# annotDF: samples x annotations matrix. includes condition(s) of interest
# projectedPs: patterns x sample matrix with projected p weights (learned with projectR)
# returns a list of model fits for each pattern. Each item in that list contains a fit
# for each gene
#' getProjectionDrivers
#'
#' @param countsMatrix genes x samples matrix with gene counts
#' @param annotDF samples x annotations matrix. includes condition(s) of interest
#' @param projectedPs patterns x sample matrix with projected p weights (learned with projectR)
#' @param model_formula_str A string specifying the model to fit, in the format "~ var1 + var2"
#'
#' @return A list of model fits for each pattern. Each item in that list contains a fit for each gene
#' @export
#'
# #' @examples
getProjectionDrivers <- function(countsMatrix, annotDF, projectedPs, model_formula_str){
  # Ensure that all genes in countsMatrix are expressed in at least one sample;
  # rowSums() is the vectorized equivalent of apply(x, 1, sum).
  totalCounts <- rowSums(countsMatrix)
  numZeroExpGenes <- sum(totalCounts == 0)
  if(numZeroExpGenes > 0)
    stop(paste("Number of genes with 0 counts across all samples:", numZeroExpGenes))
  # fit models for each pattern; seq_len() is safe for zero-row input
  nPatterns <- nrow(projectedPs)
  fits <- list()
  for (pattI in seq_len(nPatterns)){
    print(paste0("running models for projection driver analysis: pattern ", pattI, " of ", nPatterns))
    thisPatt <- projectedPs[pattI,]
    # fit a model for each gene (one per row of countsMatrix)
    fits[[pattI]] <- apply(countsMatrix, 1, fit_helper, thisPattern = thisPatt,
                           model_formula_string = model_formula_str, annotDF = annotDF)
  }
  names(fits) <- rownames(projectedPs)
  return(fits)
}
#' extractCoeffHelper_glm_nb
#'
#' Reshape the coefficient matrix of a glm.nb summary into a tidy tibble
#' with monocle3-style column names.
#'
#' @param model A fitted GLM model (not read here; kept so the function can be
#'   mapped over paired model/model_summary columns).
#' @param model_summary The summary() of the fitted model; its "coefficients"
#'   matrix is the data source.
#'
#' @return A tibble with columns term, estimate, std_err, test_val, p_value,
#'   and model_component (always "count").
#' @import tibble
#' @export
#'
# #' @examples
extractCoeffHelper_glm_nb <- function(model, model_summary) {
  raw_coefs <- model_summary[["coefficients"]]
  # coerce every column to numeric via character, preserving the matrix shape
  numeric_coefs <- apply(raw_coefs, 2, function(col) {
    as.numeric(as.character(col))
  })
  row.names(numeric_coefs) <- row.names(raw_coefs)
  colnames(numeric_coefs) <- c("estimate", "std_err", "test_val", "p_value")
  tidy_coefs <- tibble::as_tibble(numeric_coefs, rownames = "term")
  tidy_coefs$model_component <- "count"
  return(tidy_coefs)
}
#' coeff_table_glmpd
#'
#' Build one coefficient table per pattern from a list of per-gene model fits,
#' adding a multiple-testing-adjusted q_value within each
#' (model_component, term) group (stats::p.adjust default method).
#'
#' @param fits A List of model fits. output of getProjectionDrivers.R
#'
#' @return A list of coefficient tables (one tibble per pattern)
#' @import dplyr
#' @import tibble
#' @importFrom stats p.adjust
#' @import purrr
#' @export
#'
# #' @examples
coeff_table_glmpd <- function(fits){
  coeffTables <- list()
  # One coefficient table per pattern. seq_along() (not 1:length(fits))
  # correctly yields an empty loop when fits is empty.
  for (pattFitI in seq_along(fits)){
    models_df <- fits[[pattFitI]]
    # possibly() yields NA_real_ for genes whose fit failed instead of erroring
    M_f <- models_df %>%
      dplyr::mutate(terms = purrr::map2(.f = purrr::possibly(extractCoeffHelper_glm_nb, NA_real_),
                                        .x = model, .y = model_summary)) %>%
      tidyr::unnest(terms)
    fit_coefs <- M_f %>%
      dplyr::group_by(model_component, term) %>%
      dplyr::mutate(q_value = stats::p.adjust(p_value)) %>%
      dplyr::ungroup()
    coeffTables[[pattFitI]] <- fit_coefs
  }
  names(coeffTables) <- names(fits)
  return(coeffTables)
}
| /R/model_fitting.R | permissive | gofflab/glmpd | R | false | false | 19,410 | r | ###########################################################
# Functions for parallelization and fitting of GLM models
###########################################################
#' .fitGLM
#' This function parallelizes glm models:
#' Parallelizes across genes in a counts matrix
#' Parallelizes across patterns in a matrix of projected patterns
#' @param countsMatrix matrix of gene expression counts. genes x cells / samples.
#' @param annotDF A dataframe with annotations for each cell or sample in the gene expression dataset
#' @param geneDF (currently) optional dataframe with annotations for each gene in the dataset
#' @param model_formula_str A string specifying the model to fit, in the format "~ var1 + var2"
#' @param projected_patterns A matrix of projected pattern weights to use as an explanatory variable in the model fit for each gene. colnames are patterns, rownames are cells. Must have viable column names.
#' @param cores Integer defining the number of cores to use for parallelization of model fitting across genes.
#'
#' @return Returns a named list (one tibble per pattern) of fitted models (output similar to monocle3::fit_model)
#' @import monocle3
#' @import MASS
#' @import Matrix
#' @import RhpcBLASctl
#' @import future
#' @import furrr
.fitGLM <- function(countsMatrix, annotDF, model_formula_str, projected_patterns, cores, geneDF = NULL){
  # pattern names come from the provided projection matrix
  pattern_names <- colnames(projected_patterns)
  # gene ids come from the counts matrix rownames
  genes <- rownames(countsMatrix)
  message(paste0("Fittings models for ", length(pattern_names), " patterns and ", length(genes), " genes"))
  # Genes with all-zero counts cannot be fit; warn up front.
  nZeroGenes <- sum(Matrix::rowSums(countsMatrix) == 0)
  if(nZeroGenes > 0){
    # BUG FIX: was warnings(), which only *prints* previously collected
    # warnings and signals nothing; warning() actually raises one.
    warning(paste0(nZeroGenes, " genes have zero expression and will not be successfully fit. It is recommended to remove them before running."))
  }
  # Limit BLAS/OpenMP threading to avoid conflicts with furrr multicore workers.
  RhpcBLASctl::omp_set_num_threads(1)
  RhpcBLASctl::blas_set_num_threads(1)
  # uses future parallelization structure; open multicore operations
  plan(multicore, workers = cores)
  # tear workers down even if a fit errors part-way through
  on.exit(plan(sequential), add = TRUE)
  full_glm_models <- furrr::future_map(pattern_names, function(pattern_name){
    message(paste0(date(), ": working on pattern ", pattern_name))
    thisPatt <- projected_patterns[, pattern_name]
    # fit one pattern, all genes (one fit_helper call per row of countsMatrix)
    glm_models <- apply(countsMatrix, 1, fit_helper, thisPattern = thisPatt,
                        model_formula_string = model_formula_str, annotDF = annotDF)
    # transpose the list-of-lists into a tibble with one row per gene
    glm_models_df <- tibble::as_tibble(purrr::transpose(glm_models))
    # Add in gene fData to returned dataframe. Currently optional.
    if(!is.null(geneDF)){
      # NOTE(review): assumes geneDF rows align 1:1 with countsMatrix rows — confirm
      glm_models_df <- dplyr::bind_cols(geneDF, glm_models_df)
    } else {
      # apply() names its result by rownames(countsMatrix), i.e. the gene ids
      glm_models_df <- glm_models_df %>%
        dplyr::mutate(gene_id = names(glm_models))
    }
    # tibble with a column holding the full model for each gene (memory heavy)
    return(glm_models_df)
  })
  names(full_glm_models) <- pattern_names
  return(full_glm_models)
}
#' fit_helper
#'
#' Fit a negative-binomial GLM for a single gene.
#'
#' @param thisGene Numeric vector of counts for one gene across samples
#'   (a row of countsMatrix, as supplied by apply(countsMatrix, 1, ...)) —
#'   not a gene-name string.
#' @param thisPattern Numeric vector of projected pattern weights, one per
#'   sample; exposed to the formula as column "patternWeights".
#' @param model_formula_string Model formula RHS as a string, e.g. "~ var1 + var2";
#'   the response "y" (this gene's counts) is prepended below.
#' @param annotDF Dataframe of per-sample annotations, merged with the counts
#'   by row order.
#'
#' @return On success, list(model = <glm.nb fit>, model_summary = <summary>);
#'   on failure, list(model = NA, model_summary = NA).
#' @importFrom stats as.formula
#' @import MASS
#' @import dplyr
#' @export
#'
# #' @examples
fit_helper <- function(thisGene, thisPattern, model_formula_string, annotDF){
# sample x annotations, gene expression, and p weights
# NOTE(review): merge() here ends up naming the counts column "y" (the formula
# below relies on that) — fragile implicit naming, confirm before changing.
datFrame <- as.data.frame(merge(annotDF,thisGene,by=0,sort=FALSE))
datFrame$patternWeights <- thisPattern
# prepare model formula string: response "y" + user-supplied RHS
model_formula_str <- paste("y", model_formula_string, sep = "")
model_formula = stats::as.formula(model_formula_str)
# run model; the tryCatch value (success list or error list) is the return value
tryCatch({
FM_fit <- glm.nb(model_formula,
data = datFrame, link = log, epsilon = 1e-3,
control = glm.control(maxit = 50))
FM_summary = summary(FM_fit)
df = list(model = FM_fit, model_summary = FM_summary)
}, error = function(e){
error_df <- list(model = NA, model_summary = NA)
return(error_df) # return error messages; don't stop iterating (wondering if this is the best way to handle errors)
})
}
#' fitGLMpd
#' Uses a GLM to identify the set of genes used in a pattern for a given cell type or condition
#' @param object A cell_data_set, or a genes x cells/samples matrix of gene expression counts (dispatch argument).
#' @param ... Method-specific arguments: model_formula_str (string, "~ var1 + var2"),
#'   projected_patterns (cells x patterns weight matrix with viable column names),
#'   cores (integer number of parallel workers); the cell_data_set method additionally
#'   takes count_assay and geneDF, and the matrix method takes annotDF (per-cell
#'   annotation dataframe).
#' @param verbose Boolean. Declared on the generic but not consumed by the
#'   current methods — TODO confirm intended.
#'
#' @return Returns a list of fitted models (output similar to monocle3::fit_model)
#' @import monocle3
#' @import SummarizedExperiment
#' @export
#'
# #' @examples
setGeneric("fitGLMpd", function(object, ..., verbose=TRUE) standardGeneric("fitGLMpd"),
signature = "object")
# Method for cell_data_set: pulls the counts from the named assay, per-cell
# annotations from colData and, unless supplied, per-gene annotations from
# rowData, then delegates to the .fitGLM workhorse.
setMethod("fitGLMpd",signature(object="cell_data_set"), function(object, model_formula_str, projected_patterns,cores=1, count_assay = "counts", geneDF = NULL){ #,exp_family="negbinomial",cores,clean_model=T,verbose=T, result = "full_model"){
#TODO: also accept SingleCellExperiment (and SummarizedExperiment?)
cds <- object
# genes x cells counts matrix taken from the requested assay
countsMatrix <- assay(cds, count_assay)
annotDF <- colData(cds) %>% as.data.frame()
if(is.null(geneDF)){
geneDF <- rowData(cds) %>% as.data.frame()
}
.fitGLM(countsMatrix, annotDF, model_formula_str, projected_patterns,cores, geneDF)
})
# Method for a plain counts matrix: caller supplies annotDF directly; no
# geneDF is forwarded, so .fitGLM falls back to labelling rows by gene_id.
setMethod("fitGLMpd",signature(object="matrix"), function(object, annotDF, model_formula_str, projected_patterns,cores=1){ #,exp_family="negbinomial",cores,clean_model=T,verbose=T, result = "full_model"){
countsMatrix <- object
.fitGLM(countsMatrix, annotDF, model_formula_str, projected_patterns,cores)
})
###########################################################
# Functions for organizing model coefficients by genes
###########################################################
#' extractCoefficients
#' Extract coefficients from model fitting object. Each object of list requires a column exactly named "gene_id".
#' @param glm_models Named List (over patterns) of objects returned from "full_result" of fit_models_cds. Organizes each pattern by gene.
#' @param genes gene ids that exactly match the count matrix which was provided to fitGLMpd()
#' @return Returns a multileveled list of fitted model coefficients. Hierarchy of organization: pattern (list), gene (list), coefficients (data.frame)
#' @import monocle3
#' @import dplyr
#' @import purrr
#' @export
#'
#TODO: assert genes length is the same as glm_models. Allow for "gene_id" to be set manually to allow other columns to match genes on.
#TODO: is there a way to get gene names from models instead of providing... limit misnaming error ... also assumes orders are the same as is
#Generates a multi-leveled list.
#First level: pattern (named list)
#Second level: gene (named list)
#Third level: beta coefficients, columns are: coefficient name, coefficient
extractCoefficients <- function(glm_models, genes){
  extracted_coefficients <- lapply(glm_models, function(glm_model){
    # Drop full models for memory efficiency; keep only the coefficient table.
    summarized_glm_model <- glm_model %>%
      monocle3::coefficient_table() %>%
      dplyr::select(-c(model, model_summary)) %>%
      as.data.frame()
    # Organize into one coefficient data.frame per gene.
    # (purrr::map — the original called the unexported purrr:::map.)
    summarized_glm_model <- purrr::map(genes, function(gene){
      summarized_glm_model %>% dplyr::filter(gene_id == gene)
    })
    # BUG FIX: was `names(...) <- genes_of_interest`, an undefined variable;
    # the intended names are the `genes` argument itself.
    names(summarized_glm_model) <- genes
    return(summarized_glm_model)
  })
  return(extracted_coefficients)
}
#' orderCoefficientsByGene
#' Order model coefficient estimates by genes, such as for visualization by heatmap
#' @param pattern_coefficient_list List (over patterns) of objects returned from extractCoefficients(), or "summarized_result" option of fit_models_cds().
#' @param model_terms_to_keep optional. limit returned dataframe to coefficients of interest.
#' @param filter_significance optional. Numeric value at which to filter q-value significance.
#' Genes with significant coefficients for one or more terms are returned. If NULL (default), all genes returned.
#' @param string optional. all model terms that contain this string will be used for significance filtering.
#' @return Returns a list (one element per pattern) of tibbles nested by measure (estimate, p_value, ...).
#' @import dplyr
#' @import tibble
#' @importFrom tidyr nest
#' @export
#'
#Pass extracted coefficients to organize into a data frame with each gene as a row ----
#TODO: Can make this more elegant with dplyr instead of lapply
#TODO: filter out failed genes
orderCoefficientsByGene <- function(pattern_coefficient_list, model_terms_to_keep = NULL,
                                    filter_significance = NULL, string = NULL){
  # BUG FIX: filter_significance and string are documented as optional but had
  # no defaults, so omitting them raised "argument missing" at the is.null()
  # checks below. NULL defaults make the documented behavior work.
  pattern_full_names <- names(pattern_coefficient_list)
  # loop thru each pattern
  params_list <- lapply(pattern_full_names, function(pattern_full_name){
    genes <- names(pattern_coefficient_list[[pattern_full_name]])
    # loop thru each gene
    param_df <- lapply(genes, function(gene){
      # Pre-empt failed fits by creating a mini df with just the gene name;
      # failed genes get parameters filled with NA during bind_rows() below.
      param <- data.frame("gene_id" = gene)
      # Extract all parameters of interest: "term" is the parameter name,
      # "estimate" the estimated beta value.
      try(param <- pattern_coefficient_list[[pattern_full_name]][[gene]] %>%
            dplyr::select(any_of(c("term","estimate","std_err","p_value","q_value","test_val"))) %>%
            tibble::column_to_rownames(var = "term") %>%
            t() %>%
            as.data.frame() %>%
            tibble::rownames_to_column(var = "measure") %>%
            dplyr::mutate("gene_id" = gene)
      )
      # if provided, restrict parameters to only these
      if(!is.null(model_terms_to_keep)){
        param <- param %>%
          dplyr::select(any_of(model_terms_to_keep))
      }
      # Check whether the minimum q-value among terms matching `string` is
      # significant; if none are, return NULL to drop the gene entirely.
      if(!is.null(filter_significance)){
        # default to non-significant when extraction fails (e.g. failed fit)
        min_q <- 1
        try(min_q <- param %>%
              dplyr::filter(measure == "q_value") %>%
              dplyr::select(matches(string)) %>%
              min())
        if(min_q > filter_significance){
          # not significant
          return(NULL)
        }
      }
      return(param)
    })
    # For each pattern build a df with tibbles nested per measure; allows
    # downstream filtering on q_value etc.
    tbl <- dplyr::bind_rows(param_df) %>%
      dplyr::group_by(measure) %>%
      tidyr::nest() %>%
      dplyr::ungroup()
    return(tbl)
  })
  names(params_list) <- pattern_full_names
  # list of dataframes, one df per pattern
  return(params_list)
}
#' organizeEstimates
#' Generate matrices of coefficients of interest for later plotting with heatmaps.
#' @param coefficients_list List (over patterns) of objects returned from orderCoefficientsByGene()
#' @param terms_exact Character vector. Names of terms to plot which exactly match the terms of the model.
#' @param terms_match Character vector. Beginning string(s) used to match terms to plot. Necessary for interaction terms.
#' @param feature which feature to pull from the model. Default ["estimate"] is beta. Options include "q_value"...
#' @param gene_name name of column which contains gene names. Default ["gene_id"]
#' @param transpose boolean. Should matrices be transposed. Default [FALSE] returns genes as rows.
#' @return Returns a list (per pattern) of lists of matrices (per term).
#' @import dplyr
#' @import tibble
#' @import purrr
#' @importFrom tidyr unnest
#' @export
#'
## coefficients_list should be output from orderCoefficientsByGene(): a list per
## pattern containing a tibble with estimate, std error, p-value...
## with transpose = FALSE (default), rownames are genes, colnames are model parameters
organizeEstimates <- function(coefficients_list, terms_exact, terms_match, feature = "estimate", gene_name = "gene_id", transpose = FALSE){
  param_list <- purrr::map(names(coefficients_list), function(pattern_name){
    # contains tibbles for each measure, e.g. estimate, std error, p-value
    gene_coef_df <- coefficients_list[[pattern_name]] %>%
      dplyr::filter(measure == feature) %>%
      dplyr::select(data) %>%
      tidyr::unnest(cols = data)
    if(!is.null(rownames(gene_coef_df))){
      message("Warning: existing rownames may be removed")
      rownames(gene_coef_df) <- NULL
    }
    # remove genes that did not fit any model (i.e. rows that are all NA)
    rm.ind <- which(apply(gene_coef_df, 1, function(x) all(is.na(x))))
    if(length(rm.ind) > 0) {
      message(paste0("Removing ", length(rm.ind), " genes that did not have any successful fits"))
      gene_coef_df <- gene_coef_df[-c(rm.ind),]
    }
    # organize coefficients for these parameters, matching parameter name exactly
    exact_parameters <- purrr::map(terms_exact, function(term){
      exact <- gene_coef_df %>%
        dplyr::select(c(all_of(term), all_of(gene_name))) %>%
        tibble::column_to_rownames(var = gene_name) %>%
        as.matrix()
      # drop = FALSE keeps a single-column selection a matrix, per the
      # documented "list of matrices" contract
      exact <- exact[, order(colnames(exact)), drop = FALSE]
      if(transpose){
        exact <- t(exact)
      }
      # BUG FIX: return the matrix explicitly. Previously the trailing
      # `if (transpose) {...}` was the last expression, so the function
      # returned NULL whenever transpose was FALSE (the default).
      exact
    })
    names(exact_parameters) <- terms_exact
    # organize coefficients for these parameters, matching name by starting string
    matched_parameters <- purrr::map(terms_match, function(term){
      matched <- gene_coef_df %>%
        dplyr::select(c(starts_with(term), all_of(gene_name))) %>%
        tibble::column_to_rownames(var = gene_name) %>%
        as.matrix()
      matched <- matched[, order(colnames(matched)), drop = FALSE]
      if(transpose){
        matched <- t(matched)
      }
      # BUG FIX: same explicit return as above.
      matched
    })
    names(matched_parameters) <- terms_match
    # for each pattern, return a list with one matrix per provided parameter
    c(exact_parameters, matched_parameters)
  })
  names(param_list) <- names(coefficients_list)
  return(param_list)
}
# countsMatrix: genes x samples matrix with gene counts
# annotDF: samples x annotations matrix. includes condition(s) of interest
# projectedPs: patterns x sample matrix with projected p weights (learned with projectR)
# returns a list of model fits for each pattern. Each item in that list contains a fit
# for each gene
#' getProjectionDrivers
#'
#' @param countsMatrix genes x samples matrix with gene counts
#' @param annotDF samples x annotations matrix. includes condition(s) of interest
#' @param projectedPs patterns x sample matrix with projected p weights (learned with projectR)
#' @param model_formula_str A string specifying the model to fit, in the format "~ var1 + var2"
#'
#' @return A list of model fits for each pattern. Each item in that list contains a fit for each gene
#' @export
#'
# #' @examples
getProjectionDrivers <- function(countsMatrix, annotDF, projectedPs, model_formula_str){
  # Ensure that all genes in countsMatrix are expressed in at least one sample;
  # rowSums() is the vectorized equivalent of apply(x, 1, sum).
  totalCounts <- rowSums(countsMatrix)
  numZeroExpGenes <- sum(totalCounts == 0)
  if(numZeroExpGenes > 0)
    stop(paste("Number of genes with 0 counts across all samples:", numZeroExpGenes))
  # fit models for each pattern; seq_len() is safe for zero-row input
  nPatterns <- nrow(projectedPs)
  fits <- list()
  for (pattI in seq_len(nPatterns)){
    print(paste0("running models for projection driver analysis: pattern ", pattI, " of ", nPatterns))
    thisPatt <- projectedPs[pattI,]
    # fit a model for each gene (one per row of countsMatrix)
    fits[[pattI]] <- apply(countsMatrix, 1, fit_helper, thisPattern = thisPatt,
                           model_formula_string = model_formula_str, annotDF = annotDF)
  }
  names(fits) <- rownames(projectedPs)
  return(fits)
}
#' extractCoeffHelper_glm_nb
#'
#' Reshape the coefficient matrix of a glm.nb summary into a tidy tibble
#' with monocle3-style column names.
#'
#' @param model A fitted GLM model (not read here; kept so the function can be
#'   mapped over paired model/model_summary columns).
#' @param model_summary The summary() of the fitted model; its "coefficients"
#'   matrix is the data source.
#'
#' @return A tibble with columns term, estimate, std_err, test_val, p_value,
#'   and model_component (always "count").
#' @import tibble
#' @export
#'
# #' @examples
extractCoeffHelper_glm_nb <- function(model, model_summary) {
  raw_coefs <- model_summary[["coefficients"]]
  # coerce every column to numeric via character, preserving the matrix shape
  numeric_coefs <- apply(raw_coefs, 2, function(col) {
    as.numeric(as.character(col))
  })
  row.names(numeric_coefs) <- row.names(raw_coefs)
  colnames(numeric_coefs) <- c("estimate", "std_err", "test_val", "p_value")
  tidy_coefs <- tibble::as_tibble(numeric_coefs, rownames = "term")
  tidy_coefs$model_component <- "count"
  return(tidy_coefs)
}
#' coeff_table_glmpd
#'
#' Build one coefficient table per pattern from a list of per-gene model fits,
#' adding a multiple-testing-adjusted q_value within each
#' (model_component, term) group (stats::p.adjust default method).
#'
#' @param fits A List of model fits. output of getProjectionDrivers.R
#'
#' @return A list of coefficient tables (one tibble per pattern)
#' @import dplyr
#' @import tibble
#' @importFrom stats p.adjust
#' @import purrr
#' @export
#'
# #' @examples
coeff_table_glmpd <- function(fits){
  coeffTables <- list()
  # One coefficient table per pattern. seq_along() (not 1:length(fits))
  # correctly yields an empty loop when fits is empty.
  for (pattFitI in seq_along(fits)){
    models_df <- fits[[pattFitI]]
    # possibly() yields NA_real_ for genes whose fit failed instead of erroring
    M_f <- models_df %>%
      dplyr::mutate(terms = purrr::map2(.f = purrr::possibly(extractCoeffHelper_glm_nb, NA_real_),
                                        .x = model, .y = model_summary)) %>%
      tidyr::unnest(terms)
    fit_coefs <- M_f %>%
      dplyr::group_by(model_component, term) %>%
      dplyr::mutate(q_value = stats::p.adjust(p_value)) %>%
      dplyr::ungroup()
    coeffTables[[pattFitI]] <- fit_coefs
  }
  names(coeffTables) <- names(fits)
  return(coeffTables)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DBI.R
\name{dbQuoteIdentifier,DatabaseConnectorConnection,character-method}
\alias{dbQuoteIdentifier,DatabaseConnectorConnection,character-method}
\title{Quote identifiers}
\usage{
\S4method{dbQuoteIdentifier}{DatabaseConnectorConnection,character}(conn, x, ...)
}
\arguments{
\item{conn}{A \linkS4class{DBIConnection} object, as returned by
\code{\link[DBI:dbConnect]{dbConnect()}}.}
\item{x}{A character vector, \link[DBI]{SQL} or \link[DBI]{Id} object to quote as identifier.}
\item{...}{Other arguments passed on to methods.}
}
\value{
\code{dbQuoteIdentifier()} returns an object that can be coerced to \link{character},
of the same length as the input.
For an empty character vector this function returns a length-0 object.
The names of the input argument are preserved in the output.
When passing the returned object again to \code{dbQuoteIdentifier()}
as \code{x}
argument, it is returned unchanged.
Passing objects of class \link[DBI]{SQL} should also return them unchanged.
(For backends it may be most convenient to return \link[DBI]{SQL} objects
to achieve this behavior, but this is not required.)
An error is raised if the input contains \code{NA},
but not for an empty string.
}
\description{
Call this method to generate a string that is suitable for
use in a query as a column or table name, to make sure that you
generate valid SQL and protect against SQL injection attacks. The inverse
operation is \code{\link[DBI:dbUnquoteIdentifier]{dbUnquoteIdentifier()}}.
\Sexpr[results=rd,stage=render]{DBI:::methods_as_rd("dbQuoteIdentifier")}
}
\seealso{
Other DBIResult generics:
\code{\link[DBI]{DBIResult-class}},
\code{\link[DBI]{dbBind}()},
\code{\link[DBI]{dbClearResult}()},
\code{\link[DBI]{dbColumnInfo}()},
\code{\link[DBI]{dbFetch}()},
\code{\link[DBI]{dbGetInfo}()},
\code{\link[DBI]{dbGetRowCount}()},
\code{\link[DBI]{dbGetRowsAffected}()},
\code{\link[DBI]{dbGetStatement}()},
\code{\link[DBI]{dbHasCompleted}()},
\code{\link[DBI]{dbIsReadOnly}()},
\code{\link[DBI]{dbIsValid}()},
\code{\link[DBI]{dbQuoteLiteral}()},
\code{\link[DBI]{dbQuoteString}()},
\code{\link[DBI]{dbUnquoteIdentifier}()}
}
| /man/dbQuoteIdentifier-DatabaseConnectorConnection-character-method.Rd | permissive | CristianPachacama/DatabaseConnector | R | false | true | 2,206 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DBI.R
\name{dbQuoteIdentifier,DatabaseConnectorConnection,character-method}
\alias{dbQuoteIdentifier,DatabaseConnectorConnection,character-method}
\title{Quote identifiers}
\usage{
\S4method{dbQuoteIdentifier}{DatabaseConnectorConnection,character}(conn, x, ...)
}
\arguments{
\item{conn}{A \linkS4class{DBIConnection} object, as returned by
\code{\link[DBI:dbConnect]{dbConnect()}}.}
\item{x}{A character vector, \link[DBI]{SQL} or \link[DBI]{Id} object to quote as identifier.}
\item{...}{Other arguments passed on to methods.}
}
\value{
\code{dbQuoteIdentifier()} returns an object that can be coerced to \link{character},
of the same length as the input.
For an empty character vector this function returns a length-0 object.
The names of the input argument are preserved in the output.
When passing the returned object again to \code{dbQuoteIdentifier()}
as \code{x}
argument, it is returned unchanged.
Passing objects of class \link[DBI]{SQL} should also return them unchanged.
(For backends it may be most convenient to return \link[DBI]{SQL} objects
to achieve this behavior, but this is not required.)
An error is raised if the input contains \code{NA},
but not for an empty string.
}
\description{
Call this method to generate a string that is suitable for
use in a query as a column or table name, to make sure that you
generate valid SQL and protect against SQL injection attacks. The inverse
operation is \code{\link[DBI:dbUnquoteIdentifier]{dbUnquoteIdentifier()}}.
\Sexpr[results=rd,stage=render]{DBI:::methods_as_rd("dbQuoteIdentifier")}
}
\seealso{
Other DBIResult generics:
\code{\link[DBI]{DBIResult-class}},
\code{\link[DBI]{dbBind}()},
\code{\link[DBI]{dbClearResult}()},
\code{\link[DBI]{dbColumnInfo}()},
\code{\link[DBI]{dbFetch}()},
\code{\link[DBI]{dbGetInfo}()},
\code{\link[DBI]{dbGetRowCount}()},
\code{\link[DBI]{dbGetRowsAffected}()},
\code{\link[DBI]{dbGetStatement}()},
\code{\link[DBI]{dbHasCompleted}()},
\code{\link[DBI]{dbIsReadOnly}()},
\code{\link[DBI]{dbIsValid}()},
\code{\link[DBI]{dbQuoteLiteral}()},
\code{\link[DBI]{dbQuoteString}()},
\code{\link[DBI]{dbUnquoteIdentifier}()}
}
|
library(SingleR)
library(celldex)
library(Seurat)
library(SeuratDisk)
library(dplyr)
library(scater)
library(scRNAseq)
library(parallel)
library(destiny)
library(monocle)
#========================================Multiple Core
detectCores()
detectCores(logical = F)
mc<-makeCluster(getOption("mc.cores",16))
memory.limit(360000)
#========================================Preparation
GSE.number<-"GSE158055"
sc.dir<-"H:/scRNA-seq"
now.dir<-paste(sc.dir,sprintf("%s",GSE.number),sep = "/")
setwd(now.dir)
sc.type<-"10X"
part1<-Read10X("GSE158055_covid19_part1",
gene.column = 1)
part2<-Read10X("GSE158055_covid19_part2",
gene.column = 1)
part1.sc<-CreateSeuratObject(counts = part1,project = "part1")
part2.sc<-CreateSeuratObject(counts = part2,project = "part2")
rm(part1)
rm(part2)
sc.annot<-read.csv("GSE158055_cell_annotation.csv")
part1.sc$sampleID<-sc.annot$sampleID
part1.sc$celltype<-sc.annot$celltype
part1.sc$majortype<-sc.annot$majorType
sc.meta<-read.delim("GSE158055_sample_metadata.txt")
sc.meta.pbmc<-sc.meta[grep("PBMC",sc.meta$characteristics..Sample.type),]
sc.meta.pbmc<-sc.meta.pbmc[grep("progression|control",sc.meta.pbmc$characteristics..Sample.time),]
sc.pbmc.grep<-which(part1.sc$sampleID%in%sc.meta.pbmc$Sample.name)
part1.sc<-part1.sc[,sc.pbmc.grep]
part2.sc<-part2.sc[,sc.pbmc.grep]
sc.data<-part1.sc
sc.data@assays$RNA@counts<-sc.data@assays$RNA@counts+part2.sc@assays$RNA@counts
sc.data@assays$RNA@data<-sc.data@assays$RNA@data+part2.sc@assays$RNA@data
sc.data$sampleID<-factor(sc.data$sampleID)
sc.data$celltype<-factor(sc.data$celltype)
sc.data$majortype<-factor(sc.data$majortype)
sc.data$batch<-sc.meta.pbmc$`characteristics...Datasets`[match(sc.data$sampleID,sc.meta.pbmc$Sample.name)]
sc.data$batch<-factor(sc.data$batch)
sc.data.list<-SplitObject(sc.data,split.by = "batch")
# SeuratDisk::Convert(source = "scp_scanpy.gzip.h5ad",
# dest="h5Seurat",
#                     overwrite=F)
# x<-SeuratDisk::LoadH5Seurat("scp_scanpy.gzip.h5seurat")
#
# sc.data.list<-list()
# sc.data.list[[1]]<-CreateSeuratObject(counts = x@assays$RNA@counts,
# min.cells = 3,
# min.features = 200)
# sc.data.list[[1]]$patient<-x$patient
# sc.data.list[[1]]$sort<-x$sort
# sc.data.list[[1]]$cell_type<-x$cell_type
# sc.data.list[[1]]$pheno<-x$pheno
# a<-paste(x$patient,x$pheno,sep='-')
# names(a)<-names(x$patient)
# a<-factor(a)
# sc.data.list[[1]]$orig.ident<-a
# Idents(sc.data.list[[1]])<-sc.data.list[[1]]$orig.ident
# sc.data.list<-list()
# sc.data.list[[1]]<-x
#=======================================Filter
sc.data.list<-lapply(sc.data.list,function(x){a<-x;a[["percent.mt"]]<-PercentageFeatureSet(a, pattern = "^MT-");return(a)})
for(i in 1:length(sc.data.list))
{
sc.data.list[[i]]$percent.mt[is.nan(sc.data.list[[i]]$percent.mt)]<-0
}
sc.data.list<-lapply(sc.data.list,function(x){subset(x,
subset = nFeature_RNA>=median(nFeature_RNA)/4&
nFeature_RNA<=3*median(nFeature_RNA)
# &percent.mt<=2*median(percent.mt)
)})
#=======================================Normalization
for(i in 1:length(sc.data.list))
{
sc.data.list[[i]] <- NormalizeData(sc.data.list[[i]], normalization.method = "LogNormalize", scale.factor = 10000)
}
#=======================================Variable Feature
for(i in 1:length(sc.data.list))
{
sc.data.list[[i]] <- FindVariableFeatures(sc.data.list[[i]], selection.method = "vst", nfeatures = 2000)
}
#========================================Anchors
sc.anchors<-FindIntegrationAnchors(object.list = sc.data.list)
sc.data<-IntegrateData(anchorset = sc.anchors, dims = 1:30)
DefaultAssay(sc.data)<-"integrated"
#========================================Scale
sc.data<-sc.data[,sc.data$sampleID%in%names(table(sc.data$sampleID)[table(sc.data$sampleID)>500])]
sc.data<-ScaleData(sc.data)
#========================================PCA
sc.data<-RunPCA(sc.data,features = VariableFeatures(sc.data))
VizDimLoadings(sc.data, dims = 1:4, reduction = "pca")
DimPlot(sc.data, reduction = "pca",split.by = NULL)
DimHeatmap(sc.data, dims = 1:15, cells = 500, balanced = TRUE)
#========================================
sc.data <- JackStraw(sc.data, num.replicate = 100)
sc.data <- ScoreJackStraw(sc.data, dims = 1:20)
JackStrawPlot(sc.data, dims = 1:20)
ElbowPlot(sc.data,ndims = 50)
#=======================================Cluster===================
sc.data<-FindNeighbors(sc.data, dims = 1:30)
sc.data<-FindClusters(sc.data, resolution = 1)
#========================================Phylogenetic analysis===================
sc.data<-BuildClusterTree(sc.data,slot = "scale.data")
Tool(object = sc.data, slot = 'BuildClusterTree')
PlotClusterTree(sc.data)
#=======================================Inflection sample==============
sc.data<-CalculateBarcodeInflections(sc.data)
SubsetByBarcodeInflections(sc.data)
#=======================================Dim reduction=================
sc.data<-RunUMAP(sc.data,dims = 1:30)
DimPlot(sc.data,reduction = "umap")
# sc.data.dim.tsne<-RunTSNE(sc.data.cluster)
# DimPlot(sc.data.dim.tsne,reduction = "tsne")
#=======================================Cluster biomaker=================
cluster.markers <- FindAllMarkers(sc.data,only.pos = F,
min.pct = 0.1,logfc.threshold = 0.1
)
cluster.markers%>%group_by(cluster)%>%top_n(n=2,wt=avg_log2FC)
VlnPlot(sc.data, features = c("IFNG"))
FeaturePlot(sc.data,features = c("FCGR3A","CD3D","CD3E","CD3G"))
top10<-cluster.markers%>%group_by(cluster)%>%top_n(n=10,wt=avg_log2FC)
DoHeatmap(sc.data, features = top10$gene) + NoLegend()
#=======================================SingleR annotation===============================
ref1<-celldex::HumanPrimaryCellAtlasData()
# ref2<-celldex::BlueprintEncodeData()
ref3<-celldex::DatabaseImmuneCellExpressionData()
myref.data<-list()
myref.group<-list()
myref.data[[1]]<-read.delim("F:/myWGCNA/SCRNA Data/GSE128243/GSE128243_logmedian.txt",header = T)
myref.group[[1]]<-colnames(myref.data[[1]])
myref.group[[1]]<-gsub(".*NKT_HS_(.*)[0-9]+$","NKT_\\1ulated",myref.group[[1]])
myref.data[[2]]<-read.delim("F:/myWGCNA/SCRNA Data/GSE124731/GSE124731_Data with annotation.txt",header = T)
myref.data[[2]]<-myref.data[[2]][,grep("CD|NK|MAIT|Vd",colnames(myref.data[[2]]))]
myref.group[[2]]<-read.delim("F:/myWGCNA/SCRNA Data/GSE124731/GSE124731_low_input_rnaseq_meta_data.txt.gz",header = T)
myref.group[[2]]<-myref.group[[2]]$cell_type
myref.group[[2]]<-gsub("CD([48])","CD\\1+_T_cell",myref.group[[2]])
myref.group[[2]]<-gsub("MAIT","T_cell:MAI",myref.group[[2]])
myref.group[[2]]<-gsub("^NK$","NK_cell",myref.group[[2]])
myref.group[[2]]<-gsub("^iNKT$","NKT",myref.group[[2]])
myref.group[[2]]<-gsub("Vd[12]","T_cell:gamma-delta",myref.group[[2]])
myref.data[[2]][grep("gamma-delta",myref.group[[2]])]<-NULL
myref.group[[2]]<- myref.group[[2]][-grep("gamma-delta",myref.group[[2]])]
myref.data[[3]]<-read.delim("F:/myWGCNA/SCRNA Data/GSE128626/GSE128626_data_matrix_sorted_NKT_cells.txt.gz",header = T)
rownames(myref.data[[3]])<-myref.data[[3]][,1]
rownames(myref.data[[3]])<-gsub("\'","",rownames(myref.data[[3]]))
myref.data[[3]]<-myref.data[[3]][,-1]
myref.group[[3]]<-colnames(myref.data[[3]])
myref.group[[3]]<-gsub("NKT_naive.*","NKT_Unstimulated",myref.group[[3]])
myref.group[[3]]<-gsub("NKT_exposed.*","NKT_Stimulated",myref.group[[3]])
myref.data[[4]]<-read.delim("F:/myWGCNA/SCRNA Data/GSE128626/GSE128626_data_matrix_sorted_monocytes.txt.gz",header = T)
rownames(myref.data[[4]])<-myref.data[[4]][,1]
rownames(myref.data[[4]])<-gsub("\'","",rownames(myref.data[[4]]))
myref.data[[4]]<-myref.data[[4]][,-1]
myref.group[[4]]<-colnames(myref.data[[4]])
myref.group[[4]]<-gsub("Monocytes_naive.*","Monocyte:Unstimulated",myref.group[[4]])
myref.group[[4]]<-gsub("Monocytes_exposed.*","Monocyte:Stimulated",myref.group[[4]])
myref.data[[5]]<-read.delim("F:/myWGCNA/Particular cells expression Data/GSE28726/GSE28726_Data with annotation.txt",header = T)
myref.data[[5]]<-myref.data[[5]][,grep("^GSM[0-9]+",colnames(myref.data[[5]]))]
myref.data[[5]]<-log2(myref.data[[5]]+1)
myref.group[[5]]<-read.delim("F:/myWGCNA/Particular cells expression Data/GSE28726/GSE28726_group_mod.txt",header = T)
myref.group[[5]]<-t(myref.group[[5]])
myref.group[[5]]<-myref.group[[5]][,1]
myref.group[[5]]<-gsub(".*CD4 T cell.*","CD4+_T_cell",myref.group[[5]])
myref.group[[5]]<-gsub(".*NKT cell.*resting","NKT_Unstimulated",myref.group[[5]])
myref.group[[5]]<-gsub(".*NKT cell.*stimulated","NKT_Stimulated",myref.group[[5]])
myref.group[[5]]<-gsub(".*CD1d-aGC\\+ Va24- T cell.*resting","dNKT_Unstimulated",myref.group[[5]])
myref.group[[5]]<-gsub(".*CD1d-aGC\\+ Va24- T cell.*stimulated","dNKT_Stimulated",myref.group[[5]])
# myref.data<-read.delim("E:/myWGCNA/SCRNA Data/GSE128243/GSE128243_ReadCounts.txt",
# header = T)
# myref.data<-LogNormalize(myref.data)
# myref.group<-read.delim("E:/myWGCNA/SCRNA Data/GSE128243/GSE128243_group_mod.txt",header = T)
# myref.group<-gsub("Human NKT cell ([un]*stimulated) sample [0-9]$","NKT_\\1",myref.group)
# myref.data<-sc.data.nkt@assays$RNA@counts
# myref.data<-as.matrix(myref.data)
# myref.data<-SummarizedExperiment(assays=list(counts=myref.data,logcounts=log2(myref.data+1)))
# myref.group<-sc.data.nkt$orig.ident
myref.group[[1]]<-gsub(".*NKT.*","NKT",myref.group[[1]])
myref.group[[2]]<-gsub(".*NKT.*","NKT",myref.group[[2]])
myref.group[[2]]<-gsub("T_cell:MAI","T_cell_MAIT",myref.group[[2]])
myref.group[[2]]<-gsub(".*CD4.*","T_cell_CD4",myref.group[[2]])
myref.group[[2]]<-gsub(".*CD8.*","T_cell_CD8",myref.group[[2]])
myref.group[[3]]<-gsub(".*NKT.*","NKT",myref.group[[3]])
myref.group[[4]]<-gsub(".*Monocyte.*","Monocyte",myref.group[[4]])
myref.group[[5]]<-gsub(".*NKT.*","NKT",myref.group[[5]])
myref.group[[5]]<-gsub(".*CD4.*","T_cell_CD4",myref.group[[5]])
ref1<-ref1[,ref1$label.main%in%c("B_cell","DC","Erythroblast","Macrophage","Monocyte","Neutrophils","NK_cell","Platelets")]
ref3<-ref3[,ref3$label.main%in%c("B cells","Monocytes","NK cells")]
ref3$label.main<-gsub(".*Monocyte.*","Monocyte",ref3$label.main)
ref3$label.main<-gsub(".*B cells","B_cell",ref3$label.main)
ref3$label.main<-gsub(".*NK cells","NK_cell",ref3$label.main)
# ref.list<-list(ref1,ref2,ref3)
ref.list<-list(ref1,ref3)
# ref.list<-list(ref3)
ref.list<-c(ref.list,myref.data)
# labels.list<-list(ref1$label.main,ref2$label.main,ref3$label.main)
labels.list<-list(ref1$label.main,ref3$label.main)
# labels.list<-list(ref3$label.main)
labels.list<-c(labels.list,myref.group)
Rcpp::sourceCpp(code='
#include <Rcpp.h>
using namespace Rcpp;
// [[Rcpp::export]]
IntegerMatrix asMatrix(NumericVector rp,
NumericVector cp,
NumericVector z,
int nrows,
int ncols){
int k = z.size() ;
IntegerMatrix mat(nrows, ncols);
for (int i = 0; i < k; i++){
mat(rp[i],cp[i]) = z[i];
}
return mat;
}')
# Convert a column-compressed sparse matrix (dgCMatrix-style slots @i/@p/@x)
# to a dense matrix via the Rcpp helper asMatrix() defined just above.
# Used because as.matrix() on very large single-cell matrices can exhaust
# memory in one allocation.
# NOTE(review): asMatrix fills an Rcpp IntegerMatrix, so non-integer values
# are truncated -- fine for raw counts, confirm before using on normalized
# (log-scale) data.
as_matrix <- function(mat){
  # @i holds 0-based row indices of the non-zero entries
  row_pos <- mat@i
  # Recover each entry's 0-based column from the column-pointer slot @p:
  # entry k (0-based) falls in the column whose pointer interval contains k.
  col_pos <- findInterval(seq(mat@x)-1,mat@p[-1])
  tmp <- asMatrix(rp = row_pos, cp = col_pos, z = mat@x,
                  nrows = mat@Dim[1], ncols = mat@Dim[2])
  row.names(tmp) <- mat@Dimnames[[1]]
  colnames(tmp) <- mat@Dimnames[[2]]
  return(tmp)
}
sc.data.singleR<-GetAssayData(sc.data,slot = "data")
sc.data.singleR<-as_matrix(sc.data.singleR)
sc.data.annot<-SingleR(test = sc.data.singleR,
ref = ref.list,
labels = labels.list,
de.method = "classic")
rm(sc.data.singleR)
sc.data$annot<-sc.data.annot$pruned.labels
sc.data$cluster_annot<-sc.data.annot$pruned.labels
#----- Majority-vote cluster annotation from per-cell SingleR labels -----
# For each Seurat cluster: if one SingleR label covers >70% of cells the
# whole cluster takes that label; otherwise cells carrying a "competitive"
# label (>40% of the top share) keep their own label and the rest get the
# top label.
cluster.group<-unique(sc.data$seurat_clusters)
cluster.group<-cluster.group[order(cluster.group)]
for(i in seq_along(cluster.group))
{
  print(i)
  a<-sc.data.annot$pruned.labels[sc.data$seurat_clusters==cluster.group[i]]
  annot.sum<-table(a)
  annot.sum.ratio<-annot.sum/sum(annot.sum)
  annot.sum.ratio<-annot.sum.ratio[order(annot.sum.ratio,decreasing = TRUE)]
  if(max(annot.sum.ratio)>0.7)
  {
    # One label clearly dominates: assign it to every cell of the cluster.
    a<-names(annot.sum.ratio)[1]
  } else{
    annot.name.sub<-names(annot.sum.ratio)[annot.sum.ratio>max(annot.sum.ratio)*0.4]
    if(length(annot.name.sub)>1)
    {
      # Keep the competitive labels per cell; minor labels fall back to the
      # most frequent one.
      a[!a%in%annot.name.sub]<-names(annot.sum.ratio)[1]
    } else{
      a<-names(annot.sum.ratio)[1]
    }
  }
  sc.data$cluster_annot[sc.data$seurat_clusters==cluster.group[i]]<-a
  # (Removed a no-op 'i<-i+1' -- 'for' manages its own index -- and a dead
  # bare 'annot.sum.ratio' expression, which is not auto-printed in a loop.)
}
DimPlot(sc.data, reduction = "umap", label = T,group.by = "cluster_annot")
#========================================marker annotation=========================
# S4 class describing a cell type by its expected marker-gene signature:
#   marker   - gene symbols
#   positive - expected direction per marker (TRUE = up in this cell type)
#   subunit  - grouping index: markers sharing a value > 0 are averaged as
#              one unit; 0 marks markers handled individually
#              (see cell.marker.annot below)
#   subgroup - named list of nested "Cells" objects (cell-type hierarchy)
setClass(Class = "Cells",
         slots = c(marker="character",
                   positive="logical",
                   subunit="numeric",
                   subgroup="list"),
         sealed = F
         )
# Validity: 'positive' and 'subunit' may be left empty; when supplied, all
# three marker-parallel vectors must have equal length.
setValidity(Class = "Cells",
            method = function(object){
              length(object@positive)==0 ||
                length(object@subunit)==0 ||
                length(object@positive)==length(object@marker)&
                length(object@positive)==length(object@subunit)&
                length(object@marker)==length(object@subunit)
            })
PBMC<-new("Cells")
PBMC@subgroup<-list(HSC=new("Cells",marker=c("CD34","CD38","PTPRC","ITGA2","THY1"),
positive=c(T,F,F,T,T)),
MPP=new("Cells",marker=c("CD34","CD38","PTPRC","THY1"),
positive=c(T,F,F,F)),
CLP=new("Cells",marker=c("CD34","CD38","MME","PTPRC"),
positive=c(T,T,T,T)),
CMP=new("Cells",marker=c("CD34","CD38","CD7","MME","PTPRC","THY1","FLT3"),
positive=c(T,T,F,F,F,F,T)),
MEP=new("Cells",marker=c("CD34","CD38","CD7","MME","PTPRC","FLT3","IL3RA"),
positive=c(T,T,F,F,F,F,F)),
GMP=new("Cells",marker=c("CD34","CD38","MME","PTPRC","IL3RA","FLT3"),
positive=c(T,T,F,T,T,T)),
NK_cell=new("Cells",marker=c("CD3D","CD3E","CD3G","NCAM1"),
positive=c(F,F,F,T),
subunit=c(1,1,1,0)),
T_cell=new("Cells",marker=c("CD3D","CD3E","CD3G"),
positive=c(T,T,T),
subunit=c(1,1,1)),
B_cell=new("Cells",marker=c("CD3D","CD3E","CD3G","CD19","MS4A1"),
positive=c(F,F,F,T,T),
subunit=c(1,1,1,0,0)),
Plasma_cell=new("Cells",marker=c("CD19","SDC1","IL6R","CD52","MZB1"),
positive=c(F,T,T,F,T)),
Monocyte=new("Cells",marker=c("CD14"),
positive=c(T)),
Macrophage=new("Cells",marker=c("ITGAM","CD68","CD163"),
positive=c(T,T,T)),
pDC=new("Cells",marker=c("HLA-DRA","HLA-DRB1","CD209","CLEC4C","IL3RA","LILRA4"),
positive=c(T,T,T,T,T,T),
subunit=c(1,1,0,0,0,0)),
mDC=new("Cells",marker=c("ITGAX","HLA-DRA","HLA-DRB1","CD209","CD1C"),
positive=c(F,T,T,T,T),
subunit=c(0,1,1,0,0)),
Neutrophil=new("Cells",marker=c("ITGAM","CD16","ITGB2","FCGR2A","CD44","CD55","FUT4","ITGA4"),
positive=c(T,T,T,T,T,T,T,F)),
Eosinophil=new("Cells",marker=c("PTPRC","IL5RA","CCR3","ADGRE1","ITGAM"),
positive=c(T,T,T,T,T)),
Basophil=new("Cells",marker=c("CD19","IL3RA","KIT","ENPP3","FCER1A"),
positive=c(F,T,F,T,T)),
Mast_cell=new("Cells",marker=c("FCGR2A","CD33","KIT","ENPP3","FCER1A"),
positive=c(T,T,T,T,T)),
Erythroblast=new("Cells",marker=c("GYPA"),
positive=c(T)),
Platelets=new("Cells",marker=c("ITGA2B","GP9","GP1BA","ITGB3","PPBP"),
positive=c(T,T,T,T,T))
)
# Build a two-branch subgroup list from a set of marker genes: one "Cells"
# object with the given marker directions and a mirror object with all
# directions flipped, named like "CD16+" / "CD16-".
#
# genes    : marker gene symbols
# labels   : display labels used to build the subgroup names (default: genes)
# combine  : integer grouping; genes sharing a value form one hierarchy level
#            (one level per gene by default)
# positive : expected direction per gene (default: all TRUE)
#
# With several unique 'combine' values, each successive level wraps the
# previously built pair as its 'subgroup', so the returned list is the last
# level's +/- pair with earlier levels nested inside.
subgroup.combine<-function(genes,labels=genes,combine=1:length(genes),positive=rep(T,length(genes)))
{
  if(!all(c(length(genes),length(labels),length(combine),length(positive))==length(genes)))
  {
    stop("Lengths are not equal")
  }
  res.pos<-list()
  res.neg<-list()
  res<-list()
  for(i in unique(combine))
  {
    # Positive-direction branch, nesting everything built so far
    res.pos<-new("Cells",
                 marker=genes[combine==i],
                 positive=positive[combine==i],
                 subgroup=res)
    # Mirror branch with every marker direction inverted
    res.neg<-new("Cells",
                 marker=genes[combine==i],
                 positive=!positive[combine==i],
                 subgroup=res)
    res<-list(res.pos,res.neg)
    names(res)<-c(paste(labels[combine==i],ifelse(positive[combine==i],"+","-"),sep="",collapse = ""),
                  paste(labels[combine==i],ifelse(positive[combine==i],"-","+"),sep="",collapse = ""))
  }
  res
}
PBMC@subgroup$Platelets@subgroup<-list(active=new("Cells",marker=c("SELP"),
positive=c(T))
)
PBMC@subgroup$Monocyte@subgroup<-subgroup.combine(genes=c("FCGR3A"),
labels = c("CD16"))
PBMC@subgroup$NK_cell@subgroup<-subgroup.combine(genes = c("KLRB1"),
labels = c("CD161"))
# T-cell subtypes keyed on lineage markers.
# Bug fix: the gdT entry originally had 2 markers but 3 'positive'/'subunit'
# values, which violates the "Cells" validity method (all three vectors must
# have equal length) and makes new() raise an error at runtime; trimmed both
# vectors to length 2. NOTE(review): confirm whether a third gdT marker was
# dropped by mistake rather than the extra TRUE/0 being the typo.
PBMC@subgroup$T_cell@subgroup<-list(Th=new("Cells",marker=c("CD4","KLRB1","TRGC1","TRGC2","TRDC"),
                                           positive=c(TRUE,FALSE,FALSE,FALSE,FALSE),
                                           subunit=c(0,0,1,1,1)),
                                    Ts=new("Cells",marker=c("CD8A","KLRB1","TRGC1","TRGC2","TRDC"),
                                           positive=c(TRUE,FALSE,FALSE,FALSE,FALSE),
                                           subunit=c(0,0,1,1,1)),
                                    NKT=new("Cells",marker=c("NCAM1","KLRB1","KLRG1","KLRD1"),
                                            positive=c(TRUE,TRUE,TRUE,TRUE),
                                            subunit=c(0,1,1,1)),
                                    gdT=new("Cells",marker=c("TRGV9","TRDV2"),
                                            positive=c(TRUE,TRUE),
                                            subunit=c(1,1)),
                                    MAIT=new("Cells",marker=c("SLC4A10","TRAV1-2"),
                                             positive=c(TRUE,TRUE))
                                    )
PBMC@subgroup$T_cell@subgroup$Th@subgroup<-list(active=new("Cells",marker=c("HLA-DRA","HLA-DRB1"),
positive=c(T,T),
subunit=c(1,1)),
naive_memory=new("Cells",marker=c("PTPRC"),
positive=c(T)),
Treg=new("Cells",marker=c("IL2RA","IL7R"),
positive=c(T,F))
)
PBMC@subgroup$T_cell@subgroup$Ts@subgroup<-list(active=new("Cells",marker=c("HLA-DRA","HLA-DRB1"),
positive=c(T,T),
subunit=c(1,1)),
naive_memory=new("Cells",marker=c("PTPRC"),
positive=c(T))
)
PBMC@subgroup$T_cell@subgroup$gdT@subgroup<-subgroup.combine(genes=c("NCAM1","KLRB1","CD8A"),
labels = c("CD56","CD161","CD8"))
PBMC@subgroup$T_cell@subgroup$NKT@subgroup<-subgroup.combine(genes = c("HAVCR2"),
labels = c("TIM3"))
# Walk the PBMC cell-type hierarchy and return the deepest cell-type label
# consistent with the supplied differential-expression results.
#
# marker : character vector of DE gene symbols
# log2FC : numeric vector of matching avg log2 fold changes
#
# At each level every candidate subgroup is scored: unobserved markers count
# as 0, markers sharing a subunit id > 0 are averaged as one unit, and a
# subgroup is compatible only when each observed marker's sign matches its
# expected 'positive' direction. The best-scoring compatible subgroup is
# appended to the label ("PBMC_T_cell_...") and the walk descends until no
# subgroup scores > 0 or a leaf is reached.
cell.marker.annot<-function(marker,log2FC)
{
  if(length(log2FC)!=length(marker))
  {
    stop("The length of marker and log2FC are not equal!")
  }
  cell.type<-"PBMC"
  marker.set<-PBMC
  match.list<-list()
  while (T) {
    match.list[[cell.type]]<-list()
    if(length(marker.set@subgroup)>0)
    {
      temp.cell<-c()
      for(cell.group in names(marker.set@subgroup))
      {
        temp.cell[cell.group]<-0
        # Per-marker observed log2FC for this subgroup: FALSE (numeric 0)
        # where the marker is absent from 'marker', the observed value
        # otherwise.
        group.marker<-rep(NA,length(marker.set@subgroup[[cell.group]]@marker))
        names(group.marker)<-marker.set@subgroup[[cell.group]]@marker
        group.marker<-names(group.marker)%in%marker
        names(group.marker)<-marker.set@subgroup[[cell.group]]@marker
        group.marker[group.marker]<-log2FC[match(names(group.marker)[which(group.marker)],marker)]
        group.log2FC<-group.marker
        # NA = marker not observed (a log2FC of exactly 0 is treated the same)
        group.positive<-ifelse(group.log2FC==0,NA,group.marker>0)
        # Bookkeeping table of expected vs observed marker directions.
        # NOTE(review): match.list is built but never returned or used --
        # presumably kept for interactive debugging.
        match.list[[cell.type]][[cell.group]]<-data.frame(marker=marker.set@subgroup[[cell.group]]@marker,
                                                          positive=marker.set@subgroup[[cell.group]]@positive,
                                                          group.marker=ifelse(group.log2FC==0,NA,names(group.log2FC)),
                                                          group.positive=group.positive,
                                                          group.log2FC=group.log2FC)
        if(length(marker.set@subgroup[[cell.group]]@subunit)>0)
        {
          temp.subunit<-marker.set@subgroup[[cell.group]]@subunit
          names(temp.subunit)<-marker.set@subgroup[[cell.group]]@marker
          if(length(temp.subunit[temp.subunit==0])>0)
          {
            # Intent: subunit-0 markers are mandatory -- skip the subgroup if
            # any is unobserved. NOTE(review): indexing group.positive with
            # the numeric VALUES (all 0) yields an empty vector, so this
            # check can never trigger; likely intended
            # group.positive[names(temp.subunit)[temp.subunit==0]] -- confirm.
            if(any(is.na(group.positive[temp.subunit[temp.subunit==0]]))) next
          }
          temp.subunit<-temp.subunit[temp.subunit>0]
          if(length(temp.subunit)>0)
          {
            # Markers sharing a subunit id act as one unit: replace each
            # member's log2FC with the unit mean before the sign check.
            for(j in 1:length(unique(temp.subunit)))
            {
              group.log2FC[names(temp.subunit[temp.subunit==unique(temp.subunit)[j]])]<-mean(group.log2FC[names(temp.subunit[temp.subunit==unique(temp.subunit)[j]])])
            }
            group.positive<-ifelse(group.log2FC==0,NA,group.log2FC>0)
          }
        }
        # Compatible when every marker's sign agrees with its expected
        # direction (positive -> +0.5 multiplier, negative -> -0.5).
        if(all(group.log2FC*(as.numeric(marker.set@subgroup[[cell.group]]@positive)-0.5)>=0))
        {
          if(all(group.log2FC<0))
          {
            temp.cell[cell.group]<-mean(abs(group.log2FC))
          } else{
            temp.cell[cell.group]<-mean(group.log2FC[group.log2FC>=0])
          }
        }
      }
      # Descend into the best-scoring subgroup, if any scored above 0.
      now.cell<-names(temp.cell)[which(temp.cell==max(temp.cell))][1]
      if(max(temp.cell)>0){
        cell.type<-sprintf("%s_%s",cell.type,now.cell)
      } else{
        break
      }
      marker.set<-marker.set@subgroup[[now.cell]]
    } else{
      break
    }
  }
  cell.type
}
cell.annot<-c()
for(i in unique(cluster.markers$cluster))
{
marker<-cluster.markers$gene[cluster.markers$cluster==i]
log2FC<-cluster.markers$avg_log2FC[cluster.markers$cluster==i]
cell.annot[i]<-cell.marker.annot(marker,log2FC)
}
cell.annot<-gsub("PBMC_","",cell.annot)
sc.data$cluster_marker_annot<-cell.annot[match(as.numeric(sc.data$seurat_clusters),1:length(cell.annot))]
DimPlot(sc.data, reduction = "umap", label = T,group.by = "cluster_marker_annot")
Idents(sc.data)<-paste(as.character(sc.data$seurat_clusters),sc.data$cluster_marker_annot,sep="-")
Idents(sc.data)[grep("NA",Idents(sc.data))]<-"NA"
sc.data<-BuildClusterTree(sc.data,slot = "scale.data")
sc.data@tools$BuildClusterTree$tip.label<-paste(sc.data@tools$BuildClusterTree$tip.label,
1:length(sc.data@tools$BuildClusterTree$tip.label),
sep="-")
Tool(object = sc.data, slot = 'BuildClusterTree')
PlotClusterTree(sc.data,
type="f",
node.pos=2,
no.margin=T)
#========================================NKT annotation========================
# Start from the marker-based annotation, then force "NKT" onto any cluster
# whose dominant SingleR label (cluster_annot) is NKT.
# Bug fixes: 'cluseter_marker_annot' / 'cluseter_final_annot' typos (the
# column created earlier is 'cluster_marker_annot' and the one consumed
# later is 'cluster_final_annot') and two 'sc_data' typos -- the object is
# named 'sc.data' throughout this script.
sc.data$cluster_final_annot<-sc.data$cluster_marker_annot
for(i in unique(sc.data$seurat_clusters))
{
  # Frequency table of per-cell SingleR-derived labels within cluster i
  a<-table(sc.data$cluster_annot[sc.data$seurat_clusters==i])
  if(names(a[order(a,decreasing = TRUE)][1])=="NKT") sc.data$cluster_final_annot[sc.data$seurat_clusters==i]<-"NKT"
}
#========================================pseudo bulk RNA========================
# S3 generic: build a pseudo-bulk RNA count matrix (genes x samples) from
# single-cell data. Dispatches on class(x); see the .list method (list of
# Seurat objects), the .Seurat method (one object split by identity), and
# the .default fallback (usage hint).
pseudo.bulk.rna<-function(x,...)
{
  UseMethod("pseudo.bulk.rna")
}
# Pseudo-bulk from a LIST of Seurat objects: one output column per list
# element, raw RNA counts summed over that element's cells, restricted to
# the genes present in every object.
#
# x    : list of Seurat objects
# meta : optional 2-column table mapping names(x) (col 1) to sample labels
#        (col 2); if NULL, column names are reduced to their GSM accession.
# Returns a numeric matrix (common genes x samples); columns whose
# relabelling yields NA are dropped.
pseudo.bulk.rna.list<-function(x,meta=NULL,...){
  if(length(x)==0)
  {
    stop("list length is 0\n")
  }
  # Bug fix: the original tested class(x[[1]][[1]]) -- i.e. the first
  # metadata column of the first object, not the object itself -- so a valid
  # list of Seurat objects always failed this guard. inherits() also copes
  # with subclasses.
  if(!inherits(x[[1]],"Seurat"))
  {
    stop("list seems not Seurat object list\n")
  }
  # Genes shared by every object in the list
  common.gene<-rownames(x[[1]]@assays$RNA@counts)
  if(length(x)>1)
  {
    for(i in 2:length(x))
    {
      common.gene<-intersect(common.gene,rownames(x[[i]]@assays$RNA@counts))
    }
  }
  pseudo.bulk<-matrix(0,
                      nrow = length(common.gene),
                      ncol = length(x),
                      dimnames = list(row=common.gene,
                                      col=names(x)
                                      )
                      )
  for(i in seq_len(ncol(pseudo.bulk)))
  {
    # Bug fix: index rows by the common gene NAMES so every column follows
    # pseudo.bulk's row order; the original logical '%in%' subset kept each
    # object's own row order, silently misaligning genes whenever objects
    # store them in different orders. drop=FALSE guards the 1-cell case.
    pseudo.bulk[,i]<-rowSums(x[[i]]@assays$RNA@counts[common.gene,,drop=FALSE])
  }
  if(!is.null(meta))
  {
    colnames(pseudo.bulk)<-meta[match(colnames(pseudo.bulk),meta[,1]),2]
  } else{
    colnames(pseudo.bulk)<-gsub("^(GSM[0-9]+).*","\\1",colnames(pseudo.bulk))
  }
  # Drop columns whose relabelling found no match in meta
  pseudo.bulk<-pseudo.bulk[,!is.na(colnames(pseudo.bulk))]
  return(pseudo.bulk)
}
# Pseudo-bulk counts for a single Seurat object: cells are partitioned by
# 'split.by' (the current identities by default) and raw RNA counts are
# summed within each partition, giving a genes x partitions matrix.
#
# meta : optional 2-column table mapping partition names (col 1) to sample
#        labels (col 2); if NULL, names are reduced to their GSM accession.
pseudo.bulk.rna.Seurat<-function(x,split.by=Idents(x),meta=NULL,...){
  parts<-SplitObject(x,split.by = split.by)
  genes<-rownames(x@assays$RNA@counts)
  bulk<-matrix(0,
               nrow = length(genes),
               ncol = length(parts),
               dimnames = list(row=genes,
                               col=names(parts)))
  # Sum raw counts over the cells of each partition
  for(k in 1:ncol(bulk))
  {
    bulk[,k]<-rowSums(parts[[k]]@assays$RNA@counts)
  }
  # Relabel columns, then drop any partition whose label came back NA
  if(is.null(meta))
  {
    colnames(bulk)<-gsub("^(GSM[0-9]+).*","\\1",colnames(bulk))
  } else{
    colnames(bulk)<-meta[match(colnames(bulk),meta[,1]),2]
  }
  bulk<-bulk[,!is.na(colnames(bulk))]
  return(bulk)
}
# Fallback method: anything that is neither a Seurat object nor a list of
# them just gets a usage hint printed to the console.
pseudo.bulk.rna.default <- function(x, ...) {
  cat('You should try a Seurat object or Seurat object list.\n')
}
#=======================================NKT cell==============================
# Single-feature violin panel stripped of per-panel chrome so panels can be
# stacked vertically: no legend, no x text/ticks, no y tick labels, and the
# feature name shown horizontally as the y-axis title.
modify_vlnplot<- function(obj,
                          feature,
                          pt.size = 0,
                          plot.margin = unit(c(0, 0, 0, 0), "cm"),
                          ...) {
  stacked_theme <- theme(legend.position = "none",
                         axis.text.x = element_blank(),
                         axis.text.y = element_blank(),
                         axis.ticks.x = element_blank(),
                         axis.ticks.y = element_line(),
                         axis.title.y = element_text(size = rel(1), angle = 0, vjust = 0.5),
                         plot.margin = plot.margin)
  VlnPlot(obj, features = feature, pt.size = pt.size, ...) +
    xlab("") + ylab(feature) + ggtitle("") +
    stacked_theme
}
# Stack one stripped-down violin panel per feature into a single column.
# Bug fix: 'pt.size' and 'plot.margin' were accepted but never forwarded to
# modify_vlnplot, so callers' values were silently ignored; they are now
# passed through explicitly.
StackedVlnPlot<- function(obj, features,
                          pt.size = 0,
                          plot.margin = unit(c(0, 0, 0, 0), "cm"),
                          ...) {
  plot_list<- purrr::map(features, function(x) modify_vlnplot(obj = obj, feature = x,
                                                              pt.size = pt.size,
                                                              plot.margin = plot.margin,
                                                              ...))
  # Restore x-axis labels/ticks on the bottom panel only
  plot_list[[length(plot_list)]]<- plot_list[[length(plot_list)]] +
    theme(axis.text.x=element_text(angle = 45,hjust = 1,vjust = 1), axis.ticks.x = element_line())
  p<- patchwork::wrap_plots(plotlist = plot_list, ncol = 1)
  return(p)
}
sc.data.nkt<-sc.data[,grep("T_cell_NKT",sc.data$cluster_final_annot)]
sc.data.nkt<-FindVariableFeatures(sc.data.nkt)
sc.data.nkt<-RunPCA(sc.data.nkt,features = VariableFeatures(sc.data.nkt))
sc.data.nkt <- JackStraw(sc.data.nkt, num.replicate = 100)
sc.data.nkt <- ScoreJackStraw(sc.data.nkt, dims = 1:20)
JackStrawPlot(sc.data.nkt, dims = 1:20)
ElbowPlot(sc.data.nkt,ndims = 50)
pca.dim<-c(2:9,12,13,18)
sc.data.nkt<-FindNeighbors(sc.data.nkt, dims = pca.dim)
sc.data.nkt<-FindClusters(sc.data.nkt, resolution = 0.5)
sc.data.nkt<-BuildClusterTree(sc.data.nkt,slot = "scale.data")
Tool(object = sc.data.nkt, slot = 'BuildClusterTree')
PlotClusterTree(sc.data.nkt)
sc.data.nkt<-RunUMAP(sc.data.nkt,dims = pca.dim)
DimPlot(sc.data.nkt,reduction = "umap")
DimPlot(sc.data.nkt,reduction = "umap",label = T,group.by = "seurat_clusters")
# Per-cluster marker detection for the NKT subset.
# Bug fix: the loop iterated over 1:length(cluster.markers.nkt) right after
# cluster.markers.nkt was (re)initialized as an empty list, giving 1:0 and
# never computing any markers; iterate over the cluster levels instead.
# (Also removed the duplicated empty-list initialization.)
levels.nkt<-levels(factor(sc.data.nkt$seurat_clusters))
cluster.markers.nkt<-list()
for(i in seq_along(levels.nkt))
{
  cluster.markers.nkt[[levels.nkt[i]]]<-FindMarkers(sc.data.nkt,
                                                    ident.1 = levels.nkt[i],
                                                    only.pos = FALSE,
                                                    min.pct = 0.1,
                                                    logfc.threshold = 0.1)
}
nkt.annot<-c()
nkt.annot[c("0")]<-"NKT_CD8"
nkt.annot[c("7","11")]<-"NKT_CD8_TIM3"
nkt.annot["13"]<-"NKT_CD8_CD62L"
nkt.annot["8"]<-"NKT_CD8_CD62L"
nkt.annot["6"]<-"NKT_CD4_CD40LG" #C0
nkt.annot["16"]<-"NKT_CD8" #C1
nkt.annot[c("15","12","4","14")]<-"NKT_DN_ITGAX" #C1
nkt.annot[c("3")]<-"NKT_CD8"
nkt.annot["14"]<-"NKT_CD8"
nkt.annot[c("1","2","5","9","10")]<-"NKT_CD8"
nkt.annot<-nkt.annot[order(as.numeric(names(nkt.annot)))]
sc.data.nkt$cluster_annot_nkt<-nkt.annot[match(sc.data.nkt$seurat_clusters,names(nkt.annot))]
DimPlot(sc.data.nkt,reduction = "umap",label = T,group.by = "cluster_annot_nkt",repel = T)
sc.data.nkt$cluster_annot_nkt[sc.data.nkt@assays$integrated@data["CD8A",]<1&sc.data.nkt$seurat_clusters=="7"]<-"NKT_CD4_TIM3"
sc.data$cluster_nkt_tim3<-"NA"
sc.data$cluster_nkt_tim3[match(colnames(sc.data.nkt)[sc.data.nkt$cluster_annot_nkt%in%c("NKT_CD8_TIM3_CD62L","NKT_CD4_TIM3_CD62L")],
colnames(sc.data))]<-"NKT_TIM3pos"
sc.data$cluster_nkt_tim3[match(colnames(sc.data.nkt)[!sc.data.nkt$cluster_annot_nkt%in%c("NKT_CD8_TIM3_CD62L","NKT_CD4_TIM3_CD62L")],
colnames(sc.data))]<-"NKT_TIM3neg"
sc.data.nkt$cluster_nkt_tim3<-"NKT_TIM3neg"
sc.data.nkt$cluster_nkt_tim3[sc.data.nkt$cluster_annot_nkt=="NKT_CD8_TIM3_CD62L"]<-"NKT_TIM3pos"
sc.data.nkt$cluster_nkt_tim3[sc.data.nkt$cluster_annot_nkt=="NKT_CD4_TIM3_CD62L"]<-"NKT_TIM3pos"
#=======================================pseudo time
# Bug fix: 'sc.meta.cell' is never defined anywhere in this script -- the
# sample metadata object is 'sc.meta.pbmc' and its batch column is
# 'characteristics...Datasets' (both used earlier when assigning
# sc.data$batch). Also removed the duplicated "Batch02..07" list entry.
# NOTE(review): confirm the intended batch set -- the repeated "Batch07"
# may have been a typo for a different batch.
sc.data.nkt2<-sc.data.nkt[,sc.data.nkt$sampleID%in%sc.meta.pbmc$Sample.name[sc.meta.pbmc$characteristics...Datasets%in%c("Batch02","Batch07","Batch06")]]
# Build a monocle CellDataSet from the raw counts (densified via as_matrix,
# then re-sparsified into the class monocle expects).
cds <- newCellDataSet(as(as_matrix(sc.data.nkt2@assays$RNA@counts),"sparseMatrix"),
                      phenoData = AnnotatedDataFrame(data=sc.data.nkt2@meta.data),
                      featureData = AnnotatedDataFrame(data=data.frame(gene_short_name=rownames(sc.data.nkt2@assays$RNA@counts),
                                                                       row.names = rownames(sc.data.nkt2@assays$RNA@counts))),
                      lowerDetectionLimit = 0.1,
                      expressionFamily = negbinomial.size())
rm(sc.data.nkt2)
cds<-estimateSizeFactors(cds)
cds<-estimateDispersions(cds)
# Order cells along the variable features selected for the NKT subset
cds<-setOrderingFilter(cds,VariableFeatures(sc.data.nkt))
plot_ordering_genes(cds)
cds <- reduceDimension(cds, max_components = 2,
                       method = 'DDRTree')
cds <- orderCells(cds)
| /single cell.R | no_license | JZYangTJ/Tanglab_NKT_COVID-19 | R | false | false | 32,802 | r |
library(SingleR)
library(celldex)
library(Seurat)
library(SeuratDisk)
library(dplyr)
library(scater)
library(scRNAseq)
library(parallel)
library(destiny)
library(monocle)
#========================================Multiple Core
detectCores()
detectCores(logical = F)
mc<-makeCluster(getOption("mc.cores",16))
memory.limit(360000)
#========================================Preparation
GSE.number<-"GSE158055"
sc.dir<-"H:/scRNA-seq"
now.dir<-paste(sc.dir,sprintf("%s",GSE.number),sep = "/")
setwd(now.dir)
sc.type<-"10X"
part1<-Read10X("GSE158055_covid19_part1",
gene.column = 1)
part2<-Read10X("GSE158055_covid19_part2",
gene.column = 1)
part1.sc<-CreateSeuratObject(counts = part1,project = "part1")
part2.sc<-CreateSeuratObject(counts = part2,project = "part2")
rm(part1)
rm(part2)
sc.annot<-read.csv("GSE158055_cell_annotation.csv")
part1.sc$sampleID<-sc.annot$sampleID
part1.sc$celltype<-sc.annot$celltype
part1.sc$majortype<-sc.annot$majorType
sc.meta<-read.delim("GSE158055_sample_metadata.txt")
sc.meta.pbmc<-sc.meta[grep("PBMC",sc.meta$characteristics..Sample.type),]
sc.meta.pbmc<-sc.meta.pbmc[grep("progression|control",sc.meta.pbmc$characteristics..Sample.time),]
sc.pbmc.grep<-which(part1.sc$sampleID%in%sc.meta.pbmc$Sample.name)
part1.sc<-part1.sc[,sc.pbmc.grep]
part2.sc<-part2.sc[,sc.pbmc.grep]
sc.data<-part1.sc
sc.data@assays$RNA@counts<-sc.data@assays$RNA@counts+part2.sc@assays$RNA@counts
sc.data@assays$RNA@data<-sc.data@assays$RNA@data+part2.sc@assays$RNA@data
sc.data$sampleID<-factor(sc.data$sampleID)
sc.data$celltype<-factor(sc.data$celltype)
sc.data$majortype<-factor(sc.data$majortype)
sc.data$batch<-sc.meta.pbmc$`characteristics...Datasets`[match(sc.data$sampleID,sc.meta.pbmc$Sample.name)]
sc.data$batch<-factor(sc.data$batch)
sc.data.list<-SplitObject(sc.data,split.by = "batch")
# SeuratDisk::Convert(source = "scp_scanpy.gzip.h5ad",
# dest="h5Seurat",
# overwirte=F)
# x<-SeuratDisk::LoadH5Seurat("scp_scanpy.gzip.h5seurat")
#
# sc.data.list<-list()
# sc.data.list[[1]]<-CreateSeuratObject(counts = x@assays$RNA@counts,
# min.cells = 3,
# min.features = 200)
# sc.data.list[[1]]$patient<-x$patient
# sc.data.list[[1]]$sort<-x$sort
# sc.data.list[[1]]$cell_type<-x$cell_type
# sc.data.list[[1]]$pheno<-x$pheno
# a<-paste(x$patient,x$pheno,sep='-')
# names(a)<-names(x$patient)
# a<-factor(a)
# sc.data.list[[1]]$orig.ident<-a
# Idents(sc.data.list[[1]])<-sc.data.list[[1]]$orig.ident
# sc.data.list<-list()
# sc.data.list[[1]]<-x
#=======================================Filter
# Add the mitochondrial-gene percentage per cell (genes prefixed "MT-").
sc.data.list<-lapply(sc.data.list,function(x){a<-x;a[["percent.mt"]]<-PercentageFeatureSet(a, pattern = "^MT-");return(a)})
# NaN can arise for cells with zero totals; treat those as 0% mitochondrial.
for(i in 1:length(sc.data.list))
{
sc.data.list[[i]]$percent.mt[is.nan(sc.data.list[[i]]$percent.mt)]<-0
}
# Keep cells whose detected-gene count lies within [median/4, 3*median] of
# their own batch; the percent.mt cutoff is deliberately left disabled.
sc.data.list<-lapply(sc.data.list,function(x){subset(x,
subset = nFeature_RNA>=median(nFeature_RNA)/4&
nFeature_RNA<=3*median(nFeature_RNA)
# &percent.mt<=2*median(percent.mt)
)})
#=======================================Normalization
# Log-normalise each batch independently (scale factor 1e4, LogNormalize).
for(i in 1:length(sc.data.list))
{
sc.data.list[[i]] <- NormalizeData(sc.data.list[[i]], normalization.method = "LogNormalize", scale.factor = 10000)
}
#=======================================Variable Feature
# Top 2000 variable genes per batch (vst), used by the anchor search below.
for(i in 1:length(sc.data.list))
{
sc.data.list[[i]] <- FindVariableFeatures(sc.data.list[[i]], selection.method = "vst", nfeatures = 2000)
}
#========================================Anchors
# Seurat anchor-based integration across batches; downstream analyses use the
# "integrated" assay.
sc.anchors<-FindIntegrationAnchors(object.list = sc.data.list)
sc.data<-IntegrateData(anchorset = sc.anchors, dims = 1:30)
DefaultAssay(sc.data)<-"integrated"
#========================================Scale
# Drop samples contributing 500 cells or fewer, then z-scale genes.
sc.data<-sc.data[,sc.data$sampleID%in%names(table(sc.data$sampleID)[table(sc.data$sampleID)>500])]
sc.data<-ScaleData(sc.data)
#========================================PCA
sc.data<-RunPCA(sc.data,features = VariableFeatures(sc.data))
# Diagnostic plots for choosing the number of informative PCs.
VizDimLoadings(sc.data, dims = 1:4, reduction = "pca")
DimPlot(sc.data, reduction = "pca",split.by = NULL)
DimHeatmap(sc.data, dims = 1:15, cells = 500, balanced = TRUE)
#========================================
# PC significance via JackStraw plus the elbow plot.
sc.data <- JackStraw(sc.data, num.replicate = 100)
sc.data <- ScoreJackStraw(sc.data, dims = 1:20)
JackStrawPlot(sc.data, dims = 1:20)
ElbowPlot(sc.data,ndims = 50)
#=======================================Cluster===================
# Graph-based clustering on the first 30 integrated PCs.
sc.data<-FindNeighbors(sc.data, dims = 1:30)
sc.data<-FindClusters(sc.data, resolution = 1)
#========================================Phylogenetic analysis===================
# Cluster dendrogram computed from the scaled data.
sc.data<-BuildClusterTree(sc.data,slot = "scale.data")
Tool(object = sc.data, slot = 'BuildClusterTree')
PlotClusterTree(sc.data)
#=======================================Inflection sample==============
# Barcode-rank inflection diagnostics; the SubsetByBarcodeInflections result
# is not stored -- inspection only.
sc.data<-CalculateBarcodeInflections(sc.data)
SubsetByBarcodeInflections(sc.data)
#=======================================Dim reduction=================
sc.data<-RunUMAP(sc.data,dims = 1:30)
DimPlot(sc.data,reduction = "umap")
# sc.data.dim.tsne<-RunTSNE(sc.data.cluster)
# DimPlot(sc.data.dim.tsne,reduction = "tsne")
#=======================================Cluster biomaker=================
# Differential markers for every cluster, both directions, permissive cutoffs.
cluster.markers <- FindAllMarkers(sc.data,only.pos = F,
min.pct = 0.1,logfc.threshold = 0.1
)
# Quick inspection: top-2 markers per cluster (printed, not stored).
cluster.markers%>%group_by(cluster)%>%top_n(n=2,wt=avg_log2FC)
VlnPlot(sc.data, features = c("IFNG"))
FeaturePlot(sc.data,features = c("FCGR3A","CD3D","CD3E","CD3G"))
# Heatmap of the top-10 markers per cluster.
top10<-cluster.markers%>%group_by(cluster)%>%top_n(n=10,wt=avg_log2FC)
DoHeatmap(sc.data, features = top10$gene) + NoLegend()
#=======================================SingleR annotation===============================
# Public references from celldex.
ref1<-celldex::HumanPrimaryCellAtlasData()
# ref2<-celldex::BlueprintEncodeData()
ref3<-celldex::DatabaseImmuneCellExpressionData()
# Custom references built from published sorted-population datasets.
# myref.data[[k]] holds an expression matrix, myref.group[[k]] the matching
# per-column labels (harmonised further below). Paths are machine-local --
# TODO make portable.
myref.data<-list()
myref.group<-list()
# [1] GSE128243: sorted NKT cells; labels derived from column names
# ("...NKT_HS_Stim3" -> "NKT_Stimulated", etc.).
myref.data[[1]]<-read.delim("F:/myWGCNA/SCRNA Data/GSE128243/GSE128243_logmedian.txt",header = T)
myref.group[[1]]<-colnames(myref.data[[1]])
myref.group[[1]]<-gsub(".*NKT_HS_(.*)[0-9]+$","NKT_\\1ulated",myref.group[[1]])
# [2] GSE124731: sorted immune populations; keep CD/NK/MAIT/Vd columns, then
# map labels onto the harmonised vocabulary and drop gamma-delta T cells.
myref.data[[2]]<-read.delim("F:/myWGCNA/SCRNA Data/GSE124731/GSE124731_Data with annotation.txt",header = T)
myref.data[[2]]<-myref.data[[2]][,grep("CD|NK|MAIT|Vd",colnames(myref.data[[2]]))]
myref.group[[2]]<-read.delim("F:/myWGCNA/SCRNA Data/GSE124731/GSE124731_low_input_rnaseq_meta_data.txt.gz",header = T)
myref.group[[2]]<-myref.group[[2]]$cell_type
myref.group[[2]]<-gsub("CD([48])","CD\\1+_T_cell",myref.group[[2]])
myref.group[[2]]<-gsub("MAIT","T_cell:MAI",myref.group[[2]])
myref.group[[2]]<-gsub("^NK$","NK_cell",myref.group[[2]])
myref.group[[2]]<-gsub("^iNKT$","NKT",myref.group[[2]])
myref.group[[2]]<-gsub("Vd[12]","T_cell:gamma-delta",myref.group[[2]])
myref.data[[2]][grep("gamma-delta",myref.group[[2]])]<-NULL
myref.group[[2]]<- myref.group[[2]][-grep("gamma-delta",myref.group[[2]])]
# [3] GSE128626 sorted NKT cells: first column holds quoted gene names.
myref.data[[3]]<-read.delim("F:/myWGCNA/SCRNA Data/GSE128626/GSE128626_data_matrix_sorted_NKT_cells.txt.gz",header = T)
rownames(myref.data[[3]])<-myref.data[[3]][,1]
rownames(myref.data[[3]])<-gsub("\'","",rownames(myref.data[[3]]))
myref.data[[3]]<-myref.data[[3]][,-1]
myref.group[[3]]<-colnames(myref.data[[3]])
myref.group[[3]]<-gsub("NKT_naive.*","NKT_Unstimulated",myref.group[[3]])
myref.group[[3]]<-gsub("NKT_exposed.*","NKT_Stimulated",myref.group[[3]])
# [4] GSE128626 sorted monocytes, same layout as [3].
myref.data[[4]]<-read.delim("F:/myWGCNA/SCRNA Data/GSE128626/GSE128626_data_matrix_sorted_monocytes.txt.gz",header = T)
rownames(myref.data[[4]])<-myref.data[[4]][,1]
rownames(myref.data[[4]])<-gsub("\'","",rownames(myref.data[[4]]))
myref.data[[4]]<-myref.data[[4]][,-1]
myref.group[[4]]<-colnames(myref.data[[4]])
myref.group[[4]]<-gsub("Monocytes_naive.*","Monocyte:Unstimulated",myref.group[[4]])
myref.group[[4]]<-gsub("Monocytes_exposed.*","Monocyte:Stimulated",myref.group[[4]])
# [5] GSE28726 microarray: keep GSM columns, log2-transform, and map the
# free-text group descriptions onto the harmonised labels.
myref.data[[5]]<-read.delim("F:/myWGCNA/Particular cells expression Data/GSE28726/GSE28726_Data with annotation.txt",header = T)
myref.data[[5]]<-myref.data[[5]][,grep("^GSM[0-9]+",colnames(myref.data[[5]]))]
myref.data[[5]]<-log2(myref.data[[5]]+1)
myref.group[[5]]<-read.delim("F:/myWGCNA/Particular cells expression Data/GSE28726/GSE28726_group_mod.txt",header = T)
myref.group[[5]]<-t(myref.group[[5]])
myref.group[[5]]<-myref.group[[5]][,1]
myref.group[[5]]<-gsub(".*CD4 T cell.*","CD4+_T_cell",myref.group[[5]])
myref.group[[5]]<-gsub(".*NKT cell.*resting","NKT_Unstimulated",myref.group[[5]])
myref.group[[5]]<-gsub(".*NKT cell.*stimulated","NKT_Stimulated",myref.group[[5]])
myref.group[[5]]<-gsub(".*CD1d-aGC\\+ Va24- T cell.*resting","dNKT_Unstimulated",myref.group[[5]])
myref.group[[5]]<-gsub(".*CD1d-aGC\\+ Va24- T cell.*stimulated","dNKT_Stimulated",myref.group[[5]])
# myref.data<-read.delim("E:/myWGCNA/SCRNA Data/GSE128243/GSE128243_ReadCounts.txt",
#                       header = T)
# myref.data<-LogNormalize(myref.data)
# myref.group<-read.delim("E:/myWGCNA/SCRNA Data/GSE128243/GSE128243_group_mod.txt",header = T)
# myref.group<-gsub("Human NKT cell ([un]*stimulated) sample [0-9]$","NKT_\\1",myref.group)
# myref.data<-sc.data.nkt@assays$RNA@counts
# myref.data<-as.matrix(myref.data)
# myref.data<-SummarizedExperiment(assays=list(counts=myref.data,logcounts=log2(myref.data+1)))
# myref.group<-sc.data.nkt$orig.ident
# Final harmonisation: collapse stimulation states into broad type labels.
myref.group[[1]]<-gsub(".*NKT.*","NKT",myref.group[[1]])
myref.group[[2]]<-gsub(".*NKT.*","NKT",myref.group[[2]])
myref.group[[2]]<-gsub("T_cell:MAI","T_cell_MAIT",myref.group[[2]])
myref.group[[2]]<-gsub(".*CD4.*","T_cell_CD4",myref.group[[2]])
myref.group[[2]]<-gsub(".*CD8.*","T_cell_CD8",myref.group[[2]])
myref.group[[3]]<-gsub(".*NKT.*","NKT",myref.group[[3]])
myref.group[[4]]<-gsub(".*Monocyte.*","Monocyte",myref.group[[4]])
myref.group[[5]]<-gsub(".*NKT.*","NKT",myref.group[[5]])
myref.group[[5]]<-gsub(".*CD4.*","T_cell_CD4",myref.group[[5]])
# Restrict the celldex references to the label sets of interest and rename
# them to the harmonised vocabulary.
ref1<-ref1[,ref1$label.main%in%c("B_cell","DC","Erythroblast","Macrophage","Monocyte","Neutrophils","NK_cell","Platelets")]
ref3<-ref3[,ref3$label.main%in%c("B cells","Monocytes","NK cells")]
ref3$label.main<-gsub(".*Monocyte.*","Monocyte",ref3$label.main)
ref3$label.main<-gsub(".*B cells","B_cell",ref3$label.main)
ref3$label.main<-gsub(".*NK cells","NK_cell",ref3$label.main)
# Combined reference and label lists consumed by SingleR below.
# ref.list<-list(ref1,ref2,ref3)
ref.list<-list(ref1,ref3)
# ref.list<-list(ref3)
ref.list<-c(ref.list,myref.data)
# labels.list<-list(ref1$label.main,ref2$label.main,ref3$label.main)
labels.list<-list(ref1$label.main,ref3$label.main)
# labels.list<-list(ref3$label.main)
labels.list<-c(labels.list,myref.group)
# Compile a small C++ helper that densifies a sparse matrix from its triplet
# representation, faster / leaner than as.matrix() on very large objects.
# NOTE(review): the result is an IntegerMatrix, so non-integer values are
# truncated when stored -- fine for raw counts, but verify before applying it
# to normalised (fractional) data.
Rcpp::sourceCpp(code='
#include <Rcpp.h>
using namespace Rcpp;
// [[Rcpp::export]]
IntegerMatrix asMatrix(NumericVector rp,
NumericVector cp,
NumericVector z,
int nrows,
int ncols){
int k = z.size() ;
IntegerMatrix mat(nrows, ncols);
for (int i = 0; i < k; i++){
mat(rp[i],cp[i]) = z[i];
}
return mat;
}')
as_matrix <- function(mat){
  # Densify a column-compressed sparse matrix (dgCMatrix) using the compiled
  # asMatrix() helper defined above. Row indices come straight from @i; the
  # column index of each stored value is recovered from the column pointers
  # @p via findInterval(). Dimnames are carried over from the input.
  dense <- asMatrix(rp = mat@i,
                    cp = findInterval(seq_along(mat@x) - 1, mat@p[-1]),
                    z = mat@x,
                    nrows = mat@Dim[1],
                    ncols = mat@Dim[2])
  dimnames(dense) <- mat@Dimnames
  dense
}
# Annotate every cell with SingleR against the combined reference list, then
# smooth the per-cell labels into per-cluster labels by majority vote.
# NOTE(review): as_matrix() stores into an IntegerMatrix, so the log-normalised
# values from slot "data" are truncated -- confirm this is acceptable input
# for SingleR before trusting the scores.
sc.data.singleR<-GetAssayData(sc.data,slot = "data")
sc.data.singleR<-as_matrix(sc.data.singleR)
sc.data.annot<-SingleR(test = sc.data.singleR,
                       ref = ref.list,
                       labels = labels.list,
                       de.method = "classic")
rm(sc.data.singleR)
sc.data$annot<-sc.data.annot$pruned.labels
sc.data$cluster_annot<-sc.data.annot$pruned.labels
cluster.group<-unique(sc.data$seurat_clusters)
cluster.group<-cluster.group[order(cluster.group)]
# FIX: dropped the original's no-op `i<-i+1` at the loop tail (a `for` loop
# resets `i` every iteration) and a stray bare `annot.sum.ratio` statement
# that had no effect inside the loop.
for(i in seq_along(cluster.group))
{
  print(i)
  # Per-cell pruned labels of the current cluster.
  a<-sc.data.annot$pruned.labels[sc.data$seurat_clusters==cluster.group[i]]
  annot.sum<-table(a)
  annot.sum.ratio<-annot.sum/sum(annot.sum)
  annot.sum.ratio<-annot.sum.ratio[order(annot.sum.ratio,decreasing = T)]
  if(max(annot.sum.ratio)>0.7)
  {
    # A single label dominates (>70% of cells): assign it cluster-wide.
    a<-names(annot.sum.ratio)[1]
  } else{
    # Labels reaching >40% of the top share are kept per cell; every other
    # cell falls back to the top label.
    annot.name.sub<-names(annot.sum.ratio)[annot.sum.ratio>max(annot.sum.ratio)*0.4]
    if(length(annot.name.sub)>1)
    {
      a[!a%in%annot.name.sub]<-names(annot.sum.ratio)[1]
    } else{
      a<-names(annot.sum.ratio)[1]
    }
  }
  sc.data$cluster_annot[sc.data$seurat_clusters==cluster.group[i]]<-a
}
DimPlot(sc.data, reduction = "umap", label = T,group.by = "cluster_annot")
#========================================marker annotation=========================
# S4 class describing one cell type by its marker genes:
#   marker   - gene symbols
#   positive - whether each marker is expected up (TRUE) or down (FALSE)
#   subunit  - grouping index: markers sharing a non-zero index are averaged
#              as one signal; 0 = scored individually (see cell.marker.annot)
#   subgroup - named list of child "Cells" objects, forming a type hierarchy
setClass(Class = "Cells",
         slots = c(marker="character",
                   positive="logical",
                   subunit="numeric",
                   subgroup="list"),
         sealed = F
)
# Validity: positive/subunit may be left empty; otherwise all three marker-wise
# slots must be the same length. Note `&` binds tighter than `||`, so the last
# three comparisons form a single conjunct.
setValidity(Class = "Cells",
            method = function(object){
              length(object@positive)==0 ||
                length(object@subunit)==0 ||
                length(object@positive)==length(object@marker)&
                length(object@positive)==length(object@subunit)&
                length(object@marker)==length(object@subunit)
            })
# Marker-gene knowledge base rooted at PBMC: one top-level entry per major
# cell type. `positive` gives the expected regulation direction, `subunit`
# groups markers that are evaluated as a single averaged signal (see the
# "Cells" class above).
PBMC<-new("Cells")
PBMC@subgroup<-list(HSC=new("Cells",marker=c("CD34","CD38","PTPRC","ITGA2","THY1"),
                            positive=c(T,F,F,T,T)),
                    MPP=new("Cells",marker=c("CD34","CD38","PTPRC","THY1"),
                            positive=c(T,F,F,F)),
                    CLP=new("Cells",marker=c("CD34","CD38","MME","PTPRC"),
                            positive=c(T,T,T,T)),
                    CMP=new("Cells",marker=c("CD34","CD38","CD7","MME","PTPRC","THY1","FLT3"),
                            positive=c(T,T,F,F,F,F,T)),
                    MEP=new("Cells",marker=c("CD34","CD38","CD7","MME","PTPRC","FLT3","IL3RA"),
                            positive=c(T,T,F,F,F,F,F)),
                    GMP=new("Cells",marker=c("CD34","CD38","MME","PTPRC","IL3RA","FLT3"),
                            positive=c(T,T,F,T,T,T)),
                    # NK: CD3-negative (the three CD3 chains averaged as one
                    # mandatory-free unit), CD56-positive.
                    NK_cell=new("Cells",marker=c("CD3D","CD3E","CD3G","NCAM1"),
                                positive=c(F,F,F,T),
                                subunit=c(1,1,1,0)),
                    T_cell=new("Cells",marker=c("CD3D","CD3E","CD3G"),
                               positive=c(T,T,T),
                               subunit=c(1,1,1)),
                    B_cell=new("Cells",marker=c("CD3D","CD3E","CD3G","CD19","MS4A1"),
                               positive=c(F,F,F,T,T),
                               subunit=c(1,1,1,0,0)),
                    Plasma_cell=new("Cells",marker=c("CD19","SDC1","IL6R","CD52","MZB1"),
                                    positive=c(F,T,T,F,T)),
                    Monocyte=new("Cells",marker=c("CD14"),
                                 positive=c(T)),
                    Macrophage=new("Cells",marker=c("ITGAM","CD68","CD163"),
                                   positive=c(T,T,T)),
                    pDC=new("Cells",marker=c("HLA-DRA","HLA-DRB1","CD209","CLEC4C","IL3RA","LILRA4"),
                            positive=c(T,T,T,T,T,T),
                            subunit=c(1,1,0,0,0,0)),
                    mDC=new("Cells",marker=c("ITGAX","HLA-DRA","HLA-DRB1","CD209","CD1C"),
                            positive=c(F,T,T,T,T),
                            subunit=c(0,1,1,0,0)),
                    Neutrophil=new("Cells",marker=c("ITGAM","CD16","ITGB2","FCGR2A","CD44","CD55","FUT4","ITGA4"),
                                   positive=c(T,T,T,T,T,T,T,F)),
                    Eosinophil=new("Cells",marker=c("PTPRC","IL5RA","CCR3","ADGRE1","ITGAM"),
                                   positive=c(T,T,T,T,T)),
                    Basophil=new("Cells",marker=c("CD19","IL3RA","KIT","ENPP3","FCER1A"),
                                 positive=c(F,T,F,T,T)),
                    Mast_cell=new("Cells",marker=c("FCGR2A","CD33","KIT","ENPP3","FCER1A"),
                                  positive=c(T,T,T,T,T)),
                    Erythroblast=new("Cells",marker=c("GYPA"),
                                     positive=c(T)),
                    Platelets=new("Cells",marker=c("ITGA2B","GP9","GP1BA","ITGB3","PPBP"),
                                  positive=c(T,T,T,T,T))
)
subgroup.combine<-function(genes,labels=genes,combine=1:length(genes),positive=rep(TRUE,length(genes)))
{
  # Build a list of "Cells" subgroups for all positive/negative combinations
  # of the given marker units. Each unique value of `combine` contributes one
  # marker unit; the running result becomes the subgroup of the next level,
  # producing a nested hierarchy. The two entries at each level are named
  # e.g. "CD16+" / "CD16-" from `labels` and the expected direction.
  #
  # genes    : marker gene symbols.
  # labels   : display labels per gene (defaults to the gene symbols).
  # combine  : unit index per gene; equal indices are combined in one level.
  # positive : expected direction per gene for the "positive" branch.
  if(!all(c(length(genes),length(labels),length(combine),length(positive))==length(genes)))
  {
    stop("Lengths are not equal")
  }
  # FIX: removed the original dead `res.pos<-list()` / `res.neg<-list()`
  # initialisations -- both were unconditionally overwritten in the loop.
  res<-list()
  for(i in unique(combine))
  {
    res.pos<-new("Cells",
                 marker=genes[combine==i],
                 positive=positive[combine==i],
                 subgroup=res)
    res.neg<-new("Cells",
                 marker=genes[combine==i],
                 positive=!positive[combine==i],
                 subgroup=res)
    res<-list(res.pos,res.neg)
    names(res)<-c(paste(labels[combine==i],ifelse(positive[combine==i],"+","-"),sep="",collapse = ""),
                  paste(labels[combine==i],ifelse(positive[combine==i],"-","+"),sep="",collapse = ""))
  }
  res
}
# Second-level marker definitions for the subtypes used downstream.
PBMC@subgroup$Platelets@subgroup<-list(active=new("Cells",marker=c("SELP"),
                                                  positive=c(T))
)
PBMC@subgroup$Monocyte@subgroup<-subgroup.combine(genes=c("FCGR3A"),
                                                  labels = c("CD16"))
PBMC@subgroup$NK_cell@subgroup<-subgroup.combine(genes = c("KLRB1"),
                                                 labels = c("CD161"))
PBMC@subgroup$T_cell@subgroup<-list(Th=new("Cells",marker=c("CD4","KLRB1","TRGC1","TRGC2","TRDC"),
                                           positive=c(T,F,F,F,F),
                                           subunit=c(0,0,1,1,1)),
                                    Ts=new("Cells",marker=c("CD8A","KLRB1","TRGC1","TRGC2","TRDC"),
                                           positive=c(T,F,F,F,F),
                                           subunit=c(0,0,1,1,1)),
                                    NKT=new("Cells",marker=c("NCAM1","KLRB1","KLRG1","KLRD1"),
                                            positive=c(T,T,T,T),
                                            subunit=c(0,1,1,1)),
                                    # BUG FIX: the original supplied 3 positive/subunit
                                    # values for only 2 markers, which violates the
                                    # "Cells" validity check and makes new() error at
                                    # runtime; trimmed to length 2.
                                    # TODO confirm whether a third marker was intended.
                                    gdT=new("Cells",marker=c("TRGV9","TRDV2"),
                                            positive=c(T,T),
                                            subunit=c(1,1)),
                                    MAIT=new("Cells",marker=c("SLC4A10","TRAV1-2"),
                                             positive=c(T,T))
)
PBMC@subgroup$T_cell@subgroup$Th@subgroup<-list(active=new("Cells",marker=c("HLA-DRA","HLA-DRB1"),
                                                           positive=c(T,T),
                                                           subunit=c(1,1)),
                                                naive_memory=new("Cells",marker=c("PTPRC"),
                                                                 positive=c(T)),
                                                Treg=new("Cells",marker=c("IL2RA","IL7R"),
                                                         positive=c(T,F))
)
PBMC@subgroup$T_cell@subgroup$Ts@subgroup<-list(active=new("Cells",marker=c("HLA-DRA","HLA-DRB1"),
                                                           positive=c(T,T),
                                                           subunit=c(1,1)),
                                                naive_memory=new("Cells",marker=c("PTPRC"),
                                                                 positive=c(T))
)
PBMC@subgroup$T_cell@subgroup$gdT@subgroup<-subgroup.combine(genes=c("NCAM1","KLRB1","CD8A"),
                                                             labels = c("CD56","CD161","CD8"))
PBMC@subgroup$T_cell@subgroup$NKT@subgroup<-subgroup.combine(genes = c("HAVCR2"),
                                                             labels = c("TIM3"))
cell.marker.annot<-function(marker,log2FC)
{
  # Assign a cell-type label by walking the global PBMC marker hierarchy.
  #
  # marker : character vector of differential marker genes for one cluster.
  # log2FC : matching average log2 fold changes (same length/order).
  #
  # At each level every candidate subtype is scored: a subtype matches when
  # all of its observed markers change in the expected direction; its score
  # is the mean fold change of the supporting markers. The best-scoring
  # subtype is appended to the label ("PBMC_T_cell_...") and the walk
  # descends until nothing matches or no subgroups remain.
  if(length(log2FC)!=length(marker))
  {
    stop("The length of marker and log2FC are not equal!")
  }
  cell.type<-"PBMC"
  marker.set<-PBMC
  match.list<-list()
  while (T) {
    match.list[[cell.type]]<-list()
    if(length(marker.set@subgroup)>0)
    {
      temp.cell<-c()
      for(cell.group in names(marker.set@subgroup))
      {
        temp.cell[cell.group]<-0
        # log2FC per expected marker; 0 when the marker was not observed.
        group.marker<-rep(NA,length(marker.set@subgroup[[cell.group]]@marker))
        names(group.marker)<-marker.set@subgroup[[cell.group]]@marker
        group.marker<-names(group.marker)%in%marker
        names(group.marker)<-marker.set@subgroup[[cell.group]]@marker
        group.marker[group.marker]<-log2FC[match(names(group.marker)[which(group.marker)],marker)]
        group.log2FC<-group.marker
        group.positive<-ifelse(group.log2FC==0,NA,group.marker>0)
        # Bookkeeping table of how this subtype matched (kept for inspection).
        match.list[[cell.type]][[cell.group]]<-data.frame(marker=marker.set@subgroup[[cell.group]]@marker,
                                                          positive=marker.set@subgroup[[cell.group]]@positive,
                                                          group.marker=ifelse(group.log2FC==0,NA,names(group.log2FC)),
                                                          group.positive=group.positive,
                                                          group.log2FC=group.log2FC)
        if(length(marker.set@subgroup[[cell.group]]@subunit)>0)
        {
          temp.subunit<-marker.set@subgroup[[cell.group]]@subunit
          names(temp.subunit)<-marker.set@subgroup[[cell.group]]@marker
          if(length(temp.subunit[temp.subunit==0])>0)
          {
            # Mandatory (subunit == 0) markers must all be observed.
            # BUG FIX: index group.positive by the marker *names*; the
            # original indexed by the numeric subunit values (all zeros),
            # which selects nothing and silently disabled this check.
            if(any(is.na(group.positive[names(temp.subunit)[temp.subunit==0]]))) next
          }
          temp.subunit<-temp.subunit[temp.subunit>0]
          if(length(temp.subunit)>0)
          {
            # Markers sharing a subunit index are averaged into one signal.
            for(j in 1:length(unique(temp.subunit)))
            {
              group.log2FC[names(temp.subunit[temp.subunit==unique(temp.subunit)[j]])]<-mean(group.log2FC[names(temp.subunit[temp.subunit==unique(temp.subunit)[j]])])
            }
            group.positive<-ifelse(group.log2FC==0,NA,group.log2FC>0)
          }
        }
        # Every observed marker must agree with the expected direction
        # (expected-down markers map to a negative factor via -0.5).
        if(all(group.log2FC*(as.numeric(marker.set@subgroup[[cell.group]]@positive)-0.5)>=0))
        {
          if(all(group.log2FC<0))
          {
            temp.cell[cell.group]<-mean(abs(group.log2FC))
          } else{
            temp.cell[cell.group]<-mean(group.log2FC[group.log2FC>=0])
          }
        }
      }
      # Descend into the best-scoring subtype, or stop when nothing matched.
      now.cell<-names(temp.cell)[which(temp.cell==max(temp.cell))][1]
      if(max(temp.cell)>0){
        cell.type<-sprintf("%s_%s",cell.type,now.cell)
      } else{
        break
      }
      marker.set<-marker.set@subgroup[[now.cell]]
    } else{
      break
    }
  }
  cell.type
}
# Annotate every cluster via the marker hierarchy using its own differential
# markers, then map the labels back onto the cells.
cell.annot<-c()
for(i in unique(cluster.markers$cluster))
{
  marker<-cluster.markers$gene[cluster.markers$cluster==i]
  log2FC<-cluster.markers$avg_log2FC[cluster.markers$cluster==i]
  cell.annot[i]<-cell.marker.annot(marker,log2FC)
}
# Strip the root prefix ("PBMC_T_cell_..." -> "T_cell_...").
cell.annot<-gsub("PBMC_","",cell.annot)
# Factor codes of seurat_clusters are 1-based, matching cell.annot's order.
sc.data$cluster_marker_annot<-cell.annot[match(as.numeric(sc.data$seurat_clusters),1:length(cell.annot))]
DimPlot(sc.data, reduction = "umap", label = T,group.by = "cluster_marker_annot")
# Re-label idents as "<cluster>-<annotation>" for the cluster tree below.
Idents(sc.data)<-paste(as.character(sc.data$seurat_clusters),sc.data$cluster_marker_annot,sep="-")
# NOTE(review): assigning "NA" into a factor that lacks an "NA" level produces
# real NA values with a warning -- confirm this is the intended behaviour.
Idents(sc.data)[grep("NA",Idents(sc.data))]<-"NA"
sc.data<-BuildClusterTree(sc.data,slot = "scale.data")
# Make tree tip labels unique by appending their position index.
sc.data@tools$BuildClusterTree$tip.label<-paste(sc.data@tools$BuildClusterTree$tip.label,
                                                1:length(sc.data@tools$BuildClusterTree$tip.label),
                                                sep="-")
Tool(object = sc.data, slot = 'BuildClusterTree')
PlotClusterTree(sc.data,
                type="f",
                node.pos=2,
                no.margin=T)
#========================================NKT annotation========================
# Start from the marker-based cluster labels, then overwrite a cluster with
# "NKT" when its most frequent SingleR-based label is NKT.
# BUG FIX: the original referenced the misspelled columns
# `cluseter_marker_annot` / `cluseter_final_annot` and the undefined object
# `sc_data` (the script's object is `sc.data`), so these lines could not work.
sc.data$cluster_final_annot<-sc.data$cluster_marker_annot
for(i in unique(sc.data$seurat_clusters))
{
  a<-table(sc.data$cluster_annot[sc.data$seurat_clusters==i])
  if(names(a[order(a,decreasing = T)][1])=="NKT") sc.data$cluster_final_annot[sc.data$seurat_clusters==i]<-"NKT"
}
#========================================pseudo bulk RNA========================
# S3 generic: collapse single-cell counts into per-group "pseudo bulk" RNA
# profiles. Methods are defined for a list of Seurat objects, a single Seurat
# object, and a catch-all default.
pseudo.bulk.rna<-function(x,...)
{
  UseMethod("pseudo.bulk.rna")
}
pseudo.bulk.rna.list<-function(x,meta=NULL,...){
  # Collapse a list of Seurat objects into a genes x objects pseudo-bulk count
  # matrix by summing raw counts over the cells of each object.
  #
  # x    : list of Seurat objects.
  # meta : optional data.frame whose first column matches the pseudo-bulk
  #        column names and whose second column supplies replacement names;
  #        when NULL, names are reduced to their leading GSM accession.
  #
  # Returns the matrix restricted to genes common to every object; columns
  # whose (re)name resolves to NA are dropped.
  if(length(x)==0)
  {
    stop("list length is 0\n")
  }
  # BUG FIX: the original tested class(x[[1]][[1]]) -- the class of an element
  # *inside* the first object -- with string equality; test the object itself
  # with inherits() instead.
  if(!inherits(x[[1]],"Seurat"))
  {
    stop("list seems not Seurat object list\n")
  }
  # Intersect gene sets across all objects.
  common.gene<-rownames(x[[1]]@assays$RNA@counts)
  if(length(x)>1)
  {
    for(i in 2:length(x))
    {
      common.gene<-common.gene[common.gene%in%rownames(x[[i]]@assays$RNA@counts)]
    }
  }
  pseudo.bulk<-matrix(0,
                      nrow = length(common.gene),
                      ncol = length(x),
                      dimnames = list(row=common.gene,
                                      col=names(x)
                      )
  )
  for(i in seq_len(ncol(pseudo.bulk)))
  {
    # BUG FIX: index rows by name so differing gene order between objects
    # cannot silently misalign the sums (the original kept each object's own
    # row order via a logical %in% filter).
    pseudo.bulk[,i]<-rowSums(x[[i]]@assays$RNA@counts[rownames(pseudo.bulk),])
  }
  if(!is.null(meta))
  {
    colnames(pseudo.bulk)<-meta[match(colnames(pseudo.bulk),meta[,1]),2]
  } else{
    colnames(pseudo.bulk)<-gsub("^(GSM[0-9]+).*","\\1",colnames(pseudo.bulk))
  }
  pseudo.bulk<-pseudo.bulk[,!is.na(colnames(pseudo.bulk))]
  return(pseudo.bulk)
}
pseudo.bulk.rna.Seurat<-function(x,split.by=Idents(x),meta=NULL,...){
  # Pseudo-bulk a single Seurat object: split it into groups, then sum the
  # raw counts over the cells of each group.
  # NOTE(review): SplitObject() normally expects `split.by` to name a metadata
  # column; the default passes the Idents() factor itself -- confirm this
  # works with the Seurat version in use.
  groups<-SplitObject(x,split.by = split.by)
  gene.names<-rownames(x@assays$RNA@counts)
  # Preallocate the genes x groups result, one column per split.
  pseudo.bulk<-matrix(0,
                      nrow = length(gene.names),
                      ncol = length(groups),
                      dimnames = list(row=gene.names,
                                      col=names(groups)))
  for(j in 1:ncol(pseudo.bulk))
  {
    pseudo.bulk[,j]<-rowSums(groups[[j]]@assays$RNA@counts)
  }
  # Optionally rename columns via the meta map, otherwise keep only the GSM
  # accession; drop columns whose name resolves to NA.
  if(is.null(meta))
  {
    colnames(pseudo.bulk)<-gsub("^(GSM[0-9]+).*","\\1",colnames(pseudo.bulk))
  } else{
    colnames(pseudo.bulk)<-meta[match(colnames(pseudo.bulk),meta[,1]),2]
  }
  pseudo.bulk[,!is.na(colnames(pseudo.bulk))]
}
pseudo.bulk.rna.default<-function(x,...)
{
  # Fallback method: inform the user instead of failing hard.
  cat('You should try a Seurat object or Seurat object list.\n')
}
#=======================================NKT cell==============================
modify_vlnplot<- function(obj,
                          feature,
                          pt.size = 0,
                          plot.margin = unit(c(0, 0, 0, 0), "cm"),
                          ...) {
  # One stripped-down violin panel for stacking in StackedVlnPlot(): no
  # legend, no title, no x-axis text; the feature name becomes a horizontal
  # y-axis label.
  base_plot <- VlnPlot(obj, features = feature, pt.size = pt.size, ... )
  base_plot +
    xlab("") +
    ylab(feature) +
    ggtitle("") +
    theme(legend.position = "none",
          axis.text.x = element_blank(),
          axis.text.y = element_blank(),
          axis.ticks.x = element_blank(),
          axis.ticks.y = element_line(),
          axis.title.y = element_text(size = rel(1), angle = 0, vjust = 0.5),
          plot.margin = plot.margin )
}
StackedVlnPlot<- function(obj, features,
                          pt.size = 0,
                          plot.margin = unit(c(0, 0, 0, 0), "cm"),
                          ...) {
  # Stack one modified violin panel per feature into a single column; only
  # the bottom panel keeps its x-axis labels (rotated 45 degrees).
  panels <- purrr::map(features, function(feat) modify_vlnplot(obj = obj, feature = feat, ...))
  bottom <- length(panels)
  panels[[bottom]] <- panels[[bottom]] +
    theme(axis.text.x = element_text(angle = 45, hjust = 1, vjust = 1),
          axis.ticks.x = element_line())
  patchwork::wrap_plots(plotlist = panels, ncol = 1)
}
# Subcluster the cells finally annotated as NKT.
sc.data.nkt<-sc.data[,grep("T_cell_NKT",sc.data$cluster_final_annot)]
sc.data.nkt<-FindVariableFeatures(sc.data.nkt)
sc.data.nkt<-RunPCA(sc.data.nkt,features = VariableFeatures(sc.data.nkt))
# PC significance diagnostics for the subset.
sc.data.nkt <- JackStraw(sc.data.nkt, num.replicate = 100)
sc.data.nkt <- ScoreJackStraw(sc.data.nkt, dims = 1:20)
JackStrawPlot(sc.data.nkt, dims = 1:20)
ElbowPlot(sc.data.nkt,ndims = 50)
# Hand-picked informative PCs (chosen from the plots above).
pca.dim<-c(2:9,12,13,18)
sc.data.nkt<-FindNeighbors(sc.data.nkt, dims = pca.dim)
sc.data.nkt<-FindClusters(sc.data.nkt, resolution = 0.5)
sc.data.nkt<-BuildClusterTree(sc.data.nkt,slot = "scale.data")
Tool(object = sc.data.nkt, slot = 'BuildClusterTree')
PlotClusterTree(sc.data.nkt)
sc.data.nkt<-RunUMAP(sc.data.nkt,dims = pca.dim)
DimPlot(sc.data.nkt,reduction = "umap")
DimPlot(sc.data.nkt,reduction = "umap",label = T,group.by = "seurat_clusters")
# Per-cluster differential markers for the NKT subset.
# BUG FIX: the original iterated over 1:length(cluster.markers.nkt), i.e.
# over the just-emptied list (1:0, which runs with i = 1 and i = 0), so no
# markers were ever computed; iterate over the cluster levels instead. The
# duplicated `cluster.markers.nkt<-list()` initialisation was also removed.
levels.nkt<-levels(factor(sc.data.nkt$seurat_clusters))
cluster.markers.nkt<-list()
for(i in seq_along(levels.nkt))
{
  cluster.markers.nkt[[levels.nkt[i]]]<-FindMarkers(sc.data.nkt,
                                                    ident.1 = levels.nkt[i],
                                                    only.pos = F,
                                                    min.pct = 0.1,
                                                    logfc.threshold = 0.1)
}
# Manual annotation of the NKT subclusters, keyed by seurat_clusters level.
# Assignments are applied in order, so later lines intentionally override
# earlier ones (e.g. cluster "14" is first NKT_DN_ITGAX, finally NKT_CD8).
nkt.annot<-c()
nkt.annot[c("0")]<-"NKT_CD8"
nkt.annot[c("7","11")]<-"NKT_CD8_TIM3"
nkt.annot["13"]<-"NKT_CD8_CD62L"
nkt.annot["8"]<-"NKT_CD8_CD62L"
nkt.annot["6"]<-"NKT_CD4_CD40LG" #C0
nkt.annot["16"]<-"NKT_CD8" #C1
nkt.annot[c("15","12","4","14")]<-"NKT_DN_ITGAX" #C1
nkt.annot[c("3")]<-"NKT_CD8"
nkt.annot["14"]<-"NKT_CD8"
nkt.annot[c("1","2","5","9","10")]<-"NKT_CD8"
# Order by cluster number and map the labels back onto the cells.
nkt.annot<-nkt.annot[order(as.numeric(names(nkt.annot)))]
sc.data.nkt$cluster_annot_nkt<-nkt.annot[match(sc.data.nkt$seurat_clusters,names(nkt.annot))]
DimPlot(sc.data.nkt,reduction = "umap",label = T,group.by = "cluster_annot_nkt",repel = T)
# Cells of cluster 7 without CD8A expression are re-labelled as CD4 TIM3.
sc.data.nkt$cluster_annot_nkt[sc.data.nkt@assays$integrated@data["CD8A",]<1&sc.data.nkt$seurat_clusters=="7"]<-"NKT_CD4_TIM3"
# Propagate a TIM3 positive/negative flag back to the full object.
# NOTE(review): the labels tested here ("NKT_CD8_TIM3_CD62L",
# "NKT_CD4_TIM3_CD62L") are never assigned above -- the assigned labels are
# "NKT_CD8_TIM3"/"NKT_CD4_TIM3" -- so every NKT cell currently ends up
# TIM3neg; verify which naming is intended.
sc.data$cluster_nkt_tim3<-"NA"
sc.data$cluster_nkt_tim3[match(colnames(sc.data.nkt)[sc.data.nkt$cluster_annot_nkt%in%c("NKT_CD8_TIM3_CD62L","NKT_CD4_TIM3_CD62L")],
                               colnames(sc.data))]<-"NKT_TIM3pos"
sc.data$cluster_nkt_tim3[match(colnames(sc.data.nkt)[!sc.data.nkt$cluster_annot_nkt%in%c("NKT_CD8_TIM3_CD62L","NKT_CD4_TIM3_CD62L")],
                               colnames(sc.data))]<-"NKT_TIM3neg"
sc.data.nkt$cluster_nkt_tim3<-"NKT_TIM3neg"
sc.data.nkt$cluster_nkt_tim3[sc.data.nkt$cluster_annot_nkt=="NKT_CD8_TIM3_CD62L"]<-"NKT_TIM3pos"
sc.data.nkt$cluster_nkt_tim3[sc.data.nkt$cluster_annot_nkt=="NKT_CD4_TIM3_CD62L"]<-"NKT_TIM3pos"
#=======================================pseudo time
# Restrict the pseudotime analysis to NKT cells from selected batches.
# BUG FIX: the original referenced the undefined object `sc.meta.cell` and a
# nonexistent `.Datasets` column; the batch column lives in sc.meta.pbmc as
# `characteristics...Datasets` (see the batch assignment at the top of the
# script). "Batch07" was also listed twice.
sc.data.nkt2<-sc.data.nkt[,sc.data.nkt$sampleID%in%sc.meta.pbmc$Sample.name[sc.meta.pbmc$`characteristics...Datasets`%in%c("Batch02","Batch06","Batch07")]]
# Monocle CellDataSet built from raw counts (densified via as_matrix(), then
# coerced back to the sparseMatrix class monocle expects).
cds <- newCellDataSet(as(as_matrix(sc.data.nkt2@assays$RNA@counts),"sparseMatrix"),
                      phenoData = AnnotatedDataFrame(data=sc.data.nkt2@meta.data),
                      featureData = AnnotatedDataFrame(data=data.frame(gene_short_name=rownames(sc.data.nkt2@assays$RNA@counts),
                                                                       row.names = rownames(sc.data.nkt2@assays$RNA@counts))),
                      lowerDetectionLimit = 0.1,
                      expressionFamily = negbinomial.size())
rm(sc.data.nkt2)
cds<-estimateSizeFactors(cds)
cds<-estimateDispersions(cds)
# Order cells along the trajectory using the NKT variable genes.
cds<-setOrderingFilter(cds,VariableFeatures(sc.data.nkt))
plot_ordering_genes(cds)
cds <- reduceDimension(cds, max_components = 2,
                       method = 'DDRTree')
cds <- orderCells(cds)
#' Generate a table of descriptive statistics.
#'
#' This is a wrapper function of \code{stat_tab}, allowing for grouped variables,
#' splitting the statistics table by the `row_split` variable.
#'
#' @param x Variables to be used or a \code{formula} for summary table.
#' If \code{x} is a \code{formula}, then the \code{group} variable should
#' be provided at the right hand side, use \code{1} if there's no grouping
#' variable. And \code{row_split} should also be provided on the right hand side
#' of the formula and separate it using \code{|} with grouping variable. For example,
#' \code{age + sex ~ treat|cycle} or \code{age + sex ~ 1|cycle} without grouping.
#' See details.
#' @param data A \code{data.frame} from which the variables in \code{vars}
#' should be taken.
#' @param group Name of the grouping variable.
#' @param row_split Variable that used for splitting table rows, rows will be
#' split using this variable. Useful for repeated measures.
#' @param total If a "Total" column will be created (default). Specify
#' \code{FALSE} to omit the column.
#' @param select a named vector with as many components as row-variables. Every
#' element of `select` will be used to select the individuals to be analyzed
#' for every row-variable. Name of the vector corresponds to the row variable,
#' element is the selection.
#' @param add_missing If missing number and missing percentage will be
#' reported in the summary table, default is `TRUE`. This will also produce
#' data missingness report if set \code{TRUE}. See \code{\link{report_missing}}
#' for details.
#' @param add_obs Add an observation row (default).
#' @param digits An integer specifying the number of significant digits to keep,
#' default is 3.
#' @param digits_pct An integer specifying the number of digits after the
#' decimal place for percentages, default is 0.
#' @param rounding_fn The function to use to do the rounding. Defaults is
#' \code{\link{signif_pad}}. To round up by digits instead of significant
#' values, set it to \code{round_pad}.
#' @param subjid_string A character naming the column used to identify subject,
#' default is \code{"subjid"}.
#' @param print_plot A logical value, print summary plot of the variables (default).
#' @param render_num A character or vector indicating which summary will be reported,
#' default is "Median [Min, Max]". You can change this to "Median [IQR]" then the
#' median and IQR will be reported instead of "Median [Min, Max]". Use
#' \code{options(cctu_render_num = "Median [IQR]")} to set global options.
#' See details \code{\link{render_numeric}}.
#' @param logical_na_impute Impute missing values with \code{FALSE} (default),
#' \code{NA} keep as it is, or \code{TRUE}. The nominator for the logical vector is
#' the number of \code{TRUE}. For \code{FALSE} or \code{TRUE}, the denominator will
#' be all values regardless of missingness, but the non-missing number used as
#' denominator for \code{NA}. Set it to \code{FALSE} if you want to summarise multiple
#' choice variables and \code{NA} for Yes/No type logical variables but don't want No
#' in the summary. You can used a named list in \code{x} and stack multiple
#' choice in one category.
#' @param blinded A logical scalar, if summary table will be report by
#' \code{group} (default) or not. This will ignore \code{group} if set to \code{TRUE}
#' and grouping summary will not be reported.
#' @param ... Not used.
#' @details
#' \strong{1. Parameter settings with global options}
#'
#' Some of the function parameters can be set with options. This will have a global
#' effect on the \code{cctab} function. It is an ideal way to set global settings
#' if you want this to be effective globally. Currently, you can set \code{digits},
#' \code{digits_pct}, \code{subjid_string}, \code{print_plot}, \code{render_num} and
#' \code{blinded} by adding \code{"cctu_"} prefix in the \code{options}. For example,
#' you can suppress the plot from printing by setting \code{options(cctu_print_plot = FALSE)}.
#'
#' \strong{2. Formula interface}
#'
#' There are two interfaces, the default, which typically takes a variable vector from
#' \code{data.frame} for \code{x}, and the formula interface. The formula interface is
#' less flexible, but simpler to use and designed to handle the most common use cases.
#' For the formula version, the formula is expected to be a two-sided formula. Left hand
#' side is the variables to be summarised and the right hand side is the group and/or split
#' variable. To include a row splitting variable, use \code{|} to separate the row splitting
#' variable after the grouping variable and then the row split variable. For example,
#' \code{age + sex ~ treat|visit}. The right hand side of the formula will be treated as a grouping
#' variable by default. A value of \code{1} should be provided if there is no grouping variable,
#' for example \code{age + sex ~ 1} or \code{age + sex ~ 1|visit} by visit.
#'
#' \strong{3. Return}
#'
#' A summary table with some attributes will be returned; a method has been written for \code{rbind}.
#' So you can use \code{rbind} to combine two tables without losing any attributes. An attribute
#' \code{position} will be used to produce a nice table. There are 4 possible values for each
#' rows. Row name printed as the first column in the word table. Some styles will be applied to each
#' row based on the \code{position} attributes.
#' \tabular{ll}{
#' \code{0} \tab indicates the row will be bolded, spanned through all columns and a grey background
#' in the word \cr
#' \tab \cr
#' \code{1} \tab indicates the row will be bolded \cr
#' \tab \cr
#' \code{2} \tab the row will be bolded and spanned through all columns \cr
#' \tab \cr
#' \code{3} \tab indicates the row of the first column will be indented \cr
#' }
#'
#' @seealso
#' \code{\link{signif_pad}}
#' \code{\link{round_pad}}
#' \code{\link{stat_tab}}
#' \code{\link{sumby}}
#' \code{\link{dump_missing_report}}
#' \code{\link{get_missing_report}}
#' \code{\link{render_numeric}}
#' \code{\link{render_cat}}
#' @return A matrix with `cttab` class.
#'
#' @example inst/examples/cttab.R
#'
#' @export
#'
cttab <- function(x, ...) {
  # S3 generic -- dispatches on `x`: a variable vector/list goes to
  # cttab.default, a formula goes to cttab.formula.
  UseMethod("cttab")
}
#' @describeIn cttab The default interface, where \code{x} is a \code{data.frame}.
#' @export
cttab.default <- function(x,
                          data,
                          group = NULL,
                          row_split = NULL,
                          total = TRUE,
                          select = NULL,
                          add_missing = TRUE,
                          add_obs = TRUE,
                          digits = getOption("cctu_digits", default = 3),
                          digits_pct = getOption("cctu_digits_pct", default = 0),
                          rounding_fn = signif_pad,
                          subjid_string = getOption("cctu_subjid_string", default = "subjid"),
                          print_plot = getOption("cctu_print_plot", default = TRUE),
                          render_num = getOption("cctu_render_num", default = "Median [Min, Max]"),
                          logical_na_impute = c(FALSE, NA, TRUE),
                          blinded = getOption("cctu_blinded", default = FALSE),
                          ...) {
  # Default interface: `x` holds the variables to summarise. This is a thin
  # wrapper -- every argument is handed to the shared implementation
  # unchanged, with `x` renamed to `vars`.
  .cttab.internal(vars = x, data = data, group = group, row_split = row_split,
                  total = total, select = select, add_missing = add_missing,
                  add_obs = add_obs, digits = digits, digits_pct = digits_pct,
                  rounding_fn = rounding_fn, subjid_string = subjid_string,
                  print_plot = print_plot, render_num = render_num,
                  logical_na_impute = logical_na_impute, blinded = blinded)
}
#' @describeIn cttab The formula interface, where \code{x} is a \code{formula}.
#' @export
cttab.formula <- function(x,
                          data,
                          total = TRUE,
                          select = NULL,
                          add_missing = TRUE,
                          add_obs = TRUE,
                          digits = getOption("cctu_digits", default = 3),
                          digits_pct = getOption("cctu_digits_pct", default = 0),
                          rounding_fn = signif_pad,
                          subjid_string = getOption("cctu_subjid_string", default = "subjid"),
                          print_plot = getOption("cctu_print_plot", default = TRUE),
                          render_num = getOption("cctu_render_num", default = "Median [Min, Max]"),
                          logical_na_impute = c(FALSE, NA, TRUE),
                          blinded = getOption("cctu_blinded", default = FALSE),
                          ...) {
  # Formula interface: `lhs ~ group` or `lhs ~ group|row_split`, where a
  # group of 1 means "no grouping". The formula is decomposed, validated with
  # guard clauses, and then forwarded to the shared implementation.
  f <- split_formula(x)
  logical_na_impute <- logical_na_impute[1]
  if(is.null(f$lhs))
    stop("No variables provided to summarise, please add variable to the left hand side of the formula.")
  if(length(f$lhs) != 1)
    stop("Invalid formula, only `+` is allowed to list multiple variables.")
  if(!length(f$rhs) %in% c(1, 2))
    stop("Invalid formula, multiple split provided.")
  if(f$rhs[[1]] == ".")
    stop("Invalid formula, dot is not allowed.")
  # A literal 1 on the RHS disables grouping; otherwise take its variables.
  group <- if(f$rhs[[1]] == 1) NULL else all.vars(f$rhs[[1]])
  vars <- all.vars(f$lhs[[1]])
  # The optional second RHS part (after `|`) names the row-split variable.
  row_split <- if(length(f$rhs) == 2) all.vars(f$rhs[[2]]) else NULL
  .cttab.internal(vars = vars, data = data, group = group,
                  row_split = row_split, total = total, select = select,
                  add_missing = add_missing, add_obs = add_obs,
                  digits = digits, digits_pct = digits_pct,
                  rounding_fn = rounding_fn, subjid_string = subjid_string,
                  print_plot = print_plot, render_num = render_num,
                  logical_na_impute = logical_na_impute, blinded = blinded)
}
# Internal workhorse shared by cttab.default() and cttab.formula().
#
# Validates that every requested variable exists in `data`, optionally drops
# the grouping when `blinded = TRUE`, splits the data by `row_split` (one
# sub-table per level), and builds each sub-table via stat_tab(). As a side
# effect it appends to the package-level missing-data report
# (cctu_env$missing_report_data) and, when `print_plot = TRUE`, calls
# cctab_plot() to print summary plots.
#
# Returns a character matrix of class "cttab" carrying a `position`
# attribute used downstream for table styling (see the cttab() docs).
.cttab.internal <- function(vars,
                            data,
                            group = NULL,
                            row_split = NULL,
                            total = TRUE,
                            select = NULL,
                            add_missing = TRUE,
                            add_obs = TRUE,
                            digits = getOption("cctu_digits", default = 3),
                            digits_pct = getOption("cctu_digits_pct", default = 0),
                            rounding_fn = signif_pad,
                            subjid_string = getOption("cctu_subjid_string", default = "subjid"),
                            print_plot = getOption("cctu_print_plot", default = TRUE),
                            render_num = getOption("cctu_render_num", default = "Median [Min, Max]"),
                            logical_na_impute = c(FALSE, NA, TRUE),
                            blinded = getOption("cctu_blinded", default = FALSE)) {
  # Captured call; not referenced later in this function (review note).
  tpcall <- match.call()
  # Only the first element of the imputation choice vector is used.
  logical_na_impute <- logical_na_impute[1]
  stopifnot(logical_na_impute %in% c(FALSE, NA, TRUE))
  # Blinded reporting: drop the grouping so no per-arm summaries are produced.
  if(blinded)
    group <- NULL
  vars_list <- c(unlist(vars), group, row_split)
  if (!all(vars_list %in% names(data))) {
    stop(
      "Variable ",
      paste(vars_list[!vars_list %in% names(data)], collapse = ", "),
      " not in the dataset, please check!"
    )
  }
  # Convert to data.table to avoid losing formats/attributes.
  data <- data.table::as.data.table(data)
  # Group variable to factor
  if (!is.null(group)) {
    # Remove records with a missing group value.
    data <- data[!is.na(data[[group]]), ]
    if (has.labels(data[[group]]) | !is.factor(data[[group]]))
      data[[group]] <- to_factor(data[[group]], drop.levels = TRUE)
  }
  if(base::anyDuplicated(vars_list))
    stop("The variable list, group or row split variable have duplicated variable.")
  if (!is.null(row_split)) {
    if (has.labels(data[[row_split]]) | !is.factor(data[[row_split]]))
      data[[row_split]] <- to_factor(data[[row_split]], drop.levels = TRUE)
  }
  # Build a one-row blank "cttab" matrix (used as a section-header row);
  # `pos` becomes its `position` attribute for downstream styling.
  blnk_cttab <- function(row_labs, pos, from_tab){
    to_insert <- matrix(c(rep("", ncol(from_tab))), nrow = 1,
                        dimnames = list(row_labs, colnames(from_tab)))
    structure(to_insert,
              position = pos,
              class = class(from_tab))
  }
  # Wrapped tabulation function: builds the summary table for one data
  # chunk (the full data, or one row_split level).
  calc_tab <- function(dat){
    # A list of variable vectors means grouped sections: one stat_tab()
    # call per list element, each optionally preceded by a header row
    # labelled with the element's name.
    if(is.list(vars)){
      res <- lapply(seq_along(vars), function(i){
        x <- vars[[i]]
        r <- stat_tab(vars = x,
                      group = group,
                      data = dat,
                      total = total,
                      select = select,
                      add_missing = add_missing,
                      digits = digits,
                      digits_pct = digits_pct,
                      rounding_fn = rounding_fn,
                      render_num = render_num,
                      logical_na_impute = logical_na_impute)
        # Add a grouping header row when the list element is named.
        if(!is_empty(names(vars)[i])){
          to_insert <- blnk_cttab(row_labs = names(vars)[i],
                                  pos = 0,
                                  from_tab = r)
          r <- rbind(to_insert, r)
        }
        return(r)
      })
      res <- do.call(rbind, res)
      # This is for logical values that have no variable name: use the
      # grouping label as the variable name (position 0 followed
      # immediately by position 1 is re-coded as position 2).
      pos <- attr(res, "position")
      ps <- which(pos == 0 & c(pos[-1], 3) == 1)
      if(any(!is_empty(ps))){
        pos[ps] <- rep(2, length(ps))
        attr(res, "position") <- pos
      }
    }else{
      res <- stat_tab(vars = vars,
                      group = group,
                      data = dat,
                      total = total,
                      select = select,
                      add_missing = add_missing,
                      digits = digits,
                      digits_pct = digits_pct,
                      rounding_fn = rounding_fn,
                      render_num = render_num,
                      logical_na_impute = logical_na_impute)
    }
    # Add an "Observation" count row on top (per group, plus Total).
    if(!is.null(group)){
      gp_tab <- table(dat[[group]])
      if(total)
        gp_tab <- c(gp_tab, "Total" = length(dat[[group]]))
      if(add_obs){
        obs <- matrix(gp_tab, nrow = 1,
                      dimnames = list("Observation", names(gp_tab)))
        obs <- structure(obs,
                         position = 1,
                         class = c("cttab", class(obs)))
        res <- rbind(obs, res)
      }
    }
    return(res)
  }
  # Print summary plots of the variables (side effect only).
  if(print_plot){
    cctab_plot(vars, data, group, row_split, select)
  }
  # If no split: one table over the whole data.
  if (is.null(row_split)) {
    tbody <- calc_tab(data)
    # Accumulate the missing-data report in the package environment.
    if(add_missing && !is.null(subjid_string)){
      miss_rep <- report_missing(data = data, vars = vars, select = select,
                                 subjid_string = subjid_string)
      cctu_env$missing_report_data <- rbind(cctu_env$missing_report_data,
                                            miss_rep)
    }
  } else{
    # Extract the split variable's label (fall back to its name).
    split_lab <- ifelse(has.label(data[[row_split]]),
                        var_lab(data[[row_split]]),
                        row_split)
    dfm <- split(data, data[[row_split]])
    # One sub-table per split level, each preceded by a header row
    # "label = level".
    tbody <- lapply(names(dfm), function(x) {
      out <- calc_tab(dfm[[x]])
      to_insert <- blnk_cttab(row_labs = paste(split_lab, "=", x),
                              pos = 0,
                              from_tab = out)
      out <- rbind(to_insert, out)
      # Missing-data report, tagged with the visit (split level) details.
      if(add_missing && !is.null(subjid_string)){
        miss_rep <- report_missing(data = dfm[[x]], vars = vars, select = select,
                                   subjid_string = subjid_string)
        if(nrow(miss_rep) != 0){
          miss_rep$visit_var <- row_split
          miss_rep$visit_label <- split_lab
          miss_rep$visit <- x
          cctu_env$missing_report_data <- rbind(cctu_env$missing_report_data,
                                                miss_rep)
        }
      }
      return(out)
    })
    tbody <- do.call("rbind", tbody)
  }
  return(tbody)
}
#' Generate a descriptive summary statistics table.
#'
#'
#' It is important to use variable labels and value labels to produce a proper
#' descriptive table. Variables with value labels will be converted to ordered
#' factors with the same order as the value labels (\code{to_factor}), and
#' variable labels will be used in the output. The first row will be blank with
#' a row name of the variable label. The variable name will be used if the
#' variable does not have a variable label.
#'
#'
#' @inheritParams cttab
#'
#' @return An object of class "cttab".
#'
#' @importFrom data.table .SD
#' @keywords internal
stat_tab <- function(vars,
                     group = NULL,
                     data,
                     total = TRUE,
                     select = NULL,
                     add_missing = TRUE,
                     digits = 2,
                     digits_pct = 1,
                     rounding_fn = signif_pad,
                     render_num = "Median [Min, Max]",
                     logical_na_impute = FALSE){
  # Captured call; not referenced later in this function (review note).
  mf <- match.call()
  vars_list <- c(unlist(vars), group)
  if (!all(vars_list %in% names(data))) {
    stop(
      "Variable ",
      paste(vars_list[!vars_list %in% names(data)], collapse = ", "),
      " not in the dataset, please check!"
    )
  }
  # Group variable to factor
  if (!is.null(group)) {
    # Keep only records with a non-missing group value.
    data <- data[!is.na(data[[group]]), , drop = FALSE]
    if (has.labels(data[[group]]) | !is.factor(data[[group]]))
      data[[group]] <- to_factor(data[[group]], drop.levels = TRUE)
  }
  # Character columns get value labels so factor levels stay consistent
  # between groups (otherwise a level absent in one group would vanish).
  convcols <- names(Filter(is.character, data))
  # Classify each variable once, on the full data, so the class is
  # consistent across all group subsets: 'category', 'numeric' or 'logical'.
  # An all-NA column is treated as numeric.
  var_class <- sapply(vars, function(v){
    if(!inherits(data[[v]], c("numeric", "integer", "factor", "character", "logical")))
      stop(paste("The class of variable", v, "is", class(data[[v]]), "and not supported!"))
    fcase(
      inherits(data[[v]], c("factor", "character")) | has.labels(data[[v]]), 'category',
      inherits(data[[v]], c("numeric", "integer")) | all(is.na(data[[v]])), 'numeric',
      inherits(data[[v]], c("logical")), 'logical'
    )
  })
  if(length(convcols) > 0)
    data[, convcols] <- data[,lapply(.SD, to_character), .SDcols = convcols]
  # Flag variables with at least one missing value (computed on full data).
  any_miss <- sapply(vars, function(v)sum(is.na(data[[v]]))) > 0
  # Transform the data into a named list of subsets, one per output column
  # (each group level, plus optionally "Total").
  if (total & !is.null(group)) {
    x <- c(split(data, data[[group]]), list(Total = data))
  } else if (!is.null(group)) {
    x <- split(data, data[[group]])
  } else{
    x <- list(Total = data)
  }
  # Outer loop: one row-block per variable; inner loop: one column per subset.
  r <- do.call(rbind, lapply(vars, function(v){
    # Row label: the variable label, or the name when unlabelled.
    variable <- ifelse(has.label(data[[v]]), var_lab(data[[v]]), v)
    y <- do.call(cbind, lapply(x, function(s) {
      z <- s[gen_selec(s, v, select[v]), ] # Apply subset
      z <- z[[v]]
      # Convert character/labelled values to an ordered factor.
      if(has.labels(z) | is.character(z))
        z <- to_factor(z, ordered = TRUE)
      if(var_class[v] == "category"){
        r <- c("", render_cat(z, digits_pct = digits_pct))
      }
      if(var_class[v] == "logical"){
        # Impute missing data for logical, then report "TRUE/N (pct)" as a
        # single row; the generic missing row is suppressed (add_missing is
        # rebound locally inside this closure only).
        z[is.na(z)] <- logical_na_impute
        r <- with(cat_stat(z, digits_pct = digits_pct)$Yes,
                  c(Missing = sprintf("%s/%s (%s)", FREQ, N, PCTnoNA)))
        add_missing <- FALSE
      }
      if(var_class[v] == "numeric"){
        r <- c("", render_numeric(z, what = render_num, digits = digits, digits_pct = digits_pct, rounding_fn = rounding_fn))
      }
      names(r)[1] <- variable
      # Append a "Missing" count row (blank when nothing is missing).
      if(add_missing & any_miss[v]){
        miss <- with(cat_stat(is.na(z), digits_pct = digits_pct)$Yes,
                     c(Missing = ifelse(FREQ == 0, "",
                                        sprintf("%s (%s)", FREQ, PCT))))
        r <- c(r, miss)
      }
      return(r)
    }))
    # Literal "NA" strings from the renderers are shown as blanks.
    y[y == "NA"] <- ""
    if(nrow(y) == 1 & all(y == ""))
      return(NULL)
    # Remove rows that are blank in every column (keep the label row).
    if(nrow(y) >1){
      all_val <- rowSums(apply(y, 2, function(x)x == "")) != ncol(y)
      all_val[1] <- TRUE
      y <- y[all_val, ,drop = FALSE]
    }
    # Don't report if the variable has no values left to report.
    if(nrow(y) == 1 & all(y == ""))
      return(NULL)
    # First-row style: 3 (indented) for a single-row block, 2 (bold,
    # spanned) otherwise, 1 (bold) for logicals; remaining rows are 3.
    fst <- ifelse(nrow(y) == 1, 3, 2)
    if(var_class[v] == "logical")
      fst <- 1
    structure(y,
              position = c(fst, rep(3, nrow(y) - 1)),
              class = c("cttab", class(y)))
  }))
  return(r)
}
# Generate a per-variable selection vector.
#
# Evaluates the subsetting expression registered for `var` in `select`
# against `dat` and returns a logical vector flagging the rows to analyse.
#
# Args:
#   dat:    a data.frame (or data.table) containing `var`.
#   var:    character scalar, the name of the row-variable.
#   select: optional named character vector of selection expressions; the
#           element named `var` (if any) is parsed and evaluated in `dat`.
#
# Returns a logical vector the same length as dat[[var]]; rows where the
# selection expression evaluates to NA are treated as not selected.
gen_selec <- function(dat, var, select = NULL) {
  # `||` short-circuits the scalar condition, so `names(select)` is never
  # evaluated when `select` is NULL (the elementwise `|` evaluated both
  # sides and is not idiomatic inside `if`).
  if (is.null(select) || !var %in% names(select)) {
    return(rep(TRUE, length(dat[[var]])))
  }
  r <- eval(str2expression(select[var]), envir = dat)
  # NA results of the selection expression mean "exclude the row".
  r & !is.na(r)
}
| /R/cttab.R | no_license | shug0131/cctu | R | false | false | 21,532 | r |
#' Generate an table of descriptive statistics.
#'
#' This is a wrapper function of \code{stat_tab}, allowing for grouped variables,
#' split statistics table by `row_split` variable.
#'
#' @param x Variables to be used or a \code{formula} for summary table.
#' If \code{x} is a \code{formula}, then the \code{group} variable should
#' be provided at the right-hand side, use \code{1} if there's no grouping
#' variable. And \code{row_split} should also be provided on the right hand side
#' of the formula and separate it using \code{|} with grouping variable. For example,
#' \code{age + sex ~ treat|cycle} or \code{age + sex ~ 1|cycle} without grouping.
#' See details.
#' @param data A \code{data.frame} from which the variables in \code{vars}
#' should be taken.
#' @param group Name of the grouping variable.
#' @param row_split Variable that is used for splitting table rows; rows will be
#' split using this variable. Useful for repeated measures.
#' @param total If a "Total" column will be created (default). Specify
#' \code{FALSE} to omit the column.
#' @param select a named vector with as many components as row-variables. Every
#' element of `select` will be used to select the individuals to be analyzed
#' for every row-variable. Name of the vector corresponds to the row variable,
#' element is the selection.
#' @param add_missing If missing number and missing percentage will be
#' reported in the summary table, default is `TRUE`. This will also produce
#' data missingness report if set \code{TRUE}. See \code{\link{report_missing}}
#' for details.
#' @param add_obs Add an observation row (default).
#' @param digits An integer specifying the number of significant digits to keep,
#' default is 3.
#' @param digits_pct An integer specifying the number of digits after the
#' decimal place for percentages, default is 0.
#' @param rounding_fn The function to use to do the rounding. Defaults is
#' \code{\link{signif_pad}}. To round up by digits instead of significant
#' values, set it to \code{round_pad}.
#' @param subjid_string A character naming the column used to identify subject,
#' default is \code{"subjid"}.
#' @param print_plot A logical value, print summary plot of the variables (default).
#' @param render_num A character or vector indicating which summary will be reported,
#' default is "Median [Min, Max]". You can change this to "Median [IQR]" then the
#' median and IQR will be reported instead of "Median [Min, Max]". Use
#' \code{options(cctu_render_num = "Median [IQR]")} to set global options.
#' See details \code{\link{render_numeric}}.
#' @param logical_na_impute Impute missing values with \code{FALSE} (default),
#' \code{NA} keep as it is, or \code{TRUE}. The nominator for the logical vector is
#' the number of \code{TRUE}. For \code{FALSE} or \code{TRUE}, the denominator will
#' be all values regardless of missingness, but the non-missing number used as
#' denominator for \code{NA}. Set it to \code{FALSE} if you want to summarise multiple
#' choice variables and \code{NA} for Yes/No type logical variables but don't want No
#' in the summary. You can used a named list in \code{x} and stack multiple
#' choice in one category.
#' @param blinded A logical scalar, if summary table will be report by
#' \code{group} (default) or not. This will ignore \code{group} if set to \code{TRUE}
#' and grouping summary will not be reported.
#' @param ... Not used.
#' @details
#' \strong{1. Parameter settings with global options}
#'
#' Some of the function parameters can be set with options. This will have a global
#' effect on the \code{cctab} function. It is an ideal way to set global settings
#' if you want this to be effective globally. Currently, you can set \code{digits},
#' \code{digits_pct}, \code{subjid_string}, \code{print_plot}, \code{render_num} and
#' \code{blinded} by adding \code{"cctu_"} prefix in the \code{options}. For example,
#' you can suppress the plot from printing by setting \code{options(cctu_print_plot = FALSE)}.
#'
#' \strong{2. Formula interface}
#'
#' There are two interfaces, the default, which typically takes a variable vector from
#' \code{data.frame} for \code{x}, and the formula interface. The formula interface is
#' less flexible, but simpler to use and designed to handle the most common use cases.
#' For the formula version, the formula is expected to be a two-sided formula. Left hand
#' side is the variables to be summarised and the right hand side is the group and/or split
#' variable. To include a row splitting variable, use \code{|} to separate the row splitting
#' variable after the grouping variable and then the row split variable. For example,
#' \code{age + sex ~ treat|visit}. The right hand side of the formula will be treated as a grouping
#' variable by default. A value of \code{1} should be provided if there is no grouping variable,
#' for example \code{age + sex ~ 1} or \code{age + sex ~ 1|visit} by visit.
#'
#' \strong{3. Return}
#'
#' A summary table with some attributes will be returned, a method has been written for \code{rbind}.
#' So you can use \code{rbind} to combine two tables without losing any attributes. An attribute
#' \code{position} will be used to produce a nice table. There are 4 possible values for each
#' row. The row name is printed as the first column in the word table. Some styles will be applied to each
#' row based on the \code{position} attributes.
#' \tabular{ll}{
#' \code{0} \tab indicates the row will be bolded, spanned through all columns and a grey background
#' in the word \cr
#' \tab \cr
#' \code{1} \tab indicates the row will be bolded \cr
#' \tab \cr
#' \code{2} \tab the row will be bolded and spanned through all columns \cr
#' \tab \cr
#' \code{3} \tab indicates the row of the first column will be indented \cr
#' }
#'
#' @seealso
#' \code{\link{signif_pad}}
#' \code{\link{round_pad}}
#' \code{\link{stat_tab}}
#' \code{\link{sumby}}
#' \code{\link{dump_missing_report}}
#' \code{\link{get_missing_report}}
#' \code{\link{render_numeric}}
#' \code{\link{render_cat}}
#' @return A matrix with `cttab` class.
#'
#' @example inst/examples/cttab.R
#'
#' @export
#'
cttab <- function(x, ...) {
  # S3 dispatch on the class of `x`: cttab.default() for variable
  # vectors/lists, cttab.formula() for the formula interface.
  UseMethod("cttab")
}
#' @describeIn cttab The default interface, where \code{x} is a \code{data.frame}.
#' @export
cttab.default <- function(x, data, group = NULL, row_split = NULL,
                          total = TRUE, select = NULL,
                          add_missing = TRUE, add_obs = TRUE,
                          digits = getOption("cctu_digits", default = 3),
                          digits_pct = getOption("cctu_digits_pct", default = 0),
                          rounding_fn = signif_pad,
                          subjid_string = getOption("cctu_subjid_string", default = "subjid"),
                          print_plot = getOption("cctu_print_plot", default = TRUE),
                          render_num = getOption("cctu_render_num", default = "Median [Min, Max]"),
                          logical_na_impute = c(FALSE, NA, TRUE),
                          blinded = getOption("cctu_blinded", default = FALSE),
                          ...) {
  # Thin dispatch shim: all of the real work happens in .cttab.internal(),
  # which is shared with the formula method. `x` supplies the variable
  # list (`vars`); every other argument is forwarded verbatim.
  .cttab.internal(vars = x, data = data, group = group,
                  row_split = row_split, total = total, select = select,
                  add_missing = add_missing, add_obs = add_obs,
                  digits = digits, digits_pct = digits_pct,
                  rounding_fn = rounding_fn, subjid_string = subjid_string,
                  print_plot = print_plot, render_num = render_num,
                  logical_na_impute = logical_na_impute, blinded = blinded)
}
#' @describeIn cttab The formula interface, where \code{x} is a \code{formula}.
#' @export
cttab.formula <- function(x,
                          data,
                          total = TRUE,
                          select = NULL,
                          add_missing = TRUE,
                          add_obs = TRUE,
                          digits = getOption("cctu_digits", default = 3),
                          digits_pct = getOption("cctu_digits_pct", default = 0),
                          rounding_fn = signif_pad,
                          subjid_string = getOption("cctu_subjid_string", default = "subjid"),
                          print_plot = getOption("cctu_print_plot", default = TRUE),
                          render_num = getOption("cctu_render_num", default = "Median [Min, Max]"),
                          logical_na_impute = c(FALSE, NA, TRUE),
                          blinded = getOption("cctu_blinded", default = FALSE),
                          ...) {
  # Decompose the formula into its left-hand (variables to summarise) and
  # right-hand (group and optional row-split) components.
  f <- split_formula(x)
  # Validate the formula shape before extracting anything from it.
  if (is.null(f$lhs))
    stop("No variables provided to summarise, please add variable to the left hand side of the formula.")
  if (length(f$lhs) != 1)
    stop("Invalid formula, only `+` is allowed to list multiple variables.")
  if (!length(f$rhs) %in% c(1, 2))
    stop("Invalid formula, multiple split provided.")
  if (f$rhs[[1]] == ".")
    stop("Invalid formula, dot is not allowed.")
  # `~ 1` (or `~ 1 | split`) means no grouping variable.
  group <- if (f$rhs[[1]] == 1) NULL else all.vars(f$rhs[[1]])
  vars <- all.vars(f$lhs[[1]])
  # The optional second RHS component (after `|`) is the row-split variable.
  row_split <- if (length(f$rhs) == 2) all.vars(f$rhs[[2]]) else NULL
  # NOTE: `logical_na_impute` is normalised (first element taken and
  # validated) inside .cttab.internal(), so no pre-processing is done here.
  .cttab.internal(vars = vars,
                  data = data,
                  group = group,
                  row_split = row_split,
                  total = total,
                  select = select,
                  add_missing = add_missing,
                  add_obs = add_obs,
                  digits = digits,
                  digits_pct = digits_pct,
                  rounding_fn = rounding_fn,
                  subjid_string = subjid_string,
                  print_plot = print_plot,
                  render_num = render_num,
                  logical_na_impute = logical_na_impute,
                  blinded = blinded)
}
# Internal workhorse shared by cttab.default() and cttab.formula().
#
# Validates that every requested variable exists in `data`, optionally drops
# the grouping when `blinded = TRUE`, splits the data by `row_split` (one
# sub-table per level), and builds each sub-table via stat_tab(). As a side
# effect it appends to the package-level missing-data report
# (cctu_env$missing_report_data) and, when `print_plot = TRUE`, calls
# cctab_plot() to print summary plots.
#
# Returns a character matrix of class "cttab" carrying a `position`
# attribute used downstream for table styling (see the cttab() docs).
.cttab.internal <- function(vars,
                            data,
                            group = NULL,
                            row_split = NULL,
                            total = TRUE,
                            select = NULL,
                            add_missing = TRUE,
                            add_obs = TRUE,
                            digits = getOption("cctu_digits", default = 3),
                            digits_pct = getOption("cctu_digits_pct", default = 0),
                            rounding_fn = signif_pad,
                            subjid_string = getOption("cctu_subjid_string", default = "subjid"),
                            print_plot = getOption("cctu_print_plot", default = TRUE),
                            render_num = getOption("cctu_render_num", default = "Median [Min, Max]"),
                            logical_na_impute = c(FALSE, NA, TRUE),
                            blinded = getOption("cctu_blinded", default = FALSE)) {
  # Captured call; not referenced later in this function (review note).
  tpcall <- match.call()
  # Only the first element of the imputation choice vector is used.
  logical_na_impute <- logical_na_impute[1]
  stopifnot(logical_na_impute %in% c(FALSE, NA, TRUE))
  # Blinded reporting: drop the grouping so no per-arm summaries are produced.
  if(blinded)
    group <- NULL
  vars_list <- c(unlist(vars), group, row_split)
  if (!all(vars_list %in% names(data))) {
    stop(
      "Variable ",
      paste(vars_list[!vars_list %in% names(data)], collapse = ", "),
      " not in the dataset, please check!"
    )
  }
  # Convert to data.table to avoid losing formats/attributes.
  data <- data.table::as.data.table(data)
  # Group variable to factor
  if (!is.null(group)) {
    # Remove records with a missing group value.
    data <- data[!is.na(data[[group]]), ]
    if (has.labels(data[[group]]) | !is.factor(data[[group]]))
      data[[group]] <- to_factor(data[[group]], drop.levels = TRUE)
  }
  if(base::anyDuplicated(vars_list))
    stop("The variable list, group or row split variable have duplicated variable.")
  if (!is.null(row_split)) {
    if (has.labels(data[[row_split]]) | !is.factor(data[[row_split]]))
      data[[row_split]] <- to_factor(data[[row_split]], drop.levels = TRUE)
  }
  # Build a one-row blank "cttab" matrix (used as a section-header row);
  # `pos` becomes its `position` attribute for downstream styling.
  blnk_cttab <- function(row_labs, pos, from_tab){
    to_insert <- matrix(c(rep("", ncol(from_tab))), nrow = 1,
                        dimnames = list(row_labs, colnames(from_tab)))
    structure(to_insert,
              position = pos,
              class = class(from_tab))
  }
  # Wrapped tabulation function: builds the summary table for one data
  # chunk (the full data, or one row_split level).
  calc_tab <- function(dat){
    # A list of variable vectors means grouped sections: one stat_tab()
    # call per list element, each optionally preceded by a header row
    # labelled with the element's name.
    if(is.list(vars)){
      res <- lapply(seq_along(vars), function(i){
        x <- vars[[i]]
        r <- stat_tab(vars = x,
                      group = group,
                      data = dat,
                      total = total,
                      select = select,
                      add_missing = add_missing,
                      digits = digits,
                      digits_pct = digits_pct,
                      rounding_fn = rounding_fn,
                      render_num = render_num,
                      logical_na_impute = logical_na_impute)
        # Add a grouping header row when the list element is named.
        if(!is_empty(names(vars)[i])){
          to_insert <- blnk_cttab(row_labs = names(vars)[i],
                                  pos = 0,
                                  from_tab = r)
          r <- rbind(to_insert, r)
        }
        return(r)
      })
      res <- do.call(rbind, res)
      # This is for logical values that have no variable name: use the
      # grouping label as the variable name (position 0 followed
      # immediately by position 1 is re-coded as position 2).
      pos <- attr(res, "position")
      ps <- which(pos == 0 & c(pos[-1], 3) == 1)
      if(any(!is_empty(ps))){
        pos[ps] <- rep(2, length(ps))
        attr(res, "position") <- pos
      }
    }else{
      res <- stat_tab(vars = vars,
                      group = group,
                      data = dat,
                      total = total,
                      select = select,
                      add_missing = add_missing,
                      digits = digits,
                      digits_pct = digits_pct,
                      rounding_fn = rounding_fn,
                      render_num = render_num,
                      logical_na_impute = logical_na_impute)
    }
    # Add an "Observation" count row on top (per group, plus Total).
    if(!is.null(group)){
      gp_tab <- table(dat[[group]])
      if(total)
        gp_tab <- c(gp_tab, "Total" = length(dat[[group]]))
      if(add_obs){
        obs <- matrix(gp_tab, nrow = 1,
                      dimnames = list("Observation", names(gp_tab)))
        obs <- structure(obs,
                         position = 1,
                         class = c("cttab", class(obs)))
        res <- rbind(obs, res)
      }
    }
    return(res)
  }
  # Print summary plots of the variables (side effect only).
  if(print_plot){
    cctab_plot(vars, data, group, row_split, select)
  }
  # If no split: one table over the whole data.
  if (is.null(row_split)) {
    tbody <- calc_tab(data)
    # Accumulate the missing-data report in the package environment.
    if(add_missing && !is.null(subjid_string)){
      miss_rep <- report_missing(data = data, vars = vars, select = select,
                                 subjid_string = subjid_string)
      cctu_env$missing_report_data <- rbind(cctu_env$missing_report_data,
                                            miss_rep)
    }
  } else{
    # Extract the split variable's label (fall back to its name).
    split_lab <- ifelse(has.label(data[[row_split]]),
                        var_lab(data[[row_split]]),
                        row_split)
    dfm <- split(data, data[[row_split]])
    # One sub-table per split level, each preceded by a header row
    # "label = level".
    tbody <- lapply(names(dfm), function(x) {
      out <- calc_tab(dfm[[x]])
      to_insert <- blnk_cttab(row_labs = paste(split_lab, "=", x),
                              pos = 0,
                              from_tab = out)
      out <- rbind(to_insert, out)
      # Missing-data report, tagged with the visit (split level) details.
      if(add_missing && !is.null(subjid_string)){
        miss_rep <- report_missing(data = dfm[[x]], vars = vars, select = select,
                                   subjid_string = subjid_string)
        if(nrow(miss_rep) != 0){
          miss_rep$visit_var <- row_split
          miss_rep$visit_label <- split_lab
          miss_rep$visit <- x
          cctu_env$missing_report_data <- rbind(cctu_env$missing_report_data,
                                                miss_rep)
        }
      }
      return(out)
    })
    tbody <- do.call("rbind", tbody)
  }
  return(tbody)
}
#' Generate a descriptive summary statistics table.
#'
#'
#' It is important to use variable labels and value labels to produce a proper
#' descriptive table. Variables with value labels will be converted to ordered
#' factors with the same order as the value labels (\code{to_factor}), and
#' variable labels will be used in the output. The first row will be blank with
#' a row name of the variable label. The variable name will be used if the
#' variable does not have a variable label.
#'
#'
#' @inheritParams cttab
#'
#' @return An object of class "cttab".
#'
#' @importFrom data.table .SD
#' @keywords internal
stat_tab <- function(vars,
                     group = NULL,
                     data,
                     total = TRUE,
                     select = NULL,
                     add_missing = TRUE,
                     digits = 2,
                     digits_pct = 1,
                     rounding_fn = signif_pad,
                     render_num = "Median [Min, Max]",
                     logical_na_impute = FALSE){
  # Captured call; not referenced later in this function (review note).
  mf <- match.call()
  vars_list <- c(unlist(vars), group)
  if (!all(vars_list %in% names(data))) {
    stop(
      "Variable ",
      paste(vars_list[!vars_list %in% names(data)], collapse = ", "),
      " not in the dataset, please check!"
    )
  }
  # Group variable to factor
  if (!is.null(group)) {
    # Keep only records with a non-missing group value.
    data <- data[!is.na(data[[group]]), , drop = FALSE]
    if (has.labels(data[[group]]) | !is.factor(data[[group]]))
      data[[group]] <- to_factor(data[[group]], drop.levels = TRUE)
  }
  # Character columns get value labels so factor levels stay consistent
  # between groups (otherwise a level absent in one group would vanish).
  convcols <- names(Filter(is.character, data))
  # Classify each variable once, on the full data, so the class is
  # consistent across all group subsets: 'category', 'numeric' or 'logical'.
  # An all-NA column is treated as numeric.
  var_class <- sapply(vars, function(v){
    if(!inherits(data[[v]], c("numeric", "integer", "factor", "character", "logical")))
      stop(paste("The class of variable", v, "is", class(data[[v]]), "and not supported!"))
    fcase(
      inherits(data[[v]], c("factor", "character")) | has.labels(data[[v]]), 'category',
      inherits(data[[v]], c("numeric", "integer")) | all(is.na(data[[v]])), 'numeric',
      inherits(data[[v]], c("logical")), 'logical'
    )
  })
  if(length(convcols) > 0)
    data[, convcols] <- data[,lapply(.SD, to_character), .SDcols = convcols]
  # Flag variables with at least one missing value (computed on full data).
  any_miss <- sapply(vars, function(v)sum(is.na(data[[v]]))) > 0
  # Transform the data into a named list of subsets, one per output column
  # (each group level, plus optionally "Total").
  if (total & !is.null(group)) {
    x <- c(split(data, data[[group]]), list(Total = data))
  } else if (!is.null(group)) {
    x <- split(data, data[[group]])
  } else{
    x <- list(Total = data)
  }
  # Outer loop: one row-block per variable; inner loop: one column per subset.
  r <- do.call(rbind, lapply(vars, function(v){
    # Row label: the variable label, or the name when unlabelled.
    variable <- ifelse(has.label(data[[v]]), var_lab(data[[v]]), v)
    y <- do.call(cbind, lapply(x, function(s) {
      z <- s[gen_selec(s, v, select[v]), ] # Apply subset
      z <- z[[v]]
      # Convert character/labelled values to an ordered factor.
      if(has.labels(z) | is.character(z))
        z <- to_factor(z, ordered = TRUE)
      if(var_class[v] == "category"){
        r <- c("", render_cat(z, digits_pct = digits_pct))
      }
      if(var_class[v] == "logical"){
        # Impute missing data for logical, then report "TRUE/N (pct)" as a
        # single row; the generic missing row is suppressed (add_missing is
        # rebound locally inside this closure only).
        z[is.na(z)] <- logical_na_impute
        r <- with(cat_stat(z, digits_pct = digits_pct)$Yes,
                  c(Missing = sprintf("%s/%s (%s)", FREQ, N, PCTnoNA)))
        add_missing <- FALSE
      }
      if(var_class[v] == "numeric"){
        r <- c("", render_numeric(z, what = render_num, digits = digits, digits_pct = digits_pct, rounding_fn = rounding_fn))
      }
      names(r)[1] <- variable
      # Append a "Missing" count row (blank when nothing is missing).
      if(add_missing & any_miss[v]){
        miss <- with(cat_stat(is.na(z), digits_pct = digits_pct)$Yes,
                     c(Missing = ifelse(FREQ == 0, "",
                                        sprintf("%s (%s)", FREQ, PCT))))
        r <- c(r, miss)
      }
      return(r)
    }))
    # Literal "NA" strings from the renderers are shown as blanks.
    y[y == "NA"] <- ""
    if(nrow(y) == 1 & all(y == ""))
      return(NULL)
    # Remove rows that are blank in every column (keep the label row).
    if(nrow(y) >1){
      all_val <- rowSums(apply(y, 2, function(x)x == "")) != ncol(y)
      all_val[1] <- TRUE
      y <- y[all_val, ,drop = FALSE]
    }
    # Don't report if the variable has no values left to report.
    if(nrow(y) == 1 & all(y == ""))
      return(NULL)
    # First-row style: 3 (indented) for a single-row block, 2 (bold,
    # spanned) otherwise, 1 (bold) for logicals; remaining rows are 3.
    fst <- ifelse(nrow(y) == 1, 3, 2)
    if(var_class[v] == "logical")
      fst <- 1
    structure(y,
              position = c(fst, rep(3, nrow(y) - 1)),
              class = c("cttab", class(y)))
  }))
  return(r)
}
# Generate a per-variable selection vector.
#
# Evaluates the subsetting expression registered for `var` in `select`
# against `dat` and returns a logical vector flagging the rows to analyse.
#
# Args:
#   dat:    a data.frame (or data.table) containing `var`.
#   var:    character scalar, the name of the row-variable.
#   select: optional named character vector of selection expressions; the
#           element named `var` (if any) is parsed and evaluated in `dat`.
#
# Returns a logical vector the same length as dat[[var]]; rows where the
# selection expression evaluates to NA are treated as not selected.
gen_selec <- function(dat, var, select = NULL) {
  # `||` short-circuits the scalar condition, so `names(select)` is never
  # evaluated when `select` is NULL (the elementwise `|` evaluated both
  # sides and is not idiomatic inside `if`).
  if (is.null(select) || !var %in% names(select)) {
    return(rep(TRUE, length(dat[[var]])))
  }
  r <- eval(str2expression(select[var]), envir = dat)
  # NA results of the selection expression mean "exclude the row".
  r & !is.na(r)
}
|
# Choice-based conjoint analysis using conditional logistic regression
# (survival::clogit), stratified by choice set (V1).

# Load the data.
data <- read.csv("choicebasedconjoint.csv")
# Look at the variables present in the dataset.
names(data)
# The serial number is not needed, so drop the first column.
data <- data[, -1]
# Examine the structure of the data.
str(data)
# The survival package provides clogit() for conditional logistic regression.
library(survival)
# MonthlyIncome is stored as an integer; convert it to a factor.
data$MonthlyIncome <- factor(data$MonthlyIncome)
# Fit a conditional logistic regression of choice on the attributes,
# stratified by choice set (V1).
clogout1 <- clogit(choice ~ (Distance + Reputation + Delivery + Payment.Method + Price)
                   + strata(V1), data = data)
clogout1
# AIC for the main-effects model.
AIC(clogout1)
# Add Gender and HealthInsurance as interaction terms.
clogout2 <- clogit(choice ~ (Distance + Reputation + Delivery + Payment.Method + Price) *
                     (Gender + HealthInsurance)
                   + strata(V1), data = data)
clogout2
# AIC for the interaction model.
AIC(clogout2)
| /Choice Based Conjoint Analysis.r | no_license | murugeshmanthiramoorthi/Choice-Based-Conjoint-Analysis | R | false | false | 1,102 | r | #Let us load the data
data=read.csv("choicebasedconjoint.csv")
# Now we will look at the variables present in tha dataset
names(data)
#Serial number is not necessary. So we will remove it from the dataset
data=data[,-1]
# Now we will look at the structure of the data
str(data)
# We will include suurvival library for performing conditional logistic regression
library(survival)
# We can find that the Monthly Income is represented as int
# We will convert it into factor
data$MonthlyIncome=factor(data$MonthlyIncome)
# Now we will perform conditional logistic regression on our data
clogout1=clogit(choice~(Distance+Reputation+Delivery+Payment.Method+Price)
+strata(V1),data=data)
clogout1
# Now we will calculate AIC value for the model
AIC(clogout1)
# Now we will add gender and Health Insurance as an interaction method to the model
clogout2=clogit(choice~(Distance+Reputation+Delivery+Payment.Method+Price)*
(Gender+HealthInsurance)
+strata(V1),data=data)
clogout2
# Now let us calculate the AIC value for the new model
AIC(clogout2)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TheiaQuery.R
\name{TheiaQuery}
\alias{TheiaQuery}
\title{A query to the Theia website}
\description{
Generate and send a query to the Theia web API to get and download tiles based on
input given by the user.
}
\section{Usage}{
\preformatted{
q <- TheiaQuery$new(query)
q$update_token()
q$submit()
}
}
\section{Arguments}{
\describe{
\item{q:}{A \code{TheiaQuery} object}
  \item{query:}{\code{list}, the users' request, see `Queries` for
        more information}
}
}
\section{Details}{
\code{TheiaQuery$new()} Create a new instance of the class, parse `query`
list and submit the query to Theia to retrieve files catalog
\code{q$submit()} Submit the query to Theia and get a list of tiles
corresponding to search criteria
}
\section{Queries}{
Search criteria are given with a `list` accepting these fields:
\itemize{
\item{collection:} The collection to look for. Accepted values are:
'SENTINEL2', 'LANDSAT', 'Landsat57', 'SpotWorldHeritage', 'Snow'.
Defaults to 'SENTINEL2'
\item{platform:} The platform to look for. Accepted values are:
'LANDSAT5', 'LANDSAT7', 'LANDSAT8', 'SPOT1', 'SPOT2', 'SPOT3',
'SPOT4', 'SPOT5', 'SENTINEL2A', 'SENTINEL2B'
\item{level:} Processing level. Accepted values are: 'LEVEL1C',
'LEVEL2A', LEVEL3A', 'N2A'. Defaults to 'LEVEL2A' (or 'N2A' if
querying Landsat57 collection).
\item{town:} The location to look for. Give a common town name.
\item{tile:} The tile identifier to retrieve.
\item{start.date:} The first date to look for (format: YYYY-MM-DD).
\item{end.date:} The last date to look for (format: YYYY-MM-DD). Must be
after start.date. Defaults to today's date.
\item{latitude:} The x coordinate of a point
\item{longitude:} The y coordinate of a point
\item{latmin:} The minimum latitude to search
\item{latmax:} The maximum latitude to search
\item{lonmin:} The minimum longitude to search
\item{lonmax:} The maximum longitude to search
\item{orbit.number:} The orbit number
\item{rel.orbit.number:} The relative orbit number
\item{max.clouds:} The maximum of cloud cover wanted (0-100)
\item{max.records:} The maximum of tiles to search
}
}
\examples{
\donttest{
# Create a query to Theia database, looking for tiles from Sentinel2
# satellite around Grenoble, between 2018-07-01 and 2018-07-06.
query <- list(collection = "SENTINEL2",
town = "Grenoble",
start.date = "2018-07-01",
end.date = "2018-07-06")
q <- TheiaQuery$new(query)
# Show informations on found tiles
print(q$tiles)
}
}
\seealso{
\url{https://github.com/olivierhagolle/theia_download} for an alternative
download method based on Python. Inspiration for this function.
}
| /man/TheiaQuery.Rd | no_license | MayaGueguen/theiaR | R | false | true | 2,887 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TheiaQuery.R
\name{TheiaQuery}
\alias{TheiaQuery}
\title{A query to the Theia website}
\description{
Generate an send a query to Theia web API to get and download tiles based on
input given by the user.
}
\section{Usage}{
\preformatted{
q <- TheiaQuery$new(query)
q$update_token()
q$submit()
}
}
\section{Arguments}{
\describe{
\item{q:}{A \code{TheiaQuery} object}
\item{query:}{\code{list}, the users' request, see `Queries` for
more informations}
}
}
\section{Details}{
\code{TheiaQuery$new()} Create a new instance of the class, parse `query`
list and submit the query to Theia to retrieve files catalog
\code{q$submit()} Submit the query to Theia and get a list of tiles
corresponding to search criteria
}
\section{Queries}{
Search criteria are given with a `list` accepting these fields:
\itemize{
\item{collection:} The collection to look for. Accepted values are:
'SENTINEL2', 'LANDSAT', 'Landsat57', 'SpotWorldHeritage', 'Snow'.
Defaults to 'SENTINEL2'
\item{platform:} The platform to look for. Accepted values are:
'LANDSAT5', 'LANDSAT7', 'LANDSAT8', 'SPOT1', 'SPOT2', 'SPOT3',
'SPOT4', 'SPOT5', 'SENTINEL2A', 'SENTINEL2B'
\item{level:} Processing level. Accepted values are: 'LEVEL1C',
'LEVEL2A', LEVEL3A', 'N2A'. Defaults to 'LEVEL2A' (or 'N2A' if
querying Landsat57 collection).
\item{town:} The location to look for. Give a common town name.
\item{tile:} The tile identifier to retrieve.
\item{start.date:} The first date to look for (format: YYYY-MM-DD).
\item{end.date:} The last date to look for (format: YYYY-MM-DD). Must be
after start.date. Defaults to today's date.
\item{latitude:} The x coordinate of a point
\item{longitude:} The y coordinate of a point
\item{latmin:} The minimum latitude to search
\item{latmax:} The maximum latitude to search
\item{lonmin:} The minimum longitude to search
\item{lonmax:} The maximum longitude to search
\item{orbit.number:} The orbit number
\item{rel.orbit.number:} The relative orbit number
\item{max.clouds:} The maximum of cloud cover wanted (0-100)
\item{max.records:} The maximum of tiles to search
}
}
\examples{
\donttest{
# Create a query to Theia database, looking for tiles from Sentinel2
# satellite around Grenoble, between 2018-07-01 and 2018-07-06.
query <- list(collection = "SENTINEL2",
town = "Grenoble",
start.date = "2018-07-01",
end.date = "2018-07-06")
q <- TheiaQuery$new(query)
# Show informations on found tiles
print(q$tiles)
}
}
\seealso{
\url{https://github.com/olivierhagolle/theia_download} for an alternative
download method based on Python. Inspiration for this function.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/munge.R
\name{refactor_list}
\alias{refactor_list}
\title{Produce a lookup for refactor()}
\usage{
refactor_list(x, consolidate = FALSE, file = NULL)
}
\arguments{
\item{x}{A \code{\link{factor}} (or \code{\link{character}}) variable}
\item{consolidate}{\code{\link{logical}}. Should the 'TO' values be passed
through \code{\link{consolidate_values}} in an automated attempt to clean
them up?}
\item{file}{A writable file path. If supplied, the lookup will be written
out to a two column .csv file, as opposed to written to the console. The
file produced can be passed to the file argument in \code{\link{refactor}}}
}
\value{
Nothing. Prints to the console/terminal with \code{\link{cat}}.
}
\description{
{
The \code{refactor_list} command is a helper function for
\code{\link{refactor}}. It prints the \bold{\code{R}} code required for a
'lookup' to the console, for inclusion in data preparation/cleaning scripts
(perhaps after a bit of editing!).
For very large lookups, it might make more sense to pass the lookup to
\code{\link{refactor}} using a file. You can write the lookup
to a \code{.csv} file by supplying a path/name to the \code{file}
argument.
To try and make the process less laborious, \code{refactor_list} also has
a \code{consolidate} parameter. If set to \code{TRUE}, the lookup generated
will pass the 'TO' values through \code{\link{consolidate_values}}, hopefully
consolidating factor levels which are different for small formatting reasons
in to one. See the \code{\link{consolidate_values}} documentation for
details.
For a demonstration of how \code{\link{refactor}} and \code{refactor_list}
work together, see the package vignette, with:
\code{vignette('brocks')}
}
}
\examples{
# Let's tidy up the gender variable in test_data
data(test_data)
table(test_data$gender)
# Passing the gender variable to refactor_list, will generate the R code we
# need to create a lookup for it in our data-cleaning script! Setting
# consolidate to TRUE will do some of the work for us.
refactor_list(test_data$gender, consolidate = TRUE)
# At this point you'd take the code generated and integrate it into your
# script. Here's one I made earlier. We can pass it to refactor, and our
# factor variable is now tidy!
new_vals <- list(
# FROM TO
c("", NA ),
c("<NA>", NA ),
c("F", "female"),
c("Female", "female"),
c("m", "male" ),
c("M", "male" ),
c("Male", "male" ),
c("Man", "male" ),
c("Woman", "female"),
c("n/a", NA )
)
test_data$gender <- refactor(test_data$gender, new_vals)
}
\author{
Brendan Rocks \email{rocks.brendan@gmail.com}
}
\seealso{
\code{\link{refactor}}, the function which \code{refactor_list}
supports
}
| /man/refactor_list.Rd | permissive | arturocm/brocks | R | false | true | 2,844 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/munge.R
\name{refactor_list}
\alias{refactor_list}
\title{Produce a lookup for refactor()}
\usage{
refactor_list(x, consolidate = FALSE, file = NULL)
}
\arguments{
\item{x}{A \code{\link{factor}} (or \code{\link{character}}) variable}
\item{consolidate}{\code{\link{logical}}. Should the 'TO' values be passed
through \code{\link{consolidate_values}} in an automated attempt to clean
them up?}
\item{file}{A writable file path. If supplied, the lookup will be written
out to a two column .csv file, as opposed to written to the console. The
file produced can be passed to the file argument in \code{\link{refactor}}}
}
\value{
Nothing. Prints to the console/terminal with \code{\link{cat}}.
}
\description{
{
The \code{refactor_list} command is a helper function for
\code{\link{refactor}}. It prints the \bold{\code{R}} code required for a
'lookup' to the console, for inclusion in data preparation/cleaning scripts
(perhaps after a bit of editing!).
For very large lookups, it might make more sense to pass the lookup to
\code{\link{refactor}} using a file. You can write the lookup
to a \code{.csv} file by supplying a path/name to the \code{file}
argument.
To try and make the process less laborious, \code{refactor_list} also has
a \code{consolidate} parameter. If set to \code{TRUE}, the lookup generated
will pass the 'TO' values through \code{\link{consolidate_values}}, hopefully
consolidating factor levels which are different for small formatting reasons
in to one. See the \code{\link{consolidate_values}} documentation for
details.
For a demonstration of how \code{\link{refactor}} and \code{refactor_list}
work together, see the package vignette, with:
\code{vignette('brocks')}
}
}
\examples{
# Let's tidy up the gender variable in test_data
data(test_data)
table(test_data$gender)
# Passing the gender variable to refactor_list, will generate the R code we
# need to create a lookup for it in our data-cleaning script! Setting
# consolidate to TRUE will do some of the work for us.
refactor_list(test_data$gender, consolidate = TRUE)
# At this point you'd take the code generated and integrate it into your
# script. Here's one I made earlier. We can pass it to refactor, and our
# factor variable is now tidy!
new_vals <- list(
# FROM TO
c("", NA ),
c("<NA>", NA ),
c("F", "female"),
c("Female", "female"),
c("m", "male" ),
c("M", "male" ),
c("Male", "male" ),
c("Man", "male" ),
c("Woman", "female"),
c("n/a", NA )
)
test_data$gender <- refactor(test_data$gender, new_vals)
}
\author{
Brendan Rocks \email{rocks.brendan@gmail.com}
}
\seealso{
\code{\link{refactor}}, the function which \code{refactor_list}
supports
}
|
#' read_shff
#'
#' Read a Matrix Market style sparse matrix stored in an HDF5 file.
#'
#' @param h5in
#' Input file.
#' @param dataset
#' Dataset in input file to read or \code{NULL}. In the latter case, TODO
#' @param indices
#' Vector of positive (1-based) integer indices selecting which stored
#' entries to read, or \code{NULL} to read every entry.
#'
#' @export
read_shff = function(h5in, dataset=NULL, indices=NULL)
{
  # Validate arguments before touching the file.
  check.is.string(h5in)
  if (!is.null(dataset))
    check.is.string(dataset)
  if (!is.null(indices))
  {
    if (length(indices) == 0 || !all(is.inty(indices)) || any(indices < 1))
      stop("argument 'indices' must be a vector of positive integers")
  }

  fp = h5file(h5in, mode="r")
  # Guarantee the HDF5 handle is released even if resolving the dataset or
  # reading it errors out (the original leaked fp on any failure below).
  on.exit(h5close(fp), add=TRUE)

  dataset = h5_get_dataset(fp, dataset)
  read_shff_mm(fp, dataset, indices)
}
# Read entries of `dataset` from the open file handle `fp`: the whole
# dataset when `indices` is NULL, otherwise only the requested positions.
extract = function(fp, dataset, indices)
{
  dset = fp[[dataset]]
  if (is.null(indices))
    dset[]
  else
    dset[indices]
}
# Read the Matrix Market representation of `dataset` from the open handle
# `fp`. Returns a list with the dimensions (M, N), stored-entry count nz,
# coordinate vectors I/J, values val (NULL for pattern matrices, which
# store no values), the matcode string, and the indexing base.
read_shff_mm = function(fp, dataset, indices)
{
  attrs = glue(dataset, ATTR_PATH)

  # NOTE(review): `format` is read but never used below -- confirm whether
  # it is needed or can be dropped.
  format = h5attr(fp[[attrs]], "format")
  M = h5attr(fp[[attrs]], "nrows")
  N = h5attr(fp[[attrs]], "ncols")
  nz = h5attr(fp[[attrs]], "nz")
  matcode = mm_matcode_int2char(h5attr(fp[[attrs]], "matcode"))
  indexing = h5attr(fp[[attrs]], "indexing")

  I = extract(fp, glue(dataset, "I"), indices)
  J = extract(fp, glue(dataset, "J"), indices)
  # Pattern matrices carry coordinates only, no stored values.
  if (mm_is_pattern(matcode))
    val = NULL
  else
    val = extract(fp, glue(dataset, "val"), indices)

  # Return the list explicitly; the original relied on the invisible value
  # of a dangling assignment (`mm = list(...)`) as the function result.
  list(M=M, N=N, nz=nz, I=I, J=J, val=val, matcode=matcode, indexing=indexing)
}
| /R/read_shff.r | permissive | RBigData/shff | R | false | false | 1,412 | r | #' read_h5df
#' read_shff
#'
#' Read a Matrix Market style sparse matrix stored in an HDF5 file.
#'
#' @param h5in
#' Input file.
#' @param dataset
#' Dataset in input file to read or \code{NULL}. In the latter case, TODO
#' @param indices
#' Vector of positive (1-based) integer indices selecting which stored
#' entries to read, or \code{NULL} to read every entry.
#'
#' @export
read_shff = function(h5in, dataset=NULL, indices=NULL)
{
  # Validate arguments before touching the file.
  check.is.string(h5in)
  if (!is.null(dataset))
    check.is.string(dataset)
  if (!is.null(indices))
  {
    if (length(indices) == 0 || !all(is.inty(indices)) || any(indices < 1))
      stop("argument 'indices' must be a vector of positive integers")
  }

  fp = h5file(h5in, mode="r")
  # Guarantee the HDF5 handle is released even if resolving the dataset or
  # reading it errors out (the original leaked fp on any failure below).
  on.exit(h5close(fp), add=TRUE)

  dataset = h5_get_dataset(fp, dataset)
  read_shff_mm(fp, dataset, indices)
}
# Read entries of `dataset` from the open file handle `fp`: the whole
# dataset when `indices` is NULL, otherwise only the requested positions.
extract = function(fp, dataset, indices)
{
  dset = fp[[dataset]]
  if (is.null(indices))
    dset[]
  else
    dset[indices]
}
# Read the Matrix Market representation of `dataset` from the open handle
# `fp`. Returns a list with the dimensions (M, N), stored-entry count nz,
# coordinate vectors I/J, values val (NULL for pattern matrices, which
# store no values), the matcode string, and the indexing base.
read_shff_mm = function(fp, dataset, indices)
{
  attrs = glue(dataset, ATTR_PATH)

  # NOTE(review): `format` is read but never used below -- confirm whether
  # it is needed or can be dropped.
  format = h5attr(fp[[attrs]], "format")
  M = h5attr(fp[[attrs]], "nrows")
  N = h5attr(fp[[attrs]], "ncols")
  nz = h5attr(fp[[attrs]], "nz")
  matcode = mm_matcode_int2char(h5attr(fp[[attrs]], "matcode"))
  indexing = h5attr(fp[[attrs]], "indexing")

  I = extract(fp, glue(dataset, "I"), indices)
  J = extract(fp, glue(dataset, "J"), indices)
  # Pattern matrices carry coordinates only, no stored values.
  if (mm_is_pattern(matcode))
    val = NULL
  else
    val = extract(fp, glue(dataset, "val"), indices)

  # Return the list explicitly; the original relied on the invisible value
  # of a dangling assignment (`mm = list(...)`) as the function result.
  list(M=M, N=N, nz=nz, I=I, J=J, val=val, matcode=matcode, indexing=indexing)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{closest_to_point}
\alias{closest_to_point}
\title{Get data closest to a point}
\usage{
closest_to_point(mat, point)
}
\value{
Logical vector for each row in input matrix, TRUE if closest point
}
\description{
Get data closest to a point
}
| /man/closest_to_point.Rd | no_license | standardgalactic/scbp | R | false | true | 332 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{closest_to_point}
\alias{closest_to_point}
\title{Get data closest to a point}
\usage{
closest_to_point(mat, point)
}
\value{
Logical vector for each row in input matrix, TRUE if closest point
}
\description{
Get data closest to a point
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.r
\name{coef.gapski}
\alias{coef.gapski}
\title{Extract void point process parameter estimates}
\usage{
\method{coef}{gapski}(object, ...)
}
\arguments{
\item{object}{A fitted model from \link{fit.gap}.}
\item{...}{Other parameters (for S3 generic compatibility).}
}
\description{
Extracts estimated and derived parameters from a model fitted using
\link{fit.gap}.
}
| /man/coef.gapski.Rd | no_license | cmjt/gapski | R | false | true | 454 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.r
\name{coef.gapski}
\alias{coef.gapski}
\title{Extract void point process parameter estimates}
\usage{
\method{coef}{gapski}(object, ...)
}
\arguments{
\item{object}{A fitted model from \link{fit.gap}.}
\item{...}{Other parameters (for S3 generic compatibility).}
}
\description{
Extracts estimated and derived parameters from a model fitted using
\link{fit.gap}.
}
|
# Character-recognition SVM example (letter dataset, kernlab::ksvm).
#reading the dataset (expects letterdata.csv in the working directory)
letters <- read.csv("letterdata.csv")
str(letters)
#splitting the dataset: rows 1-16000 train, rows 16001-20000 test
# NOTE(review): this split assumes the rows are already randomly ordered -- confirm
letters_training <- letters[1:16000, ]
letters_test <- letters[16001:20000, ]
#SVM learning - packages: e1071, klaR, kernlab
# NOTE(review): install.packages() in a script re-installs on every run
install.packages("kernlab") #developed natively in R, can be used with caret package
library(kernlab)
letter_classifier <- ksvm(letter ~ ., data = letters_training,
kernel = "vanilladot") #linear kernel
letter_classifier #cost c = 1 by default: cost of violating the constraints, soft margins
#larger values result in narrower margins
#evaluating model performance
letter_predictions <- predict(letter_classifier, letters_test, type = "response")
head(letter_predictions)
table(letter_predictions, letters_test$letter) #to see how well it performed
# agreement is TRUE where the predicted letter matches the true letter
agreement <- letter_predictions == letters_test$letter
table(agreement) #how many classified correctly
prop.table(table(agreement))
#improving the model performance
letter_classifier_rbf <- ksvm(letter ~ ., data = letters_training,
kernel = "rbfdot") #Gaussian RBF Kernel, default kernel
#RBF: Radial Basis Function
letter_predictions_rbf <- predict(letter_classifier_rbf, letters_test, type = "response")
agreement_rbf <- letter_predictions_rbf == letters_test$letter
table(agreement_rbf)
prop.table(table(agreement_rbf)) #with different kernel, performance improved 9 pts
#for more improved model, cost parameter can be modified
| /ch7_2_ImageProcessing.R | no_license | buyukdem/MLwR | R | false | false | 1,571 | r | #reading the dataset
# Character-recognition SVM example (letter dataset, kernlab::ksvm).
# Reads the dataset (expects letterdata.csv in the working directory).
letters <- read.csv("letterdata.csv")
str(letters)
#splitting the dataset: rows 1-16000 train, rows 16001-20000 test
# NOTE(review): this split assumes the rows are already randomly ordered -- confirm
letters_training <- letters[1:16000, ]
letters_test <- letters[16001:20000, ]
#SVM learning - packages: e1071, klaR, kernlab
# NOTE(review): install.packages() in a script re-installs on every run
install.packages("kernlab") #developed natively in R, can be used with caret package
library(kernlab)
letter_classifier <- ksvm(letter ~ ., data = letters_training,
kernel = "vanilladot") #linear kernel
letter_classifier #cost c = 1 by default: cost of violating the constraints, soft margins
#larger values result in narrower margins
#evaluating model performance
letter_predictions <- predict(letter_classifier, letters_test, type = "response")
head(letter_predictions)
table(letter_predictions, letters_test$letter) #to see how well it performed
# agreement is TRUE where the predicted letter matches the true letter
agreement <- letter_predictions == letters_test$letter
table(agreement) #how many classified correctly
prop.table(table(agreement))
#improving the model performance
letter_classifier_rbf <- ksvm(letter ~ ., data = letters_training,
kernel = "rbfdot") #Gaussian RBF Kernel, default kernel
#RBF: Radial Basis Function
letter_predictions_rbf <- predict(letter_classifier_rbf, letters_test, type = "response")
agreement_rbf <- letter_predictions_rbf == letters_test$letter
table(agreement_rbf)
prop.table(table(agreement_rbf)) #with different kernel, performance improved 9 pts
#for more improved model, cost parameter can be modified
|
# Question-1: Exercise: Explore the relationship between the following, where x contains numbers
# from 1 to 100:
x <- c(1:100)
# NOTE(review): y is assigned but never used anywhere below
y <- 1
x
# Quadratic and cubic curves of x
plot(x, x^2)
plot(x, x^3)
# Question-2:: First we'll produce a very simple graph using the values in the car vector:
cars <- c(1, 3, 6, 4, 9)
# Graph the car vector with all defaults plot(cars)
plot(cars, type="o", col="blue")
# Create a title with a red, bold/italic font title(main="Autos", col.main='red',font.main=4)
title(main="Autos", col.main="red", font.main=4)
# Define 2 vectors cars<-c(1,3,6,4,9)trucks<-c(2,5,4,5,12)
cars <- c(1, 3, 6, 4, 9)
trucks <- c(2, 5, 4, 5, 12)
# Graphs cars using a y-axis that ranges from 0 to 12 (so both series fit)
plot(cars, type="o",col="blue", ylim=c(0,12) )
# Graph trucks with red dashed line and square points, added to the same plot
lines(trucks, type="o",col="red",pch=22,lty=2 )
# Create a title with a red, bold/italic font
title(main="Autos", col.main="red", font.main = 4)
| /Assignment7.R | no_license | mahendra-16/Acadgild_Assignment7 | R | false | false | 969 | r | # Question-1: Exercise: Explore the relationship between the following, where x contains numbers
# from 1 to 100:
x <- c(1:100)
# NOTE(review): y is assigned but never used anywhere below
y <- 1
x
# Quadratic and cubic curves of x
plot(x, x^2)
plot(x, x^3)
# Question-2:: First we'll produce a very simple graph using the values in the car vector:
cars <- c(1, 3, 6, 4, 9)
# Graph the car vector with all defaults plot(cars)
plot(cars, type="o", col="blue")
# Create a title with a red, bold/italic font title(main="Autos", col.main='red',font.main=4)
title(main="Autos", col.main="red", font.main=4)
# Define 2 vectors cars<-c(1,3,6,4,9)trucks<-c(2,5,4,5,12)
cars <- c(1, 3, 6, 4, 9)
trucks <- c(2, 5, 4, 5, 12)
# Graphs cars using a y-axis that ranges from 0 to 12 (so both series fit)
plot(cars, type="o",col="blue", ylim=c(0,12) )
# Graph trucks with red dashed line and square points, added to the same plot
lines(trucks, type="o",col="red",pch=22,lty=2 )
# Create a title with a red, bold/italic font
title(main="Autos", col.main="red", font.main = 4)
|
# Build a small data frame of training sessions, then report its dimensions.
Data_Frame <- data.frame (
Training = c("Strength", "Stamina", "Other"),
Pulse = c(100, 150, 120),
Duration = c(60, 30, 45)
)
# Number of columns (3: Training, Pulse, Duration)
ncol(Data_Frame)
nrow(Data_Frame) | /Saneeth/amount_dataframe.R | no_license | tactlabs/r-samples | R | false | false | 165 | r | Data_Frame <- data.frame (
Training = c("Strength", "Stamina", "Other"),
Pulse = c(100, 150, 120),
Duration = c(60, 30, 45)
)
ncol(Data_Frame)
nrow(Data_Frame) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crypto_helpers.R
\name{safely_read_json}
\alias{safely_read_json}
\title{Safely read json API}
\usage{
safely_read_json(json_url)
}
\arguments{
\item{json_url}{json API URL}
}
\value{
parsed dataset or an elegant error message
}
\description{
This will attempt to safely retrieve data and return an elegant error message to the user if the data is unavailable.
}
\examples{
{
json_url <- 'https://s2.coinmarketcap.com/generated/search/quick_search.json'
result <- safely_read_json(json_url)
}
}
| /man/safely_read_json.Rd | permissive | cran/crypto | R | false | true | 572 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/crypto_helpers.R
\name{safely_read_json}
\alias{safely_read_json}
\title{Safely read json API}
\usage{
safely_read_json(json_url)
}
\arguments{
\item{json_url}{json API URL}
}
\value{
parsed dataset or an elegant error message
}
\description{
This will attempt to safely retrieve data and return an elegant error message to the user if the data is unavailable.
}
\examples{
{
json_url <- 'https://s2.coinmarketcap.com/generated/search/quick_search.json'
result <- safely_read_json(json_url)
}
}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 3819
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2982
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2937
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2937
c
c Input Parameter (command line, file):
c input filename QBFLIB/Kontchakov/SUBMITTED/Umbrella_tbm_05.tex.module.000053.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 1536
c no.of clauses 3819
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 2937
c
c QBFLIB/Kontchakov/SUBMITTED/Umbrella_tbm_05.tex.module.000053.qdimacs 1536 3819 E1 [1120 1122 1127 1130 1131 1132 1135 1137 1139 1144 1145 1146 1156 1157 1159 1160 1166 1168 1173 1176 1177 1178 1181 1183 1185 1190 1191 1192 1202 1203 1205 1206 1212 1214 1219 1222 1223 1224 1227 1229 1231 1236 1237 1238 1248 1249 1251 1252 1258 1260 1265 1268 1269 1270 1273 1275 1277 1282 1283 1284 1294 1295 1297 1298 1304 1306 1311 1314 1315 1316 1319 1321 1323 1328 1329 1330 1340 1341 1343 1344 1350 1352 1357 1360 1361 1362 1365 1367 1369 1374 1375 1376 1386 1387 1389 1390 1396 1398 1403 1406 1407 1408 1411 1413 1415 1420 1421 1422 1432 1433 1435 1436 1442 1444 1449 1452 1453 1454 1457 1459 1461 1466 1467 1468 1478 1479 1481 1482 1488 1490 1495 1498 1499 1500 1503 1505 1507 1512 1513 1514 1524 1525 1527 1528 1129 1175 1221 1267 1313 1359 1405 1451 1497] 0 230 1103 2937 RED
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Kontchakov/SUBMITTED/Umbrella_tbm_05.tex.module.000053/Umbrella_tbm_05.tex.module.000053.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 1,607 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 3819
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2982
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2937
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 2937
c
c Input Parameter (command line, file):
c input filename QBFLIB/Kontchakov/SUBMITTED/Umbrella_tbm_05.tex.module.000053.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 1536
c no.of clauses 3819
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 2937
c
c QBFLIB/Kontchakov/SUBMITTED/Umbrella_tbm_05.tex.module.000053.qdimacs 1536 3819 E1 [1120 1122 1127 1130 1131 1132 1135 1137 1139 1144 1145 1146 1156 1157 1159 1160 1166 1168 1173 1176 1177 1178 1181 1183 1185 1190 1191 1192 1202 1203 1205 1206 1212 1214 1219 1222 1223 1224 1227 1229 1231 1236 1237 1238 1248 1249 1251 1252 1258 1260 1265 1268 1269 1270 1273 1275 1277 1282 1283 1284 1294 1295 1297 1298 1304 1306 1311 1314 1315 1316 1319 1321 1323 1328 1329 1330 1340 1341 1343 1344 1350 1352 1357 1360 1361 1362 1365 1367 1369 1374 1375 1376 1386 1387 1389 1390 1396 1398 1403 1406 1407 1408 1411 1413 1415 1420 1421 1422 1432 1433 1435 1436 1442 1444 1449 1452 1453 1454 1457 1459 1461 1466 1467 1468 1478 1479 1481 1482 1488 1490 1495 1498 1499 1500 1503 1505 1507 1512 1513 1514 1524 1525 1527 1528 1129 1175 1221 1267 1313 1359 1405 1451 1497] 0 230 1103 2937 RED
|
library(saemix)
### Name: cow.saemix
### Title: Evolution of the weight of 560 cows, in SAEM format
### Aliases: cow.saemix
### Keywords: datasets
### ** Examples
data(cow.saemix)
# Build the saemix data object: weight as a function of time, grouped by
# cow, with birth-related covariates attached.
saemix.data<-saemixData(name.data=cow.saemix,header=TRUE,name.group=c("cow"),
name.predictors=c("time"),name.response=c("weight"),
name.covariates=c("birthyear","twin","birthrank"),
units=list(x="days",y="kg",covariates=c("yr","-","-")))
# Structural model: asymptotic growth f(x) = a*(1 - b*exp(-k*x)).
growthcow<-function(psi,id,xidep) {
# input:
# psi : matrix of parameters (3 columns, a, b, k)
# id : vector of indices
# xidep : dependent variables (same nb of rows as length of id)
# returns:
# a vector of predictions of length equal to length of id
x<-xidep[,1]
a<-psi[id,1]
b<-psi[id,2]
k<-psi[id,3]
f<-a*(1-b*exp(-k*x))
return(f)
}
# Model definition: initial values in psi0, diagonal random-effect
# covariance matrix, constant residual error model.
saemix.model<-saemixModel(model=growthcow,
description="Exponential growth model",
psi0=matrix(c(700,0.9,0.02,0,0,0),ncol=3,byrow=TRUE,
dimnames=list(NULL,c("A","B","k"))),transform.par=c(1,1,1),fixed.estim=c(1,1,1),
covariate.model=matrix(c(0,0,0),ncol=3,byrow=TRUE),
covariance.model=matrix(c(1,0,0,0,1,0,0,0,1),ncol=3,byrow=TRUE),
omega.init=matrix(c(1,0,0,0,1,0,0,0,1),ncol=3,byrow=TRUE),error.model="constant")
# Algorithm settings: single chain, 200 + 100 SAEM iterations, fixed seed.
saemix.options<-list(algorithms=c(1,1,1),nb.chains=1,nbiter.saemix=c(200,100),
seed=4526,save=FALSE,save.graphs=FALSE)
# Plotting the data
plot(saemix.data,xlab="Time (day)",ylab="Weight of the cow (kg)")
# Not run (strict time constraints for CRAN)
# saemix.fit<-saemix(saemix.model,saemix.data,saemix.options)
| /data/genthat_extracted_code/saemix/examples/cow.saemix.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,548 | r | library(saemix)
### Name: cow.saemix
### Title: Evolution of the weight of 560 cows, in SAEM format
### Aliases: cow.saemix
### Keywords: datasets
### ** Examples
data(cow.saemix)
# Build the saemix data object: weight as a function of time, grouped by
# cow, with birth-related covariates attached.
saemix.data<-saemixData(name.data=cow.saemix,header=TRUE,name.group=c("cow"),
name.predictors=c("time"),name.response=c("weight"),
name.covariates=c("birthyear","twin","birthrank"),
units=list(x="days",y="kg",covariates=c("yr","-","-")))
# Structural model: asymptotic growth f(x) = a*(1 - b*exp(-k*x)).
growthcow<-function(psi,id,xidep) {
# input:
# psi : matrix of parameters (3 columns, a, b, k)
# id : vector of indices
# xidep : dependent variables (same nb of rows as length of id)
# returns:
# a vector of predictions of length equal to length of id
x<-xidep[,1]
a<-psi[id,1]
b<-psi[id,2]
k<-psi[id,3]
f<-a*(1-b*exp(-k*x))
return(f)
}
# Model definition: initial values in psi0, diagonal random-effect
# covariance matrix, constant residual error model.
saemix.model<-saemixModel(model=growthcow,
description="Exponential growth model",
psi0=matrix(c(700,0.9,0.02,0,0,0),ncol=3,byrow=TRUE,
dimnames=list(NULL,c("A","B","k"))),transform.par=c(1,1,1),fixed.estim=c(1,1,1),
covariate.model=matrix(c(0,0,0),ncol=3,byrow=TRUE),
covariance.model=matrix(c(1,0,0,0,1,0,0,0,1),ncol=3,byrow=TRUE),
omega.init=matrix(c(1,0,0,0,1,0,0,0,1),ncol=3,byrow=TRUE),error.model="constant")
# Algorithm settings: single chain, 200 + 100 SAEM iterations, fixed seed.
saemix.options<-list(algorithms=c(1,1,1),nb.chains=1,nbiter.saemix=c(200,100),
seed=4526,save=FALSE,save.graphs=FALSE)
# Plotting the data
plot(saemix.data,xlab="Time (day)",ylab="Weight of the cow (kg)")
# Not run (strict time constraints for CRAN)
# saemix.fit<-saemix(saemix.model,saemix.data,saemix.options)
|
\name{butterworth}
\docType{methods}
\alias{butterworth}
\alias{butterworth,Trace,numeric,missing,numeric,missing-method}
\alias{butterworth,Trace,numeric,numeric,missing,missing-method}
\alias{butterworth,Trace,numeric,numeric,numeric,character-method}
\alias{butterworth,Trace,numeric,numeric,numeric,missing-method}
\title{Apply Butterworth filter}
\description{
The \code{butterworth} method of \code{Trace} objects returns a new \code{Trace}
where data in the \code{@data} slot have been modified by applying a Butterworth filter.
}
\usage{
butterworth(x, n, low, high, type)
}
\arguments{
\item{x}{a \code{Trace} object}
\item{n}{filter order}
\item{low}{frequency used in low- or stop/band-pass filters}
\item{high}{frequency used in high or stop/band-pass filters}
\item{type}{type of filter -- \code{'low', 'high', 'pass'} or \code{'stop'}}
}
\details{
This method creates a Butterworth filter with the specified characteristics and applies
it to the Trace data.
When only \code{n} and \code{low} are specified, a high pass filter is applied.
When only \code{n} and \code{high} are specified, a low pass filter is applied.
When \code{n} and both \code{low} and \code{high} are specified, a band pass filter is applied.
To apply a band stop filter you must specify \code{n}, \code{low}, \code{high} and \code{type='stop'}
}
\value{
A new \code{Trace} object is returned.
}
%%\references{ }
\author{
Jonathan Callahan \email{jonathan@mazamascience.com}
}
%% \note{ }
\seealso{
signal::butter, signal::filter
}
\examples{
\dontrun{
# Open a connection to IRIS DMC webservices
iris <- new("IrisClient")
# Compare to the results in figure 2a of
#
# "Determination of New Zealand Ocean Bottom Seismometer Orientation
# via Rayleigh-Wave Polarization", Stachnik et al.
#
# http://srl.geoscienceworld.org/content/83/4/704
#
# (note: since publication, ZU.NZ19..BH1 has been renamed BH2 and ZU.NZ19..BH2 has been renamed BH1)
starttime <- as.POSIXct("2009-02-18 22:01:07",tz="GMT")
endtime <- starttime + 630
verticalLines <- starttime + seq(30,630,100)
# Get data
stZ <- getSNCL(iris,"ZU.NZ19..BHZ",starttime,endtime)
st2 <- getSNCL(iris,"ZU.NZ19..BH2",starttime,endtime)
st1 <- getSNCL(iris,"ZU.NZ19..BH1",starttime,endtime)
# Demean, Detrend, Taper
trZ <- DDT(stZ@traces[[1]],TRUE,TRUE,0.05)
tr2 <- DDT(st2@traces[[1]],TRUE,TRUE,0.05)
tr1 <- DDT(st1@traces[[1]],TRUE,TRUE,0.05)
# Bandpass filter
trZ_f <- butterworth(trZ,2,0.02,0.04,type='pass')
tr2_f <- butterworth(tr2,2,0.02,0.04,type='pass')
tr1_f <- butterworth(tr1,2,0.02,0.04,type='pass')
# 3 rows
layout(matrix(seq(3)))
# Plot
plot(trZ_f)
abline(v=verticalLines,col='gray50',lty=2)
plot(tr2_f)
abline(v=verticalLines,col='gray50',lty=2)
plot(tr1_f)
abline(v=verticalLines,col='gray50',lty=2)
# Restore default layout
layout(1)
}
}
\keyword{methods}
| /man/butterworth.Rd | no_license | cran/IRISSeismic | R | false | false | 2,843 | rd | \name{butterworth}
\docType{methods}
\alias{butterworth}
\alias{butterworth,Trace,numeric,missing,numeric,missing-method}
\alias{butterworth,Trace,numeric,numeric,missing,missing-method}
\alias{butterworth,Trace,numeric,numeric,numeric,character-method}
\alias{butterworth,Trace,numeric,numeric,numeric,missing-method}
\title{Apply Butterworth filter}
\description{
The \code{butterworth} method of \code{Trace} objects returns a new \code{Trace}
where data in the \code{@data} slot have been modified by applying a Butterworth filter.
}
\usage{
butterworth(x, n, low, high, type)
}
\arguments{
\item{x}{a \code{Trace} object}
\item{n}{filter order}
\item{low}{frequency used in low- or stop/band-pass filters}
\item{high}{frequency used in high or stop/band-pass filters}
\item{type}{type of filter -- \code{'low', 'high', 'pass'} or \code{'stop'}}
}
\details{
This method creates a Butterworth filter with the specified characteristics and applies
it to the Trace data.
When only \code{n} and \code{low} are specified, a high pass filter is applied.
When only \code{n} and \code{high} are specified, a low pass filter is applied.
When \code{n} and both \code{low} and \code{high} are specified, a band pass filter is applied.
To apply a band stop filter you must specify \code{n}, \code{low}, \code{high} and \code{type='stop'}
}
\value{
A new \code{Trace} object is returned.
}
%%\references{ }
\author{
Jonathan Callahan \email{jonathan@mazamascience.com}
}
%% \note{ }
\seealso{
signal::butter, signal::filter
}
\examples{
\dontrun{
# Open a connection to IRIS DMC webservices
iris <- new("IrisClient")
# Compare to the results in figure 2a of
#
# "Determination of New Zealand Ocean Bottom Seismometer Orientation
# via Rayleigh-Wave Polarization", Stachnik et al.
#
# http://srl.geoscienceworld.org/content/83/4/704
#
# (note: since publication, ZU.NZ19..BH1 has been renamed BH2 and ZU.NZ19..BH2 has been renamed BH1)
starttime <- as.POSIXct("2009-02-18 22:01:07",tz="GMT")
endtime <- starttime + 630
verticalLines <- starttime + seq(30,630,100)
# Get data
stZ <- getSNCL(iris,"ZU.NZ19..BHZ",starttime,endtime)
st2 <- getSNCL(iris,"ZU.NZ19..BH2",starttime,endtime)
st1 <- getSNCL(iris,"ZU.NZ19..BH1",starttime,endtime)
# Demean, Detrend, Taper
trZ <- DDT(stZ@traces[[1]],TRUE,TRUE,0.05)
tr2 <- DDT(st2@traces[[1]],TRUE,TRUE,0.05)
tr1 <- DDT(st1@traces[[1]],TRUE,TRUE,0.05)
# Bandpass filter
trZ_f <- butterworth(trZ,2,0.02,0.04,type='pass')
tr2_f <- butterworth(tr2,2,0.02,0.04,type='pass')
tr1_f <- butterworth(tr1,2,0.02,0.04,type='pass')
# 3 rows
layout(matrix(seq(3)))
# Plot
plot(trZ_f)
abline(v=verticalLines,col='gray50',lty=2)
plot(tr2_f)
abline(v=verticalLines,col='gray50',lty=2)
plot(tr1_f)
abline(v=verticalLines,col='gray50',lty=2)
# Restore default layout
layout(1)
}
}
\keyword{methods}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/branchRateMatrix.R
\name{branchRateMatrix}
\alias{branchRateMatrix}
\title{Compute exponentiated rate matrix for all branches}
\usage{
branchRateMatrix(rate, branch.length, pi, log = TRUE)
}
\arguments{
\item{rate}{vector of mutation rates, one per branch}
\item{branch.length}{vector of branch lengths}
\item{pi}{stationary distribution of allele frequencies}
\item{log}{if TRUE returns log transition matricies}
}
\value{
a list of transition matrices, one entry for each specified branch length. The rows are the parental allele,
the columns are the child allele.
}
\description{
Compute exponentiated rate matrix for all branches
}
| /man/branchRateMatrix.Rd | no_license | ndukler/epiAlleleGLM | R | false | true | 718 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/branchRateMatrix.R
\name{branchRateMatrix}
\alias{branchRateMatrix}
\title{Compute exponentiated rate matrix for all branches}
\usage{
branchRateMatrix(rate, branch.length, pi, log = TRUE)
}
\arguments{
\item{rate}{vector of mutation rates, one per branch}
\item{branch.length}{vector of branch lengths}
\item{pi}{stationary distribution of allele frequencies}
\item{log}{if TRUE returns log transition matricies}
}
\value{
a list of transition matrices, one entry for each specified branch length. The rows are the parental allele,
the columns are the child allele.
}
\description{
Compute exponentiated rate matrix for all branches
}
|
#' Tidying methods for ordinal logistic regression models
#'
#' These methods tidy the coefficients of ordinal logistic regression
#' models generated by [ordinal::clm()] or [ordinal::clmm()]
#' of the `ordinal` package, [MASS::polr()] of the `MASS`
#' package, or [survey::svyolr()] of the `survey` package.
#'
#' @param x a model of class `clm`, `clmm`, `polr` or `svyolr`
#' @template param_confint
#' @template param_exponentiate
#' @template param_quick
#' @template param_data
#' @template param_newdata
#'
#' @param conf.type the type of confidence interval
#' (see [ordinal::confint.clm()])
#'
#' @param type.predict type of prediction to compute for a CLM; passed on to
#' [ordinal::predict.clm()] or `predict.polr`
#' @param ... extra arguments
#' @return
#' `tidy.clm`, `tidy.clmm`, `tidy.polr` and `tidy.svyolr`
#' return one row for each coefficient at each level of the response variable,
#' with six columns:
#' \item{term}{term in the model}
#' \item{estimate}{estimated coefficient}
#' \item{std.error}{standard error}
#' \item{statistic}{t-statistic}
#' \item{p.value}{two-sided p-value}
#' \item{coefficient_type}{type of coefficient, see [ordinal::clm()]}
#'
#' If `conf.int=TRUE`, it also includes columns for `conf.low` and
#' `conf.high`.
#'
#' `glance.clm`, `glance.clmm`, `glance.polr` and `glance.svyolr`
#' return a one-row data.frame with the columns:
#' \item{edf}{the effective degrees of freedom}
#' \item{logLik}{the data's log-likelihood under the model}
#' \item{AIC}{the Akaike Information Criterion}
#' \item{BIC}{the Bayesian Information Criterion}
#' \item{df.residual}{residual degrees of freedom}
#'
#' `augment.clm` and `augment.polr` returns
#' one row for each observation, with additional columns added to
#' the original data:
#' \item{.fitted}{fitted values of model}
#' \item{.se.fit}{standard errors of fitted values}
#'
#' `augment` is not supported for [ordinal::clmm()]
#' and [survey::svyolr()] models.
#'
#' All tidying methods return a `data.frame` without rownames.
#' The structure depends on the method chosen.
#'
#' @name ordinal_tidiers
#'
#' @examples
#' library(ordinal)
#' clm_mod <- clm(rating ~ temp * contact, data = wine)
#' tidy(clm_mod)
#' tidy(clm_mod, conf.int = TRUE)
#' tidy(clm_mod, conf.int = TRUE, conf.type = "Wald", exponentiate = TRUE)
#' glance(clm_mod)
#' augment(clm_mod)
#'
#' clm_mod2 <- clm(rating ~ temp, nominal = ~ contact, data = wine)
#' tidy(clm_mod2)
#'
#' clmm_mod <- clmm(rating ~ temp + contact + (1 | judge), data = wine)
#' tidy(clmm_mod)
#' glance(clmm_mod)
#'
#' library(MASS)
#' polr_mod <- polr(Sat ~ Infl + Type + Cont, weights = Freq, data = housing)
#' tidy(polr_mod, exponentiate = TRUE, conf.int = TRUE)
#' glance(polr_mod)
#' augment(polr_mod, type.predict = "class")
NULL
#' @rdname ordinal_tidiers
#' @export
tidy.clm <- function(x, conf.int = FALSE, conf.level = .95,
                     exponentiate = FALSE, quick = FALSE,
                     conf.type = c("profile", "Wald"), ...) {
  # Fast path: only term/estimate, skipping summary() and any intervals.
  if (quick) {
    estimates <- coef(x)
    out <- data.frame(
      term = names(estimates), estimate = unname(estimates),
      stringsAsFactors = FALSE
    )
    return(process_clm(out, x, conf.int = FALSE, exponentiate = exponentiate))
  }
  conf.type <- match.arg(conf.type)
  # Full coefficient table from the model summary; summary() may return
  # fewer than four columns, so name only the columns that are present.
  coef_table <- coef(summary(x))
  col_names <- c("estimate", "std.error", "statistic", "p.value")
  out <- fix_data_frame(coef_table, col_names[seq_len(ncol(coef_table))])
  # Shared post-processing: exponentiation, confidence intervals and
  # alpha/beta/zeta coefficient labelling.
  process_clm(
    out, x,
    conf.int = conf.int, conf.level = conf.level,
    exponentiate = exponentiate, conf.type = conf.type
  )
}
# Shared post-processing for tidy.clm()/tidy.clmm() output: optionally
# exponentiates estimates, attaches profile/Wald confidence intervals,
# and labels each term as alpha (threshold), beta (location) or zeta
# (scale/nominal) coefficient.
process_clm <- function(ret, x, conf.int = FALSE, conf.level = .95,
                        exponentiate = FALSE, conf.type = "profile") {
  if (exponentiate) {
    trans <- exp
  } else {
    trans <- identity
  }
  if (conf.int) {
    # Profiling can emit progress messages; silence them.
    CI <- suppressMessages(
      trans(stats::confint(x, level = conf.level, type = conf.type))
    )
    colnames(CI) <- c("conf.low", "conf.high")
    CI <- as.data.frame(CI)
    CI$term <- rownames(CI)
    # merge() sorts by the join key, so remember the original row order
    # and restore it after attaching the intervals.
    ret$orig_row_order <- seq_len(nrow(ret))
    ret <- merge(ret, unrowname(CI), by = "term", all.x = TRUE)
    ret <- ret[order(ret$orig_row_order),]
    ret$orig_row_order <- NULL
  }
  ret$estimate <- trans(ret$estimate)
  ## make sure original order hasn't changed
  if (!identical(ret$term,c(names(x$alpha),names(x$beta),names(x$zeta)))) {
    stop("row order changed; please contact maintainers")
  }
  # One label per coefficient, repeated to match the number of
  # alpha/beta/zeta coefficients stored on the fitted model.
  ret$coefficient_type <- rep(c("alpha","beta","zeta"),
                              vapply(x[c("alpha","beta","zeta")],
                                     length, numeric(1)))
  as_tibble(ret)
}
#' @rdname ordinal_tidiers
#' @export
tidy.clmm <- function(x, conf.int = FALSE, conf.level = .95,
                      exponentiate = FALSE, quick = FALSE,
                      conf.type = c("profile", "Wald"), ...) {
  # Delegate to tidy.clm().  BUG FIX: `conf.type` was accepted here but
  # silently dropped when forwarding, so e.g. conf.type = "Wald" was
  # ignored and profiling was always used; forward it explicitly.
  conf.type <- match.arg(conf.type)
  tidy.clm(x, conf.int, conf.level, exponentiate, quick,
           conf.type = conf.type, ...)
}
#' @rdname ordinal_tidiers
#' @export
glance.clm <- function(x, ...) {
  # Seed the one-row summary with the effective degrees of freedom;
  # finish_glance() appends logLik, AIC, BIC and df.residual.
  out <- tibble(edf = x$edf)
  finish_glance(out, x)
}
#' @rdname ordinal_tidiers
#' @export
glance.clmm <- glance.clm  # clmm objects expose the same components glance.clm reads
#' @rdname ordinal_tidiers
#' @export
augment.clm <- function(x, data = model.frame(x), newdata = NULL,
                        type.predict = c("prob", "class"), ...) {
  # "prob" (default) or "class" is passed through to predict();
  # augment_columns() attaches the .fitted/.se.fit columns documented above.
  type.predict <- match.arg(type.predict)
  augment_columns(x, data, newdata, type = type.predict)
}
| /R/ordinal-tidiers.R | no_license | ethchr/broom | R | false | false | 5,383 | r | #' Tidying methods for ordinal logistic regression models
#'
#' These methods tidy the coefficients of ordinal logistic regression
#' models generated by [ordinal::clm()] or [ordinal::clmm()]
#' of the `ordinal` package, [MASS::polr()] of the `MASS`
#' packge, or [survey::svyolr()] of the `survey` package.
#'
#' @param x a model of class `clm`, `clmm`, `polr` or `svyolr`
#' @template param_confint
#' @template param_exponentiate
#' @template param_quick
#' @template param_data
#' @template param_newdata
#'
#' @param conf.type the type of confidence interval
#' (see [ordinal::confint.clm()])
#'
#' @param type.predict type of prediction to compute for a CLM; passed on to
#' [ordinal::predict.clm()] or `predict.polr`
#' @param ... extra arguments
#' @return
#' `tidy.clm`, `tidy.clmm`, `tidy.polr` and `tidy.svyolr`
#' return one row for each coefficient at each level of the response variable,
#' with six columns:
#' \item{term}{term in the model}
#' \item{estimate}{estimated coefficient}
#' \item{std.error}{standard error}
#' \item{statistic}{t-statistic}
#' \item{p.value}{two-sided p-value}
#' \item{coefficient_type}{type of coefficient, see [ordinal::clm()]}
#'
#' If `conf.int=TRUE`, it also includes columns for `conf.low` and `conf.high`.
#'
#' `glance.clm`, `glance.clmm`, `glance.polr` and `glance.svyolr`
#' return a one-row data.frame with the columns:
#' \item{edf}{the effective degrees of freedom}
#' \item{logLik}{the data's log-likelihood under the model}
#' \item{AIC}{the Akaike Information Criterion}
#' \item{BIC}{the Bayesian Information Criterion}
#' \item{df.residual}{residual degrees of freedom}
#'
#' `augment.clm` and `augment.polr` returns
#' one row for each observation, with additional columns added to
#' the original data:
#' \item{.fitted}{fitted values of model}
#' \item{.se.fit}{standard errors of fitted values}
#'
#' `augment` is not supported for [ordinal::clmm()]
#' and [survey::svyolr()] models.
#'
#' All tidying methods return a `data.frame` without rownames.
#' The structure depends on the method chosen.
#'
#' @name ordinal_tidiers
#'
#' @examples
#' library(ordinal)
#' clm_mod <- clm(rating ~ temp * contact, data = wine)
#' tidy(clm_mod)
#' tidy(clm_mod, conf.int = TRUE)
#' tidy(clm_mod, conf.int = TRUE, conf.type = "Wald", exponentiate = TRUE)
#' glance(clm_mod)
#' augment(clm_mod)
#'
#' clm_mod2 <- clm(rating ~ temp, nominal = ~ contact, data = wine)
#' tidy(clm_mod2)
#'
#' clmm_mod <- clmm(rating ~ temp + contact + (1 | judge), data = wine)
#' tidy(clmm_mod)
#' glance(clmm_mod)
#'
#' library(MASS)
#' polr_mod <- polr(Sat ~ Infl + Type + Cont, weights = Freq, data = housing)
#' tidy(polr_mod, exponentiate = TRUE, conf.int = TRUE)
#' glance(polr_mod)
#' augment(polr_mod, type.predict = "class")
NULL
#' @rdname ordinal_tidiers
#' @export
tidy.clm <- function(x, conf.int = FALSE, conf.level = .95,
                     exponentiate = FALSE, quick = FALSE,
                     conf.type = c("profile", "Wald"), ...) {
  # Fast path: return only term/estimate, skipping summary(), confint()
  # and the std.error/statistic/p.value columns.
  if (quick) {
    co <- coef(x)
    ret <- data.frame(
      term = names(co), estimate = unname(co),
      stringsAsFactors = FALSE
    )
    return(process_clm(ret, x, conf.int = FALSE, exponentiate = exponentiate))
  }
  conf.type <- match.arg(conf.type)
  co <- coef(summary(x))
  nn <- c("estimate", "std.error", "statistic", "p.value")
  # summary() may return fewer than four columns; name only those present.
  ret <- fix_data_frame(co, nn[seq_len(ncol(co))])
  process_clm(
    ret, x,
    conf.int = conf.int, conf.level = conf.level,
    exponentiate = exponentiate, conf.type = conf.type
  )
}
# Shared post-processing for tidy.clm()/tidy.clmm() output: optionally
# exponentiates estimates, attaches profile/Wald confidence intervals,
# and labels each term as alpha/beta/zeta coefficient.
process_clm <- function(ret, x, conf.int = FALSE, conf.level = .95,
                        exponentiate = FALSE, conf.type = "profile") {
  if (exponentiate) {
    trans <- exp
  } else {
    trans <- identity
  }
  if (conf.int) {
    # Profiling can emit progress messages; silence them.
    CI <- suppressMessages(
      trans(stats::confint(x, level = conf.level, type = conf.type))
    )
    colnames(CI) <- c("conf.low", "conf.high")
    CI <- as.data.frame(CI)
    CI$term <- rownames(CI)
    # merge() sorts by the join key, so remember and restore row order.
    ret$orig_row_order <- seq_len(nrow(ret))
    ret <- merge(ret, unrowname(CI), by = "term", all.x = TRUE)
    ret <- ret[order(ret$orig_row_order),]
    ret$orig_row_order <- NULL
  }
  ret$estimate <- trans(ret$estimate)
  ## make sure original order hasn't changed
  if (!identical(ret$term,c(names(x$alpha),names(x$beta),names(x$zeta)))) {
    stop("row order changed; please contact maintainers")
  }
  # One label per coefficient, repeated per alpha/beta/zeta count.
  ret$coefficient_type <- rep(c("alpha","beta","zeta"),
                              vapply(x[c("alpha","beta","zeta")],
                                     length, numeric(1)))
  as_tibble(ret)
}
#' @rdname ordinal_tidiers
#' @export
tidy.clmm <- function(x, conf.int = FALSE, conf.level = .95,
                      exponentiate = FALSE, quick = FALSE,
                      conf.type = c("profile", "Wald"), ...) {
  # Delegate to tidy.clm().  BUG FIX: `conf.type` was accepted here but
  # silently dropped when forwarding, so e.g. conf.type = "Wald" was
  # ignored and profiling was always used; forward it explicitly.
  conf.type <- match.arg(conf.type)
  tidy.clm(x, conf.int, conf.level, exponentiate, quick,
           conf.type = conf.type, ...)
}
#' @rdname ordinal_tidiers
#' @export
glance.clm <- function(x, ...) {
  # Start the one-row summary with the effective degrees of freedom;
  # finish_glance() appends logLik, AIC, BIC and df.residual.
  ret <- with(
    x,
    tibble(
      edf = edf
    )
  )
  finish_glance(ret, x)
}
#' @rdname ordinal_tidiers
#' @export
glance.clmm <- glance.clm  # clmm objects expose the same components glance.clm reads
#' @rdname ordinal_tidiers
#' @export
augment.clm <- function(x, data = model.frame(x), newdata = NULL,
                        type.predict = c("prob", "class"), ...) {
  # "prob" (default) or "class" is passed through to predict();
  # augment_columns() attaches the .fitted/.se.fit columns documented above.
  type.predict <- match.arg(type.predict)
  augment_columns(x, data, newdata, type = type.predict)
}
|
# Script: build weighted summary tables disaggregated by displacement status.
# NOTE(review): rm(list = ls()) wipes the user's workspace — discouraged in
# scripts; prefer running in a fresh R session.
rm(list = ls())
source("code/table_function.R")
# source("code/preliminary_weighted_analysis.R")
# Run the first 25 lines from the file above to get the data, questionnaire, etc.
library(tidyverse)
library(hypegrammaR)
library(parallel)
library(dplyr)
# Kobo questionnaire definition (questions + choice lists).
questions <- read.csv("./input/survey.csv", stringsAsFactors = F)
choices <- read.csv("./input/choices.csv", stringsAsFactors = F)
data <- load_data(file = "./input/data.csv")
# Normalise empty strings to NA, then drop columns that are entirely empty.
data <- mutate_if(data, is.character, na_if, "")
data <- data %>% select_if(~ !(all(is.na(.x)) | all(. == "")))
sampling_frame <- load_samplingframe(file = "./input/sampling_frame.csv")
questionnaire <- load_questionnaire(data = data,
                                    questions = questions,
                                    choices = choices,
                                    choices.label.column.to.use = "label::English (en)")
# Stratified survey weights mapped from the sampling frame.
weights <-map_to_weighting(sampling.frame = sampling_frame,
                           data.stratum.column = "strata.names",
                           sampling.frame.population.column = "population",
                           sampling.frame.stratum.column = "strata.names",
                           data = data)
analysisplan <- load_analysisplan(file = "./input/bio_assistance_displacement.csv")
# Keep only plan rows whose dependent variable actually exists in the data.
analysisplan %>% filter(analysisplan[["dependent.variable"]] %in% names(data)) -> analysisplan
# Correcting blank cells to NA
# filtering data for what we want to analyze
# leave select multiple text columns in
text <- filter(questions, str_detect(type, "(\\btext\\b)|(\\bnote\\b)"))$name
# Strip leading "<number>. " prefixes from the English choice labels.
choices$label..English..en. <- gsub("^\\d+[.]\\s*","", choices$label..English..en.)
# Drop free-text columns and metadata/identifier columns before analysis.
data_to_analyze <- data %>%
  select(-one_of(text)) %>%
  select(-start, -end, -today, -mantika, -baladiya, -baladiya_label, -baladiya_other,
         -consent, -not_hoh_ability_toanswer, -`x_index`, -uuid, -`x_submission_time`, -`x_id`,-x__version__ ) %>%
  select_if(~ !(all(is.na(.x)) | all(. == "")))
#data_to_analyze <- data_to_analyze[,1:30]
#analysisplan <- analysisplan[2,]
# table_maker() is defined in code/table_function.R; last two arguments are
# presumably the country label and the disaggregation variable — TODO confirm.
strata_output <- table_maker(data_to_analyze,
                             questionnaire,
                             questions,
                             choices,
                             weights,
                             #analysisplan,
                             labels = T,
                             language = "english",
                             "Libya",
                             "displacement_status"
)
saveRDS(strata_output, "output/overall_displacement_status.RDS")
write_csv(strata_output, "output/overall_displacement_status.csv")
| /code/table_output.R | no_license | hedibmustapha/lby_msna2019 | R | false | false | 2,617 | r | rm(list = ls())
source("code/table_function.R")
# source("code/preliminary_weighted_analysis.R")
# Run the first 25 lines from the file above to get hte data, questionnaire, etc.
library(tidyverse)
library(hypegrammaR)
library(parallel)
library(dplyr)
questions <- read.csv("./input/survey.csv", stringsAsFactors = F)
choices <- read.csv("./input/choices.csv", stringsAsFactors = F)
data <- load_data(file = "./input/data.csv")
data <- mutate_if(data, is.character, na_if, "")
data <- data %>% select_if(~ !(all(is.na(.x)) | all(. == "")))
sampling_frame <- load_samplingframe(file = "./input/sampling_frame.csv")
questionnaire <- load_questionnaire(data = data,
questions = questions,
choices = choices,
choices.label.column.to.use = "label::English (en)")
weights <-map_to_weighting(sampling.frame = sampling_frame,
data.stratum.column = "strata.names",
sampling.frame.population.column = "population",
sampling.frame.stratum.column = "strata.names",
data = data)
analysisplan <- load_analysisplan(file = "./input/bio_assistance_displacement.csv")
analysisplan %>% filter(analysisplan[["dependent.variable"]] %in% names(data)) -> analysisplan
# Correcting blank cells to NA
# filtering data for what we want to analyze
# leave select multiple text columns in
text <- filter(questions, str_detect(type, "(\\btext\\b)|(\\bnote\\b)"))$name
choices$label..English..en. <- gsub("^\\d+[.]\\s*","", choices$label..English..en.)
data_to_analyze <- data %>%
select(-one_of(text)) %>%
select(-start, -end, -today, -mantika, -baladiya, -baladiya_label, -baladiya_other,
-consent, -not_hoh_ability_toanswer, -`x_index`, -uuid, -`x_submission_time`, -`x_id`,-x__version__ ) %>%
select_if(~ !(all(is.na(.x)) | all(. == "")))
#data_to_analyze <- data_to_analyze[,1:30]
#analysisplan <- analysisplan[2,]
strata_output <- table_maker(data_to_analyze,
questionnaire,
questions,
choices,
weights,
#analysisplan,
labels = T,
language = "english",
"Libya",
"displacement_status"
)
saveRDS(strata_output, "output/overall_displacement_status.RDS")
write_csv(strata_output, "output/overall_displacement_status.csv")
|
######################################################
# this file contains internal functions
# for calculation of fuzzy weights using method described
# by Krejčí et al. (2016)
######################################################
# Lower/upper limit of the fuzzy weight for row i (Krejčí et al. 2016):
# combines the geometric mean of row i's bounds with the optimised
# products over the remaining rows.  Returns c(lower, upper).
setGeneric(".weightsLimits",
           function(data, i) standardGeneric(".weightsLimits"))
setMethod(
  f=".weightsLimits",
  signature(data = "FuzzyPairwiseComparisonMatrix", i = "numeric"),
  definition=function(data, i)
  {
    p = nrow(data@fnMin)
    # calculate lower value of weight
    prodL = prod(data@fnMin[i,])^(1/p)
    optL1 = .optProLower(data, i, "min")
    optL2 = .optProLower(data, i, "max")
    # lower bound is against the largest (worst-case) competing product
    wL = prodL /(prodL + max(optL1, optL2))
    # calculate upper value of weight
    prodU = prod(data@fnMax[i,])^(1/p)
    optU1 = .optProUpper(data, i, "min")
    optU2 = .optProUpper(data, i, "max")
    # upper bound is against the smallest (best-case) competing product
    wU = prodU /(prodU + min(optU1, optU2))
    return(c(wL,wU))
  }
)
# function for calculating upper limits
setGeneric(".optProUpper",
           function(data, i, type) standardGeneric(".optProUpper"))
setMethod(
  f = ".optProUpper",
  signature(data = "FuzzyPairwiseComparisonMatrix", i = "numeric", type = "character"),
  definition = function(data, i, type)
  {
    # Upper weight limit fixes the elements at the lower bounds (fnMin);
    # `type` selects which bound of the comparison matrix feeds the products.
    element <- data@fnMin
    matrix <- switch(type,
                     min = data@fnMin,
                     max = data@fnMax,
                     stop(paste("Unrecognized type (should be min or max) is: ",type,".", sep = "")))
    .optPro(data, i, matrix, element)
  }
)
# function for calculating lower limits
setGeneric(".optProLower",
           function(data, i, type) standardGeneric(".optProLower"))
setMethod(
  f = ".optProLower",
  signature(data = "FuzzyPairwiseComparisonMatrix", i = "numeric", type = "character"),
  definition = function(data, i, type)
  {
    # Lower weight limit fixes the elements at the upper bounds (fnMax);
    # `type` selects which bound of the comparison matrix feeds the products.
    element <- data@fnMax
    matrix <- switch(type,
                     min = data@fnMin,
                     max = data@fnMax,
                     stop(paste("Unrecognized type (should be min or max) is: ",type,".", sep = "")))
    .optPro(data, i, matrix, element)
  }
)
# optimization function
# For every row k != i, accumulates the p-th root of element[k,i] times
# two products over `matrix`: reciprocals of the entries above the
# diagonal in column k (rows 1..k-1) and the direct entries to the right
# of the diagonal in row k (columns k+1..p) — always excluding index i.
setGeneric(".optPro",
           function(data, i, matrix, element) standardGeneric(".optPro"))
setMethod(
  f=".optPro",
  signature(data = "FuzzyPairwiseComparisonMatrix", i = "numeric", matrix = "matrix", element = "matrix"),
  definition=function(data, i, matrix, element)
  {
    sum = 0
    p = nrow(matrix)
    for (k in 1:p){
      if(k == i){
        next
      }
      # product of reciprocal entries above the diagonal (l < k, l != i)
      if(k>1){
        prod1 = 1
        for(l in 1:(k-1)){
          if(l == i){
            next
          }
          prod1 = prod1 * (1/matrix[l,k]) #prod(1/matrix[l,k]) # remove i from set
        }
      }else{
        prod1 = 1
      }
      # product of direct entries right of the diagonal (l > k, l != i)
      if((k+1)<=p){
        prod2 = 1
        for(l in (k+1):p){
          if(l == i){
            next
          }
          prod2 = prod2 * matrix[k,l] #prod(matrix[k,(k+1):p]) # remove i from set
        }
      }else{
        prod2 = 1
      }
      sum = sum + (element[k,i] * prod1 * prod2)^(1/p)
    }
    return(sum)
  }
)
| /R/function-internal-optimization.R | no_license | cran/FuzzyAHP | R | false | false | 3,233 | r | ######################################################
# this file contains internal functions
# for calculation of fuzzy weights using method described
# by Krejčí et al. (2016)
######################################################
# Lower/upper limit of the fuzzy weight for row i (Krejčí et al. 2016).
# Returns c(lower, upper).
setGeneric(".weightsLimits",
           function(data, i) standardGeneric(".weightsLimits"))
setMethod(
  f=".weightsLimits",
  signature(data = "FuzzyPairwiseComparisonMatrix", i = "numeric"),
  definition=function(data, i)
  {
    p = nrow(data@fnMin)
    # calculate lower value of weight
    prodL = prod(data@fnMin[i,])^(1/p)
    optL1 = .optProLower(data, i, "min")
    optL2 = .optProLower(data, i, "max")
    # lower bound is against the largest (worst-case) competing product
    wL = prodL /(prodL + max(optL1, optL2))
    # calculate upper value of weight
    prodU = prod(data@fnMax[i,])^(1/p)
    optU1 = .optProUpper(data, i, "min")
    optU2 = .optProUpper(data, i, "max")
    # upper bound is against the smallest (best-case) competing product
    wU = prodU /(prodU + min(optU1, optU2))
    return(c(wL,wU))
  }
)
# function for calculating upper limits
# Fixed elements come from the lower bounds (fnMin); `type` selects which
# matrix bound feeds the row/column products in .optPro().
setGeneric(".optProUpper",
           function(data, i, type) standardGeneric(".optProUpper"))
setMethod(
  f=".optProUpper",
  signature(data = "FuzzyPairwiseComparisonMatrix", i = "numeric", type = "character"),
  definition=function(data, i, type)
  {
    element = data@fnMin
    if(type == "min"){
      matrix = data@fnMin
    }
    else if(type == "max"){
      matrix = data@fnMax
    }
    else{
      stop(paste("Unrecognized type (should be min or max) is: ",type,".", sep = ""))
    }
    return(.optPro(data, i, matrix, element))
  }
)
# function for calculating lower limits
# Fixed elements come from the upper bounds (fnMax); `type` selects which
# matrix bound feeds the row/column products in .optPro().
setGeneric(".optProLower",
           function(data, i, type) standardGeneric(".optProLower"))
setMethod(
  f=".optProLower",
  signature(data = "FuzzyPairwiseComparisonMatrix", i = "numeric", type = "character"),
  definition=function(data, i, type)
  {
    element = data@fnMax
    if(type == "min"){
      matrix = data@fnMin
    }
    else if(type == "max"){
      matrix = data@fnMax
    }
    else{
      stop(paste("Unrecognized type (should be min or max) is: ",type,".", sep = ""))
    }
    return(.optPro(data, i, matrix, element))
  }
)
# optimization function
# For every row k != i, accumulates the p-th root of element[k,i] times
# the products of reciprocal above-diagonal and direct right-of-diagonal
# entries of `matrix`, always excluding index i.
setGeneric(".optPro",
           function(data, i, matrix, element) standardGeneric(".optPro"))
setMethod(
  f=".optPro",
  signature(data = "FuzzyPairwiseComparisonMatrix", i = "numeric", matrix = "matrix", element = "matrix"),
  definition=function(data, i, matrix, element)
  {
    sum = 0
    p = nrow(matrix)
    for (k in 1:p){
      if(k == i){
        next
      }
      # product of reciprocal entries above the diagonal (l < k, l != i)
      if(k>1){
        prod1 = 1
        for(l in 1:(k-1)){
          if(l == i){
            next
          }
          prod1 = prod1 * (1/matrix[l,k]) #prod(1/matrix[l,k]) # remove i from set
        }
      }else{
        prod1 = 1
      }
      # product of direct entries right of the diagonal (l > k, l != i)
      if((k+1)<=p){
        prod2 = 1
        for(l in (k+1):p){
          if(l == i){
            next
          }
          prod2 = prod2 * matrix[k,l] #prod(matrix[k,(k+1):p]) # remove i from set
        }
      }else{
        prod2 = 1
      }
      sum = sum + (element[k,i] * prod1 * prod2)^(1/p)
    }
    return(sum)
  }
)
|
# rankall: for a given outcome, return the hospital holding rank `num`
# within every state.
#
# Args:
#   outcome: one of "heart attack", "heart failure", "pneumonia".
#   num:     "best", "worst", or a positive integer rank.
# Returns: a data.frame with one row per state — the hospital at the
# requested rank (NA when the state has fewer ranked hospitals than
# `num`) and the state code.
rankall <- function(outcome, num = "best" ){
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  state <- data$State
  state <- sort(unique(state))
  hospital <- rep("", length(state))
  # seq_along() instead of 1:length(state): 1:length(x) iterates c(1, 0)
  # when x is empty and would index out of bounds.
  for (i in seq_along(state)) {
    statedata <- data[data$State == state[i], ]
    # Map the requested outcome to its 30-day mortality column.
    if (outcome == 'heart attack') {
      death <- as.numeric(statedata[, 11])
    } else if (outcome == 'heart failure') {
      death <- as.numeric(statedata[, 17])
    } else if (outcome == 'pneumonia') {
      death <- as.numeric(statedata[, 23])
    } else {
      stop("invalid outcome")
    }
    a <- rank(death, na.last = NA)  # ranks over non-NA mortality values only
    if (num == "best") {
      r <- 1
    } else if (num == "worst") {
      r <- length(a)
    } else if (num <= length(a)) {
      r <- num
    } else {
      r <- NA  # requested rank exceeds the number of ranked hospitals
    }
    if (is.na(r)) {
      hospital[i] <- NA
    } else {
      # Ties in mortality are broken alphabetically by hospital name.
      hospital[i] <- statedata$Hospital.Name[order(death, statedata$Hospital.Name)[r]]
    }
  }
  return(data.frame(hospital = hospital, state = state))
}
| /rankall.R | no_license | Eisforinnovate/outcomeofcaremeasurements | R | false | false | 1,030 | r | rankall <- function(outcome, num = "best" ){
# Body of rankall(outcome, num) — the function header sits on the dataset
# separator line above.  Ranks hospitals within every state for `outcome`.
data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
state <- data$State
state <- sort(unique(state))
hospital <- rep("", length(state))
for (i in 1:length(state)) {
statedata<- data[data$State==state[i],]
# Map the requested outcome to its 30-day mortality column.
if (outcome == 'heart attack') {
death <- as.numeric(statedata[,11])
} else if (outcome == 'heart failure') {
death <- as.numeric(statedata[,17])
} else if (outcome == 'pneumonia') {
death <- as.numeric(statedata[,23])
} else {
stop("invalid outcome")
}
a <- rank(death, na.last=NA)  # ranks over non-NA mortality values only
if (num=="best") {
r <- 1
} else if (num =="worst") {
r <- length(a)
} else if (num <= length(a) ) {
r <- num
} else {
r <- NA  # requested rank exceeds the number of ranked hospitals
}
if (is.na(r)) {
hospital[i] <- NA
} else {
# Ties in mortality are broken alphabetically by hospital name.
hospital[i] <- statedata$Hospital.Name[order(death, statedata$Hospital.Name)[r]]
}
}
return(data.frame(hospital=hospital, state=state))
}
|
rm(list=ls())
library(rjags)
library(jagsUI)
library(plyr)
set.seed(2013)
# RE4_10.1.7 (works), adding an extra covariate with random effects in beta coefficient (crop diversity)
# ---- Data simulation ----
# 30 species
# 5 years (unbalanced number of transects per year)
# Observation model calculated with Half Normal detection function
# Sigma site-year specific
### Random sp intercept
### Random effect in observer (site-year)
# Lambda site-year specific
### Random sp-year intercept (include different baseline abundance per species and also per year)
### 2 areas variables (area 1 sp-specific)
# Detection function: HN
# Half-normal detection function: probability of detecting an individual
# at perpendicular distance x given scale parameter sig (vectorised in x).
g <- function(x, sig) {
  exp(-(x * x) / (2 * sig * sig))
}
# Number of transects per year (unbalanced)
nSites <- seq(74,106, by = 8) # number of line transect surveys (DIFFERENT BY YEAR)
max.sites <- max(nSites) # Maximun number of sites is the last year
total.sites <- sum(nSites)
strip.width <- 500 # strip half-width, w (in this example only one side of the line transect is surveyed)
dist.breaks <- c(0,25,50,100,200,500)
int.w <- diff(dist.breaks) # width of distance categories (v)
midpt <- (int.w/2) + dist.breaks[-6]
nG <- length(dist.breaks)-1
# Year effect
yrs <- 1:5 # eight years
nyrs <- length(yrs)
# Number of species
nSpecies <- 30
#################################
# ---- Detection component ----
# RANDOM INTERCEPT PER SPECIES
mu.sig.sp <- log(50) # Mean of species-level random effect on intercept of sigma
sig.sig.sp <- 0.25 # SD of species-level random effect on intercept of sigma
s.alpha <- rnorm(nSpecies, mu.sig.sp, sig.sig.sp)
# Look at distribution of sigma intercepts (to see if I chose reasonable)
hist(exp(rnorm(1000, mu.sig.sp, sig.sig.sp)))
# RANDOM EFFECT IN OBSERVER
obs <- 1:9
nobs <- length(obs)
sig.sig.obs <- 0.25
# Observer effect in sigma
sig.obs <- rnorm(length(obs), 0, sig.sig.obs) # Mean is 0 because is adding noise around the mean
# Observer covariate
ob.id <- matrix(sample(1:9, max.sites*nyrs, replace = TRUE), nrow = max.sites, ncol = nyrs) # Matix with IDs
ob <- matrix(sig.obs[ob.id], nrow = max.sites, ncol = nyrs) # Matrix with intercept for simulating data
#SIGMA: sigma[j,t,s]
sigma <- exp(array(rep(s.alpha, each = max.sites*nyrs), c(max.sites, nyrs, nSpecies))
+ replicate(nSpecies,ob))
# ---- Abundance component: random effect accross sites, zone covariate and 2 area covariates
# RANDOM INTERCEPT PER SPECIES-YEAR
# Mean abundance and sd per species and year
mu.lam.alpha.spyear <- log(1.5)
sig.lam.alpha.spyear <- 0.5
# Intercept
lam.alpha.spyear <- rnorm(nyrs*nSpecies, mu.lam.alpha.spyear, sig.lam.alpha.spyear)
lam.alpha.spyear_data <- array(rep(lam.alpha.spyear, each = max.sites), c(max.sites, nyrs, nSpecies))
# RANDOM EFFECT IN SPECIES-SITE (Independent of year)
sig.lam.spsite <- 0.3
lam.spsite <- rnorm(nSpecies*max.sites, 0, sig.lam.spsite)
ar <- array(rep(lam.spsite, each = nyrs), c(nyrs, max.sites, nSpecies)) # I need to make it in wide format and transpose it to long
ar1 <- list()
for (i in 1:nSpecies){
df <- as.data.frame(ar[,,i])
df <- t(df)
df <- as.matrix(df)
ar1[[i]] <- df
}
lam.spsite_data <- array(as.numeric(unlist(ar1)), c(max.sites, nyrs, nSpecies))
#AREA COVARIATE (SITE AND YEAR)
#Coefficients
mu.a1 <- 0.2
sig.a1 <- 0.8
b.a1 <- rnorm(nSpecies, mu.a1, sig.a1)
mu.a2 <- -0.5
sig.a2 <- 0.3
b.a2 <- rnorm(nSpecies, mu.a2, sig.a2)
#Covariates
a1 <- abs(rnorm(max.sites*nyrs, 10, 5)) # Although it makes sense to make them positive, it wouldnt matter (you put them on the exp)
a2 <- abs(rnorm(max.sites*nyrs, 5, 2.5))
#SCALED
area1_mean <- mean(a1)
area1_sd <- sd(a1)
area1_sc <- (a1 - area1_mean) / area1_sd
area2_mean <- mean(a2)
area2_sd <- sd(a2)
area2_sc <- (a2 - area2_mean) / area2_sd
# LANDSCAPE COVARIATES
# Coefficients
mu.cd <- 1
sig.cd <- 0.2
bCropdiv <- rnorm(nSpecies, mu.cd, sig.cd)
# Covariates
crop_diversity <- round(abs(rnorm(max.sites*nyrs, 7, 3)),0)
#SCALED
cropdiv_mean <- mean(crop_diversity)
cropdiv_sd <- sd(crop_diversity)
cropdiv_sc <- (crop_diversity - cropdiv_mean) / cropdiv_sd
lam <- exp(lam.alpha.spyear_data +
lam.spsite_data +
array(rep(b.a1,each=max.sites*nyrs)*area1_sc, c(max.sites, nyrs, nSpecies)) +
array(rep(b.a2,each=max.sites*nyrs)*area2_sc, c(max.sites, nyrs, nSpecies)) +
array(rep(bCropdiv,each=max.sites*nyrs)*cropdiv_sc, c(max.sites, nyrs, nSpecies)) )
# Abundance per site year (different abundance per species): N.sysp[j,t,s]
N <- list()
N.sysp <- list()
for (s in 1:nSpecies){
for (t in 1:nyrs){
N[[t]] <- rpois(nSites[t],lam[1:nSites[t], t, s])
}
NLong <- ldply(N,cbind) # 1 long vector with all abundances per site and year
N3 <- ldply(N,rbind)
N.sitesYears <- t(N3) # N per site and year stored in a matrix with columns
N.sysp[[s]] <- N.sitesYears
}
N.sysp <- array(as.numeric(unlist(N.sysp)), c(max.sites,nyrs,nSpecies))
# Total number of individuals in all sampled transects per year
N.tot <- lapply(N,sum) # I only need this list for now, is the same abundance
# ---- Simulate continuous distance data ----
# Create list to store the counts:
yList <- list()
for (i in 1:nyrs){
yList[[i]] <- array(0, c(nSites[i], nG, nSpecies)) }
# EXPLANATION DIMENSIONS: yList[[t]][[j,k,s]]
yList[[1]][,,1] # This is the counts for the year 1 for species 1
yList[[2]][,3,1] # This is the counts for the year 1 for species 1 in bin 3
yList[[1]][,,1]
N.sysp[,,1] # This is the real number of individuals per stite and year of species 1
sigma[,,1] # And this is sigma per site and year for species 1
for (s in 1:nSpecies){
for (t in 1:nyrs){
for(j in 1:max.sites) {
if(N.sysp[j,t,s] == 0 | is.na(N.sysp[j,t,s]))
next
# Distance from observer to the individual
d <- runif(N.sysp[j,t,s], 0, strip.width) # Uniform distribution of animals
# Simulates one distance for each individual in the site (N[j])
p <- g(x=d, sig=sigma[j,t,s]) # Detection probability. Sigma is site-time specific
seen <- rbinom(N.sysp[j,t,s], 1, p)
if(all(seen == 0))
next
d1 <- d[seen==1] # The distance data for seen individuals
counts <- table(cut(d1, dist.breaks, include.lowest=TRUE))
yList[[t]][j,,s] <- counts # The number of detections in each distance interval per year and species
}}}
y.sum.sysp <- list()
y.sum.sites <- list()
for (t in 1:nyrs){
for(s in 1:nSpecies){
y.sum.sysp[[s]] <- rowSums(yList[[t]][,,s]) # List with counts per site in a given year t. Each element is one species
}
y.sum.sites[[t]] <- y.sum.sysp # Stored by years (y.sum.sites[[t]][[s]]): 8 elements with 15 subelements each
}
# Check things in loop because I dont want to do a 3diiimensional mistake
rowSums(yList[[2]][,,2])
h <- yList[[2]]
class(h)
rowSums(h[,,1])
# Arrange it by species (so that each element of the list is a species)
y.sum.sites.sp <- list()
for (s in 1:nSpecies){
y.sum.sites.sp[[s]] <- sapply(y.sum.sites, function(x) x[s]) }
# From here I do the same than in script 8.1 but applied to the list of lists
y.sum <- list()
for (s in 1:nSpecies){
store <- unlist(y.sum.sites.sp[s], recursive = F) # Convert it into single list to use ldply later
y.sum.sites2 <- ldply(store,rbind) # Put all together (in rows)
y.sum[[s]] <- t(y.sum.sites2)}
# y.sum is a list of species counts.
# Contains y per site and year stored in a matrix with columns.
#############################################
# ---- Convert data to JAGS format ----
nind.sp <- list()
for (s in 1:nSpecies){
nind.year.sp <- lapply(y.sum.sites.sp[[s]],sum)
nind.sp[[s]] <- sum(unlist(nind.year.sp, use.names = F)) # Just to know, but jags only wants the sum
}
nind <- do.call(sum, nind.sp)
# Get one long matrix with counts and sites per species (columns)
yLong.sp <- matrix(NA, nrow = total.sites, ncol = nSpecies)
for (s in 1:nSpecies){
yLong.na <- unlist(as.data.frame(y.sum[[s]]), use.names = F) # With NA included (useful if I ever make a model estimating abundance in sites with no information)
yLong.sp[,s] <- yLong.na[complete.cases(yLong.na)]
}
# All this index and variables are site-speficic (not species specific) so they stay like this
sitesYears <- NULL # I did that loop but the normal version actually works, since is an index per site-year
for (i in 1:nyrs){
sitesYears <- c(sitesYears,c(1:nSites[i]))
}
# Create one long vector with covariate values
a1.m <- matrix(area1_sc, nrow = max.sites, ncol = nyrs, byrow = F) # I need to make it from the same matrix
a2.m <- matrix(area2_sc, nrow = max.sites, ncol = nyrs, byrow = F)
cropdiv.m <- matrix(cropdiv_sc, nrow = max.sites, ncol = nyrs, byrow = F)
area1 <- NULL
for (i in 1:nyrs){
area1 <- c(area1,a1.m[1:nSites[i],i])
}
area2 <- NULL
for (i in 1:nyrs){
area2 <- c(area2,a2.m[1:nSites[i],i])
}
cdiv <- NULL
for (i in 1:nyrs){
cdiv <- c(cdiv, cropdiv.m[1:nSites[i],i])
}
ob <- NULL
for (i in 1:nyrs){
ob <- c(ob,ob.id[1:nSites[i], i])
}
# Get one long vector with years, distance category and site
site <- dclass <- year <- NULL
for (s in 1:nSpecies){
for (t in 1:nyrs){
for(j in 1:max.sites){
if (y.sum[[s]][j,t] == 0 | is.na(y.sum[[s]][j,t]))
next
site <- c(site, rep(j, y.sum[[s]][j,t])) # site index: repeat the site as many times as counts in that site (for multi model??)
# vector of sites through years (disregarding distance class)
year <- c(year, rep(t, y.sum[[s]][j,t]))
for (k in 1:nG){
if (yList[[t]][j,k,s] == 0) # Refers for the ditance classes to the list with years and bins
next
dclass <- c(dclass, rep(k, yList[[t]][j,k,s]))} # Distance category index
}}}
# Get one long vector for each site-year combination of each dclass observation
# (so, at which j, or siteyear is every observation or dclass corresponding?)
n.allSiteYear <- sum(nSites)
siteYear.dclass <- NULL
###RS: Fixed index to map dclass onto site-year combinations (all species together)
for (s in 1:nSpecies){
for (i in 1:n.allSiteYear){
siteYear.dclass <- c(siteYear.dclass,rep(i, yLong.sp[i,s]))}
}
# Fixed index to map dclass in species (so that it matches with the dimensions (s,j,K))
sp.dclass <- NULL
for (s in 1:nSpecies){
for (i in 1:n.allSiteYear){
sp.dclass <- c(sp.dclass,rep(s, yLong.sp[i,s]))}
}
# Create one matrix for indexing year when calculating abundance per year in JAGS (works for all species)
allyears <- NULL
for (i in 1:nyrs){
allyears <- c(allyears,rep(yrs[i],nSites[i]))
}
m <- data.frame(allyears = allyears)
m$allyears <- as.factor(m$allyears)
indexYears <- model.matrix(~ allyears-1, data = m)
# ---- Compile data for JAGS model ----
data1 <- list(nyears = nyrs, max.sites = max.sites, nG=nG, siteYear.dclass = siteYear.dclass, int.w=int.w, strip.width = strip.width,
y = yLong.sp, n.allSiteYear = n.allSiteYear, nind=nind, dclass=dclass, sitesYears = sitesYears, indexYears = indexYears, allyears = allyears,
area1 = area1, area2 = area2, cdiv = cdiv, ob = ob, nobs = nobs, db = dist.breaks,
nSpecies = nSpecies, sp.dclass = sp.dclass, nyrs = nyrs)
# ---- JAGS model ----
setwd("D:/PhD/Third chapter/Data/Model")
cat("model{
# PRIORS
# SPECIES SPECIFIC PARAMETERS (random effects)
for (s in 1:nSpecies){ # Random intercept for sigma (dif detection per species)
asig[s] ~ dnorm(mu_s, tau_s)
b.a1[s] ~ dnorm(mu_a1, tau_a1)
b.a2[s] ~ dnorm(mu_a2, tau_a2)
bCropdiv[s] ~ dnorm(mu_cd, tau_cd)
}
for(s in 1:nSpecies){ # Random intercept for lambda (dif abundance per species and year)
for(t in 1:nyrs){
alam[s,t] ~ dnorm(mu_l,tau_l)}}
for (s in 1:nSpecies){ # Random effect for lambda (dif abundance per species and site)
for (i in 1:max.sites){
spsite[s,i] ~ dnorm(0, tau_spsite) }}
# Hyperparameters of species level random effects
mu_s ~ dnorm(0,0.01) # Hyperparameters for sigma intercept
tau_s <- 1/(sig_s*sig_s)
sig_s ~ dunif(0,500)
mu_l ~ dnorm(0,0.01) # Hyperparameters for lambda intercept
tau_l <- 1/(sig_l*sig_l)
sig_l ~ dunif(0,500)
mu_a1 ~ dnorm(0,0.01) # Hyperparameters for beta coefficient area1
tau_a1 <- 1/(sig_a1*sig_a1)
sig_a1 ~ dunif(0,500)
mu_a2 ~ dnorm(0,0.01) # Hyperparameters for beta coefficient area1
tau_a2 <- 1/(sig_a2*sig_a2)
sig_a2 ~ dunif(0,500)
mu_cd ~ dnorm(0,0.01) # Hyperparameters for beta coefficient area1
tau_cd <- 1/(sig_cd*sig_cd)
sig_cd ~ dunif(0,500)
tau_spsite <- 1/(sig_spsite*sig_spsite) # Hyperparameter for site random effect in lambda
sig_spsite ~ dunif(0,500)
# PRIORS FOR SIGMA
sig.sig.ob ~ dunif(0, 10) # Random effects for sigma per observer
tau.sig.ob <- 1/(sig.sig.ob*sig.sig.ob)
#Random observer effect for sigma
for (o in 1:nobs){
sig.obs[o] ~ dnorm(0, tau.sig.ob)
}
for(i in 1:nind){
dclass[i] ~ dcat(fct[sp.dclass[i],siteYear.dclass[i], 1:nG])
}
for (s in 1:nSpecies){
for(j in 1:n.allSiteYear){
sigma[s,j] <- exp(asig[s] + sig.obs[ob[j]])
f.0[s,j] <- 2 * dnorm(0,0, 1/sigma[s,j]^2)
# Construct cell probabilities for nG multinomial cells (distance categories) PER SITE
for(k in 1:nG){
up[s,j,k]<-pnorm(db[k+1], 0, 1/sigma[s,j]^2) ##db are distance bin limits
low[s,j,k]<-pnorm(db[k], 0, 1/sigma[s,j]^2)
p[s,j,k]<- 2 * (up[s,j,k] - low[s,j,k])
pi[s,j,k] <- int.w[k] / strip.width
f[s,j,k]<- p[s,j,k]/f.0[s,j]/int.w[k] ## detection prob. in distance category k
fc[s,j,k]<- f[s,j,k] * pi[s,j,k] ## pi=percent area of k; drops out if constant
fct[s,j,k]<-fc[s,j,k]/pcap[s,j]
}
pcap[s,j] <- sum(fc[s,j,1:nG]) # Different per site and year (sum over all bins)
y[j,s] ~ dbin(pcap[s,j], N[j,s])
N[j,s] ~ dpois(lambda[j,s])
lambda[j,s] <- exp(alam[s,allyears[j]] + spsite[s,sitesYears[j]]
+ b.a1[s]*area1[j] + b.a2[s]*area2[j] + bCropdiv[s]*cdiv[j] )
} }
# Derived parameters
#for (i in 1:nyears){
#Ntotal[i] <- sum(N[s]*indexYears[,i])
#}
for (s in 1:nSpecies){
for (i in 1:nyears){
Ntotal[i,s] <- sum(N[,s]*indexYears[,i]) }}
}", fill=TRUE,
file = "s_HNintegral_sigma[alpha(s)_obs(j,t)]_lambda[alpha(s,t)_sp.site(s,j)_covAreas(s,j,t)_covLands1(s,j,t)].txt")
# Inits
Nst <- yLong.sp + 1
inits <- function(){list(mu_l = runif(1), sig_l = 0.2, sig_spsite = runif(1),
N=Nst,
mu_a1 = runif(1), sig_a1 = runif(1), mu_a2 = runif(1), sig_a2 = runif(1),
mu_cd = runif(1), sig_cd = runif(1),
sig.sig.ob = runif(1),
mu_s = runif(1, log(30), log(50)) , sig_s = runif(1)
)}
# Params
params <- c( "mu_l", "sig_l", "sig_spsite",
"mu_a1", "sig_a1", "mu_a2", "sig_a2",
"mu_cd", "sig_cd",
"sig.sig.ob",
"mu_s", "sig_s")
# MCMC settings
nc <- 3 ; ni <- 200000 ; nb <- 30000 ; nt <- 10
# With jagsUI
out <- jags(data1, inits, params, "s_HNintegral_sigma[alpha(s)_obs(j,t)]_lambda[alpha(s,t)_sp.site(s,j)_covAreas(s,j,t)_covLands1(s,j,t)].txt", n.chain = nc,
n.thin = nt, n.iter = ni, n.burnin = nb, parallel = TRUE)
setwd("D:/ANA/Results/chapter3")
save(out, file = "14_S.RData")
print(out)
summary <- as.data.frame(as.matrix(out$summary))
# To compare:
data_comp <- list(mu.a1 = mu.a1, sig.a1 = sig.a1, mu.a2 = mu.a2, sig.a2 = sig.a2, mu.cd = mu.cd, sig.cd = sig.cd,
mu.lam.alpha.spyear = mu.lam.alpha.spyear, sig.lam.spsite = sig.lam.spsite,
sig.lam.alpha.spyear = sig.lam.alpha.spyear,
sig.sig.obs = sig.sig.obs,
mu.sig.sp = mu.sig.sp,
sig.sig.sp = sig.sig.sp
)
traceplot(out, parameters = c("mu_l", "sig_l", "sig_spsite",
"mu_a1", "sig_a1", "mu_a2", "sig_a2",
"mu_cd", "sig_cd",
"sig.sig.ob",
"mu_s", "sig_s"))
###########################################################################################
| /Ch. 2-3/Ch. 3/Simulations/14. S_x_[dcat_area_HNintegral]_sigma[alpha(s)_obs(j,t)]_lambda[alpha(s,t)_sp.site(s,j)_covAreas(s,j,t)_covLands1(s,j,t)].r | no_license | anasanz/MyScripts | R | false | false | 16,474 | r | rm(list=ls())
library(rjags)
library(jagsUI)
library(plyr)
set.seed(2013)
# RE4_10.1.7 (works), adding an extra covariate with random effects in beta coefficient (crop diversity)
# ---- Data simulation ----
# 30 species
# 5 years (unbalanced number of transects per year)
# Observation model calculated with Half Normal detection function
# Sigma site-year specific
### Random sp intercept
### Random effect in observer (site-year)
# Lambda site-year specific
### Random sp-year intercept (include different baseline abundance per species and also per year)
### 2 areas variables (area 1 sp-specific)
# Detection function: HN
g <- function(x, sig) exp(-x^2/(2*sig^2))
# ---- Survey design constants ----
# Number of transects per year (unbalanced)
nSites <- seq(74,106, by = 8) # number of line transect surveys (DIFFERENT BY YEAR)
max.sites <- max(nSites) # Maximum number of sites (reached in the last year)
total.sites <- sum(nSites)
strip.width <- 500 # strip half-width, w (in this example only one side of the line transect is surveyed)
dist.breaks <- c(0,25,50,100,200,500)
int.w <- diff(dist.breaks) # width of distance categories (v)
midpt <- (int.w/2) + dist.breaks[-6] # midpoint of each distance bin
nG <- length(dist.breaks)-1 # number of distance bins
# Year effect
yrs <- 1:5 # five years
nyrs <- length(yrs)
# Number of species
nSpecies <- 30
#################################
# ---- Detection component ----
# Builds sigma[j,t,s]: half-normal scale per site j, year t, species s,
# from a species-level random intercept plus an observer random effect.
# RANDOM INTERCEPT PER SPECIES
mu.sig.sp <- log(50) # Mean of species-level random effect on intercept of sigma
sig.sig.sp <- 0.25 # SD of species-level random effect on intercept of sigma
s.alpha <- rnorm(nSpecies, mu.sig.sp, sig.sig.sp)
# Look at distribution of sigma intercepts (sanity check that the values are reasonable)
hist(exp(rnorm(1000, mu.sig.sp, sig.sig.sp)))
# RANDOM EFFECT IN OBSERVER
obs <- 1:9
nobs <- length(obs)
sig.sig.obs <- 0.25
# Observer effect in sigma
sig.obs <- rnorm(length(obs), 0, sig.sig.obs) # Mean is 0 because it only adds noise around the species mean
# Observer covariate: random observer assigned to each site-year combination
ob.id <- matrix(sample(1:9, max.sites*nyrs, replace = TRUE), nrow = max.sites, ncol = nyrs) # Matrix with observer IDs
ob <- matrix(sig.obs[ob.id], nrow = max.sites, ncol = nyrs) # Matrix with observer effects for simulating data
#SIGMA: sigma[j,t,s] = exp(species intercept + observer effect)
sigma <- exp(array(rep(s.alpha, each = max.sites*nyrs), c(max.sites, nyrs, nSpecies))
+ replicate(nSpecies,ob))
# ---- Abundance component: random effect accross sites, zone covariate and 2 area covariates
# Builds lam[j,t,s]: expected abundance per site j, year t, species s.
# RANDOM INTERCEPT PER SPECIES-YEAR
# Mean abundance and sd per species and year
mu.lam.alpha.spyear <- log(1.5)
sig.lam.alpha.spyear <- 0.5
# Intercept: one draw per species-year combination
lam.alpha.spyear <- rnorm(nyrs*nSpecies, mu.lam.alpha.spyear, sig.lam.alpha.spyear)
lam.alpha.spyear_data <- array(rep(lam.alpha.spyear, each = max.sites), c(max.sites, nyrs, nSpecies))
# RANDOM EFFECT IN SPECIES-SITE (Independent of year)
sig.lam.spsite <- 0.3
lam.spsite <- rnorm(nSpecies*max.sites, 0, sig.lam.spsite)
# The effect is drawn per species-site; replicate it across years, then
# transpose each species' (year x site) slice to (site x year).
ar <- array(rep(lam.spsite, each = nyrs), c(nyrs, max.sites, nSpecies)) # I need to make it in wide format and transpose it to long
ar1 <- list()
for (i in 1:nSpecies){
df <- as.data.frame(ar[,,i])
df <- t(df)
df <- as.matrix(df)
ar1[[i]] <- df
}
lam.spsite_data <- array(as.numeric(unlist(ar1)), c(max.sites, nyrs, nSpecies))
#AREA COVARIATE (SITE AND YEAR)
#Coefficients (species-specific slopes drawn from community-level hyperparameters)
mu.a1 <- 0.2
sig.a1 <- 0.8
b.a1 <- rnorm(nSpecies, mu.a1, sig.a1)
mu.a2 <- -0.5
sig.a2 <- 0.3
b.a2 <- rnorm(nSpecies, mu.a2, sig.a2)
#Covariates
a1 <- abs(rnorm(max.sites*nyrs, 10, 5)) # Although it makes sense to make them positive, it wouldnt matter (you put them on the exp)
a2 <- abs(rnorm(max.sites*nyrs, 5, 2.5))
#SCALED (standardized to mean 0, sd 1)
area1_mean <- mean(a1)
area1_sd <- sd(a1)
area1_sc <- (a1 - area1_mean) / area1_sd
area2_mean <- mean(a2)
area2_sd <- sd(a2)
area2_sc <- (a2 - area2_mean) / area2_sd
# LANDSCAPE COVARIATES
# Coefficients
mu.cd <- 1
sig.cd <- 0.2
bCropdiv <- rnorm(nSpecies, mu.cd, sig.cd)
# Covariates: integer-valued crop diversity per site-year
crop_diversity <- round(abs(rnorm(max.sites*nyrs, 7, 3)),0)
#SCALED
cropdiv_mean <- mean(crop_diversity)
cropdiv_sd <- sd(crop_diversity)
cropdiv_sc <- (crop_diversity - cropdiv_mean) / cropdiv_sd
# Expected abundance: log-linear model. Each rep(b, each=max.sites*nyrs) expands
# the species slopes so the site-year covariate vector recycles once per species.
lam <- exp(lam.alpha.spyear_data +
lam.spsite_data +
array(rep(b.a1,each=max.sites*nyrs)*area1_sc, c(max.sites, nyrs, nSpecies)) +
array(rep(b.a2,each=max.sites*nyrs)*area2_sc, c(max.sites, nyrs, nSpecies)) +
array(rep(bCropdiv,each=max.sites*nyrs)*cropdiv_sc, c(max.sites, nyrs, nSpecies)) )
# Abundance per site year (different abundance per species): N.sysp[j,t,s]
# Poisson draws of true abundance; years have different numbers of sites, so
# rbind pads the shorter years with NA (NA marks unsurveyed site-years).
N <- list()
N.sysp <- list()
for (s in 1:nSpecies){
for (t in 1:nyrs){
N[[t]] <- rpois(nSites[t],lam[1:nSites[t], t, s])
}
NLong <- ldply(N,cbind) # 1 long vector with all abundances per site and year
N3 <- ldply(N,rbind)
N.sitesYears <- t(N3) # N per site and year stored in a matrix with columns
N.sysp[[s]] <- N.sitesYears
}
N.sysp <- array(as.numeric(unlist(N.sysp)), c(max.sites,nyrs,nSpecies))
# Total number of individuals in all sampled transects per year
# NOTE(review): N and N.tot keep only the values of the LAST species iterated.
N.tot <- lapply(N,sum) # I only need this list for now, is the same abundance
# ---- Simulate continuous distance data ----
# For every true individual: draw a distance, apply the half-normal detection
# probability, keep detected ones, and bin their distances into nG categories.
# Create list to store the counts:
yList <- list()
for (i in 1:nyrs){
yList[[i]] <- array(0, c(nSites[i], nG, nSpecies)) }
# EXPLANATION DIMENSIONS: yList[[t]][[j,k,s]]
yList[[1]][,,1] # Counts for year 1, species 1
yList[[2]][,3,1] # Counts for year 2, species 1, in bin 3
yList[[1]][,,1]
N.sysp[,,1] # This is the real number of individuals per site and year of species 1
sigma[,,1] # And this is sigma per site and year for species 1
for (s in 1:nSpecies){
for (t in 1:nyrs){
for(j in 1:max.sites) {
# NA marks site-years not surveyed in year t (j > nSites[t]); skip both cases.
if(N.sysp[j,t,s] == 0 | is.na(N.sysp[j,t,s]))
next
# Distance from observer to the individual
d <- runif(N.sysp[j,t,s], 0, strip.width) # Uniform distribution of animals
# Simulates one distance for each individual in the site (N[j])
p <- g(x=d, sig=sigma[j,t,s]) # Detection probability. Sigma is site-time specific
seen <- rbinom(N.sysp[j,t,s], 1, p)
if(all(seen == 0))
next
d1 <- d[seen==1] # The distance data for seen individuals
counts <- table(cut(d1, dist.breaks, include.lowest=TRUE))
yList[[t]][j,,s] <- counts # The number of detections in each distance interval per year and species
}}}
# Collapse distance bins: total count per site, stored by year then species.
y.sum.sysp <- list()
y.sum.sites <- list()
for (t in 1:nyrs){
for(s in 1:nSpecies){
y.sum.sysp[[s]] <- rowSums(yList[[t]][,,s]) # List with counts per site in a given year t. Each element is one species
}
y.sum.sites[[t]] <- y.sum.sysp # Stored by years (y.sum.sites[[t]][[s]]): nyrs elements with nSpecies sub-elements each
}
# Sanity checks (left from development; results are not stored)
rowSums(yList[[2]][,,2])
h <- yList[[2]])
class(h)
rowSums(h[,,1])
# Arrange it by species (so that each element of the list is a species)
y.sum.sites.sp <- list()
for (s in 1:nSpecies){
y.sum.sites.sp[[s]] <- sapply(y.sum.sites, function(x) x[s]) }
# From here I do the same than in script 8.1 but applied to the list of lists
y.sum <- list()
for (s in 1:nSpecies){
store <- unlist(y.sum.sites.sp[s], recursive = F) # Convert it into single list to use ldply later
y.sum.sites2 <- ldply(store,rbind) # Put all together (in rows)
y.sum[[s]] <- t(y.sum.sites2)}
# y.sum is a list of species counts.
# Contains y per site and year stored in a matrix with columns.
#############################################
# ---- Convert data to JAGS format ----
# Flattens the simulated arrays into the long vectors/indices the model needs.
# Total number of detected individuals across all species/years.
nind.sp <- list()
for (s in 1:nSpecies){
nind.year.sp <- lapply(y.sum.sites.sp[[s]],sum)
nind.sp[[s]] <- sum(unlist(nind.year.sp, use.names = F)) # Just to know, but jags only wants the sum
}
nind <- do.call(sum, nind.sp)
# Get one long matrix with counts and sites per species (columns)
yLong.sp <- matrix(NA, nrow = total.sites, ncol = nSpecies)
for (s in 1:nSpecies){
yLong.na <- unlist(as.data.frame(y.sum[[s]]), use.names = F) # With NA included (useful if I ever make a model estimating abundance in sites with no information)
yLong.sp[,s] <- yLong.na[complete.cases(yLong.na)]
}
# All these indices and variables are site-specific (not species-specific) so they stay like this
# sitesYears: within-year site index (1..nSites[t]) concatenated over years.
sitesYears <- NULL
for (i in 1:nyrs){
sitesYears <- c(sitesYears,c(1:nSites[i]))
}
# Create one long vector with covariate values
# NOTE(review): byrow = F / use.names = F rely on reassignable F; FALSE is safer.
a1.m <- matrix(area1_sc, nrow = max.sites, ncol = nyrs, byrow = F) # I need to make it from the same matrix
a2.m <- matrix(area2_sc, nrow = max.sites, ncol = nyrs, byrow = F)
cropdiv.m <- matrix(cropdiv_sc, nrow = max.sites, ncol = nyrs, byrow = F)
area1 <- NULL
for (i in 1:nyrs){
area1 <- c(area1,a1.m[1:nSites[i],i])
}
area2 <- NULL
for (i in 1:nyrs){
area2 <- c(area2,a2.m[1:nSites[i],i])
}
cdiv <- NULL
for (i in 1:nyrs){
cdiv <- c(cdiv, cropdiv.m[1:nSites[i],i])
}
ob <- NULL
for (i in 1:nyrs){
ob <- c(ob,ob.id[1:nSites[i], i])
}
# Get one long vector with years, distance category and site
# (one entry per detected individual; growth-in-loop is slow but n is small here)
site <- dclass <- year <- NULL
for (s in 1:nSpecies){
for (t in 1:nyrs){
for(j in 1:max.sites){
if (y.sum[[s]][j,t] == 0 | is.na(y.sum[[s]][j,t]))
next
site <- c(site, rep(j, y.sum[[s]][j,t])) # site index: repeat the site as many times as counts in that site (for multi model??)
# vector of sites through years (disregarding distance class)
year <- c(year, rep(t, y.sum[[s]][j,t]))
for (k in 1:nG){
if (yList[[t]][j,k,s] == 0) # Refers for the distance classes to the list with years and bins
next
dclass <- c(dclass, rep(k, yList[[t]][j,k,s]))} # Distance category index
}}}
# Get one long vector for each site-year combination of each dclass observation
# (so, at which j, or siteyear is every observation or dclass corresponding?)
n.allSiteYear <- sum(nSites)
siteYear.dclass <- NULL
###RS: Fixed index to map dclass onto site-year combinations (all species together)
for (s in 1:nSpecies){
for (i in 1:n.allSiteYear){
siteYear.dclass <- c(siteYear.dclass,rep(i, yLong.sp[i,s]))}
}
# Fixed index to map dclass in species (so that it matches with the dimensions (s,j,K))
sp.dclass <- NULL
for (s in 1:nSpecies){
for (i in 1:n.allSiteYear){
sp.dclass <- c(sp.dclass,rep(s, yLong.sp[i,s]))}
}
# Create one matrix for indexing year when calculating abundance per year in JAGS (works for all species)
allyears <- NULL
for (i in 1:nyrs){
allyears <- c(allyears,rep(yrs[i],nSites[i]))
}
# Dummy (one-hot) year matrix: indexYears[j, t] == 1 when site-year j is year t.
m <- data.frame(allyears = allyears)
m$allyears <- as.factor(m$allyears)
indexYears <- model.matrix(~ allyears-1, data = m)
# ---- Compile data for JAGS model ----
data1 <- list(nyears = nyrs, max.sites = max.sites, nG=nG, siteYear.dclass = siteYear.dclass, int.w=int.w, strip.width = strip.width,
y = yLong.sp, n.allSiteYear = n.allSiteYear, nind=nind, dclass=dclass, sitesYears = sitesYears, indexYears = indexYears, allyears = allyears,
area1 = area1, area2 = area2, cdiv = cdiv, ob = ob, nobs = nobs, db = dist.breaks,
nSpecies = nSpecies, sp.dclass = sp.dclass, nyrs = nyrs)
# ---- JAGS model ----
# Writes the BUGS-language model to a text file for jagsUI.
# NOTE(review): hard-coded absolute path; assumes it exists — consider a
# relative path or here::here() for portability.
setwd("D:/PhD/Third chapter/Data/Model")
# The string below is the JAGS model itself (its internal '#' comments belong
# to the model file, not to this R script) and is written verbatim.
cat("model{
# PRIORS
# SPECIES SPECIFIC PARAMETERS (random effects)
for (s in 1:nSpecies){ # Random intercept for sigma (dif detection per species)
asig[s] ~ dnorm(mu_s, tau_s)
b.a1[s] ~ dnorm(mu_a1, tau_a1)
b.a2[s] ~ dnorm(mu_a2, tau_a2)
bCropdiv[s] ~ dnorm(mu_cd, tau_cd)
}
for(s in 1:nSpecies){ # Random intercept for lambda (dif abundance per species and year)
for(t in 1:nyrs){
alam[s,t] ~ dnorm(mu_l,tau_l)}}
for (s in 1:nSpecies){ # Random effect for lambda (dif abundance per species and site)
for (i in 1:max.sites){
spsite[s,i] ~ dnorm(0, tau_spsite) }}
# Hyperparameters of species level random effects
mu_s ~ dnorm(0,0.01) # Hyperparameters for sigma intercept
tau_s <- 1/(sig_s*sig_s)
sig_s ~ dunif(0,500)
mu_l ~ dnorm(0,0.01) # Hyperparameters for lambda intercept
tau_l <- 1/(sig_l*sig_l)
sig_l ~ dunif(0,500)
mu_a1 ~ dnorm(0,0.01) # Hyperparameters for beta coefficient area1
tau_a1 <- 1/(sig_a1*sig_a1)
sig_a1 ~ dunif(0,500)
mu_a2 ~ dnorm(0,0.01) # Hyperparameters for beta coefficient area1
tau_a2 <- 1/(sig_a2*sig_a2)
sig_a2 ~ dunif(0,500)
mu_cd ~ dnorm(0,0.01) # Hyperparameters for beta coefficient area1
tau_cd <- 1/(sig_cd*sig_cd)
sig_cd ~ dunif(0,500)
tau_spsite <- 1/(sig_spsite*sig_spsite) # Hyperparameter for site random effect in lambda
sig_spsite ~ dunif(0,500)
# PRIORS FOR SIGMA
sig.sig.ob ~ dunif(0, 10) # Random effects for sigma per observer
tau.sig.ob <- 1/(sig.sig.ob*sig.sig.ob)
#Random observer effect for sigma
for (o in 1:nobs){
sig.obs[o] ~ dnorm(0, tau.sig.ob)
}
for(i in 1:nind){
dclass[i] ~ dcat(fct[sp.dclass[i],siteYear.dclass[i], 1:nG])
}
for (s in 1:nSpecies){
for(j in 1:n.allSiteYear){
sigma[s,j] <- exp(asig[s] + sig.obs[ob[j]])
f.0[s,j] <- 2 * dnorm(0,0, 1/sigma[s,j]^2)
# Construct cell probabilities for nG multinomial cells (distance categories) PER SITE
for(k in 1:nG){
up[s,j,k]<-pnorm(db[k+1], 0, 1/sigma[s,j]^2) ##db are distance bin limits
low[s,j,k]<-pnorm(db[k], 0, 1/sigma[s,j]^2)
p[s,j,k]<- 2 * (up[s,j,k] - low[s,j,k])
pi[s,j,k] <- int.w[k] / strip.width
f[s,j,k]<- p[s,j,k]/f.0[s,j]/int.w[k] ## detection prob. in distance category k
fc[s,j,k]<- f[s,j,k] * pi[s,j,k] ## pi=percent area of k; drops out if constant
fct[s,j,k]<-fc[s,j,k]/pcap[s,j]
}
pcap[s,j] <- sum(fc[s,j,1:nG]) # Different per site and year (sum over all bins)
y[j,s] ~ dbin(pcap[s,j], N[j,s])
N[j,s] ~ dpois(lambda[j,s])
lambda[j,s] <- exp(alam[s,allyears[j]] + spsite[s,sitesYears[j]]
+ b.a1[s]*area1[j] + b.a2[s]*area2[j] + bCropdiv[s]*cdiv[j] )
} }
# Derived parameters
#for (i in 1:nyears){
#Ntotal[i] <- sum(N[s]*indexYears[,i])
#}
for (s in 1:nSpecies){
for (i in 1:nyears){
Ntotal[i,s] <- sum(N[,s]*indexYears[,i]) }}
}", fill=TRUE,
file = "s_HNintegral_sigma[alpha(s)_obs(j,t)]_lambda[alpha(s,t)_sp.site(s,j)_covAreas(s,j,t)_covLands1(s,j,t)].txt")
# ---- Run the model ----
# Inits: initial N must be >= observed counts for dbin to be valid, hence y + 1.
Nst <- yLong.sp + 1
inits <- function(){list(mu_l = runif(1), sig_l = 0.2, sig_spsite = runif(1),
N=Nst,
mu_a1 = runif(1), sig_a1 = runif(1), mu_a2 = runif(1), sig_a2 = runif(1),
mu_cd = runif(1), sig_cd = runif(1),
sig.sig.ob = runif(1),
mu_s = runif(1, log(30), log(50)) , sig_s = runif(1)
)}
# Params: community-level hyperparameters to monitor
params <- c( "mu_l", "sig_l", "sig_spsite",
"mu_a1", "sig_a1", "mu_a2", "sig_a2",
"mu_cd", "sig_cd",
"sig.sig.ob",
"mu_s", "sig_s")
# MCMC settings: chains, iterations, burn-in, thinning
nc <- 3 ; ni <- 200000 ; nb <- 30000 ; nt <- 10
# With jagsUI
# FIX(review): the argument is n.chains; "n.chain" only worked through R's
# partial argument matching and would break if jagsUI added another n.ch* arg.
out <- jags(data1, inits, params, "s_HNintegral_sigma[alpha(s)_obs(j,t)]_lambda[alpha(s,t)_sp.site(s,j)_covAreas(s,j,t)_covLands1(s,j,t)].txt", n.chains = nc,
n.thin = nt, n.iter = ni, n.burnin = nb, parallel = TRUE)
setwd("D:/ANA/Results/chapter3")
save(out, file = "14_S.RData")
print(out)
# NOTE(review): "summary" shadows base::summary in the workspace; a name like
# summary_df would be safer if the script grew.
summary <- as.data.frame(as.matrix(out$summary))
# To compare: true (simulated) hyperparameter values next to the estimates above
data_comp <- list(mu.a1 = mu.a1, sig.a1 = sig.a1, mu.a2 = mu.a2, sig.a2 = sig.a2, mu.cd = mu.cd, sig.cd = sig.cd,
mu.lam.alpha.spyear = mu.lam.alpha.spyear, sig.lam.spsite = sig.lam.spsite,
sig.lam.alpha.spyear = sig.lam.alpha.spyear,
sig.sig.obs = sig.sig.obs,
mu.sig.sp = mu.sig.sp,
sig.sig.sp = sig.sig.sp
)
# Visual convergence check of the monitored hyperparameters
traceplot(out, parameters = c("mu_l", "sig_l", "sig_spsite",
"mu_a1", "sig_a1", "mu_a2", "sig_a2",
"mu_cd", "sig_cd",
"sig.sig.ob",
"mu_s", "sig_s"))
###########################################################################################
|
#' Time-series of the 1971 influenza epidemic in Tristan-da-Cunha
#'
#' A dataset containing the daily incidence recorded during the 1971 influenza A/H3N2 two-wave epidemic on the island of Tristan-da-Cunha.
#'
#' \itemize{
#' \item \code{date} calendar date of the record
#' \item \code{time} day of record since beginning of epidemic
#' \item \code{Inc} daily count incidence of influenza-like-illness
#' }
#'
#' @format A data frame with 59 rows and 3 variables
#' @source \url{http://eutils.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi?dbfrom=pubmed&id=4511951&retmode=ref&cmd=prlinks}
#' @name FluTdC1971
NULL
#' Time-series of a measles outbreak
#'
#' A dataset containing the weekly incidence recorded during a recent outbreak of measles in Europe.
#'
#' \itemize{
#' \item \code{time} week of the record
#' \item \code{Inc} weekly recorded incidence of measles
#' }
#'
#' @format A data frame with 37 rows and 2 variables
#' @name measles
NULL
#' A simple deterministic SIR model with constant population size
#'
#' A simple deterministic SIR model with constant population size, uniform prior and Poisson observation.
#'
#' \itemize{
#' \item \code{name} character.
#' \item \code{state.names} character vector.
#' \item \code{theta.names} character vector.
#' \item \code{simulate} \R-function.
#' \item \code{rPointObs} \R-function.
#' \item \code{dprior} \R-function.
#' \item \code{dPointObs} \R-function.
#' }
#'
#' Look at the documentation of \code{\link{fitmodel}} for more details about each of these elements.
#' You can look at the code of the \R-functions by typing \code{SIR$simulate} for instance. There are some comments included.
#'
#' @format A \code{\link{fitmodel}} object, that is, a list with the following elements:
#' @name SIR
NULL
#' A simple stochastic SIR model with constant population size
#'
#' A simple stochastic SIR model with constant population size, uniform prior and Poisson observation.
#'
#' \itemize{
#' \item \code{name} character.
#' \item \code{state.names} character vector.
#' \item \code{theta.names} character vector.
#' \item \code{simulate} \R-function.
#' \item \code{rPointObs} \R-function.
#' \item \code{dprior} \R-function.
#' \item \code{dPointObs} \R-function.
#' }
#'
#' Look at the documentation of \code{\link{fitmodel}} for more details about each of these elements.
#' You can look at the code of the \R-functions by typing \code{SIR_stoch$simulate} for instance. There are some comments included.
#'
#' @format A \code{\link{fitmodel}} object, that is a list with the following elements:
#' @name SIR_stoch
NULL
#' A simple deterministic SIR model with constant population size and reporting rate
#'
#' A simple deterministic SIR model with constant population size, uniform prior and Poisson observation with reporting rate.
#'
#' \itemize{
#' \item \code{name} character.
#' \item \code{state.names} character vector.
#' \item \code{theta.names} character vector.
#' \item \code{simulate} \R-function.
#' \item \code{rPointObs} \R-function.
#' \item \code{dprior} \R-function.
#' \item \code{dPointObs} \R-function.
#' }
#'
#' Look at the documentation of \code{\link{fitmodel}} for more details about each of these elements.
#' You can look at the code of the \R-functions by typing \code{SIR_reporting$simulate} for instance. There are some comments included.
#'
#' @format A \code{\link{fitmodel}} object, that is a list with the following elements:
#' @name SIR_reporting
NULL
#' A simple deterministic SIR model with constant population size and parameters on the exponential scale
#'
#' A simple deterministic SIR model with constant population size, uniform prior and Poisson observation. The parameters are transformed using an exponential transformation.
#'
#' \itemize{
#' \item \code{name} character.
#' \item \code{state.names} character vector.
#' \item \code{theta.names} character vector.
#' \item \code{simulate} \R-function.
#' \item \code{rPointObs} \R-function.
#' \item \code{dprior} \R-function.
#' \item \code{dPointObs} \R-function.
#' }
#'
#' Look at the documentation of \code{\link{fitmodel}} for more details about each of these elements.
#' You can look at the code of the \R-functions by typing \code{SIR_exp$simulate} for instance. There are some comments included.
#'
#' @format A \code{\link{fitmodel}} object, that is a list with the following elements:
#' @name SIR_exp
NULL
#' The deterministic SEITL model with constant population size
#'
#' The deterministic SEITL model with constant population size, uniform prior and Poisson observation with reporting rate.
#'
#' \itemize{
#' \item \code{name} character.
#' \item \code{state.names} character vector.
#' \item \code{theta.names} character vector.
#' \item \code{simulate} \R-function.
#' \item \code{rPointObs} \R-function.
#' \item \code{dprior} \R-function.
#' \item \code{dPointObs} \R-function.
#' }
#'
#' Look at the documentation of \code{\link{fitmodel}} for more details about each of these elements.
#' You can look at the code of the \R-functions by typing \code{SEITL_deter$simulate} for instance. There are some comments included.
#'
#' @format A \code{\link{fitmodel}} object, that is a list with the following elements:
#' @name SEITL_deter
NULL
#' The stochastic SEITL model with constant population size
#'
#' The stochastic SEITL model with constant population size, uniform prior and Poisson observation with reporting rate.
#'
#' \itemize{
#' \item \code{name} character.
#' \item \code{state.names} character vector.
#' \item \code{theta.names} character vector.
#' \item \code{simulate} \R-function.
#' \item \code{rPointObs} \R-function.
#' \item \code{dprior} \R-function.
#' \item \code{dPointObs} \R-function.
#' }
#'
#' Look at the documentation of \code{\link{fitmodel}} for more details about each of these elements.
#' You can look at the code of the \R-functions by typing \code{SEITL_stoch$simulate} for instance. There are some comments included.
#'
#' @format A \code{\link{fitmodel}} object, that is a list with the following elements:
#' @name SEITL_stoch
NULL
#' The deterministic SEIT2L model with constant population size
#'
#' The deterministic SEIT2L model with constant population size, uniform prior and Poisson observation with reporting rate.
#'
#' \itemize{
#' \item \code{name} character.
#' \item \code{state.names} character vector.
#' \item \code{theta.names} character vector.
#' \item \code{simulate} \R-function.
#' \item \code{rPointObs} \R-function.
#' \item \code{dprior} \R-function.
#' \item \code{dPointObs} \R-function.
#' }
#'
#' Look at the documentation of \code{\link{fitmodel}} for more details about each of these elements.
#' You can look at the code of the \R-functions by typing \code{SEIT2L_deter$simulate} for instance. There are some comments included.
#'
#' @format A \code{\link{fitmodel}} object, that is a list with the following elements:
#' @name SEIT2L_deter
NULL
#' The stochastic SEIT2L model with constant population size
#'
#' The stochastic SEIT2L model with constant population size, uniform prior and Poisson observation with reporting rate.
#'
#' \itemize{
#' \item \code{name} character.
#' \item \code{state.names} character vector.
#' \item \code{theta.names} character vector.
#' \item \code{simulate} \R-function.
#' \item \code{rPointObs} \R-function.
#' \item \code{dprior} \R-function.
#' \item \code{dPointObs} \R-function.
#' }
#'
#' Look at the documentation of \code{\link{fitmodel}} for more details about each of these elements.
#' You can look at the code of the \R-functions by typing \code{SEIT2L_stoch$simulate} for instance. There are some comments included.
#'
#' @format A \code{\link{fitmodel}} object, that is a list with the following elements:
#' @name SEIT2L_stoch
NULL
#' The deterministic SEIT4L model with constant population size
#'
#' The deterministic SEIT4L model with constant population size, uniform prior and Poisson observation with reporting rate.
#'
#' \itemize{
#' \item \code{name} character.
#' \item \code{state.names} character vector.
#' \item \code{theta.names} character vector.
#' \item \code{simulate} \R-function.
#' \item \code{rPointObs} \R-function.
#' \item \code{dprior} \R-function.
#' \item \code{dPointObs} \R-function.
#' }
#'
#' Look at the documentation of \code{\link{fitmodel}} for more details about each of these elements.
#' You can look at the code of the \R-functions by typing \code{SEIT4L_deter$simulate} for instance. There are some comments included.
#'
#' @format A \code{\link{fitmodel}} object, that is a list with the following elements:
#' @name SEIT4L_deter
NULL
#' The stochastic SEIT4L model with constant population size
#'
#' The stochastic SEIT4L model with constant population size, uniform prior and Poisson observation with reporting rate.
#'
#' \itemize{
#' \item \code{name} character.
#' \item \code{state.names} character vector.
#' \item \code{theta.names} character vector.
#' \item \code{simulate} \R-function.
#' \item \code{rPointObs} \R-function.
#' \item \code{dprior} \R-function.
#' \item \code{dPointObs} \R-function.
#' }
#'
#' Look at the documentation of \code{\link{fitmodel}} for more details about each of these elements.
#' You can look at the code of the \R-functions by typing \code{SEIT4L_stoch$simulate} for instance. There are some comments included.
#'
#' @format A \code{\link{fitmodel}} object, that is a list with the following elements:
#' @name SEIT4L_stoch
NULL
| /R/fitR-package.r | no_license | arbstat/fitR | R | false | false | 9,621 | r | #' Time-series of the 1971 influenza epidemic in Tristan-da-Cunha
#'
#' A dataset containing the daily incidence recorded during the 1971 influenza A/H3N2 two-wave epidemic on the island of Tristan-da-Cunha.
#'
#' \itemize{
#' \item \code{date} calendar date of the record
#' \item \code{time} day of record since beginning of epidemic
#' \item \code{Inc} daily count incidence of influenza-like-illness
#' }
#'
#' @format A data frame with 59 rows and 3 variables
#' @source \url{http://eutils.ncbi.nlm.nih.gov/entrez/eutils/elink.fcgi?dbfrom=pubmed&id=4511951&retmode=ref&cmd=prlinks}
#' @name FluTdC1971
NULL
#' Time-series of a measles outbreak
#'
#' A dataset containing the weekly incidence recorded during a recent outbreak of measles in Europe.
#'
#' \itemize{
#' \item \code{time} week of the record
#' \item \code{Inc} weekly recorded incidence of measles
#' }
#'
#' @format A data frame with 37 rows and 2 variables
#' @name measles
NULL
#' A simple deterministic SIR model with constant population size
#'
#' A simple deterministic SIR model with constant population size, uniform prior and Poisson observation.
#'
#' \itemize{
#' \item \code{name} character.
#' \item \code{state.names} character vector.
#' \item \code{theta.names} character vector.
#' \item \code{simulate} \R-function.
#' \item \code{rPointObs} \R-function.
#' \item \code{dprior} \R-function.
#' \item \code{dPointObs} \R-function.
#' }
#'
#' Look at the documentation of \code{\link{fitmodel}} for more details about each of these elements.
#' You can look at the code of the \R-functions by typing \code{SIR$simulate} for instance. There are some comments included.
#'
#' @format A \code{\link{fitmodel}} object, that is a list with the following elements:
#' @name SIR
NULL
#' A simple stochastic SIR model with constant population size
#'
#' A simple stochastic SIR model with constant population size, uniform prior and Poisson observation.
#'
#' \itemize{
#' \item \code{name} character.
#' \item \code{state.names} character vector.
#' \item \code{theta.names} character vector.
#' \item \code{simulate} \R-function.
#' \item \code{rPointObs} \R-function.
#' \item \code{dprior} \R-function.
#' \item \code{dPointObs} \R-function.
#' }
#'
#' Look at the documentation of \code{\link{fitmodel}} for more details about each of these elements.
#' You can look at the code of the \R-functions by typing \code{SIR_stoch$simulate} for instance. There are some comments included.
#'
#' @format A \code{\link{fitmodel}} object, that is a list with the following elements:
#' @name SIR_stoch
NULL
#' A simple deterministic SIR model with constant population size and reporting rate
#'
#' A simple deterministic SIR model with constant population size, uniform prior and Poisson observation with reporting rate.
#'
#' \itemize{
#' \item \code{name} character.
#' \item \code{state.names} character vector.
#' \item \code{theta.names} character vector.
#' \item \code{simulate} \R-function.
#' \item \code{rPointObs} \R-function.
#' \item \code{dprior} \R-function.
#' \item \code{dPointObs} \R-function.
#' }
#'
#' Look at the documentation of \code{\link{fitmodel}} for more details about each of these elements.
#' You can look at the code of the \R-functions by typing \code{SIR_reporting$simulate} for instance. There are some comments included.
#'
#' @format A \code{\link{fitmodel}} object, that is a list with the following elements:
#' @name SIR_reporting
NULL
#' A simple deterministic SIR model with constant population size and parameters on the exponential scale
#'
#' A simple deterministic SIR model with constant population size, uniform prior and Poisson observation. The parameters are transformed using an exponential transformation.
#'
#' \itemize{
#' \item \code{name} character.
#' \item \code{state.names} character vector.
#' \item \code{theta.names} character vector.
#' \item \code{simulate} \R-function.
#' \item \code{rPointObs} \R-function.
#' \item \code{dprior} \R-function.
#' \item \code{dPointObs} \R-function.
#' }
#'
#' Look at the documentation of \code{\link{fitmodel}} for more details about each of these elements.
#' You can look at the code of the \R-functions by typing \code{SIR_exp$simulate} for instance. There are some comments included.
#'
#' @format A \code{\link{fitmodel}} object, that is a list with the following elements:
#' @name SIR_exp
NULL
#' The deterministic SEITL model with constant population size
#'
#' The deterministic SEITL model with constant population size, uniform prior and Poisson observation with reporting rate.
#'
#' \itemize{
#' \item \code{name} character.
#' \item \code{state.names} character vector.
#' \item \code{theta.names} character vector.
#' \item \code{simulate} \R-function.
#' \item \code{rPointObs} \R-function.
#' \item \code{dprior} \R-function.
#' \item \code{dPointObs} \R-function.
#' }
#'
#' Look at the documentation of \code{\link{fitmodel}} for more details about each of these elements.
#' You can look at the code of the \R-functions by typing \code{SEITL_deter$simulate} for instance. There are some comments included.
#'
#' @format A \code{\link{fitmodel}} object, that is a list with the following elements:
#' @name SEITL_deter
NULL
#' The stochastic SEITL model with constant population size
#'
#' The stochastic SEITL model with constant population size, uniform prior and Poisson observation with reporting rate.
#'
#' \itemize{
#' \item \code{name} character.
#' \item \code{state.names} character vector.
#' \item \code{theta.names} character vector.
#' \item \code{simulate} \R-function.
#' \item \code{rPointObs} \R-function.
#' \item \code{dprior} \R-function.
#' \item \code{dPointObs} \R-function.
#' }
#'
#' Look at the documentation of \code{\link{fitmodel}} for more details about each of these elements.
#' You can look at the code of the \R-functions by typing \code{SEITL_stoch$simulate} for instance. There are some comments included.
#'
#' @format A \code{\link{fitmodel}} object, that is a list with the following elements:
#' @name SEITL_stoch
NULL
#' The deterministic SEIT2L model with constant population size
#'
#' The deterministic SEIT2L model with constant population size, uniform prior and Poisson observation with reporting rate.
#'
#' \itemize{
#' \item \code{name} character.
#' \item \code{state.names} character vector.
#' \item \code{theta.names} character vector.
#' \item \code{simulate} \R-function.
#' \item \code{rPointObs} \R-function.
#' \item \code{dprior} \R-function.
#' \item \code{dPointObs} \R-function.
#' }
#'
#' Look at the documentation of \code{\link{fitmodel}} for more details about each of these elements.
#' You can look at the code of the \R-functions by typing \code{SEIT2L_deter$simulate} for instance. There are some comments included.
#'
#' @format A \code{\link{fitmodel}} object, that is a list with the following elements:
#' @name SEIT2L_deter
NULL
#' The stochastic SEIT2L model with constant population size
#'
#' The stochastic SEIT2L model with constant population size, uniform prior and Poisson observation with reporting rate.
#'
#' \itemize{
#' \item \code{name} character.
#' \item \code{state.names} character vector.
#' \item \code{theta.names} character vector.
#' \item \code{simulate} \R-function.
#' \item \code{rPointObs} \R-function.
#' \item \code{dprior} \R-function.
#' \item \code{dPointObs} \R-function.
#' }
#'
#' Look at the documentation of \code{\link{fitmodel}} for more details about each of these elements.
#' You can look at the code of the \R-functions by typing \code{SEIT2L_stoch$simulate} for instance. There are some comments included.
#'
#' @format A \code{\link{fitmodel}} object, that is a list with the following elements:
#' @name SEIT2L_stoch
NULL
#' The deterministic SEIT4L model with constant population size
#'
#' The deterministic SEIT4L model with constant population size, uniform prior and Poisson observation with reporting rate.
#'
#' \itemize{
#' \item \code{name} character.
#' \item \code{state.names} character vector.
#' \item \code{theta.names} character vector.
#' \item \code{simulate} \R-function.
#' \item \code{rPointObs} \R-function.
#' \item \code{dprior} \R-function.
#' \item \code{dPointObs} \R-function.
#' }
#'
#' Look at the documentation of \code{\link{fitmodel}} for more details about each of these elements.
#' You can look at the code of the \R-functions by typing \code{SEIT4L_deter$simulate} for instance. There are some comments included.
#'
#' @format A \code{\link{fitmodel}} object, that is a list with the following elements:
#' @name SEIT4L_deter
NULL
#' The stochastic SEIT4L model with constant population size
#'
#' The stochastic SEIT4L model with constant population size, uniform prior and Poisson observation with reporting rate.
#'
#' \itemize{
#' \item \code{name} character.
#' \item \code{state.names} character vector.
#' \item \code{theta.names} character vector.
#' \item \code{simulate} \R-function.
#' \item \code{rPointObs} \R-function.
#' \item \code{dprior} \R-function.
#' \item \code{dPointObs} \R-function.
#' }
#'
#' Look at the documentation of \code{\link{fitmodel}} for more details about each of these elements.
#' You can look at the code of the \R-functions by typing \code{SEIT4L_stoch$simulate} for instance. There are some comments included.
#'
#' @format A \code{\link{fitmodel}} object, that is a list with the following elements:
#' @name SEIT4L_stoch
NULL
|
## function to compute the correlation between clusters
## in reduced dimension (e.g. principal component) space
#' @param seurat_object A seurat object containing reduced dimension embeddings and cluster ids.
#' @param dr_type The dimension reduction type, e.g. "pca" or "cca"
#' @param comps The components to use.
#' @param cor_method The correlation method to use.
#' @param cluster_average If false, the median pairwise correlation is used.
clusterCor <- function(seurat_object=NULL,
                       dr_type="pca",
                       comps=NULL,
                       cor_method="pearson",
                       cluster_average=FALSE)
{
    ## Compute a cluster x cluster correlation matrix in reduced-dimension
    ## space, either as the median of pairwise cell-cell correlations or as
    ## the correlation of per-cluster mean embeddings.
    ## Fix: the original body referenced an undefined global `s` instead of
    ## the `seurat_object` argument.
    if (is.null(seurat_object)) {
        stop("'seurat_object' must be supplied")
    }
    ## transpose embeddings so that rows = components, columns = cells
    pcomps <- t(seurat_object@dr[[dr_type]]@cell.embeddings)
    ## default to all available components if none were specified
    if (is.null(comps)) {
        comps <- seq_len(nrow(pcomps))
    }
    pcomps <- pcomps[comps, , drop=FALSE]
    idents <- seurat_object@ident
    ## numerically sorted cluster ids, named "C<id>" for the output dimnames
    clusters <- sort(as.numeric(as.vector(unique(idents))))
    names(clusters) <- paste0("C", clusters)
    n <- length(clusters)
    if (!cluster_average)
    {
        ## median of all pairwise cell-cell correlations between clusters
        rmat <- matrix(ncol=n, nrow=n)
        for (i in seq_len(n))
        {
            xclust <- clusters[i]
            ## drop=FALSE keeps a matrix even for single-cell clusters
            x <- pcomps[, names(idents)[idents == xclust], drop=FALSE]
            for (j in i:n)
            {
                yclust <- clusters[j]
                y <- pcomps[, names(idents)[idents == yclust], drop=FALSE]
                pairwise_cors <- cor(x, y, method=cor_method)
                med_cor <- median(pairwise_cors)
                ## fill both triangles: the matrix is symmetric
                rmat[i, j] <- med_cor
                rmat[j, i] <- med_cor
            }
        }
    } else {
        ## correlation of the per-cluster mean of each component
        res <- data.frame(row.names=rownames(pcomps))
        for (i in seq_len(n))
        {
            xclust <- clusters[i]
            xname <- names(clusters)[i]
            x <- rowMeans(pcomps[, names(idents)[idents == xclust], drop=FALSE])
            res[[xname]] <- x[rownames(res)]
        }
        rmat <- cor(res, method=cor_method)
    }
    rownames(rmat) <- names(clusters)
    colnames(rmat) <- names(clusters)
    rmat
}
| /tenxutils/R/Cluster.R | permissive | MatthieuRouland/tenx | R | false | false | 1,817 | r | ## function to compute the correlation between clusters
## in reduced dimension (e.g. principle component) space
#' @param seurat_object A seurat object containing reduced dimension embeddings and cluster ids.
#' @param dr_type The dimension reduction type, e.g. "pca" or "cca"
#' @param comps The components to use.
#' @param cor_method The correlation method to use.
#' @param cluster_average If false, the median pairwise correlation is used.
clusterCor <- function(seurat_object=NULL,
                       dr_type="pca",
                       comps=NULL,
                       cor_method="pearson",
                       cluster_average=FALSE)
{
    ## Compute a cluster x cluster correlation matrix in reduced-dimension
    ## space, either as the median of pairwise cell-cell correlations or as
    ## the correlation of per-cluster mean embeddings.
    ## Fix: the original body referenced an undefined global `s` instead of
    ## the `seurat_object` argument.
    if (is.null(seurat_object)) {
        stop("'seurat_object' must be supplied")
    }
    ## transpose embeddings so that rows = components, columns = cells
    pcomps <- t(seurat_object@dr[[dr_type]]@cell.embeddings)
    ## default to all available components if none were specified
    if (is.null(comps)) {
        comps <- seq_len(nrow(pcomps))
    }
    pcomps <- pcomps[comps, , drop=FALSE]
    idents <- seurat_object@ident
    ## numerically sorted cluster ids, named "C<id>" for the output dimnames
    clusters <- sort(as.numeric(as.vector(unique(idents))))
    names(clusters) <- paste0("C", clusters)
    n <- length(clusters)
    if (!cluster_average)
    {
        ## median of all pairwise cell-cell correlations between clusters
        rmat <- matrix(ncol=n, nrow=n)
        for (i in seq_len(n))
        {
            xclust <- clusters[i]
            ## drop=FALSE keeps a matrix even for single-cell clusters
            x <- pcomps[, names(idents)[idents == xclust], drop=FALSE]
            for (j in i:n)
            {
                yclust <- clusters[j]
                y <- pcomps[, names(idents)[idents == yclust], drop=FALSE]
                pairwise_cors <- cor(x, y, method=cor_method)
                med_cor <- median(pairwise_cors)
                ## fill both triangles: the matrix is symmetric
                rmat[i, j] <- med_cor
                rmat[j, i] <- med_cor
            }
        }
    } else {
        ## correlation of the per-cluster mean of each component
        res <- data.frame(row.names=rownames(pcomps))
        for (i in seq_len(n))
        {
            xclust <- clusters[i]
            xname <- names(clusters)[i]
            x <- rowMeans(pcomps[, names(idents)[idents == xclust], drop=FALSE])
            res[[xname]] <- x[rownames(res)]
        }
        rmat <- cor(res, method=cor_method)
    }
    rownames(rmat) <- names(clusters)
    colnames(rmat) <- names(clusters)
    rmat
}
|
## Exploratory Data Analysis course project -- plot 1.
## Downloads the UCI household power consumption data set and plots a
## histogram of Global Active Power for 2007-02-01 and 2007-02-02.
## Fix: use one consistent data directory (the original mixed "~/..." and
## "/Users/ybrhane/..." spellings of the same path) and only download the
## ~20 MB archive if it is not already present.
data_dir <- "~/Documents/Yonathan/Courera/Exploratory"
if (!dir.exists(data_dir)) {
    dir.create(data_dir, recursive = TRUE)
}
setwd(data_dir)
getwd()
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
zipFile <- "household_power_consumption.zip"
if (!file.exists(zipFile)) {
    download.file(fileUrl, destfile = zipFile, method = "curl")
}
unzip(zipFile)
list.files()
## "?" encodes missing values in this data set
power <- read.table("household_power_consumption.txt", header = TRUE,
                    sep = ";", na.strings = "?")
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
## keep the two days of interest (renamed from the misleading power.2years)
power.2days <- subset(power, Date >= "2007-02-01" & Date <= "2007-02-02")
str(power.2days)
# Plot 1: histogram of global active power
png("plot1.png", width = 480, height = 480, units = "px")
with(power.2days,
     hist(Global_active_power, col = "red",
          xlab = "Global Active Power (kilowatts)",
          main = "Global Active Power"))
dev.off()
| /plot1.R | no_license | ybrhane/ExData_Plotting1 | R | false | false | 1,008 | r | if(!dir.exists("~/Documents/Yonathan/Courera/Exploratory")){
dir.create("~/Documents/Yonathan/Courera/Exploratory")
}
setwd("/Users/ybrhane/Documents/Yonathan/Courera/Exploratory")
getwd()
fileUrl = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl,
destfile = "/Users/ybrhane/Documents/Yonathan/Courera/Exploratory/household_power_consumption.zip",
method = "curl")
unzip("household_power_consumption.zip")
list.files()
power = read.table("household_power_consumption.txt",header=T, sep=';', na.strings="?")
power$Date <- as.Date(power$Date,format="%d/%m/%Y")
power.2years <- subset(power,Date >= "2007-02-01" & Date <= "2007-02-02")
str(power.2years)
#power.2days$Time <- strptime(power.2days$Time,format="%H:%M:%S")
# Plot1
png("plot1.png",width = 480, height = 480,units = "px")
with(power.2years,hist(Global_active_power,col="red",xlab="Global Active Power (kilowatts)",main="Global Active Power"))
dev.off()
|
###############################
### Libraries and functions ###
###############################
options(import.path=c("/homes/hannah/projects/GWAS",
                      "/homes/hannah/projects"))
options(bitmapType = 'cairo', device = 'pdf')
modules::import_package('ggplot2', attach=TRUE)
smooth <- modules::import('utils/smoothAddR2')
optparse <- modules::import_package('optparse')
bgenie <- modules::import("bgenieResults")

###############
## analysis ###
###############

## command line arguments ####
option_list <- list(
    optparse$make_option(c("-d", "--directory"), action="store",
        dest="directory",
        type="character", help="Path to directory with digital-heart
        bgenie association results [default: %default].", default=NULL),
    # optparse short flags must be a single character; the original "-ukb"
    # is rejected by make_option, so the short form is "-u"
    optparse$make_option(c("-u", "--ukbdir"), action="store",
        dest="ukbdir",
        type="character", help="Path to directory with ukbb significant
        association results [default: %default].", default=NULL),
    optparse$make_option(c("-n", "--name"), action="store", dest="name",
        type="character", help="Name of analysis; has to be the same as
        in naming bgenie files [default: %default].", default=NULL),
    # args$nloci is consumed further down in this script for the
    # multiple-testing adjustment but was never declared as an option
    optparse$make_option(c("--nloci"), action="store", dest="nloci",
        default=NULL, type="integer",
        help="Number of independent loci tested; used to adjust the
        replication significance threshold [default: %default]."),
    optparse$make_option(c("--showProgress"), action="store_true",
        dest="verbose",
        default=FALSE, type="logical", help="If set, progress messages
        about analyses are printed to standard out [default: %default]."),
    optparse$make_option(c("--debug"), action="store_true",
        dest="debug", default=FALSE, type="logical",
        help="If set, predefined arguments are used to test the script
        [default: %default].")
)
args <- optparse$parse_args(optparse$OptionParser(option_list=option_list))
# hard-coded arguments for interactive testing
if (args$debug) {
    args <- list()
    args$directory <- "~/data/digital-heart/gwas/FD"
    args$ukbdir <- "~/data/ukbb/ukb-hrt/gwas/180628_fractal_dimension"
    args$name <- 'slices'
    args$verbose <- TRUE
}
directory <- args$directory
ukbdir <- args$ukbdir
name <- args$name
verbose <- args$verbose
## genome-wide association results digital-heart ####
# Read per-chromosome bgenie association results (chr 1-22) for the
# digital-heart cohort (maf=0.001 filter parameter) and stack them.
slices_dh <- lapply(1:22, bgenie$readBgenieOutput, directory=directory,
                    name=paste("bgenie_", name, "_lm_st_chr", sep=""),
                    maf=0.001, biallelicOnly=FALSE)
slices_dh <- do.call(rbind, slices_dh)
if (verbose) message("Write file with genome-wide association results")
# save combined genome-wide results next to the per-chromosome inputs
write.table(slices_dh,
            file=paste(directory, "/bgenie_", name, "_lm_st_genomewide.csv",
                       sep=""),
            sep=",", quote=FALSE, col.names=TRUE, row.names=FALSE)
## ld-filtered, significant genome-wide association results ukb ####
slices_ukb <- read.table(paste(ukbdir,
                               "/Pseudomultitrait_slices_sig5e08_ldFiltered.txt",
                               sep=""),
                         sep=",", stringsAsFactors=FALSE, header=TRUE)
# LD filter misses these two SNPs, manually remove
slices_ukb <- slices_ukb[!slices_ukb$rsid %in% c("rs12214483", "rs117953218"),]
## format ukb betas and p-values per slice ####
# melt effect sizes to long format: one row per (SNP, slice)
slices_ukb_beta <- cbind(rsid=slices_ukb$rsid,
                         slices_ukb[,grepl('beta', colnames(slices_ukb))])
colnames(slices_ukb_beta) <- gsub("_beta", "", colnames(slices_ukb_beta))
beta <- reshape2::melt(slices_ukb_beta, id.var='rsid', value.name='beta',
                       variable.name='slice')
# same reshaping for the -log10 p-values
slices_ukb_logp <- cbind(rsid=slices_ukb$rsid,
                         slices_ukb[, grepl('log10p', colnames(slices_ukb))])
colnames(slices_ukb_logp) <- gsub("\\.log10p", "", colnames(slices_ukb_logp))
logp <- reshape2::melt(slices_ukb_logp, id.var='rsid', value.name='logp',
                       variable.name='slice')
# NOTE(review): relies on both melts returning rows in the same
# (rsid, slice) order; holds because both are derived from the same rows
ukb <- cbind(beta, logp=logp$logp)
ukb$rsid <- as.character(ukb$rsid)
## ukb significant slice pvalues and betas ####
# keep UKB SNPs also present in the digital-heart results, then restrict
# to genome-wide significant (p < 5e-8) SNP/slice pairs
ukb <- ukb[ukb$rsid %in% slices_dh$rsid, ]
ukb_sig_slices <- dplyr::filter(ukb, logp > -log10(5e-8))
# number of significant SNPs per slice (used to match null draws below)
slices2sample <- as.data.frame(table(ukb_sig_slices$slice),
                               stringsAsFactors=FALSE)
colnames(slices2sample) <- c("slice", "freq")
slices2sample <- slices2sample[slices2sample$freq != 0,]
observedBetas <- ukb_sig_slices$beta
## betas and pvalues of significant snps and slice in ukb ####
# digital-heart effect sizes / -log10 p-values with rsid (column 2) kept
dh_beta <- slices_dh[,c(2, which(grepl('beta', colnames(slices_dh))))]
colnames(dh_beta) <- gsub("_beta", "", colnames(dh_beta))
dh_logp <- slices_dh[,c(2, which(grepl('log10p', colnames(slices_dh))))]
colnames(dh_logp) <- gsub("-log10p", "", colnames(dh_logp))
# For every significant UKB (SNP, slice) pair, look up the matching
# digital-heart beta and -log10 p.  apply() coerces each row to character,
# so x[1] is the rsid and x[2] the slice; numeric columns are restored below.
dh_sig_slices <- data.frame(t(apply(ukb_sig_slices, 1, function(x) {
    pos <- which(dh_beta$rsid %in% x[1])
    beta_tmp <- dh_beta[pos, colnames(dh_beta) %in% x[2]]
    logp_tmp <- dh_logp[pos, colnames(dh_logp) %in% x[2]]
    return(rbind(x[2], beta_tmp, logp_tmp))
})), stringsAsFactors=FALSE)
colnames(dh_sig_slices) <- c("slice", "beta", "logp")
dh_sig_slices$rsid <- ukb_sig_slices$rsid
dh_sig_slices <- dplyr::select(dh_sig_slices, rsid, slice, beta, logp)
dh_sig_slices$beta <- as.numeric(dh_sig_slices$beta)
dh_sig_slices$logp <- as.numeric(dh_sig_slices$logp)
## concordance of effect sizes ####
# sum of sign products = (#concordant - #discordant) effect-size pairs
nrObservations <- length(sign(dh_sig_slices$beta * observedBetas))
observedConcordance <- sum(sign(dh_sig_slices$beta * observedBetas))
## Empirical concordance: matched for number of sig snps and slices ####
# null distribution: repeatedly draw the same number of random SNPs per
# slice from the digital-heart results and recompute the concordance
nrsig <- nrow(ukb_sig_slices)
nrtotal <- nrow(slices_dh)
draws <- 100000
seed <- 101
set.seed(seed)
testConcordance <- sapply(1:draws, function(dummy) {
    randomSnps <- dh_beta[sample(nrtotal, nrsig),
                          colnames(dh_beta) %in% slices2sample$slice]
    randomBetas <- unlist(sapply(1:nrow(slices2sample), function(x) {
        pos <- colnames(randomSnps) == slices2sample$slice[x]
        sample(randomSnps[,pos], slices2sample$freq[x])
    }))
    return(sum(sign(randomBetas * observedBetas)))
})
# one-sided empirical p-value, bounded below by 1/draws
empiricalConcordance <-
    length(which(testConcordance >= observedConcordance))/draws
if (empiricalConcordance == 0) {
    empiricalConcordance <- 1/draws
}
# reported observedConcordance is (n - (C - D)) / 2, i.e. the number of
# discordant pairs when no beta is exactly zero
concordance <- data.frame(observedConcordance=
                          (nrObservations - observedConcordance)/2,
                          empiricalConcordance=empiricalConcordance,
                          nrObservations=nrObservations)
write.table(concordance,
            paste(directory, "/", name, "_concordance_summary.txt", sep=""),
            quote=FALSE, col.names=TRUE, row.names=FALSE)
## plot beta concordance ukb and digital heart ####
# combine UKB (discovery) and digital-heart (replication) statistics
slices <- cbind(ukb_sig_slices, dh_sig_slices[,3:4])
colnames(slices) <- c('rsid', 'slices', 'ukbb_beta', 'ukbb_logp', 'dh_beta',
                      'dh_logp')
slices$dh_p <- 10^-(slices$dh_logp)
slices$ukbb_p <- 10^-(slices$ukbb_logp)
# Multiple-testing adjusted replication threshold.  Fix: args$nloci was
# never declared as a command-line option, so 0.05/args$nloci evaluated to
# numeric(0); fall back to the number of tested loci when it is absent.
nloci <- if (!is.null(args$nloci)) args$nloci else length(unique(slices$rsid))
sig_adjust <- round(0.05/nloci, 3)
# explicit levels keep the factors well-defined even when all SNPs fall
# into a single category (factor(labels=) would otherwise error)
slices$sig <- factor(as.numeric(slices$dh_logp > -log10(sig_adjust)),
                     levels=c(0, 1),
                     labels=c(expression(p >= sig_adjust),
                              expression(p < sig_adjust)))
# concordant = same sign of discovery and replication effect -> 'yes'
slices$concordance <- factor(-sign(slices$ukbb_beta * slices$dh_beta),
                             levels=c(-1, 1),
                             labels=c('yes', 'no'))
write.table(slices, paste(directory, "/", name, "_concordance.txt",
                          sep=""),
            quote=FALSE, col.names=TRUE, row.names=FALSE, sep="\t")
# symmetric axis limits with a 20% margin around the largest effect
max_y <- max(abs(slices$dh_beta))
max_x <- max(abs(slices$ukbb_beta))
p <- ggplot(data=slices, aes(x=ukbb_beta, y=dh_beta))
p <- p + geom_point(aes(color=concordance, shape=sig)) +
    smooth$stat_smooth_func(geom="text", method="lm", hjust=0, parse=TRUE,
                            xpos=max_x - 1/5*max_x,
                            ypos=max_y + 1/10*max_y, vjust=0, color="black") +
    xlim(c(-max_x - 1/5*max_x, max_x + 1/5*max_x)) +
    ylim(c(-max_y - 1/5*max_y, max_y + 1/5*max_y)) +
    geom_hline(yintercept = 0) +
    geom_vline(xintercept = 0) +
    xlab(expression(hat(beta)[discovery])) +
    ylab(expression(hat(beta)[replication])) +
    scale_color_manual(values=c('black', '#969696'), guide=FALSE) +
    scale_shape_manual(values=c(20, 17), name='Replication',
                       labels=c(bquote(p>=.(sig_adjust)),
                                bquote(p<.(sig_adjust)))) +
    theme_bw()
ggsave(plot=p, paste(directory, "/", name, "_concordance.pdf", sep=""),
       height=5, width=6.5)
| /digital-heart/association/scripts/concordance.R | no_license | ImperialCollegeLondon/fractalgenetics | R | false | false | 8,444 | r | ###############################
### Libraries and functions ###
###############################
options(import.path=c("/homes/hannah/projects/GWAS",
"/homes/hannah/projects"))
options(bitmapType = 'cairo', device = 'pdf')
modules::import_package('ggplot2', attach=TRUE)
smooth <- modules::import('utils/smoothAddR2')
optparse <- modules::import_package('optparse')
bgenie <- modules::import("bgenieResults")
###############
## analysis ###
###############
## command line arguments ####
option_list <- list(
optparse$make_option(c("-d", "--directory"), action="store",
dest="directory",
type="character", help="Path to directory with digital-heart
bgenie association results [default: %default].", default=NULL),
optparse$make_option(c("-ukb", "--ukbdir"), action="store",
dest="ukbdir",
type="character", help="Path to directory with ukbb significant
association results [default: %default].", default=NULL),
optparse$make_option(c("-n", "--name"), action="store", dest="name",
type="character", help="Name of analysis; has to be the same as
in naming bgenie files [default: %default].", default=NULL),
optparse$make_option(c("--showProgress"), action="store_true",
dest="verbose",
default=FALSE, type="logical", help="If set, progress messages
about analyses are printed to standard out [default: %default]."),
optparse$make_option(c("--debug"), action="store_true",
dest="debug", default=FALSE, type="logical",
help="If set, predefined arguments are used to test the script
[default: %default].")
)
args <- optparse$parse_args(optparse$OptionParser(option_list=option_list))
if (args$debug) {
args <- list()
args$directory <- "~/data/digital-heart/gwas/FD"
args$ukbdir <- "~/data/ukbb/ukb-hrt/gwas/180628_fractal_dimension"
args$name <- 'slices'
args$verbose <- TRUE
}
directory <- args$directory
ukbdir <- args$ukbdir
name <- args$name
verbose <- args$verbose
## genome-wide association results digital-heart ####
slices_dh <- lapply(1:22, bgenie$readBgenieOutput, directory=directory,
name=paste("bgenie_", name, "_lm_st_chr", sep=""),
maf=0.001, biallelicOnly=FALSE)
slices_dh <- do.call(rbind, slices_dh)
if (verbose) message("Write file with genome-wide association results")
write.table(slices_dh,
file=paste(directory, "/bgenie_", name, "_lm_st_genomewide.csv",
sep=""),
sep=",",quote=FALSE, col.names=TRUE, row.names=FALSE)
## ld-filtered, significant genome-wide association results ukb ####
slices_ukb <- read.table(paste(ukbdir,
"/Pseudomultitrait_slices_sig5e08_ldFiltered.txt",
sep=""),
sep=",", stringsAsFactors=FALSE, header=TRUE)
# LD filter misses these two SNPs, manually remove
slices_ukb <- slices_ukb[!slices_ukb$rsid %in% c("rs12214483", "rs117953218"),]
## format ukb betas and p-values per slice ####
slices_ukb_beta <- cbind(rsid=slices_ukb$rsid,
slices_ukb[,grepl('beta', colnames(slices_ukb))])
colnames(slices_ukb_beta) <- gsub("_beta", "", colnames(slices_ukb_beta))
beta <- reshape2::melt(slices_ukb_beta, id.var='rsid', value.name='beta',
variable.name='slice')
slices_ukb_logp <- cbind(rsid=slices_ukb$rsid,
slices_ukb[, grepl('log10p', colnames(slices_ukb))])
colnames(slices_ukb_logp) <- gsub("\\.log10p", "", colnames(slices_ukb_logp))
logp <- reshape2::melt(slices_ukb_logp, id.var='rsid', value.name='logp',
variable.name='slice')
ukb <- cbind(beta, logp=logp$logp)
ukb$rsid <- as.character(ukb$rsid)
## ukb significant slice pvalues and betas ####
ukb <- ukb[ukb$rsid %in% slices_dh$rsid, ]
ukb_sig_slices <- dplyr::filter(ukb, logp > -log10(5e-8))
slices2sample <- as.data.frame(table(ukb_sig_slices$slice),
stringsAsFactors=FALSE)
colnames(slices2sample) <- c("slice", "freq")
slices2sample <- slices2sample[slices2sample$freq != 0,]
observedBetas <- ukb_sig_slices$beta
## betas and pvalues of significant snps and slice in ukb ####
dh_beta <- slices_dh[,c(2, which(grepl('beta', colnames(slices_dh))))]
colnames(dh_beta) <- gsub("_beta", "", colnames(dh_beta))
dh_logp <- slices_dh[,c(2, which(grepl('log10p', colnames(slices_dh))))]
colnames(dh_logp) <- gsub("-log10p", "", colnames(dh_logp))
dh_sig_slices <- data.frame(t(apply(ukb_sig_slices, 1, function(x) {
pos <- which(dh_beta$rsid %in% x[1])
beta_tmp <- dh_beta[pos, colnames(dh_beta) %in% x[2]]
logp_tmp <- dh_logp[pos, colnames(dh_logp) %in% x[2]]
return(rbind(x[2], beta_tmp, logp_tmp))
})), stringsAsFactors=FALSE)
colnames(dh_sig_slices) <- c("slice", "beta", "logp")
dh_sig_slices$rsid <- ukb_sig_slices$rsid
dh_sig_slices <- dplyr::select(dh_sig_slices, rsid, slice, beta, logp)
dh_sig_slices$beta <- as.numeric(dh_sig_slices$beta)
dh_sig_slices$logp <- as.numeric(dh_sig_slices$logp)
## concordance of effect sizes ####
nrObservations <- length(sign(dh_sig_slices$beta * observedBetas))
observedConcordance <- sum(sign(dh_sig_slices$beta * observedBetas))
## Empirical concordance: matched for number of sig snps and slices ####
nrsig <- nrow(ukb_sig_slices)
nrtotal <- nrow(slices_dh)
draws <- 100000
seed <- 101
set.seed(seed)
testConcordance <- sapply(1:draws, function(dummy) {
randomSnps <- dh_beta[sample(nrtotal, nrsig),
colnames(dh_beta) %in% slices2sample$slice]
randomBetas <- unlist(sapply(1:nrow(slices2sample), function(x) {
pos <- colnames(randomSnps) == slices2sample$slice[x]
sample(randomSnps[,pos], slices2sample$freq[x])
}))
return(sum(sign(randomBetas * observedBetas)))
})
empiricalConcordance <-
length(which(testConcordance >= observedConcordance))/draws
if (empiricalConcordance == 0) {
empiricalConcordance <- 1/draws
}
concordance <- data.frame(observedConcordance=
(nrObservations - observedConcordance)/2,
empiricalConcordance=empiricalConcordance,
nrObservations=nrObservations)
write.table(concordance,
paste(directory, "/", name, "_concordance_summary.txt", sep=""),
quote=FALSE, col.names=TRUE, row.names=FALSE)
## plot beta concordance ukb and digital heart ####
slices <- cbind(ukb_sig_slices, dh_sig_slices[,3:4])
colnames(slices) <- c('rsid', 'slices', 'ukbb_beta', 'ukbb_logp', 'dh_beta',
'dh_logp')
slices$dh_p <- 10^-(slices$dh_logp)
slices$ukbb_p <- 10^-(slices$ukbb_logp)
sig_adjust <- round(0.05/args$nloci,3)
slices$sig <- factor(as.numeric(slices$dh_logp > -log10(sig_adjust)),
labels=c(expression(p >= sig_adjust),
expression(p < sig_adjust)))
slices$concordance <- factor(-sign(slices$ukbb_beta * slices$dh_beta),
labels=c('yes', 'no'))
write.table(slices, paste(directory, "/", name, "_concordance.txt",
sep=""),
quote=FALSE, col.names=TRUE, row.names=FALSE, sep="\t")
max_y <- max(abs(slices$dh_beta))
max_x <- max(abs(slices$ukbb_beta))
p <- ggplot(data=slices, aes(x=ukbb_beta, y=dh_beta))
p <- p + geom_point(aes(color=concordance, shape=sig)) +
smooth$stat_smooth_func(geom="text", method="lm", hjust=0, parse=TRUE,
xpos=max_x - 1/5*max_x,
ypos=max_y + 1/10*max_y, vjust=0, color="black") +
xlim(c(-max_x - 1/5*max_x, max_x + 1/5*max_x)) +
ylim(c(-max_y - 1/5*max_y, max_y + 1/5*max_y)) +
geom_hline(yintercept = 0) +
geom_vline(xintercept = 0) +
xlab(expression(hat(beta)[discovery])) +
ylab(expression(hat(beta)[replication])) +
scale_color_manual(values=c('black', '#969696'), guide=FALSE) +
scale_shape_manual(values=c(20, 17), name='Replication',
labels=c(bquote(p>=.(sig_adjust)),
bquote(p<.(sig_adjust)))) +
theme_bw()
ggsave(plot=p, paste(directory, "/", name, "_concordance.pdf", sep=""),
height=5, width=6.5)
|
#' @rdname unbiased_increment
#' @title Unbiased estimator for the value of the increment at the given level
#' @description Generate four coupled chains and compute the estimator of the increment
#' @param level an integer that determines the target probability distributions of four chains
#' @param rinit function that is utilized for initialization of chains (for instance, samples from prior)
#' @param single_kernel a function that makes a single step through a specified MCMC kernel for a single chain
#' @param coupled2_kernel a function that makes a single step through a specified coupled MCMC kernel for 2 chains at different levels
#' @param coupled4_kernel a function that makes a single step through a specified coupled MCMC kernel for all four chains
#' @param proposal_coupling2 a function that generates proposal for a given input for two coupled chains (burn-in + lag)
#' @param proposal_coupling4 a function that generates proposal for a given input for four coupled chains (main run)
#' @param tuning a list of parameters required for MCMC iterations (for instance standard deviation for RWM)
#' @param tuning_coarse a list of parameters required for MCMC iterations for the coarse level (for instance standard deviation for RWM)
#' @param tuning_fine a list of parameters required for MCMC iterations for the fine level (for instance standard deviation for RWM)
#' @param h function that represents quantity of interest. Depends on level and spatial argument.
#' @param k an integer: lower bound for time-averaging
#' @param m an integer: upper bound for time-averaging
#' @param max_iterations iteration at which to stop the while loop (default to infinity)
#' @param sampling_factor a real value that controls the magnitude of perturbation at the initialization step
#'@return a list with the value of MCMC estimator without correction, value of Unbiased MCMC estimator, meeting time, value of iteration counter, flag that is "True" if chains have met before the iteration counter reached the value in max_iterations, cost of calculations
#'@export
unbiased_increment <- function(level, rinit, single_kernel,
                               coupled2_kernel, coupled4_kernel, proposal_coupling2, proposal_coupling4,
                               tuning, tuning_coarse, tuning_fine,
                               h = function(l, x) x, k = 0, m = 1, max_iterations = Inf,
                               sampling_factor = 0.2){
  # NOTE(review): single_kernel is accepted for interface compatibility but
  # is not used by this function.
  # running count of likelihood-evaluation operations; one evaluation at
  # level l is charged as 2^l operations
  cost <- 0
  # initialize two chains targeting the coarse level (level - 1) ...
  state_coarse1 <- rinit(level - 1)
  cost <- cost + 2 ^ (level - 1)
  state_coarse2 <- rinit(level - 1)
  cost <- cost + 2 ^ (level - 1)
  # ... and two chains targeting the fine level
  state_fine1 <- rinit(level)
  cost <- cost + 2 ^ level
  state_fine2 <- rinit(level)
  cost <- cost + 2 ^ level
  identical_coarse <- FALSE
  identical_fine <- FALSE
  # start the first coarse/fine pair from perturbations of a common state.
  # NOTE(review): as in the original code, the coarse chains are also
  # re-evaluated against logtarget(level, .) with cost 2^level even though
  # they were initialized at level - 1; confirm whether level - 1 was
  # intended for the coarse chains.
  base_state <- 0.0 + state_coarse1$chain_state
  res <- perturb_until_finite(level, base_state, state_coarse1, sampling_factor)
  state_coarse1 <- res$state
  cost <- cost + res$cost
  res <- perturb_until_finite(level, base_state, state_fine1, sampling_factor)
  state_fine1 <- res$state
  cost <- cost + res$cost
  # start the second coarse/fine pair from perturbations of a common state
  base_state <- 0.0 + state_coarse2$chain_state
  res <- perturb_until_finite(level, base_state, state_coarse2, sampling_factor)
  state_coarse2 <- res$state
  cost <- cost + res$cost
  res <- perturb_until_finite(level, base_state, state_fine2, sampling_factor)
  state_fine2 <- res$state
  cost <- cost + res$cost
  # running sums for the time-averaged MCMC estimators at both levels
  mcmcestimator_coarse <- h(level - 1, state_coarse1$chain_state)
  mcmcestimator_fine <- h(level, state_fine1$chain_state)
  dimh <- length(mcmcestimator_coarse)
  if (k > 0){
    mcmcestimator_coarse <- rep(0, dimh)
    mcmcestimator_fine <- rep(0, dimh)
  }
  # correction computes the sum of min(1, (t - k + 1) / (m - k + 1)) *
  # (h(X_{t+1}) - h(X_t)) for t = k, ..., max(m, tau - 1)
  correction_coarse <- rep(0, dimh)
  correction_fine <- rep(0, dimh)
  # advance the first pair by one step to create the one-step lag
  coupled_kernel_output <- coupled2_kernel(level,
                                           state_coarse1, state_fine1,
                                           identical = FALSE,
                                           tuning = tuning,
                                           proposal_coupling = proposal_coupling2)
  state_coarse1 <- coupled_kernel_output$state1
  state_fine1 <- coupled_kernel_output$state2
  cost <- cost + coupled_kernel_output$cost
  if (k == 0){
    correction_coarse <- correction_coarse + (min(1, (0 - k + 1)/(m - k + 1))) *
      (h(level - 1, state_coarse1$chain_state) - h(level - 1, state_coarse2$chain_state))
    correction_fine <- correction_fine + (min(1, (0 - k + 1)/(m - k + 1))) *
      (h(level, state_fine1$chain_state) - h(level, state_fine2$chain_state))
  }
  if (k <= 1 && m >= 1){
    mcmcestimator_coarse <- mcmcestimator_coarse + h(level - 1, state_coarse1$chain_state)
    mcmcestimator_fine <- mcmcestimator_fine + h(level, state_fine1$chain_state)
  }
  # initialize counters; iter here is 1: we hold X_1, Y_0 and successively
  # generate X_t, Y_{t-1} where iter = t
  iter <- 1
  meet_coarse <- FALSE
  meet_fine <- FALSE
  finished <- FALSE
  meetingtime_coarse <- Inf
  meetingtime_fine <- Inf
  while (!finished && iter < max_iterations){
    iter <- iter + 1
    # advance all four chains through the coupled kernel.
    # NOTE(review): `tuning` is passed for both levels; the tuning_coarse /
    # tuning_fine arguments are currently unused -- confirm intended.
    coupled_kernel_output <- coupled4_kernel(level,
                                             state_coarse1, state_coarse2,
                                             state_fine1, state_fine2,
                                             identical_coarse, identical_fine,
                                             tuning, tuning,
                                             proposal_coupling4)
    state_coarse1 <- coupled_kernel_output$state_coarse1
    state_coarse2 <- coupled_kernel_output$state_coarse2
    state_fine1 <- coupled_kernel_output$state_fine1
    state_fine2 <- coupled_kernel_output$state_fine2
    identical_coarse <- coupled_kernel_output$identical_coarse
    identical_fine <- coupled_kernel_output$identical_fine
    cost <- cost + coupled_kernel_output$cost
    # update estimator for the coarse discretization level
    if (meet_coarse){
      if (k <= iter && iter <= m){
        mcmcestimator_coarse <- mcmcestimator_coarse + h(level - 1, state_coarse1$chain_state)
      }
    } else {
      if (k <= iter){
        if (iter <= m){
          mcmcestimator_coarse <- mcmcestimator_coarse + h(level - 1, state_coarse1$chain_state)
        }
        correction_coarse <- correction_coarse + (min(1, (iter - 1 - k + 1)/(m - k + 1))) *
          (h(level - 1, state_coarse1$chain_state) - h(level - 1, state_coarse2$chain_state))
      }
    }
    # update estimator for the fine discretization level
    if (meet_fine){
      if (k <= iter && iter <= m){
        mcmcestimator_fine <- mcmcestimator_fine + h(level, state_fine1$chain_state)
      }
    } else {
      if (k <= iter){
        if (iter <= m){
          mcmcestimator_fine <- mcmcestimator_fine + h(level, state_fine1$chain_state)
        }
        correction_fine <- correction_fine + (min(1, (iter - 1 - k + 1)/(m - k + 1))) *
          (h(level, state_fine1$chain_state) - h(level, state_fine2$chain_state))
      }
    }
    # record the coarse meeting time tau_coarse
    if (identical_coarse && !meet_coarse){
      meet_coarse <- TRUE
      meetingtime_coarse <- iter
    }
    # record the fine meeting time tau_fine
    if (identical_fine && !meet_fine){
      meet_fine <- TRUE
      meetingtime_fine <- iter
    }
    # stop after max(m, tau_coarse, tau_fine) steps
    if (iter >= max(meetingtime_coarse, meetingtime_fine, m)){
      finished <- TRUE
    }
  }
  # time-averaged MCMC estimators and their difference (the increment)
  mcmcestimator_coarse <- mcmcestimator_coarse / (m - k + 1)
  mcmcestimator_fine <- mcmcestimator_fine / (m - k + 1)
  mcmcestimator <- mcmcestimator_fine - mcmcestimator_coarse
  # bias-corrected (unbiased) estimators and their difference
  uestimator_coarse <- mcmcestimator_coarse + correction_coarse
  uestimator_fine <- mcmcestimator_fine + correction_fine
  uestimator <- uestimator_fine - uestimator_coarse
  return(list(mcmcestimator_coarse = mcmcestimator_coarse, mcmcestimator_fine = mcmcestimator_fine,
              uestimator_coarse = uestimator_coarse, uestimator_fine = uestimator_fine,
              mcmcestimator = mcmcestimator, uestimator = uestimator,
              meetingtime_coarse = meetingtime_coarse, meetingtime_fine = meetingtime_fine,
              iteration = iter, finished = finished, cost = cost))
}

# Private helper for unbiased_increment (defined after it so the roxygen
# block above still documents unbiased_increment): re-initialize a chain by
# perturbing `base_state` with Gaussian noise of scale `sampling_factor`
# until the log-target at `level` is finite.  Returns the updated state and
# the evaluation cost incurred (2^level per logtarget call), preserving the
# draw sequence of the original inline while-loops.
perturb_until_finite <- function(level, base_state, state, sampling_factor) {
  cost <- 0
  repeat {
    pert <- sampling_factor * rnorm(length(base_state))
    state$chain_state <- base_state + pert
    state$current_pdf <- logtarget(level, state$chain_state)
    cost <- cost + 2 ^ level
    if (is.finite(state$current_pdf)) {
      break
    }
  }
  return(list(state = state, cost = cost))
}
| /R/unbiased_increment.R | no_license | jeremyhengjm/UnbiasedMultilevel | R | false | false | 10,016 | r | #' @rdname unbiased_increment
#' @title Unbiased estimator for the value of the increment at the given level
#' @description Generate four coupled chains and compute the estimator of the increment
#' @param level an integer that determines the target probability distributions of four chains
#' @param rinit function that is utilized for initialization of chains (for instance, samples from prior)
#' @param single_kernel a function that makes a single step through a specified MCMC kernel for a single chain
#' @param coupled2_kernel a function that makes a single step through a specified coupled MCMC kernel for 2 chains at different levels
#' @param coupled4_kernel a function that makes a single step through a specified coupled MCMC kernel for all four chains
#' @param proposal_coupling2 a function that generates proposal for a given input for two coupled chains (burn-in + lag)
#' @param proposal_coupling4 a function that generates proposal for a given input for four coupled chains (main run)
#' @param tuning a list of parameters required for MCMC iterations (for instance standard deviation for RWM)
#' @param tuning_coarse a list of parameters required for MCMC iterations for the coarse level (for instance standard deviation for RWM)
#' @param tuning_fine a list of parameters required for MCMC iterations for the fine level (for instance standard deviation for RWM)
#' @param h function that represents quantity of interest. Depends on level and spatial argument.
#' @param k an integer: lower bound for time-averaging
#' @param m an integer: upper bound for time-averaging
#' @param max_iterations iteration at which to stop the while loop (default to infinity)
#' @param sampling_factor a real value that controls the magnitude of perturbation at the initialization step
#'@return a list with the value of MCMC estimator without correction, value of Unbiased MCMC estimator, meeting time, value of iteration counter, flag that is "True" if chains have met before the iteration counter reached the value in max_iterations, cost of calculations
#'@export
unbiased_increment <- function(level, rinit, single_kernel,
                               coupled2_kernel, coupled4_kernel, proposal_coupling2, proposal_coupling4,
                               tuning, tuning_coarse, tuning_fine,
                               h = function(l, x) x, k = 0, m = 1, max_iterations = Inf,
                               sampling_factor = 0.2){
  # Runs four coupled chains: a pair (state_coarse1, state_coarse2) targeting
  # discretization level-1 and a pair (state_fine1, state_fine2) targeting level.
  # Each pair yields an unbiased time-averaged estimator (Jacob/O'Leary/Atchade
  # style: MCMC average over iterations k..m plus a bias-correction term built
  # from the coupled differences); the function returns their difference, an
  # unbiased estimator of the increment between levels.
  # NOTE(review): single_kernel, tuning_coarse and tuning_fine are accepted but
  # never used below (coupled4_kernel is called with `tuning, tuning`) -- confirm
  # whether the level-specific tunings were meant to be forwarded.
  # NOTE(review): `logtarget` is taken from the enclosing/global environment,
  # not passed as an argument -- it must exist where this function is called.
  cost = 0 # number of operations
  # initialize chains
  state_coarse1 <- rinit(level-1)
  cost = cost + 2 ^ (level - 1) # single calculation of the likelihood at level (l - 1)
  state_coarse2 <- rinit(level-1)
  cost = cost + 2 ^ (level - 1) # single calculation of the likelihood at level (l - 1)
  state_fine1 <- rinit(level)
  cost = cost + 2 ^ level # single calculation of the likelihood at level l
  state_fine2 <- rinit(level)
  cost = cost + 2 ^ level # single calculation of the likelihood at level l
  identical_coarse <- FALSE
  identical_fine <- FALSE
  # Perturb the four initial states around two base points (one per chain pair),
  # redrawing the Gaussian perturbation until the log-target is finite, i.e.
  # until the perturbed state lies in the support of the target.
  # `0.0 +` forces a copy of the state vector before it is modified below.
  base_state = 0.0 + state_coarse1$chain_state
  generation <- TRUE
  while (generation)
  {
    pert = sampling_factor * rnorm(length(base_state))
    state_coarse1$chain_state <- base_state + pert
    # NOTE(review): the coarse chains target level-1 (see rinit(level-1) above),
    # yet the density and cost here use `level` / 2^level -- confirm whether
    # this should be logtarget(level-1, ...) and cost + 2^(level-1).
    state_coarse1$current_pdf <- logtarget(level, state_coarse1$chain_state)
    cost = cost + 2 ^ level
    if (is.finite(state_coarse1$current_pdf))
    {
      generation <- FALSE
    }
  }
  generation <- TRUE
  while (generation)
  {
    pert = sampling_factor * rnorm(length(base_state))
    state_fine1$chain_state <- base_state + pert
    state_fine1$current_pdf <- logtarget(level, state_fine1$chain_state)
    cost = cost + 2 ^ level
    if (is.finite(state_fine1$current_pdf))
    {
      generation <- FALSE
    }
  }
  base_state = 0.0 + state_coarse2$chain_state
  generation <- TRUE
  while (generation)
  {
    pert = sampling_factor * rnorm(length(base_state))
    state_coarse2$chain_state <- base_state + pert
    # NOTE(review): same level/level-1 question as for state_coarse1 above.
    state_coarse2$current_pdf <- logtarget(level, state_coarse2$chain_state)
    cost = cost + 2 ^ level
    if (is.finite(state_coarse2$current_pdf))
    {
      generation <- FALSE
    }
  }
  generation <- TRUE
  while (generation)
  {
    pert = sampling_factor * rnorm(length(base_state))
    state_fine2$chain_state <- base_state + pert
    state_fine2$current_pdf <- logtarget(level, state_fine2$chain_state)
    cost = cost + 2 ^ level
    if (is.finite(state_fine2$current_pdf))
    {
      generation <- FALSE
    }
  }
  # Time-averaged estimators start from h evaluated at time 0 (only counted
  # when k == 0; otherwise reset to zero below).
  mcmcestimator_coarse <- h(level-1, state_coarse1$chain_state)
  mcmcestimator_fine <- h(level, state_fine1$chain_state)
  dimh <- length(mcmcestimator_coarse)
  if (k > 0){
    mcmcestimator_coarse <- rep(0, dimh)
    mcmcestimator_fine <- rep(0, dimh)
  }
  # correction computes the sum of min(1, (t - k + 1) / (m - k + 1)) * (h(X_{t+1}) - h(X_t)) for t=k,...,max(m, tau - 1)
  correction_coarse <- rep(0, dimh)
  correction_fine <- rep(0, dimh)
  # create the lag: advance only chain 1 of each pair by one step, so that
  # chain 1 is at time 1 while chain 2 stays at time 0 (a lag-1 coupling).
  coupled_kernel_output <- coupled2_kernel(level,
                                           state_coarse1, state_fine1,
                                           identical = FALSE,
                                           tuning = tuning,
                                           proposal_coupling = proposal_coupling2)
  state_coarse1 <- coupled_kernel_output$state1
  state_fine1 <- coupled_kernel_output$state2
  cost = cost + coupled_kernel_output$cost
  # coupled_kernel_output <- coupled2_kernel(level,
  #                                          state_coarse1, state_fine1,
  #                                          identical = FALSE,
  #                                          tuning = tuning_coarse,
  #                                          proposal_coupling = proposal_coupling2)
  # First bias-correction term (t = 0), only relevant when k == 0.
  if (k == 0){
    correction_coarse <- correction_coarse + (min(1, (0 - k + 1)/(m - k + 1))) *
      (h(level-1, state_coarse1$chain_state) - h(level-1, state_coarse2$chain_state))
    correction_fine <- correction_fine + (min(1, (0 - k + 1)/(m - k + 1))) *
      (h(level, state_fine1$chain_state) - h(level, state_fine2$chain_state))
  }
  # Accumulate the time-1 term of the MCMC average when 1 lies in [k, m].
  if (k <= 1 && m >= 1){
    mcmcestimator_coarse <- mcmcestimator_coarse + h(level-1, state_coarse1$chain_state)
    mcmcestimator_fine <- mcmcestimator_fine + h(level, state_fine1$chain_state)
  }
  # initialize
  iter <- 1
  meet_coarse <- FALSE
  meet_fine <- FALSE
  finished <- FALSE
  meetingtime_coarse <- Inf
  meetingtime_fine <- Inf
  # iter here is 1; at this point we have X_1,Y_0 and we are going to generate successively X_t,Y_{t-1} where iter = t
  while (!finished && iter < max_iterations){
    # increment counter
    iter <- iter + 1
    # run coupled kernel
    coupled_kernel_output <- coupled4_kernel(level,
                                             state_coarse1, state_coarse2,
                                             state_fine1, state_fine2,
                                             identical_coarse, identical_fine,
                                             tuning, tuning,
                                             proposal_coupling4)
    state_coarse1 <- coupled_kernel_output$state_coarse1
    state_coarse2 <- coupled_kernel_output$state_coarse2
    state_fine1 <- coupled_kernel_output$state_fine1
    state_fine2 <- coupled_kernel_output$state_fine2
    identical_coarse <- coupled_kernel_output$identical_coarse
    identical_fine <- coupled_kernel_output$identical_fine
    cost = cost + coupled_kernel_output$cost
    # update estimator for coarse discretization level
    if (meet_coarse){
      # After meeting the correction term vanishes; only the plain average accrues.
      if (k <= iter && iter <= m){
        mcmcestimator_coarse <- mcmcestimator_coarse + h(level-1, state_coarse1$chain_state)
      }
    } else {
      if (k <= iter){
        if (iter <= m){
          mcmcestimator_coarse <- mcmcestimator_coarse + h(level-1, state_coarse1$chain_state)
        }
        correction_coarse <- correction_coarse + (min(1, (iter-1 - k + 1)/(m - k + 1))) *
          (h(level-1, state_coarse1$chain_state) - h(level-1, state_coarse2$chain_state))
      }
    }
    # update estimator for fine discretization level
    if (meet_fine){
      if (k <= iter && iter <= m){
        mcmcestimator_fine <- mcmcestimator_fine + h(level, state_fine1$chain_state)
      }
    } else {
      if (k <= iter){
        if (iter <= m){
          mcmcestimator_fine <- mcmcestimator_fine + h(level, state_fine1$chain_state)
        }
        correction_fine <- correction_fine + (min(1, (iter-1 - k + 1)/(m - k + 1))) *
          (h(level, state_fine1$chain_state) - h(level, state_fine2$chain_state))
      }
    }
    # check if meeting occurs for coarse discretization level
    if (identical_coarse && !meet_coarse){
      meet_coarse <- TRUE # recording meeting time tau_coarse
      meetingtime_coarse <- iter
    }
    # check if meeting occurs for fine discretization level
    if (identical_fine && !meet_fine){
      meet_fine <- TRUE # recording meeting time tau_fine
      meetingtime_fine <- iter
    }
    # stop after max(m, tau_coarse, tau_fine) steps
    if (iter >= max(meetingtime_coarse, meetingtime_fine, m)){
      finished <- TRUE
    }
  }
  # compute mcmc estimators and their difference
  mcmcestimator_coarse <- mcmcestimator_coarse / (m - k + 1)
  mcmcestimator_fine <- mcmcestimator_fine / (m - k + 1)
  mcmcestimator <- mcmcestimator_fine - mcmcestimator_coarse
  # compute unbiased estimators and their difference
  uestimator_coarse <- mcmcestimator_coarse + correction_coarse
  uestimator_fine <- mcmcestimator_fine + correction_fine
  uestimator <- uestimator_fine - uestimator_coarse
  # `finished` is FALSE when max_iterations was hit before the chains met,
  # in which case the estimators are not guaranteed unbiased.
  return(list(mcmcestimator_coarse = mcmcestimator_coarse, mcmcestimator_fine = mcmcestimator_fine,
              uestimator_coarse = uestimator_coarse, uestimator_fine = uestimator_fine,
              mcmcestimator = mcmcestimator, uestimator = uestimator,
              meetingtime_coarse = meetingtime_coarse, meetingtime_fine = meetingtime_fine,
              iteration = iter, finished = finished, cost = cost))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{size}
\alias{size}
\alias{size.default}
\alias{size<-}
\alias{size<-.default}
\title{Return size (width and height) of widget}
\usage{
size(obj)
\method{size}{default}(obj)
size(obj) <- value

\method{size<-}{default}(obj) <- value
}
\arguments{
\item{obj}{object}
\item{value}{size in pixels}
}
\description{
The size is specified in pixels (integers). Some toolkits allow -1 as a default, but not all.
}
| /man/size.Rd | no_license | cran/gWidgets2 | R | false | true | 483 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{size}
\alias{size}
\alias{size.default}
\alias{size<-}
\alias{size<-.default}
\title{Return size (width and height) of widget}
\usage{
size(obj)
\method{size}{default}(obj)
size(obj) <- value

\method{size<-}{default}(obj) <- value
}
\arguments{
\item{obj}{object}
\item{value}{size in pixels}
}
\description{
The size is specified in pixels (integers). Some toolkits allow -1 as a default, but not all.
}
|
# Shiny server for the person-fit (PerFit) app: data upload/preview, summary
# plots, person-fit indices with cutoffs, flagged-respondent profiles, model
# fit (MODFIT), unidimensionality and monotonicity checks, and per-individual
# diagnostic pattern comparisons.
function(input, output, session) {
  # --- Help notification -----------------------------------------------------
  observeEvent(input$warning, {
    # Save the ID for removal later
    showNotification({
      HTML(paste("Dataframe must be a csv file where rows correspond to individuals and
         columns to items. If an item is answered correctly by an individual the value of the
         cell is 1, if it is incorrect, it is a 0. The structure must be similar to the one
         shown below."),'<img src="ex.png" height="90px" width="160px" class="center">')
    }
    , duration = 15)
  })
  # observeEvent(input$warning, {
  #   showModal(modalDialog(
  #     title = NULL,
  #     paste("Dataframe must be a csv file where rows correspond to individuals
  #         and columns to items. The structure must be similar to the one
  #           shown below."),
  #     HTML('<img src="ex.png" height="90px" width="160px" class="center">'),
  #
  #     easyClose = TRUE,
  #     footer = NULL
  #   ))
  # })
  # --- Data upload -----------------------------------------------------------
  # Reactive holding the uploaded csv (NULL until a file is chosen).
  data <- reactive({
    if(is.null(input$file)) return(NULL)else{
      read.csv(input$file$datapath,
               header = input$header,
               sep = input$sep,
               quote = input$quote)}})
  output$contents <- renderTable({
    # BUG FIX: original tested `is.null(data)` -- `data` is the reactive
    # function object and is never NULL; the current *value* must be tested.
    if(is.null(data())) return(NULL) else{
      if(input$disp == "head") {
        return(head(data()))
      }
      else {
        return(data())
      }}})
  ########### SUMMARY:
  output$sample <- renderPrint({
    cat(paste(ncol(data()), " items,", nrow(data()), " sample units and", paste(sum(is.na(data()))), " missing values."))
  })
  output$perc <- renderPlotly({plotPerc(data(), input$xlabsize)})
  output$items <- renderPlotly({plotItems(data())})
  ########### INDICES:
  # All five person-fit indices computed once; individual outputs pick from
  # this list so the expensive computation is shared.
  Indexs <- reactive(
    list(C.Sato = PerFit::C.Sato(data(), IRT.PModel = input$IRT.PModel),
         Cstar = PerFit::Cstar(data(), IRT.PModel = input$IRT.PModel),
         U3 = PerFit::U3(data(), IRT.PModel = input$IRT.PModel),
         Ht = PerFit::Ht(data(), IRT.PModel = input$IRT.PModel),
         lz = PerFit::lz(data(), IRT.PModel = input$IRT.PModel)))
  Index <- reactive(switch(input$index,
                           "C" = Indexs()[["C.Sato"]],
                           "C*" = Indexs()[["Cstar"]],
                           "U3" = Indexs()[["U3"]],
                           "Ht" = Indexs()[["Ht"]],
                           "lz" = Indexs()[["lz"]]))
  Cutoffs <- reactive(lapply(Indexs(), cutoff))
  Cutoff <- reactive(switch(input$index,
                            "C" = Cutoffs()[["C.Sato"]],
                            "C*" = Cutoffs()[["Cstar"]],
                            "U3" = Cutoffs()[["U3"]],
                            "Ht" = Cutoffs()[["Ht"]],
                            "lz" = Cutoffs()[["lz"]]))
  # Conservative cutoff: the CI bound on the side of the flagging tail.
  cutoff.value <- reactive(switch(input$cutoff.chosen,
                                  "value" = NA,
                                  "conservativeIC" = if(Cutoff()$Tail == "upper"){
                                    Cutoff()$Cutoff.CI[2]}else{
                                      Cutoff()$Cutoff.CI[1]}))
  # Same rule applied to every index at once.
  # (A second, identical definition of this reactive appeared later in the
  # original file; the duplicate has been removed.)
  cutoff.value.all <- reactive(lapply(Cutoffs(), function(x) {
    switch(input$cutoff.chosen,
           "value" = NA,
           "conservativeIC" = if(x$Tail == "upper"){
             x$Cutoff.CI[2]}else{
               x$Cutoff.CI[1]})}))
  Flagged.all <- reactive(
    {
      aux <- lapply(seq_along(Indexs()), function(x) {
        flagged.resp(Indexs()[[x]], UDlvl = cutoff.value.all()[[x]], ord = TRUE)
      })
      names(aux) <- c("C.Sato", "Cstar", "U3", "Ht", "lz")
      aux
    }
  )
  Flagged <- reactive(switch(input$index,
                             "C" = Flagged.all()[["C.Sato"]],
                             "C*" = Flagged.all()[["Cstar"]],
                             "U3" = Flagged.all()[["U3"]],
                             "Ht" = Flagged.all()[["Ht"]],
                             "lz" = Flagged.all()[["lz"]]))
  # Keep only the respondent ID column and the index-score column.
  Flagged.index.values <- reactive(Flagged()$Scores[,c(1,ncol(Flagged()$Scores))])
  output$index.tab <- DT::renderDataTable(DT::datatable({
    Flagged.index.values()
  }))
  output$index.plot <- renderPlotly({
    plot(Index(), UDlvl = cutoff.value())
  })
  allflagged <- reactive(AllFlagged(indexsList = Indexs()[which(names(Indexs()) %in% input$indexs)],
                                    UDlvl = input$cutoff.chosen2, IRT.PModel = input$IRT.PModel2))
  output$allflagged.tab <- DT::renderDataTable({
    DT::datatable(allflagged(), rownames = FALSE) %>% formatRound(columns = colnames(allflagged())[-ncol(allflagged())], digits = c(0, rep(2, times = (ncol(allflagged()) - 2))))
  })
  Profiles <- reactive(profiles(flagged.dataframe = allflagged()))
  # Replace the 0/1 flag matrix by empty/filled circles for display.
  Profiles2 <- reactive({
    Profiles2 <- Profiles()[,-which(colnames(Profiles()) %in% "count")]
    Profiles2[,1:length(input$indexs)][Profiles2[,1:length(input$indexs)] == 0] <- "⚪"
    Profiles2[,1:length(input$indexs)][Profiles2[,1:length(input$indexs)] == 1] <- "⚫"
    Profiles2
  })
  # Row positions of horizontal separators between groups with equal flag counts.
  ind.hline <- reactive({
    tab <- as.data.frame(table(Profiles2()$flags))
    tab <- tab[order(tab$Var1, decreasing = TRUE), ]
    ind.hline <- cumsum(tab$Freq)[-6]
    ind.hline
  })
  output$profiles.table <- renderTable(Profiles2(), na = "")
  ########### Goodness of fit & local independency
  fit <- reactive({
    require(irtoys)
    est(data(), model = input$IRT.PModel3, engine = "ltm", nqp = 20)})
  par <- reactive(fit()$est)
  modfit <- reactive({
    MODFIT(data = data(), IP = par(), const = FALSE, precision = 4)
  })
  output$modfitSummary <- renderTable({
    summary <- modfit()$Summary.table
    summary2 <- data.frame("Under 3" = rowSums(summary[,1:3]), "Over 3" = rowSums(summary[,4:7]),
                           "Mean" = summary[,"Mean"], "SD" = summary[,"SD"])
    summary2
  }, rownames = TRUE, sanitize.colnames.function=function(x)gsub("\\."," ",x))
  output$casesItemsSinglets <- DT::renderDataTable(data.frame(chisq.adj.df = modfit()$Singlets[,7], check.names = FALSE),
                                                   rownames = TRUE)
  # Map the numeric item indices returned by MODFIT back to the item names.
  output$casesItemsDoublets <- DT::renderDataTable({
    DoubletsItems <- data.frame("Item1" = colnames(data())[as.numeric(modfit()$Doublets[,"Item1"])])
    DoubletsItems[,"Item2"] <- colnames(data())[as.numeric(modfit()$Doublets[,"Item2"])]
    DoubletsItems[,"chisq.adj.df"] <- modfit()$Doublets[,"chisq.adj.df"]
    DoubletsItems
  },rownames = FALSE)
  output$casesItemsTriplets <- DT::renderDataTable({
    TripletsItems <- data.frame("Item1" = colnames(data())[
      as.numeric(modfit()$Triplets[,"Item1"])])
    TripletsItems[,"Item2"] <- colnames(data())[as.numeric(modfit()$Triplets[,"Item2"])]
    TripletsItems[,"Item3"] <- colnames(data())[as.numeric(modfit()$Triplets[,"Item3"])]
    TripletsItems[,"chisq.adj.df"] <- modfit()$Triplets[,"chisq.adj.df"]
    TripletsItems
  },rownames = FALSE)
  ########### Unidimensionality
  # 1
  UN <- reactive({
    unidimTest.jorge1(ltm(data() ~ z1))
  })
  digits = 3
  output$printUN <- renderPrint({
    cat("<b>Alternative hypothesis</b>: the second eigenvalue of the observed data is substantially larger
        than the second eigenvalue of data under the assumed IRT model <br/>
        Second eigenvalue in the observed data:", round(UN()$Tobs[2], digits), "<br/>",
        "Average of second eigenvalues in Monte Carlo samples:", round(mean(UN()$T.boot[, 2], na.rm = TRUE), digits), "<br/>",
        "Monte Carlo samples:", NROW(UN()$T.boot), "<br/>",
        "p-value:", round(UN()$p.value, digits), "<br/>")
  })
  # Scree-style comparison: observed eigenvalues vs Monte Carlo means,
  # both normalized to proportion of variance explained.
  output$vaps <- renderPlotly({
    y1 <- UN()$Tobs
    y2 <- colMeans(UN()$T.boot, na.rm = TRUE)
    y12 <- y1/sum(y1)
    y22 <- y2/sum(y2)
    plot_ly(x = c(1:length(UN()$Tobs)), y = y12, mode = "markers",
            type = "scatter", mode = "line", marker = list(color = "rgba(154, 205, 50, 1)"),
            name = "Observed") %>%
      add_trace(x = c(1:length(UN()$Tobs)), y = y22, mode = "markers",
                marker = list(color = "rgba(102, 102, 102, 1)"), name = 'Mean of simulations')%>%
      layout(xaxis = list(title = "Eigenvalue number"), yaxis = list(title = "% variance explained"))
  })
  output$boot2vap <- renderPlotly(plot(UN()))
  ##### Monotonicity
  require(mokken)
  mon <- reactive(check.monotonicity(data(), minvi = 0.03))
  nonMon <- reactive({
    sumMon <- summary(mon())
    nonMon <- sumMon[sumMon[,"#zsig"]>0,] # items that violate the assumption
    nonMon
  })
  nonMonIndexs <- reactive({which(colnames(data()) %in% rownames(nonMon()))})
  # Exposed so the UI can conditionally hide the monotonicity panel when
  # no item violates the assumption.
  output$nonMonIndexsCondition <- reactive({
    length(nonMonIndexs()) == 0
  })
  outputOptions(output, 'nonMonIndexsCondition', suspendWhenHidden = FALSE)
  output$summaryNonMonIndexs <- renderTable(data.frame("Items" = rownames(nonMon())),
                                            colnames = FALSE)
  nonMonIndexsList <- reactive({
    nonMonIndexsList <- as.list(nonMonIndexs())
    names(nonMonIndexsList) <- rownames(nonMon())
    nonMonIndexsList
  })
  output$si <- renderUI(
    selectInput(inputId = "ItemMonotonicity", label = "Select one:",
                choices = nonMonIndexsList(), selected = nonMonIndexsList()[[1]],
                multiple = FALSE, selectize = TRUE, width = NULL, size = NULL)
  )
  output$monotonicityPlot <- renderPlotly({
    plotMonotonicity(mon(), item = as.numeric(input$ItemMonotonicity))
  })
  ## Diagnostic:
  IDs <- reactive({
    IDs <- as.list(allflagged()$FlaggedIDs)
    names(IDs) <- paste("Individual", allflagged()$FlaggedIDs)
    IDs
  })
  output$selectInd <- renderUI(
    selectInput(inputId = "SelectIndividual", label = "Select one:",
                choices = IDs(), selected = IDs()[[1]],
                multiple = FALSE, selectize = TRUE, width = NULL, size = NULL))
  output$DiffPlot <- renderPlotly({
    plotDiff(data(), caseRow = as.numeric(input$SelectIndividual))
  })
  # Items reordered from easiest to hardest (by total correct answers) so that
  # the simulated aberrant patterns below are defined over item difficulty.
  m <- reactive(apply(data(), 2, sum))
  data2 <- reactive(data()[, order(m(), decreasing = TRUE)])
  # Simulated response patterns (normal/cheater/creative/lucky/careless)
  # for the selected individual.
  Patterns <- reactive({
    patterns(data = data2(), caseRow = as.numeric(input$SelectIndividual),
             responseOptions = input$num) })
  Patterns_thirds <- reactive({
    lapply(Patterns(), sum_tertiles)})
  Observed_thirds <- reactive({
    sum_tertiles(as.numeric(data2()[as.numeric(input$SelectIndividual),]))
  })
  Observed_thirds_relative <- reactive({
    sum_tertiles(as.numeric(data2()[as.numeric(input$SelectIndividual),]), frequence = "relative")
  })
  # Euclidean distance between the observed tertile profile and each simulated
  # pattern's tertile profile; the smallest distance suggests the closest type.
  D <- reactive({
    D <- unlist(lapply(Patterns_thirds(), function(x) dist(rbind(x, Observed_thirds()))))
    names(D) <- c("Normal", "Cheater", "Creative", "Lucky Guesser", "Careless")
    D <- as.data.frame(t(D))
    D
  })
  output$D1 <- renderTable(D()[,1:3])
  output$D2 <- renderTable(D()[,4:5])
  # Render the distance table as HTML with the minimum cell highlighted green.
  output$htmltable <- renderUI({
    # define CSS tags
    css <- c("#bggreen {background-color: #e1f0c1;}")
    # example data frame
    # add the tag inside the cells
    tab <- D()
    tab[tab == min(tab)] <- paste(round(tab[tab == min(tab)], 2), "#bggreen")
    # generate html table with pander package and markdown package
    require("pander")
    require("markdown")
    require("stringr")
    htmltab <- markdownToHTML(
      text=pandoc.table.return(
        tab,
        style="rmarkdown", split.tables=Inf
      ),
      fragment.only=TRUE
    )
    colortable(htmltab, css)
  })
  output$ProfilePlot <- renderPlotly({
    plotProfile(data(), caseRow = as.numeric(input$SelectIndividual), main = "Observed pattern")
  })
  # For each simulated pattern type: substitute the simulated responses for the
  # selected individual's row, plot that profile, and overlay the observed
  # tertile profile in gray for comparison.
  data_aux1 <- reactive({
    dat <- data2()
    dat[as.numeric(input$SelectIndividual),] <- Patterns()[["simNormal"]]
    dat
  })
  output$ProfilePlot_Normal <- renderPlotly({
    plotProfile(data_aux1(), caseRow = as.numeric(input$SelectIndividual), main = "Normal pattern")%>%
      add_trace(x = c(1:3), y = Observed_thirds_relative(), type = "scatter", mode = "lines + markers",
                color = I("gray"), inherit = FALSE, showlegend = FALSE)
  })
  data_aux2 <- reactive({
    dat <- data2()
    dat[as.numeric(input$SelectIndividual),] <- Patterns()[["simCheater"]]
    dat
  })
  output$ProfilePlot_Cheater <- renderPlotly({
    plotProfile(data_aux2(), caseRow = as.numeric(input$SelectIndividual), main = "Cheater pattern")%>%
      add_trace(x = c(1:3), y = Observed_thirds_relative(), type = "scatter", mode = "lines + markers",
                color = I("gray"), inherit = FALSE, showlegend = FALSE)
  })
  data_aux3 <- reactive({
    dat <- data2()
    dat[as.numeric(input$SelectIndividual),] <- Patterns()[["simCreative"]]
    dat
  })
  output$ProfilePlot_Creative <- renderPlotly({
    plotProfile(data_aux3(), caseRow = as.numeric(input$SelectIndividual), main = "Creative pattern")%>%
      add_trace(x = c(1:3), y = Observed_thirds_relative(), type = "scatter", mode = "lines + markers",
                color = I("gray"), inherit = FALSE, showlegend = FALSE)
  })
  data_aux4 <- reactive({
    dat <- data2()
    dat[as.numeric(input$SelectIndividual),] <- Patterns()[["simLucky"]]
    dat
  })
  output$ProfilePlot_Lucky <- renderPlotly({
    plotProfile(data_aux4(), caseRow = as.numeric(input$SelectIndividual), main = "Lucky guesser pattern")%>%
      add_trace(x = c(1:3), y = Observed_thirds_relative(), type = "scatter", mode = "lines + markers",
                color = I("gray"), inherit = FALSE, showlegend = FALSE)
  })
  data_aux5 <- reactive({
    dat <- data2()
    dat[as.numeric(input$SelectIndividual),] <- Patterns()[["simCareless"]]
    dat
  })
  output$ProfilePlot_Careless <- renderPlotly({
    plotProfile(data_aux5(), caseRow = as.numeric(input$SelectIndividual), main = "Careless pattern")%>%
      add_trace(x = c(1:3), y = Observed_thirds_relative(), type = "scatter", mode = "lines + markers",
                color = I("gray"), inherit = FALSE, showlegend = FALSE)
  })
}
| /server.R | no_license | albamrt/PerFitShiny | R | false | false | 14,238 | r | function(input, output, session) {
observeEvent(input$warning, {
# Save the ID for removal later
showNotification({
HTML(paste("Dataframe must be a csv file where rows correspond to individuals and
columns to items. If an item is answered correctly by an individual the value of the
cell is 1, if it is incorrect, it is a 0. The structure must be similar to the one
shown below."),'<img src="ex.png" height="90px" width="160px" class="center">')
}
, duration = 15)
})
# observeEvent(input$warning, {
# showModal(modalDialog(
# title = NULL,
# paste("Dataframe must be a csv file where rows correspond to individuals
# and columns to items. The structure must be similar to the one
# shown below."),
# HTML('<img src="ex.png" height="90px" width="160px" class="center">'),
#
# easyClose = TRUE,
# footer = NULL
# ))
# })
data <- reactive({
if(is.null(input$file)) return(NULL)else{
read.csv(input$file$datapath,
header = input$header,
sep = input$sep,
quote = input$quote)}})
output$contents <- renderTable({
if(is.null(data)) return(NULL) else{
if(input$disp == "head") {
return(head(data()))
}
else {
return(data())
}}})
########### SUMMARY:
output$sample <- renderPrint({
cat(paste(ncol(data()), " items,", nrow(data()), " sample units and", paste(sum(is.na(data()))), " missing values."))
})
output$perc <- renderPlotly({plotPerc(data(), input$xlabsize)})
output$items <- renderPlotly({plotItems(data())})
########### INDICES:
Indexs <- reactive(
list(C.Sato = PerFit::C.Sato(data(), IRT.PModel = input$IRT.PModel),
Cstar = PerFit::Cstar(data(), IRT.PModel = input$IRT.PModel),
U3 = PerFit::U3(data(), IRT.PModel = input$IRT.PModel),
Ht = PerFit::Ht(data(), IRT.PModel = input$IRT.PModel),
lz = PerFit::lz(data(), IRT.PModel = input$IRT.PModel)))
Index <- reactive(switch(input$index,
"C" = Indexs()[["C.Sato"]],
"C*" = Indexs()[["Cstar"]],
"U3" = Indexs()[["U3"]],
"Ht" = Indexs()[["Ht"]],
"lz" = Indexs()[["lz"]]))
Cutoffs <- reactive(lapply(Indexs(), cutoff))
Cutoff <- reactive(switch(input$index,
"C" = Cutoffs()[["C.Sato"]],
"C*" = Cutoffs()[["Cstar"]],
"U3" = Cutoffs()[["U3"]],
"Ht" = Cutoffs()[["Ht"]],
"lz" = Cutoffs()[["lz"]]))
cutoff.value <- reactive(switch(input$cutoff.chosen,
"value" = NA,
"conservativeIC" = if(Cutoff()$Tail == "upper"){
Cutoff()$Cutoff.CI[2]}else{
Cutoff()$Cutoff.CI[1]}))
cutoff.value.all <- reactive(lapply(Cutoffs(), function(x) {
switch(input$cutoff.chosen,
"value" = NA,
"conservativeIC" = if(x$Tail == "upper"){
x$Cutoff.CI[2]}else{
x$Cutoff.CI[1]})}))
Flagged.all <- reactive(
{
aux <- lapply(seq_along(Indexs()), function(x) {
flagged.resp(Indexs()[[x]], UDlvl = cutoff.value.all()[[x]], ord = TRUE)
})
names(aux) <- c("C.Sato", "Cstar", "U3", "Ht", "lz")
aux
}
)
Flagged <- reactive(switch(input$index,
"C" = Flagged.all()[["C.Sato"]],
"C*" = Flagged.all()[["Cstar"]],
"U3" = Flagged.all()[["U3"]],
"Ht" = Flagged.all()[["Ht"]],
"lz" = Flagged.all()[["lz"]]))
Flagged.index.values <- reactive(Flagged()$Scores[,c(1,ncol(Flagged()$Scores))])
output$index.tab <- DT::renderDataTable(DT::datatable({
Flagged.index.values()
}))
output$index.plot <- renderPlotly({
plot(Index(), UDlvl = cutoff.value())
})
cutoff.value.all <- reactive(lapply(Cutoffs(), function(x) {
switch(input$cutoff.chosen,
"value" = NA,
"conservativeIC" = if(x$Tail == "upper"){
x$Cutoff.CI[2]}else{
x$Cutoff.CI[1]})}))
allflagged <- reactive(AllFlagged(indexsList = Indexs()[which(names(Indexs()) %in% input$indexs)],
UDlvl = input$cutoff.chosen2, IRT.PModel = input$IRT.PModel2))
output$allflagged.tab <- DT::renderDataTable({
DT::datatable(allflagged(), rownames = FALSE) %>% formatRound(columns = colnames(allflagged())[-ncol(allflagged())], digits = c(0, rep(2, times = (ncol(allflagged()) - 2))))
})
Profiles <- reactive(profiles(flagged.dataframe = allflagged()))
Profiles2 <- reactive({
Profiles2 <- Profiles()[,-which(colnames(Profiles()) %in% "count")]
Profiles2[,1:length(input$indexs)][Profiles2[,1:length(input$indexs)] == 0] <- "⚪"
Profiles2[,1:length(input$indexs)][Profiles2[,1:length(input$indexs)] == 1] <- "⚫"
Profiles2
})
ind.hline <- reactive({
tab <- as.data.frame(table(Profiles2()$flags))
tab <- tab[order(tab$Var1, decreasing = TRUE), ]
ind.hline <- cumsum(tab$Freq)[-6]
ind.hline
})
output$profiles.table <- renderTable(Profiles2(), na = "")
########### Goodness of fit & local independency
fit <- reactive({
require(irtoys)
est(data(), model = input$IRT.PModel3, engine = "ltm", nqp = 20)})
par <- reactive(fit()$est)
modfit <- reactive({
MODFIT(data = data(), IP = par(), const = FALSE, precision = 4)
})
output$modfitSummary <- renderTable({
summary <- modfit()$Summary.table
summary2 <- data.frame("Under 3" = rowSums(summary[,1:3]), "Over 3" = rowSums(summary[,4:7]),
"Mean" = summary[,"Mean"], "SD" = summary[,"SD"])
summary2
}, rownames = TRUE, sanitize.colnames.function=function(x)gsub("\\."," ",x))
output$casesItemsSinglets <- DT::renderDataTable(data.frame(chisq.adj.df = modfit()$Singlets[,7], check.names = FALSE),
rownames = TRUE)
output$casesItemsDoublets <- DT::renderDataTable({
DoubletsItems <- data.frame("Item1" = colnames(data())[as.numeric(modfit()$Doublets[,"Item1"])])
DoubletsItems[,"Item2"] <- colnames(data())[as.numeric(modfit()$Doublets[,"Item2"])]
DoubletsItems[,"chisq.adj.df"] <- modfit()$Doublets[,"chisq.adj.df"]
DoubletsItems
},rownames = FALSE)
output$casesItemsTriplets <- DT::renderDataTable({
TripletsItems <- data.frame("Item1" = colnames(data())[
as.numeric(modfit()$Triplets[,"Item1"])])
TripletsItems[,"Item2"] <- colnames(data())[as.numeric(modfit()$Triplets[,"Item2"])]
TripletsItems[,"Item3"] <- colnames(data())[as.numeric(modfit()$Triplets[,"Item3"])]
TripletsItems[,"chisq.adj.df"] <- modfit()$Triplets[,"chisq.adj.df"]
TripletsItems
},rownames = FALSE)
########### Unidimensionality
# 1
UN <- reactive({
unidimTest.jorge1(ltm(data() ~ z1))
})
digits = 3
output$printUN <- renderPrint({
cat("<b>Alternative hypothesis</b>: the second eigenvalue of the observed data is substantially larger
than the second eigenvalue of data under the assumed IRT model <br/>
Second eigenvalue in the observed data:", round(UN()$Tobs[2], digits), "<br/>",
"Average of second eigenvalues in Monte Carlo samples:", round(mean(UN()$T.boot[, 2], na.rm = TRUE), digits), "<br/>",
"Monte Carlo samples:", NROW(UN()$T.boot), "<br/>",
"p-value:", round(UN()$p.value, digits), "<br/>")
})
output$vaps <- renderPlotly({
y1 <- UN()$Tobs
y2 <- colMeans(UN()$T.boot, na.rm = TRUE)
y12 <- y1/sum(y1)
y22 <- y2/sum(y2)
plot_ly(x = c(1:length(UN()$Tobs)), y = y12, mode = "markers",
type = "scatter", mode = "line", marker = list(color = "rgba(154, 205, 50, 1)"),
name = "Observed") %>%
add_trace(x = c(1:length(UN()$Tobs)), y = y22, mode = "markers",
marker = list(color = "rgba(102, 102, 102, 1)"), name = 'Mean of simulations')%>%
layout(xaxis = list(title = "Eigenvalue number"), yaxis = list(title = "% variance explained"))
})
output$boot2vap <- renderPlotly(plot(UN()))
##### Monotonicity
require(mokken)
mon <- reactive(check.monotonicity(data(), minvi = 0.03))
nonMon <- reactive({
sumMon <- summary(mon())
nonMon <- sumMon[sumMon[,"#zsig"]>0,] # items that violate the assumption
nonMon
})
nonMonIndexs <- reactive({which(colnames(data()) %in% rownames(nonMon()))})
output$nonMonIndexsCondition <- reactive({
length(nonMonIndexs()) == 0
})
outputOptions(output, 'nonMonIndexsCondition', suspendWhenHidden = FALSE)
output$summaryNonMonIndexs <- renderTable(data.frame("Items" = rownames(nonMon())),
colnames = FALSE)
nonMonIndexsList <- reactive({
nonMonIndexsList <- as.list(nonMonIndexs())
names(nonMonIndexsList) <- rownames(nonMon())
nonMonIndexsList
})
output$si <- renderUI(
selectInput(inputId = "ItemMonotonicity", label = "Select one:",
choices = nonMonIndexsList(), selected = nonMonIndexsList()[[1]],
multiple = FALSE, selectize = TRUE, width = NULL, size = NULL)
)
output$monotonicityPlot <- renderPlotly({
plotMonotonicity(mon(), item = as.numeric(input$ItemMonotonicity))
})
## Diagnostic:
IDs <- reactive({
IDs <- as.list(allflagged()$FlaggedIDs)
names(IDs) <- paste("Individual", allflagged()$FlaggedIDs)
IDs
})
output$selectInd <- renderUI(
selectInput(inputId = "SelectIndividual", label = "Select one:",
choices = IDs(), selected = IDs()[[1]],
multiple = FALSE, selectize = TRUE, width = NULL, size = NULL))
output$DiffPlot <- renderPlotly({
plotDiff(data(), caseRow = as.numeric(input$SelectIndividual))
})
m <- reactive(apply(data(), 2, sum))
data2 <- reactive(data()[, order(m(), decreasing = TRUE)])
Patterns <- reactive({
patterns(data = data2(), caseRow = as.numeric(input$SelectIndividual),
responseOptions = input$num) })
Patterns_thirds <- reactive({
lapply(Patterns(), sum_tertiles)})
Observed_thirds <- reactive({
sum_tertiles(as.numeric(data2()[as.numeric(input$SelectIndividual),]))
})
Observed_thirds_relative <- reactive({
sum_tertiles(as.numeric(data2()[as.numeric(input$SelectIndividual),]), frequence = "relative")
})
D <- reactive({
D <- unlist(lapply(Patterns_thirds(), function(x) dist(rbind(x, Observed_thirds()))))
names(D) <- c("Normal", "Cheater", "Creative", "Lucky Guesser", "Careless")
D <- as.data.frame(t(D))
D
})
output$D1 <- renderTable(D()[,1:3])
output$D2 <- renderTable(D()[,4:5])
output$htmltable <- renderUI({
# define CSS tags
css <- c("#bggreen {background-color: #e1f0c1;}")
# example data frame
# add the tag inside the cells
tab <- D()
tab[tab == min(tab)] <- paste(round(tab[tab == min(tab)], 2), "#bggreen")
# generate html table with pander package and markdown package
require("pander")
require("markdown")
require("stringr")
htmltab <- markdownToHTML(
text=pandoc.table.return(
tab,
style="rmarkdown", split.tables=Inf
),
fragment.only=TRUE
)
colortable(htmltab, css)
})
output$ProfilePlot <- renderPlotly({
plotProfile(data(), caseRow = as.numeric(input$SelectIndividual), main = "Observed pattern")
})
data_aux1 <- reactive({
dat <- data2()
dat[as.numeric(input$SelectIndividual),] <- Patterns()[["simNormal"]]
dat
})
output$ProfilePlot_Normal <- renderPlotly({
plotProfile(data_aux1(), caseRow = as.numeric(input$SelectIndividual), main = "Normal pattern")%>%
add_trace(x = c(1:3), y = Observed_thirds_relative(), type = "scatter", mode = "lines + markers",
color = I("gray"), inherit = FALSE, showlegend = FALSE)
})
data_aux2 <- reactive({
dat <- data2()
dat[as.numeric(input$SelectIndividual),] <- Patterns()[["simCheater"]]
dat
})
output$ProfilePlot_Cheater <- renderPlotly({
plotProfile(data_aux2(), caseRow = as.numeric(input$SelectIndividual), main = "Cheater pattern")%>%
add_trace(x = c(1:3), y = Observed_thirds_relative(), type = "scatter", mode = "lines + markers",
color = I("gray"), inherit = FALSE, showlegend = FALSE)
})
data_aux3 <- reactive({
dat <- data2()
dat[as.numeric(input$SelectIndividual),] <- Patterns()[["simCreative"]]
dat
})
output$ProfilePlot_Creative <- renderPlotly({
plotProfile(data_aux3(), caseRow = as.numeric(input$SelectIndividual), main = "Creative pattern")%>%
add_trace(x = c(1:3), y = Observed_thirds_relative(), type = "scatter", mode = "lines + markers",
color = I("gray"), inherit = FALSE, showlegend = FALSE)
})
data_aux4 <- reactive({
dat <- data2()
dat[as.numeric(input$SelectIndividual),] <- Patterns()[["simLucky"]]
dat
})
output$ProfilePlot_Lucky <- renderPlotly({
plotProfile(data_aux4(), caseRow = as.numeric(input$SelectIndividual), main = "Lucky guesser pattern")%>%
add_trace(x = c(1:3), y = Observed_thirds_relative(), type = "scatter", mode = "lines + markers",
color = I("gray"), inherit = FALSE, showlegend = FALSE)
})
data_aux5 <- reactive({
dat <- data2()
dat[as.numeric(input$SelectIndividual),] <- Patterns()[["simCareless"]]
dat
})
output$ProfilePlot_Careless <- renderPlotly({
plotProfile(data_aux5(), caseRow = as.numeric(input$SelectIndividual), main = "Careless pattern")%>%
add_trace(x = c(1:3), y = Observed_thirds_relative(), type = "scatter", mode = "lines + markers",
color = I("gray"), inherit = FALSE, showlegend = FALSE)
})
}
|
library(umap)
library(colorspace)
library(RColorBrewer)
library(scales)
library(ggplot2)
library(viridis)
# Compute a PCA embedding of cells from a methylation matrix.
#
# Rows of `met_mat` are regions, columns are cells. NAs are imputed via the
# project-level impute_nas(), the `vmr_count` most variable regions (VMRs)
# are kept, and PCA is run with cells as observations.
#
# Args:
#   met_mat:   numeric matrix (regions x cells); may contain NAs.
#   vmr_count: number of top-variance regions to retain before PCA.
# Returns:
#   Matrix of principal-component scores (cells x PCs), i.e. prcomp()$x.
compute_pca <- function(met_mat, vmr_count = 2000)
{
  met_mat_imputed <- impute_nas(met_mat)
  # Rank regions by variance and keep the `vmr_count` most variable ones.
  var_region <- apply(X = met_mat_imputed, FUN = var, MARGIN = 1)
  var_region_sorted <- sort(var_region, decreasing = TRUE)
  vmr <- names(head(var_region_sorted, vmr_count))
  met_mat_vmr <- met_mat_imputed[vmr, ]
  # Transpose so that cells become rows (observations) for prcomp().
  pca <- prcomp(t(met_mat_vmr))
  return(pca$x)
}
# Run UMAP on the leading principal components of a PCA embedding.
#
# Args:
#   pca:      matrix of PC scores (cells x PCs), e.g. output of compute_pca().
#   num_dims: number of leading PCs fed into UMAP.
#   min_dist: UMAP min_dist setting controlling embedding compactness.
# Returns:
#   Matrix with the 2-D UMAP coordinates of each cell (umap()$layout).
compute_umap <- function(pca, num_dims = 10, min_dist = 0.01)
{
  embedding_input <- pca[, 1:num_dims]
  # Fixed seed so the embedding is reproducible across runs.
  set.seed(123)
  umap_config <- umap.defaults
  umap_config$min_dist <- min_dist
  fitted_umap <- umap::umap(embedding_input, config = umap_config)
  return(fitted_umap$layout)
}
# Scatter plot of a 2-D dimensionality reduction (UMAP, PCA, ...).
#
# Args:
#   dim_red:    matrix/data.frame whose first two columns are the x/y
#               coordinates of each cell.
#   groups:     optional vector of group labels used to colour points.
#   title:      plot title (centred).
#   reduc_type: axis-label prefix, e.g. 'UMAP' or 'PCA'.
# Returns:
#   A ggplot object (not printed).
plot_dim_red <- function(dim_red, groups = NULL, title = '',
                         reduc_type = 'UMAP')
{
  # Fixed defects: the old par(mar=..., xpd=TRUE) call mutated global
  # graphics state without restoring it (and had no effect on the ggplot
  # output); the dummy `groups = rep(1, ...)` assignment was never read.
  group_flag <- !is.null(groups)
  # Scale point size down with the number of cells so dense plots stay legible.
  point_size <- 500 / nrow(dim_red)
  data <- data.frame(dim_red)
  colnames(data) <- c('X1', 'X2')
  if(group_flag)
  {
    gg <- ggplot(data, aes(x = X1, y = X2, color = groups))
  }else
  {
    gg <- ggplot(data, aes(x = X1, y = X2))
  }
  gg1 <- gg + geom_point(size = point_size) +
    theme_bw() +
    labs(color = '') +
    ggtitle(title) +
    theme(plot.title = element_text(hjust = 0.5)) +
    xlab(paste0(reduc_type, '-1')) +
    ylab(paste0(reduc_type, '-2'))
  return(gg1)
}#plot_dim_red
# Scatter plot of a 2-D embedding coloured by one feature (one row of
# feature_matrix), e.g. methylation level or gene activity per cell.
# Returns a ggplot object, or NULL (invisibly, via bare return()) when the
# feature is missing or has fewer informative cells than min_cells_with_info.
plot_feature <- function(dim_red, feature_matrix = NULL,
                         feature = 'NULL', title = '',
                         legend_title = '', reduc_type = 'UMAP',
                         min_cells_with_info = 1){
  # Bail out early if the requested feature is not a row of the matrix.
  idx = which(rownames(feature_matrix) == feature)
  if(length(idx) == 0 ){
    print(paste0(feature, ' not found in activity matrix, skipping. '))
    return()
  }
  # Require a minimum number of cells with non-NA values for this feature.
  non_na_cell_count = sum(!is.na(feature_matrix[feature, ]))
  if(non_na_cell_count < min_cells_with_info ){
    msg = paste0(feature, ': Only ', non_na_cell_count,
                 ' cells with information. Less than threshold: ',
                 min_cells_with_info, ' . Skipping.')
    print(msg)
    return()
  }
  num_color_slices = 100
  # NOTE(review): rbPal and viridis_hcl feed only data$Col below, which the
  # ggplot does not use (scale_color_viridis maps feature_vector directly);
  # this looks like leftover code from an earlier base-graphics version --
  # confirm before removing.
  rbPal <- colorRampPalette(c('navy', 'yellow', 'red'))
  viridis_hcl <- colorspace::sequential_hcl(num_color_slices,
                                            h = c(300, 75),
                                            c = c(35, 95),
                                            l = c(15, 90),
                                            power = c(0.8, 1.2))
  # Winsorize the feature to its 5th/95th percentiles so outliers do not
  # compress the colour scale.
  feature_vector = feature_matrix[feature, ]
  quant = quantile(x = feature_vector, probs = c(0.05, 0.95), na.rm = T)
  roof = quant[2]
  feature_vector[feature_vector < quant[1]] = quant[1]
  feature_vector[feature_vector > quant[2]] = quant[2]
  #print('---1')
  data = data.frame(dim_red)
  #data$Col <- rbPal(5 + 1)[as.numeric(cut(gene_activity_vector,breaks = 5 + 1))]
  #data$Col <- rbPal(num_color_slices + 1)[as.numeric(cut(gene_activity_vector,breaks = num_color_slices + 1))]
  data$Col <- viridis_hcl[as.numeric(cut(feature_vector, breaks = num_color_slices + 1))]
  head(data)
  #data$Col[gene_expression == 0] = 'gray'
  data$Col[is.na(feature_vector)] = 'gray'
  #opacity = 0.85
  #trans_colors = alpha(data$Col, opacity)
  #na_opacity = 0.15
  #trans_colors[trans_colors == "gray"] = alpha(data$Col, na_opacity)
  # Point size shrinks with the number of cells so large plots stay legible.
  point_size = 500 / length(feature_vector)
  gg <- ggplot(data, aes(X1, X2))
  gg1 = gg + geom_point(size=point_size, aes(colour = feature_vector) ) +
    scale_color_viridis(discrete=F) +
    theme_bw() +
    theme(plot.title = element_text( hjust = 0.5)) +
    ggtitle(paste0(title, '\n', feature)) +
    labs(colour = legend_title) +
    xlab(paste0(reduc_type, '-1') ) +
    ylab(paste0(reduc_type, '-2') )
  return(gg1)
}#plot_feature
# Run the full dimensionality-reduction pipeline (PCA then UMAP) on one
# sample's methylation matrix and bundle inputs and embeddings into a list.
#
# Args:
#   met_mat_for_dim_red: methylation matrix (regions x cells) to embed.
#   name_for_dim_red:    label of the region set, stored for downstream use.
#   plot_dir:            output directory for plots.
#                        NOTE(review): not referenced in this body --
#                        confirm whether vestigial or used by callers.
#   methylation_type:    context label ('CpG', ...), stored in the result.
#   min_call_count_threshold, max_ratio_of_na_cells:
#                        NOTE(review): also unused here -- presumably applied
#                        upstream; verify before removing.
# Returns:
#   List with the input matrix, labels, PCA scores and UMAP layout.
reduce_dims_for_sample <- function(met_mat_for_dim_red,
                                   #met_mat_for_features,
                                   name_for_dim_red,
                                   #name_for_features,
                                   plot_dir,
                                   methylation_type = 'CpG',
                                   min_call_count_threshold = 10,
                                   max_ratio_of_na_cells = 0.25
                                   #features = NULL,
                                   #legend_title = 'Methylation'
)
{
  # No-op inside a function (leftover interactive sanity check).
  met_mat_for_dim_red[1:5, 1:5]
  pca <- compute_pca(met_mat_for_dim_red, vmr_count = 2000 )
  dim(pca)
  # Quick-look base plots of the embeddings (drawn to the active device).
  plot(pca[, 1:2])
  umap <- compute_umap(pca, num_dims =10, min_dist = 0.01)
  plot(umap)
  dim_red_object = list(met_mat_for_dim_red = met_mat_for_dim_red,
                        #met_mat_for_features = met_mat_for_features,
                        name_for_dim_red = name_for_dim_red,
                        #name_for_features = name_for_features,
                        methylation_type = methylation_type,
                        pca = pca,
                        umap = umap)
  return(dim_red_object)
}
# Cluster cells in UMAP space using density-peak clustering.
#
# Args:
#   umap:            matrix of UMAP coordinates (cells x 2); rownames are
#                    cell identifiers.
#   rho_threshold:   density threshold forwarded to dens_clus().
#   delta_threshold: distance-to-higher-density threshold forwarded to
#                    dens_clus().
# Returns:
#   Vector of cluster assignments named by cell, one per row of `umap`.
compute_clusters <- function(umap, rho_threshold = 1, delta_threshold = 4)
{
  # (Removed a dead `head(clusters)` expression that was a no-op inside
  # the function body.)
  clusters <- dens_clus(umap, rho_threshold = rho_threshold, delta_threshold = delta_threshold)
  names(clusters) <- rownames(umap)
  return(clusters)
}
# Save a UMAP feature plot (EPS and PNG) for every requested feature.
# NOTE(review): depends on the globals `plot_dir` and `sample_name` from the
# calling environment -- TODO pass them in explicitly as arguments.
plot_features <- function(umap, feature_matrix, features, name_for_dim_red, name_for_features,
                          legend_title, methylation_type = 'CpG')
{
  # One sub-directory per (dim-red region set, feature region set) pair.
  feature_plot_dir = paste0(plot_dir, '/DimRed/regions_',name_for_dim_red,'/', name_for_features, '/')
  dir.create(feature_plot_dir, recursive = T, showWarnings = F )
  #plot_file = paste0(plot_dir, '/genes.eps')
  #cairo_ps(plot_file, fallback_resolution = 300, onefile = T)
  #setEPS()
  dir.create(paste0(feature_plot_dir, '/eps/') )
  dir.create(paste0(feature_plot_dir, '/png/') )
  if(length(features) > 0)
  {
    for(feature in features)
    {
      print(feature)
      #cairo_ps(plot_file, fallback_resolution = 2400)
      #postscript(plot_file, onefile = F, width = 7, height = 6)
      title = paste0(sample_name, ' - ' , methylation_type ,
                     '\nDR region: ', name_for_dim_red
                     , '\nFeature region: ', name_for_features
      )
      gg1 = plot_feature(dim_red = umap,
                         feature_matrix = feature_matrix,
                         feature = feature,
                         title = title,
                         reduc_type = 'UMAP',
                         legend_title = legend_title)
      print(gg1)
      # Write both vector (EPS) and raster (PNG) versions of the plot.
      plot_file = paste0(feature_plot_dir, '/eps/', feature, '.eps')
      ggsave(gg1, filename = plot_file, device = 'eps', width = 20, height = 20, units = 'cm')
      plot_file = paste0(feature_plot_dir, '/png/', feature, '.png')
      ggsave(gg1, filename = plot_file, device = 'png', width = 20, height = 20, units = 'cm')
      #dev.off()
    }#for(gene in marker_genes)
  }#if(!is.na(features))
}#plot_features
| /R/dimensionality_reduction.R | no_license | yasin-uzun/SINBAD.0.2 | R | false | false | 7,478 | r | library(umap)
library(colorspace)
library(RColorBrewer)
library(scales)
library(ggplot2)
library(viridis)
# Compute a PCA embedding of cells from a methylation matrix.
#
# Rows of `met_mat` are regions, columns are cells. NAs are imputed via the
# project-level impute_nas(), the `vmr_count` most variable regions (VMRs)
# are kept, and PCA is run with cells as observations.
#
# Args:
#   met_mat:   numeric matrix (regions x cells); may contain NAs.
#   vmr_count: number of top-variance regions to retain before PCA.
# Returns:
#   Matrix of principal-component scores (cells x PCs), i.e. prcomp()$x.
compute_pca <- function(met_mat, vmr_count = 2000)
{
  met_mat_imputed <- impute_nas(met_mat)
  # Rank regions by variance and keep the `vmr_count` most variable ones.
  var_region <- apply(X = met_mat_imputed, FUN = var, MARGIN = 1)
  var_region_sorted <- sort(var_region, decreasing = TRUE)
  vmr <- names(head(var_region_sorted, vmr_count))
  met_mat_vmr <- met_mat_imputed[vmr, ]
  # Transpose so that cells become rows (observations) for prcomp().
  pca <- prcomp(t(met_mat_vmr))
  return(pca$x)
}
# Run UMAP on the leading principal components of a PCA embedding.
#
# Args:
#   pca:      matrix of PC scores (cells x PCs), e.g. output of compute_pca().
#   num_dims: number of leading PCs fed into UMAP.
#   min_dist: UMAP min_dist setting controlling embedding compactness.
# Returns:
#   Matrix with the 2-D UMAP coordinates of each cell (umap()$layout).
compute_umap <- function(pca, num_dims = 10, min_dist = 0.01)
{
  embedding_input <- pca[, 1:num_dims]
  # Fixed seed so the embedding is reproducible across runs.
  set.seed(123)
  umap_config <- umap.defaults
  umap_config$min_dist <- min_dist
  fitted_umap <- umap::umap(embedding_input, config = umap_config)
  return(fitted_umap$layout)
}
# Scatter plot of a 2-D dimensionality reduction (UMAP, PCA, ...).
#
# Args:
#   dim_red:    matrix/data.frame whose first two columns are the x/y
#               coordinates of each cell.
#   groups:     optional vector of group labels used to colour points.
#   title:      plot title (centred).
#   reduc_type: axis-label prefix, e.g. 'UMAP' or 'PCA'.
# Returns:
#   A ggplot object (not printed).
plot_dim_red <- function(dim_red, groups = NULL, title = '',
                         reduc_type = 'UMAP')
{
  # Fixed defects: the old par(mar=..., xpd=TRUE) call mutated global
  # graphics state without restoring it (and had no effect on the ggplot
  # output); the dummy `groups = rep(1, ...)` assignment was never read.
  group_flag <- !is.null(groups)
  # Scale point size down with the number of cells so dense plots stay legible.
  point_size <- 500 / nrow(dim_red)
  data <- data.frame(dim_red)
  colnames(data) <- c('X1', 'X2')
  if(group_flag)
  {
    gg <- ggplot(data, aes(x = X1, y = X2, color = groups))
  }else
  {
    gg <- ggplot(data, aes(x = X1, y = X2))
  }
  gg1 <- gg + geom_point(size = point_size) +
    theme_bw() +
    labs(color = '') +
    ggtitle(title) +
    theme(plot.title = element_text(hjust = 0.5)) +
    xlab(paste0(reduc_type, '-1')) +
    ylab(paste0(reduc_type, '-2'))
  return(gg1)
}#plot_dim_red
# Scatter plot of a 2-D embedding coloured by one feature (one row of
# feature_matrix), e.g. methylation level or gene activity per cell.
# Returns a ggplot object, or NULL (invisibly, via bare return()) when the
# feature is missing or has fewer informative cells than min_cells_with_info.
plot_feature <- function(dim_red, feature_matrix = NULL,
                         feature = 'NULL', title = '',
                         legend_title = '', reduc_type = 'UMAP',
                         min_cells_with_info = 1){
  # Bail out early if the requested feature is not a row of the matrix.
  idx = which(rownames(feature_matrix) == feature)
  if(length(idx) == 0 ){
    print(paste0(feature, ' not found in activity matrix, skipping. '))
    return()
  }
  # Require a minimum number of cells with non-NA values for this feature.
  non_na_cell_count = sum(!is.na(feature_matrix[feature, ]))
  if(non_na_cell_count < min_cells_with_info ){
    msg = paste0(feature, ': Only ', non_na_cell_count,
                 ' cells with information. Less than threshold: ',
                 min_cells_with_info, ' . Skipping.')
    print(msg)
    return()
  }
  num_color_slices = 100
  # NOTE(review): rbPal and viridis_hcl feed only data$Col below, which the
  # ggplot does not use (scale_color_viridis maps feature_vector directly);
  # this looks like leftover code from an earlier base-graphics version --
  # confirm before removing.
  rbPal <- colorRampPalette(c('navy', 'yellow', 'red'))
  viridis_hcl <- colorspace::sequential_hcl(num_color_slices,
                                            h = c(300, 75),
                                            c = c(35, 95),
                                            l = c(15, 90),
                                            power = c(0.8, 1.2))
  # Winsorize the feature to its 5th/95th percentiles so outliers do not
  # compress the colour scale.
  feature_vector = feature_matrix[feature, ]
  quant = quantile(x = feature_vector, probs = c(0.05, 0.95), na.rm = T)
  roof = quant[2]
  feature_vector[feature_vector < quant[1]] = quant[1]
  feature_vector[feature_vector > quant[2]] = quant[2]
  #print('---1')
  data = data.frame(dim_red)
  #data$Col <- rbPal(5 + 1)[as.numeric(cut(gene_activity_vector,breaks = 5 + 1))]
  #data$Col <- rbPal(num_color_slices + 1)[as.numeric(cut(gene_activity_vector,breaks = num_color_slices + 1))]
  data$Col <- viridis_hcl[as.numeric(cut(feature_vector, breaks = num_color_slices + 1))]
  head(data)
  #data$Col[gene_expression == 0] = 'gray'
  data$Col[is.na(feature_vector)] = 'gray'
  #opacity = 0.85
  #trans_colors = alpha(data$Col, opacity)
  #na_opacity = 0.15
  #trans_colors[trans_colors == "gray"] = alpha(data$Col, na_opacity)
  # Point size shrinks with the number of cells so large plots stay legible.
  point_size = 500 / length(feature_vector)
  gg <- ggplot(data, aes(X1, X2))
  gg1 = gg + geom_point(size=point_size, aes(colour = feature_vector) ) +
    scale_color_viridis(discrete=F) +
    theme_bw() +
    theme(plot.title = element_text( hjust = 0.5)) +
    ggtitle(paste0(title, '\n', feature)) +
    labs(colour = legend_title) +
    xlab(paste0(reduc_type, '-1') ) +
    ylab(paste0(reduc_type, '-2') )
  return(gg1)
}#plot_feature
# Run the full dimensionality-reduction pipeline (PCA then UMAP) on one
# sample's methylation matrix and bundle inputs and embeddings into a list.
#
# Args:
#   met_mat_for_dim_red: methylation matrix (regions x cells) to embed.
#   name_for_dim_red:    label of the region set, stored for downstream use.
#   plot_dir:            output directory for plots.
#                        NOTE(review): not referenced in this body --
#                        confirm whether vestigial or used by callers.
#   methylation_type:    context label ('CpG', ...), stored in the result.
#   min_call_count_threshold, max_ratio_of_na_cells:
#                        NOTE(review): also unused here -- presumably applied
#                        upstream; verify before removing.
# Returns:
#   List with the input matrix, labels, PCA scores and UMAP layout.
reduce_dims_for_sample <- function(met_mat_for_dim_red,
                                   #met_mat_for_features,
                                   name_for_dim_red,
                                   #name_for_features,
                                   plot_dir,
                                   methylation_type = 'CpG',
                                   min_call_count_threshold = 10,
                                   max_ratio_of_na_cells = 0.25
                                   #features = NULL,
                                   #legend_title = 'Methylation'
)
{
  # No-op inside a function (leftover interactive sanity check).
  met_mat_for_dim_red[1:5, 1:5]
  pca <- compute_pca(met_mat_for_dim_red, vmr_count = 2000 )
  dim(pca)
  # Quick-look base plots of the embeddings (drawn to the active device).
  plot(pca[, 1:2])
  umap <- compute_umap(pca, num_dims =10, min_dist = 0.01)
  plot(umap)
  dim_red_object = list(met_mat_for_dim_red = met_mat_for_dim_red,
                        #met_mat_for_features = met_mat_for_features,
                        name_for_dim_red = name_for_dim_red,
                        #name_for_features = name_for_features,
                        methylation_type = methylation_type,
                        pca = pca,
                        umap = umap)
  return(dim_red_object)
}
# Cluster cells in UMAP space using density-peak clustering.
#
# Args:
#   umap:            matrix of UMAP coordinates (cells x 2); rownames are
#                    cell identifiers.
#   rho_threshold:   density threshold forwarded to dens_clus().
#   delta_threshold: distance-to-higher-density threshold forwarded to
#                    dens_clus().
# Returns:
#   Vector of cluster assignments named by cell, one per row of `umap`.
compute_clusters <- function(umap, rho_threshold = 1, delta_threshold = 4)
{
  # (Removed a dead `head(clusters)` expression that was a no-op inside
  # the function body.)
  clusters <- dens_clus(umap, rho_threshold = rho_threshold, delta_threshold = delta_threshold)
  names(clusters) <- rownames(umap)
  return(clusters)
}
# Save a UMAP feature plot (EPS and PNG) for every requested feature.
# NOTE(review): depends on the globals `plot_dir` and `sample_name` from the
# calling environment -- TODO pass them in explicitly as arguments.
plot_features <- function(umap, feature_matrix, features, name_for_dim_red, name_for_features,
                          legend_title, methylation_type = 'CpG')
{
  # One sub-directory per (dim-red region set, feature region set) pair.
  feature_plot_dir = paste0(plot_dir, '/DimRed/regions_',name_for_dim_red,'/', name_for_features, '/')
  dir.create(feature_plot_dir, recursive = T, showWarnings = F )
  #plot_file = paste0(plot_dir, '/genes.eps')
  #cairo_ps(plot_file, fallback_resolution = 300, onefile = T)
  #setEPS()
  dir.create(paste0(feature_plot_dir, '/eps/') )
  dir.create(paste0(feature_plot_dir, '/png/') )
  if(length(features) > 0)
  {
    for(feature in features)
    {
      print(feature)
      #cairo_ps(plot_file, fallback_resolution = 2400)
      #postscript(plot_file, onefile = F, width = 7, height = 6)
      title = paste0(sample_name, ' - ' , methylation_type ,
                     '\nDR region: ', name_for_dim_red
                     , '\nFeature region: ', name_for_features
      )
      gg1 = plot_feature(dim_red = umap,
                         feature_matrix = feature_matrix,
                         feature = feature,
                         title = title,
                         reduc_type = 'UMAP',
                         legend_title = legend_title)
      print(gg1)
      # Write both vector (EPS) and raster (PNG) versions of the plot.
      plot_file = paste0(feature_plot_dir, '/eps/', feature, '.eps')
      ggsave(gg1, filename = plot_file, device = 'eps', width = 20, height = 20, units = 'cm')
      plot_file = paste0(feature_plot_dir, '/png/', feature, '.png')
      ggsave(gg1, filename = plot_file, device = 'png', width = 20, height = 20, units = 'cm')
      #dev.off()
    }#for(gene in marker_genes)
  }#if(!is.na(features))
}#plot_features
|
/Oaxaca.R | no_license | allvesbru/Data-Micro | R | false | false | 12,466 | r | ||
# Walkthrough of survival analysis in R using the OIsurv package:
# survival objects, Kaplan-Meier estimation, confidence bands, cumulative
# hazards, tests for 2+ samples, Cox proportional-hazards models
# (time-independent and time-dependent) and accelerated failure time models.
# NOTE(review): rm(list=ls()) wipes the user's workspace and attach()/detach()
# mutate the search path -- acceptable in an interactive teaching script,
# but avoid both in reusable code.
rm(list=ls())
#===> loading packages and such <===#
#install.packages("OIsurv")
library(OIsurv)
data(aids)
aids
attach(aids)
infect
detach(aids)
aids$infect
#===> survival object <===#
data(tongue); attach(tongue) # the following will not affect computations
# create a subset for just the first group by using [type==1]
my.surv.object <- Surv(time[type==1], delta[type==1])
my.surv.object
detach(tongue)
# Left-truncated right-censored data: entry age to event age plus indicator.
data(psych); attach(psych)
my.surv.object <- Surv(age, age+time, death)
my.surv.object
detach(psych)
#===> K-M Estimate <===#
data(tongue); attach(tongue)
my.surv <- Surv(time[type==1], delta[type==1])
survfit(my.surv ~ 1)
my.fit <- survfit(my.surv ~ 1)
summary(my.fit)$surv # returns the Kaplan-Meier estimate at each t_i
summary(my.fit)$time # {t_i}
summary(my.fit)$n.risk # {Y_i}
summary(my.fit)$n.event # {d_i}
summary(my.fit)$std.err # standard error of the K-M estimate at {t_i}
summary(my.fit)$lower # lower pointwise estimates (alternatively, $upper)
str(my.fit) # full summary of the my.fit object
str(summary(my.fit)) # full summary of the my.fit object
pdf("../figures/kmPlot.pdf", 7, 4.5)
plot(my.fit, main="Kaplan-Meier estimate with 95% confidence bounds",
xlab="time", ylab="survival function")
dev.off()
my.fit1 <- survfit( Surv(time, delta) ~ type ) # here the key is "type"
detach(tongue)
#===> confidence bands <===#
data(bmt); attach(bmt)
my.surv <- Surv(t2[group==1], d3[group==1])
my.cb <- confBands(my.surv, confLevel=0.95, type="hall")
pdf("../figures/confBand.pdf", 8, 5)
plot(survfit(my.surv ~ 1), xlim=c(100, 600), xlab="time",
ylab="Estimated Survival Function",
main="Reproducing Confidence Bands for Example 4.2 in Klein/Moeschberger")
lines(my.cb$time, my.cb$lower, lty=3, type="s")
lines(my.cb$time, my.cb$upper, lty=3, type="s")
legend(100, 0.3, legend=c("K-M survival estimate",
"pointwise intervals","confidence bands"), lty=1:3)
dev.off()
detach(bmt)
#===> cumulative hazard <===#
# Compare H.hat = -log(S.hat) with the Nelson-Aalen estimator H.tilde.
data(tongue); attach(tongue)
my.surv <- Surv(time[type==1], delta[type==1])
my.fit <- summary(survfit(my.surv ~ 1))
H.hat <- -log(my.fit$surv)
H.hat <- c(H.hat, tail(H.hat, 1))
h.sort.of <- my.fit$n.event / my.fit$n.risk
H.tilde <- cumsum(h.sort.of)
H.tilde <- c(H.tilde, tail(H.tilde, 1))
pdf("../figures/cumHazard.pdf", 6, 4)
plot(c(my.fit$time, 250), H.hat, xlab="time", ylab="cumulative hazard",
main="comparing cumulative hazards", ylim=range(c(H.hat, H.tilde)), type="s")
points(c(my.fit$time, 250), H.tilde, lty=2, type="s")
legend("topleft", legend=c("H.hat","H.tilde"), lty=1:2)
dev.off()
detach(tongue)
#===> mean/median <===#
data(drug6mp); attach(drug6mp)
my.surv <- Surv(t1, rep(1, 21)) # all placebo patients observed
survfit(my.surv ~ 1)
print(survfit(my.surv ~ 1), print.rmean=TRUE)
detach(drug6mp)
#===> test for 2+ samples <===#
# rho=0 (default) gives the log-rank test; rho=1 the Peto-Peto modification.
data(btrial); attach(btrial)
survdiff(Surv(time, death) ~ im) # output omitted
survdiff(Surv(time, death) ~ im, rho=1) # output omitted
detach(btrial)
#===> coxph, time-independent <===#
data(burn); attach(burn)
my.surv <- Surv(T1, D1)
coxph.fit <- coxph(my.surv ~ Z1 + as.factor(Z11), method="breslow")
coxph.fit
co <- coxph.fit$coefficients # may use coxph.fit$coeff instead
va <- coxph.fit$var # I^(-1), estimated cov matrix of the estimates
ll <- coxph.fit$loglik # log-likelihood for alt and null MLEs, resp.
my.survfit.object <- survfit(coxph.fit)
hold <- survfit(my.surv ~ 1)
#source("http://www.stat.ucla.edu/~david/teac/surv/local-coxph-test.R")
coxph.fit
# Manual Wald test of H0: the three as.factor(Z11) coefficients are all zero,
# using contrast matrix C with the coefficient vector and covariance matrix.
C <- matrix(c(0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1), nrow=3, byrow=TRUE)
d <- rep(0, 3)
t1 <- C %*% co - d
t2 <- C %*% va %*% t(C)
XW2 <- c(t(t1) %*% solve(t2) %*% t1)
# p-value from the chi-squared reference distribution with 3 df.
pchisq(XW2, 3, lower.tail=FALSE)
#local.coxph.test(coxph.fit, 2:4)
my.survfit.object <- survfit(coxph.fit)
detach(burn)
#===> coxph, time-dependent <===#
data(relapse)
relapse
# Build start-stop (counting-process) records: each subject with an
# intervention is split into a pre- and a post-intervention interval.
N <- dim(relapse)[1]
t1 <- rep(0, N+sum(!is.na(relapse$int))) # initialize start time at 0
t2 <- rep(-1, length(t1)) # build vector for end times
d <- rep(-1, length(t1)) # whether event was censored
g <- rep(-1, length(t1)) # gender covariate
i <- rep(FALSE, length(t1)) # initialize intervention at FALSE
j <- 1
for(ii in 1:dim(relapse)[1]){
if(is.na(relapse$int[ii])){ # no intervention, copy survival record
t2[j] <- relapse$event[ii]
d[j] <- relapse$delta[ii]
g[j] <- relapse$gender[ii]
j <- j+1
} else { # intervention, split records
g[j+0:1] <- relapse$gender[ii] # gender is same for each time
d[j] <- 0 # no relapse observed pre-intervention
d[j+1] <- relapse$delta[ii] # relapse occur post-intervention?
i[j+1] <- TRUE # intervention covariate, post-intervention
t2[j] <- relapse$int[ii]-1 # end of pre-intervention
t1[j+1] <- relapse$int[ii]-1 # start of post-intervention
t2[j+1] <- relapse$event[ii] # end of post-intervention
j <- j+2 # two records added
}
}
mySurv <- Surv(t1, t2, d) # pg 3 discusses left-trunc. right-cens. data
myCPH <- coxph(mySurv ~ g + i)
#data(burn); attach(burn)
##source("http://www.stat.ucla.edu/~david/teac/surv/time-dep-coxph.R")
#td.coxph <- timeDepCoxph(burn, "T1", "D1", 2:4, "Z1", verbose=FALSE)
#td.coxph # some model output is omitted for brevity
#detach(burn)
#===> AFT models <===#
data(larynx)
attach(larynx)
srFit <- survreg(Surv(time, delta) ~ as.factor(stage) + age, dist="weibull")
summary(srFit)
srFitExp <- survreg(Surv(time, delta) ~ as.factor(stage) + age, dist="exponential")
summary(srFitExp)
srFitExp$coeff # covariate coefficients
srFitExp$icoef # intercept and scale coefficients
srFitExp$var # variance-covariance matrix
srFitExp$loglik # log-likelihood
srFit$scale # not using srFitExp (defaulted to 1)
detach(larynx)
| /survivalInR-for-statsTeachR/code/SurvGuideCode.R | no_license | OpenIntroOrg/stat-online-extras | R | false | false | 5,952 | r | rm(list=ls())
#===> loading packages and such <===#
#install.packages("OIsurv")
library(OIsurv)
data(aids)
aids
attach(aids)
infect
detach(aids)
aids$infect
#===> survival object <===#
data(tongue); attach(tongue) # the following will not affect computations
# create a subset for just the first group by using [type==1]
my.surv.object <- Surv(time[type==1], delta[type==1])
my.surv.object
detach(tongue)
data(psych); attach(psych)
my.surv.object <- Surv(age, age+time, death)
my.surv.object
detach(psych)
#===> K-M Estimate <===#
data(tongue); attach(tongue)
my.surv <- Surv(time[type==1], delta[type==1])
survfit(my.surv ~ 1)
my.fit <- survfit(my.surv ~ 1)
summary(my.fit)$surv # returns the Kaplan-Meier estimate at each t_i
summary(my.fit)$time # {t_i}
summary(my.fit)$n.risk # {Y_i}
summary(my.fit)$n.event # {d_i}
summary(my.fit)$std.err # standard error of the K-M estimate at {t_i}
summary(my.fit)$lower # lower pointwise estimates (alternatively, $upper)
str(my.fit) # full summary of the my.fit object
str(summary(my.fit)) # full summary of the my.fit object
pdf("../figures/kmPlot.pdf", 7, 4.5)
plot(my.fit, main="Kaplan-Meier estimate with 95% confidence bounds",
xlab="time", ylab="survival function")
dev.off()
my.fit1 <- survfit( Surv(time, delta) ~ type ) # here the key is "type"
detach(tongue)
#===> confidence bands <===#
data(bmt); attach(bmt)
my.surv <- Surv(t2[group==1], d3[group==1])
my.cb <- confBands(my.surv, confLevel=0.95, type="hall")
pdf("../figures/confBand.pdf", 8, 5)
plot(survfit(my.surv ~ 1), xlim=c(100, 600), xlab="time",
ylab="Estimated Survival Function",
main="Reproducing Confidence Bands for Example 4.2 in Klein/Moeschberger")
lines(my.cb$time, my.cb$lower, lty=3, type="s")
lines(my.cb$time, my.cb$upper, lty=3, type="s")
legend(100, 0.3, legend=c("K-M survival estimate",
"pointwise intervals","confidence bands"), lty=1:3)
dev.off()
detach(bmt)
#===> cumulative hazard <===#
data(tongue); attach(tongue)
my.surv <- Surv(time[type==1], delta[type==1])
my.fit <- summary(survfit(my.surv ~ 1))
H.hat <- -log(my.fit$surv)
H.hat <- c(H.hat, tail(H.hat, 1))
h.sort.of <- my.fit$n.event / my.fit$n.risk
H.tilde <- cumsum(h.sort.of)
H.tilde <- c(H.tilde, tail(H.tilde, 1))
pdf("../figures/cumHazard.pdf", 6, 4)
plot(c(my.fit$time, 250), H.hat, xlab="time", ylab="cumulative hazard",
main="comparing cumulative hazards", ylim=range(c(H.hat, H.tilde)), type="s")
points(c(my.fit$time, 250), H.tilde, lty=2, type="s")
legend("topleft", legend=c("H.hat","H.tilde"), lty=1:2)
dev.off()
detach(tongue)
#===> mean/median <===#
data(drug6mp); attach(drug6mp)
my.surv <- Surv(t1, rep(1, 21)) # all placebo patients observed
survfit(my.surv ~ 1)
print(survfit(my.surv ~ 1), print.rmean=TRUE)
detach(drug6mp)
#===> test for 2+ samples <===#
data(btrial); attach(btrial)
survdiff(Surv(time, death) ~ im) # output omitted
survdiff(Surv(time, death) ~ im, rho=1) # output omitted
detach(btrial)
#===> coxph, time-independent <===#
data(burn); attach(burn)
my.surv <- Surv(T1, D1)
coxph.fit <- coxph(my.surv ~ Z1 + as.factor(Z11), method="breslow")
coxph.fit
co <- coxph.fit$coefficients # may use coxph.fit$coeff instead
va <- coxph.fit$var # I^(-1), estimated cov matrix of the estimates
ll <- coxph.fit$loglik # log-likelihood for alt and null MLEs, resp.
my.survfit.object <- survfit(coxph.fit)
hold <- survfit(my.surv ~ 1)
#source("http://www.stat.ucla.edu/~david/teac/surv/local-coxph-test.R")
coxph.fit
C <- matrix(c(0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1), nrow=3, byrow=TRUE)
d <- rep(0, 3)
t1 <- C %*% co - d
t2 <- C %*% va %*% t(C)
XW2 <- c(t(t1) %*% solve(t2) %*% t1)
pchisq(XW2, 3, lower.tail=FALSE)
#local.coxph.test(coxph.fit, 2:4)
my.survfit.object <- survfit(coxph.fit)
detach(burn)
#===> coxph, time-dependent <===#
data(relapse)
relapse
N <- dim(relapse)[1]
t1 <- rep(0, N+sum(!is.na(relapse$int))) # initialize start time at 0
t2 <- rep(-1, length(t1)) # build vector for end times
d <- rep(-1, length(t1)) # whether event was censored
g <- rep(-1, length(t1)) # gender covariate
i <- rep(FALSE, length(t1)) # initialize intervention at FALSE
j <- 1
for(ii in 1:dim(relapse)[1]){
if(is.na(relapse$int[ii])){ # no intervention, copy survival record
t2[j] <- relapse$event[ii]
d[j] <- relapse$delta[ii]
g[j] <- relapse$gender[ii]
j <- j+1
} else { # intervention, split records
g[j+0:1] <- relapse$gender[ii] # gender is same for each time
d[j] <- 0 # no relapse observed pre-intervention
d[j+1] <- relapse$delta[ii] # relapse occur post-intervention?
i[j+1] <- TRUE # intervention covariate, post-intervention
t2[j] <- relapse$int[ii]-1 # end of pre-intervention
t1[j+1] <- relapse$int[ii]-1 # start of post-intervention
t2[j+1] <- relapse$event[ii] # end of post-intervention
j <- j+2 # two records added
}
}
mySurv <- Surv(t1, t2, d) # pg 3 discusses left-trunc. right-cens. data
myCPH <- coxph(mySurv ~ g + i)
#data(burn); attach(burn)
##source("http://www.stat.ucla.edu/~david/teac/surv/time-dep-coxph.R")
#td.coxph <- timeDepCoxph(burn, "T1", "D1", 2:4, "Z1", verbose=FALSE)
#td.coxph # some model output is omitted for brevity
#detach(burn)
#===> AFT models <===#
data(larynx)
attach(larynx)
srFit <- survreg(Surv(time, delta) ~ as.factor(stage) + age, dist="weibull")
summary(srFit)
srFitExp <- survreg(Surv(time, delta) ~ as.factor(stage) + age, dist="exponential")
summary(srFitExp)
srFitExp$coeff # covariate coefficients
srFitExp$icoef # intercept and scale coefficients
srFitExp$var # variance-covariance matrix
srFitExp$loglik # log-likelihood
srFit$scale # not using srFitExp (defaulted to 1)
detach(larynx)
|
rc <- function(cube = rubix()){
  # Turn the right layer: shift the right-hand column strip (rows 3, 6, 9
  # of each face table's 3rd column) through top -> front -> base -> hind,
  # then rotate the stickers of the right face itself.
  carried <- cube$top[c(3, 6, 9), 3]
  cube$top[c(3, 6, 9), 3] <- cube$front[c(3, 6, 9), 3]
  cube$front[c(3, 6, 9), 3] <- cube$base[c(3, 6, 9), 3]
  cube$base[c(3, 6, 9), 3] <- cube$hind[c(3, 6, 9), 3]
  cube$hind[c(3, 6, 9), 3] <- carried
  # One row permutation reproduces the original pair of four-cycles on
  # the face's eight outer cells (the centre, cell 5, stays put).
  turn <- c(7, 4, 1, 8, 5, 2, 9, 6, 3)
  cube$right[, 3] <- cube$right[turn, 3]
  print(plot_rubix(cube))
  return(cube)
}
ra <- function(cube = rubix()){
  # Turn the right layer in the opposite sense: the column strip moves
  # top -> hind -> base -> front, and the right face rotates in reverse.
  carried <- cube$top[c(3, 6, 9), 3]
  cube$top[c(3, 6, 9), 3] <- cube$hind[c(3, 6, 9), 3]
  cube$hind[c(3, 6, 9), 3] <- cube$base[c(3, 6, 9), 3]
  cube$base[c(3, 6, 9), 3] <- cube$front[c(3, 6, 9), 3]
  cube$front[c(3, 6, 9), 3] <- carried
  # Inverse of the permutation used for the clockwise turn; reproduces
  # the original pair of four-cycles (centre cell 5 is fixed).
  turn <- c(3, 6, 9, 2, 5, 8, 1, 4, 7)
  cube$right[, 3] <- cube$right[turn, 3]
  print(plot_rubix(cube))
  return(cube)
}
lc <- function(cube = rubix()){
  # Turn the left layer: shift the left-hand column strip (rows 1, 4, 7)
  # through top -> hind -> base -> front, then rotate the left face.
  carried <- cube$top[c(1, 4, 7), 3]
  cube$top[c(1, 4, 7), 3] <- cube$hind[c(1, 4, 7), 3]
  cube$hind[c(1, 4, 7), 3] <- cube$base[c(1, 4, 7), 3]
  cube$base[c(1, 4, 7), 3] <- cube$front[c(1, 4, 7), 3]
  cube$front[c(1, 4, 7), 3] <- carried
  # One row permutation reproduces the original pair of four-cycles on
  # the face's eight outer cells (the centre, cell 5, stays put).
  turn <- c(7, 4, 1, 8, 5, 2, 9, 6, 3)
  cube$left[, 3] <- cube$left[turn, 3]
  print(plot_rubix(cube))
  return(cube)
}
la <- function(cube = rubix()){
  # Turn the left layer in the opposite sense: the strip (rows 1, 4, 7)
  # moves top -> front -> base -> hind, and the left face rotates in reverse.
  carried <- cube$top[c(1, 4, 7), 3]
  cube$top[c(1, 4, 7), 3] <- cube$front[c(1, 4, 7), 3]
  cube$front[c(1, 4, 7), 3] <- cube$base[c(1, 4, 7), 3]
  cube$base[c(1, 4, 7), 3] <- cube$hind[c(1, 4, 7), 3]
  cube$hind[c(1, 4, 7), 3] <- carried
  # Inverse permutation; reproduces the original pair of four-cycles
  # (centre cell 5 is fixed).
  turn <- c(3, 6, 9, 2, 5, 8, 1, 4, 7)
  cube$left[, 3] <- cube$left[turn, 3]
  print(plot_rubix(cube))
  return(cube)
}
fc <- function(cube = rubix()){
  # Turn the front layer: cycle the adjacent edge strips
  # top(7,8,9) <- left(7,8,9) <- base(3,2,1) <- right(7,8,9) <- top,
  # then rotate the front face itself.
  carried <- cube$top[c(7, 8, 9), 3]
  cube$top[c(7, 8, 9), 3] <- cube$left[c(7, 8, 9), 3]
  cube$left[c(7, 8, 9), 3] <- cube$base[c(3, 2, 1), 3]
  cube$base[c(3, 2, 1), 3] <- cube$right[c(7, 8, 9), 3]
  cube$right[c(7, 8, 9), 3] <- carried
  # One row permutation reproduces the original pair of four-cycles on
  # the face's eight outer cells (the centre, cell 5, stays put).
  turn <- c(7, 4, 1, 8, 5, 2, 9, 6, 3)
  cube$front[, 3] <- cube$front[turn, 3]
  print(plot_rubix(cube))
  return(cube)
}
fa <- function(cube = rubix()){
  # Turn the front layer in the opposite sense: strips cycle
  # top(7,8,9) <- right(7,8,9) <- base(3,2,1) <- left(7,8,9) <- top,
  # and the front face rotates in reverse.
  carried <- cube$top[c(7, 8, 9), 3]
  cube$top[c(7, 8, 9), 3] <- cube$right[c(7, 8, 9), 3]
  cube$right[c(7, 8, 9), 3] <- cube$base[c(3, 2, 1), 3]
  cube$base[c(3, 2, 1), 3] <- cube$left[c(7, 8, 9), 3]
  cube$left[c(7, 8, 9), 3] <- carried
  # Inverse permutation; reproduces the original pair of four-cycles
  # (centre cell 5 is fixed).
  turn <- c(3, 6, 9, 2, 5, 8, 1, 4, 7)
  cube$front[, 3] <- cube$front[turn, 3]
  print(plot_rubix(cube))
  return(cube)
}
hc <- function(cube = rubix()){
  # Turn the hind layer: cycle the adjacent edge strips
  # top(1,2,3) <- right(1,2,3) <- base(9,8,7) <- left(1,2,3) <- top,
  # then rotate the hind face itself.
  carried <- cube$top[c(1, 2, 3), 3]
  cube$top[c(1, 2, 3), 3] <- cube$right[c(1, 2, 3), 3]
  cube$right[c(1, 2, 3), 3] <- cube$base[c(9, 8, 7), 3]
  cube$base[c(9, 8, 7), 3] <- cube$left[c(1, 2, 3), 3]
  cube$left[c(1, 2, 3), 3] <- carried
  # One row permutation reproduces the original pair of four-cycles on
  # the face's eight outer cells (the centre, cell 5, stays put).
  turn <- c(7, 4, 1, 8, 5, 2, 9, 6, 3)
  cube$hind[, 3] <- cube$hind[turn, 3]
  print(plot_rubix(cube))
  return(cube)
}
ha <- function(cube = rubix()){
  # Turn the hind layer in the opposite sense: strips cycle
  # top(1,2,3) <- left(1,2,3) <- base(9,8,7) <- right(1,2,3) <- top,
  # and the hind face rotates in reverse.
  carried <- cube$top[c(1, 2, 3), 3]
  cube$top[c(1, 2, 3), 3] <- cube$left[c(1, 2, 3), 3]
  cube$left[c(1, 2, 3), 3] <- cube$base[c(9, 8, 7), 3]
  cube$base[c(9, 8, 7), 3] <- cube$right[c(1, 2, 3), 3]
  cube$right[c(1, 2, 3), 3] <- carried
  # Inverse permutation; reproduces the original pair of four-cycles
  # (centre cell 5 is fixed).
  turn <- c(3, 6, 9, 2, 5, 8, 1, 4, 7)
  cube$hind[, 3] <- cube$hind[turn, 3]
  print(plot_rubix(cube))
  return(cube)
}
tc <- function(cube = rubix()){
  # Turn the top layer: cycle the adjacent edge strips
  # hind(7,8,9) <- left(9,6,3) <- front(3,2,1) <- right(1,4,7) <- hind,
  # then rotate the top face itself.
  carried <- cube$hind[c(7, 8, 9), 3]
  cube$hind[c(7, 8, 9), 3] <- cube$left[c(9, 6, 3), 3]
  cube$left[c(9, 6, 3), 3] <- cube$front[c(3, 2, 1), 3]
  cube$front[c(3, 2, 1), 3] <- cube$right[c(1, 4, 7), 3]
  cube$right[c(1, 4, 7), 3] <- carried
  # One row permutation reproduces the original pair of four-cycles on
  # the face's eight outer cells (the centre, cell 5, stays put).
  turn <- c(7, 4, 1, 8, 5, 2, 9, 6, 3)
  cube$top[, 3] <- cube$top[turn, 3]
  print(plot_rubix(cube))
  return(cube)
}
ta <- function(cube = rubix()){
  # Turn the top layer in the opposite sense: strips cycle
  # hind(7,8,9) <- right(1,4,7) <- front(3,2,1) <- left(9,6,3) <- hind,
  # and the top face rotates in reverse.
  carried <- cube$hind[c(7, 8, 9), 3]
  cube$hind[c(7, 8, 9), 3] <- cube$right[c(1, 4, 7), 3]
  cube$right[c(1, 4, 7), 3] <- cube$front[c(3, 2, 1), 3]
  cube$front[c(3, 2, 1), 3] <- cube$left[c(9, 6, 3), 3]
  cube$left[c(9, 6, 3), 3] <- carried
  # Inverse permutation; reproduces the original pair of four-cycles
  # (centre cell 5 is fixed).
  turn <- c(3, 6, 9, 2, 5, 8, 1, 4, 7)
  cube$top[, 3] <- cube$top[turn, 3]
  print(plot_rubix(cube))
  return(cube)
}
bc <- function(cube = rubix()){
  # Turn the base layer: cycle the adjacent edge strips
  # hind(1,2,3) <- right(3,6,9) <- front(9,8,7) <- left(7,4,1) <- hind,
  # then rotate the base face itself.
  carried <- cube$hind[c(1, 2, 3), 3]
  cube$hind[c(1, 2, 3), 3] <- cube$right[c(3, 6, 9), 3]
  cube$right[c(3, 6, 9), 3] <- cube$front[c(9, 8, 7), 3]
  cube$front[c(9, 8, 7), 3] <- cube$left[c(7, 4, 1), 3]
  cube$left[c(7, 4, 1), 3] <- carried
  # One row permutation reproduces the original pair of four-cycles on
  # the face's eight outer cells (the centre, cell 5, stays put).
  turn <- c(7, 4, 1, 8, 5, 2, 9, 6, 3)
  cube$base[, 3] <- cube$base[turn, 3]
  print(plot_rubix(cube))
  return(cube)
}
ba <- function(cube = rubix()){
  # Turn the base layer in the opposite sense: strips cycle
  # hind(1,2,3) <- left(7,4,1) <- front(9,8,7) <- right(3,6,9) <- hind,
  # and the base face rotates in reverse.
  carried <- cube$hind[c(1, 2, 3), 3]
  cube$hind[c(1, 2, 3), 3] <- cube$left[c(7, 4, 1), 3]
  cube$left[c(7, 4, 1), 3] <- cube$front[c(9, 8, 7), 3]
  cube$front[c(9, 8, 7), 3] <- cube$right[c(3, 6, 9), 3]
  cube$right[c(3, 6, 9), 3] <- carried
  # Inverse permutation; reproduces the original pair of four-cycles
  # (centre cell 5 is fixed).
  turn <- c(3, 6, 9, 2, 5, 8, 1, 4, 7)
  cube$base[, 3] <- cube$base[turn, 3]
  print(plot_rubix(cube))
  return(cube)
}
| /rubix_moves.R | no_license | t-reader/r-rubix-cube | R | false | false | 8,062 | r | rc <- function(cube = rubix()){
# Rotate the right layer: cycle the right-hand column strip (rows 3, 6, 9
# of each face table's 3rd column) through top -> front -> base -> hind.
# NOTE(review): each face appears to be a 9-row table whose 3rd column
# holds the sticker colour -- confirm against rubix()/plot_rubix().
hold1 <- cube$top[c(3,6,9),3]
cube$top[c(3,6,9), 3] <- cube$front[c(3,6,9), 3]
cube$front[c(3,6,9), 3] <- cube$base[c(3,6,9), 3]
cube$base[c(3,6,9), 3] <- cube$hind[c(3,6,9), 3]
cube$hind[c(3,6,9), 3] <- hold1
# Two four-cycles rotate the eight outer cells of the right face
# (cell 5, the centre, never moves).
hold2 <- cube$right[4,3]
hold3 <- cube$right[1,3]
cube$right[4,3] <- cube$right[8,3]
cube$right[8,3] <- cube$right[6,3]
cube$right[6,3] <- cube$right[2,3]
cube$right[2,3] <- hold2
cube$right[1,3] <- cube$right[7,3]
cube$right[7,3] <- cube$right[9,3]
cube$right[9,3] <- cube$right[3,3]
cube$right[3,3] <- hold3
# Redraw the cube and return the mutated copy.
p <- plot_rubix(cube)
print(p)
return(cube)
}
ra <- function(cube = rubix()){
  # Turn the right layer in the opposite sense: the column strip moves
  # top -> hind -> base -> front, and the right face rotates in reverse.
  carried <- cube$top[c(3, 6, 9), 3]
  cube$top[c(3, 6, 9), 3] <- cube$hind[c(3, 6, 9), 3]
  cube$hind[c(3, 6, 9), 3] <- cube$base[c(3, 6, 9), 3]
  cube$base[c(3, 6, 9), 3] <- cube$front[c(3, 6, 9), 3]
  cube$front[c(3, 6, 9), 3] <- carried
  # Inverse of the permutation used for the clockwise turn; reproduces
  # the original pair of four-cycles (centre cell 5 is fixed).
  turn <- c(3, 6, 9, 2, 5, 8, 1, 4, 7)
  cube$right[, 3] <- cube$right[turn, 3]
  print(plot_rubix(cube))
  return(cube)
}
lc <- function(cube = rubix()){
  # Turn the left layer: shift the left-hand column strip (rows 1, 4, 7)
  # through top -> hind -> base -> front, then rotate the left face.
  carried <- cube$top[c(1, 4, 7), 3]
  cube$top[c(1, 4, 7), 3] <- cube$hind[c(1, 4, 7), 3]
  cube$hind[c(1, 4, 7), 3] <- cube$base[c(1, 4, 7), 3]
  cube$base[c(1, 4, 7), 3] <- cube$front[c(1, 4, 7), 3]
  cube$front[c(1, 4, 7), 3] <- carried
  # One row permutation reproduces the original pair of four-cycles on
  # the face's eight outer cells (the centre, cell 5, stays put).
  turn <- c(7, 4, 1, 8, 5, 2, 9, 6, 3)
  cube$left[, 3] <- cube$left[turn, 3]
  print(plot_rubix(cube))
  return(cube)
}
la <- function(cube = rubix()){
  # Turn the left layer in the opposite sense: the strip (rows 1, 4, 7)
  # moves top -> front -> base -> hind, and the left face rotates in reverse.
  carried <- cube$top[c(1, 4, 7), 3]
  cube$top[c(1, 4, 7), 3] <- cube$front[c(1, 4, 7), 3]
  cube$front[c(1, 4, 7), 3] <- cube$base[c(1, 4, 7), 3]
  cube$base[c(1, 4, 7), 3] <- cube$hind[c(1, 4, 7), 3]
  cube$hind[c(1, 4, 7), 3] <- carried
  # Inverse permutation; reproduces the original pair of four-cycles
  # (centre cell 5 is fixed).
  turn <- c(3, 6, 9, 2, 5, 8, 1, 4, 7)
  cube$left[, 3] <- cube$left[turn, 3]
  print(plot_rubix(cube))
  return(cube)
}
fc <- function(cube = rubix()){
  # Turn the front layer: cycle the adjacent edge strips
  # top(7,8,9) <- left(7,8,9) <- base(3,2,1) <- right(7,8,9) <- top,
  # then rotate the front face itself.
  carried <- cube$top[c(7, 8, 9), 3]
  cube$top[c(7, 8, 9), 3] <- cube$left[c(7, 8, 9), 3]
  cube$left[c(7, 8, 9), 3] <- cube$base[c(3, 2, 1), 3]
  cube$base[c(3, 2, 1), 3] <- cube$right[c(7, 8, 9), 3]
  cube$right[c(7, 8, 9), 3] <- carried
  # One row permutation reproduces the original pair of four-cycles on
  # the face's eight outer cells (the centre, cell 5, stays put).
  turn <- c(7, 4, 1, 8, 5, 2, 9, 6, 3)
  cube$front[, 3] <- cube$front[turn, 3]
  print(plot_rubix(cube))
  return(cube)
}
fa <- function(cube = rubix()){
  # Turn the front layer in the opposite sense: strips cycle
  # top(7,8,9) <- right(7,8,9) <- base(3,2,1) <- left(7,8,9) <- top,
  # and the front face rotates in reverse.
  carried <- cube$top[c(7, 8, 9), 3]
  cube$top[c(7, 8, 9), 3] <- cube$right[c(7, 8, 9), 3]
  cube$right[c(7, 8, 9), 3] <- cube$base[c(3, 2, 1), 3]
  cube$base[c(3, 2, 1), 3] <- cube$left[c(7, 8, 9), 3]
  cube$left[c(7, 8, 9), 3] <- carried
  # Inverse permutation; reproduces the original pair of four-cycles
  # (centre cell 5 is fixed).
  turn <- c(3, 6, 9, 2, 5, 8, 1, 4, 7)
  cube$front[, 3] <- cube$front[turn, 3]
  print(plot_rubix(cube))
  return(cube)
}
hc <- function(cube = rubix()){
  # Turn the hind layer: cycle the adjacent edge strips
  # top(1,2,3) <- right(1,2,3) <- base(9,8,7) <- left(1,2,3) <- top,
  # then rotate the hind face itself.
  carried <- cube$top[c(1, 2, 3), 3]
  cube$top[c(1, 2, 3), 3] <- cube$right[c(1, 2, 3), 3]
  cube$right[c(1, 2, 3), 3] <- cube$base[c(9, 8, 7), 3]
  cube$base[c(9, 8, 7), 3] <- cube$left[c(1, 2, 3), 3]
  cube$left[c(1, 2, 3), 3] <- carried
  # One row permutation reproduces the original pair of four-cycles on
  # the face's eight outer cells (the centre, cell 5, stays put).
  turn <- c(7, 4, 1, 8, 5, 2, 9, 6, 3)
  cube$hind[, 3] <- cube$hind[turn, 3]
  print(plot_rubix(cube))
  return(cube)
}
ha <- function(cube = rubix()){
  # Turn the hind layer in the opposite sense: strips cycle
  # top(1,2,3) <- left(1,2,3) <- base(9,8,7) <- right(1,2,3) <- top,
  # and the hind face rotates in reverse.
  carried <- cube$top[c(1, 2, 3), 3]
  cube$top[c(1, 2, 3), 3] <- cube$left[c(1, 2, 3), 3]
  cube$left[c(1, 2, 3), 3] <- cube$base[c(9, 8, 7), 3]
  cube$base[c(9, 8, 7), 3] <- cube$right[c(1, 2, 3), 3]
  cube$right[c(1, 2, 3), 3] <- carried
  # Inverse permutation; reproduces the original pair of four-cycles
  # (centre cell 5 is fixed).
  turn <- c(3, 6, 9, 2, 5, 8, 1, 4, 7)
  cube$hind[, 3] <- cube$hind[turn, 3]
  print(plot_rubix(cube))
  return(cube)
}
tc <- function(cube = rubix()){
  # Turn the top layer: cycle the adjacent edge strips
  # hind(7,8,9) <- left(9,6,3) <- front(3,2,1) <- right(1,4,7) <- hind,
  # then rotate the top face itself.
  carried <- cube$hind[c(7, 8, 9), 3]
  cube$hind[c(7, 8, 9), 3] <- cube$left[c(9, 6, 3), 3]
  cube$left[c(9, 6, 3), 3] <- cube$front[c(3, 2, 1), 3]
  cube$front[c(3, 2, 1), 3] <- cube$right[c(1, 4, 7), 3]
  cube$right[c(1, 4, 7), 3] <- carried
  # One row permutation reproduces the original pair of four-cycles on
  # the face's eight outer cells (the centre, cell 5, stays put).
  turn <- c(7, 4, 1, 8, 5, 2, 9, 6, 3)
  cube$top[, 3] <- cube$top[turn, 3]
  print(plot_rubix(cube))
  return(cube)
}
ta <- function(cube = rubix()){
  # Turn the top layer in the opposite sense: strips cycle
  # hind(7,8,9) <- right(1,4,7) <- front(3,2,1) <- left(9,6,3) <- hind,
  # and the top face rotates in reverse.
  carried <- cube$hind[c(7, 8, 9), 3]
  cube$hind[c(7, 8, 9), 3] <- cube$right[c(1, 4, 7), 3]
  cube$right[c(1, 4, 7), 3] <- cube$front[c(3, 2, 1), 3]
  cube$front[c(3, 2, 1), 3] <- cube$left[c(9, 6, 3), 3]
  cube$left[c(9, 6, 3), 3] <- carried
  # Inverse permutation; reproduces the original pair of four-cycles
  # (centre cell 5 is fixed).
  turn <- c(3, 6, 9, 2, 5, 8, 1, 4, 7)
  cube$top[, 3] <- cube$top[turn, 3]
  print(plot_rubix(cube))
  return(cube)
}
bc <- function(cube = rubix()){
  # Turn the base layer: cycle the adjacent edge strips
  # hind(1,2,3) <- right(3,6,9) <- front(9,8,7) <- left(7,4,1) <- hind,
  # then rotate the base face itself.
  carried <- cube$hind[c(1, 2, 3), 3]
  cube$hind[c(1, 2, 3), 3] <- cube$right[c(3, 6, 9), 3]
  cube$right[c(3, 6, 9), 3] <- cube$front[c(9, 8, 7), 3]
  cube$front[c(9, 8, 7), 3] <- cube$left[c(7, 4, 1), 3]
  cube$left[c(7, 4, 1), 3] <- carried
  # One row permutation reproduces the original pair of four-cycles on
  # the face's eight outer cells (the centre, cell 5, stays put).
  turn <- c(7, 4, 1, 8, 5, 2, 9, 6, 3)
  cube$base[, 3] <- cube$base[turn, 3]
  print(plot_rubix(cube))
  return(cube)
}
ba <- function(cube = rubix()){
  # Turn the base layer in the opposite sense: strips cycle
  # hind(1,2,3) <- left(7,4,1) <- front(9,8,7) <- right(3,6,9) <- hind,
  # and the base face rotates in reverse.
  carried <- cube$hind[c(1, 2, 3), 3]
  cube$hind[c(1, 2, 3), 3] <- cube$left[c(7, 4, 1), 3]
  cube$left[c(7, 4, 1), 3] <- cube$front[c(9, 8, 7), 3]
  cube$front[c(9, 8, 7), 3] <- cube$right[c(3, 6, 9), 3]
  cube$right[c(3, 6, 9), 3] <- carried
  # Inverse permutation; reproduces the original pair of four-cycles
  # (centre cell 5 is fixed).
  turn <- c(3, 6, 9, 2, 5, 8, 1, 4, 7)
  cube$base[, 3] <- cube$base[turn, 3]
  print(plot_rubix(cube))
  return(cube)
}
|
# NOTE(review): rm(list = ls()) wipes the caller's entire workspace --
# tolerable in a stand-alone analysis script, but do not source() this
# from a session whose objects you want to keep.
rm(list = ls())
##### loading the needed libraries ################
# INLA provides the integrated nested Laplace approximation fits used
# for the hybrid data-cloning (HDC) method; lme4 provides glmer() for
# the likelihood-based (AGHQ) comparison fits.
library (INLA)
library(lme4)
###---------------------------------------------------
###############################################################
############### Simulation Example ####################
### Simulating observations from a mixed Poisson count model
set.seed(1234) # fixed seed so the simulated example is reproducible
simfun <- function(ng = 50, nr = 2, fsd = 0.75, b = c(1, 2, -3)) {
  # Simulate one Poisson GLMM data set: `ng` groups of `nr` observations,
  # normal random intercepts with sd `fsd`, and fixed effects
  # b = (intercept, coef of x1, coef of x2) on the log scale.
  # Returns a data frame with columns x1, x2, f, eta0, eta, mu, y.
  n.obs <- nr * ng
  # Random numbers are drawn in the same order as before, so results
  # are reproducible for a given seed.
  grp.eff <- rnorm(ng, sd = fsd)
  x1 <- rbinom(n.obs, 1, 0.6)
  x2 <- runif(n.obs, 0, 1)
  dd <- data.frame(cbind(x1, x2), f = factor(rep(1:ng, each = nr)))
  # Fixed-effect part of the linear predictor (intercept + x1 + x2).
  dd$eta0 <- model.matrix(~cbind(x1, x2), data = dd) %*% b
  dd$eta <- dd$eta0 + grp.eff[dd$f]
  dd$mu <- exp(dd$eta)
  dd$y <- rpois(n.obs, lambda = dd$mu)
  dd
}
dd <- simfun() # one simulated data set with the default settings
###---------------------------------------------------
##### The main simulation function to compare AGHQ and HDC methods #######
cfun.hdc <- function(d) {
# Monte-Carlo comparison of one AGHQ fit (glmer) against three
# hybrid-data-cloning INLA fits (informative, flat and vague priors on
# the random-intercept precision) over `iter` simulated data sets.
# Returns a list of iter x 4 matrices of estimates and standard errors,
# one column per method.
# NOTE(review): the argument `d` is never used -- it is overwritten by
# simfun() at the top of every iteration, and the script calls
# cfun.hdc() with no argument.  The function also relies on the globals
# `iter`, `k`, `ng` and `nr` being set by the caller -- confirm before
# reuse.
####
beta0<-matrix(0,ncol=4,nrow=iter)
beta1<-matrix(0,ncol=4,nrow=iter)
beta2<-matrix(0,ncol=4,nrow=iter)
vc<-matrix(0,ncol=4,nrow=iter)
se.beta0<-matrix(0,ncol=4,nrow=iter)
se.beta1<-matrix(0,ncol=4,nrow=iter)
se.beta2<-matrix(0,ncol=4,nrow=iter)
se.vc<-matrix(0,ncol=4,nrow=iter)
###
for(i in 1:iter){
d<-simfun()
####
##### MLE fitting by adaptive Gauss-Hermit quadrature method with 15 nodes
####
m <- glmer(y ~ x1 +x2 + (1 | f) , family = "poisson" , data = d , nAGQ=15)
####
##### preparing cloned data vector
####
# Each column of d is replicated k times ("cloned"); the quoted names
# simply create local variables x1.k, x2.k, f.k, y.k.
"x1.k"<-rep(d[,1],k)
"x2.k"<-rep(d[,2],k)
"f.k"<-rep(d[,3],k)
"y.k"<-rep(d[,7],k)
clone.data<-cbind(x1.k,x2.k,f.k,y.k)
# Each clone gets its own set of group labels 1..(ng*k).
clone.data[,3]<-rep(1:(ng*k),each=nr)
clone.data<-as.data.frame(clone.data)
####
##### HDC algorithm with Informative (gamma) prior for variance component
####
hdc.1 <- inla(y.k ~ x1.k + x2.k +
f(f.k,model="iid",param=c(1, 2)),family = "poisson",data = clone.data)
hyp.hdc.1 = inla.hyperpar(hdc.1)
# Posterior mean of sigma = 1/sqrt(precision) and of sigma^2 = 1/precision,
# used to back out the posterior sd of sigma.
vc.mean.1 = inla.expectation(function(x) 1/x^.5, hyp.hdc.1$marginals[[1]])
vc.m2.1 = inla.expectation(function(x) 1/x, hyp.hdc.1$marginals[[1]])
stdev.1 = sqrt(vc.m2.1- vc.mean.1^2)
####
##### HDC algorithm with Non-informative (flat) priors for variance component
####
hdc.2 <- inla(y.k ~ x1.k + x2.k +
f(f.k,model="iid"),family = "poisson",data = clone.data,
control.family=list(prior="flat"))
hyp.hdc.2 = inla.hyperpar(hdc.2)
vc.mean.2 = inla.expectation(function(x) 1/x^.5, hyp.hdc.2$marginals[[1]])
vc.m2.2 = inla.expectation(function(x) 1/x, hyp.hdc.2$marginals[[1]])
stdev.2 = sqrt(vc.m2.2- vc.mean.2^2)
####
##### HDC algorithm with Vague (gamma) priors for variance component
####
hdc.3 <- inla(y.k ~ x1.k + x2.k +
f(f.k,model="iid",param=c(3, 0.05)),family = "poisson", data = clone.data)
hyp.hdc.3 = inla.hyperpar(hdc.3)
vc.mean.3 = inla.expectation(function(x) 1/x^.5, hyp.hdc.3$marginals[[1]])
vc.m2.3 = inla.expectation(function(x) 1/x, hyp.hdc.3$marginals[[1]])
stdev.3 = sqrt(vc.m2.3- vc.mean.3^2)
####
##### Preparing output (parameters)
####
# Column order throughout: AGHQ, HDC-informative, HDC-flat, HDC-vague.
beta0[i,]<-c(fixef(m)[1],hdc.1$summary.fixed[1,1],hdc.2$summary.fixed[1,1],
hdc.3$summary.fixed[1,1])
##
beta1[i,]<-c(fixef(m)[2],hdc.1$summary.fixed[2,1],hdc.2$summary.fixed[2,1],
hdc.3$summary.fixed[2,1])
##
beta2[i,]<-c(fixef(m)[3],hdc.1$summary.fixed[3,1],hdc.2$summary.fixed[3,1],
hdc.3$summary.fixed[3,1])
##
vc[i,]<-c(sqrt(unlist(VarCorr(m))),vc.mean.1,vc.mean.2,vc.mean.3)
###
###### (precision of parameters)
###
se.beta0[i,]<-c(sqrt(diag(vcov(m)))[1],hdc.1$summary.fixed[1,2],
hdc.2$summary.fixed[1,2],hdc.3$summary.fixed[1,2])
##
se.beta1[i,]<-c(sqrt(diag(vcov(m)))[2],hdc.1$summary.fixed[2,2],
hdc.2$summary.fixed[2,2],hdc.3$summary.fixed[2,2])
##
se.beta2[i,]<-c(sqrt(diag(vcov(m)))[3],hdc.1$summary.fixed[3,2],
hdc.2$summary.fixed[3,2],hdc.3$summary.fixed[3,2])
##
se.vc[i,]<-c(sqrt(diag(VarCorr(m)$f)/ng),stdev.1,stdev.2,stdev.3)
####
cat("iter=", i,"\n") # progress indicator
####
}
return(list('Beta0'=beta0,'Beta1'=beta1,'Beta2'=beta2,'Sigma'=vc,
'SE.Beta0'=se.beta0,'SE.Beta1'=se.beta1,'SE.Beta2'=se.beta2,'SE.Sigma'=se.vc))
}
###---------------------------------------------------
####### Run simulation for 100 data sets and 80 clones of data ############
ng = 50; nr = 2
# k=80
# iter=100
# rr.hdc <- cfun.hdc()
###########
###---------------------------------------------------
####### Comparing computing times for DC and HDC methods on ######################
####### a typical simulated data set ######################
k=100
# Clone the simulated data set dd k times (quoted LHS just names the
# variables x1.k, x2.k, f.k, y.k).
"x1.k"<-rep(dd[,1],k)
"x2.k"<-rep(dd[,2],k)
"f.k"<-rep(dd[,3],k)
"y.k"<-rep(dd[,7],k)
# NOTE(review): cbind() coerces the factor column to its integer codes,
# but f.k is immediately overwritten with fresh labels below, so this
# is harmless here.
clone.data<-cbind(x1.k,x2.k,f.k,y.k)
clone.data[,3]<-rep(1:(ng*k),each=nr)
clone.data<-as.data.frame(clone.data)
### HDC
hdc<-inla(y.k ~ x1.k + x2.k + f(f.k,model="iid"), data=clone.data, family="poisson")
### DC
wd.dc = tempfile()
## same as before
# Plain data cloning: the same model fitted by INLA's internal MCMC
# sampler, timed for comparison with the HDC (approximation) fit above.
result.dc = system.time(inla(y.k ~ x1.k + x2.k + f(f.k,model="iid"),
data=clone.data, family="poisson",
working.directory = wd.dc,
keep = TRUE,
inla.arg = "-m mcmc -N 10000 -T 10 -S 1"))
## Compare cpu times
time.hdc = hdc$cpu.used
time.hdc
result.dc
# Plotting the results (for example marginals for the hyperparameters).
# FIX: the original passed lty="l" to plot() -- "l" is not a valid line
# type; it was meant as type="l" (draw lines), exactly as used for the
# equivalent plots later in this script.  Likewise lines(..., lty="2")
# passed the string "2" where the numeric dash pattern 2 was intended.
hyp.hdc = hdc$marginals.hyperpar[[1]]
hyp.dc = scan(paste0(wd.dc, "/results.files/hyperparameter-random.effect00000001-parameter-user-scale/trace.dat"))
plot(hyp.hdc, type = "l", col = 1,
     main = expression(paste("hybrid DC and DC based densities of", ~sigma^{-2})))
lines(density(hyp.dc), lty = 2, col = 2)
##### compare another DC and HDC-based densities (for example the slope of x1)
int.inla = hdc$marginals.fixed[[2]]
int.dc = scan(paste0(wd.dc, "/results.files/fixed.effect00000002/trace.dat"))
plot(int.inla, type = "l", col = 1,
     main = expression(paste("hybrid DC and DC based densities of", ~beta[1])))
lines(density(int.dc), lty = 2, col = 2)
####--------------------------------------------------------
###################################################################
############### Overdispersion Example: Seeds Data #########
################# Preparing cloned data set with k=200 ############
# Seeds has n1 = 21 plates; each row is replicated k = 200 times and the
# plate identifier is replaced so every clone gets its own plate label.
seed.data<-Seeds
n1=21
k=200
n=n1*k
"r.k"<-rep(seed.data[,1],k)
"n.k"<-rep(seed.data[,2],k)
"x1.k"<-rep(seed.data[,3],k)
"x2.k"<-rep(seed.data[,4],k)
"plate.k"<-rep(seed.data[,5],k)
clone.data<-cbind(r.k,n.k,x1.k,x2.k,plate.k)
clone.data[,5]<-1:n
clone.data<-as.data.frame(clone.data)
####---------------------------------------------------------------------------
###################### main effects model ############################
### Fitting by AGHQ method with 15 nodes #####
# NOTE(review): a proportion response r/n in glmer() without
# weights = n ignores the per-plate trial counts -- confirm this
# matches the analysis being reproduced.
(main.effects <- glmer(r/n ~ x1 + x2 + (1 | plate) ,
family = "binomial", data = Seeds, nAGQ=15))
####---------------------------------------------------------------------------
#### Fitting by INLA method ####
seeds.inla.fit.1 = inla(r ~ x1 + x2 + f(plate, model="iid",
param=c(.5, .0164)), data=Seeds, family="binomial", Ntrials=n )
seeds.hyperpar.1 = inla.hyperpar(seeds.inla.fit.1)
#### Fixed effects summaries
summary(seeds.inla.fit.1)
#### variance component summaries
# Posterior mean of sigma = 1/sqrt(precision), second moment 1/precision;
# the chained assignment stores the same value in both mean.1 and m1.
mean.1=m1 = inla.expectation(function(x) 1/x^.5, seeds.hyperpar.1$marginals[[1]])
m2 = inla.expectation(function(x) 1/x, seeds.hyperpar.1$marginals[[1]])
stdev.1 = sqrt(m2- mean.1^2)
####---------------------------------------------------------------------------
#### Fitting by HDC method with informative prior ######
formula.clone.1 = r.k ~ x1.k+x2.k+f(plate.k,model="iid",param=c(.5,.0164))
mod.seeds.clone.1 = inla(formula.clone.1,data=clone.data,family="binomial",Ntrials=n.k)
seeds.hyperpar.clone.1 = inla.hyperpar(mod.seeds.clone.1)
#### Fixed effects summaries
# Data-cloning correction: posterior sds shrink like 1/sqrt(k), so the
# sampling standard errors are recovered by multiplying by sqrt(k).
summary(mod.seeds.clone.1)
sd.clone.1<-as.vector(sqrt(k)*mod.seeds.clone.1$summary.fixed[,2])
#### Preparing elements for plotting
marginal.clone.intercept<-mod.seeds.clone.1$marginals.fixed[[1]]
marginal.clone.x1<-mod.seeds.clone.1$marginals.fixed$x1
marginal.clone.x2<-mod.seeds.clone.1$marginals.fixed$x2
hyp.k.1<-seeds.hyperpar.clone.1$marginals$`Precision for plate.k`
#### Variance component summaries
mean.clone.1=m1 = inla.expectation(function(x) 1/x^.5, seeds.hyperpar.clone.1$marginals[[1]])
m2.clone = inla.expectation(function(x) 1/x, seeds.hyperpar.clone.1$marginals[[1]])
stdev.clone.1 = sqrt(k*(m2.clone- mean.clone.1^2))
####---------------------------------------------------------------------------
#### Fitting by HDC method with flat prior ######
formula.clone.1.2 = r.k ~ x1.k+x2.k+f(plate.k,model="iid")
mod.seeds.clone.1.2 = inla(formula.clone.1.2,data=clone.data,
control.family=list(prior="flat"), family="binomial",Ntrials=n.k,
control.fixed=list(mean=c(1,1),prec=c(0.001,0.001)))
seeds.hyperpar.clone.1.2 = inla.hyperpar(mod.seeds.clone.1.2)
#### Fixed effects summaries
summary(mod.seeds.clone.1.2)
sd.clone.1.2<-as.vector(sqrt(k)*mod.seeds.clone.1.2$summary.fixed[,2])
#### Preparing elements for plotting
marginal.clone.2.intercept<-mod.seeds.clone.1.2$marginals.fixed[[1]]
marginal.clone.2.x1<-mod.seeds.clone.1.2$marginals.fixed$x1
marginal.clone.2.x2<-mod.seeds.clone.1.2$marginals.fixed$x2
hyp.k.2<-seeds.hyperpar.clone.1.2$marginals[[1]]
#### Variance component summaries
mean.clone.1.2 = inla.expectation(function(x) 1/x^.5, seeds.hyperpar.clone.1.2$marginals[[1]])
m2.clone.2 = inla.expectation(function(x) 1/x, seeds.hyperpar.clone.1.2$marginals[[1]])
stdev.clone.1.2 = sqrt(k*(m2.clone.2- mean.clone.1.2^2))
####---------------------------------------------------------------------------
#### Fitting by HDC method with vague (gamma) prior ######
formula.clone.1.3 = r.k ~ x1.k+x2.k+f(plate.k,model="iid",param=c(.01,0.01))
mod.seeds.clone.1.3 = inla(formula.clone.1.3,data=clone.data,
family="binomial",Ntrials=n.k, control.fixed=list(mean=c(-2,-1),prec=c(0.1,0.1)))
seeds.hyperpar.clone.1.3 = inla.hyperpar(mod.seeds.clone.1.3)
#### Fixed effects summaries
summary(mod.seeds.clone.1.3)
sd.clone.1.3<-as.vector(sqrt(k)*mod.seeds.clone.1.3$summary.fixed[,2])
inla.expectation(function(x) 1/x^.5, seeds.hyperpar.clone.1.3$marginals[[1]])
#### Preparing elements for plotting
marginal.clone.3.intercept<-mod.seeds.clone.1.3$marginals.fixed[[1]]
marginal.clone.3.x1<-mod.seeds.clone.1.3$marginals.fixed$x1
marginal.clone.3.x2<-mod.seeds.clone.1.3$marginals.fixed$x2
hyp.k.3<-seeds.hyperpar.clone.1.3$marginals[[1]]
#### Variance component summaries
mean.clone.1.3=m1.3 = inla.expectation(function(x) 1/x^.5, seeds.hyperpar.clone.1.3$marginals[[1]])
m2.clone.3 = inla.expectation(function(x) 1/x, seeds.hyperpar.clone.1.3$marginals[[1]])
stdev.clone.1.3 = sqrt(k*(m2.clone.3- mean.clone.1.3^2))
####---------------------------------------------------------------------------
###################################################
#### Plotting the results to compare HDC-based densities with respect to priors
par(mfrow=c(2,2))
plot(marginal.clone.intercept,main=expression(paste("HDC-based density of",~ beta[0])),
col=1,xlab="",ylab="",lty=1)
lines(marginal.clone.2.intercept,lty=3,col=3)
lines(marginal.clone.3.intercept,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.clone.x1,main=expression(paste("DC-based density of",~ beta[1])),
col=1,xlab="",ylab="",lty=1)
lines(marginal.clone.2.x1,lty=3,col=3)
lines(marginal.clone.3.x1,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.clone.x2,main=expression(paste("DC-based density of",~ beta[2])),
col=1,xlab="",ylab="",lty=1)
lines(marginal.clone.2.x2,lty=3,col=3)
lines(marginal.clone.3.x2,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(hyp.k.1,main=expression(paste("DC-based density of",~ sigma^{-2})),
col=1,xlab="",ylab="",lty=1,ylim=c(0,.7))
lines(hyp.k.2,lty=3,col=3)
lines(hyp.k.3,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
###############################################################################
############### Second Model for Interaction Model ###########################
####----------------------------------------------------------------
### Fitting by AGHQ method with 15 nodes #####
# Same model as above plus the x1:x2 interaction (entered via I(x1*x2)).
(interaction.effects <- glmer(r/n ~ x1 + x2 + I(x1*x2) + (1 | plate) ,
family = "binomial", data = Seeds, nAGQ=15))
####----------------------------------------------------------------
## Fitting by INLA
seeds.inla.fit.2 = inla(r ~ x1 + x2+I(x1*x2) + f(plate, model="iid",
param=c(.5, .0164)), data=Seeds, family="binomial", Ntrials=n )
seeds.hyperpar.2 = inla.hyperpar(seeds.inla.fit.2)
### Fixed effects summaries
summary(seeds.inla.fit.2)
### Variance components summaries
mean.2 = inla.expectation(function(x) 1/x^.5, seeds.hyperpar.2$marginals[[1]])
m2 = inla.expectation(function(x) 1/x, seeds.hyperpar.2$marginals[[1]])
stdev.2 = sqrt(m2- mean.2^2)
####---------------------------------------------------------------------------
################ Fitting by HDC with informative prior
formula.clone.int.1 = r.k ~ x1.k+x2.k+I(x1.k*x2.k)+f(plate.k,model="iid",param=c(.5,.0164))
mod.seeds.clone.int.1 = inla(formula.clone.int.1,data=clone.data,family="binomial",Ntrials=n.k,
control.fixed=list(mean=c(1,1,1),prec=c(0.001,0.001,0.001)))
seeds.hyperpar.clone.int.1 = inla.hyperpar(mod.seeds.clone.int.1)
### Fixed effects summaries
# sqrt(k) rescaling recovers the sampling standard errors from the
# cloned-data posterior sds (data-cloning correction).
summary(mod.seeds.clone.int.1)
sd.clone.int.1<-as.vector(sqrt(k)*mod.seeds.clone.int.1$summary.fixed[,2])
#### Preparing elements for plotting
marginal.clone.int.1.intercept<-mod.seeds.clone.int.1$marginals.fixed[[1]]
marginal.clone.int.1.x1<-mod.seeds.clone.int.1$marginals.fixed$x1.k
marginal.clone.int.1.x2<-mod.seeds.clone.int.1$marginals.fixed$x2.k
marginal.clone.int.1.interaction<-mod.seeds.clone.int.1$marginals.fixed[[4]]
hyp.k.int.1<-seeds.hyperpar.clone.int.1$marginals$`Precision for plate.k`
### Variance components summaries
mean.clone.int.1 = inla.expectation(function(x) 1/x^.5, seeds.hyperpar.clone.int.1$marginals[[1]])
m2.clone.int.1 = inla.expectation(function(x) 1/x, seeds.hyperpar.clone.int.1$marginals[[1]])
stdev.clone.int.1 = sqrt(k*(m2.clone.int.1- mean.clone.int.1^2))
# -----------------------------------------------------------
################ Fitting by HDC with flat prior
formula.clone.int.2 = r.k ~ x1.k+x2.k+I(x1.k*x2.k)+f(plate.k,model="iid")
mod.seeds.clone.int.2 = inla(formula.clone.int.2,data=clone.data,
control.family=list(prior="flat"), family="binomial",Ntrials=n.k,
control.fixed=list(mean=c(-1,-2,-1),prec=c(0.1,0.1,0.1)))
seeds.hyperpar.clone.int.2 = inla.hyperpar(mod.seeds.clone.int.2)
### Fixed effects summaries
summary(mod.seeds.clone.int.2)
sd.clone.int.2<-as.vector(sqrt(k)*mod.seeds.clone.int.2$summary.fixed[,2])
#### Preparing elements for plotting
marginal.clone.int.2.intercept<-mod.seeds.clone.int.2$marginals.fixed[[1]]
marginal.clone.int.2.x1<-mod.seeds.clone.int.2$marginals.fixed$x1.k
marginal.clone.int.2.x2<-mod.seeds.clone.int.2$marginals.fixed$x2.k
marginal.clone.int.2.interaction<-mod.seeds.clone.int.2$marginals.fixed[[4]]
hyp.k.int.2<-seeds.hyperpar.clone.int.2$marginals$`Precision for plate.k`
### Variance components summaries
mean.clone.int.2 = inla.expectation(function(x) 1/x^.5, seeds.hyperpar.clone.int.2$marginals[[1]])
m2.clone.int.2 = inla.expectation(function(x) 1/x, seeds.hyperpar.clone.int.2$marginals[[1]])
stdev.clone.int.2 = sqrt(k*(m2.clone.int.2- mean.clone.int.2^2))
# -----------------------------------------------------------
################ Fitting by HDC with vague (gamma) prior
formula.clone.int.3 = r.k ~ x1.k+x2.k+I(x1.k*x2.k)+f(plate.k,model="iid",param=c(.01,0.01))
mod.seeds.clone.int.3 = inla(formula.clone.int.3,data=clone.data,
family="binomial",Ntrials=n.k, control.fixed=list(mean=c(-2,2,1),prec=c(0.001,0.001,0.001)))
seeds.hyperpar.clone.int.3 = inla.hyperpar(mod.seeds.clone.int.3)
### Fixed effects summaries
summary(mod.seeds.clone.int.3)
sd.clone.int.3<-as.vector(sqrt(k)*mod.seeds.clone.int.3$summary.fixed[,2])
inla.expectation(function(x) 1/x^.5, seeds.hyperpar.clone.int.3$marginals[[1]])
#### Preparing elements for plotting
marginal.clone.int.3.intercept<-mod.seeds.clone.int.3$marginals.fixed[[1]]
marginal.clone.int.3.x1<-mod.seeds.clone.int.3$marginals.fixed$x1.k
marginal.clone.int.3.x2<-mod.seeds.clone.int.3$marginals.fixed$x2.k
marginal.clone.int.3.interaction<-mod.seeds.clone.int.3$marginals.fixed[[4]]
hyp.k.int.3<-seeds.hyperpar.clone.int.3$marginals$`Precision for plate.k`
## Variance components summaries
mean.clone.int.3 = inla.expectation(function(x) 1/x^.5, seeds.hyperpar.clone.int.3$marginals[[1]])
m2.clone.int.3 = inla.expectation(function(x) 1/x, seeds.hyperpar.clone.int.3$marginals[[1]])
stdev.clone.int.3 = sqrt(k*(m2.clone.int.3- mean.clone.int.3^2))
# -----------------------------------------------------------
#### Plotting the results
# NOTE(review): five panels are drawn below into a 2x2 layout, so the
# sigma^{-2} panel starts a second page/figure.
par(mfrow=c(2,2))
# Panel for the intercept: overlay the marginals from the three prior
# choices.  FIX: the overlays previously drew the x1 marginals
# (marginal.clone.int.2.x1 / int.3.x1) -- a copy-paste slip; this panel
# is labelled beta[0], and every other panel overlays the matching
# coefficient, so the intercept marginals are drawn here.
plot(marginal.clone.int.1.intercept,
     main = expression(paste("DC-based distribution", ~beta[0])),
     col = 1, xlab = "", ylab = "", lty = 1, type = "l")
lines(marginal.clone.int.2.intercept, lty = 3, col = 3)
lines(marginal.clone.int.3.intercept, lty = 4, col = 4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
# Panels for beta1, beta2, the interaction beta3 and the precision
# sigma^{-2}: in each, line type 1/3/4 marks priors 1/2/3.
plot(marginal.clone.int.1.x1,
main=expression(paste("DC-based distribution",~ beta[1])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.clone.int.2.x1,lty=3,col=3)
lines(marginal.clone.int.3.x1,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.clone.int.1.x2,
main=expression(paste("DC-based distribution",~ beta[2])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.clone.int.2.x2,lty=3,col=3)
lines(marginal.clone.int.3.x2,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.clone.int.1.interaction,
main=expression(paste("DC-based distribution",~ beta[3])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.clone.int.2.interaction,lty=3,col=3)
lines(marginal.clone.int.3.interaction,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(hyp.k.int.1,main=expression(paste("DC-based distribution",~ sigma^{-2})),
col=1,xlab="",ylab="Density",lty=1,type="l",ylim=c(0,.4))
lines(hyp.k.int.2,lty=3,col=3)
lines(hyp.k.int.3,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
# -------------------------------------------------------------------------
#### Comparing DC and HDC results and their computing times ####
formula = r ~ x1+x2+f(plate,model="iid",param=c(.5,.0164))
formula.clone = r.k ~ x1.k+x2.k+f(plate.k,model="iid",param=c(.5,.0164))
## Run HDC
result.inla = inla(formula.clone, data=clone.data, family="binomial",
Ntrials=n.k, verbose=T)
## Run DC
## DC uses INLA's internal MCMC sampler; keep=TRUE retains the run's
## working files so the traces can be read back below
wd.dc = tempfile()
## same as before
result.dc = system.time(inla(formula.clone,data=clone.data,
family="binomial",Ntrials=n.k,
working.directory = wd.dc,
keep = TRUE,
inla.arg = "-m mcmc -N 10000 -T 10 -S 1"))
## Compare cpu times
time.inla = result.inla$cpu.used
time.inla
result.dc
## compare curves (for example densities for the hyperparameters)
## NOTE(review): these trace-file paths depend on INLA's internal file
## layout for the kept working directory -- verify against the INLA
## version in use
hyp.inla = result.inla$marginals.hyperpar[[1]]
hyp.dc = scan(paste(wd.dc,"/results.files/hyperparameter-random.effect00000001-parameter-user-scale/trace.dat",sep=""))
plot(hyp.inla, type="l",lty=1,col=1,xlab="",ylab="",
main=expression(paste("hybrid DC and DC-based densities of"~ sigma^{-2})),ylim=c(0,0.5))
lines(density(hyp.dc),lty=2,col=2)
## compare curve for fixed effects (for example \beta_1)
int.inla = result.inla$marginals.fixed[[2]]
int.dc = scan(paste(wd.dc,"/results.files/fixed.effect00000002/trace.dat",sep=""))
plot(int.inla,type="l",lty=1,col=1,ylim=c(0,20),xlab="",ylab="",
main=expression(paste("hybrid DC and DC based densities of",~ beta[1])))
lines(density(int.dc),col=2,lty=2)
############################################################################
# -------------------------------------------------------------------------
#########################################################################
############### Longitudinal Data Example: Epilepsy Data #########
### Loading data
## Visit is created by glmmAK, it corresponds to Breslow and Clayton's
## Visit/10, because the codes are -3,-1,1,3.
require (glmmAK)
data(epilepticBC)
epil = epilepticBC
## Duplicate patient id (used for a second random-effect term later)
epil$id2=epil$id
## Observation-level index (one level per row)
epil$rand=1:nrow(epil)
## Indicator for the fourth visit
epil$V4=epil$visit==4
## Groups rows four at a time -- assumes the data are sorted so each
## patient's 4 visits are consecutive (TODO confirm against glmmAK docs)
epil$newid=rep(1:(nrow(epil)/4), each=4)
###---------------------------------------------------------------------
### Constructing cloned data set
####################################
epil.data<-epil
## 59 patients x 4 visits = 236 rows, replicated k = 100 times
patient=59
n1=patient*4
k=100
n=n1*k
## Each column of epil.data is stacked k times to build the cloned
## variables.  (Quoted left-hand sides behave like ordinary assignment;
## column positions 1..14 correspond to the columns created above.)
"id.k"<-rep(epil.data[,1],k)
"visit.k"<-rep(epil.data[,2],k)
"seizure0.k"<-rep(epil.data[,3],k)
"age.k"<-rep(epil.data[,4],k)
"Seizure.k"<-rep(epil.data[,5],k)
"Base.k"<-rep(epil.data[,6],k)
"Trt.k"<-rep(epil.data[,7],k)
"Base.Trt.k"<-rep(epil.data[,8],k)
"Age.k"<-rep(epil.data[,9],k)
"Visit.k"<-rep(epil.data[,10],k)
"id2.k"<-rep(epil.data[,11],k)
"rand.k"<-rep(epil.data[,12],k)
"V4.k"<-rep(epil.data[,13],k)
"newid.k"<-rep(epil.data[,14],k)
clone.data<-cbind(id.k,visit.k,seizure0.k,age.k,Seizure.k,
Base.k,Trt.k,Base.Trt.k,Age.k,Visit.k,id2.k,rand.k,V4.k,newid.k)
## Column 12 (rand.k): unique observation-level index over all cloned rows
clone.data[,12]<-1:n
## Columns 1 and 11 (id.k, id2.k): unique patient id per clone,
## 101 .. patient*k + 100, each repeated for the 4 visits
clone.data[,1]<-clone.data[,11]<-rep((1+100):(patient*k+100),each=4)
## BUG FIX: the original used rep(1:patient*k, each=4).  ":" binds tighter
## than "*", so that is (1:patient)*k -- only 59 values (100,200,...,5900)
## silently recycled over the n = 23600 rows.  By analogy with the id.k
## line above, the intended sequence is one id per cloned patient:
clone.data[,2]<-clone.data[,14]<-rep(1:(patient*k),each=4)
clone.data<-as.data.frame(clone.data)
##############################################################################
################## First random intercept model for Epilepsy data #########
################## proposed by Fong et al. (2009) #########################
###---------------------------------------------------------------------
### Fitting by AGHQ method with 15 nodes
(first.model <- glmer(Seizure ~ Base + Trt + I(Base*Trt) + Age + V4 +
(1 | id) , family = "poisson", data = epil, nAGQ=15))
###---------------------------------------------------------------------
### Fitting by INLA method
formula=Seizure ~ Base + Trt + I(Base*Trt) + Age + V4 +
f(id,model="iid",param=c(2, 1.140),diagonal=0)
epil.inla.fit.1 = inla(formula, data=epil, family="poisson" ,
control.compute=list(hyperpar=T,dic=T))
epil.hyperpar.1 = inla.hyperpar(epil.inla.fit.1)
### Fixed effects summaries
summary(epil.inla.fit.1)
### Variance component summaries
## Plain (non-cloned) fit: sd of sigma has no data-cloning k factor
mean.1=m1 = inla.expectation(function(x) 1/x^.5, epil.hyperpar.1$marginals[[1]])
m2 = inla.expectation(function(x) 1/x, epil.hyperpar.1$marginals[[1]])
stdev.1 = sqrt(m2- mean.1^2)
###---------------------------------------------------------------------
### Fitting by HDC method with informative prior
epil.dc.fit.1 = inla(Seizure.k ~ Base.k + Trt.k + I(Base.k*Trt.k)
+Age.k + V4.k+f(id.k,model="iid",param=c(2, 1.140)), data=clone.data,
family="poisson" )
epil.dc.hyperpar.1 = inla.hyperpar(epil.dc.fit.1)
### Fixed effects summaries
summary(epil.dc.fit.1)
## DC correction: sqrt(k) times the naive posterior sd
sd.clone.1<-as.vector(sqrt(k)*epil.dc.fit.1$summary.fixed[,2])
### Variance component summaries
mean.dc.1 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.1$marginals[[1]])
m2.dc.1 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.1$marginals[[1]])
stdev.clone.1 = sqrt(k*(m2.dc.1- mean.dc.1^2))
#### Preparing elements to plot the results
marginal.Intercept.1<-epil.dc.fit.1$marginals.fixed[[1]]
marginal.Base.1<-epil.dc.fit.1$marginals.fixed$Base.k
marginal.Trt.1<-epil.dc.fit.1$marginals.fixed$Trt.k
marginal.BaseTrt.1<-epil.dc.fit.1$marginals.fixed[[4]]
marginal.Age.1<-epil.dc.fit.1$marginals.fixed$Age.k
marginal.V4.1<-epil.dc.fit.1$marginals.fixed$V4.k
hyp.clone.1<-epil.dc.hyperpar.1$marginals$`Precision for id.k`
###---------------------------------------------------------------------
### Fitting by HDC method with flat prior
epil.dc.fit.2 = inla(Seizure.k ~ Base.k + Trt.k + I(Base.k*Trt.k)
+Age.k + V4.k+f(id.k,model="iid"), data=clone.data,control.family=list(prior="flat"),
family="poisson",control.fixed=list(mean=c(1,1,1,1,0),prec=c(0.001,0.001,0.01,0.001,
0.01)))
epil.dc.hyperpar.2 = inla.hyperpar(epil.dc.fit.2)
### Fixed effects summaries
summary(epil.dc.fit.2)
## DC correction: sqrt(k) times the naive posterior sd
sd.clone.2<-as.vector(sqrt(k)*epil.dc.fit.2$summary.fixed[,2])
### Variance component summaries
mean.dc.2 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.2$marginals[[1]])
m2.dc.2 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.2$marginals[[1]])
stdev.clone.2 = sqrt(k*(m2.dc.2- mean.dc.2^2))
#### Preparing elements to plot the results
marginal.Intercept.2<-epil.dc.fit.2$marginals.fixed[[1]]
marginal.Base.2<-epil.dc.fit.2$marginals.fixed$Base.k
marginal.Trt.2<-epil.dc.fit.2$marginals.fixed$Trt.k
marginal.BaseTrt.2<-epil.dc.fit.2$marginals.fixed[[4]]
marginal.Age.2<-epil.dc.fit.2$marginals.fixed$Age.k
marginal.V4.2<-epil.dc.fit.2$marginals.fixed$V4.k
hyp.clone.2<-epil.dc.hyperpar.2$marginals$`Precision for id.k`
###---------------------------------------------------------------------
### Fitting by HDC method with vague (gamma) prior
epil.dc.fit.3 = inla(Seizure.k ~ Base.k + Trt.k + I(Base.k*Trt.k)
+Age.k + V4.k+f(id.k,model="iid",param=c(0.05,0.02)), data=clone.data,
family="poisson",control.fixed=list(mean=c(-1,-2,2,0,1),prec=c(0.01,0.001,0.001,0.01,
0.001)))
epil.dc.hyperpar.3 = inla.hyperpar(epil.dc.fit.3)
### Fixed effects summaries
summary(epil.dc.fit.3)
## DC correction: sqrt(k) times the naive posterior sd
sd.clone.3<-as.vector(sqrt(k)*epil.dc.fit.3$summary.fixed[,2])
### Variance component summaries
mean.dc.3 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.3$marginals[[1]])
m2.dc.3 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.3$marginals[[1]])
stdev.clone.3 = sqrt(k*(m2.dc.3- mean.dc.3^2))
#### Preparing elements to plot the results
marginal.Intercept.3<-epil.dc.fit.3$marginals.fixed[[1]]
marginal.Base.3<-epil.dc.fit.3$marginals.fixed$Base.k
marginal.Trt.3<-epil.dc.fit.3$marginals.fixed$Trt.k
marginal.BaseTrt.3<-epil.dc.fit.3$marginals.fixed[[4]]
marginal.Age.3<-epil.dc.fit.3$marginals.fixed$Age.k
marginal.V4.3<-epil.dc.fit.3$marginals.fixed$V4.k
hyp.clone.3<-epil.dc.hyperpar.3$marginals$`Precision for id.k`
###---------------------------------------------------------------------
#### Plotting the results
## 3x2 grid + final panel: each fixed-effect marginal under the three
## HDC prior sets (solid/black = 1, dotted/green = 2, dotdash/blue = 3)
par(mfrow=c(3,2))
plot(marginal.Intercept.1,main=expression(paste(beta[0])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.Intercept.2,lty=3,col=3)
lines(marginal.Intercept.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.Base.1,main=expression(paste(beta[1])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.Base.2,lty=3,col=3)
lines(marginal.Base.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.Trt.1,main=expression(paste(beta[2])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.Trt.2,lty=3,col=3)
lines(marginal.Trt.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.BaseTrt.1,main=expression(paste(beta[3])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.BaseTrt.2,lty=3,col=3)
lines(marginal.BaseTrt.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.Age.1,main=expression(paste(beta[4])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.Age.2,lty=3,col=3)
lines(marginal.Age.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.V4.1,main=expression(paste(beta[5])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.V4.2,lty=3,col=3)
lines(marginal.V4.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(hyp.clone.1,main=expression(paste("HDC-based distribution of",~ sigma^{-2})),
col=1,xlab="",ylab="",lty=1,type="l")
lines(hyp.clone.2,lty=3,col=3)
lines(hyp.clone.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
##############################################################################
################## Second random intercept with measurement error #########
############ model for Epilepsy data proposed by Fong et al. (2009) ####
###---------------------------------------------------------------------
### Fitting by AGHQ method with 15 nodes
## NOTE(review): no nAGQ argument is passed here, so glmer uses its
## default (Laplace); the header comment may be inaccurate -- confirm
(second.model<- glmer(Seizure ~ Base + Trt + I(Base*Trt) + Age + V4 +
(1 | id) + (1|rand), family = "poisson", data = epil))
###---------------------------------------------------------------------
### Fitting by INLA method
formula.2=Seizure ~ Base + Trt + I(Base*Trt) + Age + V4 +
f(id,model="iid",param=c(2, 1.240)) +
f(rand,model="iid",param=c(2, 1.140), diagonal=0)
epil.inla.fit.2 = inla(formula.2, data=epil,
family="poisson" )
epil.hyperpar.2 = inla.hyperpar(epil.inla.fit.2)
### Fixed effects summaries
summary(epil.inla.fit.2)
### Variance component summaries
## First variance component (note: m1 here holds the *second* moment
## E[tau^(-1)] despite the name)
mean.1 = inla.expectation(function(x) 1/x^.5, epil.hyperpar.2$marginals[[1]])
m1 = inla.expectation(function(x) 1/x, epil.hyperpar.2$marginals[[1]])
stdev.1 = sqrt(m1- mean.1^2)
## Second variance component
mean.2=m1 = inla.expectation(function(x) 1/x^.5, epil.hyperpar.2$marginals[[2]])
m2 = inla.expectation(function(x) 1/x, epil.hyperpar.2$marginals[[2]])
stdev.2 = sqrt(m2- mean.2^2)
###---------------------------------------------------------------------
### Fitting by HDC method with informative prior
formula.clone.2=Seizure.k ~ Base.k + Trt.k + I(Base.k*Trt.k) + Age.k + V4.k +
f(id.k,model="iid",param=c(2, 1.240))+
f(rand.k,model="iid",param=c(2, 1.140))
epil.dc.fit.2.1 = inla(formula.clone.2,data=clone.data,family="poisson" )
epil.dc.hyperpar.2.1 = inla.hyperpar(epil.dc.fit.2.1)
### Fixed effects summaries
summary(epil.dc.fit.2.1)
## DC correction: sqrt(k) times the naive posterior sd
sd.clone.2.1<-as.vector(sqrt(k)*epil.dc.fit.2.1$summary.fixed[,2])
### Variance component summaries
## One (mean, second moment, sd) triple per variance component
mean.dc.2.1.1 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.2.1$marginals[[1]])
m2.dc.2.1.1 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.2.1$marginals[[1]])
stdev.clone.2.1.1 = sqrt(k*(m2.dc.2.1.1- mean.dc.2.1.1^2))
mean.dc.2.1.2 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.2.1$marginals[[2]])
m2.dc.2.1.2 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.2.1$marginals[[2]])
stdev.clone.2.1.2 = sqrt(k*(m2.dc.2.1.2- mean.dc.2.1.2^2))
#### Preparing elements to plot the results
marginal.Intercept.2.1<-epil.dc.fit.2.1$marginals.fixed[[1]]
marginal.Base.2.1<-epil.dc.fit.2.1$marginals.fixed$Base.k
marginal.Trt.2.1<-epil.dc.fit.2.1$marginals.fixed$Trt.k
marginal.BaseTrt.2.1<-epil.dc.fit.2.1$marginals.fixed[[4]]
marginal.Age.2.1<-epil.dc.fit.2.1$marginals.fixed$Age.k
marginal.V4.2.1<-epil.dc.fit.2.1$marginals.fixed$V4.k
hyp.clone.2.1.1<-epil.dc.hyperpar.2.1$marginals$`Precision for id.k`
hyp.clone.2.1.2<-epil.dc.hyperpar.2.1$marginals$`Precision for rand.k`
###---------------------------------------------------------------------
### Fitting by HDC method with flat prior
epil.dc.fit.2.2 = inla(Seizure.k ~ Base.k + Trt.k + I(Base.k*Trt.k) + Age.k + V4.k +
f(id.k,model="iid")+f(rand.k,model="iid"),control.family=list(prior="flat"),
data=clone.data,family="poisson",
control.fixed=list(mean=c(1,1,1,1,0),prec=c(0.001,0.001,0.01,0.001,0.01)))
epil.dc.hyperpar.2.2 = inla.hyperpar(epil.dc.fit.2.2)
### Fixed effects summaries
summary(epil.dc.fit.2.2)
## DC correction: sqrt(k) times the naive posterior sd
sd.clone.2.2<-as.vector(sqrt(k)*epil.dc.fit.2.2$summary.fixed[,2])
### Variance component summaries
mean.dc.2.2.1 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.2.2$marginals[[1]])
m2.dc.2.2.1 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.2.2$marginals[[1]])
stdev.clone.2.2.1 = sqrt(k*(m2.dc.2.2.1- mean.dc.2.2.1^2))
mean.dc.2.2.2 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.2.2$marginals[[2]])
m2.dc.2.2.2 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.2.2$marginals[[2]])
stdev.clone.2.2.2 = sqrt(k*(m2.dc.2.2.2- mean.dc.2.2.2^2))
#### Preparing elements to plot the results
marginal.Intercept.2.2<-epil.dc.fit.2.2$marginals.fixed[[1]]
marginal.Base.2.2<-epil.dc.fit.2.2$marginals.fixed$Base.k
marginal.Trt.2.2<-epil.dc.fit.2.2$marginals.fixed$Trt.k
marginal.BaseTrt.2.2<-epil.dc.fit.2.2$marginals.fixed[[4]]
marginal.Age.2.2<-epil.dc.fit.2.2$marginals.fixed$Age.k
marginal.V4.2.2<-epil.dc.fit.2.2$marginals.fixed$V4.k
hyp.clone.2.2.1<-epil.dc.hyperpar.2.2$marginals$`Precision for id.k`
hyp.clone.2.2.2<-epil.dc.hyperpar.2.2$marginals$`Precision for rand.k`
###---------------------------------------------------------------------
### Fitting by HDC method with vague (gamma) prior
epil.dc.fit.2.3 = inla(Seizure.k ~ Base.k + Trt.k + I(Base.k*Trt.k) + Age.k + V4.k +
f(id.k,model="iid",param=c(0.05, 0.02))+f(rand.k,model="iid",param=c(0.01, 0.01)),
data=clone.data,family="poisson",
control.fixed=list(mean=c(-1,-2,2,0,1),prec=c(0.01,0.001,0.001,0.01,0.001)))
epil.dc.hyperpar.2.3 = inla.hyperpar(epil.dc.fit.2.3)
### Fixed effects summaries
summary(epil.dc.fit.2.3)
## DC correction: sqrt(k) times the naive posterior sd
sd.clone.2.3<-as.vector(sqrt(k)*epil.dc.fit.2.3$summary.fixed[,2])
### Variance component summaries
mean.dc.2.3.1 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.2.3$marginals[[1]])
m2.dc.2.3.1 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.2.3$marginals[[1]])
stdev.clone.2.3.1 = sqrt(k*(m2.dc.2.3.1- mean.dc.2.3.1^2))
mean.dc.2.3.2 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.2.3$marginals[[2]])
m2.dc.2.3.2 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.2.3$marginals[[2]])
stdev.clone.2.3.2 = sqrt(k*(m2.dc.2.3.2- mean.dc.2.3.2^2))
#### Preparing elements to plot the results
marginal.Intercept.2.3<-epil.dc.fit.2.3$marginals.fixed[[1]]
marginal.Base.2.3<-epil.dc.fit.2.3$marginals.fixed$Base.k
marginal.Trt.2.3<-epil.dc.fit.2.3$marginals.fixed$Trt.k
marginal.BaseTrt.2.3<-epil.dc.fit.2.3$marginals.fixed[[4]]
marginal.Age.2.3<-epil.dc.fit.2.3$marginals.fixed$Age.k
marginal.V4.2.3<-epil.dc.fit.2.3$marginals.fixed$V4.k
hyp.clone.2.3.1<-epil.dc.hyperpar.2.3$marginals$`Precision for id.k`
hyp.clone.2.3.2<-epil.dc.hyperpar.2.3$marginals$`Precision for rand.k`
###---------------------------------------------------------------------
#### Plotting the results
## 3x2 grid of fixed-effect marginals under the three HDC prior sets
par(mfrow=c(3,2))
plot(marginal.Intercept.2.1,main=expression(paste(beta[0])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.Intercept.2.2,lty=3,col=3)
lines(marginal.Intercept.2.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.Base.2.1,main=expression(paste(beta[1])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.Base.2.2,lty=3,col=3)
lines(marginal.Base.2.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
## Panel beta[2] (Trt).
## BUG FIX: the original read expression(paste("beta[2])) -- an unbalanced
## double quote that swallowed the rest of the script and made it
## unparsable.  All sibling panels use the unquoted plotmath form.
plot(marginal.Trt.2.1,main=expression(paste(beta[2])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.Trt.2.2,lty=3,col=3)
lines(marginal.Trt.2.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.BaseTrt.2.1,main=expression(paste(beta[3])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.BaseTrt.2.2,lty=3,col=3)
lines(marginal.BaseTrt.2.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.Age.2.1,main=expression(paste(beta[4])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.Age.2.2,lty=3,col=3)
lines(marginal.Age.2.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.V4.2.1,main=expression(paste(beta[5])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.V4.2.2,lty=3,col=3)
lines(marginal.V4.2.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
## Hyperparameter (precision) marginals for the two variance components
par(mfrow=c(2,1))
plot(hyp.clone.2.1.1,main=expression(paste("HDC-based distribution of",~ sigma[1]^{-2})),
col=1,xlab="",ylab="",lty=1,type="l")
lines(hyp.clone.2.2.1,lty=3,col=3)
lines(hyp.clone.2.3.1,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(hyp.clone.2.1.2,main=expression(paste("HDC-based distribution of",~ sigma[2]^{-2})),
col=1,xlab="",ylab="",lty=1,type="l")
lines(hyp.clone.2.2.2,lty=3,col=3)
lines(hyp.clone.2.3.2,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
##############################################################################
################## Second random slope model ##############################
############ for Epilepsy data proposed by Fong et al. (2009) ##########
###---------------------------------------------------------------------
### Fitting by AGHQ method with 15 nodes
## NOTE(review): lme4 restricts nAGQ > 1 to models with a single *scalar*
## random effect; (Visit|id2) is vector-valued, so this call may error on
## current lme4 -- confirm against the lme4 version this script targets
(third.model<- glmer(Seizure ~ Base + Trt + I(Base*Trt) + Age + Visit +
(Visit|id2), family = "poisson", nAGQ=15, data = epil))
###---------------------------------------------------------------------
### Fitting by INLA method
## Correlated random intercept + slope via the two-part 2d iid Wishart model
epil.inla.fit.3 = inla(Seizure ~ Base + Trt + I(Base*Trt) + Age +
Visit +f(id, model="2diidwishartpart0", param=c(5, 2.277904,
1.692047, 0), diagonal=0) + f(id2, Visit,
model="2diidwishartpart1", diagonal = 0), data=epil,
family="poisson" )
epil.hyperpar.3 = inla.hyperpar(epil.inla.fit.3)
### Fixed effects summaries
summary(epil.inla.fit.3)
### Variance component summaries
mean.3=m1 = inla.expectation(function(x) 1/x^.5, epil.hyperpar.3$marginals[[1]])
m2 = inla.expectation(function(x) 1/x, epil.hyperpar.3$marginals[[1]])
stdev.3.1 = sqrt(m2- mean.3^2)
###---------------------------------------------------------------------
### Fitting by HDC method with first prior set
epil.dc.fit.3.1 = inla(Seizure.k ~ Base.k + Trt.k + I(Base.k*Trt.k) + Age.k +
Visit.k +f(id.k, model="2diidwishartpart0", param=c(5, 2.277904,
1.692047, 0), diagonal=0) + f(id2.k, Visit.k,
model="2diidwishartpart1", diagonal = 0), data=clone.data,
family="poisson" )
epil.dc.hyperpar.3.1 = inla.hyperpar(epil.dc.fit.3.1)
### Fixed effects summaries
summary(epil.dc.fit.3.1)
## DC correction: sqrt(k) times the naive posterior sd
sd.clone.3.1<-as.vector(sqrt(k)*epil.dc.fit.3.1$summary.fixed[,2])
### Variance component summaries
mean.dc.3.1.1 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.3.1$marginals[[1]])
m2.dc.3.1.1 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.3.1$marginals[[1]])
stdev.clone.3.1.1 = sqrt(k*(m2.dc.3.1.1- mean.dc.3.1.1^2))
mean.dc.3.1.2 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.3.1$marginals[[2]])
m2.dc.3.1.2 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.3.1$marginals[[2]])
stdev.clone.3.1.2 = sqrt(k*(m2.dc.3.1.2- mean.dc.3.1.2^2))
#### Preparing elements to plot the results
marginal.Intercept.3.1<-epil.dc.fit.3.1$marginals.fixed[[1]]
marginal.Base.3.1<-epil.dc.fit.3.1$marginals.fixed$Base.k
marginal.Trt.3.1<-epil.dc.fit.3.1$marginals.fixed$Trt.k
marginal.BaseTrt.3.1<-epil.dc.fit.3.1$marginals.fixed[[4]]
marginal.Age.3.1<-epil.dc.fit.3.1$marginals.fixed$Age.k
## Sixth fixed effect is Visit.k here, though the variable is named V4
marginal.V4.3.1<-epil.dc.fit.3.1$marginals.fixed[[6]]
hyp.clone.3.1.1<-epil.dc.hyperpar.3.1$marginals[[1]]
hyp.clone.3.1.2<-epil.dc.hyperpar.3.1$marginals[[2]]
###---------------------------------------------------------------------
### Fitting by HDC method with second prior set
epil.dc.fit.3.2 = inla(Seizure.k ~ Base.k + Trt.k + I(Base.k*Trt.k) + Age.k +
Visit.k +f(id.k, model="2diidwishartpart0", param=c(4, 3,
4, 0), diagonal=0) + f(id2.k, Visit.k,
model="2diidwishartpart1", diagonal = 0), data=clone.data,
family="poisson" )
epil.dc.hyperpar.3.2 = inla.hyperpar(epil.dc.fit.3.2)
### Fixed effects summaries
summary(epil.dc.fit.3.2)
## DC correction: sqrt(k) times the naive posterior sd
sd.clone.3.2<-as.vector(sqrt(k)*epil.dc.fit.3.2$summary.fixed[,2])
### Variance component summaries
mean.dc.3.2.1 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.3.2$marginals[[1]])
m2.dc.3.2.1 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.3.2$marginals[[1]])
stdev.clone.3.2.1 = sqrt(k*(m2.dc.3.2.1- mean.dc.3.2.1^2))
mean.dc.3.2.2 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.3.2$marginals[[2]])
m2.dc.3.2.2 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.3.2$marginals[[2]])
stdev.clone.3.2.2 = sqrt(k*(m2.dc.3.2.2- mean.dc.3.2.2^2))
#### Preparing elements to plot the results
marginal.Intercept.3.2<-epil.dc.fit.3.2$marginals.fixed[[1]]
marginal.Base.3.2<-epil.dc.fit.3.2$marginals.fixed$Base.k
marginal.Trt.3.2<-epil.dc.fit.3.2$marginals.fixed$Trt.k
marginal.BaseTrt.3.2<-epil.dc.fit.3.2$marginals.fixed[[4]]
marginal.Age.3.2<-epil.dc.fit.3.2$marginals.fixed$Age.k
## Sixth fixed effect is Visit.k here, though the variable is named V4
marginal.V4.3.2<-epil.dc.fit.3.2$marginals.fixed[[6]]
hyp.clone.3.2.1<-epil.dc.hyperpar.3.2$marginals[[1]]
hyp.clone.3.2.2<-epil.dc.hyperpar.3.2$marginals[[2]]
###---------------------------------------------------------------------
### Fitting by HDC method with third prior set
epil.dc.fit.3.3 = inla(Seizure.k ~ Base.k + Trt.k + I(Base.k*Trt.k) + Age.k +
Visit.k +f(id.k, model="2diidwishartpart0", param=c(6, 0.5,
0.5, 0), diagonal=0) + f(id2.k, Visit.k,
model="2diidwishartpart1", diagonal = 0), data=clone.data,
family="poisson" )
epil.dc.hyperpar.3.3 = inla.hyperpar(epil.dc.fit.3.3)
### Fixed effects summaries
summary(epil.dc.fit.3.3)
## BUG FIX: the original sd.clone.3.3 line was missing its closing
## parenthesis, a syntax error that swallowed the following statements.
## DC correction: sqrt(k) times the naive posterior sd.
sd.clone.3.3<-as.vector(sqrt(k)*epil.dc.fit.3.3$summary.fixed[,2])
### Variance component summaries
## mean of sigma = E[tau^(-1/2)], second moment = E[tau^(-1)],
## sd includes the data-cloning factor k
mean.dc.3.3.1 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.3.3$marginals[[1]])
m2.dc.3.3.1 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.3.3$marginals[[1]])
stdev.clone.3.3.1 = sqrt(k*(m2.dc.3.3.1- mean.dc.3.3.1^2))
mean.dc.3.3.2 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.3.3$marginals[[2]])
m2.dc.3.3.2 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.3.3$marginals[[2]])
stdev.clone.3.3.2 = sqrt(k*(m2.dc.3.3.2- mean.dc.3.3.2^2))
#### Preparing elements to plot the results
marginal.Intercept.3.3<-epil.dc.fit.3.3$marginals.fixed[[1]]
marginal.Base.3.3<-epil.dc.fit.3.3$marginals.fixed$Base.k
marginal.Trt.3.3<-epil.dc.fit.3.3$marginals.fixed$Trt.k
marginal.BaseTrt.3.3<-epil.dc.fit.3.3$marginals.fixed[[4]]
marginal.Age.3.3<-epil.dc.fit.3.3$marginals.fixed$Age.k
## Sixth fixed effect is Visit.k here, though the variable is named V4
marginal.V4.3.3<-epil.dc.fit.3.3$marginals.fixed[[6]]
hyp.clone.3.3.1<-epil.dc.hyperpar.3.3$marginals[[1]]
hyp.clone.3.3.2<-epil.dc.hyperpar.3.3$marginals[[2]]
###---------------------------------------------------------------------
#### Plotting the results
## Fixed-effect marginals under the three prior sets, then the two
## precision hyperparameters (solid/black = 1, dotted/green = 2,
## dotdash/blue = 3)
par(mfrow=c(3,2))
plot(marginal.Intercept.3.1,main=expression(paste(beta[0])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.Intercept.3.2,lty=3,col=3)
lines(marginal.Intercept.3.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.Base.3.1,main=expression(paste(beta[1])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.Base.3.2,lty=3,col=3)
lines(marginal.Base.3.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.Trt.3.1,main=expression(paste(beta[2])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.Trt.3.2,lty=3,col=3)
lines(marginal.Trt.3.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.BaseTrt.3.1,main=expression(paste(beta[3])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.BaseTrt.3.2,lty=3,col=3)
lines(marginal.BaseTrt.3.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.Age.3.1,main=expression(paste(beta[4])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.Age.3.2,lty=3,col=3)
lines(marginal.Age.3.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.V4.3.1,main=expression(paste(beta[5])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.V4.3.2,lty=3,col=3)
lines(marginal.V4.3.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
par(mfrow=c(2,1))
plot(hyp.clone.3.1.1,main=expression(paste(sigma[1]^{-2})),
col=1,xlab="",ylab="",lty=1,type="l")
lines(hyp.clone.3.2.1,lty=3,col=3)
lines(hyp.clone.3.3.1,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(hyp.clone.3.1.2,main=expression(paste(sigma[2]^{-2})),
col=1,xlab="",ylab="",lty=1,type="l")
lines(hyp.clone.3.2.2,lty=3,col=3)
lines(hyp.clone.3.3.2,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
############################################################################
# -------------------------------------------------------------------------
##################################################################################
########### Crossed Random Effects Data Example: Salamander Mating Data #########
# -------------------------------------------------------------------------
### Loading data
## assumes salam.RData (with object `salam`) sits in the working directory
load("salam.RData")
## organize data into a form suitable for logistic regression
## fW / mW: whiteside female / male indicators; WW: both whiteside
dat0=data.frame("y"=c(salam$y), "fW"=as.integer(salam$x[,"W/R"]==1 | salam$x[,"W/W"]==1),
"mW"=as.integer(salam$x[,"R/W"]==1 | salam$x[,"W/W"]==1),
"WW"=as.integer(salam$x[,"W/W"]==1 ) )
## add salamander id
## Each row of salam$z flags one female and one male; the male index is
## shifted down by 20 so both ids run 1..20
id = t( apply(salam$z, 1, function(x) {
tmp = which (x==1)
tmp[2] = tmp[2] - 20
tmp
}) )
## ids are suitable for model A and C, but not B
id.modA = rbind(id, id+40, id+20)
colnames (id.modA) = c("f.modA","m.modA")
dat0=cbind (dat0, id.modA, group=1)
## Three experiments of 120 matings each
dat0$experiment=as.factor(rep(1:3, each=120))
dat0$group=as.factor(dat0$group)
salamander = dat0
salamander.e1 = subset (dat0, dat0$experiment==1)
salamander.e2 = subset (dat0, dat0$experiment==2)
salamander.e3 = subset (dat0, dat0$experiment==3)
### Constructing Cloned data set
k=100
dat0=salamander.e1
dat0$no<-1:120
## Split experiment 1 into its six 20-row sub-experiments.
## BUG FIX: the original used `dat0$no==(1:20)` etc.  `==` against a
## length-20 vector recycles element-wise; with exactly 120 rows the
## alignment happened to pick the intended rows, but only by accident and
## with no warning.  `%in%` expresses the intended membership test.
dat0.1 = subset (dat0, dat0$no %in% 1:20)
dat0.2 = subset (dat0, dat0$no %in% 21:40)
dat0.3 = subset (dat0, dat0$no %in% 41:60)
dat0.4 = subset (dat0, dat0$no %in% 61:80)
dat0.5 = subset (dat0, dat0$no %in% 81:100)
dat0.6 = subset (dat0, dat0$no %in% 101:120)
###
## Clone block 1 of 6 (rows 1-20).  The same template matrix / function /
## post-processing triple is copy-pasted for each 20-row slice below.
data.clone<-matrix(NA,nrow=20*k,ncol=9)
## Replicates each of the 9 columns of dat0.1 k times; note the function
## reads dat0.1 and the data.clone template from the global environment
cloning<-function(k){
for (i in 1:9){
data.clone[,i]<-rep(dat0.1[,i],k)
}
return(data.clone)
}
data.0.1.clone<-cloning(k)
## Column 5 (f.modA): unique female id per cloned row-block;
## column 6 (m.modA): shifted by 20 per clone for distinct male ids
data.0.1.clone[,5]<-1:(20*k)
for (i in 2:k){
data.0.1.clone[(((i-1)*20+1):(i*20)),6]<-data.0.1.clone[(((i-1)*20+1):(i*20)),6]+(i-1)*20
}
## Drop column 9 (the `no` helper index)
data.clone.1<-data.0.1.clone[,-9]
#####
## Clone block 2 of 6 (rows 21-40); identical structure to block 1
data.clone<-matrix(NA,nrow=20*k,ncol=9)
cloning<-function(k){
for (i in 1:9){
data.clone[,i]<-rep(dat0.2[,i],k)
}
return(data.clone)
}
data.0.2.clone<-cloning(k)
data.0.2.clone[,5]<-1:(20*k)
for (i in 2:k){
data.0.2.clone[(((i-1)*20+1):(i*20)),6]<-data.0.2.clone[(((i-1)*20+1):(i*20)),6]+(i-1)*20
}
data.clone.2<-data.0.2.clone[,-9]
####
## Clone block 3 of 6 (rows 41-60); identical structure to block 1
data.clone<-matrix(NA,nrow=20*k,ncol=9)
cloning<-function(k){
for (i in 1:9){
data.clone[,i]<-rep(dat0.3[,i],k)
}
return(data.clone)
}
data.0.3.clone<-cloning(k)
data.0.3.clone[,5]<-1:(20*k)
for (i in 2:k){
data.0.3.clone[(((i-1)*20+1):(i*20)),6]<-data.0.3.clone[(((i-1)*20+1):(i*20)),6]+(i-1)*20
}
data.clone.3<-data.0.3.clone[,-9]
#######
## Clone block 4 of 6 (rows 61-80); identical structure to block 1
data.clone<-matrix(NA,nrow=20*k,ncol=9)
cloning<-function(k){
for (i in 1:9){
data.clone[,i]<-rep(dat0.4[,i],k)
}
return(data.clone)
}
data.0.4.clone<-cloning(k)
data.0.4.clone[,5]<-1:(20*k)
for (i in 2:k){
data.0.4.clone[(((i-1)*20+1):(i*20)),6]<-data.0.4.clone[(((i-1)*20+1):(i*20)),6]+(i-1)*20
}
data.clone.4<-data.0.4.clone[,-9]
####
## Clone block 5 of 6 (rows 81-100); identical structure to block 1
data.clone<-matrix(NA,nrow=20*k,ncol=9)
cloning<-function(k){
for (i in 1:9){
data.clone[,i]<-rep(dat0.5[,i],k)
}
return(data.clone)
}
data.0.5.clone<-cloning(k)
data.0.5.clone[,5]<-1:(20*k)
for (i in 2:k){
data.0.5.clone[(((i-1)*20+1):(i*20)),6]<-data.0.5.clone[(((i-1)*20+1):(i*20)),6]+(i-1)*20
}
data.clone.5<-data.0.5.clone[,-9]
#####
## Clone block 6 of 6 (rows 101-120); identical structure to block 1
data.clone<-matrix(NA,nrow=20*k,ncol=9)
cloning<-function(k){
for (i in 1:9){
data.clone[,i]<-rep(dat0.6[,i],k)
}
return(data.clone)
}
data.0.6.clone<-cloning(k)
data.0.6.clone[,5]<-1:(20*k)
for (i in 2:k){
data.0.6.clone[(((i-1)*20+1):(i*20)),6]<-data.0.6.clone[(((i-1)*20+1):(i*20)),6]+(i-1)*20
}
data.clone.6<-data.0.6.clone[,-9]
#############
## Stack the six cloned sub-experiments into one data set (overwrites
## the NA template held in data.clone)
data.clone<-rbind(data.clone.1,data.clone.2,data.clone.3,data.clone.4,
data.clone.5,data.clone.6)
aa<-data.clone<-as.data.frame(data.clone)
#########################################################################
###-----------------------------------------------------------------------
######################### Summer Experiment ############################
#### For two other experiments the codes are the same ####################
###-----------------------------------------------------------------------
### Fitting by INLA approach
## Crossed iid random effects for female (f.modA) and male (m.modA)
formula=y~fW+mW+WW + f(f.modA, model="iid", param=c(1,.622)) +
f(m.modA, model="iid", param=c(1,.622))
salamander.e1.inla.fit = inla(formula,
family="binomial", data=salamander.e1, Ntrials=rep(1,nrow(salamander.e1)))
salamander.e1.hyperpar = inla.hyperpar (salamander.e1.inla.fit)
### Fixed effects summaries
summary(salamander.e1.inla.fit)
### Variance components summaries
## mean of sigma = E[tau^(-1/2)], second moment = E[tau^(-1)]
mean.inla.1 = inla.expectation(function(x) 1/x^.5, salamander.e1.hyperpar$marginals[[1]])
mean.inla.2 = inla.expectation(function(x) 1/x^.5, salamander.e1.hyperpar$marginals[[2]])
m2.inla.1 = inla.expectation(function(x) 1/x, salamander.e1.hyperpar$marginals[[1]])
m2.inla.2 = inla.expectation(function(x) 1/x, salamander.e1.hyperpar$marginals[[2]])
## BUG FIX: this is the plain (non-cloned) INLA fit, so the data-cloning
## factor k does not belong inside sqrt; the original sqrt(k*(...)) form
## (copied from the cloned fits) inflated these sds by sqrt(k) = 10.
## Every other plain INLA fit in this script uses sqrt(m2 - mean^2).
stdev.inla.1 = sqrt(m2.inla.1- mean.inla.1^2)
stdev.inla.2 = sqrt(m2.inla.2- mean.inla.2^2)
###-----------------------------------------------------------------------
### Fitting by HDC approach with informative prior set
## Columns of aa: 1=y, 2=fW, 3=mW, 4=WW, 5=f.modA, 6=m.modA (cloned)
formula.clone=aa[,1]~aa[,2]+aa[,3]+aa[,4]+f(aa[,5],model="iid",param=c(1,.622)) +
f(aa[,6], model="iid", param=c(1,.622))
salamander.e1.clone.1 = inla(formula.clone,
family="binomial", data=aa, Ntrials=rep(1,nrow(aa)))
salamander.clone.e1.hyperpar.1 = inla.hyperpar (salamander.e1.clone.1)
### Fixed effects summaries
summary(salamander.e1.clone.1)
## DC correction: sqrt(k) times the naive posterior sd
sd.clone.e1<-as.vector(sqrt(k)*salamander.e1.clone.1$summary.fixed[,2])
### Variance components summaries
mean.clone.1.1=inla.expectation(function(x) 1/x^.5, salamander.clone.e1.hyperpar.1$marginals[[1]])
mean.clone.2.1=inla.expectation(function(x) 1/x^.5, salamander.clone.e1.hyperpar.1$marginals[[2]])
m2.clone.1.1 = inla.expectation(function(x) 1/x, salamander.clone.e1.hyperpar.1$marginals[[1]])
m2.clone.2.1 = inla.expectation(function(x) 1/x, salamander.clone.e1.hyperpar.1$marginals[[2]])
stdev.clone.1.1= sqrt(k*(m2.clone.1.1- mean.clone.1.1^2))
stdev.clone.2.1= sqrt(k*(m2.clone.2.1- mean.clone.2.1^2))
### Preparing elements to plot the results
marginal.intercept.1<-salamander.e1.clone.1$marginals.fixed[[1]]
marginal.F.WS.1<-salamander.e1.clone.1$marginals.fixed[[2]]
marginal.M.WS.1<-salamander.e1.clone.1$marginals.fixed[[3]]
marginal.FM.WS.1<-salamander.e1.clone.1$marginals.fixed[[4]]
hyp.F.1<-salamander.clone.e1.hyperpar.1$marginals[[1]]
hyp.M.1<-salamander.clone.e1.hyperpar.1$marginals[[2]]
###-----------------------------------------------------------------------
### Fitting by HDC approach with flat prior set
salamander.e1.clone.2 = inla(aa[,1]~aa[,2]+aa[,3]+aa[,4]+f(aa[,5],model="iid")+
f(aa[,6], model="iid"), control.family=list(prior="flat"),
family="binomial", data=aa, Ntrials=rep(1,nrow(aa)))
salamander.clone.e1.hyperpar.2 = inla.hyperpar (salamander.e1.clone.2)
### Fixed effects summaries
summary(salamander.e1.clone.2)
sd.clone.e2<-as.vector(sqrt(k)*salamander.e1.clone.2$summary.fixed[,2])
### Variance components summaries
mean.clone.1.2=inla.expectation(function(x) 1/x^.5, salamander.clone.e1.hyperpar.2$marginals[[1]])
mean.clone.2.2=inla.expectation(function(x) 1/x^.5, salamander.clone.e1.hyperpar.2$marginals[[2]])
m2.clone.1.2 = inla.expectation(function(x) 1/x, salamander.clone.e1.hyperpar.2$marginals[[1]])
m2.clone.2.2 = inla.expectation(function(x) 1/x, salamander.clone.e1.hyperpar.2$marginals[[2]])
stdev.clone.1.2= sqrt(k*(m2.clone.1.2- mean.clone.1.2^2))
stdev.clone.2.2= sqrt(k*(m2.clone.2.2- mean.clone.2.2^2))
### Preparing elements to plot the results
marginal.intercept.2<-salamander.e1.clone.2$marginals.fixed[[1]]
marginal.F.WS.2<-salamander.e1.clone.2$marginals.fixed[[2]]
marginal.M.WS.2<-salamander.e1.clone.2$marginals.fixed[[3]]
marginal.FM.WS.2<-salamander.e1.clone.2$marginals.fixed[[4]]
hyp.F.2<-salamander.clone.e1.hyperpar.2$marginals[[1]]
hyp.M.2<-salamander.clone.e1.hyperpar.2$marginals[[2]]
###-----------------------------------------------------------------------
### Fitting by HDC approach with vague (gamma) prior set
salamander.e1.clone.3 = inla(aa[,1]~aa[,2]+aa[,3]+aa[,4]+f(aa[,5],model="iid",param=c(0.1,0.1))
+ f(aa[,6], model="iid", param=c(0.1,0.1)),
family="binomial", data=aa, Ntrials=rep(1,nrow(aa)))
salamander.clone.e1.hyperpar.3 = inla.hyperpar (salamander.e1.clone.3)
### Fixed effects summaries
summary(salamander.e1.clone.3)
sd.clone.e3<-as.vector(sqrt(k)*salamander.e1.clone.3$summary.fixed[,2])
### Variance components summaries
mean.clone.1.3=inla.expectation(function(x) 1/x^.5, salamander.clone.e1.hyperpar.3$marginals[[1]])
mean.clone.2.3=inla.expectation(function(x) 1/x^.5, salamander.clone.e1.hyperpar.3$marginals[[2]])
m2.clone.1.3 = inla.expectation(function(x) 1/x, salamander.clone.e1.hyperpar.3$marginals[[1]])
m2.clone.2.3 = inla.expectation(function(x) 1/x, salamander.clone.e1.hyperpar.3$marginals[[2]])
stdev.clone.1.3= sqrt(k*(m2.clone.1.3- mean.clone.1.3^2))
stdev.clone.2.3= sqrt(k*(m2.clone.2.3- mean.clone.2.3^2))
### Preparing elements to plot the results
marginal.intercept.3<-salamander.e1.clone.3$marginals.fixed[[1]]
marginal.F.WS.3<-salamander.e1.clone.3$marginals.fixed[[2]]
marginal.M.WS.3<-salamander.e1.clone.3$marginals.fixed[[3]]
marginal.FM.WS.3<-salamander.e1.clone.3$marginals.fixed[[4]]
hyp.F.3<-salamander.clone.e1.hyperpar.3$marginals[[1]]
hyp.M.3<-salamander.clone.e1.hyperpar.3$marginals[[2]]
###-----------------------------------------------------------------------
############### Drawing HDC graphs ##################
## Overlay the HDC-based posterior marginals obtained under the three prior
## sets: solid black (informative), dotted green (flat), dotdash blue (vague).
## FIX: the beta[0] and beta[1] panel titles originally read
## expression(paste("beta[0])) -- an unbalanced quote, which is a parse error.
## The intended plotmath form is expression(paste(beta[0])), matching the
## beta[2]/beta[3] panels below.
par(mfrow=c(2,2))
plot(marginal.intercept.1,main=expression(paste(beta[0])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.intercept.2,lty=3,col=3)
lines(marginal.intercept.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.F.WS.1,main=expression(paste(beta[1])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.F.WS.2,lty=3,col=3)
lines(marginal.F.WS.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.M.WS.1,main=expression(paste(beta[2])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.M.WS.2,lty=3,col=3)
lines(marginal.M.WS.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.FM.WS.1,main=expression(paste(beta[3])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.FM.WS.2,lty=3,col=3)
lines(marginal.FM.WS.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
## Precision marginals for the female and male random-effect components.
par(mfrow=c(2,1))
plot(hyp.F.1,main=expression(paste(sigma[f]^{-2})),
col=1,xlab="",ylab="",lty=1,type="l")
lines(hyp.F.2,lty=3,col=3)
lines(hyp.F.3,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(hyp.M.1,main=expression(paste(sigma[m]^{-2})),
col=1,xlab="",ylab="",lty=1,type="l",ylim=c(0,0.26))
lines(hyp.M.2,lty=3,col=3)
lines(hyp.M.3,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
###----------------------------------------------------------------
################################################################################
########### Comparing computing times in DC and hybrid DC methods ################
## Scratch directory for the MCMC run; keep = TRUE below retains the result
## files so the posterior traces could be read back if needed.
wd.dc = tempfile()
## same as before
## Time a plain data-cloning run of the same cloned-data model, done by MCMC
## via inla's internal sampler (10000 iterations, thinning 10, step 0.1).
result.dc = system.time(inla(formula.clone,data=aa,
family="binomial",Ntrials=rep(1,nrow(aa)),
working.directory = wd.dc,
keep = TRUE,
inla.arg = "-m mcmc -N 10000 -T 10 -S 0.1"))
### computing cpu times
## CPU time of the hybrid-DC (INLA) fit, for comparison with the MCMC run.
time.inla = salamander.e1.clone.1$cpu.used
time.inla
result.dc
# ---- file boundary: r-inla.org/papers/S8-2010-R.R (HughParsonage/R-INLA-mirror) ----
rm(list = ls())
##### loading the needed libraries ################
library (INLA)
library(lme4)
###---------------------------------------------------
###############################################################
############### Simulation Example ####################
### Simulating observations from a mixed Poisson count model
set.seed(1234)
## Simulate data from a mixed Poisson model: `ng` groups of `nr` observations,
## fixed effects b = (intercept, x1 slope, x2 slope), and a N(0, fsd^2)
## random intercept per group.  Returns a data frame with columns
## x1, x2, f (group factor), eta0, eta, mu and the Poisson response y.
## The random draws happen in the same order (rnorm, rbinom, runif, rpois)
## as always, so results are reproducible under a fixed seed.
simfun <- function(ng = 50, nr = 2, fsd = 0.75, b = c(1,2,-3)) {
  n.obs <- nr * ng
  grp.eff <- rnorm(ng, sd = fsd)          # per-group random intercept
  x1 <- rbinom(n.obs, 1, 0.6)             # binary covariate
  x2 <- runif(n.obs, 0, 1)                # continuous covariate on [0, 1]
  sim <- data.frame(cbind(x1, x2), f = factor(rep(1:ng, each = nr)))
  ## Fixed-effect part of the linear predictor (design: intercept, x1, x2).
  sim$eta0 <- model.matrix(~cbind(x1, x2), data = sim) %*% b
  sim$eta <- with(sim, eta0 + grp.eff[f]) # add the group effect
  sim$mu <- exp(sim$eta)                  # Poisson mean via log link
  sim$y <- with(sim, rpois(n.obs, lambda = mu))
  sim
}
dd <- simfun()
###---------------------------------------------------
##### The main simulation function to compare AGHQ and HDC methods #######
## Simulation study comparing maximum likelihood via adaptive Gauss-Hermite
## quadrature (glmer, 15 nodes) against hybrid data cloning (INLA) under
## three priors for the random-effect precision: informative Gamma(1, 2),
## flat, and vague Gamma(3, 0.05).
##
## Relies on globals set elsewhere in the script: iter (replications),
## k (number of clones), ng / nr (groups and obs per group used by simfun)
## -- NOTE(review): confirm these are assigned before calling.
## The argument `d` is ignored: a fresh data set is simulated each iteration.
##
## Returns a list of iter x 4 matrices (columns: AGHQ, HDC priors 1..3) of
## point estimates and standard errors for beta0, beta1, beta2 and sigma.
cfun.hdc <- function(d) {
####
## Preallocate result matrices: one row per replication, one column per method.
beta0<-matrix(0,ncol=4,nrow=iter)
beta1<-matrix(0,ncol=4,nrow=iter)
beta2<-matrix(0,ncol=4,nrow=iter)
vc<-matrix(0,ncol=4,nrow=iter)
se.beta0<-matrix(0,ncol=4,nrow=iter)
se.beta1<-matrix(0,ncol=4,nrow=iter)
se.beta2<-matrix(0,ncol=4,nrow=iter)
se.vc<-matrix(0,ncol=4,nrow=iter)
###
for(i in 1:iter){
d<-simfun()
####
##### MLE fitting by adaptive Gauss-Hermit quadrature method with 15 nodes
####
m <- glmer(y ~ x1 +x2 + (1 | f) , family = "poisson" , data = d , nAGQ=15)
####
##### preparing cloned data vector
####
## Stack k copies of the simulated data; the quoted left-hand sides are
## equivalent to plain assignments (x1.k <- ...).
"x1.k"<-rep(d[,1],k)
"x2.k"<-rep(d[,2],k)
"f.k"<-rep(d[,3],k)
"y.k"<-rep(d[,7],k)
clone.data<-cbind(x1.k,x2.k,f.k,y.k)
## Relabel groups so each clone gets its own ng distinct group ids.
clone.data[,3]<-rep(1:(ng*k),each=nr)
clone.data<-as.data.frame(clone.data)
####
##### HDC algorithm with Informative (gamma) prior for variance component
####
hdc.1 <- inla(y.k ~ x1.k + x2.k +
f(f.k,model="iid",param=c(1, 2)),family = "poisson",data = clone.data)
hyp.hdc.1 = inla.hyperpar(hdc.1)
## sigma = precision^(-1/2); second moment E[1/precision] gives the sd below.
vc.mean.1 = inla.expectation(function(x) 1/x^.5, hyp.hdc.1$marginals[[1]])
vc.m2.1 = inla.expectation(function(x) 1/x, hyp.hdc.1$marginals[[1]])
stdev.1 = sqrt(vc.m2.1- vc.mean.1^2)
####
##### HDC algorithm with Non-informative (flat) priors for variance component
####
hdc.2 <- inla(y.k ~ x1.k + x2.k +
f(f.k,model="iid"),family = "poisson",data = clone.data,
control.family=list(prior="flat"))
hyp.hdc.2 = inla.hyperpar(hdc.2)
vc.mean.2 = inla.expectation(function(x) 1/x^.5, hyp.hdc.2$marginals[[1]])
vc.m2.2 = inla.expectation(function(x) 1/x, hyp.hdc.2$marginals[[1]])
stdev.2 = sqrt(vc.m2.2- vc.mean.2^2)
####
##### HDC algorithm with Vague (gamma) priors for variance component
####
hdc.3 <- inla(y.k ~ x1.k + x2.k +
f(f.k,model="iid",param=c(3, 0.05)),family = "poisson", data = clone.data)
hyp.hdc.3 = inla.hyperpar(hdc.3)
vc.mean.3 = inla.expectation(function(x) 1/x^.5, hyp.hdc.3$marginals[[1]])
vc.m2.3 = inla.expectation(function(x) 1/x, hyp.hdc.3$marginals[[1]])
stdev.3 = sqrt(vc.m2.3- vc.mean.3^2)
####
##### Preparing output (parameters)
####
## Column order is always: AGHQ (glmer), then HDC priors 1, 2, 3.
beta0[i,]<-c(fixef(m)[1],hdc.1$summary.fixed[1,1],hdc.2$summary.fixed[1,1],
hdc.3$summary.fixed[1,1])
##
beta1[i,]<-c(fixef(m)[2],hdc.1$summary.fixed[2,1],hdc.2$summary.fixed[2,1],
hdc.3$summary.fixed[2,1])
##
beta2[i,]<-c(fixef(m)[3],hdc.1$summary.fixed[3,1],hdc.2$summary.fixed[3,1],
hdc.3$summary.fixed[3,1])
##
vc[i,]<-c(sqrt(unlist(VarCorr(m))),vc.mean.1,vc.mean.2,vc.mean.3)
###
###### (precision of parameters)
###
se.beta0[i,]<-c(sqrt(diag(vcov(m)))[1],hdc.1$summary.fixed[1,2],
hdc.2$summary.fixed[1,2],hdc.3$summary.fixed[1,2])
##
se.beta1[i,]<-c(sqrt(diag(vcov(m)))[2],hdc.1$summary.fixed[2,2],
hdc.2$summary.fixed[2,2],hdc.3$summary.fixed[2,2])
##
se.beta2[i,]<-c(sqrt(diag(vcov(m)))[3],hdc.1$summary.fixed[3,2],
hdc.2$summary.fixed[3,2],hdc.3$summary.fixed[3,2])
##
se.vc[i,]<-c(sqrt(diag(VarCorr(m)$f)/ng),stdev.1,stdev.2,stdev.3)
####
## Progress indicator: each replication fits four models and can be slow.
cat("iter=", i,"\n")
####
}
return(list('Beta0'=beta0,'Beta1'=beta1,'Beta2'=beta2,'Sigma'=vc,
'SE.Beta0'=se.beta0,'SE.Beta1'=se.beta1,'SE.Beta2'=se.beta2,'SE.Sigma'=se.vc))
}
###---------------------------------------------------
####### Run simulation for 100 data sets and 80 clones of data ############
ng = 50; nr = 2
# k=80
# iter=100
# rr.hdc <- cfun.hdc()
###########
###---------------------------------------------------
####### Comparing computing times for DC and HDC methods on ######################
####### a typical simulated data set ######################
## Build the cloned version of the simulated data set dd with k = 100 clones.
k=100
## Quoted left-hand sides are ordinary assignments (x1.k <- ...).
"x1.k"<-rep(dd[,1],k)
"x2.k"<-rep(dd[,2],k)
"f.k"<-rep(dd[,3],k)
"y.k"<-rep(dd[,7],k)
clone.data<-cbind(x1.k,x2.k,f.k,y.k)
## Relabel groups so each clone gets its own ng distinct group ids.
clone.data[,3]<-rep(1:(ng*k),each=nr)
clone.data<-as.data.frame(clone.data)
### HDC
### HDC: fit the cloned model by INLA.
hdc<-inla(y.k ~ x1.k + x2.k + f(f.k,model="iid"), data=clone.data, family="poisson")
### DC: same model by MCMC via inla's internal sampler, keeping the working
### files so the posterior traces can be read back below.
wd.dc = tempfile()
## same as before
result.dc = system.time(inla(y.k ~ x1.k + x2.k + f(f.k,model="iid"),
data=clone.data, family="poisson",
working.directory = wd.dc,
keep = TRUE,
inla.arg = "-m mcmc -N 10000 -T 10 -S 1"))
## Compare cpu times
time.hdc = hdc$cpu.used
time.hdc
result.dc
# Plotting the results (for example marginals for the hyperparameters)
hyp.hdc = hdc$marginals.hyperpar[[1]]
hyp.dc = scan(paste(wd.dc,"/results.files/hyperparameter-random.effect00000001-parameter-user-scale/trace.dat",sep=""))
## FIX: plot() originally used lty="l" (invalid line type) where type="l"
## was intended, and lines() used the string lty="2" instead of numeric 2;
## the corrected forms match the equivalent plots later in the file.
plot(hyp.hdc, type="l", lty=1, col=1,
main=expression(paste("hybrid DC and DC based densities of",~ sigma^{-2})))
lines(density(hyp.dc), lty=2, col=2)
##### compare another DC and HDC-based densities (for example the slope of x1)
int.inla = hdc$marginals.fixed[[2]]
int.dc = scan(paste(wd.dc,"/results.files/fixed.effect00000002/trace.dat",sep=""))
plot(int.inla, type="l", lty=1, col=1,
main=expression(paste("hybrid DC and DC based densities of",~ beta[1])))
lines(density(int.dc), lty=2, col=2)
####--------------------------------------------------------
###################################################################
############### Overdispersion Example: Seeds Data #########
################# Preparing cloned data set with k=200 ############
## Build the cloned Seeds data set: stack k = 200 copies of the 21 plates and
## give every cloned plate its own id so the random effect is re-estimated
## per clone.
seed.data<-Seeds
n1=21
k=200
n=n1*k
## Quoted left-hand sides are ordinary assignments (r.k <- ...).
"r.k"<-rep(seed.data[,1],k)
"n.k"<-rep(seed.data[,2],k)
"x1.k"<-rep(seed.data[,3],k)
"x2.k"<-rep(seed.data[,4],k)
"plate.k"<-rep(seed.data[,5],k)
clone.data<-cbind(r.k,n.k,x1.k,x2.k,plate.k)
## Unique plate label per cloned row.
clone.data[,5]<-1:n
clone.data<-as.data.frame(clone.data)
####---------------------------------------------------------------------------
###################### main effects model ############################
### Fitting by AGHQ method with 15 nodes #####
(main.effects <- glmer(r/n ~ x1 + x2 + (1 | plate) ,
family = "binomial", data = Seeds, nAGQ=15))
####---------------------------------------------------------------------------
#### Fitting by INLA method ####
seeds.inla.fit.1 = inla(r ~ x1 + x2 + f(plate, model="iid",
param=c(.5, .0164)), data=Seeds, family="binomial", Ntrials=n )
seeds.hyperpar.1 = inla.hyperpar(seeds.inla.fit.1)
#### Fixed effects summaries
summary(seeds.inla.fit.1)
#### variance component summaries
mean.1=m1 = inla.expectation(function(x) 1/x^.5, seeds.hyperpar.1$marginals[[1]])
m2 = inla.expectation(function(x) 1/x, seeds.hyperpar.1$marginals[[1]])
stdev.1 = sqrt(m2- mean.1^2)
####---------------------------------------------------------------------------
#### Fitting by HDC method with informative prior ######
formula.clone.1 = r.k ~ x1.k+x2.k+f(plate.k,model="iid",param=c(.5,.0164))
mod.seeds.clone.1 = inla(formula.clone.1,data=clone.data,family="binomial",Ntrials=n.k)
seeds.hyperpar.clone.1 = inla.hyperpar(mod.seeds.clone.1)
#### Fixed effects summaries
summary(mod.seeds.clone.1)
sd.clone.1<-as.vector(sqrt(k)*mod.seeds.clone.1$summary.fixed[,2])
#### Preparing elements for plotting
marginal.clone.intercept<-mod.seeds.clone.1$marginals.fixed[[1]]
marginal.clone.x1<-mod.seeds.clone.1$marginals.fixed$x1
marginal.clone.x2<-mod.seeds.clone.1$marginals.fixed$x2
hyp.k.1<-seeds.hyperpar.clone.1$marginals$`Precision for plate.k`
#### Variance component summaries
mean.clone.1=m1 = inla.expectation(function(x) 1/x^.5, seeds.hyperpar.clone.1$marginals[[1]])
m2.clone = inla.expectation(function(x) 1/x, seeds.hyperpar.clone.1$marginals[[1]])
stdev.clone.1 = sqrt(k*(m2.clone- mean.clone.1^2))
####---------------------------------------------------------------------------
#### Fitting by HDC method with flat prior ######
formula.clone.1.2 = r.k ~ x1.k+x2.k+f(plate.k,model="iid")
mod.seeds.clone.1.2 = inla(formula.clone.1.2,data=clone.data,
control.family=list(prior="flat"), family="binomial",Ntrials=n.k,
control.fixed=list(mean=c(1,1),prec=c(0.001,0.001)))
seeds.hyperpar.clone.1.2 = inla.hyperpar(mod.seeds.clone.1.2)
#### Fixed effects summaries
summary(mod.seeds.clone.1.2)
sd.clone.1.2<-as.vector(sqrt(k)*mod.seeds.clone.1.2$summary.fixed[,2])
#### Preparing elements for plotting
marginal.clone.2.intercept<-mod.seeds.clone.1.2$marginals.fixed[[1]]
marginal.clone.2.x1<-mod.seeds.clone.1.2$marginals.fixed$x1
marginal.clone.2.x2<-mod.seeds.clone.1.2$marginals.fixed$x2
hyp.k.2<-seeds.hyperpar.clone.1.2$marginals[[1]]
#### Variance component summaries
mean.clone.1.2 = inla.expectation(function(x) 1/x^.5, seeds.hyperpar.clone.1.2$marginals[[1]])
m2.clone.2 = inla.expectation(function(x) 1/x, seeds.hyperpar.clone.1.2$marginals[[1]])
stdev.clone.1.2 = sqrt(k*(m2.clone.2- mean.clone.1.2^2))
####---------------------------------------------------------------------------
#### Fitting by HDC method with vague (gamma) prior ######
formula.clone.1.3 = r.k ~ x1.k+x2.k+f(plate.k,model="iid",param=c(.01,0.01))
mod.seeds.clone.1.3 = inla(formula.clone.1.3,data=clone.data,
family="binomial",Ntrials=n.k, control.fixed=list(mean=c(-2,-1),prec=c(0.1,0.1)))
seeds.hyperpar.clone.1.3 = inla.hyperpar(mod.seeds.clone.1.3)
#### Fixed effects summaries
summary(mod.seeds.clone.1.3)
sd.clone.1.3<-as.vector(sqrt(k)*mod.seeds.clone.1.3$summary.fixed[,2])
inla.expectation(function(x) 1/x^.5, seeds.hyperpar.clone.1.3$marginals[[1]])
#### Preparing elements for plotting
marginal.clone.3.intercept<-mod.seeds.clone.1.3$marginals.fixed[[1]]
marginal.clone.3.x1<-mod.seeds.clone.1.3$marginals.fixed$x1
marginal.clone.3.x2<-mod.seeds.clone.1.3$marginals.fixed$x2
hyp.k.3<-seeds.hyperpar.clone.1.3$marginals[[1]]
#### Variance component summaries
mean.clone.1.3=m1.3 = inla.expectation(function(x) 1/x^.5, seeds.hyperpar.clone.1.3$marginals[[1]])
m2.clone.3 = inla.expectation(function(x) 1/x, seeds.hyperpar.clone.1.3$marginals[[1]])
stdev.clone.1.3 = sqrt(k*(m2.clone.3- mean.clone.1.3^2))
####---------------------------------------------------------------------------
###################################################
#### Plotting the results to compare HDC-based densities with respect to priors
par(mfrow=c(2,2))
plot(marginal.clone.intercept,main=expression(paste("HDC-based density of",~ beta[0])),
col=1,xlab="",ylab="",lty=1)
lines(marginal.clone.2.intercept,lty=3,col=3)
lines(marginal.clone.3.intercept,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.clone.x1,main=expression(paste("DC-based density of",~ beta[1])),
col=1,xlab="",ylab="",lty=1)
lines(marginal.clone.2.x1,lty=3,col=3)
lines(marginal.clone.3.x1,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.clone.x2,main=expression(paste("DC-based density of",~ beta[2])),
col=1,xlab="",ylab="",lty=1)
lines(marginal.clone.2.x2,lty=3,col=3)
lines(marginal.clone.3.x2,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(hyp.k.1,main=expression(paste("DC-based density of",~ sigma^{-2})),
col=1,xlab="",ylab="",lty=1,ylim=c(0,.7))
lines(hyp.k.2,lty=3,col=3)
lines(hyp.k.3,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
###############################################################################
############### Second Model for Interaction Model ###########################
####----------------------------------------------------------------
### Fitting by AGHQ method with 15 nodes #####
(interaction.effects <- glmer(r/n ~ x1 + x2 + I(x1*x2) + (1 | plate) ,
family = "binomial", data = Seeds, nAGQ=15))
####----------------------------------------------------------------
## Fitting by INLA
seeds.inla.fit.2 = inla(r ~ x1 + x2+I(x1*x2) + f(plate, model="iid",
param=c(.5, .0164)), data=Seeds, family="binomial", Ntrials=n )
seeds.hyperpar.2 = inla.hyperpar(seeds.inla.fit.2)
### Fixed effects summaries
summary(seeds.inla.fit.2)
### Variance components summaries
mean.2 = inla.expectation(function(x) 1/x^.5, seeds.hyperpar.2$marginals[[1]])
m2 = inla.expectation(function(x) 1/x, seeds.hyperpar.2$marginals[[1]])
stdev.2 = sqrt(m2- mean.2^2)
####---------------------------------------------------------------------------
################ Fitting by HDC with informative prior
formula.clone.int.1 = r.k ~ x1.k+x2.k+I(x1.k*x2.k)+f(plate.k,model="iid",param=c(.5,.0164))
mod.seeds.clone.int.1 = inla(formula.clone.int.1,data=clone.data,family="binomial",Ntrials=n.k,
control.fixed=list(mean=c(1,1,1),prec=c(0.001,0.001,0.001)))
seeds.hyperpar.clone.int.1 = inla.hyperpar(mod.seeds.clone.int.1)
### Fixed effects summaries
summary(mod.seeds.clone.int.1)
sd.clone.int.1<-as.vector(sqrt(k)*mod.seeds.clone.int.1$summary.fixed[,2])
#### Preparing elements for plotting
marginal.clone.int.1.intercept<-mod.seeds.clone.int.1$marginals.fixed[[1]]
marginal.clone.int.1.x1<-mod.seeds.clone.int.1$marginals.fixed$x1.k
marginal.clone.int.1.x2<-mod.seeds.clone.int.1$marginals.fixed$x2.k
marginal.clone.int.1.interaction<-mod.seeds.clone.int.1$marginals.fixed[[4]]
hyp.k.int.1<-seeds.hyperpar.clone.int.1$marginals$`Precision for plate.k`
### Variance components summaries
mean.clone.int.1 = inla.expectation(function(x) 1/x^.5, seeds.hyperpar.clone.int.1$marginals[[1]])
m2.clone.int.1 = inla.expectation(function(x) 1/x, seeds.hyperpar.clone.int.1$marginals[[1]])
stdev.clone.int.1 = sqrt(k*(m2.clone.int.1- mean.clone.int.1^2))
# -----------------------------------------------------------
################ Fitting by HDC with flat prior
formula.clone.int.2 = r.k ~ x1.k+x2.k+I(x1.k*x2.k)+f(plate.k,model="iid")
mod.seeds.clone.int.2 = inla(formula.clone.int.2,data=clone.data,
control.family=list(prior="flat"), family="binomial",Ntrials=n.k,
control.fixed=list(mean=c(-1,-2,-1),prec=c(0.1,0.1,0.1)))
seeds.hyperpar.clone.int.2 = inla.hyperpar(mod.seeds.clone.int.2)
### Fixed effects summaries
summary(mod.seeds.clone.int.2)
sd.clone.int.2<-as.vector(sqrt(k)*mod.seeds.clone.int.2$summary.fixed[,2])
#### Preparing elements for plotting
marginal.clone.int.2.intercept<-mod.seeds.clone.int.2$marginals.fixed[[1]]
marginal.clone.int.2.x1<-mod.seeds.clone.int.2$marginals.fixed$x1.k
marginal.clone.int.2.x2<-mod.seeds.clone.int.2$marginals.fixed$x2.k
marginal.clone.int.2.interaction<-mod.seeds.clone.int.2$marginals.fixed[[4]]
hyp.k.int.2<-seeds.hyperpar.clone.int.2$marginals$`Precision for plate.k`
### Variance components summaries
mean.clone.int.2 = inla.expectation(function(x) 1/x^.5, seeds.hyperpar.clone.int.2$marginals[[1]])
m2.clone.int.2 = inla.expectation(function(x) 1/x, seeds.hyperpar.clone.int.2$marginals[[1]])
stdev.clone.int.2 = sqrt(k*(m2.clone.int.2- mean.clone.int.2^2))
# -----------------------------------------------------------
################ Fitting by HDC with vague (gamma) prior
formula.clone.int.3 = r.k ~ x1.k+x2.k+I(x1.k*x2.k)+f(plate.k,model="iid",param=c(.01,0.01))
mod.seeds.clone.int.3 = inla(formula.clone.int.3,data=clone.data,
family="binomial",Ntrials=n.k, control.fixed=list(mean=c(-2,2,1),prec=c(0.001,0.001,0.001)))
seeds.hyperpar.clone.int.3 = inla.hyperpar(mod.seeds.clone.int.3)
### Fixed effects summaries
summary(mod.seeds.clone.int.3)
sd.clone.int.3<-as.vector(sqrt(k)*mod.seeds.clone.int.3$summary.fixed[,2])
inla.expectation(function(x) 1/x^.5, seeds.hyperpar.clone.int.3$marginals[[1]])
#### Preparing elements for plotting
marginal.clone.int.3.intercept<-mod.seeds.clone.int.3$marginals.fixed[[1]]
marginal.clone.int.3.x1<-mod.seeds.clone.int.3$marginals.fixed$x1.k
marginal.clone.int.3.x2<-mod.seeds.clone.int.3$marginals.fixed$x2.k
marginal.clone.int.3.interaction<-mod.seeds.clone.int.3$marginals.fixed[[4]]
hyp.k.int.3<-seeds.hyperpar.clone.int.3$marginals$`Precision for plate.k`
## Variance components summaries
mean.clone.int.3 = inla.expectation(function(x) 1/x^.5, seeds.hyperpar.clone.int.3$marginals[[1]])
m2.clone.int.3 = inla.expectation(function(x) 1/x, seeds.hyperpar.clone.int.3$marginals[[1]])
stdev.clone.int.3 = sqrt(k*(m2.clone.int.3- mean.clone.int.3^2))
# -----------------------------------------------------------
#### Plotting the results
par(mfrow=c(2,2))
plot(marginal.clone.int.1.intercept,
main=expression(paste("DC-based distribution",~ beta[0])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.clone.int.2.x1,lty=3,col=3)
lines(marginal.clone.int.3.x1,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.clone.int.1.x1,
main=expression(paste("DC-based distribution",~ beta[1])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.clone.int.2.x1,lty=3,col=3)
lines(marginal.clone.int.3.x1,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.clone.int.1.x2,
main=expression(paste("DC-based distribution",~ beta[2])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.clone.int.2.x2,lty=3,col=3)
lines(marginal.clone.int.3.x2,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.clone.int.1.interaction,
main=expression(paste("DC-based distribution",~ beta[3])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.clone.int.2.interaction,lty=3,col=3)
lines(marginal.clone.int.3.interaction,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(hyp.k.int.1,main=expression(paste("DC-based distribution",~ sigma^{-2})),
col=1,xlab="",ylab="Density",lty=1,type="l",ylim=c(0,.4))
lines(hyp.k.int.2,lty=3,col=3)
lines(hyp.k.int.3,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
# -------------------------------------------------------------------------
#### Comparing DC and HDC results and their computing times ####
formula = r ~ x1+x2+f(plate,model="iid",param=c(.5,.0164))
formula.clone = r.k ~ x1.k+x2.k+f(plate.k,model="iid",param=c(.5,.0164))
## Run HDC
result.inla = inla(formula.clone, data=clone.data, family="binomial",
Ntrials=n.k, verbose=T)
## Run DC
wd.dc = tempfile()
## same as before
result.dc = system.time(inla(formula.clone,data=clone.data,
family="binomial",Ntrials=n.k,
working.directory = wd.dc,
keep = TRUE,
inla.arg = "-m mcmc -N 10000 -T 10 -S 1"))
## Compare cpu times
time.inla = result.inla$cpu.used
time.inla
result.dc
## compare curves (for example densities for the hyperparameters)
hyp.inla = result.inla$marginals.hyperpar[[1]]
hyp.dc = scan(paste(wd.dc,"/results.files/hyperparameter-random.effect00000001-parameter-user-scale/trace.dat",sep=""))
plot(hyp.inla, type="l",lty=1,col=1,xlab="",ylab="",
main=expression(paste("hybrid DC and DC-based densities of"~ sigma^{-2})),ylim=c(0,0.5))
lines(density(hyp.dc),lty=2,col=2)
## compare curve for fixed effects (for example \beta_1)
int.inla = result.inla$marginals.fixed[[2]]
int.dc = scan(paste(wd.dc,"/results.files/fixed.effect00000002/trace.dat",sep=""))
plot(int.inla,type="l",lty=1,col=1,ylim=c(0,20),xlab="",ylab="",
main=expression(paste("hybrid DC and DC based densities of",~ beta[1])))
lines(density(int.dc),col=2,lty=2)
############################################################################
# -------------------------------------------------------------------------
#########################################################################
############### Longitudianl Data Example: Epilepsy Data #########
### Loading data
## Visit is created by glmmAK, it corresponds to Breslow and Clayton's
## Visit/10, because the codes are -3,-1,1,3.
require (glmmAK)
data(epilepticBC)
epil = epilepticBC
epil$id2=epil$id
epil$rand=1:nrow(epil)
epil$V4=epil$visit==4
epil$newid=rep(1:(nrow(epil)/4), each=4)
###---------------------------------------------------------------------
### Constructing cloned data set
####################################
## Build the cloned epilepsy data set: stack k copies of the data and relabel
## the grouping columns so each clone gets its own set of subjects.
epil.data<-epil
patient=59                      # number of subjects
n1=patient*4                    # rows in the original data (4 visits each)
k=100                           # number of clones
n=n1*k                          # rows in the cloned data
## Quoted left-hand sides are ordinary assignments (id.k <- ...).
"id.k"<-rep(epil.data[,1],k)
"visit.k"<-rep(epil.data[,2],k)
"seizure0.k"<-rep(epil.data[,3],k)
"age.k"<-rep(epil.data[,4],k)
"Seizure.k"<-rep(epil.data[,5],k)
"Base.k"<-rep(epil.data[,6],k)
"Trt.k"<-rep(epil.data[,7],k)
"Base.Trt.k"<-rep(epil.data[,8],k)
"Age.k"<-rep(epil.data[,9],k)
"Visit.k"<-rep(epil.data[,10],k)
"id2.k"<-rep(epil.data[,11],k)
"rand.k"<-rep(epil.data[,12],k)
"V4.k"<-rep(epil.data[,13],k)
"newid.k"<-rep(epil.data[,14],k)
clone.data<-cbind(id.k,visit.k,seizure0.k,age.k,Seizure.k,
Base.k,Trt.k,Base.Trt.k,Age.k,Visit.k,id2.k,rand.k,V4.k,newid.k)
clone.data[,12]<-1:n            # rand: unique row index
## Distinct subject id per clone, offset by 100 as in the original labels.
clone.data[,1]<-clone.data[,11]<-rep((1+100):(patient*k+100),each=4)
## BUG FIX: the original 'rep(1:patient*k,each=4)' parses as (1:patient)*k
## (length patient*4 = 236), which silently recycled 100-fold into this
## n-row column; the intended unique per-clone labels are 1:(patient*k).
clone.data[,2]<-clone.data[,14]<-rep(1:(patient*k),each=4)
clone.data<-as.data.frame(clone.data)
##############################################################################
################## First random intercept model for Epilepsy data #########
################## proposed by Fong et al. (2009) #########################
###---------------------------------------------------------------------
### Fitting by AGHQ method with 15 nodes
(first.model <- glmer(Seizure ~ Base + Trt + I(Base*Trt) + Age + V4 +
(1 | id) , family = "poisson", data = epil, nAGQ=15))
###---------------------------------------------------------------------
### Fitting by INLA method
formula=Seizure ~ Base + Trt + I(Base*Trt) + Age + V4 +
f(id,model="iid",param=c(2, 1.140),diagonal=0)
epil.inla.fit.1 = inla(formula, data=epil, family="poisson" ,
control.compute=list(hyperpar=T,dic=T))
epil.hyperpar.1 = inla.hyperpar(epil.inla.fit.1)
### Fixed effects summaries
summary(epil.inla.fit.1)
### Variance component summaries
mean.1=m1 = inla.expectation(function(x) 1/x^.5, epil.hyperpar.1$marginals[[1]])
m2 = inla.expectation(function(x) 1/x, epil.hyperpar.1$marginals[[1]])
stdev.1 = sqrt(m2- mean.1^2)
###---------------------------------------------------------------------
### Fitting by HDC method with informative prior
epil.dc.fit.1 = inla(Seizure.k ~ Base.k + Trt.k + I(Base.k*Trt.k)
+Age.k + V4.k+f(id.k,model="iid",param=c(2, 1.140)), data=clone.data,
family="poisson" )
epil.dc.hyperpar.1 = inla.hyperpar(epil.dc.fit.1)
### Fixed effects summaries
summary(epil.dc.fit.1)
sd.clone.1<-as.vector(sqrt(k)*epil.dc.fit.1$summary.fixed[,2])
### Variance component summaries
mean.dc.1 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.1$marginals[[1]])
m2.dc.1 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.1$marginals[[1]])
stdev.clone.1 = sqrt(k*(m2.dc.1- mean.dc.1^2))
#### Preparing elements to plot the results
marginal.Intercept.1<-epil.dc.fit.1$marginals.fixed[[1]]
marginal.Base.1<-epil.dc.fit.1$marginals.fixed$Base.k
marginal.Trt.1<-epil.dc.fit.1$marginals.fixed$Trt.k
marginal.BaseTrt.1<-epil.dc.fit.1$marginals.fixed[[4]]
marginal.Age.1<-epil.dc.fit.1$marginals.fixed$Age.k
marginal.V4.1<-epil.dc.fit.1$marginals.fixed$V4.k
hyp.clone.1<-epil.dc.hyperpar.1$marginals$`Precision for id.k`
###---------------------------------------------------------------------
### Fitting by HDC method with flat prior
epil.dc.fit.2 = inla(Seizure.k ~ Base.k + Trt.k + I(Base.k*Trt.k)
+Age.k + V4.k+f(id.k,model="iid"), data=clone.data,control.family=list(prior="flat"),
family="poisson",control.fixed=list(mean=c(1,1,1,1,0),prec=c(0.001,0.001,0.01,0.001,
0.01)))
epil.dc.hyperpar.2 = inla.hyperpar(epil.dc.fit.2)
### Fixed effects summaries
summary(epil.dc.fit.2)
sd.clone.2<-as.vector(sqrt(k)*epil.dc.fit.2$summary.fixed[,2])
### Variance component summaries
mean.dc.2 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.2$marginals[[1]])
m2.dc.2 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.2$marginals[[1]])
stdev.clone.2 = sqrt(k*(m2.dc.2- mean.dc.2^2))
#### Preparing elements to plot the results
marginal.Intercept.2<-epil.dc.fit.2$marginals.fixed[[1]]
marginal.Base.2<-epil.dc.fit.2$marginals.fixed$Base.k
marginal.Trt.2<-epil.dc.fit.2$marginals.fixed$Trt.k
marginal.BaseTrt.2<-epil.dc.fit.2$marginals.fixed[[4]]
marginal.Age.2<-epil.dc.fit.2$marginals.fixed$Age.k
marginal.V4.2<-epil.dc.fit.2$marginals.fixed$V4.k
hyp.clone.2<-epil.dc.hyperpar.2$marginals$`Precision for id.k`
###---------------------------------------------------------------------
### Fitting by HDC method with vague (gamma) prior
epil.dc.fit.3 = inla(Seizure.k ~ Base.k + Trt.k + I(Base.k*Trt.k)
+Age.k + V4.k+f(id.k,model="iid",param=c(0.05,0.02)), data=clone.data,
family="poisson",control.fixed=list(mean=c(-1,-2,2,0,1),prec=c(0.01,0.001,0.001,0.01,
0.001)))
epil.dc.hyperpar.3 = inla.hyperpar(epil.dc.fit.3)
### Fixed effects summaries
summary(epil.dc.fit.3)
sd.clone.3<-as.vector(sqrt(k)*epil.dc.fit.3$summary.fixed[,2])
### Variance component summaries
mean.dc.3 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.3$marginals[[1]])
m2.dc.3 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.3$marginals[[1]])
stdev.clone.3 = sqrt(k*(m2.dc.3- mean.dc.3^2))
#### Preparing elements to plot the results
marginal.Intercept.3<-epil.dc.fit.3$marginals.fixed[[1]]
marginal.Base.3<-epil.dc.fit.3$marginals.fixed$Base.k
marginal.Trt.3<-epil.dc.fit.3$marginals.fixed$Trt.k
marginal.BaseTrt.3<-epil.dc.fit.3$marginals.fixed[[4]]
marginal.Age.3<-epil.dc.fit.3$marginals.fixed$Age.k
marginal.V4.3<-epil.dc.fit.3$marginals.fixed$V4.k
hyp.clone.3<-epil.dc.hyperpar.3$marginals$`Precision for id.k`
###---------------------------------------------------------------------
#### Plotting the results
par(mfrow=c(3,2))
plot(marginal.Intercept.1,main=expression(paste(beta[0])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.Intercept.2,lty=3,col=3)
lines(marginal.Intercept.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.Base.1,main=expression(paste(beta[1])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.Base.2,lty=3,col=3)
lines(marginal.Base.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.Trt.1,main=expression(paste(beta[2])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.Trt.2,lty=3,col=3)
lines(marginal.Trt.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.BaseTrt.1,main=expression(paste(beta[3])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.BaseTrt.2,lty=3,col=3)
lines(marginal.BaseTrt.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.Age.1,main=expression(paste(beta[4])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.Age.2,lty=3,col=3)
lines(marginal.Age.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.V4.1,main=expression(paste(beta[5])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.V4.2,lty=3,col=3)
lines(marginal.V4.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(hyp.clone.1,main=expression(paste("HDC-based distribution of",~ sigma^{-2})),
col=1,xlab="",ylab="",lty=1,type="l")
lines(hyp.clone.2,lty=3,col=3)
lines(hyp.clone.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
##############################################################################
################## Second random intercept with measurement error #########
############ model for Epilepsy data proposed by Fong et al. (2009) ####
###---------------------------------------------------------------------
### Fitting by glmer -- NOTE(review): heading originally said "AGHQ method with
### 15 nodes", but nAGQ is not set below, so glmer uses its default Laplace
### approximation (nAGQ = 1); lme4 does not allow nAGQ > 1 with more than one
### random-effect term, and this model has (1|id) + (1|rand).
(second.model<- glmer(Seizure ~ Base + Trt + I(Base*Trt) + Age + V4 +
(1 | id) + (1|rand), family = "poisson", data = epil))
###---------------------------------------------------------------------
### Fitting by INLA method
formula.2=Seizure ~ Base + Trt + I(Base*Trt) + Age + V4 +
f(id,model="iid",param=c(2, 1.240)) +
f(rand,model="iid",param=c(2, 1.140), diagonal=0)
epil.inla.fit.2 = inla(formula.2, data=epil,
family="poisson" )
epil.hyperpar.2 = inla.hyperpar(epil.inla.fit.2)
### Fixed effects summaries
summary(epil.inla.fit.2)
### Variance component summaries
mean.1 = inla.expectation(function(x) 1/x^.5, epil.hyperpar.2$marginals[[1]])
m1 = inla.expectation(function(x) 1/x, epil.hyperpar.2$marginals[[1]])
stdev.1 = sqrt(m1- mean.1^2)
# Posterior mean of sigma (= E[1/sqrt(precision)]) for the second random effect.
# The original chained assignment "mean.2=m1 =" needlessly clobbered m1.
mean.2 = inla.expectation(function(x) 1/x^.5, epil.hyperpar.2$marginals[[2]])
m2 = inla.expectation(function(x) 1/x, epil.hyperpar.2$marginals[[2]])
stdev.2 = sqrt(m2- mean.2^2)
###---------------------------------------------------------------------
### Fitting by HDC method with informative prior
formula.clone.2=Seizure.k ~ Base.k + Trt.k + I(Base.k*Trt.k) + Age.k + V4.k +
f(id.k,model="iid",param=c(2, 1.240))+
f(rand.k,model="iid",param=c(2, 1.140))
epil.dc.fit.2.1 = inla(formula.clone.2,data=clone.data,family="poisson" )
epil.dc.hyperpar.2.1 = inla.hyperpar(epil.dc.fit.2.1)
### Fixed effects summaries
summary(epil.dc.fit.2.1)
sd.clone.2.1<-as.vector(sqrt(k)*epil.dc.fit.2.1$summary.fixed[,2])
### Variance component summaries
mean.dc.2.1.1 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.2.1$marginals[[1]])
m2.dc.2.1.1 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.2.1$marginals[[1]])
stdev.clone.2.1.1 = sqrt(k*(m2.dc.2.1.1- mean.dc.2.1.1^2))
mean.dc.2.1.2 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.2.1$marginals[[2]])
m2.dc.2.1.2 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.2.1$marginals[[2]])
stdev.clone.2.1.2 = sqrt(k*(m2.dc.2.1.2- mean.dc.2.1.2^2))
#### Preparing elements to plot the results
marginal.Intercept.2.1<-epil.dc.fit.2.1$marginals.fixed[[1]]
marginal.Base.2.1<-epil.dc.fit.2.1$marginals.fixed$Base.k
marginal.Trt.2.1<-epil.dc.fit.2.1$marginals.fixed$Trt.k
marginal.BaseTrt.2.1<-epil.dc.fit.2.1$marginals.fixed[[4]]
marginal.Age.2.1<-epil.dc.fit.2.1$marginals.fixed$Age.k
marginal.V4.2.1<-epil.dc.fit.2.1$marginals.fixed$V4.k
hyp.clone.2.1.1<-epil.dc.hyperpar.2.1$marginals$`Precision for id.k`
hyp.clone.2.1.2<-epil.dc.hyperpar.2.1$marginals$`Precision for rand.k`
###---------------------------------------------------------------------
### Fitting by HDC method with flat prior
epil.dc.fit.2.2 = inla(Seizure.k ~ Base.k + Trt.k + I(Base.k*Trt.k) + Age.k + V4.k +
f(id.k,model="iid")+f(rand.k,model="iid"),control.family=list(prior="flat"),
data=clone.data,family="poisson",
control.fixed=list(mean=c(1,1,1,1,0),prec=c(0.001,0.001,0.01,0.001,0.01)))
epil.dc.hyperpar.2.2 = inla.hyperpar(epil.dc.fit.2.2)
### Fixed effects summaries
summary(epil.dc.fit.2.2)
sd.clone.2.2<-as.vector(sqrt(k)*epil.dc.fit.2.2$summary.fixed[,2])
### Variance component summaries
mean.dc.2.2.1 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.2.2$marginals[[1]])
m2.dc.2.2.1 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.2.2$marginals[[1]])
stdev.clone.2.2.1 = sqrt(k*(m2.dc.2.2.1- mean.dc.2.2.1^2))
mean.dc.2.2.2 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.2.2$marginals[[2]])
m2.dc.2.2.2 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.2.2$marginals[[2]])
stdev.clone.2.2.2 = sqrt(k*(m2.dc.2.2.2- mean.dc.2.2.2^2))
#### Preparing elements to plot the results
marginal.Intercept.2.2<-epil.dc.fit.2.2$marginals.fixed[[1]]
marginal.Base.2.2<-epil.dc.fit.2.2$marginals.fixed$Base.k
marginal.Trt.2.2<-epil.dc.fit.2.2$marginals.fixed$Trt.k
marginal.BaseTrt.2.2<-epil.dc.fit.2.2$marginals.fixed[[4]]
marginal.Age.2.2<-epil.dc.fit.2.2$marginals.fixed$Age.k
marginal.V4.2.2<-epil.dc.fit.2.2$marginals.fixed$V4.k
hyp.clone.2.2.1<-epil.dc.hyperpar.2.2$marginals$`Precision for id.k`
hyp.clone.2.2.2<-epil.dc.hyperpar.2.2$marginals$`Precision for rand.k`
###---------------------------------------------------------------------
### Fitting by HDC method with vague (gamma) prior
epil.dc.fit.2.3 = inla(Seizure.k ~ Base.k + Trt.k + I(Base.k*Trt.k) + Age.k + V4.k +
f(id.k,model="iid",param=c(0.05, 0.02))+f(rand.k,model="iid",param=c(0.01, 0.01)),
data=clone.data,family="poisson",
control.fixed=list(mean=c(-1,-2,2,0,1),prec=c(0.01,0.001,0.001,0.01,0.001)))
epil.dc.hyperpar.2.3 = inla.hyperpar(epil.dc.fit.2.3)
### Fixed effects summaries
summary(epil.dc.fit.2.3)
sd.clone.2.3<-as.vector(sqrt(k)*epil.dc.fit.2.3$summary.fixed[,2])
### Variance component summaries
mean.dc.2.3.1 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.2.3$marginals[[1]])
m2.dc.2.3.1 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.2.3$marginals[[1]])
stdev.clone.2.3.1 = sqrt(k*(m2.dc.2.3.1- mean.dc.2.3.1^2))
mean.dc.2.3.2 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.2.3$marginals[[2]])
m2.dc.2.3.2 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.2.3$marginals[[2]])
stdev.clone.2.3.2 = sqrt(k*(m2.dc.2.3.2- mean.dc.2.3.2^2))
#### Preparing elements to plot the results
marginal.Intercept.2.3<-epil.dc.fit.2.3$marginals.fixed[[1]]
marginal.Base.2.3<-epil.dc.fit.2.3$marginals.fixed$Base.k
marginal.Trt.2.3<-epil.dc.fit.2.3$marginals.fixed$Trt.k
marginal.BaseTrt.2.3<-epil.dc.fit.2.3$marginals.fixed[[4]]
marginal.Age.2.3<-epil.dc.fit.2.3$marginals.fixed$Age.k
marginal.V4.2.3<-epil.dc.fit.2.3$marginals.fixed$V4.k
hyp.clone.2.3.1<-epil.dc.hyperpar.2.3$marginals$`Precision for id.k`
hyp.clone.2.3.2<-epil.dc.hyperpar.2.3$marginals$`Precision for rand.k`
###---------------------------------------------------------------------
#### Plotting the results
par(mfrow=c(3,2))
plot(marginal.Intercept.2.1,main=expression(paste(beta[0])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.Intercept.2.2,lty=3,col=3)
lines(marginal.Intercept.2.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.Base.2.1,main=expression(paste(beta[1])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.Base.2.2,lty=3,col=3)
lines(marginal.Base.2.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
# Fixed a stray double quote that turned beta[2] into an unterminated string
# literal and broke the plotmath title (compare the parallel plots above/below).
plot(marginal.Trt.2.1,main=expression(paste(beta[2])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.Trt.2.2,lty=3,col=3)
lines(marginal.Trt.2.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.BaseTrt.2.1,main=expression(paste(beta[3])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.BaseTrt.2.2,lty=3,col=3)
lines(marginal.BaseTrt.2.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.Age.2.1,main=expression(paste(beta[4])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.Age.2.2,lty=3,col=3)
lines(marginal.Age.2.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.V4.2.1,main=expression(paste(beta[5])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.V4.2.2,lty=3,col=3)
lines(marginal.V4.2.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
par(mfrow=c(2,1))
plot(hyp.clone.2.1.1,main=expression(paste("HDC-based distribution of",~ sigma[1]^{-2})),
col=1,xlab="",ylab="",lty=1,type="l")
lines(hyp.clone.2.2.1,lty=3,col=3)
lines(hyp.clone.2.3.1,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(hyp.clone.2.1.2,main=expression(paste("HDC-based distribution of",~ sigma[2]^{-2})),
col=1,xlab="",ylab="",lty=1,type="l")
lines(hyp.clone.2.2.2,lty=3,col=3)
lines(hyp.clone.2.3.2,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
##############################################################################
################## Second random slope model ##############################
############ for Epilepsy data proposed by Fong et al. (2009) ##########
###---------------------------------------------------------------------
### Fitting by AGHQ method with 15 nodes
(third.model<- glmer(Seizure ~ Base + Trt + I(Base*Trt) + Age + Visit +
(Visit|id2), family = "poisson", nAGQ=15, data = epil))
###---------------------------------------------------------------------
### Fitting by INLA method
epil.inla.fit.3 = inla(Seizure ~ Base + Trt + I(Base*Trt) + Age +
Visit +f(id, model="2diidwishartpart0", param=c(5, 2.277904,
1.692047, 0), diagonal=0) + f(id2, Visit,
model="2diidwishartpart1", diagonal = 0), data=epil,
family="poisson" )
epil.hyperpar.3 = inla.hyperpar(epil.inla.fit.3)
### Fixed effects summaries
summary(epil.inla.fit.3)
### Variance component summaries
# Posterior mean of sigma for the random-slope model's first hyperparameter.
# The original chained assignment "mean.3=m1 =" needlessly clobbered m1.
mean.3 = inla.expectation(function(x) 1/x^.5, epil.hyperpar.3$marginals[[1]])
m2 = inla.expectation(function(x) 1/x, epil.hyperpar.3$marginals[[1]])
stdev.3.1 = sqrt(m2- mean.3^2)
###---------------------------------------------------------------------
### Fitting by HDC method with first prior set
epil.dc.fit.3.1 = inla(Seizure.k ~ Base.k + Trt.k + I(Base.k*Trt.k) + Age.k +
Visit.k +f(id.k, model="2diidwishartpart0", param=c(5, 2.277904,
1.692047, 0), diagonal=0) + f(id2.k, Visit.k,
model="2diidwishartpart1", diagonal = 0), data=clone.data,
family="poisson" )
epil.dc.hyperpar.3.1 = inla.hyperpar(epil.dc.fit.3.1)
### Fixed effects summaries
summary(epil.dc.fit.3.1)
sd.clone.3.1<-as.vector(sqrt(k)*epil.dc.fit.3.1$summary.fixed[,2])
### Variance component summaries
mean.dc.3.1.1 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.3.1$marginals[[1]])
m2.dc.3.1.1 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.3.1$marginals[[1]])
stdev.clone.3.1.1 = sqrt(k*(m2.dc.3.1.1- mean.dc.3.1.1^2))
mean.dc.3.1.2 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.3.1$marginals[[2]])
m2.dc.3.1.2 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.3.1$marginals[[2]])
stdev.clone.3.1.2 = sqrt(k*(m2.dc.3.1.2- mean.dc.3.1.2^2))
#### Preparing elements to plot the results
marginal.Intercept.3.1<-epil.dc.fit.3.1$marginals.fixed[[1]]
marginal.Base.3.1<-epil.dc.fit.3.1$marginals.fixed$Base.k
marginal.Trt.3.1<-epil.dc.fit.3.1$marginals.fixed$Trt.k
marginal.BaseTrt.3.1<-epil.dc.fit.3.1$marginals.fixed[[4]]
marginal.Age.3.1<-epil.dc.fit.3.1$marginals.fixed$Age.k
marginal.V4.3.1<-epil.dc.fit.3.1$marginals.fixed[[6]]
hyp.clone.3.1.1<-epil.dc.hyperpar.3.1$marginals[[1]]
hyp.clone.3.1.2<-epil.dc.hyperpar.3.1$marginals[[2]]
###---------------------------------------------------------------------
### Fitting by HDC method with second prior set
epil.dc.fit.3.2 = inla(Seizure.k ~ Base.k + Trt.k + I(Base.k*Trt.k) + Age.k +
Visit.k +f(id.k, model="2diidwishartpart0", param=c(4, 3,
4, 0), diagonal=0) + f(id2.k, Visit.k,
model="2diidwishartpart1", diagonal = 0), data=clone.data,
family="poisson" )
epil.dc.hyperpar.3.2 = inla.hyperpar(epil.dc.fit.3.2)
### Fixed effects summaries
summary(epil.dc.fit.3.2)
sd.clone.3.2<-as.vector(sqrt(k)*epil.dc.fit.3.2$summary.fixed[,2])
### Variance component summaries
mean.dc.3.2.1 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.3.2$marginals[[1]])
m2.dc.3.2.1 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.3.2$marginals[[1]])
stdev.clone.3.2.1 = sqrt(k*(m2.dc.3.2.1- mean.dc.3.2.1^2))
mean.dc.3.2.2 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.3.2$marginals[[2]])
m2.dc.3.2.2 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.3.2$marginals[[2]])
stdev.clone.3.2.2 = sqrt(k*(m2.dc.3.2.2- mean.dc.3.2.2^2))
#### Preparing elements to plot the results
marginal.Intercept.3.2<-epil.dc.fit.3.2$marginals.fixed[[1]]
marginal.Base.3.2<-epil.dc.fit.3.2$marginals.fixed$Base.k
marginal.Trt.3.2<-epil.dc.fit.3.2$marginals.fixed$Trt.k
marginal.BaseTrt.3.2<-epil.dc.fit.3.2$marginals.fixed[[4]]
marginal.Age.3.2<-epil.dc.fit.3.2$marginals.fixed$Age.k
marginal.V4.3.2<-epil.dc.fit.3.2$marginals.fixed[[6]]
hyp.clone.3.2.1<-epil.dc.hyperpar.3.2$marginals[[1]]
hyp.clone.3.2.2<-epil.dc.hyperpar.3.2$marginals[[2]]
###---------------------------------------------------------------------
### Fitting by HDC method with third prior set
epil.dc.fit.3.3 = inla(Seizure.k ~ Base.k + Trt.k + I(Base.k*Trt.k) + Age.k +
Visit.k +f(id.k, model="2diidwishartpart0", param=c(6, 0.5,
0.5, 0), diagonal=0) + f(id2.k, Visit.k,
model="2diidwishartpart1", diagonal = 0), data=clone.data,
family="poisson" )
epil.dc.hyperpar.3.3 = inla.hyperpar(epil.dc.fit.3.3)
### Fixed effects summaries
summary(epil.dc.fit.3.3)
# HDC-adjusted fixed-effect standard deviations (sqrt(k) rescaling of the
# cloned-posterior SDs).  Added the missing closing parenthesis, which was a
# syntax error in the original line.
sd.clone.3.3<-as.vector(sqrt(k)*epil.dc.fit.3.3$summary.fixed[,2])
### Variance component summaries
mean.dc.3.3.1 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.3.3$marginals[[1]])
m2.dc.3.3.1 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.3.3$marginals[[1]])
stdev.clone.3.3.1 = sqrt(k*(m2.dc.3.3.1- mean.dc.3.3.1^2))
mean.dc.3.3.2 = inla.expectation(function(x) 1/x^.5, epil.dc.hyperpar.3.3$marginals[[2]])
m2.dc.3.3.2 = inla.expectation(function(x) 1/x, epil.dc.hyperpar.3.3$marginals[[2]])
stdev.clone.3.3.2 = sqrt(k*(m2.dc.3.3.2- mean.dc.3.3.2^2))
#### Preparing elements to plot the results
marginal.Intercept.3.3<-epil.dc.fit.3.3$marginals.fixed[[1]]
marginal.Base.3.3<-epil.dc.fit.3.3$marginals.fixed$Base.k
marginal.Trt.3.3<-epil.dc.fit.3.3$marginals.fixed$Trt.k
marginal.BaseTrt.3.3<-epil.dc.fit.3.3$marginals.fixed[[4]]
marginal.Age.3.3<-epil.dc.fit.3.3$marginals.fixed$Age.k
marginal.V4.3.3<-epil.dc.fit.3.3$marginals.fixed[[6]]
hyp.clone.3.3.1<-epil.dc.hyperpar.3.3$marginals[[1]]
hyp.clone.3.3.2<-epil.dc.hyperpar.3.3$marginals[[2]]
###---------------------------------------------------------------------
#### Plotting the results
par(mfrow=c(3,2))
plot(marginal.Intercept.3.1,main=expression(paste(beta[0])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.Intercept.3.2,lty=3,col=3)
lines(marginal.Intercept.3.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.Base.3.1,main=expression(paste(beta[1])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.Base.3.2,lty=3,col=3)
lines(marginal.Base.3.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.Trt.3.1,main=expression(paste(beta[2])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.Trt.3.2,lty=3,col=3)
lines(marginal.Trt.3.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.BaseTrt.3.1,main=expression(paste(beta[3])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.BaseTrt.3.2,lty=3,col=3)
lines(marginal.BaseTrt.3.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.Age.3.1,main=expression(paste(beta[4])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.Age.3.2,lty=3,col=3)
lines(marginal.Age.3.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.V4.3.1,main=expression(paste(beta[5])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.V4.3.2,lty=3,col=3)
lines(marginal.V4.3.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
par(mfrow=c(2,1))
plot(hyp.clone.3.1.1,main=expression(paste(sigma[1]^{-2})),
col=1,xlab="",ylab="",lty=1,type="l")
lines(hyp.clone.3.2.1,lty=3,col=3)
lines(hyp.clone.3.3.1,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(hyp.clone.3.1.2,main=expression(paste(sigma[2]^{-2})),
col=1,xlab="",ylab="",lty=1,type="l")
lines(hyp.clone.3.2.2,lty=3,col=3)
lines(hyp.clone.3.3.2,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
############################################################################
# -------------------------------------------------------------------------
##################################################################################
########### Crossed Random Effects Data Example: Salamander Mating Data #########
# -------------------------------------------------------------------------
### Loading data
# Load the salamander mating data (objects: salam$y, salam$x, salam$z).
load("salam.RData")
## organize data into a form suitable for logistic regression
# y: mating outcome; fW/mW: female/male is whiteside (W); WW: both whiteside.
dat0=data.frame("y"=c(salam$y), "fW"=as.integer(salam$x[,"W/R"]==1 | salam$x[,"W/W"]==1),
"mW"=as.integer(salam$x[,"R/W"]==1 | salam$x[,"W/W"]==1),
"WW"=as.integer(salam$x[,"W/W"]==1 ) )
## add salamander id
# Each row of salam$z flags one female and one male; the male index is
# shifted back by 20 so both ids run 1..20 within an experiment.
id = t( apply(salam$z, 1, function(x) {
tmp = which (x==1)
tmp[2] = tmp[2] - 20
tmp
}) )
## ids are suitable for model A and C, but not B
# Offset ids per experiment block so animals get distinct labels across
# the three stacked experiments (presumably model-A coding -- see paper).
id.modA = rbind(id, id+40, id+20)
colnames (id.modA) = c("f.modA","m.modA")
dat0=cbind (dat0, id.modA, group=1)
# 3 experiments x 120 pairings each.
dat0$experiment=as.factor(rep(1:3, each=120))
dat0$group=as.factor(dat0$group)
salamander = dat0
# Per-experiment subsets (summer = experiment 1).
salamander.e1 = subset (dat0, dat0$experiment==1)
salamander.e2 = subset (dat0, dat0$experiment==2)
salamander.e3 = subset (dat0, dat0$experiment==3)
### Constructing Cloned data set
# k = number of clones used by the data-cloning (HDC) fits below.
k=100
dat0=salamander.e1
dat0$no<-1:120
### Split the 120 summer-experiment rows into six 20-row blocks.
### %in% replaces ==: comparing a 120-row column with == against a
### length-20 vector silently recycles the comparison vector, so the
### original selection only worked by accident of alignment.
dat0.1 = subset (dat0, dat0$no %in% 1:20)
dat0.2 = subset (dat0, dat0$no %in% 21:40)
dat0.3 = subset (dat0, dat0$no %in% 41:60)
dat0.4 = subset (dat0, dat0$no %in% 61:80)
dat0.5 = subset (dat0, dat0$no %in% 81:100)
dat0.6 = subset (dat0, dat0$no %in% 101:120)
###
data.clone<-matrix(NA,nrow=20*k,ncol=9)
# Stack k copies of the rows of `src` into the preallocated matrix `out`,
# column by column.  Defaults keep the original behaviour (reading the
# globals dat0.1 and data.clone), while seq_len(ncol(out)) replaces the
# hard-coded 1:9 so the helper generalizes to other widths.
cloning<-function(k, src = dat0.1, out = data.clone){
  for (i in seq_len(ncol(out))){
    out[,i]<-rep(src[,i],k)
  }
  return(out)
}
data.0.1.clone<-cloning(k)
data.0.1.clone[,5]<-1:(20*k)
for (i in 2:k){
data.0.1.clone[(((i-1)*20+1):(i*20)),6]<-data.0.1.clone[(((i-1)*20+1):(i*20)),6]+(i-1)*20
}
data.clone.1<-data.0.1.clone[,-9]
#####
data.clone<-matrix(NA,nrow=20*k,ncol=9)
# Stack k copies of the rows of `src` (block 2) into `out`; see the first
# cloning definition above for rationale.  Defaults preserve the original
# reliance on the globals dat0.2 and data.clone.
cloning<-function(k, src = dat0.2, out = data.clone){
  for (i in seq_len(ncol(out))){
    out[,i]<-rep(src[,i],k)
  }
  return(out)
}
data.0.2.clone<-cloning(k)
data.0.2.clone[,5]<-1:(20*k)
for (i in 2:k){
data.0.2.clone[(((i-1)*20+1):(i*20)),6]<-data.0.2.clone[(((i-1)*20+1):(i*20)),6]+(i-1)*20
}
data.clone.2<-data.0.2.clone[,-9]
####
data.clone<-matrix(NA,nrow=20*k,ncol=9)
# Stack k copies of the rows of `src` (block 3) into `out`.  Defaults
# preserve the original reliance on the globals dat0.3 and data.clone.
cloning<-function(k, src = dat0.3, out = data.clone){
  for (i in seq_len(ncol(out))){
    out[,i]<-rep(src[,i],k)
  }
  return(out)
}
data.0.3.clone<-cloning(k)
data.0.3.clone[,5]<-1:(20*k)
for (i in 2:k){
data.0.3.clone[(((i-1)*20+1):(i*20)),6]<-data.0.3.clone[(((i-1)*20+1):(i*20)),6]+(i-1)*20
}
data.clone.3<-data.0.3.clone[,-9]
#######
data.clone<-matrix(NA,nrow=20*k,ncol=9)
# Stack k copies of the rows of `src` (block 4) into `out`.  Defaults
# preserve the original reliance on the globals dat0.4 and data.clone.
cloning<-function(k, src = dat0.4, out = data.clone){
  for (i in seq_len(ncol(out))){
    out[,i]<-rep(src[,i],k)
  }
  return(out)
}
data.0.4.clone<-cloning(k)
data.0.4.clone[,5]<-1:(20*k)
for (i in 2:k){
data.0.4.clone[(((i-1)*20+1):(i*20)),6]<-data.0.4.clone[(((i-1)*20+1):(i*20)),6]+(i-1)*20
}
data.clone.4<-data.0.4.clone[,-9]
####
data.clone<-matrix(NA,nrow=20*k,ncol=9)
# Stack k copies of the rows of `src` (block 5) into `out`.  Defaults
# preserve the original reliance on the globals dat0.5 and data.clone.
cloning<-function(k, src = dat0.5, out = data.clone){
  for (i in seq_len(ncol(out))){
    out[,i]<-rep(src[,i],k)
  }
  return(out)
}
data.0.5.clone<-cloning(k)
data.0.5.clone[,5]<-1:(20*k)
for (i in 2:k){
data.0.5.clone[(((i-1)*20+1):(i*20)),6]<-data.0.5.clone[(((i-1)*20+1):(i*20)),6]+(i-1)*20
}
data.clone.5<-data.0.5.clone[,-9]
#####
data.clone<-matrix(NA,nrow=20*k,ncol=9)
# Stack k copies of the rows of `src` (block 6) into `out`.  Defaults
# preserve the original reliance on the globals dat0.6 and data.clone.
cloning<-function(k, src = dat0.6, out = data.clone){
  for (i in seq_len(ncol(out))){
    out[,i]<-rep(src[,i],k)
  }
  return(out)
}
data.0.6.clone<-cloning(k)
data.0.6.clone[,5]<-1:(20*k)
for (i in 2:k){
data.0.6.clone[(((i-1)*20+1):(i*20)),6]<-data.0.6.clone[(((i-1)*20+1):(i*20)),6]+(i-1)*20
}
data.clone.6<-data.0.6.clone[,-9]
#############
data.clone<-rbind(data.clone.1,data.clone.2,data.clone.3,data.clone.4,
data.clone.5,data.clone.6)
aa<-data.clone<-as.data.frame(data.clone)
#########################################################################
###-----------------------------------------------------------------------
######################### Summer Experiment ############################
#### For two other experiments the codes are the same ####################
###-----------------------------------------------------------------------
### Fitting by INLA approach
formula=y~fW+mW+WW + f(f.modA, model="iid", param=c(1,.622)) +
f(m.modA, model="iid", param=c(1,.622))
salamander.e1.inla.fit = inla(formula,
family="binomial", data=salamander.e1, Ntrials=rep(1,nrow(salamander.e1)))
salamander.e1.hyperpar = inla.hyperpar (salamander.e1.inla.fit)
### Fixed effects summaries
summary(salamander.e1.inla.fit)
### Variance components summaries
mean.inla.1 = inla.expectation(function(x) 1/x^.5, salamander.e1.hyperpar$marginals[[1]])
mean.inla.2 = inla.expectation(function(x) 1/x^.5, salamander.e1.hyperpar$marginals[[2]])
m2.inla.1 = inla.expectation(function(x) 1/x, salamander.e1.hyperpar$marginals[[1]])
m2.inla.2 = inla.expectation(function(x) 1/x, salamander.e1.hyperpar$marginals[[2]])
stdev.inla.1 = sqrt(k*(m2.inla.1- mean.inla.1^2))
stdev.inla.2 = sqrt(k*(m2.inla.2- mean.inla.2^2))
###-----------------------------------------------------------------------
### Fitting by HDC approach with informative prior set
formula.clone=aa[,1]~aa[,2]+aa[,3]+aa[,4]+f(aa[,5],model="iid",param=c(1,.622)) +
f(aa[,6], model="iid", param=c(1,.622))
salamander.e1.clone.1 = inla(formula.clone,
family="binomial", data=aa, Ntrials=rep(1,nrow(aa)))
salamander.clone.e1.hyperpar.1 = inla.hyperpar (salamander.e1.clone.1)
### Fixed effects summaries
summary(salamander.e1.clone.1)
sd.clone.e1<-as.vector(sqrt(k)*salamander.e1.clone.1$summary.fixed[,2])
### Variance components summaries
mean.clone.1.1=inla.expectation(function(x) 1/x^.5, salamander.clone.e1.hyperpar.1$marginals[[1]])
mean.clone.2.1=inla.expectation(function(x) 1/x^.5, salamander.clone.e1.hyperpar.1$marginals[[2]])
m2.clone.1.1 = inla.expectation(function(x) 1/x, salamander.clone.e1.hyperpar.1$marginals[[1]])
m2.clone.2.1 = inla.expectation(function(x) 1/x, salamander.clone.e1.hyperpar.1$marginals[[2]])
stdev.clone.1.1= sqrt(k*(m2.clone.1.1- mean.clone.1.1^2))
stdev.clone.2.1= sqrt(k*(m2.clone.2.1- mean.clone.2.1^2))
### Preparing elements to plot the results
marginal.intercept.1<-salamander.e1.clone.1$marginals.fixed[[1]]
marginal.F.WS.1<-salamander.e1.clone.1$marginals.fixed[[2]]
marginal.M.WS.1<-salamander.e1.clone.1$marginals.fixed[[3]]
marginal.FM.WS.1<-salamander.e1.clone.1$marginals.fixed[[4]]
hyp.F.1<-salamander.clone.e1.hyperpar.1$marginals[[1]]
hyp.M.1<-salamander.clone.e1.hyperpar.1$marginals[[2]]
###-----------------------------------------------------------------------
### Fitting by HDC approach with flat prior set
salamander.e1.clone.2 = inla(aa[,1]~aa[,2]+aa[,3]+aa[,4]+f(aa[,5],model="iid")+
f(aa[,6], model="iid"), control.family=list(prior="flat"),
family="binomial", data=aa, Ntrials=rep(1,nrow(aa)))
salamander.clone.e1.hyperpar.2 = inla.hyperpar (salamander.e1.clone.2)
### Fixed effects summaries
summary(salamander.e1.clone.2)
sd.clone.e2<-as.vector(sqrt(k)*salamander.e1.clone.2$summary.fixed[,2])
### Variance components summaries
mean.clone.1.2=inla.expectation(function(x) 1/x^.5, salamander.clone.e1.hyperpar.2$marginals[[1]])
mean.clone.2.2=inla.expectation(function(x) 1/x^.5, salamander.clone.e1.hyperpar.2$marginals[[2]])
m2.clone.1.2 = inla.expectation(function(x) 1/x, salamander.clone.e1.hyperpar.2$marginals[[1]])
m2.clone.2.2 = inla.expectation(function(x) 1/x, salamander.clone.e1.hyperpar.2$marginals[[2]])
stdev.clone.1.2= sqrt(k*(m2.clone.1.2- mean.clone.1.2^2))
stdev.clone.2.2= sqrt(k*(m2.clone.2.2- mean.clone.2.2^2))
### Preparing elements to plot the results
marginal.intercept.2<-salamander.e1.clone.2$marginals.fixed[[1]]
marginal.F.WS.2<-salamander.e1.clone.2$marginals.fixed[[2]]
marginal.M.WS.2<-salamander.e1.clone.2$marginals.fixed[[3]]
marginal.FM.WS.2<-salamander.e1.clone.2$marginals.fixed[[4]]
hyp.F.2<-salamander.clone.e1.hyperpar.2$marginals[[1]]
hyp.M.2<-salamander.clone.e1.hyperpar.2$marginals[[2]]
###-----------------------------------------------------------------------
### Fitting by HDC approach with vague (gamma) prior set
salamander.e1.clone.3 = inla(aa[,1]~aa[,2]+aa[,3]+aa[,4]+f(aa[,5],model="iid",param=c(0.1,0.1))
+ f(aa[,6], model="iid", param=c(0.1,0.1)),
family="binomial", data=aa, Ntrials=rep(1,nrow(aa)))
salamander.clone.e1.hyperpar.3 = inla.hyperpar (salamander.e1.clone.3)
### Fixed effects summaries
summary(salamander.e1.clone.3)
sd.clone.e3<-as.vector(sqrt(k)*salamander.e1.clone.3$summary.fixed[,2])
### Variance components summaries
mean.clone.1.3=inla.expectation(function(x) 1/x^.5, salamander.clone.e1.hyperpar.3$marginals[[1]])
mean.clone.2.3=inla.expectation(function(x) 1/x^.5, salamander.clone.e1.hyperpar.3$marginals[[2]])
m2.clone.1.3 = inla.expectation(function(x) 1/x, salamander.clone.e1.hyperpar.3$marginals[[1]])
m2.clone.2.3 = inla.expectation(function(x) 1/x, salamander.clone.e1.hyperpar.3$marginals[[2]])
stdev.clone.1.3= sqrt(k*(m2.clone.1.3- mean.clone.1.3^2))
stdev.clone.2.3= sqrt(k*(m2.clone.2.3- mean.clone.2.3^2))
### Preparing elements to plot the results
marginal.intercept.3<-salamander.e1.clone.3$marginals.fixed[[1]]
marginal.F.WS.3<-salamander.e1.clone.3$marginals.fixed[[2]]
marginal.M.WS.3<-salamander.e1.clone.3$marginals.fixed[[3]]
marginal.FM.WS.3<-salamander.e1.clone.3$marginals.fixed[[4]]
hyp.F.3<-salamander.clone.e1.hyperpar.3$marginals[[1]]
hyp.M.3<-salamander.clone.e1.hyperpar.3$marginals[[2]]
###-----------------------------------------------------------------------
############### Drawing HDC graphs ##################
par(mfrow=c(2,2))
# Fixed a stray double quote that left the plotmath expression with an
# unterminated string (compare the beta[2]/beta[3] plots below).
plot(marginal.intercept.1,main=expression(paste(beta[0])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.intercept.2,lty=3,col=3)
lines(marginal.intercept.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
# Fixed a stray double quote that left the plotmath expression with an
# unterminated string (compare the beta[2]/beta[3] plots below).
plot(marginal.F.WS.1,main=expression(paste(beta[1])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.F.WS.2,lty=3,col=3)
lines(marginal.F.WS.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.M.WS.1,main=expression(paste(beta[2])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.M.WS.2,lty=3,col=3)
lines(marginal.M.WS.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(marginal.FM.WS.1,main=expression(paste(beta[3])),
col=1,xlab="",ylab="",lty=1,type="l")
lines(marginal.FM.WS.2,lty=3,col=3)
lines(marginal.FM.WS.3,lty=4,col=4)
#legend("topleft",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
par(mfrow=c(2,1))
plot(hyp.F.1,main=expression(paste(sigma[f]^{-2})),
col=1,xlab="",ylab="",lty=1,type="l")
lines(hyp.F.2,lty=3,col=3)
lines(hyp.F.3,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
plot(hyp.M.1,main=expression(paste(sigma[m]^{-2})),
col=1,xlab="",ylab="",lty=1,type="l",ylim=c(0,0.26))
lines(hyp.M.2,lty=3,col=3)
lines(hyp.M.3,lty=4,col=4)
#legend("topright",c("Priors 1","Priors 2","Priors 3")
#, bty="n",lty=c(1,3,4),col=c(1,3,4))
###----------------------------------------------------------------
################################################################################
########### Comparing computing times in DC and hybrid DC methods ################
# Re-fit the cloned model with INLA's internal MCMC sampler so its wall time
# can be compared with the hybrid data-cloning fit above.
wd.dc = tempfile()
## same as before
# NOTE(review): formula.clone and the data frame aa are defined earlier in
# the script (not visible here).
result.dc = system.time(inla(formula.clone,data=aa,
family="binomial",Ntrials=rep(1,nrow(aa)),
working.directory = wd.dc,
keep = TRUE,
inla.arg = "-m mcmc -N 10000 -T 10 -S 0.1"))
### computing cpu times
# CPU time reported by the plain INLA fit, printed next to the MCMC timing.
time.inla = salamander.e1.clone.1$cpu.used
time.inla
result.dc
|
#' nullplot
#'
#' Make a plot with nothing in it
#'
#' Useful as an empty canvas to which points, lines or text are added later.
#'
#' @param x1 lowest x-axis value
#' @param x2 largest x-axis value
#' @param y1 lowest y-axis value
#' @param y2 largest y-axis value
#' @param xlab x-axis title, defaults to no title
#' @param ylab y-axis title, defaults to no title
#' @param ... further arguments passed on to plot
#' @return Invisibly returns \code{NULL}; called for its side effect of
#'   opening an empty plotting region.
#' @examples
#' nullplot()
nullplot <- function(x1=0,x2=1,y1=0,y2=1,xlab="",ylab="",...) {
# type = "n" suppresses drawing of the dummy (0, 0) point.
plot(0,0,xlim=c(x1,x2),ylim=c(y1,y2),type="n",xlab=xlab,ylab=ylab,...)
}
| /R/nullplot.R | no_license | CymGen30/rafalib | R | false | false | 505 | r | #' nullplot
#'
#' Make a plot with nothing in it
#'
#' @param x1 lowest x-axis value
#' @param x2 largest x-axis value
#' @param y1 lowest y-axis value
#' @param y2 largest y-axis value
#' @param xlab x-axis title, defaults to no title
#' @param ylab y-axis title, defaults to no title
#' @param ... further arguments passed on to plot
#' @examples
#' nullplot()
nullplot <- function(x1=0,x2=1,y1=0,y2=1,xlab="",ylab="",...) {
  # Open an empty plotting region: draw an invisible point (type = "n") with
  # the requested limits and labels; the caller adds content afterwards.
  plot(0, 0, type = "n", xlim = c(x1, x2), ylim = c(y1, y2),
       xlab = xlab, ylab = ylab, ...)
}
|
library(tidyverse)
library(xml2)
library(fs)
library(sys)
library(rvest)
library(pdftools)
library(lubridate)
# Utility functions ------------------------------------------------------------
#' Extract URLs from home page
#'
#' Scrapes the per-country report links from the mobility home page and
#' pairs each URL with a local destination path under "pdf/".
extract_urls <- function(url) {
  pdf_links <- url %>%
    read_html() %>%
    html_nodes(".country-data a") %>%
    html_attr("href")
  tibble(url = pdf_links) %>%
    mutate(file_path = path("pdf", basename(url)))
}
# Download url to file_path unless the file is already present.
# mode = "wb" keeps the PDF binary-safe on Windows.
download_if_not_exists <- function(url, file_path) {
  if (!file_exists(file_path)) {
    download.file(url, file_path, mode = "wb")
  }
}
#' Convert pdf pages to svg files and return their paths
#'
#' Shells out to the external `pdf2svg` tool, writing one SVG per page into
#' a temporary directory named after the input file, and returns the paths
#' of the generated files.
pdf_to_svg <- function(file_path) {
  cat("Converting to svg: ", file_path, "\n")
  # Create a temporary directory named after the file
  svg_dir <- path_temp(path_ext_remove(path_file(file_path)))
  dir_create(svg_dir)
  command <- c(
    "pdf2svg",
    file_path,
    # FIX: pdf2svg writes SVG output; the pattern was previously "%d.pdf",
    # mislabelling every generated file.  %d is replaced by the page number
    # because "all" renders every page.
    path(svg_dir, "%d.svg"),
    "all"
  )
  # NOTE(review): sys::exec_wait() takes cmd and args separately; confirm a
  # single vector works here, otherwise use exec_wait(command[1], command[-1]).
  exec_wait(command)
  dir_ls(svg_dir)
}
#' Extract geometry from svg file
#'
#' Finds every SVG <path> whose stroke style exactly matches the blue trend
#' line used in the reports, returning one row per path with its "d" data
#' string and the panel (row/col) position of the graph it belongs to.
#' Returns an empty tibble when the page has no trend graphs.
extract_geometry <- function(svg_path) {
cat("Extracting geometry: ", svg_path, "\n")
# Select trend lines by their full inline stroke style (report-specific blue).
element <-
svg_path %>%
read_xml() %>%
xml_ns_strip() %>%
xml_find_all("//path[@style='fill:none;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(25.878906%,52.159119%,95.689392%);stroke-opacity:1;stroke-miterlimit:4;']")
if (length(element) == 0) return(tibble()) # no graph
geometry <- xml_attr(element, "d") # the data points
# The transform attribute is presumably "matrix(a,b,c,d,e,f)": str_sub(8, -2)
# strips the 7-character prefix and closing ")", and elements 5:6 are the
# translation components -- TODO confirm against a sample SVG.
panel <- # the row and column of the graph
element %>%
xml_attr("transform") %>%
str_sub(8, -2) %>%
str_split(",") %>%
map(~ .x[5:6]) %>% # x,y of the corner of the graph
transpose() %>%
map(unlist) %>%
map(as.numeric) %>%
set_names(c("col", "row"))
# Renumber the row/col from being an x,y coordinate to being the position of
# the graph on the page, either 3 rows of 1 column, or 4 rows of 3 columns.
panel$row <- as.integer(as_factor(panel$row))
panel$col <- as.integer(as_factor(panel$col))
tibble(geometry, row = panel$row, col = panel$col)
}
#' Convert a string of coordinates to a tibble
#'
#' Parses SVG path "d" strings into one row per drawing command with the
#' command letter and its x/y coordinates.  The replacement order matters:
#' " Z" tokens must go before commands are split onto separate lines.
parse_geometry <- function(geometry) {
geometry %>%
str_replace_all(" Z", "") %>% # Remove close path commands
str_replace_all("(?<=[0-9]) (?=[A-Z])", "\n") %>% # Separate into rows
str_trim() %>%
# Each line is now "<command> <x> <y>"; read_table2 splits on whitespace.
map_dfr(read_table2, col_names = c("command", "x", "y"), col_types = "cdd")
}
#' Extract the width of one day on the x-axis from the strokes
#'
#' Returns the most common positive gap between consecutive x-coordinates
#' within a panel, taken across all pages/panels, as the pixel width of one
#' day on the x-axis.
extract_day_width <- function(.data) {
# Width between consecutive points (assume the most common width)
.data %>%
group_by(page, row, col) %>%
arrange(x) %>%
mutate(diff = x - lag(x)) %>%
ungroup() %>%
count(diff, sort = TRUE) %>%
filter(diff > 0) %>%
top_n(1, n) %>%
pull(diff)
}
#' Extract the text from the pdf
#'
#' Returns one row per word across all pages of the PDF, with an integer
#' `page` column identifying the page each word came from.
extract_text <- function(file_path) {
  cat("Extracting text: ", file_path, "\n")
  pages <- pdf_data(file_path)
  pages %>%
    bind_rows(.id = "page") %>%
    mutate(page = as.integer(page))
}
#' Extract the title from the text
#'
#' Returns the title words (country name followed by the report date).
#' NOTE(review): assumes the title sits at exactly y == 75 on page 1 of the
#' PDF layout -- confirm against new report versions before reuse.
extract_title <- function(text) {
# Read the country name and date
text %>%
filter(page == 1, y == 75) %>%
pull(text)
}
#' Extract the country name from the title
#'
#' The last three words of the title are the report date ("Month DD, YYYY");
#' everything before them is the country name.
extract_country_name <- function(title) {
  name_words <- head(title, -3)
  paste(name_words, collapse = " ")
}
#' Extract the report date from the title
#'
#' Parses the trailing "Month DD, YYYY" words of the title into a Date.
extract_report_date <- function(title) {
  date_words <- tail(title, 3)
  parse_date(paste(date_words, collapse = " "), format = "%B %d, %Y")
}
#' Extract the country-level text
#'
#' Country-level graphs occupy the first two pages of each report.
extract_country_text <- function(text) {
filter(text, page <= 2)
}
#' Extract the region-level text
#'
#' Region pages follow the two country pages; the final page holds notes.
extract_region_text <- function(text) {
filter(text, page >= 3, page < max(page))
}
#' Extract the region names and row/col from the text
#'
#' Region pages show two region headers per page (top and bottom half), and
#' each header applies to two panel rows; country pages get a placeholder of
#' three NA rows instead.
#' NOTE(review): the y positions (36, 363) and height == 20 encode the fixed
#' report layout -- verify whenever Google changes the template.
extract_region_names <- function(type, text) {
if (type != "region") {
return(tibble(row = 1:3, region_name = NA_character_))
}
text %>%
filter(y %in% c(36, 363), height == 20) %>%
arrange(y, x) %>%
group_by(y) %>%
summarise(region_name = paste(text, collapse = " ")) %>%
ungroup() %>%
# Duplicate to two rows per place
mutate(row = as.integer(factor(y)) * 2L) %>%
select(row, region_name) %>%
bind_rows(mutate(., row = row - 1L)) %>%
arrange(row)
}
#' Extract the categories and row/col from the text
#'
#' Finds the panel category titles and assigns each a row/col position on
#' the page.  The y/height filters encode the fixed layout of country vs
#' region pages (country panel titles are 13pt; region titles 11pt at four
#' fixed y positions).
extract_categories <- function(type, page, text) {
text %>%
filter(
switch(type,
country = !(page == 1 & y < 342) & (height == 13),
region = (y %in% c(82, 220, 409, 547)) & (height == 11)
)
) %>%
arrange(y, x) %>%
# Snap x down to a 100pt grid so multi-word titles group into one column.
mutate(x = plyr::round_any(x, 100, f = floor)) %>%
group_by(y, x) %>%
summarise(category = paste(text, collapse = " ")) %>%
ungroup() %>%
mutate(row = as.integer(factor(y)),
col = as.integer(factor(x))) %>%
select(row, col, category)
}
#' Extract the baselines and row/col from the text
#'
#' The percentage printed on each panel (e.g. "-35%") is parsed and returned
#' as a fraction (parse_number(...) / 100), keyed by panel row/col.
#' NOTE(review): the height/y filters encode the fixed report layout.
extract_baselines <- function(type, text) {
text %>%
filter(
switch(type,
country = height == 45,
region = y %in% c(104, 242, 431, 568) & height == 13
)
) %>%
mutate(baseline = parse_number(text) / 100) %>%
mutate(row = as.integer(factor(y)),
col = as.integer(factor(x))) %>%
select(row, col, baseline)
}
#' Join the separate panels of region names, categories and baselines
#'
#' Region names are keyed by panel row only; baselines are keyed by both
#' row and col.
join_panels <- function(region_name, category, baseline) {
  named_categories <- inner_join(region_name, category, by = "row")
  inner_join(named_categories, baseline, by = c("row", "col"))
}
#' Scale y-values by the baseline
#'
#' Converts raw pixel y-values into trend fractions: differences from the
#' first point are rescaled so that the final point equals `baseline`.
scale_y <- function(y, baseline) {
  delta <- y - y[[1]]
  delta / (delta[[length(delta)]] / baseline)
}
#' Convert x-values to dates
#'
#' Rounds the gap between consecutive x-coordinates to whole days and counts
#' backwards from the report date, so the final point lands on report_date.
#' NOTE(review): reads the global `day_width` computed later in the script;
#' only valid after extract_day_width() has run.
x_to_date <- function(x, report_date) {
day_diff <- round((x - lag(x)) / day_width)
day_diff <- replace_na(day_diff, 1) # first point has no predecessor
report_date - days(rev(cumsum(day_diff)) - 1)
}
# Main script ------------------------------------------------------------------
# Downloads every country report PDF, digitises the blue trend lines out of
# the vector graphics, recovers labels/baselines from the text layer, and
# writes one TSV of (country, region, category, date, trend).
# The Google page to download each pdf
home_url <- "https://www.google.com/covid19/mobility/"
# A folder to store the PDF files
dir_create("pdf")
# A data frame to collect the data, beginning with the URL of each pdf
df <- extract_urls(home_url)
# Download the pdf files
walk2(df$url, df$file_path, download_if_not_exists)
# Convert to svg and extract the graphs
df_trends <-
df %>%
mutate(svg_path = map(file_path, pdf_to_svg)) %>% # convert pdf pages to svg
unnest(svg_path) %>%
group_by(url) %>%
mutate(page = row_number()) %>%
ungroup() %>%
mutate(type = if_else(page <= 2, "country", "region")) %>%
mutate(geometry = map(svg_path, extract_geometry)) %>% # extract geometry from svg
unnest(geometry) %>%
group_by(url, page) %>%
# mutate(stroke = row_number()) %>%
mutate(geometry = map(geometry, parse_geometry)) %>% # parse geometry to tibble
select(-type) %>%
unnest(geometry) %>%
mutate(y = -y) %>% # Flip y coords to be positive at the top
group_by(url, page, row, col) %>%
mutate(group = cumsum(command == "M")) %>% # group sections of strokes
ungroup() %>%
select(-file_path, -svg_path, -command)
# Extract the text from the pdf
df_text <-
df %>%
# Two-letter ISO country code embedded in the file name, e.g. "..._GB_...".
mutate(country_code = str_extract(url, "(?<=_)[A-Z]{2}(?=_)")) %>%
mutate(text = map(file_path, extract_text)) %>%
mutate(title = map(text, extract_title),
country_name = map_chr(title, extract_country_name),
report_date = do.call(c, map(title, extract_report_date))) %>%
select(-title) %>%
# Split the text into pages
mutate(text = map(text, nest_by, page, .key = "text")) %>%
unnest(text) %>%
mutate(type = if_else(page <= 2, "country", "region")) %>%
group_by(url) %>%
filter(page != max(page)) %>% # Drop the final page, which is notes
ungroup() %>%
# Extract the panel region names, categories and baselines
rowwise() %>%
mutate(region_name = list(extract_region_names(type, text))) %>%
mutate(category = list(extract_categories(type, page, text))) %>%
mutate(baseline = list(extract_baselines(type, text))) %>%
mutate(panel = list(join_panels(region_name, category, baseline))) %>%
ungroup() %>%
select(url, page, country_code, country_name, report_date, type, panel) %>%
unnest(panel) %>%
select(url, page, country_code, country_name, report_date, type, row, col,
region_name, category, baseline)
# Guess the x-width of one day between data points
# (global used by x_to_date below)
day_width <- extract_day_width(df_trends)
# Pair up the text with the trends
final <-
inner_join(df_text, df_trends, by = c("url", "page", "row", "col")) %>%
group_by(url, page, row, col) %>%
arrange(url, page, row, col, x) %>%
mutate(
trend = scale_y(y, baseline),
date = x_to_date(x, report_date)
) %>%
ungroup() %>%
select(-x, -y)
# One output file per report date, e.g. "2020-04-11.tsv".
write_tsv(final, paste0(max(final$report_date), ".tsv"))
| /extract.R | no_license | cuulee/google-location-coronavirus | R | false | false | 8,885 | r | library(tidyverse)
library(xml2)
library(fs)
library(sys)
library(rvest)
library(pdftools)
library(lubridate)
# Utility functions ------------------------------------------------------------
#' Extract URLs from home page
extract_urls <- function(url) {
url %>%
read_html() %>%
html_nodes(".country-data a") %>%
html_attr("href") %>%
tibble(url = .) %>%
mutate(file_path = path("pdf", basename(url)))
}
download_if_not_exists <- function(url, file_path) {
if (file_exists(file_path)) return()
download.file(url, file_path, mode = "wb")
}
#' Convert pdf pages to svg files and return their paths
#'
#' Shells out to the external `pdf2svg` tool, writing one SVG per page into
#' a temporary directory named after the input file, and returns the paths
#' of the generated files.
pdf_to_svg <- function(file_path) {
  cat("Converting to svg: ", file_path, "\n")
  # Create a temporary directory named after the file
  svg_dir <- path_temp(path_ext_remove(path_file(file_path)))
  dir_create(svg_dir)
  command <- c(
    "pdf2svg",
    file_path,
    # FIX: pdf2svg writes SVG output; the pattern was previously "%d.pdf",
    # mislabelling every generated file.  %d is replaced by the page number
    # because "all" renders every page.
    path(svg_dir, "%d.svg"),
    "all"
  )
  # NOTE(review): sys::exec_wait() takes cmd and args separately; confirm a
  # single vector works here, otherwise use exec_wait(command[1], command[-1]).
  exec_wait(command)
  dir_ls(svg_dir)
}
#' Extract geometry from svg file
extract_geometry <- function(svg_path) {
cat("Extracting geometry: ", svg_path, "\n")
element <-
svg_path %>%
read_xml() %>%
xml_ns_strip() %>%
xml_find_all("//path[@style='fill:none;stroke-width:2;stroke-linecap:butt;stroke-linejoin:miter;stroke:rgb(25.878906%,52.159119%,95.689392%);stroke-opacity:1;stroke-miterlimit:4;']")
if (length(element) == 0) return(tibble()) # no graph
geometry <- xml_attr(element, "d") # the data points
panel <- # the row and column of the graph
element %>%
xml_attr("transform") %>%
str_sub(8, -2) %>%
str_split(",") %>%
map(~ .x[5:6]) %>% # x,y of the corner of the graph
transpose() %>%
map(unlist) %>%
map(as.numeric) %>%
set_names(c("col", "row"))
# Renumber the row/col from being an x,y coordinate to being the position of
# the graph on the page, either 3 rows of 1 column, or 4 rows of 3 columns.
panel$row <- as.integer(as_factor(panel$row))
panel$col <- as.integer(as_factor(panel$col))
tibble(geometry, row = panel$row, col = panel$col)
}
#' Convert a string of coordinates to a tibble
parse_geometry <- function(geometry) {
geometry %>%
str_replace_all(" Z", "") %>% # Remove close path commands
str_replace_all("(?<=[0-9]) (?=[A-Z])", "\n") %>% # Separate into rows
str_trim() %>%
map_dfr(read_table2, col_names = c("command", "x", "y"), col_types = "cdd")
}
#' Extract the width of one day on the x-axis from the strokes
extract_day_width <- function(.data) {
# Width between consecutive points (assume the most common width)
.data %>%
group_by(page, row, col) %>%
arrange(x) %>%
mutate(diff = x - lag(x)) %>%
ungroup() %>%
count(diff, sort = TRUE) %>%
filter(diff > 0) %>%
top_n(1, n) %>%
pull(diff)
}
#' Extract the text from the pdf
extract_text <- function(file_path) {
cat("Extracting text: ", file_path, "\n")
file_path %>%
pdf_data() %>%
bind_rows(.id = "page") %>%
mutate(page = as.integer(page))
}
#' Extract the title from the text
extract_title <- function(text) {
# Read the country name and date
text %>%
filter(page == 1, y == 75) %>%
pull(text)
}
#' Extract the country name from the title
#'
#' Drops the trailing three date words ("Month DD, YYYY") and joins the
#' remaining words with spaces.
extract_country_name <- function(title) {
  non_date_words <- head(title, -3)
  paste(non_date_words, collapse = " ")
}
#' Extract the report date from the title
extract_report_date <- function(title) {
title %>%
tail(3) %>%
paste(collapse = " ") %>%
parse_date(format = "%B %d, %Y")
}
#' Extract the country-level text
extract_country_text <- function(text) {
filter(text, page <= 2)
}
#' Extract the region-level text
extract_region_text <- function(text) {
filter(text, page >= 3, page < max(page))
}
#' Extract the region names and row/col from the text
extract_region_names <- function(type, text) {
if (type != "region") {
return(tibble(row = 1:3, region_name = NA_character_))
}
text %>%
filter(y %in% c(36, 363), height == 20) %>%
arrange(y, x) %>%
group_by(y) %>%
summarise(region_name = paste(text, collapse = " ")) %>%
ungroup() %>%
# Duplicate to two rows per place
mutate(row = as.integer(factor(y)) * 2L) %>%
select(row, region_name) %>%
bind_rows(mutate(., row = row - 1L)) %>%
arrange(row)
}
#' Extract the categories and row/col from the text
extract_categories <- function(type, page, text) {
text %>%
filter(
switch(type,
country = !(page == 1 & y < 342) & (height == 13),
region = (y %in% c(82, 220, 409, 547)) & (height == 11)
)
) %>%
arrange(y, x) %>%
mutate(x = plyr::round_any(x, 100, f = floor)) %>%
group_by(y, x) %>%
summarise(category = paste(text, collapse = " ")) %>%
ungroup() %>%
mutate(row = as.integer(factor(y)),
col = as.integer(factor(x))) %>%
select(row, col, category)
}
#' Extract the baselines and row/col from the text
extract_baselines <- function(type, text) {
text %>%
filter(
switch(type,
country = height == 45,
region = y %in% c(104, 242, 431, 568) & height == 13
)
) %>%
mutate(baseline = parse_number(text) / 100) %>%
mutate(row = as.integer(factor(y)),
col = as.integer(factor(x))) %>%
select(row, col, baseline)
}
#' Join the separate panels of region names, categories and baselines
join_panels <- function(region_name, category, baseline) {
region_name %>%
inner_join(category, by = "row") %>%
inner_join(baseline, by = c("row", "col"))
}
#' Scale y-values by the baseline
#'
#' Converts raw pixel y-values into trend fractions: differences from the
#' first point are rescaled so that the final point equals `baseline`.
scale_y <- function(y, baseline) {
  delta <- y - y[[1]]
  delta / (delta[[length(delta)]] / baseline)
}
#' Convert x-values to dates
x_to_date <- function(x, report_date) {
day_diff <- round((x - lag(x)) / day_width)
day_diff <- replace_na(day_diff, 1)
report_date - days(rev(cumsum(day_diff)) - 1)
}
# Main script ------------------------------------------------------------------
# The Google page to download each pdf
home_url <- "https://www.google.com/covid19/mobility/"
# A folder to store the PDF files
dir_create("pdf")
# A data frame to collect the data, beginning with the URL of each pdf
df <- extract_urls(home_url)
# Download the pdf files
walk2(df$url, df$file_path, download_if_not_exists)
# Convert to svg and extract the graphs
df_trends <-
df %>%
mutate(svg_path = map(file_path, pdf_to_svg)) %>% # convert pdf pages to svg
unnest(svg_path) %>%
group_by(url) %>%
mutate(page = row_number()) %>%
ungroup() %>%
mutate(type = if_else(page <= 2, "country", "region")) %>%
mutate(geometry = map(svg_path, extract_geometry)) %>% # extract geometry from svg
unnest(geometry) %>%
group_by(url, page) %>%
# mutate(stroke = row_number()) %>%
mutate(geometry = map(geometry, parse_geometry)) %>% # parse geometry to tibble
select(-type) %>%
unnest(geometry) %>%
mutate(y = -y) %>% # Flip y coords to be positive at the top
group_by(url, page, row, col) %>%
mutate(group = cumsum(command == "M")) %>% # group sections of strokes
ungroup() %>%
select(-file_path, -svg_path, -command)
# Extract the text from the pdf
df_text <-
df %>%
mutate(country_code = str_extract(url, "(?<=_)[A-Z]{2}(?=_)")) %>%
mutate(text = map(file_path, extract_text)) %>%
mutate(title = map(text, extract_title),
country_name = map_chr(title, extract_country_name),
report_date = do.call(c, map(title, extract_report_date))) %>%
select(-title) %>%
# Split the text into pages
mutate(text = map(text, nest_by, page, .key = "text")) %>%
unnest(text) %>%
mutate(type = if_else(page <= 2, "country", "region")) %>%
group_by(url) %>%
filter(page != max(page)) %>% # Drop the final page, which is notes
ungroup() %>%
# Extract the panel region names, categories and baselines
rowwise() %>%
mutate(region_name = list(extract_region_names(type, text))) %>%
mutate(category = list(extract_categories(type, page, text))) %>%
mutate(baseline = list(extract_baselines(type, text))) %>%
mutate(panel = list(join_panels(region_name, category, baseline))) %>%
ungroup() %>%
select(url, page, country_code, country_name, report_date, type, panel) %>%
unnest(panel) %>%
select(url, page, country_code, country_name, report_date, type, row, col,
region_name, category, baseline)
# Guess the x-width of one day between data points
day_width <- extract_day_width(df_trends)
# Pair up the text with the trends
final <-
inner_join(df_text, df_trends, by = c("url", "page", "row", "col")) %>%
group_by(url, page, row, col) %>%
arrange(url, page, row, col, x) %>%
mutate(
trend = scale_y(y, baseline),
date = x_to_date(x, report_date)
) %>%
ungroup() %>%
select(-x, -y)
write_tsv(final, paste0(max(final$report_date), ".tsv"))
|
# test_galeshapley.R
# test matching with the Gale-Shapley Algorithm
test_that("Check if galeShapley.marriageMarket matching is stable", {
uM <- matrix(runif(12), nrow = 4, ncol = 3)
uW <- matrix(runif(12), nrow = 3, ncol = 4)
matching.marriageMarket <- galeShapley.marriageMarket(uM, uW)
expect_true(galeShapley.checkStability(uM, uW, matching.marriageMarket$proposals, matching.marriageMarket$engagements))
})
test_that("Check if galeShapley is an alias for galeShapley.marriageMarket", {
uM <- matrix(runif(12), nrow = 4, ncol = 3)
uW <- matrix(runif(12), nrow = 3, ncol = 4)
matching1 <- galeShapley(uM, uW)
matching2 <- galeShapley.marriageMarket(uM, uW)
expect_true(all.equal(matching1, matching2))
})
test_that("Check if galeShapley.collegeAdmissions matching is stable", {
uM <- matrix(runif(16), nrow = 2, ncol = 8)
uW <- matrix(runif(16), nrow = 8, ncol = 2)
matching <- galeShapley.collegeAdmissions(uM, uW, slots = 4)
expect_true(galeShapley.checkStability(uM, uW, matching$matched.students, matching$matched.colleges))
matching <- galeShapley.collegeAdmissions(uM, uW, slots = 8)
expect_true(galeShapley.checkStability(uM, uW, matching$matched.students, matching$matched.colleges))
matching <- galeShapley.collegeAdmissions(uM, uW, slots = 10)
expect_true(galeShapley.checkStability(uM, uW, matching$matched.students, matching$matched.colleges))
})
test_that("Check if college-optimal galeShapley.collegeAdmissions matching is stable", {
uM <- matrix(runif(6), nrow = 3, ncol = 2)
uW <- matrix(runif(6), nrow = 2, ncol = 3)
matching <- galeShapley.collegeAdmissions(uM, uW, slots = 2, studentOptimal = FALSE)
expect_true(galeShapley.checkStability(uW, uM, matching$matched.colleges, matching$matched.students))
})
test_that(
"Check if using preferences as inputs yields the same results as when using cardinal utilities as inputs",
{
uM <- matrix(runif(16 * 14), nrow = 14, ncol = 16)
uW <- matrix(runif(16 * 14), nrow = 16, ncol = 14)
matching1 <- galeShapley.marriageMarket(uM, uW)
matching2 <- galeShapley.marriageMarket(proposerPref = sortIndex(uM), reviewerPref = sortIndex(uW))
expect_true(all(matching1$engagements == matching2$engagements))
}
)
test_that(
"Check if using preferences as inputs with R indices yields the same results as when using cardinal utilities as inputs",
{
uM <- matrix(runif(16 * 14), nrow = 16, ncol = 14)
uW <- matrix(runif(16 * 14), nrow = 14, ncol = 16)
matching1 <- galeShapley.marriageMarket(uM, uW)
matching2 <- galeShapley.marriageMarket(proposerPref = sortIndex(uM) + 1, reviewerPref = sortIndex(uW) + 1)
expect_true(all.equal(matching1$engagements, matching2$engagements))
}
)
test_that("Check if incorrect preference orders result in an error", {
uM <- matrix(runif(16 * 14), nrow = 16, ncol = 14)
uW <- matrix(runif(16 * 14), nrow = 14, ncol = 16)
proposerPref <- sortIndex(uM) + 1
reviewerPref <- sortIndex(uW) + 1
proposerPrefPrime <- proposerPref
proposerPrefPrime[1, 1] <- 9999
reviewerPrefPrime <- reviewerPref
reviewerPrefPrime[1, 1] <- 9999
expect_error(
galeShapley.marriageMarket(proposerPref = proposerPrefPrime, reviewerPref = reviewerPref),
"proposerPref was defined by the user but is not a complete list of preference orderings"
)
expect_error(
galeShapley.marriageMarket(proposerPref = proposerPref, reviewerPref = reviewerPrefPrime),
"reviewerPref was defined by the user but is not a complete list of preference orderings"
)
})
test_that("Check galeShapley.validate rejects inconsistent inputs", {
  # generate cardinal and ordinal preferences with mismatched dimensions
  # (a 4x3 proposer market needs a 3x4 reviewer-side matrix, not 4x3)
  uM <- matrix(runif(12), nrow = 4, ncol = 3)
  uW <- matrix(runif(12), nrow = 4, ncol = 3)
  prefM <- sortIndex(uM)
  prefW <- sortIndex(uW)
  # expect errors
  expect_error(galeShapley.validate(proposerUtils = uM, reviewerUtils = uW))
  expect_error(galeShapley.validate(proposerPref = prefM, reviewerPref = prefW))
  # generate cardinal and ordinal preferences
  # FIX: a 4x4 matrix needs 16 draws; runif(12) silently recycled the first
  # four values into the last column
  uM <- matrix(runif(16), nrow = 4, ncol = 4)
  uW <- matrix(runif(12), nrow = 4, ncol = 3)
  prefM <- sortIndex(uM)
  prefW <- sortIndex(uW)
  # expect errors
  expect_error(galeShapley.validate(proposerUtils = uM, reviewerUtils = uW))
  expect_error(galeShapley.validate(proposerPref = prefM, reviewerPref = prefW))
})
test_that("Check null inputs", {
expect_error(
galeShapley.marriageMarket(),
"missing proposer preferences"
)
uM <- matrix(runif(16 * 14), nrow = 16, ncol = 14)
expect_error(
galeShapley.marriageMarket(uM),
"missing reviewer utilities"
)
expect_error(
galeShapley.marriageMarket(proposerPref = sortIndex(uM)),
"missing reviewer utilities"
)
})
test_that("Check if incorrect dimensions result in error", {
uM <- matrix(runif(16 * 14), nrow = 16, ncol = 14)
uW <- matrix(runif(15 * 15), nrow = 15, ncol = 15)
expect_error(galeShapley.marriageMarket(uM, uW))
expect_error(galeShapley.marriageMarket(proposerPref = sortIndex(uM), reviewerUtils = uW))
uM <- matrix(runif(16 * 16), nrow = 16, ncol = 16)
uW <- matrix(runif(15 * 16), nrow = 15, ncol = 16)
expect_error(galeShapley.marriageMarket(proposerPref = sortIndex(uM), reviewerUtils = uW))
})
test_that("Check outcome from galeShapley.marriageMarket matching", {
uM <- matrix(c(
0, 1,
1, 0,
0, 1
), nrow = 2, ncol = 3)
uW <- matrix(c(
0, 2, 1,
1, 0, 2
), nrow = 3, ncol = 2)
matching <- galeShapley.marriageMarket(uM, uW)
expect_true(all.equal(matching$engagements, matrix(c(2, 3), ncol = 1)))
expect_true(all.equal(matching$proposals, matrix(c(NA, 1, 2), ncol = 1)))
})
test_that("Check outcome from student-optimal galeShapley.collegeAdmissions matching", {
uM <- matrix(c(
0, 1,
1, 0,
0, 1
), nrow = 2, ncol = 3)
uW <- matrix(c(
0, 2, 1,
1, 0, 2
), nrow = 3, ncol = 2)
matching <- galeShapley.collegeAdmissions(uM, uW, slots = 2, studentOptimal = TRUE)
expect_true(all.equal(matching$matched.colleges, matrix(c(2, 3, NA, 1), ncol = 2)))
expect_true(all.equal(matching$matched.students, matrix(c(2, 1, 2), ncol = 1)))
})
test_that("Check outcome from collge-optimal galeShapley.collegeAdmissions matching", {
uM <- matrix(c(
0, 1,
1, 0,
0, 1
), nrow = 2, ncol = 3)
uW <- matrix(c(
0, 2, 1,
1, 0, 2
), nrow = 3, ncol = 2)
matching <- galeShapley.collegeAdmissions(uW, uM, slots = 2, studentOptimal = FALSE)
expect_true(all.equal(matching$matched.students, matrix(c(2, 3), ncol = 1)))
expect_true(all.equal(matching$matched.colleges, matrix(c(NA, NA, NA, NA, 1, 2), ncol = 2)))
})
test_that("Check checkStability", {
# define preferences
uM <- matrix(c(
0, 1,
1, 0,
0, 1
), nrow = 2, ncol = 3)
uW <- matrix(c(
0, 2, 1,
1, 0, 2
), nrow = 3, ncol = 2)
# define matchings (this one is correct)
matching <- list(
"engagements" = as.matrix(c(1, 2) + 1),
"proposals" = as.matrix(c(2, 0, 1) + 1)
)
# check if the matching is stable
expect_true(galeShapley.checkStability(uM, uW, matching$proposals, matching$engagements))
# swap proposals and engagements (this one isn't stable)
expect_false(suppressWarnings(galeShapley.checkStability(uM, uW, matching$engagements, matching$proposals)))
})
test_that("Assortative matching?", {
uM <- matrix(runif(16), nrow = 4, ncol = 4)
uW <- matrix(runif(16), nrow = 4, ncol = 4)
diag(uM)[] <- 2
diag(uW)[] <- 2
matching <- galeShapley.marriageMarket(uM, uW)
expect_true(all(matching$proposals == 1:4))
expect_true(all(matching$engagements == 1:4))
})
test_that("Marriage Market and College Admissions Problem Should Be Identical When Slots = 1", {
uM <- matrix(runif(12), nrow = 4, ncol = 3)
uW <- matrix(runif(12), nrow = 3, ncol = 4)
# student-optimal
matching.marriageMarket <- galeShapley.marriageMarket(uM, uW)
matching.collegeAdmissions <- galeShapley.collegeAdmissions(uM, uW, slots = 1, studentOptimal = TRUE)
expect_equal(matching.marriageMarket$proposals, matching.collegeAdmissions$matched.students)
expect_equal(matching.marriageMarket$engagements, matching.collegeAdmissions$matched.colleges)
expect_equal(matching.marriageMarket$single.proposers, matching.collegeAdmissions$unmatched.students)
expect_equal(matching.marriageMarket$single.reviewers, matching.collegeAdmissions$unmatched.colleges)
# college-optimal
matching.marriageMarket <- galeShapley.marriageMarket(uW, uM)
matching.collegeAdmissions <- galeShapley.collegeAdmissions(uM, uW, slots = 1, studentOptimal = FALSE)
expect_equal(matching.marriageMarket$proposals, matching.collegeAdmissions$matched.colleges)
expect_equal(matching.marriageMarket$engagements, matching.collegeAdmissions$matched.students)
expect_equal(matching.marriageMarket$single.proposers, matching.collegeAdmissions$unmatched.colleges)
expect_equal(matching.marriageMarket$single.reviewers, matching.collegeAdmissions$unmatched.students)
})
test_that("Check if galeShapley.collegeAdmissions matching returns the same results when the slots are constant across colleges", {
uM <- matrix(runif(16), nrow = 2, ncol = 8)
uW <- matrix(runif(16), nrow = 8, ncol = 2)
matching1 <- galeShapley.collegeAdmissions(uM, uW, slots = 4)
matching2 <- galeShapley.collegeAdmissions(uM, uW, slots = c(4, 4))
expect_true(identical(matching1, matching2))
})
# FIX: "differnet" -> "different" in the test description.
test_that("Check student-optimal galeShapley.collegeAdmissions with different numbers of slots", {
  # four students, two colleges, slots c(1,2)
  uStudents <- matrix(runif(8), nrow = 2, ncol = 4)
  uColleges <- matrix(runif(8), nrow = 4, ncol = 2)
  matching1 <- galeShapley.collegeAdmissions(uStudents, uColleges, slots = c(1, 2))
  # now, expand students and college preferences and use galeShapley() instead
  uStudents <- rbind(uStudents[1, ], uStudents[2, ], uStudents[2, ])
  uColleges <- cbind(uColleges[, 1], uColleges[, 2], uColleges[, 2])
  matching2 <- galeShapley(uStudents, uColleges)
  expect_true(all.equal(matching1$unmatched.students, matching2$single.proposers))
  expect_equal(matching1$matched.colleges[[1]], matching2$engagements[1])
  expect_equal(sort(matching1$matched.colleges[[2]]), sort(matching2$engagements[2:3]))
  # college 3 gets mapped into college 2
  matching2$proposals[matching2$proposals == 3] <- 2
  expect_equal(matching1$matched.students, matching2$proposals)
})
# FIX: "differnet" -> "different" in the test description.
test_that("Check college-optimal galeShapley.collegeAdmissions with different numbers of slots", {
  # four students, two colleges, slots c(1,2)
  uStudents <- matrix(runif(8), nrow = 2, ncol = 4)
  uColleges <- matrix(runif(8), nrow = 4, ncol = 2)
  matching1 <- galeShapley.collegeAdmissions(uStudents, uColleges, slots = c(1, 2), studentOptimal = FALSE)
  # now, expand students and college preferences and use galeShapley() instead
  uStudents <- rbind(uStudents[1, ], uStudents[2, ], uStudents[2, ])
  uColleges <- cbind(uColleges[, 1], uColleges[, 2], uColleges[, 2])
  matching2 <- galeShapley(uColleges, uStudents)
  expect_true(all.equal(matching1$unmatched.students, matching2$single.reviewers))
  expect_equal(matching1$matched.colleges[[1]], matching2$proposals[1])
  expect_equal(sort(matching1$matched.colleges[[2]]), sort(matching2$proposals[2:3]))
  # college 3 gets mapped into college 2
  matching2$engagements[matching2$engagements == 3] <- 2
  expect_equal(matching1$matched.students, matching2$engagements)
})
| /tests/testthat/test_galeshapley.R | no_license | minghao2016/matchingR | R | false | false | 11,414 | r | # test_galeshapley.R
# test matching with the Gale-Shapley Algorithm
test_that("Check if galeShapley.marriageMarket matching is stable", {
uM <- matrix(runif(12), nrow = 4, ncol = 3)
uW <- matrix(runif(12), nrow = 3, ncol = 4)
matching.marriageMarket <- galeShapley.marriageMarket(uM, uW)
expect_true(galeShapley.checkStability(uM, uW, matching.marriageMarket$proposals, matching.marriageMarket$engagements))
})
test_that("Check if galeShapley is an alias for galeShapley.marriageMarket", {
uM <- matrix(runif(12), nrow = 4, ncol = 3)
uW <- matrix(runif(12), nrow = 3, ncol = 4)
matching1 <- galeShapley(uM, uW)
matching2 <- galeShapley.marriageMarket(uM, uW)
expect_true(all.equal(matching1, matching2))
})
test_that("Check if galeShapley.collegeAdmissions matching is stable", {
uM <- matrix(runif(16), nrow = 2, ncol = 8)
uW <- matrix(runif(16), nrow = 8, ncol = 2)
matching <- galeShapley.collegeAdmissions(uM, uW, slots = 4)
expect_true(galeShapley.checkStability(uM, uW, matching$matched.students, matching$matched.colleges))
matching <- galeShapley.collegeAdmissions(uM, uW, slots = 8)
expect_true(galeShapley.checkStability(uM, uW, matching$matched.students, matching$matched.colleges))
matching <- galeShapley.collegeAdmissions(uM, uW, slots = 10)
expect_true(galeShapley.checkStability(uM, uW, matching$matched.students, matching$matched.colleges))
})
test_that("Check if college-optimal galeShapley.collegeAdmissions matching is stable", {
uM <- matrix(runif(6), nrow = 3, ncol = 2)
uW <- matrix(runif(6), nrow = 2, ncol = 3)
matching <- galeShapley.collegeAdmissions(uM, uW, slots = 2, studentOptimal = FALSE)
expect_true(galeShapley.checkStability(uW, uM, matching$matched.colleges, matching$matched.students))
})
test_that(
"Check if using preferences as inputs yields the same results as when using cardinal utilities as inputs",
{
uM <- matrix(runif(16 * 14), nrow = 14, ncol = 16)
uW <- matrix(runif(16 * 14), nrow = 16, ncol = 14)
matching1 <- galeShapley.marriageMarket(uM, uW)
matching2 <- galeShapley.marriageMarket(proposerPref = sortIndex(uM), reviewerPref = sortIndex(uW))
expect_true(all(matching1$engagements == matching2$engagements))
}
)
test_that(
"Check if using preferences as inputs with R indices yields the same results as when using cardinal utilities as inputs",
{
uM <- matrix(runif(16 * 14), nrow = 16, ncol = 14)
uW <- matrix(runif(16 * 14), nrow = 14, ncol = 16)
matching1 <- galeShapley.marriageMarket(uM, uW)
matching2 <- galeShapley.marriageMarket(proposerPref = sortIndex(uM) + 1, reviewerPref = sortIndex(uW) + 1)
expect_true(all.equal(matching1$engagements, matching2$engagements))
}
)
test_that("Check if incorrect preference orders result in an error", {
uM <- matrix(runif(16 * 14), nrow = 16, ncol = 14)
uW <- matrix(runif(16 * 14), nrow = 14, ncol = 16)
proposerPref <- sortIndex(uM) + 1
reviewerPref <- sortIndex(uW) + 1
proposerPrefPrime <- proposerPref
proposerPrefPrime[1, 1] <- 9999
reviewerPrefPrime <- reviewerPref
reviewerPrefPrime[1, 1] <- 9999
expect_error(
galeShapley.marriageMarket(proposerPref = proposerPrefPrime, reviewerPref = reviewerPref),
"proposerPref was defined by the user but is not a complete list of preference orderings"
)
expect_error(
galeShapley.marriageMarket(proposerPref = proposerPref, reviewerPref = reviewerPrefPrime),
"reviewerPref was defined by the user but is not a complete list of preference orderings"
)
})
test_that("Check if validate function", {
# generate cardinal and ordinal preferences
uM <- matrix(runif(12), nrow = 4, ncol = 3)
uW <- matrix(runif(12), nrow = 4, ncol = 3)
prefM <- sortIndex(uM)
prefW <- sortIndex(uW)
# expect errors
expect_error(galeShapley.validate(proposerUtils = uM, reviewerUtils = uW))
expect_error(galeShapley.validate(proposerPref = prefM, reviewerPref = prefW))
# generate cardinal and ordinal preferences
uM <- matrix(runif(12), nrow = 4, ncol = 4)
uW <- matrix(runif(12), nrow = 4, ncol = 3)
prefM <- sortIndex(uM)
prefW <- sortIndex(uW)
# expect errors
expect_error(galeShapley.validate(proposerUtils = uM, reviewerUtils = uW))
expect_error(galeShapley.validate(proposerPref = prefM, reviewerPref = prefW))
})
test_that("Check null inputs", {
expect_error(
galeShapley.marriageMarket(),
"missing proposer preferences"
)
uM <- matrix(runif(16 * 14), nrow = 16, ncol = 14)
expect_error(
galeShapley.marriageMarket(uM),
"missing reviewer utilities"
)
expect_error(
galeShapley.marriageMarket(proposerPref = sortIndex(uM)),
"missing reviewer utilities"
)
})
test_that("Check if incorrect dimensions result in error", {
uM <- matrix(runif(16 * 14), nrow = 16, ncol = 14)
uW <- matrix(runif(15 * 15), nrow = 15, ncol = 15)
expect_error(galeShapley.marriageMarket(uM, uW))
expect_error(galeShapley.marriageMarket(proposerPref = sortIndex(uM), reviewerUtils = uW))
uM <- matrix(runif(16 * 16), nrow = 16, ncol = 16)
uW <- matrix(runif(15 * 16), nrow = 15, ncol = 16)
expect_error(galeShapley.marriageMarket(proposerPref = sortIndex(uM), reviewerUtils = uW))
})
test_that("Check outcome from galeShapley.marriageMarket matching", {
uM <- matrix(c(
0, 1,
1, 0,
0, 1
), nrow = 2, ncol = 3)
uW <- matrix(c(
0, 2, 1,
1, 0, 2
), nrow = 3, ncol = 2)
matching <- galeShapley.marriageMarket(uM, uW)
expect_true(all.equal(matching$engagements, matrix(c(2, 3), ncol = 1)))
expect_true(all.equal(matching$proposals, matrix(c(NA, 1, 2), ncol = 1)))
})
test_that("Check outcome from student-optimal galeShapley.collegeAdmissions matching", {
uM <- matrix(c(
0, 1,
1, 0,
0, 1
), nrow = 2, ncol = 3)
uW <- matrix(c(
0, 2, 1,
1, 0, 2
), nrow = 3, ncol = 2)
matching <- galeShapley.collegeAdmissions(uM, uW, slots = 2, studentOptimal = TRUE)
expect_true(all.equal(matching$matched.colleges, matrix(c(2, 3, NA, 1), ncol = 2)))
expect_true(all.equal(matching$matched.students, matrix(c(2, 1, 2), ncol = 1)))
})
test_that("Check outcome from collge-optimal galeShapley.collegeAdmissions matching", {
uM <- matrix(c(
0, 1,
1, 0,
0, 1
), nrow = 2, ncol = 3)
uW <- matrix(c(
0, 2, 1,
1, 0, 2
), nrow = 3, ncol = 2)
matching <- galeShapley.collegeAdmissions(uW, uM, slots = 2, studentOptimal = FALSE)
expect_true(all.equal(matching$matched.students, matrix(c(2, 3), ncol = 1)))
expect_true(all.equal(matching$matched.colleges, matrix(c(NA, NA, NA, NA, 1, 2), ncol = 2)))
})
test_that("Check checkStability", {
# define preferences
uM <- matrix(c(
0, 1,
1, 0,
0, 1
), nrow = 2, ncol = 3)
uW <- matrix(c(
0, 2, 1,
1, 0, 2
), nrow = 3, ncol = 2)
# define matchings (this one is correct)
matching <- list(
"engagements" = as.matrix(c(1, 2) + 1),
"proposals" = as.matrix(c(2, 0, 1) + 1)
)
# check if the matching is stable
expect_true(galeShapley.checkStability(uM, uW, matching$proposals, matching$engagements))
# swap proposals and engagements (this one isn't stable)
expect_false(suppressWarnings(galeShapley.checkStability(uM, uW, matching$engagements, matching$proposals)))
})
test_that("Assortative matching?", {
uM <- matrix(runif(16), nrow = 4, ncol = 4)
uW <- matrix(runif(16), nrow = 4, ncol = 4)
diag(uM)[] <- 2
diag(uW)[] <- 2
matching <- galeShapley.marriageMarket(uM, uW)
expect_true(all(matching$proposals == 1:4))
expect_true(all(matching$engagements == 1:4))
})
test_that("Marriage Market and College Admissions Problem Should Be Identical When Slots = 1", {
uM <- matrix(runif(12), nrow = 4, ncol = 3)
uW <- matrix(runif(12), nrow = 3, ncol = 4)
# student-optimal
matching.marriageMarket <- galeShapley.marriageMarket(uM, uW)
matching.collegeAdmissions <- galeShapley.collegeAdmissions(uM, uW, slots = 1, studentOptimal = TRUE)
expect_equal(matching.marriageMarket$proposals, matching.collegeAdmissions$matched.students)
expect_equal(matching.marriageMarket$engagements, matching.collegeAdmissions$matched.colleges)
expect_equal(matching.marriageMarket$single.proposers, matching.collegeAdmissions$unmatched.students)
expect_equal(matching.marriageMarket$single.reviewers, matching.collegeAdmissions$unmatched.colleges)
# college-optimal
matching.marriageMarket <- galeShapley.marriageMarket(uW, uM)
matching.collegeAdmissions <- galeShapley.collegeAdmissions(uM, uW, slots = 1, studentOptimal = FALSE)
expect_equal(matching.marriageMarket$proposals, matching.collegeAdmissions$matched.colleges)
expect_equal(matching.marriageMarket$engagements, matching.collegeAdmissions$matched.students)
expect_equal(matching.marriageMarket$single.proposers, matching.collegeAdmissions$unmatched.colleges)
expect_equal(matching.marriageMarket$single.reviewers, matching.collegeAdmissions$unmatched.students)
})
test_that("Check if galeShapley.collegeAdmissions matching returns the same results when the slots are constant across colleges", {
uM <- matrix(runif(16), nrow = 2, ncol = 8)
uW <- matrix(runif(16), nrow = 8, ncol = 2)
matching1 <- galeShapley.collegeAdmissions(uM, uW, slots = 4)
matching2 <- galeShapley.collegeAdmissions(uM, uW, slots = c(4, 4))
expect_true(identical(matching1, matching2))
})
test_that("Check student-optimal galeShapley.collegeAdmissions with differnet numbers of slots", {
# four students, two colleges, slots c(1,2)
uStudents <- matrix(runif(8), nrow = 2, ncol = 4)
uColleges <- matrix(runif(8), nrow = 4, ncol = 2)
matching1 <- galeShapley.collegeAdmissions(uStudents, uColleges, slots = c(1, 2))
# now, expand students and college preferences and use galeShapley() instead
uStudents <- rbind(uStudents[1, ], uStudents[2, ], uStudents[2, ])
uColleges <- cbind(uColleges[, 1], uColleges[, 2], uColleges[, 2])
matching2 <- galeShapley(uStudents, uColleges)
expect_true(all.equal(matching1$unmatched.students, matching2$single.proposers))
expect_equal(matching1$matched.colleges[[1]], matching2$engagements[1])
expect_equal(sort(matching1$matched.colleges[[2]]), sort(matching2$engagements[2:3]))
# college 3 gets mapped into college 2
matching2$proposals[matching2$proposals == 3] <- 2
expect_equal(matching1$matched.students, matching2$proposals)
})
test_that("Check college-optimal galeShapley.collegeAdmissions with differnet numbers of slots", {
# four students, two colleges, slots c(1,2)
uStudents <- matrix(runif(8), nrow = 2, ncol = 4)
uColleges <- matrix(runif(8), nrow = 4, ncol = 2)
matching1 <- galeShapley.collegeAdmissions(uStudents, uColleges, slots = c(1, 2), studentOptimal = FALSE)
# now, expand students and college preferences and use galeShapley() instead
uStudents <- rbind(uStudents[1, ], uStudents[2, ], uStudents[2, ])
uColleges <- cbind(uColleges[, 1], uColleges[, 2], uColleges[, 2])
matching2 <- galeShapley(uColleges, uStudents)
expect_true(all.equal(matching1$unmatched.students, matching2$single.reviewers))
expect_equal(matching1$matched.colleges[[1]], matching2$proposals[1])
expect_equal(sort(matching1$matched.colleges[[2]]), sort(matching2$proposals[2:3]))
# college 3 gets mapped into college 2
matching2$engagements[matching2$engagements == 3] <- 2
expect_equal(matching1$matched.students, matching2$engagements)
})
|
#Developed by Pablo Vicente-Munuera
source('~/Documents/Dropbox/MScBioinformatics/Thesis/Project/Analyzing-PPIs/src/lib/plotingResults.R', echo=TRUE)
plotingResults(title = "Malaria",
resultsGeo2D = resultsGeo2DMalariaAdj,
fileOriginal = "data/results/MalariaAdj.sifGC.txt",
resultsSF = resultsSfMalariaAdj
) | /src/showingTheResults.R | permissive | Pablo1990/Analyzing-PPIs | R | false | false | 371 | r | #Developed by Pablo Vicente-Munuera
source('~/Documents/Dropbox/MScBioinformatics/Thesis/Project/Analyzing-PPIs/src/lib/plotingResults.R', echo=TRUE)
plotingResults(title = "Malaria",
resultsGeo2D = resultsGeo2DMalariaAdj,
fileOriginal = "data/results/MalariaAdj.sifGC.txt",
resultsSF = resultsSfMalariaAdj
) |
## Programming Assignment 2
## Writing a pair of functions that cache the inverse of a matrix
## Step 1: Create a matrix object that can cache its inverse
# Wrap a matrix in a list of closures that can cache its inverse.
# Returns a list with set/get accessors for the matrix and
# setinverse/getinverse accessors for the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(value) {
    # replace the stored matrix and drop any stale cached inverse
    x <<- value
    cached_inverse <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) cached_inverse <<- inverse
  getinverse <- function() cached_inverse
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## Step 2: Compute the inverse of matrix returned by makeCacheMatrix()
# Return the inverse of the special "matrix" built by makeCacheMatrix(),
# computing it at most once: later calls reuse the cached result.
# Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    # cache hit: announce it and skip the solve() call entirely
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
| /cachematrix.R | no_license | cmasih/ProgrammingAssignment2 | R | false | false | 1,379 | r | ## Programming Assignment 2
## Writing a pair of functions that cache the inverse of a matrix
## Step 1: Create a matrix object that can cache its inverse
makeCacheMatrix <- function(x=matrix()){ # x: the matrix to wrap (defaults to an empty matrix)
i <- NULL # i caches the inverse; NULL means "not computed yet"
set<-function(y){
x<<-y # replace the stored matrix in the enclosing environment
i <<- NULL # invalidate the cached inverse, since the matrix changed
}
get<- function() x # retrieve the stored matrix
setinverse <- function(inverse) i <<- inverse # store a computed inverse in the cache
getinverse <- function() i # retrieve the cached inverse (NULL if not yet set)
list(set = set, get = get,
setinverse = setinverse,
getinverse=getinverse) # expose the four accessor closures as a named list
}
## Step 2: Compute the inverse of matrix returned by makeCacheMatrix()
cacheSolve <- function(x,...){ # x: a cache-matrix list from makeCacheMatrix(); ... is forwarded to solve()
i <- x$getinverse() # fetch whatever inverse is currently cached
if(!is.null(i)){ # non-NULL means the inverse was computed on an earlier call
message("getting cached data") # announce the cache hit
return(i) # early exit with the cached inverse
}
data <- x$get() # cache miss: pull out the underlying matrix
i <- solve(data,...) # compute the inverse (assumes the wrapped matrix is invertible)
x$setinverse(i) # store the result so later calls can reuse it
i # Return a matrix that is the inverse of 'x'
}
|
#' Copyright(c) 2017-2020 R. Mark Sharp
# This file is part of nprcgenekeepr
test_that("mapIdsToObfuscated maps IDS as expected", {
set_seed(1)
ped <- qcStudbook(nprcgenekeepr::pedSix)
obfuscated <- obfuscatePed(ped, map = TRUE)
someIds <- c("s1", "s2", "d1", "d1")
someUndefinedIds <- c("nope", "s1", "still_nope", "d1")
expect_equal(mapIdsToObfuscated(someIds, obfuscated$map),
c("JNAN5L", "0ZR5QI", "2D0P3X", "2D0P3X"))
expect_error(mapIdsToObfuscated(someUndefinedIds, obfuscated$map),
"Some IDs are not in map.")
})
| /tests/testthat/test_mapIdsToObfuscated.R | permissive | jhagberg/nprcgenekeepr | R | false | false | 565 | r | #' Copyright(c) 2017-2020 R. Mark Sharp
# This file is part of nprcgenekeepr
test_that("mapIdsToObfuscated maps IDS as expected", {
set_seed(1)
ped <- qcStudbook(nprcgenekeepr::pedSix)
obfuscated <- obfuscatePed(ped, map = TRUE)
someIds <- c("s1", "s2", "d1", "d1")
someUndefinedIds <- c("nope", "s1", "still_nope", "d1")
expect_equal(mapIdsToObfuscated(someIds, obfuscated$map),
c("JNAN5L", "0ZR5QI", "2D0P3X", "2D0P3X"))
expect_error(mapIdsToObfuscated(someUndefinedIds, obfuscated$map),
"Some IDs are not in map.")
})
|
# ###################### OLD FUNCTIONS ###############################
#
# ####################################################################
# # Get Personal Portfolio's Data
# #
# # This function downloads my personal Excel with my Portfolio data
# #
# # @family Investment
# # @param filename Characeter. Import a local Excel file
# # @param token_dir Character. Where is my personal token for Dropbox connection?
# # @param auto Boolean. Automatically user my local personal file?
# # @param sheets Character Vector. Names of each sheet containing Portfolio summary,
# # Cash, and Transactions information
# # @param keep_old Boolean. Keep 100% sold tickers?
# get_stocks <- function(filename = NA, token_dir = "~/Dropbox (Personal)/Documentos/Docs/Data",
# auto = TRUE, sheets = c("Portafolio","Fondos","Transacciones"),
# keep_old = TRUE) {
#
# warning("New portfolio/stocks functions created. Use stocks_file() instead")
#
# processFile <- function(file, keep_old = TRUE) {
# port <- read.xlsx(file, sheet = sheets[1], skipEmptyRows = TRUE, detectDates = TRUE)
# cash <- read.xlsx(file, sheet = sheets[2], skipEmptyRows = TRUE, detectDates = TRUE)
# trans <- read.xlsx(file, sheet = sheets[3], skipEmptyRows = TRUE, detectDates = TRUE)
# if (keep_old == FALSE) port <- port[port$Stocks != 0,]
# mylist <- list("portfolio" = port, "transactions" = trans, "cash" = cash)
# return(mylist)
# }
#
# # FOR PERSONAL USE
# local <- Sys.info()
# if (local[["nodename"]] == "MacBook-Pro-de-Bernardo.local" & auto == TRUE) {
# message("Using BL's local file...")
# local <- "~/Dropbox (Personal)/Documentos/Interactive Brokers/Portfolio/Portfolio LC.xlsx"
# results <- processFile(local, keep_old)
# } else {
# # FOR EVERYONE'S USE
# if (!is.na(filename)) {
# if (file.exists(filename)) results <- processFile(filename, keep_old) else
# stop("Error: that file doesn't exist or it's not in your working directory!")
# } else {
# # FOR DROPBOX'S USE
# token_dir <- token_dir
# load(paste0(token_dir, "/token_pers.rds"))
# x <- drop_search("Portfolio LC.xlsx", dtoken = token)
# file <- "temp.xlsx"
# invisible(
# drop_download(x$matches[[1]]$metadata$path_lower,
# local_path = file,
# overwrite = TRUE,
# dtoken = token))
# results <- processFile(file, keep_old)
# file.remove(file)
# }
# }
# message("File imported succesfully!")
# return(results)
# }
#
#
# ####################################################################
# # Download Stocks Historical Data
# #
# # This function lets the user download stocks historical data
# #
# # @family Investment
# # @param symbols Character Vector. List of symbols to download historical data.
# # Example: c('VTI','TSLA')
# # @param from Date. Since when do you wish to download historical data
# # @param today Boolean. Do you wish to additionaly download today's quote?
# # @param tax Numeric. Percentage for dividends real return. Range from 0 to 99
# # @param verbose Boolean. Print results and progress while downloading?
# get_stocks_hist <- function(symbols = NA,
# from = NA,
# today = TRUE,
# tax = 30,
# verbose = TRUE) {
#
# warning("New portfolio/stocks functions created. Use stocks_hist() instead")
#
# try_require("quantmod")
#
# if (!haveInternet()) stop("You currently have NO internet connection!")
#
# options("getSymbols.warning4.0" = FALSE)
# options("getSymbols.yahoo.warning" = FALSE)
# data <- divs <- c()
#
# if (!any(is.na(symbols))) {
# from[is.na(from)] <- Sys.Date() - 365
# if (length(from) == length(symbols)) {
# for (i in 1:length(symbols)) {
# symbol <- as.character(symbols[i])
# start_date <- as.character(from[i])
# values <- getSymbols(symbol, env = NULL, from = start_date, src = "yahoo") %>% data.frame()
# values <- cbind(row.names(values), as.character(symbol), values)
# colnames(values) <- c("Date","Symbol","Open","High","Low","Close","Volume","Adjusted")
# values <- mutate(values, Adjusted = rowMeans(select(values, High, Close), na.rm = TRUE))
# row.names(values) <- NULL
#
# # Add right now's data
# if (today) {
# quote <- function(ticks) {
# qRoot <- "https://query1.finance.yahoo.com/v7/finance/quote?fields=symbol,longName,regularMarketPrice,regularMarketChange,regularMarketTime&formatted=false&symbols="
# z <- fromJSON(paste(qRoot, paste(ticks, collapse = ","), sep = ""))
# z <- z$quoteResponse$result[,c("symbol", "regularMarketTime", "regularMarketPrice", "regularMarketChange", "longName")]
# row.names(z) <- z$symbol
# z$symbol <- NULL
# names(z) <- c("Time", "Price", "Change", "Name")
# z$Time <- as.POSIXct(z$Time, origin = '1970-01-01 00:00:00')
# return(z)
# }
# now <- quote(symbol)
# now <- data.frame(Date = as.character(as.Date(now$Time)), Symbol = symbol,
# Open = now$Price, High = now$Price, Low = now$Price, Close = now$Price,
# Volume = 0, Adjusted = now$Price)
# values <- rbind(values, now)
# }
#
# data <- rbind(data, values)
#
# # Dividends if case
# d <- getDividends(as.character(symbol), from = start_date)
# if (nrow(d) > 0) {
# div <- data.frame(Symbol = rep(symbol, nrow(d)),
# Date = ymd(row.names(data.frame(d))),
# Div = as.vector(d),
# DivReal = as.vector(d)*(100 - tax)/100)
# divs <- rbind(divs, div)
# }
# if (verbose == TRUE) {
# info <- paste(symbol, "since", start_date, " ")
# statusbar(i, length(symbols), info)
# }
# }
# } else {message("The parameters 'symbols' and 'from' should be the same length.") }
# } else {message("You need to define which stocks to bring. Use the 'symbols=' parameter.") }
#
# joined <- data %>%
# select(Date, Symbol, Adjusted) %>% rename(Value = Adjusted) %>%
# mutate(Date = as.Date(Date), Symbol = as.character(Symbol)) %>%
# left_join(mutate(divs, Symbol = as.character(Symbol)),
# by = c("Date", "Symbol")) %>%
# replace(is.na(.), 0) %>%
# arrange(desc(Date))
#
# results <- list("values" = data, "dividends" = divs, "joined" = joined)
# return(results)
# }
#
#
# ####################################################################
# # Fix Historical Data on Stocks
# #
# # This function lets the user fix downloaded stock data into a useful
# # format output
# #
# # @family Investment
# # @param dailys Dataframe. Daily values. Structure: "Date", "Symbol",
# # "Open", "High", "Low", "Close", "Volume", "Adjusted"
# # @param dividends Dataframe. Dividends. Structure: "Symbol", "Date",
# # "Div", "DivReal"
# # @param transactions Dataframe. Transactions. Structure: "ID", "Inv",
# # "CODE", "Symbol", "Date", "Quant", "Value", "Amount", "Description"
# # @param expenses Numeric. How much does that bank or broker charges per
# # transaction? Absolute value.
# stocks_hist_fix <- function(dailys, dividends, transactions, expenses = 7) {
#
# warning("New portfolio/stocks functions created. Use daily_stocks() and daily_portfolio() instead")
#
# dailys_structure <- c("Date", "Symbol", "Open", "High", "Low", "Close", "Volume", "Adjusted")
# dividends_structure <- c("Symbol", "Date", "Div", "DivReal")
# trans_structure <- c("ID", "Inv", "CODE", "Symbol", "Date", "Quant", "Value", "Amount", "Description")
#
# if (!all(dailys_structure %in% colnames(dailys))) {
# stop(paste("The structure of the 'dailys' table should be:",
# paste(shQuote(dailys_structure), collapse = ", ")))
# }
#
# if (!all(dividends_structure %in% colnames(dividends))) {
# stop(paste("The structure of the 'dividends' table should be:",
# paste(shQuote(dividends_structure), collapse = ", ")))
# }
#
# if (!all(trans_structure %in% colnames(transactions))) {
# stop(paste("The structure of the 'transactions' table should be:",
# paste(shQuote(trans_structure), collapse = ", ")))
# }
#
# df <- dailys %>%
# mutate(Date = as.Date(as.character(Date)), Symbol = as.character(Symbol)) %>%
# arrange(Date, Symbol) %>%
# # Add transactions daily data
# left_join(transactions %>%
# mutate(Date = as.Date(as.character(Date)), Symbol = as.character(Symbol)) %>%
# select(Symbol, Date, Quant, Value, Amount),
# by = c('Symbol','Date')) %>%
# mutate(Expenses = ifelse(is.na(Quant), 0, expenses)) %>%
# group_by(Symbol) %>%
# mutate(Quant = ifelse(is.na(Quant), 0, Quant),
# Stocks = cumsum(Quant)) %>%
# # Add dividends daily data
# left_join(dividends %>%
# mutate(Date = as.Date(as.character(Date)), Symbol = as.character(Symbol)),
# by = c('Symbol','Date')) %>%
# mutate(DailyDiv = ifelse(is.na(DivReal), 0, Stocks * DivReal)) %>% ungroup() %>%
# # If sold everything and then restarted...
# group_by(Symbol) %>%
# mutate(group = ifelse(lead(Stocks) > 0 & Stocks == 0, 1, 0)) %>%
# mutate(groupi = cumsum(group)) %>% ungroup() %>%
# mutate(Ticker = ifelse(groupi > 0, paste0(Symbol, groupi + 1), Symbol)) %>%
# select(-group, -groupi) %>%
# # Some other cumulative calculations
# arrange(Date) %>% group_by(Stocks) %>%
# mutate(DailyValue = Close * Stocks) %>%
# arrange(desc(Date), desc(DailyValue)) %>%
# arrange(Date) %>% group_by(Ticker) %>%
# mutate(StartUSD = Value[Date == min(Date)],
# RelChangeP = 100 - (100 * lag(Close) / Close),
# RelChangeUSD = Stocks * (Close - lag(Close)) - Expenses,
# RelChangePHist = ifelse(Date == min(Date), 0,
# 100 - (100 * StartUSD / Close)),
# RelChangeUSDHist = Stocks * (Close - StartUSD) - sum(Expenses)) %>%
# arrange(desc(Date)) %>%
# #mutate_if(is.numeric, funs(round(., 2))) %>%
# ungroup() %>%
# mutate_at(vars(-contains("Date")), funs(replace(., is.na(.), 0))) %>%
# group_by(Date, Ticker) %>% arrange(desc(Volume)) %>% slice(1) %>% ungroup()
#
# return(df)
#
# }
#
#
# ####################################################################
# # Stocks Overall Performance
# #
# # This function lets the user calculate stocks performance
# #
# # @family Investment
# # @param dailys Dataframe. Daily values. Structure: "Date", "Symbol",
# # "Open", "High", "Low", "Close", "Volume", "Adjusted", "Quant",
# # "Value", "Amount", "Expenses", "Stocks", "Div", "DivReal", "DailyDiv",
# # "DailyValue", "RelChangeP",
# # "RelChangeUSD"
# # @param cash_in Dataframe. Deposits and withdrawals. Structure: "ID", "Date", "Cash"
# # @param cash_fix Numeric. If you wish to algebraically sum a value
# # to your cash balance
# stocks_performance <- function(dailys, cash_in, cash_fix = 0) {
#
# warning("New portfolio/stocks functions created. Use daily_stocks() and daily_portfolio() instead")
#
# dailys_structure <- c("Date", "Symbol", "Open", "High", "Low", "Close", "Volume", "Adjusted",
# "Quant", "Value", "Amount", "Expenses", "Stocks", "Div", "DivReal",
# "DailyDiv", "DailyValue", "RelChangeP", "RelChangeUSD",
# "RelChangePHist", "RelChangeUSDHist")
#
# cash_structure <- c("ID", "Date", "Cash")
#
# if (!all(dailys_structure %in% colnames(dailys))) {
# stop(paste("The structure of the 'dailys' table should be:",
# paste(shQuote(dailys_structure), collapse = ", ")))
# }
#
# if (!all(cash_structure %in% colnames(cash_in))) {
# stop(paste("The structure of the 'cash_in' table should be:",
# paste(shQuote(cash_structure), collapse = ", ")))
# }
#
# result <- dailys %>% group_by(Date) %>%
# summarise(Stocks = n(),
# DailyStocks = sum(DailyValue),
# DailyTrans = sum(Amount),
# DailyExpen = sum(Expenses),
# DailyDiv = sum(DailyDiv),
# RelUSD = sum(RelChangeUSD)) %>%
# mutate(RelPer = round(100 * RelUSD / DailyStocks, 2),
# CumDiv = cumsum(DailyDiv),
# CumExpen = cumsum(DailyExpen)) %>%
# left_join(cash_in %>% select(Date, Cash), by = c('Date')) %>%
# mutate(DailyCash = ifelse(is.na(Cash), 0, Cash),
# CumCash = cumsum(DailyCash) - cumsum(DailyTrans) + cumsum(DailyDiv) + cash_fix,
# CumPortfolio = CumCash + DailyStocks,
# TotalUSD = DailyStocks - cumsum(DailyTrans),
# TotalPer = round(100 * DailyStocks / (cumsum(DailyTrans)), 2) - 100) %>%
# select(Date,CumPortfolio,TotalUSD,TotalPer,RelUSD,RelPer,DailyStocks,
# DailyTrans,DailyDiv,CumDiv,DailyCash,CumCash) %>% arrange(desc(Date)) %>%
# mutate_if(is.numeric, funs(round(., 2))) %>%
# distinct()
#
# return(result)
#
# }
#
#
# ####################################################################
# # Portfolio Overall Performance
# #
# # This function lets the user calculate portfolio performance
# #
# # @family Investment
# # @param portfolio Dataframe. Structure: "Symbol", "Stocks", "StockIniValue", "InvPerc", "Type", "Trans", "StartDate"
# # @param daily Dataframe. Daily data
# portfolio_performance <- function(portfolio, daily) {
#
# warning("New portfolio/stocks functions created. Use daily_stocks() and daily_portfolio() instead")
#
# portf_structure <- c("Symbol", "Stocks", "StockIniValue", "InvPerc", "Type", "Trans", "StartDate")
#
# if (!all(portf_structure %in% colnames(portfolio)))
# stop(paste("The structure of the 'portfolio' table should be:", vector2text(portf_structure)))
#
# divIn <- daily %>% group_by(Symbol) %>%
# summarise(DivIncome = sum(DailyDiv),
# DivPerc = round(100 * DivIncome / sum(Amount), 2)) %>%
# arrange(desc(DivPerc))
#
# result <- left_join(portfolio %>% mutate(Symbol = as.character(Symbol)),
# daily %>% filter(Date == max(Date)) %>% select(Symbol,Ticker,DailyValue),
# by = c('Symbol')) %>%
# mutate(DifUSD = DailyValue - Invested, DifPer = round(100 * DifUSD / Invested,2),
# StockValue = DailyValue / Stocks,
# InvPerc = 100 * InvPerc,
# RealPerc = round(100 * DailyValue/sum(DailyValue), 2)) %>%
# left_join(divIn, by = c('Symbol')) %>%
# mutate_if(is.numeric, funs(round(., 2))) %>%
# select(Symbol:StockIniValue, Ticker, StockValue, InvPerc, RealPerc, everything())
#
# return(result)
#
# }
#
#
# ####################################################################
# # Portfolio Daily Performance
# #
# # This function lets the user calculate daily portfolio performance
# #
# # @family Investment
# # @param data Dataframe. Result from get_stocks()
# # @param dailys Dataframe. Result from get_stocks_hist()
# # @param cash_fix Numeric. If you wish to algebraically sum a value
# # to your cash balance
# portfolio_daily <- function(data, dailys, cash_fix = 0) {
#
# warning("New portfolio/stocks functions created. Use daily_stocks() and daily_portfolio() instead")
#
# daily_fixed <- stocks_hist_fix(dailys = dailys$values,
# dividends = dailys$dividends,
# transactions = data$transactions)
#
# mindate <- as.Date(min(as.Date(daily_fixed$Date), origin = "1970-01-01"), na.rm = TRUE)
#
# result <- data.frame(Date = as.Date(mindate:Sys.Date(), origin = "1970-01-01")) %>%
# left_join(data$cash %>% select(Date, Cash) %>% rename(Deposit = Cash), "Date") %>%
# left_join(data$transactions %>% group_by(Date) %>% summarise(Amount = sum(Amount)) %>%
# select(Date, Amount) %>% rename(Invest = Amount), "Date") %>%
# left_join(daily_fixed %>% group_by(Date) %>%
# summarise(StocksValue = sum(DailyValue), Dividend = sum(DailyDiv),
# Expense = sum(Expenses)) %>%
# select(Date, StocksValue, Dividend, Expense), "Date") %>%
# arrange(Date) %>% replace(., is.na(.), 0) %>%
# mutate(Deposited = cumsum(Deposit)) %>%
# mutate(Invested = cumsum(Invest)) %>%
# mutate(StocksValue = ifelse(StocksValue == 0, lag(StocksValue), StocksValue)) %>%
# mutate(StocksValue = ifelse(StocksValue == 0, lag(StocksValue), StocksValue)) %>%
# mutate(StocksValue = ifelse(StocksValue == 0, lag(StocksValue), StocksValue)) %>%
# mutate(Dividends = cumsum(Dividend)) %>%
# mutate(Expenses = cumsum(Expense)) %>%
# mutate(Portfolio = (Deposited - Invested) + StocksValue + Dividends - Expenses) %>%
# mutate(Cash = Portfolio - StocksValue - Expenses + cash_fix) %>%
# mutate(Performance = round(100 * (1 - Invested/StocksValue), 2)) %>%
# select(Date,Deposit,Invest,Dividend,Expense,Deposited,
# Invested,Dividends,Expenses,StocksValue,everything())
#
# return(result)
#
# }
#
#
# ################# PLOTTING FUNCTIONS #################
#
# ####################################################################
# # Portfolio Daily Plot
# #
# # This function lets the user plot his portfolio daily change
# #
# # @family Investment
# # @param stocks_perf Dataframe. Output of the stocks_performance function
# # @param save Boolean. Export plot as an image?
# portfolio_daily_plot <- function(stocks_perf, save = FALSE) {
#
# warning("New portfolio/stocks functions created. Use splot_roi() instead")
#
# stocks_perf <- stocks_perf %>%
# # Get rid of super picks
# filter(abs(RelPer) < 70) %>%
# # Add day before first date with zero data
# rbind(tail(stocks_perf, 1) %>% mutate(Date = Date - 1, TotalPer = 0))
#
# plot <- stocks_perf %>%
# filter(abs(RelPer) < 70) %>%
# mutate(color = ifelse(RelPer > 0, "Pos", "Neg")) %>%
# ggplot() +
# geom_area(aes(x = Date, y = TotalPer/(0.5*max(stocks_perf$TotalPer))), alpha = 0.15) +
# geom_bar(aes(x = Date, y = RelPer, fill = color), stat = 'identity', width = 1) +
# geom_line(aes(x = Date, y = TotalPer/(0.5*max(stocks_perf$TotalPer))), alpha = 0.9, colour = "black") +
# geom_hline(yintercept = 0, alpha = 0.5, color = "black") +
# guides(fill = FALSE) +
# scale_x_date(date_labels = "%b%y") +
# scale_y_continuous(
# labels = comma,
# sec.axis = sec_axis(~.*(0.5 * max(stocks_perf$TotalPer)),
# name = "% Portfolio Var", labels = comma)) +
# labs(y = '% Daily Var', x = '',
# title = 'Daily Portfolio\'s Growth (%) since Start',
# subtitle = paste(stocks_perf$Date[1]," (Includes Expenses): ",
# formatNum(stocks_perf$TotalPer[1],2),"% ($",
# formatNum(stocks_perf$TotalUSD[1], 0),") | $",
# formatNum(stocks_perf$CumPortfolio[1]), sep = "")) +
# theme_lares2()
#
# if (save) plot <- plot + ggsave("portf_daily_change.png", width = 8, height = 5, dpi = 300)
#
# return(plot)
#
# }
#
#
# ####################################################################
# # Stocks Total Performance Plot
# #
# # This function lets the user plot his stocks total performance
# #
# # @family Investment
# # @param stocks_perf Dataframe. Output of the stocks_performance function
# # @param portfolio_perf Dataframe. Output of the portfolio_performance function
# # @param daily Dataframe. Daily data
# # @param trans Dataframe. Transactions data
# # @param cash Dataframe. Cash data
# # @param save Boolean. Export plot as an image?
# stocks_total_plot <- function(stocks_perf, portfolio_perf, daily, trans, cash, save = FALSE) {
#
# warning("New portfolio/stocks functions created. Use splot_summary() instead")
#
# tops <- max(rbind(portfolio_perf$Invested, portfolio_perf$DailyValue))
# summary <- rbind(
# paste0("Portfolio: $", formatNum(stocks_perf$CumPortfolio[1])," | ", max(daily$Date)),
# paste0("Stocks: $", formatNum(sum(stocks_perf$DailyStocks[1]),1)," & Cash: $",
# formatNum(stocks_perf$CumCash[1],1)),
# paste0("ROI: ", formatNum(stocks_perf$TotalPer[1], 2),"% ($",
# formatNum(stocks_perf$TotalUSD[1],0),")"),
# paste0("Dividends: $", formatNum(sum(daily$DailyDiv),0)," & Expenses: $",
# formatNum(sum(daily$Expenses),0)))
#
# plot <- portfolio_perf %>%
# mutate(shapeflag = ifelse(DifUSD < 0, 25, 24), box = -tops/5.5) %>% ungroup() %>%
# mutate(Symbol = paste0(Symbol, " (", formatNum(100*DailyValue/sum(DailyValue)), "%)")) %>%
# ggplot() +
# geom_hline(yintercept = 0, colour = "black") +
# geom_col(aes(x = reorder(Symbol, Invested), y = Invested, fill = Symbol, group = 1)) +
# geom_col(aes(x = Symbol, y = Invested + DifUSD, fill = Symbol), alpha = 0.5) +
# geom_col(aes(x = Symbol, y = box), fill = "grey", alpha = 0.5) +
# geom_point(aes(x = Symbol, y = Invested + DifUSD, shape = shapeflag), colour = "black") +
# scale_shape_identity() +
# geom_text(aes(label = paste0("$",formatNum(DifUSD,1)), y = Invested + DifUSD, x = Symbol),
# size = 2.9, hjust = -.2, vjust = -0.2) +
# geom_text(aes(label = paste0(DifPer, "%"), y = Invested + DifUSD, x = Symbol),
# size = 2.9, hjust = -.2, vjust = 1.2) +
# geom_text(aes(label = paste0("$", formatNum(DailyValue, 1)), y = box, x = Symbol),
# size = 3, hjust = -.1, vjust = -0.2) +
# geom_text(aes(label = paste0(Stocks, " @$", formatNum(DailyValue/Stocks, 2)), y = box, x = Symbol),
# size = 2, hjust = -.1, vjust = 1.5) +
# geom_text(aes(label = paste0("$", formatNum(Invested,1)), y = 0, x = Symbol, colour = Symbol),
# size = 2, hjust = 0, vjust = -0.2) +
# geom_text(aes(label = paste0("@$", formatNum(Invested/Stocks, 2)), y = 0, x = Symbol, colour = Symbol),
# size = 2, hjust = 0, vjust = 1.5) +
# annotate("label", x = length(unique(portfolio_perf$Stocks)) * 0.25, y = tops * 0.6,
# label = vector2text(summary,"\n",quotes = F), size = 3.5, hjust = 0, alpha = 0.55) +
# scale_y_continuous(limits = c(NA, tops*1.1), labels = comma, expand = c(0, 0)) +
# labs(y = '', x = '', title = "Stocks Distribution and Growth") +
# guides(fill = FALSE, colour = FALSE) + coord_flip() +
# theme_lares2(pal = 1)
#
# if (save) plot <- plot + ggsave("portf_stocks_change.png", width = 8, height = 8, dpi = 300)
#
# return(plot)
#
# }
#
#
# ####################################################################
# # Stocks Daily Plot
# #
# # This function lets the user plot stocks daily change
# #
# # @family Investment
# # @param portfolio Dataframe. Output of the portfolio_perf function
# # @param daily Dataframe. Daily data
# # @param weighted Boolean. Should variation values be weighted to the
# # portfolio (or simply compared with initial value)?
# # @param group Boolean. Group stocks by stocks type?
# # @param save Boolean. Export plot as an image?
# stocks_daily_plot <- function(portfolio, daily, weighted = TRUE, group = TRUE, save = FALSE) {
#
# warning("New portfolio/stocks functions created. Use splot_change() instead")
#
# try_require("ggrepel")
#
# d <- daily %>%
# left_join(portfolio %>% select(Symbol,Type), by = 'Symbol') %>%
# arrange(Date) %>% group_by(Symbol) %>%
# mutate(Hist = if (weighted) {100*(1 - cumsum(Amount)/(Stocks*Adjusted))} else {RelChangePHist},
# BuySell = ifelse(Amount > 0, "Bought", ifelse(Amount < 0, "Sold", NA)))
# labels <- d %>% filter(Date == max(Date))
# amounts <- d %>% filter(Amount != 0) %>%
# mutate(label = paste0(round(Amount/1000,1),"K"))
# days <- as.integer(difftime(range(d$Date)[2], range(d$Date)[1], units = "days"))
# plot <- ggplot(d) + ylab('% Change since Start') +
# geom_hline(yintercept = 0, alpha = 0.8, color = "black") +
# geom_line(aes(x = Date, y = Hist, color = Symbol), alpha = 0.9, size = 0.5) +
# geom_point(aes(x = Date, y = Hist, size = abs(Amount), colour = BuySell), alpha = 0.6) +
# scale_y_continuous(position = "right") +
# scale_size(range = c(0, 3.2)) + guides(size = FALSE, colour = FALSE) +
# xlim(min(d$Date), max(d$Date) + round(days*0.08)) +
# labs(title = 'Daily Portfolio\'s Stocks Change (%) since Start', x = '',
# subtitle = 'Showing absolute delta values since first purchase', colour = '') +
# geom_label_repel(data = amounts, aes(x = Date, y = Hist, label = label), size = 2) +
# geom_label(data = labels, aes(x = Date, y = Hist, label = Symbol), size = 2.5, hjust = -0.2, alpha = 0.6) +
# theme_lares2(pal = 2)
#
# if (group) plot <- plot + facet_grid(Type ~ ., scales = "free", switch = "both")
# if (weighted) plot <- plot + labs(subtitle = "Showing real weighted portfolio delta values")
# if (save) plot <- plot + ggsave("portf_stocks_histchange.png", width = 8, height = 5, dpi = 300)
#
# return(plot)
#
# }
#
#
# ####################################################################
# # Portfolio's Category Distribution
# #
# # This function lets the user plot his portfolio's distribution
# #
# # @family Investment
# # @param portfolio_perf Dataframe. Output of the portfolio_performance function
# # @param save Boolean. Export plot as an image?
# portfolio_distr_plot <- function(portfolio_perf, save = FALSE) {
#
# warning("New portfolio/stocks functions created. Use splot_types() instead")
#
# p <- portfolio_perf %>%
# group_by(Type) %>%
# mutate(label = paste0(Type, "\n", formatNum(
# 100*sum(DailyValue)/sum(portfolio_perf$DailyValue)),"%")) %>%
# ggplot() +
# geom_bar(aes(x = "", y = DailyValue, fill = Symbol), width = 1, stat = "identity") +
# facet_grid(. ~ label, scales = "free") +
# scale_y_continuous(labels = comma, expand = c(0, 0)) +
# theme_lares2(pal = 1) +
# labs(x = NULL, y = "Total value", title = "Portfolio's Category Distribution")
# if (save) p <- p + ggsave("portf_distribution.png", width = 8, height = 5, dpi = 300)
# return(p)
# }
#
#
# ####################################################################
# # Portfolio's Daily Cumulative
# #
# # This function lets the user plot his portfolio's daily cumulative
# #
# # @family Investment
# # @param portfolio Dataframe. Results from portfolio_daily()
# # @param save Boolean. Export plot as an image?
# portfolio_total_plot <- function(portfolio, save = FALSE) {
#
# warning("New portfolio/stocks functions created. Use splot_growth() instead")
#
# try_require("ggrepel")
#
# labels <- portfolio %>% filter(Deposit != 0)
# caption <- paste0("Portfolio: $", formatNum(portfolio$Portfolio[nrow(portfolio)]),
# "\nInvested: $", formatNum(portfolio$StocksValue[nrow(portfolio)]))
#
# plot <- data.frame(Date = rep(portfolio$Date, 2),
# type = c(rep("Invested", nrow(portfolio)),
# rep("Cash", nrow(portfolio))),
# values = c(portfolio$StocksValue, portfolio$Cash)) %>%
# ggplot() +
# geom_area(aes(x = Date, y = values, fill = type, group = type),
# colour = "black", size = 0.2, alpha = 0.95) +
# labs(title = " Daily Total Portfolio Value", y = NULL, x = NULL, fill = "") +
# geom_label_repel(data = labels,
# aes(x = Date, y = Portfolio, label = formatNum(Deposit, 0)),
# vjust = -1.3, size = 2.5) +
# scale_y_continuous(position = "right", labels = comma) +
# scale_x_date(date_labels = "%b%y", expand = c(0, 0)) +
# annotate("text", label = caption, x = max(portfolio$Date),
# y = 0.09*max(portfolio$Portfolio),
# size = 3.3, colour = "white", hjust = 1.1) +
# theme_lares2(pal = 1) +
# theme(legend.position = "top", legend.justification = c(0, 1))
#
# if (save) plot <- plot +
# ggsave("portf_total_hist.png", width = 8, height = 5, dpi = 300)
#
# return(plot)
#
# }
#
#
# ################# REPORTING FUNCTIONS #################
#
# ####################################################################
# # Portfolio's Calculations and Plots
# #
# # This function lets the user create his portfolio's calculations and
# # plots for further study.
# #
# # @family Investment
# # @param data List. Containing the following dataframes: portfolio,
# # transactions, cash. They have to follow the original xlsx format
# # @param cash_fix Numeric. If you wish to algebraically sum a value
# # to your cash balance
# # @param tax Numeric. What percentage of your dividends do taxes take?
# # Range from 0 to 99
# # @param expenses Numeric. How much does the bank or broker charge per
# # transaction? Absolute value.
# # @param sectors Boolean. Return sectors segmentation for ETFs?
# stocks_objects <- function(data, cash_fix = 0, tax = 30, expenses = 7, sectors = TRUE) {
#
# warning("New portfolio/stocks functions created. Use stocks_obj() instead")
#
# tabs <- c('portfolio','transactions','cash')
# if (sum(names(data) %in% tabs) != 3) {
# not <- names(data)[!names(data) %in% tabs]
# stop(paste("The following objects are obligatory too:", vector2text(not)))
# }
#
# tempdir <- tempdir()
# on.exit(setwd(tempdir))
#
# # Data wrangling and calculations
# message(">>> Downloading historical data for each stock...")
# hist <- get_stocks_hist(symbols = data$portfolio$Symbol,
# from = data$portfolio$StartDate,
# tax = tax)
# daily <- stocks_hist_fix(dailys = hist$values,
# dividends = hist$dividends,
# transactions = data$transactions,
# expenses = expenses)
# stocks_perf <- stocks_performance(daily,
# cash_in = data$cash,
# cash_fix = cash_fix)
# portfolio_perf <- portfolio_performance(portfolio = data$portfolio,
# daily = daily)
# pf_daily <- portfolio_daily(data = data, dailys = hist, cash_fix = cash_fix)
#
# message("Calculations ready...")
#
# # Visualizations
# p1 <- portfolio_daily_plot(stocks_perf)
# p2 <- stocks_total_plot(stocks_perf, portfolio_perf, daily,
# trans = data$transactions,
# cash = data$cash)
# p3 <- stocks_daily_plot(portfolio = data$portfolio, daily, weighted = FALSE)
# p5 <- stocks_daily_plot(portfolio = data$portfolio, daily, weighted = TRUE)
# p6 <- portfolio_total_plot(pf_daily)
# p4 <- portfolio_distr_plot(portfolio_perf)
# if (sectors) p7 <- etf_sector_plot(portfolio_perf)
#
# message("Graphics ready...")
#
# # Consolidation
# results <- list(p_portf_daily_change = p1,
# p_portf_stocks_change = p2,
# p_portf_stocks_histchange_weighted = p5,
# p_portf_stocks_histchange_absolute = p3,
# p_portf_distribution = p4,
# p_portfolio_daily = p6,
# df_portfolio_perf = portfolio_perf,
# df_portfolio_daily = pf_daily,
# df_stocks_perf = stocks_perf,
# df_daily = daily,
# df_hist = hist)
# if (sectors) results[["p_sectors"]] <- p7
#
# unlink(tempdir, recursive = FALSE)
#
# message("All results ready to export!")
# return(results)
# }
#
# ####################################################################
# # Portfolio's Full Report in HTML
# #
# # This function lets the user create his portfolio's full report in HTML using
# # the library's results
# #
# # @family Investment
# # @param results List. Containing the following objects: portf_daily_change,
# # portf_stocks_change, portf_stocks_histchange, portf_distribution & portfolio_perf.
# # You can simply use stocks_objects(data) if you didn't mess with the order!
# stocks_html <- function(results) {
#
# try_require("rmarkdown")
# dir <- getwd()
# pandoc <- Sys.getenv("RSTUDIO_PANDOC")
# Sys.setenv(RSTUDIO_PANDOC = pandoc)
#
# # Can be more accurate with names but works for me!
# params <- list(
# portf_daily_change = results[["p_portf_daily_change"]],
# portf_stocks_change = results[["p_portf_stocks_change"]],
# portf_stocks_histchange_weighted = results[["p_portf_stocks_histchange_weighted"]],
# portf_stocks_histchange_absolute = results[["p_portf_stocks_histchange_absolute"]],
# portf_distribution = results[["p_portf_distribution"]],
# portf_daily = results[["p_portfolio_daily"]],
# portfolio_perf = results[["df_portfolio_perf"]])
# if ("p_sectors" %in% names(results))
# params[["portf_distribution_sectors"]] <- results[["p_sectors"]]
#
# invisible(file.copy(
# from = system.file("docs", "stocksReport.Rmd", package = "lares"),
# to = dir,
# overwrite = TRUE,
# recursive = FALSE,
# copy.mode = TRUE))
#
# render("stocksReport.Rmd",
# output_file = "stocksReport.html",
# params = params,
# envir = new.env(parent = globalenv()),
# quiet = TRUE)
#
# invisible(file.remove(paste0(dir, "/stocksReport.Rmd")))
# message("HTML report created succesfully!")
# }
#
# ######################### SHORT #####################################
# # df <- get_stocks() # Get data from my Dropbox
# # dfp <- stocks_objects(df) # Get historical data, make calculations and plots
# # stocks_html(dfp) # Create HTML report
# # stocks_report() # Create and send report to my mail
#
# ######################### LONG #####################################
# # df <- get_stocks() # Get data from my Dropbox
# # hist <- get_stocks_hist(symbols = df$portfolio$Symbol, from = df$portfolio$StartDate)
# # daily <- stocks_hist_fix(dailys = hist$values, dividends = hist$dividends, transactions = df$transactions)
# ###################### OLD FUNCTIONS ###############################
#
# ####################################################################
# # Get Personal Portfolio's Data
# #
# # This function downloads my personal Excel with my Portfolio data
# #
# # @family Investment
# # @param filename Character. Import a local Excel file
# # @param token_dir Character. Where is my personal token for Dropbox connection?
# # @param auto Boolean. Automatically user my local personal file?
# # @param sheets Character Vector. Names of each sheet containing Portfolio summary,
# # Cash, and Transactions information
# # @param keep_old Boolean. Keep 100% sold tickers?
# get_stocks <- function(filename = NA, token_dir = "~/Dropbox (Personal)/Documentos/Docs/Data",
# auto = TRUE, sheets = c("Portafolio","Fondos","Transacciones"),
# keep_old = TRUE) {
#
# warning("New portfolio/stocks functions created. Use stocks_file() instead")
#
# processFile <- function(file, keep_old = TRUE) {
# port <- read.xlsx(file, sheet = sheets[1], skipEmptyRows = TRUE, detectDates = TRUE)
# cash <- read.xlsx(file, sheet = sheets[2], skipEmptyRows = TRUE, detectDates = TRUE)
# trans <- read.xlsx(file, sheet = sheets[3], skipEmptyRows = TRUE, detectDates = TRUE)
# if (keep_old == FALSE) port <- port[port$Stocks != 0,]
# mylist <- list("portfolio" = port, "transactions" = trans, "cash" = cash)
# return(mylist)
# }
#
# # FOR PERSONAL USE
# local <- Sys.info()
# if (local[["nodename"]] == "MacBook-Pro-de-Bernardo.local" & auto == TRUE) {
# message("Using BL's local file...")
# local <- "~/Dropbox (Personal)/Documentos/Interactive Brokers/Portfolio/Portfolio LC.xlsx"
# results <- processFile(local, keep_old)
# } else {
# # FOR EVERYONE'S USE
# if (!is.na(filename)) {
# if (file.exists(filename)) results <- processFile(filename, keep_old) else
# stop("Error: that file doesn't exist or it's not in your working directory!")
# } else {
# # FOR DROPBOX'S USE
# token_dir <- token_dir
# load(paste0(token_dir, "/token_pers.rds"))
# x <- drop_search("Portfolio LC.xlsx", dtoken = token)
# file <- "temp.xlsx"
# invisible(
# drop_download(x$matches[[1]]$metadata$path_lower,
# local_path = file,
# overwrite = TRUE,
# dtoken = token))
# results <- processFile(file, keep_old)
# file.remove(file)
# }
# }
# message("File imported succesfully!")
# return(results)
# }
#
#
# ####################################################################
# # Download Stocks Historical Data
# #
# # This function lets the user download stocks historical data
# #
# # @family Investment
# # @param symbols Character Vector. List of symbols to download historical data.
# # Example: c('VTI','TSLA')
# # @param from Date. Since when do you wish to download historical data
# # @param today Boolean. Do you wish to additionally download today's quote?
# # @param tax Numeric. Percentage for dividends real return. Range from 0 to 99
# # @param verbose Boolean. Print results and progress while downloading?
# get_stocks_hist <- function(symbols = NA,
# from = NA,
# today = TRUE,
# tax = 30,
# verbose = TRUE) {
#
# warning("New portfolio/stocks functions created. Use stocks_hist() instead")
#
# try_require("quantmod")
#
# if (!haveInternet()) stop("You currently have NO internet connection!")
#
# options("getSymbols.warning4.0" = FALSE)
# options("getSymbols.yahoo.warning" = FALSE)
# data <- divs <- c()
#
# if (!any(is.na(symbols))) {
# from[is.na(from)] <- Sys.Date() - 365
# if (length(from) == length(symbols)) {
# for (i in 1:length(symbols)) {
# symbol <- as.character(symbols[i])
# start_date <- as.character(from[i])
# values <- getSymbols(symbol, env = NULL, from = start_date, src = "yahoo") %>% data.frame()
# values <- cbind(row.names(values), as.character(symbol), values)
# colnames(values) <- c("Date","Symbol","Open","High","Low","Close","Volume","Adjusted")
# values <- mutate(values, Adjusted = rowMeans(select(values, High, Close), na.rm = TRUE))
# row.names(values) <- NULL
#
# # Add right now's data
# if (today) {
# quote <- function(ticks) {
# qRoot <- "https://query1.finance.yahoo.com/v7/finance/quote?fields=symbol,longName,regularMarketPrice,regularMarketChange,regularMarketTime&formatted=false&symbols="
# z <- fromJSON(paste(qRoot, paste(ticks, collapse = ","), sep = ""))
# z <- z$quoteResponse$result[,c("symbol", "regularMarketTime", "regularMarketPrice", "regularMarketChange", "longName")]
# row.names(z) <- z$symbol
# z$symbol <- NULL
# names(z) <- c("Time", "Price", "Change", "Name")
# z$Time <- as.POSIXct(z$Time, origin = '1970-01-01 00:00:00')
# return(z)
# }
# now <- quote(symbol)
# now <- data.frame(Date = as.character(as.Date(now$Time)), Symbol = symbol,
# Open = now$Price, High = now$Price, Low = now$Price, Close = now$Price,
# Volume = 0, Adjusted = now$Price)
# values <- rbind(values, now)
# }
#
# data <- rbind(data, values)
#
# # Dividends if case
# d <- getDividends(as.character(symbol), from = start_date)
# if (nrow(d) > 0) {
# div <- data.frame(Symbol = rep(symbol, nrow(d)),
# Date = ymd(row.names(data.frame(d))),
# Div = as.vector(d),
# DivReal = as.vector(d)*(100 - tax)/100)
# divs <- rbind(divs, div)
# }
# if (verbose == TRUE) {
# info <- paste(symbol, "since", start_date, " ")
# statusbar(i, length(symbols), info)
# }
# }
# } else {message("The parameters 'symbols' and 'from' should be the same length.") }
# } else {message("You need to define which stocks to bring. Use the 'symbols=' parameter.") }
#
# joined <- data %>%
# select(Date, Symbol, Adjusted) %>% rename(Value = Adjusted) %>%
# mutate(Date = as.Date(Date), Symbol = as.character(Symbol)) %>%
# left_join(mutate(divs, Symbol = as.character(Symbol)),
# by = c("Date", "Symbol")) %>%
# replace(is.na(.), 0) %>%
# arrange(desc(Date))
#
# results <- list("values" = data, "dividends" = divs, "joined" = joined)
# return(results)
# }
#
#
# ####################################################################
# # Fix Historical Data on Stocks
# #
# # This function lets the user fix downloaded stock data into a usefull
# # format output
# #
# # @family Investment
# # @param dailys Dataframe. Daily values. Structure: "Date", "Symbol",
# # "Open", "High", "Low", "Close", "Volume", "Adjusted"
# # @param dividends Dataframe. Dividends. Structure: "Symbol", "Date",
# # "Div", "DivReal"
# # @param transactions Dataframe. Transactions. Structure: "ID", "Inv",
# # "CODE", "Symbol", "Date", "Quant", "Value", "Amount", "Description"
# # @param expenses Numeric. How much does the bank or broker charge per
# # transaction? Absolute value.
# stocks_hist_fix <- function(dailys, dividends, transactions, expenses = 7) {
#
# warning("New portfolio/stocks functions created. Use daily_stocks() and daily_portfolio() instead")
#
# dailys_structure <- c("Date", "Symbol", "Open", "High", "Low", "Close", "Volume", "Adjusted")
# dividends_structure <- c("Symbol", "Date", "Div", "DivReal")
# trans_structure <- c("ID", "Inv", "CODE", "Symbol", "Date", "Quant", "Value", "Amount", "Description")
#
# if (!all(dailys_structure %in% colnames(dailys))) {
# stop(paste("The structure of the 'dailys' table should be:",
# paste(shQuote(dailys_structure), collapse = ", ")))
# }
#
# if (!all(dividends_structure %in% colnames(dividends))) {
# stop(paste("The structure of the 'dividends' table should be:",
# paste(shQuote(dividends_structure), collapse = ", ")))
# }
#
# if (!all(trans_structure %in% colnames(transactions))) {
# stop(paste("The structure of the 'transactions' table should be:",
# paste(shQuote(trans_structure), collapse = ", ")))
# }
#
# df <- dailys %>%
# mutate(Date = as.Date(as.character(Date)), Symbol = as.character(Symbol)) %>%
# arrange(Date, Symbol) %>%
# # Add transactions daily data
# left_join(transactions %>%
# mutate(Date = as.Date(as.character(Date)), Symbol = as.character(Symbol)) %>%
# select(Symbol, Date, Quant, Value, Amount),
# by = c('Symbol','Date')) %>%
# mutate(Expenses = ifelse(is.na(Quant), 0, expenses)) %>%
# group_by(Symbol) %>%
# mutate(Quant = ifelse(is.na(Quant), 0, Quant),
# Stocks = cumsum(Quant)) %>%
# # Add dividends daily data
# left_join(dividends %>%
# mutate(Date = as.Date(as.character(Date)), Symbol = as.character(Symbol)),
# by = c('Symbol','Date')) %>%
# mutate(DailyDiv = ifelse(is.na(DivReal), 0, Stocks * DivReal)) %>% ungroup() %>%
# # If sold everything and then restarted...
# group_by(Symbol) %>%
# mutate(group = ifelse(lead(Stocks) > 0 & Stocks == 0, 1, 0)) %>%
# mutate(groupi = cumsum(group)) %>% ungroup() %>%
# mutate(Ticker = ifelse(groupi > 0, paste0(Symbol, groupi + 1), Symbol)) %>%
# select(-group, -groupi) %>%
# # Some other cumulative calculations
# arrange(Date) %>% group_by(Stocks) %>%
# mutate(DailyValue = Close * Stocks) %>%
# arrange(desc(Date), desc(DailyValue)) %>%
# arrange(Date) %>% group_by(Ticker) %>%
# mutate(StartUSD = Value[Date == min(Date)],
# RelChangeP = 100 - (100 * lag(Close) / Close),
# RelChangeUSD = Stocks * (Close - lag(Close)) - Expenses,
# RelChangePHist = ifelse(Date == min(Date), 0,
# 100 - (100 * StartUSD / Close)),
# RelChangeUSDHist = Stocks * (Close - StartUSD) - sum(Expenses)) %>%
# arrange(desc(Date)) %>%
# #mutate_if(is.numeric, funs(round(., 2))) %>%
# ungroup() %>%
# mutate_at(vars(-contains("Date")), funs(replace(., is.na(.), 0))) %>%
# group_by(Date, Ticker) %>% arrange(desc(Volume)) %>% slice(1) %>% ungroup()
#
# return(df)
#
# }
#
#
# ####################################################################
# # Stocks Overall Performance
# #
# # This function lets the user calculate stocks performance
# #
# # @family Investment
# # @param dailys Dataframe. Daily values. Structure: "Date", "Symbol",
# # "Open", "High", "Low", "Close", "Volume", "Adjusted", "Quant",
# # "Value", "Amount", "Expenses", "Stocks", "Div", "DivReal", "DailyDiv",
# # "DailyValue", "RelChangeP",
# # "RelChangeUSD"
# # @param cash_in Dataframe. Deposits and withdrawals. Structure: "ID", "Date", "Cash"
# # @param cash_fix Numeric. If you wish to algebraically sum a value
# # to your cash balance
# stocks_performance <- function(dailys, cash_in, cash_fix = 0) {
#
# warning("New portfolio/stocks functions created. Use daily_stocks() and daily_portfolio() instead")
#
# dailys_structure <- c("Date", "Symbol", "Open", "High", "Low", "Close", "Volume", "Adjusted",
# "Quant", "Value", "Amount", "Expenses", "Stocks", "Div", "DivReal",
# "DailyDiv", "DailyValue", "RelChangeP", "RelChangeUSD",
# "RelChangePHist", "RelChangeUSDHist")
#
# cash_structure <- c("ID", "Date", "Cash")
#
# if (!all(dailys_structure %in% colnames(dailys))) {
# stop(paste("The structure of the 'dailys' table should be:",
# paste(shQuote(dailys_structure), collapse = ", ")))
# }
#
# if (!all(cash_structure %in% colnames(cash_in))) {
# stop(paste("The structure of the 'cash_in' table should be:",
# paste(shQuote(cash_structure), collapse = ", ")))
# }
#
# result <- dailys %>% group_by(Date) %>%
# summarise(Stocks = n(),
# DailyStocks = sum(DailyValue),
# DailyTrans = sum(Amount),
# DailyExpen = sum(Expenses),
# DailyDiv = sum(DailyDiv),
# RelUSD = sum(RelChangeUSD)) %>%
# mutate(RelPer = round(100 * RelUSD / DailyStocks, 2),
# CumDiv = cumsum(DailyDiv),
# CumExpen = cumsum(DailyExpen)) %>%
# left_join(cash_in %>% select(Date, Cash), by = c('Date')) %>%
# mutate(DailyCash = ifelse(is.na(Cash), 0, Cash),
# CumCash = cumsum(DailyCash) - cumsum(DailyTrans) + cumsum(DailyDiv) + cash_fix,
# CumPortfolio = CumCash + DailyStocks,
# TotalUSD = DailyStocks - cumsum(DailyTrans),
# TotalPer = round(100 * DailyStocks / (cumsum(DailyTrans)), 2) - 100) %>%
# select(Date,CumPortfolio,TotalUSD,TotalPer,RelUSD,RelPer,DailyStocks,
# DailyTrans,DailyDiv,CumDiv,DailyCash,CumCash) %>% arrange(desc(Date)) %>%
# mutate_if(is.numeric, funs(round(., 2))) %>%
# distinct()
#
# return(result)
#
# }
#
#
# ####################################################################
# # Portfolio Overall Performance
# #
# # This function lets the user calculate portfolio performance
# #
# # @family Investment
# # @param portfolio Dataframe. Structure: "Symbol", "Stocks", "StockIniValue", "InvPerc", "Type", "Trans", "StartDate"
# # @param daily Dataframe. Daily data
# portfolio_performance <- function(portfolio, daily) {
#
# warning("New portfolio/stocks functions created. Use daily_stocks() and daily_portfolio() instead")
#
# portf_structure <- c("Symbol", "Stocks", "StockIniValue", "InvPerc", "Type", "Trans", "StartDate")
#
# if (!all(portf_structure %in% colnames(portfolio)))
# stop(paste("The structure of the 'portfolio' table should be:", vector2text(portf_structure)))
#
# divIn <- daily %>% group_by(Symbol) %>%
# summarise(DivIncome = sum(DailyDiv),
# DivPerc = round(100 * DivIncome / sum(Amount), 2)) %>%
# arrange(desc(DivPerc))
#
# result <- left_join(portfolio %>% mutate(Symbol = as.character(Symbol)),
# daily %>% filter(Date == max(Date)) %>% select(Symbol,Ticker,DailyValue),
# by = c('Symbol')) %>%
# mutate(DifUSD = DailyValue - Invested, DifPer = round(100 * DifUSD / Invested,2),
# StockValue = DailyValue / Stocks,
# InvPerc = 100 * InvPerc,
# RealPerc = round(100 * DailyValue/sum(DailyValue), 2)) %>%
# left_join(divIn, by = c('Symbol')) %>%
# mutate_if(is.numeric, funs(round(., 2))) %>%
# select(Symbol:StockIniValue, Ticker, StockValue, InvPerc, RealPerc, everything())
#
# return(result)
#
# }
#
#
# ####################################################################
# # Portfolio Daily Performance
# #
# # This function lets the user calculate daily portfolio performance
# #
# # @family Investment
# # @param data Dataframe. Result from get_stocks()
# # @param dailys Dataframe. Result from get_stocks_hist()
# # @param cash_fix Numeric. If you wish to algebraically sum a value
# # to your cash balance
# portfolio_daily <- function(data, dailys, cash_fix = 0) {
#
# warning("New portfolio/stocks functions created. Use daily_stocks() and daily_portfolio() instead")
#
# daily_fixed <- stocks_hist_fix(dailys = dailys$values,
# dividends = dailys$dividends,
# transactions = data$transactions)
#
# mindate <- as.Date(min(as.Date(daily_fixed$Date), origin = "1970-01-01"), na.rm = TRUE)
#
# result <- data.frame(Date = as.Date(mindate:Sys.Date(), origin = "1970-01-01")) %>%
# left_join(data$cash %>% select(Date, Cash) %>% rename(Deposit = Cash), "Date") %>%
# left_join(data$transactions %>% group_by(Date) %>% summarise(Amount = sum(Amount)) %>%
# select(Date, Amount) %>% rename(Invest = Amount), "Date") %>%
# left_join(daily_fixed %>% group_by(Date) %>%
# summarise(StocksValue = sum(DailyValue), Dividend = sum(DailyDiv),
# Expense = sum(Expenses)) %>%
# select(Date, StocksValue, Dividend, Expense), "Date") %>%
# arrange(Date) %>% replace(., is.na(.), 0) %>%
# mutate(Deposited = cumsum(Deposit)) %>%
# mutate(Invested = cumsum(Invest)) %>%
# mutate(StocksValue = ifelse(StocksValue == 0, lag(StocksValue), StocksValue)) %>%
# mutate(StocksValue = ifelse(StocksValue == 0, lag(StocksValue), StocksValue)) %>%
# mutate(StocksValue = ifelse(StocksValue == 0, lag(StocksValue), StocksValue)) %>%
# mutate(Dividends = cumsum(Dividend)) %>%
# mutate(Expenses = cumsum(Expense)) %>%
# mutate(Portfolio = (Deposited - Invested) + StocksValue + Dividends - Expenses) %>%
# mutate(Cash = Portfolio - StocksValue - Expenses + cash_fix) %>%
# mutate(Performance = round(100 * (1 - Invested/StocksValue), 2)) %>%
# select(Date,Deposit,Invest,Dividend,Expense,Deposited,
# Invested,Dividends,Expenses,StocksValue,everything())
#
# return(result)
#
# }
#
#
# ################# PLOTTING FUNCTIONS #################
#
# ####################################################################
# # Portfolio Daily Plot
# #
# # This function lets the user plot his portfolio daily change
# #
# # @family Investment
# # @param stocks_perf Dataframe. Output of the stocks_performance function
# # @param save Boolean. Export plot as an image?
# portfolio_daily_plot <- function(stocks_perf, save = FALSE) {
#
# warning("New portfolio/stocks functions created. Use splot_roi() instead")
#
# stocks_perf <- stocks_perf %>%
# # Get rid of super picks
# filter(abs(RelPer) < 70) %>%
# # Add day before first date with zero data
# rbind(tail(stocks_perf, 1) %>% mutate(Date = Date - 1, TotalPer = 0))
#
# plot <- stocks_perf %>%
# filter(abs(RelPer) < 70) %>%
# mutate(color = ifelse(RelPer > 0, "Pos", "Neg")) %>%
# ggplot() +
# geom_area(aes(x = Date, y = TotalPer/(0.5*max(stocks_perf$TotalPer))), alpha = 0.15) +
# geom_bar(aes(x = Date, y = RelPer, fill = color), stat = 'identity', width = 1) +
# geom_line(aes(x = Date, y = TotalPer/(0.5*max(stocks_perf$TotalPer))), alpha = 0.9, colour = "black") +
# geom_hline(yintercept = 0, alpha = 0.5, color = "black") +
# guides(fill = FALSE) +
# scale_x_date(date_labels = "%b%y") +
# scale_y_continuous(
# labels = comma,
# sec.axis = sec_axis(~.*(0.5 * max(stocks_perf$TotalPer)),
# name = "% Portfolio Var", labels = comma)) +
# labs(y = '% Daily Var', x = '',
# title = 'Daily Portfolio\'s Growth (%) since Start',
# subtitle = paste(stocks_perf$Date[1]," (Includes Expenses): ",
# formatNum(stocks_perf$TotalPer[1],2),"% ($",
# formatNum(stocks_perf$TotalUSD[1], 0),") | $",
# formatNum(stocks_perf$CumPortfolio[1]), sep = "")) +
# theme_lares2()
#
# if (save) plot <- plot + ggsave("portf_daily_change.png", width = 8, height = 5, dpi = 300)
#
# return(plot)
#
# }
#
#
# ####################################################################
# # Stocks Total Performance Plot
# #
# # This function lets the user plot his stocks total performance
# #
# # @family Investment
# # @param stocks_perf Dataframe. Output of the stocks_performance function
# # @param portfolio_perf Dataframe. Output of the portfolio_performance function
# # @param daily Dataframe. Daily data
# # @param trans Dataframe. Transactions data
# # @param cash Dataframe. Cash data
# # @param save Boolean. Export plot as an image?
# stocks_total_plot <- function(stocks_perf, portfolio_perf, daily, trans, cash, save = FALSE) {
#
# warning("New portfolio/stocks functions created. Use splot_summary() instead")
#
# tops <- max(rbind(portfolio_perf$Invested, portfolio_perf$DailyValue))
# summary <- rbind(
# paste0("Portfolio: $", formatNum(stocks_perf$CumPortfolio[1])," | ", max(daily$Date)),
# paste0("Stocks: $", formatNum(sum(stocks_perf$DailyStocks[1]),1)," & Cash: $",
# formatNum(stocks_perf$CumCash[1],1)),
# paste0("ROI: ", formatNum(stocks_perf$TotalPer[1], 2),"% ($",
# formatNum(stocks_perf$TotalUSD[1],0),")"),
# paste0("Dividends: $", formatNum(sum(daily$DailyDiv),0)," & Expenses: $",
# formatNum(sum(daily$Expenses),0)))
#
# plot <- portfolio_perf %>%
# mutate(shapeflag = ifelse(DifUSD < 0, 25, 24), box = -tops/5.5) %>% ungroup() %>%
# mutate(Symbol = paste0(Symbol, " (", formatNum(100*DailyValue/sum(DailyValue)), "%)")) %>%
# ggplot() +
# geom_hline(yintercept = 0, colour = "black") +
# geom_col(aes(x = reorder(Symbol, Invested), y = Invested, fill = Symbol, group = 1)) +
# geom_col(aes(x = Symbol, y = Invested + DifUSD, fill = Symbol), alpha = 0.5) +
# geom_col(aes(x = Symbol, y = box), fill = "grey", alpha = 0.5) +
# geom_point(aes(x = Symbol, y = Invested + DifUSD, shape = shapeflag), colour = "black") +
# scale_shape_identity() +
# geom_text(aes(label = paste0("$",formatNum(DifUSD,1)), y = Invested + DifUSD, x = Symbol),
# size = 2.9, hjust = -.2, vjust = -0.2) +
# geom_text(aes(label = paste0(DifPer, "%"), y = Invested + DifUSD, x = Symbol),
# size = 2.9, hjust = -.2, vjust = 1.2) +
# geom_text(aes(label = paste0("$", formatNum(DailyValue, 1)), y = box, x = Symbol),
# size = 3, hjust = -.1, vjust = -0.2) +
# geom_text(aes(label = paste0(Stocks, " @$", formatNum(DailyValue/Stocks, 2)), y = box, x = Symbol),
# size = 2, hjust = -.1, vjust = 1.5) +
# geom_text(aes(label = paste0("$", formatNum(Invested,1)), y = 0, x = Symbol, colour = Symbol),
# size = 2, hjust = 0, vjust = -0.2) +
# geom_text(aes(label = paste0("@$", formatNum(Invested/Stocks, 2)), y = 0, x = Symbol, colour = Symbol),
# size = 2, hjust = 0, vjust = 1.5) +
# annotate("label", x = length(unique(portfolio_perf$Stocks)) * 0.25, y = tops * 0.6,
# label = vector2text(summary,"\n",quotes = F), size = 3.5, hjust = 0, alpha = 0.55) +
# scale_y_continuous(limits = c(NA, tops*1.1), labels = comma, expand = c(0, 0)) +
# labs(y = '', x = '', title = "Stocks Distribution and Growth") +
# guides(fill = FALSE, colour = FALSE) + coord_flip() +
# theme_lares2(pal = 1)
#
# if (save) plot <- plot + ggsave("portf_stocks_change.png", width = 8, height = 8, dpi = 300)
#
# return(plot)
#
# }
#
#
# ####################################################################
# # Stocks Daily Plot
# #
# # This function lets the user plot stocks daily change
# #
# # @family Investment
# # @param portfolio Dataframe. Output of the portfolio_perf function
# # @param daily Dataframe. Daily data
# # @param weighted Boolean. Should variation values be weighted to the
# # portfolio (or simply compared with initial value)?
# # @param group Boolean. Group stocks by stocks type?
# # @param save Boolean. Export plot as an image?
# stocks_daily_plot <- function(portfolio, daily, weighted = TRUE, group = TRUE, save = FALSE) {
#
# warning("New portfolio/stocks functions created. Use splot_change() instead")
#
# try_require("ggrepel")
#
# d <- daily %>%
# left_join(portfolio %>% select(Symbol,Type), by = 'Symbol') %>%
# arrange(Date) %>% group_by(Symbol) %>%
# mutate(Hist = if (weighted) {100*(1 - cumsum(Amount)/(Stocks*Adjusted))} else {RelChangePHist},
# BuySell = ifelse(Amount > 0, "Bought", ifelse(Amount < 0, "Sold", NA)))
# labels <- d %>% filter(Date == max(Date))
# amounts <- d %>% filter(Amount != 0) %>%
# mutate(label = paste0(round(Amount/1000,1),"K"))
# days <- as.integer(difftime(range(d$Date)[2], range(d$Date)[1], units = "days"))
# plot <- ggplot(d) + ylab('% Change since Start') +
# geom_hline(yintercept = 0, alpha = 0.8, color = "black") +
# geom_line(aes(x = Date, y = Hist, color = Symbol), alpha = 0.9, size = 0.5) +
# geom_point(aes(x = Date, y = Hist, size = abs(Amount), colour = BuySell), alpha = 0.6) +
# scale_y_continuous(position = "right") +
# scale_size(range = c(0, 3.2)) + guides(size = FALSE, colour = FALSE) +
# xlim(min(d$Date), max(d$Date) + round(days*0.08)) +
# labs(title = 'Daily Portfolio\'s Stocks Change (%) since Start', x = '',
# subtitle = 'Showing absolute delta values since first purchase', colour = '') +
# geom_label_repel(data = amounts, aes(x = Date, y = Hist, label = label), size = 2) +
# geom_label(data = labels, aes(x = Date, y = Hist, label = Symbol), size = 2.5, hjust = -0.2, alpha = 0.6) +
# theme_lares2(pal = 2)
#
# if (group) plot <- plot + facet_grid(Type ~ ., scales = "free", switch = "both")
# if (weighted) plot <- plot + labs(subtitle = "Showing real weighted portfolio delta values")
# if (save) plot <- plot + ggsave("portf_stocks_histchange.png", width = 8, height = 5, dpi = 300)
#
# return(plot)
#
# }
#
#
# ####################################################################
# # Portfolio's Category Distribution
# #
# # This function lets the user plot his portfolio's distribution
# #
# # @family Investment
# # @param portfolio_perf Dataframe. Output of the portfolio_performance function
# # @param save Boolean. Export plot as an image?
# portfolio_distr_plot <- function(portfolio_perf, save = FALSE) {
#
# warning("New portfolio/stocks functions created. Use splot_types() instead")
#
# p <- portfolio_perf %>%
# group_by(Type) %>%
# mutate(label = paste0(Type, "\n", formatNum(
# 100*sum(DailyValue)/sum(portfolio_perf$DailyValue)),"%")) %>%
# ggplot() +
# geom_bar(aes(x = "", y = DailyValue, fill = Symbol), width = 1, stat = "identity") +
# facet_grid(. ~ label, scales = "free") +
# scale_y_continuous(labels = comma, expand = c(0, 0)) +
# theme_lares2(pal = 1) +
# labs(x = NULL, y = "Total value", title = "Portfolio's Category Distribution")
# if (save) p <- p + ggsave("portf_distribution.png", width = 8, height = 5, dpi = 300)
# return(p)
# }
#
#
# ####################################################################
# # Portfolio's Daily Cumulative
# #
# # This function lets the user plot his portfolio's daily cumulative
# #
# # @family Investment
# # @param portfolio Dataframe. Results from portfolio_daily()
# # @param save Boolean. Export plot as an image?
# portfolio_total_plot <- function(portfolio, save = FALSE) {
#
# warning("New portfolio/stocks functions created. Use splot_growth() instead")
#
# try_require("ggrepel")
#
# labels <- portfolio %>% filter(Deposit != 0)
# caption <- paste0("Portfolio: $", formatNum(portfolio$Portfolio[nrow(portfolio)]),
# "\nInvested: $", formatNum(portfolio$StocksValue[nrow(portfolio)]))
#
# plot <- data.frame(Date = rep(portfolio$Date, 2),
# type = c(rep("Invested", nrow(portfolio)),
# rep("Cash", nrow(portfolio))),
# values = c(portfolio$StocksValue, portfolio$Cash)) %>%
# ggplot() +
# geom_area(aes(x = Date, y = values, fill = type, group = type),
# colour = "black", size = 0.2, alpha = 0.95) +
# labs(title = " Daily Total Portfolio Value", y = NULL, x = NULL, fill = "") +
# geom_label_repel(data = labels,
# aes(x = Date, y = Portfolio, label = formatNum(Deposit, 0)),
# vjust = -1.3, size = 2.5) +
# scale_y_continuous(position = "right", labels = comma) +
# scale_x_date(date_labels = "%b%y", expand = c(0, 0)) +
# annotate("text", label = caption, x = max(portfolio$Date),
# y = 0.09*max(portfolio$Portfolio),
# size = 3.3, colour = "white", hjust = 1.1) +
# theme_lares2(pal = 1) +
# theme(legend.position = "top", legend.justification = c(0, 1))
#
# if (save) plot <- plot +
# ggsave("portf_total_hist.png", width = 8, height = 5, dpi = 300)
#
# return(plot)
#
# }
#
#
# ################# REPORTING FUNCTIONS #################
#
# ####################################################################
# # Portfolio's Calculations and Plots
# #
# # This function lets the user create his portfolio's calculations and
# # plots for further study.
# #
# # @family Investment
# # @param data List. Containing the following dataframes: portfolio,
# # transactions, cash. They have to follow the original xlsx format
# # @param cash_fix Numeric. If you wish to algebraically sum a value
# # to your cash balance
# # @param tax Numeric. How much of your dividends do the taxes take?
# # Range from 0 to 99
# # @param expenses Numeric. How much does that bank or broker charge per
# # transaction? Absolute value.
# # @param sectors Boolean. Return sectors segmentation for ETFs?
# stocks_objects <- function(data, cash_fix = 0, tax = 30, expenses = 7, sectors = TRUE) {
#
# warning("New portfolio/stocks functions created. Use stocks_obj() instead")
#
# tabs <- c('portfolio','transactions','cash')
# if (sum(names(data) %in% tabs) != 3) {
# not <- names(data)[!names(data) %in% tabs]
# stop(paste("The following objects are obligatory too:", vector2text(not)))
# }
#
# tempdir <- tempdir()
# on.exit(setwd(tempdir))
#
# # Data wrangling and calculations
# message(">>> Downloading historical data for each stock...")
# hist <- get_stocks_hist(symbols = data$portfolio$Symbol,
# from = data$portfolio$StartDate,
# tax = tax)
# daily <- stocks_hist_fix(dailys = hist$values,
# dividends = hist$dividends,
# transactions = data$transactions,
# expenses = expenses)
# stocks_perf <- stocks_performance(daily,
# cash_in = data$cash,
# cash_fix = cash_fix)
# portfolio_perf <- portfolio_performance(portfolio = data$portfolio,
# daily = daily)
# pf_daily <- portfolio_daily(data = data, dailys = hist, cash_fix = cash_fix)
#
# message("Calculations ready...")
#
# # Visualizations
# p1 <- portfolio_daily_plot(stocks_perf)
# p2 <- stocks_total_plot(stocks_perf, portfolio_perf, daily,
# trans = data$transactions,
# cash = data$cash)
# p3 <- stocks_daily_plot(portfolio = data$portfolio, daily, weighted = FALSE)
# p5 <- stocks_daily_plot(portfolio = data$portfolio, daily, weighted = TRUE)
# p6 <- portfolio_total_plot(pf_daily)
# p4 <- portfolio_distr_plot(portfolio_perf)
# if (sectors) p7 <- etf_sector_plot(portfolio_perf)
#
# message("Graphics ready...")
#
# # Consolidation
# results <- list(p_portf_daily_change = p1,
# p_portf_stocks_change = p2,
# p_portf_stocks_histchange_weighted = p5,
# p_portf_stocks_histchange_absolute = p3,
# p_portf_distribution = p4,
# p_portfolio_daily = p6,
# df_portfolio_perf = portfolio_perf,
# df_portfolio_daily = pf_daily,
# df_stocks_perf = stocks_perf,
# df_daily = daily,
# df_hist = hist)
# if (sectors) results[["p_sectors"]] <- p7
#
# unlink(tempdir, recursive = FALSE)
#
# message("All results ready to export!")
# return(results)
# }
#
# ####################################################################
# # Portfolio's Full Report in HTML
# #
# # This function lets the user create his portfolio's full report in HTML using
# # the library's results
# #
# # @family Investment
# # @param results List. Containing the following objects: portf_daily_change,
# # portf_stocks_change, portf_stocks_histchange, portf_distribution & portfolio_perf.
# # You can simply use the stocks_objects(data) if you didn't mess with the order!
# stocks_html <- function(results) {
#
# try_require("rmarkdown")
# dir <- getwd()
# pandoc <- Sys.getenv("RSTUDIO_PANDOC")
# Sys.setenv(RSTUDIO_PANDOC = pandoc)
#
# # Can be more accurate with names but works for me!
# params <- list(
# portf_daily_change = results[["p_portf_daily_change"]],
# portf_stocks_change = results[["p_portf_stocks_change"]],
# portf_stocks_histchange_weighted = results[["p_portf_stocks_histchange_weighted"]],
# portf_stocks_histchange_absolute = results[["p_portf_stocks_histchange_absolute"]],
# portf_distribution = results[["p_portf_distribution"]],
# portf_daily = results[["p_portfolio_daily"]],
# portfolio_perf = results[["df_portfolio_perf"]])
# if ("p_sectors" %in% names(results))
# params[["portf_distribution_sectors"]] <- results[["p_sectors"]]
#
# invisible(file.copy(
# from = system.file("docs", "stocksReport.Rmd", package = "lares"),
# to = dir,
# overwrite = TRUE,
# recursive = FALSE,
# copy.mode = TRUE))
#
# render("stocksReport.Rmd",
# output_file = "stocksReport.html",
# params = params,
# envir = new.env(parent = globalenv()),
# quiet = TRUE)
#
# invisible(file.remove(paste0(dir, "/stocksReport.Rmd")))
# message("HTML report created succesfully!")
# }
#
# ######################### SHORT #####################################
# # df <- get_stocks() # Get data from my Dropbox
# # dfp <- stocks_objects(df) # Get historical data, make calculations and plots
# # stocks_html(dfp) # Create HTML report
# # stocks_report() # Create and send report to my mail
#
# ######################### LONG #####################################
# # df <- get_stocks() # Get data from my Dropbox
# # hist <- get_stocks_hist(symbols = df$portfolio$Symbol, from = df$portfolio$StartDate)
# # daily <- stocks_hist_fix(dailys = hist$values, dividends = hist$dividends, transactions = df$transactions)
|
# Piotr Jastrzebski
# Marcin Nazimek
evaluate.evaluate <-function(testDataFrame, trainDataFrame){
testSet = as.matrix(testDataFrame)
trainSet = as.matrix(trainDataFrame)
##### OUR ALGORITHMS #####
# [TP|FP|FN|TN]
# http://www.eol.ucar.edu/rsf/NEXRAD/dq_fy98/fig_2.4.gif
# kNN classification test
result <- rep(NA, nrow(testSet))
expected = testSet[, ncol(testSet)]
for(number in 1:nrow(testSet)){
result[number] = knn.doIt(3, testSet[number,1:(ncol(testSet)-1)], trainSet)
}
print("kNN n=3 result table [TF|FN]")
print(" [FP|TP]")
resultMatrix <-table(result,expected)
print(resultMatrix)
# kNN classification test
result <- rep(NA, nrow(testSet))
expected = testSet[, ncol(testSet)]
for(number in 1:nrow(testSet)){
result[number] = knn.doIt(5, testSet[number,1:(ncol(testSet)-1)], trainSet)
}
print("kNN n=5 result table [TF|FN]")
print(" [FP|TP]")
resultMatrix <-table(result,expected)
print(resultMatrix)
# kNN classification test
result <- rep(NA, nrow(testSet))
expected = testSet[, ncol(testSet)]
for(number in 1:nrow(testSet)){
result[number] = knn.doIt(7, testSet[number,1:(ncol(testSet)-1)], trainSet)
}
print("kNN n=7 result table [TF|FN]")
print(" [FP|TP]")
resultMatrix <-table(result,expected)
print(resultMatrix)
# Naive Bayes classification test
result <- rep(NA, nrow(testSet))
expected = testSet[, ncol(testSet)]
for(number in 1:nrow(testSet)){
result[number] = bayes.doIt(testSet[number,1:(ncol(testSet)-1)], trainSet)
}
print("Naive Bayes result table [TF|FN]")
print(" [FP|TP]")
resultMatrix <-table(result,expected)
print(resultMatrix)
##### R LANGUAGE ALGORITHMS #####
# Random Forest classification test
result <- rep(NA, nrow(testSet))
expected = testSet[, ncol(testSet)]
r <- forest.makeForest(trainDataFrame)
for(number in 1:nrow(testSet)){
result[number] = predict(r, testDataFrame[number,1:ncol(testDataFrame)-1])
}
print("[R] Random Forest result table [TF|FN]")
print(" [FP|TP]")
resultMatrix <-table(result-1,expected)
print(resultMatrix)
# Tree Classification
result <- rep(NA, nrow(testSet))
expected = testSet[, ncol(testSet)]
tree <- tree.makeTree(trainDataFrame)
for(number in 1:nrow(testSet)){
result[number] = predict(tree, testDataFrame[number,1:ncol(testDataFrame)-1], type = "class")
}
print("Tree Classification result table [TF|FN]")
print(" [FP|TP]")
resultMatrix <-table(result-1,expected)
print(resultMatrix)
# Bayes from R Classification
result <- rep(NA, nrow(testSet))
expected = testSet[, ncol(testSet)]
classifier <- bayesFromR.makeClassifier(trainDataFrame)
for(number in 1:nrow(testSet)){
result[number] = predict(classifier, testDataFrame[number,1:ncol(testDataFrame)-1])
}
print("[R] Bayes classification result table [TF|FN]")
print(" [FP|TP]")
resultMatrix <-table(result,expected)
print(resultMatrix)
# Knn n = 3from R Classification
print("[R] kNN n=3 result table [TF|FN]")
print(" [FP|TP]")
print(knnFromR.doIt(trainDataFrame,testDataFrame,3))
# Knn n = 5 from R Classification
print("[R] kNN n=5 result table [TF|FN]")
print(" [FP|TP]")
print(knnFromR.doIt(trainDataFrame,testDataFrame,5))
# Knn n = 7 from R Classification
print("[R] kNN n=7 result table [TF|FN]")
print(" [FP|TP]")
print(knnFromR.doIt(trainDataFrame,testDataFrame,7))
###############################################################
} | /Temat16/src/utils/evaluate.r | no_license | ktp-forked-repos/MOW | R | false | false | 3,867 | r | # Piotr Jastrzebski
# Marcin Nazimek
evaluate.evaluate <-function(testDataFrame, trainDataFrame){
  ## Evaluates and compares several binary classifiers -- our own
  ## implementations (kNN for k = 3/5/7, naive Bayes) and R-library
  ## counterparts (random forest, decision tree, naive Bayes, kNN) --
  ## on a held-out test set.  For every classifier a confusion matrix of
  ## predicted vs. expected class is printed.
  ##
  ## Args:
  ##   testDataFrame:  data frame whose last column is the class label.
  ##   trainDataFrame: training data frame with the same column layout.
  ##
  ## Returns: nothing useful; all results are printed as a side effect.
  testSet = as.matrix(testDataFrame)
  trainSet = as.matrix(trainDataFrame)
  nTest = nrow(testSet)
  ## Parenthesized on purpose: the unparenthesized 1:ncol(x)-1 used in a few
  ## places of the original meant (1:ncol(x)) - 1 and only worked because R
  ## silently drops a 0 index.
  featureCols = 1:(ncol(testSet) - 1)
  expected = testSet[, ncol(testSet)]  # true class of every test row
  ## Applies classifyRow (row index -> predicted class) to every test row
  ## and returns the vector of predictions.
  classifyAll <- function(classifyRow) {
    result <- rep(NA, nTest)
    for (number in seq_len(nTest)) {
      result[number] = classifyRow(number)
    }
    result
  }
  ##### OUR ALGORITHMS #####
  # [TP|FP|FN|TN]
  # http://www.eol.ucar.edu/rsf/NEXRAD/dq_fy98/fig_2.4.gif
  # kNN classification test (own implementation) for k = 3, 5, 7.
  # NOTE(review): "[TF|FN]" in the headers below looks like a typo for
  # "[TN|FN]"; kept byte-identical to preserve the printed output.
  for (k in c(3, 5, 7)) {
    result <- classifyAll(function(number) {
      knn.doIt(k, testSet[number, featureCols], trainSet)
    })
    print(paste0("kNN n=", k, " result table [TF|FN]"))
    print(" [FP|TP]")
    print(table(result, expected))
  }
  # Naive Bayes classification test (own implementation)
  result <- classifyAll(function(number) {
    bayes.doIt(testSet[number, featureCols], trainSet)
  })
  print("Naive Bayes result table [TF|FN]")
  print(" [FP|TP]")
  print(table(result, expected))
  ##### R LANGUAGE ALGORITHMS #####
  # Random Forest classification test.  The "- 1" shift in the table below
  # suggests predict() returns classes coded as 1/2 -- kept as the original.
  r <- forest.makeForest(trainDataFrame)
  result <- classifyAll(function(number) {
    predict(r, testDataFrame[number, 1:(ncol(testDataFrame) - 1)])
  })
  print("[R] Random Forest result table [TF|FN]")
  print(" [FP|TP]")
  print(table(result - 1, expected))
  # Tree Classification
  tree <- tree.makeTree(trainDataFrame)
  result <- classifyAll(function(number) {
    predict(tree, testDataFrame[number, 1:(ncol(testDataFrame) - 1)], type = "class")
  })
  print("Tree Classification result table [TF|FN]")
  print(" [FP|TP]")
  print(table(result - 1, expected))
  # Bayes from R Classification
  classifier <- bayesFromR.makeClassifier(trainDataFrame)
  result <- classifyAll(function(number) {
    predict(classifier, testDataFrame[number, 1:(ncol(testDataFrame) - 1)])
  })
  print("[R] Bayes classification result table [TF|FN]")
  print(" [FP|TP]")
  print(table(result, expected))
  # kNN from R Classification for k = 3, 5, 7
  for (k in c(3, 5, 7)) {
    print(paste0("[R] kNN n=", k, " result table [TF|FN]"))
    print(" [FP|TP]")
    print(knnFromR.doIt(trainDataFrame, testDataFrame, k))
  }
  ###############################################################
}
#' Athena driver class.
#'
#' S4 driver class for Amazon Athena; a thin subclass of
#' \code{RJDBC::JDBCDriver} used for method dispatch.
#'
#' @keywords internal
#' @export
#' @import RJDBC
#' @import methods
#' @importClassesFrom RJDBC JDBCDriver
setClass("AthenaDriver", contains = "JDBCDriver")
#' Athena DBI wrapper
#'
#' Instantiates the Athena JDBC driver; pass the result to
#' \code{dbConnect} to open a connection.
#'
#' @return An \code{AthenaDriver} object.
#' @export
Athena <- function() {
  new("AthenaDriver")
}
#' Constructor of AthenaDriver
#'
#' Builds the underlying Java driver via \code{RJDBC::JDBC()} and copies
#' its slots into the new S4 object.
#'
#' @name AthenaDriver
#' @rdname AthenaDriver-class
setMethod(initialize, "AthenaDriver",
          function(.Object, ...)
          {
            # RJDBC::JDBC() returns a fully-built JDBCDriver, which is then
            # unboxed slot-by-slot into .Object -- yuck.
            # Should ping RJDBC maintainers, and have them implement
            # initialize methods instead.
            jdbc <- JDBC(driverClass="com.amazonaws.athena.jdbc.AthenaDriver",
                         identifier.quote="'")
            .Object@jdrv = jdbc@jdrv
            .Object@identifier.quote = jdbc@identifier.quote
            # S4 initialize methods must return the (modified) object.
            .Object
          })
#' Athena connection class.
#'
#' Class which represents the Athena connections.
#'
#' @slot region AWS region the connection points at.
#' @slot s3_staging_dir S3 location Athena writes query results to.
#' @slot schema_name Athena schema (database) used by the connection.
#' @export
#' @importClassesFrom RJDBC JDBCConnection
#' @keywords internal
setClass("AthenaConnection",
         contains = "JDBCConnection",
         slots = list(
           region = "character",
           s3_staging_dir = "character",
           schema_name = "character"
         )
)
#' Connect to Amazon Athena
#'
#' Authentication credentials are read from the DefaultAWSCredentialsProviderChain, which includes the .aws folder and
#' environment variables.
#'
#' @param drv An object created by \code{Athena()}
#' @param region the AWS region
#' @param s3_staging_dir S3 bucket where results will be saved to
#' @param schema_name Athena schema to use
#' @param ... Other options
#' @return An \code{AthenaConnection} object.
#' @rdname Athena
#' @seealso \href{http://docs.aws.amazon.com/athena/latest/ug/connect-with-jdbc.html#jdbc-options}{Athena Manual} for more connections options.
#' @export
#' @examples
#' \dontrun{
#' require(DBI)
#' con <- dbConnect(AWR.Athena::Athena(), region='us-west-2',
#'                  s3_staging_dir='s3://nfultz-athena-staging',
#'                  schema_name='default')
#' dbListTables(con)
#' dbGetQuery(con, "Select count(*) from sampledb.elb_logs")
#' }
setMethod("dbConnect", "AthenaDriver",
          function(drv, region, s3_staging_dir, schema_name, ...) {
            # Build the region-specific JDBC URL and defer the actual
            # connection to RJDBC's dbConnect; credentials come from the
            # default AWS provider chain configured above.
            con <- callNextMethod(drv, url=sprintf('jdbc:awsathena://athena.%s.amazonaws.com:443/', region),
                                  s3_staging_dir=s3_staging_dir,
                                  schema_name=schema_name,
                                  aws_credentials_provider_class="com.amazonaws.athena.jdbc.shaded.com.amazonaws.auth.DefaultAWSCredentialsProviderChain", ...)
            # Rewrap the plain JDBCConnection as an AthenaConnection so the
            # connection parameters stay available on the object.
            new("AthenaConnection", jc = con@jc, identifier.quote = drv@identifier.quote, region=region, s3_staging_dir=s3_staging_dir, schema_name=schema_name)
          })
#' Execute an Athena Query
#'
#' Runs a SQL statement against Athena and wraps the result so that the
#' Athena-specific \code{fetch()} method is dispatched on it.
#'
#' @param conn An Athena Connection
#' @param statement A SQL statement
#' @param ... delegated to JDBC
#' @return An \code{AthenaResult} object.
#'
#' @export
setMethod("dbSendQuery", c("AthenaConnection", "character"),
          function(conn, statement, ...) {
            # Delegate execution to RJDBC, then rewrap the generic
            # JDBCResult in our AthenaResult class.
            jdbcRes <- callNextMethod(conn, statement, ...)
            new("AthenaResult",
                jr = jdbcRes@jr,
                md = jdbcRes@md,
                pull = jdbcRes@pull,
                stat = jdbcRes@stat)
          })
#' Athena Results class.
#'
#' Class which represents the Athena results.  Thin subclass of
#' \code{RJDBC::JDBCResult}; it exists so that \code{fetch()} can be
#' specialized with an Athena-compatible block size.
#'
#' @export
#' @importClassesFrom RJDBC JDBCResult
#' @keywords internal
setClass("AthenaResult",
         contains = "JDBCResult"
)
#' Fetch Athena Results
#'
#' Retrieves rows from an executed Athena query.
#'
#' @param res an AthenaResult
#' @param n -1 for all, or how many records to fetch
#' @param ... delegated to JDBC
#'
#' @export
setMethod("fetch", c("AthenaResult", "numeric"),
          function(res, n = -1, ...) {
            # Athena rejects large JDBC fetch sizes with:
            #   java.sql.SQLException: The requested fetchSize is more than
            #   the allowed value in Athena.
            # Rows are therefore pulled in blocks of 999, which stays below
            # the documented cap.
            # The assignment as the last expression keeps the original's
            # invisible return.
            fetched <- callNextMethod(res, n, block = 999, ...)
          })
| /R/athena.R | no_license | hrbrmstr/AWR.Athena | R | false | false | 3,910 | r |
#' Athena driver class.
#'
#' @keywords internal
#' @export
#' @import RJDBC
#' @import methods
#' @importClassesFrom RJDBC JDBCDriver
setClass("AthenaDriver", contains = "JDBCDriver")
#' Athena DBI wrapper
#'
#' @export
Athena <- function() {
new("AthenaDriver")
}
#' Constructor of AthenaDriver
#'
#' @name AthenaDriver
#' @rdname AthenaDriver-class
setMethod(initialize, "AthenaDriver",
function(.Object, ...)
{
# passed to parent builder, than unboxed, yuck
# should ping RJDBC maintainers, and have them implement initialize methods instead
jdbc <- JDBC(driverClass="com.amazonaws.athena.jdbc.AthenaDriver",
identifier.quote="'")
.Object@jdrv = jdbc@jdrv
.Object@identifier.quote = jdbc@identifier.quote
.Object
})
#' Athena connection class.
#'
#' Class which represents the Athena connections.
#'
#' @export
#' @importClassesFrom RJDBC JDBCConnection
#' @keywords internal
setClass("AthenaConnection",
contains = "JDBCConnection",
slots = list(
region = "character",
s3_staging_dir = "character",
schema_name = "character"
)
)
#' Authentication credentials are read from the DefaultAWSCredentialsProviderChain, which includes the .aws folder and
#' environment variables.
#'
#' @param drv An object created by \code{Athena()}
#' @param region the AWS region
#' @param s3_staging_dir S3 bucket where results will be saved to
#' @param schema_name Athena schema to use
#' @param ... Other options
#' @rdname Athena
#' @seealso \href{http://docs.aws.amazon.com/athena/latest/ug/connect-with-jdbc.html#jdbc-options}{Athena Manual} for more connections options.
#' @export
#' @examples
#' \dontrun{
#' require(DBI)
#' con <- dbConnect(AWR.Athena::Athena(), region='us-west-2',
#' s3_staging_dir='s3://nfultz-athena-staging',
#' schema_name='default')
#' dbListTables(con)
#' dbGetQuery(con, "Select count(*) from sampledb.elb_logs")
#' }
setMethod("dbConnect", "AthenaDriver",
function(drv, region, s3_staging_dir, schema_name, ...) {
con <- callNextMethod(drv, url=sprintf('jdbc:awsathena://athena.%s.amazonaws.com:443/', region),
s3_staging_dir=s3_staging_dir,
schema_name=schema_name,
aws_credentials_provider_class="com.amazonaws.athena.jdbc.shaded.com.amazonaws.auth.DefaultAWSCredentialsProviderChain", ...)
new("AthenaConnection", jc = con@jc, identifier.quote = drv@identifier.quote, region=region, s3_staging_dir=s3_staging_dir, schema_name=schema_name)
})
#' Execute an Athena Query
#'
#' @param conn An Athena Connection
#' @param statement A SQL statement
#' @param ... delegated to JDBC
#'
#' @export
setMethod("dbSendQuery", c("AthenaConnection", "character"),
function(conn, statement, ...){
res <- callNextMethod(conn, statement, ...)
new("AthenaResult", jr = res@jr, md = res@md, pull = res@pull, stat=res@stat)
})
#' Athena Results class.
#'
#' Class which represents the Athena results
#'
#' @export
#' @importClassesFrom RJDBC JDBCResult
#' @keywords internal
setClass("AthenaResult",
contains = "JDBCResult"
)
#' Fetch Athena Results
#'
#' @param res an AthenaResult
#' @param n -1 for all, or how many records to fetch
#' @param ... delegated to JDBC
#'
#' @export
setMethod("fetch", c("AthenaResult", "numeric"),
function(res, n = -1, ...) {
# Note that Athena has restrictions on how many results to return,
# which may manifest as the following error:
# Error in .jcall(rp, "I", "fetch", stride, block) :
# java.sql.SQLException: The requested fetchSize is more than the allowed value in Athena.
# Please reduce the fetchSize and try again. Refer to the Athena documentation for valid fetchSize values.
res <- callNextMethod(res, n, block = 999, ...)
})
|
context("beautify")
b <- beautify()
test_that("beautify", {
expect_length(b, 31)
lapply(b, function(e) {
expect_true(is.function(e))
})
})
| /tests/testthat/test_beautify.R | no_license | cran/wyz.code.rdoc | R | false | false | 151 | r | context("beautify")
b <- beautify()
test_that("beautify", {
expect_length(b, 31)
lapply(b, function(e) {
expect_true(is.function(e))
})
})
|
source("https://bioconductor.org/biocLite.R")
biocLite("RRHO")
library(RRHO)
### Graph raw counts
setwd("~/Documents/dimitra/Workspace/RNA-Seq/Kits_comparison/")
pico = read.delim("gene_lists_filtered/Pico.counts.ILB.9579.txt")
v4 = read.delim("gene_lists_filtered/V4.counts.ILB.9579.txt")
truseq = read.delim("gene_lists_filtered/Truseq.counts.ILB.9579.txt")
#Pico vs V4
RRHO.pico.v4 <- RRHO(pico, v4, alternative='enrichment') #alternative='two.sided'
jpeg("RRHO.plots/RRHO.pico.v4.jpg")
image(RRHO.pico.v4$hypermat)
dev.off()
pval.pico.v4 <- pvalRRHO(RRHO.pico.v4,50)
# Pico vs Truseq
RRHO.pico.truseq <- RRHO(pico, truseq, alternative='enrichment')
jpeg("RRHO.plots/RRHO.pico.truseq.jpg")
image(RRHO.pico.truseq$hypermat)
dev.off()
pval.pico.truseq <- pvalRRHO(RRHO.pico.truseq,50)
# V4 vs Truseq
RRHO.v4.truseq <- RRHO(v4, truseq, alternative='enrichment')
jpeg("RRHO.plots/RRHO.v4.truseq.jpg")
image(RRHO.v4.truseq$hypermat)
dev.off()
pval.v4.truseq <- pvalRRHO(RRHO.v4.truseq,50)
#To determine if the overlap between pico and v4 is statistically significant different from the overlap between pico and truseq
rrho.comp = RRHOComparison(v4, pico, truseq, stepsize=10, labels=c("v4","pico","truseq"), plots=TRUE, outputdir="RRHO.plots")
### Graph FCs
all = read.delim("gene_lists_filtered/common.genes.full.info.txt")
colnames(all)
fcs = all %>%
select(id, geneSymbol.Truseq, geneCoordinate.Truseq, logFC.Truseq, logFC.ClontechPico, logFC.ClontechV4)
pico.fc = fcs %>%
select(id, logFC.ClontechPico)
v4.fc = fcs %>%
select(id, logFC.ClontechV4)
truseq.fc = fcs %>%
select(id, logFC.Truseq)
#Pico vs V4
RRHO.fc.pico.v4 <- RRHO(pico.fc, v4.fc, alternative='enrichment')
jpeg("RRHO.plots/RRHO.fc.pico.v4.jpg")
image(RRHO.fc.pico.v4$hypermat)
dev.off()
pval.fc.pico.v4 <- pvalRRHO(RRHO.fc.pico.v4,50)
#Pico vs Truseq
RRHO.fc.pico.truseq <- RRHO(pico.fc, truseq.fc, alternative='enrichment')
jpeg("RRHO.plots/RRHO.fc.pico.truseq.jpg")
image(RRHO.fc.pico.truseq$hypermat)
dev.off()
pval.fc.pico.truseq <- pvalRRHO(RRHO.fc.pico.truseq,50)
#V4 vs Truseq
RRHO.fc.v4.truseq <- RRHO(v4.fc, truseq.fc, alternative='enrichment')
jpeg("RRHO.plots/RRHO.fc.v4.truseq.jpg")
image(RRHO.fc.v4.truseq$hypermat)
dev.off()
pval.fc.v4.truseq <- pvalRRHO(RRHO.fc.v4.truseq,50)
rrho.comparison.fc = RRHOComparison(v4.fc, pico.fc, truseq.fc, stepsize=10, labels=c("v4","pico","truseq"), plots=TRUE, outputdir="RRHO.plots")
### Graph Pvalues
all = read.delim("gene_lists_filtered/common.genes.full.info.txt")
pvalues = all %>%
select(id, geneSymbol.Truseq, geneCoordinate.Truseq, LimmaVoom.pvalue.Truseq, LimmaVoom.pvalue.ClontechPico, LimmaVoom.pvalue.ClontechV4)
pico.pv = pvalues %>%
select(id, LimmaVoom.pvalue.ClontechPico)
v4.pv = pvalues %>%
select(id, LimmaVoom.pvalue.ClontechV4)
truseq.pv = pvalues %>%
select(id, LimmaVoom.pvalue.Truseq)
#Pico vs V4
RRHO.pvalues.pico.v4 <- RRHO(pico.pv, v4.pv, alternative='enrichment')
jpeg("RRHO.plots/RRHO.pvalues.pico.v4.jpg")
image(RRHO.pvalues.pico.v4$hypermat)
dev.off()
pval.pvalues.pico.v4 <- pvalRRHO(RRHO.pvalues.pico.v4,50)
#Pico vs Truseq
RRHO.pvalues.pico.truseq <- RRHO(pico.pv, truseq.pv, alternative='enrichment')
jpeg("RRHO.plots/RRHO.pvalues.pico.truseq.jpg")
image(RRHO.pvalues.pico.truseq$hypermat)
dev.off()
pval.pvalues.pico.truseq <- pvalRRHO(RRHO.pvalues.pico.truseq,50)
#V4 vs Truseq
RRHO.pvalues.v4.truseq <- RRHO(v4.pv, truseq.pv, alternative='enrichment')
jpeg("RRHO.plots/RRHO.pvalues.v4.truseq.jpg")
image(RRHO.pvalues.v4.truseq$hypermat)
dev.off()
pval.pvalues.v4.truseq <- pvalRRHO(RRHO.pvalues.v4.truseq,50)
rrho.comparison.pvalues = RRHOComparison(v4.pv, pico.pv, truseq.pv, stepsize=10, labels=c("v4","pico","truseq"), plots=TRUE, outputdir="RRHO.plots")
| /scripts/rrho.R | no_license | dimitras/scatterplots-clustering-in-R | R | false | false | 3,748 | r | source("https://bioconductor.org/biocLite.R")
biocLite("RRHO")
library(RRHO)
### Graph raw counts
setwd("~/Documents/dimitra/Workspace/RNA-Seq/Kits_comparison/")
pico = read.delim("gene_lists_filtered/Pico.counts.ILB.9579.txt")
v4 = read.delim("gene_lists_filtered/V4.counts.ILB.9579.txt")
truseq = read.delim("gene_lists_filtered/Truseq.counts.ILB.9579.txt")
#Pico vs V4
RRHO.pico.v4 <- RRHO(pico, v4, alternative='enrichment') #alternative='two.sided'
jpeg("RRHO.plots/RRHO.pico.v4.jpg")
image(RRHO.pico.v4$hypermat)
dev.off()
pval.pico.v4 <- pvalRRHO(RRHO.pico.v4,50)
# Pico vs Truseq
RRHO.pico.truseq <- RRHO(pico, truseq, alternative='enrichment')
jpeg("RRHO.plots/RRHO.pico.truseq.jpg")
image(RRHO.pico.truseq$hypermat)
dev.off()
pval.pico.truseq <- pvalRRHO(RRHO.pico.truseq,50)
# V4 vs Truseq
RRHO.v4.truseq <- RRHO(v4, truseq, alternative='enrichment')
jpeg("RRHO.plots/RRHO.v4.truseq.jpg")
image(RRHO.v4.truseq$hypermat)
dev.off()
pval.v4.truseq <- pvalRRHO(RRHO.v4.truseq,50)
#To determine if the overlap between pico and v4 is statistically significant different from the overlap between pico and truseq
rrho.comp = RRHOComparison(v4, pico, truseq, stepsize=10, labels=c("v4","pico","truseq"), plots=TRUE, outputdir="RRHO.plots")
### Graph FCs
## Rank-Rank Hypergeometric Overlap (RRHO) comparison of three RNA-seq
## library-prep kits (Truseq, Clontech Pico, Clontech V4).
## NOTE(review): assumes dplyr and the RRHO package are attached by the
## caller, and that the output directory "RRHO.plots/" exists -- confirm.
### Part 1: rank genes by log fold-change
all = read.delim("gene_lists_filtered/common.genes.full.info.txt")
colnames(all)
## Keep gene ids plus the per-kit log fold-change columns.
fcs = all %>%
  select(id, geneSymbol.Truseq, geneCoordinate.Truseq, logFC.Truseq, logFC.ClontechPico, logFC.ClontechV4)
## Two-column (id, metric) frames: the input format RRHO() expects.
pico.fc = fcs %>%
  select(id, logFC.ClontechPico)
v4.fc = fcs %>%
  select(id, logFC.ClontechV4)
truseq.fc = fcs %>%
  select(id, logFC.Truseq)
#Pico vs V4
RRHO.fc.pico.v4 <- RRHO(pico.fc, v4.fc, alternative='enrichment')
jpeg("RRHO.plots/RRHO.fc.pico.v4.jpg")
image(RRHO.fc.pico.v4$hypermat)
dev.off()
## Permutation p-value for the overlap map (50 permutations).
pval.fc.pico.v4 <- pvalRRHO(RRHO.fc.pico.v4,50)
#Pico vs Truseq
RRHO.fc.pico.truseq <- RRHO(pico.fc, truseq.fc, alternative='enrichment')
jpeg("RRHO.plots/RRHO.fc.pico.truseq.jpg")
image(RRHO.fc.pico.truseq$hypermat)
dev.off()
pval.fc.pico.truseq <- pvalRRHO(RRHO.fc.pico.truseq,50)
#V4 vs Truseq
RRHO.fc.v4.truseq <- RRHO(v4.fc, truseq.fc, alternative='enrichment')
jpeg("RRHO.plots/RRHO.fc.v4.truseq.jpg")
image(RRHO.fc.v4.truseq$hypermat)
dev.off()
pval.fc.v4.truseq <- pvalRRHO(RRHO.fc.v4.truseq,50)
## Three-way comparison across all kits; writes plots into RRHO.plots/.
rrho.comparison.fc = RRHOComparison(v4.fc, pico.fc, truseq.fc, stepsize=10, labels=c("v4","pico","truseq"), plots=TRUE, outputdir="RRHO.plots")
### Graph Pvalues
## Part 2: same pipeline, ranking genes by LimmaVoom p-values instead.
all = read.delim("gene_lists_filtered/common.genes.full.info.txt")
pvalues = all %>%
  select(id, geneSymbol.Truseq, geneCoordinate.Truseq, LimmaVoom.pvalue.Truseq, LimmaVoom.pvalue.ClontechPico, LimmaVoom.pvalue.ClontechV4)
pico.pv = pvalues %>%
  select(id, LimmaVoom.pvalue.ClontechPico)
v4.pv = pvalues %>%
  select(id, LimmaVoom.pvalue.ClontechV4)
truseq.pv = pvalues %>%
  select(id, LimmaVoom.pvalue.Truseq)
#Pico vs V4
RRHO.pvalues.pico.v4 <- RRHO(pico.pv, v4.pv, alternative='enrichment')
jpeg("RRHO.plots/RRHO.pvalues.pico.v4.jpg")
image(RRHO.pvalues.pico.v4$hypermat)
dev.off()
pval.pvalues.pico.v4 <- pvalRRHO(RRHO.pvalues.pico.v4,50)
#Pico vs Truseq
RRHO.pvalues.pico.truseq <- RRHO(pico.pv, truseq.pv, alternative='enrichment')
jpeg("RRHO.plots/RRHO.pvalues.pico.truseq.jpg")
image(RRHO.pvalues.pico.truseq$hypermat)
dev.off()
pval.pvalues.pico.truseq <- pvalRRHO(RRHO.pvalues.pico.truseq,50)
#V4 vs Truseq
RRHO.pvalues.v4.truseq <- RRHO(v4.pv, truseq.pv, alternative='enrichment')
jpeg("RRHO.plots/RRHO.pvalues.v4.truseq.jpg")
image(RRHO.pvalues.v4.truseq$hypermat)
dev.off()
pval.pvalues.v4.truseq <- pvalRRHO(RRHO.pvalues.v4.truseq,50)
rrho.comparison.pvalues = RRHOComparison(v4.pv, pico.pv, truseq.pv, stepsize=10, labels=c("v4","pico","truseq"), plots=TRUE, outputdir="RRHO.plots")
|
## Function that take two arguments: the 2-character abbreviated name of a state and an
## outcome name. The function reads the outcome-of-care-measures.csv and returns a character vector
## with the name of the hospital that has the best (i.e. lowest) 30-day mortality for the specified outcome
## in that state. The hospital name is the name provided in the Hospital.Name variable. The outcomes can
## be one of "heart attack", "heart failure", or "pneumonia". Hospitals that do not have data on a particular
## outcome should be excluded from the set of hospitals when deciding the rankings.
## Handling ties. If there is a tie for the best hospital for a given outcome, then the hospital names should
## be sorted in alphabetical order and the first hospital in that set should be chosen
## (i.e. if hospitals "b", "c", and "f" are tied for best, then hospital "b" should be returned).
## Return the name of the hospital in `state` with the lowest 30-day
## mortality for `outcome` ("heart attack", "heart failure" or "pneumonia").
## Ties are broken alphabetically by hospital name (see bestRankHospname).
best <- function(state, outcome) {
  ## Read the outcome data via a relative path instead of setwd():
  ## the original setwd()/setwd(old.dir) pair never restored the working
  ## directory when read.csv() failed.
  outcomeData <- read.csv(file.path('rprog-data-ProgAssignment3-data',
                                    'outcome-of-care-measures.csv'),
                          header = TRUE, colClasses = 'character')
  ## Column holding the 30-day mortality rate for each supported outcome.
  outcomeCols <- c('heart attack' = 11, 'heart failure' = 17, 'pneumonia' = 23)
  ## Convert the three rate columns to numeric; "Not Available" entries
  ## become NA, so the coercion warnings are suppressed deliberately.
  for (col in outcomeCols) {
    outcomeData[, col] <- suppressWarnings(as.numeric(outcomeData[, col]))
  }
  ## Check that state and outcome are valid (state abbreviations: column 7).
  if (is.na(match(state, unique(outcomeData[, 7])))) {
    ## stop will display message as - Error in best("TXd", "heart failure") : invalid state
    stop(' invalid state')
  }
  else if (is.na(match(outcome, names(outcomeCols)))) {
    ## stop will display message as - Error in best("TX", "hearsst failures") : invalid outcome
    stop(' invalid outcome')
  }
  ## Delegate the ranking (including the alphabetical tie-break) to
  ## bestRankHospname() with the column matching the requested outcome.
  bestRankHospname(outcomeData, outcomeCols[[outcome]], state)
}
## This function will subset the dataframe to States and considers the rate
## column number input to find hospital with least death rate for that outcome.
## And then subsets dataframe to 2 columns - hospital name and rate - and
## orders rate and hospital in ascending order.
## Function returns the hospital with least death rate i.e. best hospital
## Return the hospital (column 2) with the lowest rate (column `outcomecol`)
## among hospitals in `state` (state abbreviation is column 7). Hospitals
## with NA rates are dropped; ties are broken alphabetically by name.
bestRankHospname <- function(df,outcomecol,state){
  ## Keep only this state's rows and the two columns we need:
  ## hospital name (column 2) and the outcome rate.
  dfsubset <- df[which(df[, 7] == state), c(2, outcomecol)]
  colnames(dfsubset) <- c('HospName', 'Rates')
  ## Drop hospitals with no data for this outcome.
  dfsubset <- dfsubset[complete.cases(dfsubset), ]
  ## Order by rate, then by name, so alphabetical order breaks rate ties.
  dfsubset <- dfsubset[order(dfsubset$Rates, dfsubset$HospName), ]
  ## Best (lowest-rate, alphabetically first) hospital; NA if no rows.
  dfsubset$HospName[1]
}
## Not to be used. This is an alternate function to get the minimum rate
## Alternate implementation kept for reference: finds the minimum rate and
## resolves ties explicitly. Same arguments and return as bestRankHospname().
bestRankHospnameBACKUPOPTION <- function(df,outcomecol,state){
  ## Subset to the requested state (state abbreviation is column 7).
  dfstatesubset <- subset(df, df[,7] == state)
  minrate <- min(dfstatesubset[,outcomecol], na.rm = TRUE)
  print(minrate)
  ## Row indices (within the state subset) tied for the minimum rate.
  minrateindex <- which(dfstatesubset[,outcomecol] == minrate)
  if(length(minrateindex) > 1){
    ## BUG FIX: the original looped `for(i in minrateindexlen)`, which
    ## iterated over the single value length(minrateindex) rather than over
    ## each tied row index, so only one (arbitrary) hospital was collected.
    bestRankhosp <- sort(dfstatesubset[minrateindex, 2], decreasing = FALSE)
    print(bestRankhosp)
  }
  else {
    bestRankhosp <- dfstatesubset[minrateindex, 2]
  }
  ## Alphabetically first hospital among the best-rated.
  return(bestRankhosp[1])
}
| /Assignment function files -Final Copies/Programming Assignment 3 Hospital Quality/best.R | no_license | ashmachado/datasciencecoursera | R | false | false | 4,964 | r | ## Function that take two arguments: the 2-character abbreviated name of a state and an
## outcome name. The function reads the outcome-of-care-measures.csv and returns a character vector
## with the name of the hospital that has the best (i.e. lowest) 30-day mortality for the specified outcome
## in that state. The hospital name is the name provided in the Hospital.Name variable. The outcomes can
## be one of "heart attack", "heart failure", or "pneumonia". Hospitals that do not have data on a particular
## outcome should be excluded from the set of hospitals when deciding the rankings.
## Handling ties. If there is a tie for the best hospital for a given outcome, then the hospital names should
## be sorted in alphabetical order and the first hospital in that set should be chosen
## (i.e. if hospitals "b", "c", and "f" are tied for best, then hospital "b" should be returned).
## Return the name of the hospital in `state` with the lowest 30-day
## mortality for `outcome` ("heart attack", "heart failure" or "pneumonia").
## Ties are broken alphabetically by hospital name (see bestRankHospname).
best <- function(state, outcome) {
  ## Read the outcome data via a relative path instead of setwd():
  ## the original setwd()/setwd(old.dir) pair never restored the working
  ## directory when read.csv() failed.
  outcomeData <- read.csv(file.path('rprog-data-ProgAssignment3-data',
                                    'outcome-of-care-measures.csv'),
                          header = TRUE, colClasses = 'character')
  ## Column holding the 30-day mortality rate for each supported outcome.
  outcomeCols <- c('heart attack' = 11, 'heart failure' = 17, 'pneumonia' = 23)
  ## Convert the three rate columns to numeric; "Not Available" entries
  ## become NA, so the coercion warnings are suppressed deliberately.
  for (col in outcomeCols) {
    outcomeData[, col] <- suppressWarnings(as.numeric(outcomeData[, col]))
  }
  ## Check that state and outcome are valid (state abbreviations: column 7).
  if (is.na(match(state, unique(outcomeData[, 7])))) {
    ## stop will display message as - Error in best("TXd", "heart failure") : invalid state
    stop(' invalid state')
  }
  else if (is.na(match(outcome, names(outcomeCols)))) {
    ## stop will display message as - Error in best("TX", "hearsst failures") : invalid outcome
    stop(' invalid outcome')
  }
  ## Delegate the ranking (including the alphabetical tie-break) to
  ## bestRankHospname() with the column matching the requested outcome.
  bestRankHospname(outcomeData, outcomeCols[[outcome]], state)
}
## This function will subset the dataframe to States and considers the rate
## column number input to find hospital with least death rate for that outcome.
## And then subsets dataframe to 2 columns - hospital name and rate - and
## orders rate and hospital in ascending order.
## Function returns the hospital with least death rate i.e. best hospital
## Return the hospital (column 2) with the lowest rate (column `outcomecol`)
## among hospitals in `state` (state abbreviation is column 7). Hospitals
## with NA rates are dropped; ties are broken alphabetically by name.
bestRankHospname <- function(df,outcomecol,state){
  ## Keep only this state's rows and the two columns we need:
  ## hospital name (column 2) and the outcome rate.
  dfsubset <- df[which(df[, 7] == state), c(2, outcomecol)]
  colnames(dfsubset) <- c('HospName', 'Rates')
  ## Drop hospitals with no data for this outcome.
  dfsubset <- dfsubset[complete.cases(dfsubset), ]
  ## Order by rate, then by name, so alphabetical order breaks rate ties.
  dfsubset <- dfsubset[order(dfsubset$Rates, dfsubset$HospName), ]
  ## Best (lowest-rate, alphabetically first) hospital; NA if no rows.
  dfsubset$HospName[1]
}
## Not to be used. This is an alternate function to get the minimum rate
## Alternate implementation kept for reference: finds the minimum rate and
## resolves ties explicitly. Same arguments and return as bestRankHospname().
bestRankHospnameBACKUPOPTION <- function(df,outcomecol,state){
  ## Subset to the requested state (state abbreviation is column 7).
  dfstatesubset <- subset(df, df[,7] == state)
  minrate <- min(dfstatesubset[,outcomecol], na.rm = TRUE)
  print(minrate)
  ## Row indices (within the state subset) tied for the minimum rate.
  minrateindex <- which(dfstatesubset[,outcomecol] == minrate)
  if(length(minrateindex) > 1){
    ## BUG FIX: the original looped `for(i in minrateindexlen)`, which
    ## iterated over the single value length(minrateindex) rather than over
    ## each tied row index, so only one (arbitrary) hospital was collected.
    bestRankhosp <- sort(dfstatesubset[minrateindex, 2], decreasing = FALSE)
    print(bestRankhosp)
  }
  else {
    bestRankhosp <- dfstatesubset[minrateindex, 2]
  }
  ## Alphabetically first hospital among the best-rated.
  return(bestRankhosp[1])
}
|
library(tidyquant)
#### Setup
context(paste0("Testing tq_performance"))
## NOTE(review): this setup downloads ~6 years of daily prices from a web
## API at test time, so the suite needs network access -- consider cached
## fixtures or skip_if_offline().
# Get returns for individual stock components grouped by symbol
Ra <- c("AAPL", "GOOG", "NFLX") %>%
    tq_get(get = "stock.prices",
           from = "2010-01-01",
           to = "2015-12-31") %>%
    group_by(symbol) %>%
    tq_transmute(adjusted, periodReturn, period = "monthly", col_rename = "Ra")
# Get returns for SP500 as baseline
Rb <- "^GSPC" %>%
    tq_get(get = "stock.prices",
           from = "2010-01-01",
           to = "2015-12-31") %>%
    tq_transmute(adjusted, periodReturn, period = "monthly", col_rename = "Rb")
# Merge stock returns with baseline
RaRb <- left_join(Ra, Rb, by = c("date" = "date"))
# Get performance metrics
test1 <- RaRb %>%
    tq_performance(Ra = Ra, performance_fun = SharpeRatio, p = 0.95)
test2 <- RaRb %>%
    tq_performance(Ra = Ra, Rb = Rb, performance_fun = table.CAPM)
#### Test Successes -----
test_that("Test1 returns grouped tibble of appropriate size", {
    # Tibble
    expect_is(test1, "tbl")
    # Number of groups (one per symbol)
    expect_equal(dplyr::group_size(test1) %>% length(), 3)
    # Rows
    expect_equal(nrow(test1), 3)
    # Cols
    # expect_equal(ncol(test1), 4)
})
test_that("Test2 returns grouped tibble of appropriate size", {
    # Tibble
    expect_is(test2, "tbl")
    # Number of groups (one per symbol)
    expect_equal(dplyr::group_size(test2) %>% length(), 3)
    # Rows
    expect_equal(nrow(test2), 3)
    # Cols
    # expect_equal(ncol(test2), 13)
})
| /tests/testthat/test-tq_performance.R | no_license | aborodya/tidyquant | R | false | false | 1,500 | r | library(tidyquant)
#### Setup
context(paste0("Testing tq_performance"))
# Get returns for individual stock components grouped by symbol
Ra <- c("AAPL", "GOOG", "NFLX") %>%
tq_get(get = "stock.prices",
from = "2010-01-01",
to = "2015-12-31") %>%
group_by(symbol) %>%
tq_transmute(adjusted, periodReturn, period = "monthly", col_rename = "Ra")
# Get returns for SP500 as baseline
Rb <- "^GSPC" %>%
tq_get(get = "stock.prices",
from = "2010-01-01",
to = "2015-12-31") %>%
tq_transmute(adjusted, periodReturn, period = "monthly", col_rename = "Rb")
# Merge stock returns with baseline
RaRb <- left_join(Ra, Rb, by = c("date" = "date"))
# Get performance metrics
test1 <- RaRb %>%
tq_performance(Ra = Ra, performance_fun = SharpeRatio, p = 0.95)
test2 <- RaRb %>%
tq_performance(Ra = Ra, Rb = Rb, performance_fun = table.CAPM)
#### Test Successes -----
test_that("Test1 returns grouped tibble of appropriate size", {
# Tibble
expect_is(test1, "tbl")
# Number of groups
expect_equal(dplyr::group_size(test1) %>% length(), 3)
# Rows
expect_equal(nrow(test1), 3)
# Cols
# expect_equal(ncol(test1), 4)
})
test_that("Test2 returns grouped tibble of appropriate size", {
# Tibble
expect_is(test2, "tbl")
# Number of groups
expect_equal(dplyr::group_size(test2) %>% length(), 3)
# Rows
expect_equal(nrow(test2), 3)
# Cols
# expect_equal(ncol(test2), 13)
})
|
## Plot 2: Global Active Power vs. time for 1-2 Feb 2007
## (household power consumption dataset).
library(dplyr)
## Force the C locale so weekday labels are in English on any machine.
Sys.setlocale("LC_TIME", "C")
## The dataset encodes missing values as "?"; declaring na.strings makes the
## numeric columns parse cleanly (the original read them as character and
## relied on silent NA coercion later).
data <- read.table("household_power_consumption.txt", sep = ";", header = TRUE, na.strings = "?")
datarequired <- filter(data, Date == "1/2/2007" | Date == "2/2/2007")
datarequired[, 1] <- as.Date(datarequired[, 1], "%d/%m/%Y")
## Combine date and time into a single POSIXct timestamp for the x-axis.
datarequired$numformat <- as.POSIXct(paste(datarequired[, 1], datarequired[, 2]))
png("plot2.png", width = 480, height = 480, bg = "white")
plot(datarequired$numformat, as.numeric(datarequired[, 3]), type = "l",
     xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
Sys.setlocale("LC_TIME", "C")
data <- read.table("household_power_consumption.txt", sep=";", header=TRUE)
datarequired <- filter(data, Date=="1/2/2007" | Date=="2/2/2007")
datarequired[,1] <- as.Date(datarequired[,1], "%d/%m/%Y")
datarequired$numformat <- as.POSIXct(paste(datarequired[,1], datarequired[,2]))
png("plot2.png", width=480, height=480, bg="white")
plot(datarequired$numformat, as.numeric(datarequired[,3]), type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fresh-package.R
\docType{package}
\name{fresh}
\alias{fresh}
\alias{fresh-package}
\title{Fresh 'Shiny' Themes}
\description{
Customize 'Bootstrap' and 'Bootswatch' themes, like colors, fonts, grid layout, to use in 'Shiny' applications.
}
\author{
Victor Perrier & Fanny Meyer (\href{https://twitter.com/dreamRs_fr}{@dreamRs_fr})
}
| /man/fresh.Rd | no_license | pythseq/fresh | R | false | true | 411 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fresh-package.R
\docType{package}
\name{fresh}
\alias{fresh}
\alias{fresh-package}
\title{Fresh 'Shiny' Themes}
\description{
Customize 'Bootstrap' and 'Bootswatch' themes, like colors, fonts, grid layout, to use in 'Shiny' applications.
}
\author{
Victor Perrier & Fanny Meyer (\href{https://twitter.com/dreamRs_fr}{@dreamRs_fr})
}
|
#' Compute pointing accuracy for a quest
#'
#' Returns a small data frame with one row per "Point in Direction" step.
#' Based on the quest it searches for the correct place in the player log and
#' calculates the pointing direction from the player position at that time and
#' the position of the goal of the quest as suggested by the last known
#' transform.
#'
#' @param quest_set quest set the quest belongs to
#' @param trial_sets trial sets used to locate the player/event logs
#' @param quest as returned by get_quest
#' @param choosings event times of ChooseDirection in the Unity log - data.frame
#'   (computed from trial_sets when NULL)
#' @param correct_angle optional fixed target angle; when NULL the target is
#'   derived from the quest start/finish positions
#' @return data.frame with columns pointing_order, target_angle, chosen_angle,
#'   quest_end_angle, point_start, point_end; rows stay NA when no matching
#'   ChooseDirection event was found
pointing_accuracy = function(quest_set, trial_sets, quest, choosings = NULL, correct_angle = NULL){
  ## Max allowed gap (same time units as the logs) between the quest-log
  ## StepFinished time and the player-log ChooseDirection time for the two
  ## records to be treated as the same event.
  ALLOWED_DIFFERENCE = 0.1
  pointing_times = get_step_timespans(quest, "Point in Direction")
  n_pointing = nrow(pointing_times)
  ## Preallocate the result; rows remain NA if no match is found below.
  df = data.frame(pointing_order = as.numeric(rep(NA, n_pointing)),
                  target_angle = as.numeric(rep(NA, n_pointing)),
                  chosen_angle = as.numeric(rep(NA, n_pointing)),
                  quest_end_angle = as.numeric(rep(NA, n_pointing)),
                  point_start = as.numeric(rep(NA, n_pointing)),
                  point_end = as.numeric(rep(NA, n_pointing)))
  # assumes that pointing times are in order
  player_log = player_log_quest_trial(quest_set, trial_sets, quest = quest)
  quest_start_finish = get_quest_start_finish_positions(quest_set, trial_sets, quest, include_teleport = F)
  if(is.null(choosings)) choosings = get_event_times(trial_sets, "ChooseDirection")
  quest_trial_set_id = get_quest_trial_set_id(quest_set, quest)
  if (is.null(quest_trial_set_id)) return(df)
  #' splitting to the first and second part
  #' First should be occurring at the start and second at the end
  for (i in 1:n_pointing){
    # the time span of this "Point in Direction" step
    dt_time = pointing_times[i, ]
    #' This should be more accurate than StepFinished - selects ChooseDirection event
    #' from the player log rather than from the quest log
    player_point_time = choosings %>% filter(set_id == quest_trial_set_id) %>%
      filter(Time > dt_time$StepActivated) %>%
      filter((Time - dt_time$StepFinished) < ALLOWED_DIFFERENCE) %>%
      select(Time) %>% first
    ## No (or ambiguous) matching event: leave this row NA.
    if(length(player_point_time) != 1) next
    #' selects the correct position from the player log
    ## (first player-log record after the ChooseDirection time; data.table)
    pointing_moment = player_log[Time > player_point_time, .SD[1]]
    player_pos = pointing_moment[, c(Position.x, Position.z)]
    if(is.null(correct_angle)){
      ## First pointing targets the quest finish, later ones the start.
      if(i == 1){target_pos = quest_start_finish$finish} else {target_pos = quest_start_finish$start}
      target_angle = angle_from_positions(player_pos, target_pos)
    } else {
      target_angle = correct_angle
    }
    point_start = dt_time$StepActivated
    point_end = player_point_time
    quest_end_angle = get_rotation_at_time(quest_set, trial_sets, quest, point_start)
    chosen_angle = pointing_moment$Rotation.X
    df[i, ] = c(i, target_angle, chosen_angle, quest_end_angle, point_start, point_end)
  }
  return(df)
}
#' Based on the quest it searches for correct place in the player log and calculates pointing direction from the player
#' position at that time adn the position of the goal of the quest as suggested from the last known transform
#'
#' @param choosings - evet times of ChooseDirections in Unity log - data.frame
#' @param quest - as returend by get_quest
#' @return data.frame
pointing_accuracy = function(quest_set, trial_sets, quest, choosings = NULL, correct_angle = NULL){
ALLOWED_DIFFERENCE = 0.1
pointing_times = get_step_timespans(quest, "Point in Direction")
n_pointing = nrow(pointing_times)
df = data.frame(pointing_order = as.numeric(rep(NA, n_pointing)),
target_angle = as.numeric(rep(NA, n_pointing)),
chosen_angle = as.numeric(rep(NA, n_pointing)),
quest_end_angle = as.numeric(rep(NA, n_pointing)),
point_start = as.numeric(rep(NA, n_pointing)),
point_end = as.numeric(rep(NA, n_pointing)))
# assumes that pointing times are in order
player_log = player_log_quest_trial(quest_set, trial_sets, quest = quest)
quest_start_finish = get_quest_start_finish_positions(quest_set, trial_sets, quest, include_teleport = F)
if(is.null(choosings)) choosings = get_event_times(trial_sets, "ChooseDirection")
quest_trial_set_id = get_quest_trial_set_id(quest_set, quest)
if (is.null(quest_trial_set_id)) return(df)
#' splitting to the first and second part
#' First shoudl be occuring on the start and second on the end
for (i in 1:n_pointing){
# time is the tiem between
dt_time = pointing_times[i, ]
#' This should be more accurate than StepFinished - selects ChooseDirection event
#' from the player log rather than from the quest log
player_point_time = choosings %>% filter(set_id == quest_trial_set_id) %>%
filter(Time > dt_time$StepActivated) %>%
filter((Time - dt_time$StepFinished) < ALLOWED_DIFFERENCE) %>%
select(Time) %>% first
if(length(player_point_time) != 1) next
#' selecets the correct position from the player log
pointing_moment = player_log[Time > player_point_time, .SD[1]]
player_pos = pointing_moment[, c(Position.x, Position.z)]
if(is.null(correct_angle)){
if(i == 1){target_pos = quest_start_finish$finish} else {target_pos = quest_start_finish$start}
target_angle = angle_from_positions(player_pos, target_pos)
} else {
target_angle = correct_angle
}
point_start = dt_time$StepActivated
point_end = player_point_time
quest_end_angle = get_rotation_at_time(quest_set, trial_sets, quest, point_start)
chosen_angle = pointing_moment$Rotation.X
df[i, ] = c(i, target_angle, chosen_angle, quest_end_angle, point_start, point_end)
}
return(df)
} |
## Repeated hold-out evaluation (500 resamples) of a C5.0 decision tree on
## the built-in iris data. Requires the caret (createDataPartition) and
## C50 (C5.0) packages to be attached by the caller.
acc <- numeric(500)  # preallocate instead of growing with c() each iteration
for(i in seq_len(500))
{
  print(i)
  ## Data partition: stratified 85% train / 15% test split by Species
  inTraininglocal <- createDataPartition(iris$Species, p = .85, list = FALSE)
  training1 <- iris[inTraininglocal, ]
  testing <- iris[-inTraininglocal, ]
  ## Model building
  fittree <- C5.0(training1$Species ~ ., data = training1)
  ## Predicting on the held-out rows (column 5 is the Species label);
  ## call the generic predict() and let S3 dispatch pick predict.C5.0
  pred <- predict(fittree, testing[, -5])
  a <- table(testing$Species, pred)
  ## Accuracy = proportion on the confusion-matrix diagonal
  acc[i] <- sum(diag(a)) / sum(a)
}
summary(acc)
for(i in 1:500)
{
print(i)
##Data Partition
inTraininglocal<-createDataPartition(iris$Species,p=.85,list = F)
training1<-iris[inTraininglocal,]
testing<-iris[-inTraininglocal,]
#Model building
fittree <- C5.0(training1$Species~., data=training1)
#Predicting
pred<-predict.C5.0(fittree,testing[,-5])
a<-table(testing$Species,pred)
#Accuracy
acc<-c(acc,sum(diag(a))/sum(a))
}
summary(acc)
|
# Return the element-wise sum of x and y.
add2 <- function(x, y) {
  total <- x + y
  total
}
# Return only the elements of x that exceed 10.
above10 <- function(x) {
  x[x > 10]
}
# Return only the elements of x strictly greater than the threshold n.
above <- function(x, n) {
  x[x > n]
}
# Compute the mean of every column of a matrix or data frame, ignoring NAs.
#
# y: a matrix or data.frame with numeric columns.
# Returns an unnamed numeric vector with one mean per column (same as the
# original manual loop).
columnmean <- function(y) {
  # vapply guarantees a numeric(1) per column (safer than sapply);
  # na.rm = TRUE replaces the unsafe, reassignable `T` literal.
  vapply(seq_len(ncol(y)), function(j) mean(y[, j], na.rm = TRUE), numeric(1))
}
x+y
}
above10 <- function(x) {
use <- x>10
x[use]
}
above <- function(x,n) {
use <- x>n
x[use]
}
columnmean <- function(y) {
nc <- ncol(y)
means <- numeric(nc)
for(i in 1:nc) means[i] <- mean(y[,i], na.rm=T)
means
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/numbers.R
\name{percent_formatting}
\alias{percent_formatting}
\title{Formatting percentages}
\usage{
percent_formatting(x)
}
\arguments{
\item{x}{the number you need to format}
}
\value{
a string
}
\description{
This function allows you to format percentages
}
\examples{
percent_formatting(0.012)
}
\keyword{format}
| /man/percent_formatting.Rd | no_license | pachevalier/tricky | R | false | true | 397 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/numbers.R
\name{percent_formatting}
\alias{percent_formatting}
\title{Formatting percentages}
\usage{
percent_formatting(x)
}
\arguments{
\item{x}{the number you need to format}
}
\value{
a string
}
\description{
This function allows you to format percentages
}
\examples{
percent_formatting(0.012)
}
\keyword{format}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/madb.R
\name{getmadbcountries}
\alias{getmadbcountries}
\title{Function to retrieve list of countries,
available at Market Access DB}
\usage{
getmadbcountries(url = "http://madb.europa.eu/madb/euTariffs.htm")
}
\description{
Function to retrieve list of countries,
available at Market Access DB
}
| /man/getmadbcountries.Rd | no_license | malexan/fclhs | R | false | false | 384 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/madb.R
\name{getmadbcountries}
\alias{getmadbcountries}
\title{Function to retrieve list of countries,
available at Market Access DB}
\usage{
getmadbcountries(url = "http://madb.europa.eu/madb/euTariffs.htm")
}
\description{
Function to retrieve list of countries,
available at Market Access DB
}
|
library(shiny)
## Shiny UI: a slider to guess the mean brightness of 966 stars; the server
## is expected to render a histogram into the 'newHist' output slot.
shinyUI(pageWithSidebar(
  headerPanel("966 stars"),
  sidebarPanel(
    ## Input 'mu' is read by the server as input$mu.
    sliderInput('mu','Guess the mean brightness', value = 7.00, min = 2.00, max = 14.00, step = 0.05)
  ),
  mainPanel(
    plotOutput('newHist')
  )
))
shinyUI(pageWithSidebar(
headerPanel("966 stars"),
sidebarPanel(
sliderInput('mu','Guess the mean brightness', value = 7.00, min = 2.00, max = 14.00, step = 0.05)
),
mainPanel(
plotOutput('newHist')
)
)) |
### Run simple e coli model
##### Code to run generalised function and examples
##*** Libraries needed
## NOTE(review): library(mvtnorm) is loaded twice in this line.
library(mvtnorm);library(plyr); library(ggplot2);library(reshape2);library(deSolve);library(grid);library(gtools); library(directlabels); library(mvtnorm)
theme_set(theme_gray(base_size = 24));
##*** Locations
## NOTE(review): hard-coded home path + setwd() make this script
## machine-specific; consider here::here() or relative paths.
home<-"~/Documents/Hetero_res_and_f/"
plots<-paste(home,"plots",sep="")
setwd(home)
theme_set(theme_bw(base_size = 34))
## Colour-blind-friendly palette used in the plots below.
cbPalette <- c("#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
##*** Code needed
# Loads functions for generalised mean function and simulation model that uses the generalised function
# Also 2 fitness level ode function, original Sourya model as a difference model and multiplots function
source("ec_generalised_function_withr.R")
##*** Setting up
# Number of discrete fitness levels? Resistance levels?
nfit = 5;
mres = 5;
# Array of distribution of fitness and resistance c[resistance, fitness]
M0 <- array(0,c(mres,nfit,10))
# Initial acquisition distribution - bivariate here normal distribution with mean 0.5 and deviation 0.05
x <- seq(1/mres,1,1/mres) # seq(from = 0, to = 1, length.out = mres)
y <- seq(1/nfit,1,1/nfit) #seq(from = 0, to = 1, length.out = nfit)
## Bivariate normal density evaluated on the (resistance, fitness) grid.
f <- function(x, y) dmvnorm(cbind(x, y), mean = c(0.6, 0.6),sigma = diag(2)/20)
z <- outer(x, y, FUN = f); z <- z/sum(z) # Generate and normalise
## NOTE(review): dev.off() errors if no graphics device is open.
dev.off(); persp(x, y, z, theta = -30, phi = 30, ticktype = "detailed")
acqdistn<-z
plot(rowSums(z),type="l");plot(colSums(z),type="l") # Same as normal distribution
## Reshape to long format for the ggplot bar chart of the distribution.
z<-as.data.frame(z)
rownames(z) <- seq(1/mres,1,1/mres);colnames(z) <- seq(1,nfit,1);
z2<-as.data.frame(melt(z)); z2$res<-seq(1/mres,1,1/mres); colnames(z2)<-c("fitness","value","res")
p<-ggplot(z2, aes(x=res, y=value, fill=factor(fitness))) + geom_bar(stat="identity",colour="black") + facet_grid(~fitness)
p<-p + scale_x_continuous("Resistance level",breaks=c(0,0.2,0.4,0.6,0.8,1)) + scale_y_continuous("Proportion") + scale_fill_brewer("Fitness \nlevel",palette="Reds") + theme(axis.text.x = element_text(angle = 90, hjust = 1))
p
setwd(plots)
ggsave("acqdistn_06.pdf",width=14,height=10)
# ## Try another
# aa <- matrix(0,25,25)
# aa[1:5,21:25] <- acqdistn
# acqdistn <- aa
# nfit = 25;
# mres = 25;
# # Array of distribution of fitness and resistance c[resistance, fitness]
# M0 <- array(0,c(mres,nfit,10))
# Initial conditions
iniv<-c(98,1,1)
#iniv<-c(60,39,1)
#############********************************************** LOAD UP TO HERE *********************************************########
## Time step and horizon: 500 generations simulated at dt = 0.1.
dt=0.1
tsteps<-500*(1/dt)
## Antibiotic exposure levels to compare.
omega1 <- 24
omega2 <- 16
omega3 <- 2
omega4 <- 0.4
## One simulation run per exposure level (ec_funcf_mean_varsr is defined in
## the sourced ec_generalised_function_withr.R).
Sv20<-ec_funcf_mean_varsr(tsteps,home, c(omega1),iniv,M0,acqdistn,dt,500)
Sv15<-ec_funcf_mean_varsr(tsteps,home, c(omega2),iniv,M0,acqdistn,dt,500)
Sv10<-ec_funcf_mean_varsr(tsteps,home, c(omega3),iniv,M0,acqdistn,dt,500)
Sv05<-ec_funcf_mean_varsr(tsteps,home, c(omega4),iniv,M0,acqdistn,dt,500)
## NEED TO SPEED IT UP?? Fast for 5 x 5... ~6 sec on laptop
# What happens?
mm20<-c() ; mm10<-c() ; mm05<-c() ; mm15<-c()
ll<-dim(Sv20$M)[3];
ss<-seq(0,ll,1/dt) # Don't want to grab all
for(i in 2:length(ss)){
mm220<-as.data.frame(melt(Sv20$M[,,ss[i]]));
mm215<-as.data.frame(melt(Sv15$M[,,ss[i]]));
mm210<-as.data.frame(melt(Sv10$M[,,ss[i]]));
mm205<-as.data.frame(melt(Sv05$M[,,ss[i]]));
mm220$tstep=ss[i]*dt; mm215$tstep=ss[i]*dt; mm210$tstep=ss[i]*dt; mm205$tstep=ss[i]*dt # To go to generations
mm20<-rbind(mm20,mm220);mm15<-rbind(mm15,mm215); mm10<-rbind(mm10,mm210) ; mm05<-rbind(mm05,mm205)
}
colnames(mm20)<-c("x","y","z","t"); colnames(mm15)<-c("x","y","z","t"); colnames(mm10)<-c("x","y","z","t") ;colnames(mm05)<-c("x","y","z","t")
#mm20$x<-seq(mres,1,-1); mm10$x<-seq(mres,1,-1); mm05$x<-seq(mres,1,-1)
setwd(plots)
# Grab a subset
#sub<-c(1/dt,250,500,750,1000,1500,2000,2500,seq(3000,4001,1000),4500,tsteps)*dt
sub<-c(1/dt,100,250,500,750,1000,1500,2000,2500,seq(3000,4001,500))*dt
w<-which(mm20[,"t"] %in% sub)
# plots
p1<-ggplot(mm20[w,],aes(x,y,fill=z)) + facet_wrap( ~ t, ncol=3) + ggtitle(paste("w = ", omega1,sep=""))
p1<-p1 + scale_fill_gradient("Proportion", limits=c(0,1),low="white", high="red",guide = FALSE)
p1<-p1 + theme(axis.text.x = element_text(angle = 90, hjust = 1))
p1<-p1 + geom_tile() + scale_y_continuous(breaks=c(1,nfit),"Relative fitness levels",labels=c("Least","Most")) + scale_x_continuous(breaks=c(mres,1),"Resistance levels",labels=c("Most","Least"))
p1
ggsave(paste("Array_w=",omega1,"_06.pdf",sep=""))
p2<-ggplot(mm15[w,],aes(x,y,fill=z)) + facet_wrap( ~ t, ncol=3) + ggtitle(paste("w = ", omega2,sep=""))
p2<-p2 + scale_fill_gradient("Proportion", limits=c(0,1),low="white", high="red",guide=FALSE)+ theme(axis.text.x = element_text(angle = 90, hjust = 1))
p2<-p2 + geom_tile() + scale_y_continuous(breaks=c(1,nfit),"Relative fitness levels",labels=c("Least","Most")) + scale_x_continuous(breaks=c(mres,1),"Resistance levels",labels=c("Most","Least"))
p2
ggsave(paste("Array_w=",omega2,"_06.pdf",sep=""))
p3<-ggplot(mm10[w,],aes(x,y,fill=z)) + facet_wrap( ~ t, ncol=3) + ggtitle(paste("w = ", omega3,sep=""))
p3<-p3 + scale_fill_gradient("Proportion", limits=c(0,1),low="white", high="red",guide = FALSE)+ theme(axis.text.x = element_text(angle = 90, hjust = 1))
p3<-p3 + geom_tile() + scale_y_continuous(breaks=c(1,nfit),"Relative fitness levels",labels=c("Least","Most")) + scale_x_continuous(breaks=c(mres,1),"Resistance levels",labels=c("Most","Least"))
p3
ggsave(paste("Array_w=",omega3,"_06.pdf",sep=""))
p4<-ggplot(mm05[w,],aes(x,y,fill=z)) + facet_wrap( ~ t, ncol=3) + ggtitle(paste("w = ", omega4,sep=""))
p4<-p4 + scale_fill_gradient("Proportion", limits=c(0,1),low="white", high="red",guide=FALSE)+ theme(axis.text.x = element_text(angle = 90, hjust = 1))
p4<-p4 + geom_tile() + scale_y_continuous(breaks=c(1,nfit),"Relative fitness levels",labels=c("Least","Most")) + scale_x_continuous(breaks=c(mres,1),"Resistance levels",labels=c("Most","Least"))
p4
ggsave(paste("Array_w=",omega4,"_06.pdf",sep=""))
setwd(plots)
pdf("Array_w_all.pdf",width=18,height=18)
multiplot(p1,p2,p4,p3,cols=2)
dev.off()
## plot U, S & R over time
Mu <- as.data.frame(cbind(seq(0, tsteps, 1),matrix(0,tsteps+1,1), Sv05$U, Sv10$U, Sv20$U))
Ms <- as.data.frame(cbind(seq(0, tsteps, 1),matrix(1,tsteps+1,1), Sv05$S, Sv10$S, Sv20$S))
Mr <- as.data.frame(cbind(seq(0, tsteps, 1),matrix(2,tsteps+1,1), Sv05$R, Sv10$R, Sv20$R))
Mmf <- as.data.frame(cbind(seq(0, tsteps, 1),matrix(3,tsteps+1,1), Sv05$meanf[,1], Sv10$meanf[,1], Sv20$meanf[,1]))
Mmr <- as.data.frame(cbind(seq(0, tsteps, 1),matrix(4,tsteps+1,1), Sv05$meanf[,2], Sv10$meanf[,2], Sv20$meanf[,2]))
Mhigh <- as.data.frame(cbind(seq(0, tsteps, 1),matrix(5,tsteps+1,1), Sv05$M[5,5,], Sv10$M[5,5,],Sv20$M[5,5,]))
Musr <- rbind(Mu, Ms, Mr,Mmf,Mmr,Mhigh)
colnames(Musr) <- c("t", "pop",omega3, omega2, omega1)
Msrm <- melt(Musr, id.vars = c("t","pop"))
facet_names <- c(`0` = "U", `1` = "S", `2` = "R", `3` = "mean fit", `4` = "mean res", `5` = "Highest fit/res")
ggplot(Msrm, aes(x=t, y = value, colour = variable)) + geom_line() + facet_wrap(~pop,labeller = as_labeller(facet_names), scales = "free")
ggsave("TimeSeries_output_06.pdf")
# number in highest fitness changes but mean r and f don't?
# plots actual numbers with resistance
for(i in 1:length(w)){mm20[w[i],"zR"] <- Sv20$R[(1/dt)*mm20[w[i],"t"]]*mm20[w[i],"z"]}
for(i in 1:length(w)){mm10[w[i],"zR"] <- Sv10$R[(1/dt)*mm10[w[i],"t"]]*mm10[w[i],"z"]}
for(i in 1:length(w)){mm05[w[i],"zR"] <- Sv05$R[(1/dt)*mm05[w[i],"t"]]*mm05[w[i],"z"]}
p1<-ggplot(mm20[w,],aes(x,y,fill=zR)) + facet_wrap( ~ t, ncol=3) + ggtitle(paste("w = ", omega1,sep=""))
p1<-p1 + scale_fill_gradient("Proportion", limits=c(0,100),low="white", high="red",guide = FALSE)
p1<-p1 + theme(axis.text.x = element_text(angle = 90, hjust = 1))
p1<-p1 + geom_tile() + scale_y_continuous(breaks=c(1,nfit),"Relative fitness levels",labels=c("Least","Most")) + scale_x_continuous(breaks=c(mres,1),"Resistance levels",labels=c("Most","Least"))
p1
ggsave(paste("Array_w=",omega1,"_zR_06.pdf",sep=""))
p9<-ggplot(mm10[w,],aes(x,y,fill=zR)) + facet_wrap( ~ t, ncol=3) + ggtitle(paste("w = ", omega2,sep=""))
p9<-p9 + scale_fill_gradient("Proportion", limits=c(0,100),low="white", high="red",guide = FALSE)+ theme(axis.text.x = element_text(angle = 90, hjust = 1))
p9<-p9 + geom_tile() + scale_y_continuous(breaks=c(1,nfit),"Relative fitness levels",labels=c("Least","Most")) + scale_x_continuous(breaks=c(mres,1),"Resistance levels",labels=c("Most","Least"))
p9
ggsave(paste("Array_w=",omega2,"_zR_06.pdf",sep=""))
p4<-ggplot(mm05[w,],aes(x,y,fill=zR)) + facet_wrap( ~ t, ncol=3) + ggtitle(paste("w = ", omega3,sep=""))
p4<-p4 + scale_fill_gradient("Proportion", limits=c(0,100),low="white", high="red",guide=FALSE)+ theme(axis.text.x = element_text(angle = 90, hjust = 1))
p4<-p4 + geom_tile() + scale_y_continuous(breaks=c(1,nfit),"Relative fitness levels",labels=c("Least","Most")) + scale_x_continuous(breaks=c(mres,1),"Resistance levels",labels=c("Most","Least"))
p4
ggsave(paste("Array_w=",omega3,"_zR_06.pdf",sep=""))
setwd(plots)
pdf("Array_w_all_zR.pdf",width=18,height=8)
multiplot(p4,p9,p1,cols=3)
dev.off()
# shows that not all potential 100 units are R
### Look at proportion in each of the 30 levels over time for each - facet = level
mm20$omega = omega1; mm10$omega = omega2; mm05$omega = omega3
mega<-as.data.frame(rbind(mm20,mm10,mm05)); colnames(mega)<-c("x","y","z","time","omega")
mega$level = c(seq(21,25,1),seq(16,20,1),seq(11,15,1),seq(6,10,1),seq(1,5,1))
g<-ggplot(mega,aes(x=time,y=z,colour=factor(omega))) + geom_line(size=2) + facet_wrap( ~ level, ncol=5) + scale_colour_manual(values=cbPalette,"Abx\nLevel",breaks=c(omega1,omega2, omega3))
g<-g + scale_x_continuous("Generations",breaks=c(0,200,400)) + scale_y_continuous("Proportion at this level",breaks=c(0.25,0.75))+ theme(axis.text.x = element_text(angle = 90, hjust = 1))
g
setwd(plots)
ggsave("mega.pdf",width=12,height=8)
theme_set(theme_bw(base_size = 14))
g<-g+ facet_wrap( ~ level,scales = "free", ncol=5)+ scale_y_continuous("Proportion at this level")
ggsave("mega_freescale.pdf")
### Look at change in R & S over time
theme_set(theme_bw(base_size = 34))
# Wide tables: one column per antibiotic level ("05"/"10"/"20" are labels for
# the Sv05/Sv10/Sv20 runs), type 1 = resistant, type 2 = susceptible.
rrmr<-as.data.frame(cbind(seq(1,ll,1)*dt,Sv05$R,Sv10$R,Sv20$R,1)); colnames(rrmr)<-c("time","05","10","20","type")
rrms<-as.data.frame(cbind(seq(1,ll,1)*dt,Sv05$S,Sv10$S,Sv20$S,2)); colnames(rrms)<-c("time","05","10","20","type")
rrm<-as.data.frame(rbind(rrmr,rrms))
# Melt to long form and replace numeric type codes with readable facet labels.
rrm2<-melt(rrm,id.vars=c("time","type")); rrm2[which(rrm2$type==1),"type"]<-"Resistant"; rrm2[which(rrm2$type==2),"type"]<-"Susceptible"
g<-ggplot(rrm2,aes(x=time,y=value,colour=factor(variable))) + geom_line(size=2) + scale_x_continuous("Generations") + scale_y_continuous("Percentage with R")
g<-g + scale_colour_manual("Abx\nLevel",breaks=c("20","10","05"),labels=c(omega1, omega2,omega3),values = cbPalette) + facet_wrap(~type)
g
ggsave("r&s_overtime.pdf",width=12,height=8)
# Time to dominance...
# Earlier threshold-window approach, superseded by the simpler >50% rule below:
#t05<-min(intersect(intersect(which(rrmr[,2]>79.99),which(rrmr[,2]< 80.007)),which(floor(rrmr[,2])==80))*dt)
#t10<-min(intersect(intersect(which(rrmr[,3]>79.99),which(rrmr[,3]< 80.002)),which(floor(rrmr[,3])==80))*dt)
#t20<-min(intersect(intersect(which(rrmr[,4]>79.99),which(rrmr[,4]< 80.007)),which(floor(rrmr[,4])==80))*dt)
# Time of first crossing of 50% resistant for each run (NA-prone: min(which())
# on an empty match yields Inf and an NA time — the "no t05" comment below
# suggests the 05 run never crosses).
t05 <- rrmr[min(which(rrmr[,2] > 50)),"time"]
t10 <- rrmr[min(which(rrmr[,3] > 50)),"time"]
t20 <- rrmr[min(which(rrmr[,4] > 50)),"time"]
# Rescale time by each run's time-to-dominance; the 05 run is deliberately
# excluded from the normalised plot because t05 is unavailable.
mm20_2<-mm20;mm10_2<-mm10;mm05_2<-mm05
mm20_2$t<-mm20_2$t/t20;mm10_2$t<-mm10_2$t/t10;# mm05_2$t<-mm05_2$t/t05 no t05 at moment
mega_2<-as.data.frame(rbind(mm20_2,mm10_2)) #,mm05_2));
colnames(mega_2)<-c("x","y","z","time","omega")
mega_2$level = c(seq(21,25,1),seq(16,20,1),seq(11,15,1),seq(6,10,1),seq(1,5,1))
g<-ggplot(mega_2,aes(x=time,y=z,colour=factor(omega))) + geom_line(size=2) + facet_wrap( ~ level, ncol=5, scales = "free") + scale_colour_manual("Abx\nLevel",breaks=c(20,10,5),labels=c("0.2","0.1","0.05"),values = cbPalette)
g<-g + scale_x_continuous("Time to full resistance") + scale_y_continuous("Proportion at this level")+ theme(axis.text.x = element_text(angle = 90, hjust = 1))
g
setwd(plots)
ggsave("mega_normtodom.pdf")
# Time to full resistance and fitness
# Use cell 5 (a corner of the fitness/resistance grid) as the "full resistance"
# marker and find the first time its proportion exceeds a threshold per run.
# NOTE(review): thresholds differ (0.5 vs 0.4) across runs — confirm intended.
w<-which(mega$level==5);
t05a<-min(intersect(which(mega[w,"omega"]==omega1),which(mega[w,"z"]>0.5)))
t10a<-min(intersect(which(mega[w,"omega"]==omega2),which(mega[w,"z"]>0.4)))
t20a<-min(intersect(which(mega[w,"omega"]==omega3),which(mega[w,"z"]>0.4)))
#t05<-mega[w[t05a],"time"];
t10<-mega[w[t10a],"time"]; t20<-mega[w[t20a],"time"]
mm20_2<-mm20;mm10_2<-mm10;#mm05_2<-mm05
mm20_2$t<-mm20_2$t/t20;mm10_2$t<-mm10_2$t/t10;#mm05_2$t<-mm05_2$t/t05
# NOTE(review): mm05_2 here still carries the value assigned in the previous
# section (its $t was never divided by t05), so the 05 run is plotted on an
# un-normalised time axis — verify this is intentional.
mega_2<-as.data.frame(rbind(mm20_2,mm10_2,mm05_2)); colnames(mega_2)<-c("x","y","z","time","omega")
mega_2$level = c(seq(21,25,1),seq(16,20,1),seq(11,15,1),seq(6,10,1),seq(1,5,1))
theme_set(theme_bw(base_size = 12));
g<-ggplot(mega_2,aes(x=time,y=z,colour=factor(omega))) + geom_line(size=2) + facet_wrap( ~ level, ncol=5, scales = "free") + scale_colour_manual("Abx\nLevel",breaks=c(omega1, omega2, omega3),values = cbPalette)
g<-g + scale_x_continuous("Time to full resistance") + scale_y_continuous("Proportion at this level")+ theme(axis.text.x = element_text(angle = 90, hjust = 1))
g
setwd(plots)
ggsave("mega_normtofullR.pdf",width=12,height=8)
# Plot proportions in each fitness / resistance level over time
# For a thinned set of time indices, collapse each run's 2-D array into its
# fitness marginals (colSums) and resistance marginals (rowSums); the final
# column tags the run (20/10/5).
pp<-c();
ll<-dim(Sv20$M)[3];
ss<-seq(0,ll,1/dt) # Don't want to grab all
for(i in 2:length(ss)){
pp220<-c(ss[i]*dt,colSums(Sv20$M[,,ss[i]]), rowSums(Sv20$M[,,ss[i]]),20)
pp210<-c(ss[i]*dt,colSums(Sv10$M[,,ss[i]]), rowSums(Sv10$M[,,ss[i]]),10)
pp205<-c(ss[i]*dt,colSums(Sv05$M[,,ss[i]]), rowSums(Sv05$M[,,ss[i]]),5)
pp<-rbind(pp,pp220,pp210,pp205);
}
# Column names assume nfit = mres = 5 (5 fitness + 5 resistance marginals).
pp<-as.data.frame(pp);colnames(pp)<-c("t","Fitness level 1\n(low)","Fitness level 2","Fitness level 3","Fitness level 4","Fitness level 5\n(high)","Res. level 1\n(low)","Res. level 2","Res. level 3","Res. level 4","Res. level 5\n(high)","w");
pp2<-melt(pp,id.vars = c("t","w"))
theme_set(theme_bw(base_size = 34));
g<-ggplot(pp2,aes(x=t,y=value,colour=factor(w))) + facet_wrap(~variable,ncol=5) + geom_line(size=2) + theme(axis.text.x = element_text(angle = 90, hjust = 1))
g<-g + scale_x_continuous("Generation") + scale_y_continuous("Proportion") + scale_colour_manual(values=cbPalette,"Abx\nlevel",labels=c(0.05,0.1,0.2))
g # Suggests that although v similar proportions in the most fit fewer are in the higher resistance levels with low level antibiotics use. In fact with this model
# the same rate of selection for no cost mutations is seen whether there is high or low antibiotic use
setwd(plots)
ggsave("f&r_overtime.pdf",width=18,height=12)
#### Compare with and without fitness and resistance levels.
### Range of omega
# Baseline model WITHOUT diversity: a simple 3-compartment (U/S/R) difference
# model run for each antibiotic level, to compare against the structured model.
setwd(home)
omegav <- c(omega1,omega2,omega3)
# Read parameter table and assign each (name, value) pair as a global variable.
para<-read.csv("data/para_ecoli.csv",header=TRUE,check.names=F,stringsAsFactors = FALSE)[,1:2]
for(i in 1:length(para[,1])){assign(para[i,1],para[i,2])}
# Correct for timestep
mu<-mu*dt;beta<-beta*dt;eps<-eps*dt
m<-dim(acqdistn)[1]; vs<-seq(1/m,1,1/m);
# Mean fitness of the acquisition distribution.
# NOTE(review): `vf` is not defined in this script section (only `vs` is built
# above) — confirm vf is created by the sourced code, or whether vs was meant.
assign("f",sum(colSums(acqdistn)*vf))
## SAME as writeup_ecoli
#kr = 0.4; f = 0.6 ## 40% cost to both
bigall<-c(); lambdasv<-c(); lambdarv<-c();
endp<-200*1/dt
U<-matrix(0,1,endp); S<-matrix(0,1,endp); R<-matrix(0,1,endp);
U[1]<-iniv[1]; S[1]<-iniv[2]; R[1]<-iniv[3];
lambdasv<-matrix(0,1,endp);lambdarv<-matrix(0,1,endp);
lambdasv[1] = beta * S[1]/sum(iniv); lambdarv[1] = sum(colSums(acqdistn*seq(1/nfit,1,1/nfit))) * beta * R[1]/sum(iniv) # function outputs just meanfit when all popns 0
setwd(home) # might have para in different place for different models
# One difference-equation run per antibiotic level; results stacked in bigall.
# NOTE(review): U/S/R are not reset between omega values, so each run continues
# from the previous run's state at index 1 onward being overwritten — confirm
# carry-over is intended.
for(j in 1:length(omegav)){
assign("omega",omegav[j])
for(i in 1:endp){
lambdas=lambdasv[i];lambdar=lambdarv[i];
# NEW Dynamics
# kk is a saturation constant read from the parameter file (assumed) —
# U/(U+kk) throttles colonisation as U shrinks.
U[i+1] = U[i] + mu*(S[i]+R[i]) - (lambdas+lambdar)*(U[i]/(U[i] + kk))
S[i+1] = S[i] + lambdas*(U[i]/(U[i] + kk)) - mu*S[i] - eps * S[i]
R[i+1] = R[i] + lambdar*(U[i]/(U[i] + kk)) - mu*R[i] + eps * S[i]
lambdasv[i+1] = max(0,(1-omega)/1) * beta * S[i+1] / ( S[i+1] + R[i+1] );
lambdarv[i+1] = f * max(0,(20-omega)/20) * beta * R[i+1] / ( S[i+1] + R[i+1] ); # resistant strain has an MIC of 6
}
all<-as.data.frame(cbind(seq(0,endp,1)*dt,U,S,R,omega)); colnames(all)<-c("time","U","Susceptible","Resistant","w")
bigall<-rbind(bigall,all)
}
allm<-melt(bigall[,c("time","Susceptible","Resistant","w")], id.vars=c("w","time"))
allm$nw = allm$w
theme_set(theme_bw(base_size = 34))
# Reshape the diversity-model output (rrm2) to match allm's columns, flag it
# with with=1, and map its "05"/"10"/"20" labels onto the omega values used by
# the no-diversity runs.
colnames(rrm2)<-c("time","variable","w","value")
rrm2n<-rrm2[,c("w","time","variable","value")];
rrm2n$with<-1; rrm2n$nw<-0
# NOTE(review): hard-coded row indices (12000, 4000, 1) pick the nw value for
# each omega from allm — brittle if endp/omegav change; verify the mapping.
rrm2n[which(rrm2n$w=="05"),"nw"]=allm[12000,"nw"];
rrm2n[which(rrm2n$w=="10"),"nw"]<-allm[4000,"nw"];
rrm2n[which(rrm2n$w=="20"),"nw"]<-allm[1,"nw"]
allm$with<-0; allm$nw<-allm$w;
allmn<-rbind(allm,rrm2n)
# First figure: no-diversity runs only (with == 0).
w<-which(allmn$with == 0)
p<-ggplot(allmn[w,],aes(x=time,y=value,colour=variable,linetype=factor(with)))+geom_line(size=2) +
scale_x_continuous("Time steps",lim=c(0,endp*dt))
p<-p+scale_colour_manual("Sub-\npopulation",breaks=c("Susceptible","Resistant"), values = c("blue","red")) +
scale_y_continuous("Percentage of population", limits = c(0,100)) + facet_wrap( ~ nw) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
p<-p + scale_linetype_discrete("With\ndiversity",breaks=c(0,1),labels=c("None","With diversity")) + theme(legend.position="none")
p
setwd(plots)
ggsave("Withoutdiversity.pdf",width=12,height=7)
# Second figure: both model variants overlaid, distinguished by linetype.
p<-ggplot(allmn,aes(x=time,y=value,colour=variable,linetype=factor(with)))+geom_line(size=2) +
scale_x_continuous("Time steps",lim=c(0,endp*dt))
p<-p+scale_colour_manual("Sub-\npopulation",breaks=c("Susceptible","Resistant"), values = c("blue","red")) +
scale_y_continuous("Percentage of population", limits = c(0,100)) + facet_wrap( ~ nw) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
p<-p + scale_linetype_discrete("With\ndiversity",breaks=c(0,1),labels=c("None","With diversity"))
p
setwd(plots)
ggsave("WithnWithoutdiversity.pdf",width=12,height=7)
p + theme(legend.position="none")
ggsave("WithnWithoutdiversity_nolegend.pdf",width=12,height=7)
# Zoomed y axis to show low-prevalence dynamics.
p + scale_y_continuous("Percentage of population",lim=c(0,10))
ggsave("WithnWithoutdiversity_zoom.pdf",width=12,height=7)
## Plot like in Gulberg
# Dot plot in the style of Gullberg et al.: level proportions for the w=5 run
# at a handful of generations, with points for each level dodged horizontally
# so overlapping values are readable.
# NOTE(review): the 7501:15000 row window hard-codes the layout of pp2 — verify
# it still selects the intended run if tsteps or the time thinning changes.
pp2n<-pp2[7501:15000,]
w<-intersect(which(pp2n$w==5),c(which(pp2n$t==100),which(pp2n$t==200),which(pp2n$t==300),which(pp2n$t==400),which(pp2n$t==500)))
ggplot(pp2n[w,], aes(x=t, y= value,colour=factor(variable))) + geom_point(aes(shape = factor(variable)),size=5)
library(data.table)
drrm2n <- data.table(pp2n[w,])
factors <- unique(drrm2n[, variable])
nfactors <- length(factors)
# Spread each variable's points across +/- `width` generations around t.
width = 25
for (id in seq_along(factors)) {
drrm2n[variable == factors[id], adj.time := t - width + (id - 1) * (2*width) / (nfactors - 1)]
}
g<-ggplot(drrm2n, aes(x=adj.time, y= value,colour=factor(variable))) + geom_point(aes(shape = factor(variable)),size=5)+ theme(legend.position="top")
g<-g+ scale_x_continuous("Generations") + scale_y_continuous("Proportion") + scale_colour_discrete("")+ scale_shape_discrete("")
setwd(plots)
ggsave("Gulberg5d.pdf",width=16,height=8)
#############********************************************************************************************
#############********************************************************************************************
#############********************************************** NEW ACQDISTN *********************************************########
#### change the initial conditions
# Each numbered variant below rebuilds the acquisition distribution `acqdistn`
# (joint distribution over resistance x fitness levels) and re-runs the
# downstream analysis via plot_diff_acd_output (sourced elsewhere).
#############********************************************************************************************
#############********************************************************************************************
# 2: normalise but mean at 0.2
# Bivariate normal centred at (0.2, 0.2), evaluated on the (x, y) level grid
# and normalised to sum to 1.
f <- function(x, y) dmvnorm(cbind(x, y), mean = c(0.2, 0.2),sigma = diag(2)/20)
z <- outer(x, y, FUN = f); z <- z/sum(z) # Generate and normalise
dev.off(); persp(x, y, z, theta = -30, phi = 30, ticktype = "detailed")
acqdistn<-z
plot_diff_acd_output(acqdistn,plots,2)
# 3: flatten to all low resistance but high fitness
f <- function(x, y) dmvnorm(cbind(x, y), mean = c(0.01, 0.01),sigma = diag(2)/20)
z <- outer(x, y, FUN = f); z <- z/sum(z) # Generate and normalise
dev.off(); persp(x, y, z, theta = -30, phi = 30, ticktype = "detailed")
zn<-z
# Collapse all mass into rows 1:5 (each row gets the column marginals).
zn[1:5,]<-colSums(z) # Doesn't matter if col or row Sums.
acqdistn<-zn/sum(zn)
plot_diff_acd_output(acqdistn,plots,3)
# 4: flatten to all low resistance but high fitness
# Same as variant 3 but collapsed along the other axis, then transposed.
f <- function(x, y) dmvnorm(cbind(x, y), mean = c(0.01, 0.01),sigma = diag(2)/20)
z <- outer(x, y, FUN = f); z <- z/sum(z) # Generate and normalise
dev.off(); persp(x, y, z, theta = -30, phi = 30, ticktype = "detailed")
zn<-z
zn[,1:5]<-colSums(z) # Doesn't matter if col or row Sums.
acqdistn<-t(zn)/sum(zn)
plot_diff_acd_output(acqdistn,plots,4)
# 5: EG: start at 0.8
# Build a 5x5 distribution on a coarse grid, then embed it (row-reversed) into
# a 10x10 array so the mass sits at higher resistance levels.
x=y=seq(0.2,1,0.2)
f <- function(x, y) dmvnorm(cbind(x, y), mean = c(0.2, 0.2),sigma = diag(2)/20)
z <- outer(x, y, FUN = f); z <- z/sum(z) # Generate and normalise
dev.off(); persp(x, y, z, theta = -30, phi = 30, ticktype = "detailed")
nfit = 10; mres = 10;
# Array of distribution of fitness and resistance c[resistance, fitness]
M0 <- array(0,c(mres,nfit,10))
aa<-M0[,,1]
aa[5,1:5]<-z[1,];aa[4,1:5]<-z[2,];aa[3,1:5]<-z[3,];aa[2,1:5]<-z[4,];aa[1,1:5]<-z[5,]
acqdistn<-aa
x=y=seq(0.1,1,0.1)
dev.off(); persp(x, y, aa, theta = -30, phi = 30, ticktype = "detailed")
plot_diff_acd_output(acqdistn,plots,5)
# 6: use standard initial distribution but start at 0.8
# Embed the 5x5 distribution into a 25x25 array at rows 20:24, cols 21:25
# (high-fitness / high-resistance corner).
x=y=seq(0.2,1,0.2)
f <- function(x, y) dmvnorm(cbind(x, y), mean = c(0.6, 0.6),sigma = diag(2)/20)
z <- outer(x, y, FUN = f); z <- z/sum(z) # Generate and normalise
dev.off(); persp(x, y, z, theta = -30, phi = 30, ticktype = "detailed")
nfit = 25; mres = 25;
# Array of distribution of fitness and resistance c[resistance, fitness]
M0 <- array(0,c(mres,nfit,10))
aa<-M0[,,1]
# NOTE(review): row order here (20..24 ascending) differs from variant 5's
# descending order — confirm which orientation is intended.
aa[20,21:25]<-z[1,];aa[21,21:25]<-z[2,];aa[22,21:25]<-z[3,];aa[23,21:25]<-z[4,];aa[24,21:25]<-z[5,]
acqdistn<-aa
x=y=seq(1/25,1,1/25)
dev.off(); persp(x, y, aa, theta = -30, phi = 30, ticktype = "detailed")
plot_diff_acd_output(acqdistn,plots,6)
| /ec_generalised_function_withr_run.R | no_license | nmfuller/Hetero_res_and_f_nf | R | false | false | 22,179 | r | ### Run simple e coli model
##### Code to run generalised function and examples
##*** Libraries needed
# NOTE(review): mvtnorm is loaded twice on this line — harmless but redundant.
library(mvtnorm);library(plyr); library(ggplot2);library(reshape2);library(deSolve);library(grid);library(gtools); library(directlabels); library(mvtnorm)
theme_set(theme_gray(base_size = 24));
##*** Locations
home<-"~/Documents/Hetero_res_and_f/"
plots<-paste(home,"plots",sep="")
setwd(home)
theme_set(theme_bw(base_size = 34))
# Colour-blind-friendly palette used throughout the figures.
cbPalette <- c("#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
##*** Code needed
# Loads functions for generalised mean function and simulation model that uses the generalised function
# Also 2 fitness level ode function, original Sourya model as a difference model and multiplots function
source("ec_generalised_function_withr.R")
##*** Setting up
# Number of discrete fitness levels? Resistance levels?
nfit = 5;
mres = 5;
# Array of distribution of fitness and resistance c[resistance, fitness]
M0 <- array(0,c(mres,nfit,10))
# Initial acquisition distribution - bivariate here normal distribution with mean 0.5 and deviation 0.05
# NOTE(review): the dmvnorm call below actually uses mean 0.6 and sigma
# diag(2)/20 — the comment above appears out of date.
x <- seq(1/mres,1,1/mres) # seq(from = 0, to = 1, length.out = mres)
y <- seq(1/nfit,1,1/nfit) #seq(from = 0, to = 1, length.out = nfit)
f <- function(x, y) dmvnorm(cbind(x, y), mean = c(0.6, 0.6),sigma = diag(2)/20)
z <- outer(x, y, FUN = f); z <- z/sum(z) # Generate and normalise
dev.off(); persp(x, y, z, theta = -30, phi = 30, ticktype = "detailed")
acqdistn<-z
plot(rowSums(z),type="l");plot(colSums(z),type="l") # Same as normal distribution
# Melt the distribution for a faceted bar-chart of the acquisition distribution.
z<-as.data.frame(z)
rownames(z) <- seq(1/mres,1,1/mres);colnames(z) <- seq(1,nfit,1);
z2<-as.data.frame(melt(z)); z2$res<-seq(1/mres,1,1/mres); colnames(z2)<-c("fitness","value","res")
p<-ggplot(z2, aes(x=res, y=value, fill=factor(fitness))) + geom_bar(stat="identity",colour="black") + facet_grid(~fitness)
p<-p + scale_x_continuous("Resistance level",breaks=c(0,0.2,0.4,0.6,0.8,1)) + scale_y_continuous("Proportion") + scale_fill_brewer("Fitness \nlevel",palette="Reds") + theme(axis.text.x = element_text(angle = 90, hjust = 1))
p
setwd(plots)
ggsave("acqdistn_06.pdf",width=14,height=10)
# ## Try another
# aa <- matrix(0,25,25)
# aa[1:5,21:25] <- acqdistn
# acqdistn <- aa
# nfit = 25;
# mres = 25;
# # Array of distribution of fitness and resistance c[resistance, fitness]
# M0 <- array(0,c(mres,nfit,10))
# Initial conditions
# U / S / R initial population sizes (percentages, summing to 100).
iniv<-c(98,1,1)
#iniv<-c(60,39,1)
#############********************************************** LOAD UP TO HERE *********************************************########
# Run the structured simulation (sourced ec_funcf_mean_varsr) once per
# antibiotic level. dt is the integration timestep; tsteps = 500 generations.
dt=0.1
tsteps<-500*(1/dt)
omega1 <- 24
omega2 <- 16
omega3 <- 2
omega4 <- 0.4
# NOTE(review): variable names Sv20/Sv15/Sv10/Sv05 predate these omega values
# (24/16/2/0.4) — the numeric suffixes no longer match the omegas; confirm
# downstream labels ("05"/"10"/"20") are interpreted correctly.
Sv20<-ec_funcf_mean_varsr(tsteps,home, c(omega1),iniv,M0,acqdistn,dt,500)
Sv15<-ec_funcf_mean_varsr(tsteps,home, c(omega2),iniv,M0,acqdistn,dt,500)
Sv10<-ec_funcf_mean_varsr(tsteps,home, c(omega3),iniv,M0,acqdistn,dt,500)
Sv05<-ec_funcf_mean_varsr(tsteps,home, c(omega4),iniv,M0,acqdistn,dt,500)
## NEED TO SPEED IT UP?? Fast for 5 x 5... ~6 sec on laptop
# What happens?
# Melt each run's 3-D array M[res, fit, time] into long form at a thinned set
# of time indices, converting the step index to generations via dt.
mm20<-c() ; mm10<-c() ; mm05<-c() ; mm15<-c()
ll<-dim(Sv20$M)[3];
ss<-seq(0,ll,1/dt) # Don't want to grab all
for(i in 2:length(ss)){
mm220<-as.data.frame(melt(Sv20$M[,,ss[i]]));
mm215<-as.data.frame(melt(Sv15$M[,,ss[i]]));
mm210<-as.data.frame(melt(Sv10$M[,,ss[i]]));
mm205<-as.data.frame(melt(Sv05$M[,,ss[i]]));
mm220$tstep=ss[i]*dt; mm215$tstep=ss[i]*dt; mm210$tstep=ss[i]*dt; mm205$tstep=ss[i]*dt # To go to generations
mm20<-rbind(mm20,mm220);mm15<-rbind(mm15,mm215); mm10<-rbind(mm10,mm210) ; mm05<-rbind(mm05,mm205)
}
colnames(mm20)<-c("x","y","z","t"); colnames(mm15)<-c("x","y","z","t"); colnames(mm10)<-c("x","y","z","t") ;colnames(mm05)<-c("x","y","z","t")
#mm20$x<-seq(mres,1,-1); mm10$x<-seq(mres,1,-1); mm05$x<-seq(mres,1,-1)
setwd(plots)
# Grab a subset
#sub<-c(1/dt,250,500,750,1000,1500,2000,2500,seq(3000,4001,1000),4500,tsteps)*dt
sub<-c(1/dt,100,250,500,750,1000,1500,2000,2500,seq(3000,4001,500))*dt
w<-which(mm20[,"t"] %in% sub)
# plots
# One faceted heat-map per antibiotic level: proportion in each
# (resistance, fitness) cell at the selected generations.
p1<-ggplot(mm20[w,],aes(x,y,fill=z)) + facet_wrap( ~ t, ncol=3) + ggtitle(paste("w = ", omega1,sep=""))
p1<-p1 + scale_fill_gradient("Proportion", limits=c(0,1),low="white", high="red",guide = FALSE)
p1<-p1 + theme(axis.text.x = element_text(angle = 90, hjust = 1))
p1<-p1 + geom_tile() + scale_y_continuous(breaks=c(1,nfit),"Relative fitness levels",labels=c("Least","Most")) + scale_x_continuous(breaks=c(mres,1),"Resistance levels",labels=c("Most","Least"))
p1
ggsave(paste("Array_w=",omega1,"_06.pdf",sep=""))
p2<-ggplot(mm15[w,],aes(x,y,fill=z)) + facet_wrap( ~ t, ncol=3) + ggtitle(paste("w = ", omega2,sep=""))
p2<-p2 + scale_fill_gradient("Proportion", limits=c(0,1),low="white", high="red",guide=FALSE)+ theme(axis.text.x = element_text(angle = 90, hjust = 1))
p2<-p2 + geom_tile() + scale_y_continuous(breaks=c(1,nfit),"Relative fitness levels",labels=c("Least","Most")) + scale_x_continuous(breaks=c(mres,1),"Resistance levels",labels=c("Most","Least"))
p2
ggsave(paste("Array_w=",omega2,"_06.pdf",sep=""))
p3<-ggplot(mm10[w,],aes(x,y,fill=z)) + facet_wrap( ~ t, ncol=3) + ggtitle(paste("w = ", omega3,sep=""))
p3<-p3 + scale_fill_gradient("Proportion", limits=c(0,1),low="white", high="red",guide = FALSE)+ theme(axis.text.x = element_text(angle = 90, hjust = 1))
p3<-p3 + geom_tile() + scale_y_continuous(breaks=c(1,nfit),"Relative fitness levels",labels=c("Least","Most")) + scale_x_continuous(breaks=c(mres,1),"Resistance levels",labels=c("Most","Least"))
p3
ggsave(paste("Array_w=",omega3,"_06.pdf",sep=""))
p4<-ggplot(mm05[w,],aes(x,y,fill=z)) + facet_wrap( ~ t, ncol=3) + ggtitle(paste("w = ", omega4,sep=""))
p4<-p4 + scale_fill_gradient("Proportion", limits=c(0,1),low="white", high="red",guide=FALSE)+ theme(axis.text.x = element_text(angle = 90, hjust = 1))
p4<-p4 + geom_tile() + scale_y_continuous(breaks=c(1,nfit),"Relative fitness levels",labels=c("Least","Most")) + scale_x_continuous(breaks=c(mres,1),"Resistance levels",labels=c("Most","Least"))
p4
ggsave(paste("Array_w=",omega4,"_06.pdf",sep=""))
setwd(plots)
# Combined 2x2 panel of all four heat-map figures.
pdf("Array_w_all.pdf",width=18,height=18)
multiplot(p1,p2,p4,p3,cols=2)
dev.off()
## plot U, S & R over time
# Stack U/S/R trajectories plus mean fitness, mean resistance, and the
# highest-fitness/highest-resistance cell; the second column (pop) codes the
# facet, mapped to labels via facet_names below.
Mu <- as.data.frame(cbind(seq(0, tsteps, 1),matrix(0,tsteps+1,1), Sv05$U, Sv10$U, Sv20$U))
Ms <- as.data.frame(cbind(seq(0, tsteps, 1),matrix(1,tsteps+1,1), Sv05$S, Sv10$S, Sv20$S))
Mr <- as.data.frame(cbind(seq(0, tsteps, 1),matrix(2,tsteps+1,1), Sv05$R, Sv10$R, Sv20$R))
Mmf <- as.data.frame(cbind(seq(0, tsteps, 1),matrix(3,tsteps+1,1), Sv05$meanf[,1], Sv10$meanf[,1], Sv20$meanf[,1]))
Mmr <- as.data.frame(cbind(seq(0, tsteps, 1),matrix(4,tsteps+1,1), Sv05$meanf[,2], Sv10$meanf[,2], Sv20$meanf[,2]))
Mhigh <- as.data.frame(cbind(seq(0, tsteps, 1),matrix(5,tsteps+1,1), Sv05$M[5,5,], Sv10$M[5,5,],Sv20$M[5,5,]))
Musr <- rbind(Mu, Ms, Mr,Mmf,Mmr,Mhigh)
colnames(Musr) <- c("t", "pop",omega3, omega2, omega1)
Msrm <- melt(Musr, id.vars = c("t","pop"))
facet_names <- c(`0` = "U", `1` = "S", `2` = "R", `3` = "mean fit", `4` = "mean res", `5` = "Highest fit/res")
ggplot(Msrm, aes(x=t, y = value, colour = variable)) + geom_line() + facet_wrap(~pop,labeller = as_labeller(facet_names), scales = "free")
ggsave("TimeSeries_output_06.pdf")
# number in highest fitness changes but mean r and f don't?
# number in highest fitness changes but mean r and f don't?
# plots actual numbers with resistance
for(i in 1:length(w)){mm20[w[i],"zR"] <- Sv20$R[(1/dt)*mm20[w[i],"t"]]*mm20[w[i],"z"]}
for(i in 1:length(w)){mm10[w[i],"zR"] <- Sv10$R[(1/dt)*mm10[w[i],"t"]]*mm10[w[i],"z"]}
for(i in 1:length(w)){mm05[w[i],"zR"] <- Sv05$R[(1/dt)*mm05[w[i],"t"]]*mm05[w[i],"z"]}
p1<-ggplot(mm20[w,],aes(x,y,fill=zR)) + facet_wrap( ~ t, ncol=3) + ggtitle(paste("w = ", omega1,sep=""))
p1<-p1 + scale_fill_gradient("Proportion", limits=c(0,100),low="white", high="red",guide = FALSE)
p1<-p1 + theme(axis.text.x = element_text(angle = 90, hjust = 1))
p1<-p1 + geom_tile() + scale_y_continuous(breaks=c(1,nfit),"Relative fitness levels",labels=c("Least","Most")) + scale_x_continuous(breaks=c(mres,1),"Resistance levels",labels=c("Most","Least"))
p1
ggsave(paste("Array_w=",omega1,"_zR_06.pdf",sep=""))
p9<-ggplot(mm10[w,],aes(x,y,fill=zR)) + facet_wrap( ~ t, ncol=3) + ggtitle(paste("w = ", omega2,sep=""))
p9<-p9 + scale_fill_gradient("Proportion", limits=c(0,100),low="white", high="red",guide = FALSE)+ theme(axis.text.x = element_text(angle = 90, hjust = 1))
p9<-p9 + geom_tile() + scale_y_continuous(breaks=c(1,nfit),"Relative fitness levels",labels=c("Least","Most")) + scale_x_continuous(breaks=c(mres,1),"Resistance levels",labels=c("Most","Least"))
p9
ggsave(paste("Array_w=",omega2,"_zR_06.pdf",sep=""))
p4<-ggplot(mm05[w,],aes(x,y,fill=zR)) + facet_wrap( ~ t, ncol=3) + ggtitle(paste("w = ", omega3,sep=""))
p4<-p4 + scale_fill_gradient("Proportion", limits=c(0,100),low="white", high="red",guide=FALSE)+ theme(axis.text.x = element_text(angle = 90, hjust = 1))
p4<-p4 + geom_tile() + scale_y_continuous(breaks=c(1,nfit),"Relative fitness levels",labels=c("Least","Most")) + scale_x_continuous(breaks=c(mres,1),"Resistance levels",labels=c("Most","Least"))
p4
ggsave(paste("Array_w=",omega3,"_zR_06.pdf",sep=""))
setwd(plots)
pdf("Array_w_all_zR.pdf",width=18,height=8)
multiplot(p4,p9,p1,cols=3)
dev.off()
# shows that not all potential 100 units are R
### Look at proportion in each of the 30 levels over time for each - facet = level
# Tag each melted array with its antibiotic level, stack, and assign a single
# 1..25 facet index per (resistance, fitness) cell; the level vector relies on
# melt() cycling the 25 cells in a fixed order at each time step.
mm20$omega = omega1; mm10$omega = omega2; mm05$omega = omega3
mega<-as.data.frame(rbind(mm20,mm10,mm05)); colnames(mega)<-c("x","y","z","time","omega")
mega$level = c(seq(21,25,1),seq(16,20,1),seq(11,15,1),seq(6,10,1),seq(1,5,1))
g<-ggplot(mega,aes(x=time,y=z,colour=factor(omega))) + geom_line(size=2) + facet_wrap( ~ level, ncol=5) + scale_colour_manual(values=cbPalette,"Abx\nLevel",breaks=c(omega1,omega2, omega3))
g<-g + scale_x_continuous("Generations",breaks=c(0,200,400)) + scale_y_continuous("Proportion at this level",breaks=c(0.25,0.75))+ theme(axis.text.x = element_text(angle = 90, hjust = 1))
g
setwd(plots)
ggsave("mega.pdf",width=12,height=8)
# Re-save with free y scales so low-occupancy cells remain visible.
theme_set(theme_bw(base_size = 14))
g<-g+ facet_wrap( ~ level,scales = "free", ncol=5)+ scale_y_continuous("Proportion at this level")
ggsave("mega_freescale.pdf")
### Look at change in R & S over time
theme_set(theme_bw(base_size = 34))
# Wide tables: one column per run ("05"/"10"/"20" label the Sv05/Sv10/Sv20
# runs), type 1 = resistant, type 2 = susceptible.
rrmr<-as.data.frame(cbind(seq(1,ll,1)*dt,Sv05$R,Sv10$R,Sv20$R,1)); colnames(rrmr)<-c("time","05","10","20","type")
rrms<-as.data.frame(cbind(seq(1,ll,1)*dt,Sv05$S,Sv10$S,Sv20$S,2)); colnames(rrms)<-c("time","05","10","20","type")
rrm<-as.data.frame(rbind(rrmr,rrms))
# Melt to long form; replace numeric type codes with readable facet labels.
rrm2<-melt(rrm,id.vars=c("time","type")); rrm2[which(rrm2$type==1),"type"]<-"Resistant"; rrm2[which(rrm2$type==2),"type"]<-"Susceptible"
g<-ggplot(rrm2,aes(x=time,y=value,colour=factor(variable))) + geom_line(size=2) + scale_x_continuous("Generations") + scale_y_continuous("Percentage with R")
g<-g + scale_colour_manual("Abx\nLevel",breaks=c("20","10","05"),labels=c(omega1, omega2,omega3),values = cbPalette) + facet_wrap(~type)
g
ggsave("r&s_overtime.pdf",width=12,height=8)
# Time to dominance...
# Earlier threshold-window approach, superseded by the simpler >50% rule below:
#t05<-min(intersect(intersect(which(rrmr[,2]>79.99),which(rrmr[,2]< 80.007)),which(floor(rrmr[,2])==80))*dt)
#t10<-min(intersect(intersect(which(rrmr[,3]>79.99),which(rrmr[,3]< 80.002)),which(floor(rrmr[,3])==80))*dt)
#t20<-min(intersect(intersect(which(rrmr[,4]>79.99),which(rrmr[,4]< 80.007)),which(floor(rrmr[,4])==80))*dt)
# First crossing of 50% resistant per run; min(which()) on no match yields Inf
# and an NA time.
t05 <- rrmr[min(which(rrmr[,2] > 50)),"time"]
t10 <- rrmr[min(which(rrmr[,3] > 50)),"time"]
t20 <- rrmr[min(which(rrmr[,4] > 50)),"time"]
# Rescale time by each run's time-to-dominance; the 05 run is excluded from the
# normalised plot because t05 is unavailable (see trailing comment).
mm20_2<-mm20;mm10_2<-mm10;mm05_2<-mm05
mm20_2$t<-mm20_2$t/t20;mm10_2$t<-mm10_2$t/t10;# mm05_2$t<-mm05_2$t/t05 no t05 at moment
mega_2<-as.data.frame(rbind(mm20_2,mm10_2)) #,mm05_2));
colnames(mega_2)<-c("x","y","z","time","omega")
mega_2$level = c(seq(21,25,1),seq(16,20,1),seq(11,15,1),seq(6,10,1),seq(1,5,1))
g<-ggplot(mega_2,aes(x=time,y=z,colour=factor(omega))) + geom_line(size=2) + facet_wrap( ~ level, ncol=5, scales = "free") + scale_colour_manual("Abx\nLevel",breaks=c(20,10,5),labels=c("0.2","0.1","0.05"),values = cbPalette)
g<-g + scale_x_continuous("Time to full resistance") + scale_y_continuous("Proportion at this level")+ theme(axis.text.x = element_text(angle = 90, hjust = 1))
g
setwd(plots)
ggsave("mega_normtodom.pdf")
# Time to full resistance and fitness
# Use facet cell 5 as the "full resistance" marker; thresholds differ per run
# (0.5 vs 0.4) — NOTE(review): confirm that is intentional.
w<-which(mega$level==5);
t05a<-min(intersect(which(mega[w,"omega"]==omega1),which(mega[w,"z"]>0.5)))
t10a<-min(intersect(which(mega[w,"omega"]==omega2),which(mega[w,"z"]>0.4)))
t20a<-min(intersect(which(mega[w,"omega"]==omega3),which(mega[w,"z"]>0.4)))
#t05<-mega[w[t05a],"time"];
t10<-mega[w[t10a],"time"]; t20<-mega[w[t20a],"time"]
mm20_2<-mm20;mm10_2<-mm10;#mm05_2<-mm05
mm20_2$t<-mm20_2$t/t20;mm10_2$t<-mm10_2$t/t10;#mm05_2$t<-mm05_2$t/t05
# NOTE(review): mm05_2 still holds the copy from the previous section with an
# un-normalised time axis, yet is rbound here — verify this is intended.
mega_2<-as.data.frame(rbind(mm20_2,mm10_2,mm05_2)); colnames(mega_2)<-c("x","y","z","time","omega")
mega_2$level = c(seq(21,25,1),seq(16,20,1),seq(11,15,1),seq(6,10,1),seq(1,5,1))
theme_set(theme_bw(base_size = 12));
g<-ggplot(mega_2,aes(x=time,y=z,colour=factor(omega))) + geom_line(size=2) + facet_wrap( ~ level, ncol=5, scales = "free") + scale_colour_manual("Abx\nLevel",breaks=c(omega1, omega2, omega3),values = cbPalette)
g<-g + scale_x_continuous("Time to full resistance") + scale_y_continuous("Proportion at this level")+ theme(axis.text.x = element_text(angle = 90, hjust = 1))
g
setwd(plots)
ggsave("mega_normtofullR.pdf",width=12,height=8)
# Plot proportions in each fitness / resistance level over time
# Collapse each run's 2-D array at thinned time points into fitness marginals
# (colSums) and resistance marginals (rowSums); last column tags the run.
pp<-c();
ll<-dim(Sv20$M)[3];
ss<-seq(0,ll,1/dt) # Don't want to grab all
for(i in 2:length(ss)){
pp220<-c(ss[i]*dt,colSums(Sv20$M[,,ss[i]]), rowSums(Sv20$M[,,ss[i]]),20)
pp210<-c(ss[i]*dt,colSums(Sv10$M[,,ss[i]]), rowSums(Sv10$M[,,ss[i]]),10)
pp205<-c(ss[i]*dt,colSums(Sv05$M[,,ss[i]]), rowSums(Sv05$M[,,ss[i]]),5)
pp<-rbind(pp,pp220,pp210,pp205);
}
# Column names assume nfit = mres = 5 (5 fitness + 5 resistance marginals).
pp<-as.data.frame(pp);colnames(pp)<-c("t","Fitness level 1\n(low)","Fitness level 2","Fitness level 3","Fitness level 4","Fitness level 5\n(high)","Res. level 1\n(low)","Res. level 2","Res. level 3","Res. level 4","Res. level 5\n(high)","w");
pp2<-melt(pp,id.vars = c("t","w"))
theme_set(theme_bw(base_size = 34));
g<-ggplot(pp2,aes(x=t,y=value,colour=factor(w))) + facet_wrap(~variable,ncol=5) + geom_line(size=2) + theme(axis.text.x = element_text(angle = 90, hjust = 1))
g<-g + scale_x_continuous("Generation") + scale_y_continuous("Proportion") + scale_colour_manual(values=cbPalette,"Abx\nlevel",labels=c(0.05,0.1,0.2))
g # Suggests that although v similar proportions in the most fit fewer are in the higher resistance levels with low level antibiotics use. In fact with this model
# the same rate of selection for no cost mutations is seen whether there is high or low antibiotic use
setwd(plots)
ggsave("f&r_overtime.pdf",width=18,height=12)
#### Compare with and without fitness and resistance levels.
### Range of omega (antibiotic levels) to run the homogeneous model over
setwd(home)
omegav <- c(omega1,omega2,omega3)
# Read the parameter table (column 1 = name, column 2 = value) and assign
# each pair into the global environment.
para<-read.csv("data/para_ecoli.csv",header=TRUE,check.names=F,stringsAsFactors = FALSE)[,1:2]
for(i in seq_len(nrow(para))){assign(para[i,1],para[i,2])}
# Correct the per-generation rates for the timestep
mu<-mu*dt;beta<-beta*dt;eps<-eps*dt
m<-dim(acqdistn)[1]; vs<-seq(1/m,1,1/m);
# Mean fitness of the acquisition distribution (weights vf), used below to
# scale the resistant force of infection
assign("f",sum(colSums(acqdistn)*vf))
## SAME as writeup_ecoli
#kr = 0.4; f = 0.6 ## 40% cost to both
bigall<-c();
endp<-200*1/dt # number of timesteps (200 generations at step dt)
# Preallocate the state trajectories; index 1 holds the initial conditions
U<-matrix(0,1,endp); S<-matrix(0,1,endp); R<-matrix(0,1,endp);
U[1]<-iniv[1]; S[1]<-iniv[2]; R[1]<-iniv[3];
lambdasv<-matrix(0,1,endp);lambdarv<-matrix(0,1,endp);
lambdasv[1] = beta * S[1]/sum(iniv); lambdarv[1] = sum(colSums(acqdistn*seq(1/nfit,1,1/nfit))) * beta * R[1]/sum(iniv) # function outputs just meanfit when all popns 0
setwd(home) # might have para in different place for different models
# Run the homogeneous (no-diversity) U/S/R model once per antibiotic level.
# Indices >= 2 of U/S/R/lambda*v are overwritten on each pass, while index 1
# keeps the shared initial conditions, so every omega run starts identically.
for(j in 1:length(omegav)){
assign("omega",omegav[j])
for(i in 1:endp){
lambdas=lambdasv[i];lambdar=lambdarv[i];
# NEW Dynamics
# Uninfected replenished by clearance (mu); infection saturates in U via kk
U[i+1] = U[i] + mu*(S[i]+R[i]) - (lambdas+lambdar)*(U[i]/(U[i] + kk))
S[i+1] = S[i] + lambdas*(U[i]/(U[i] + kk)) - mu*S[i] - eps * S[i]
R[i+1] = R[i] + lambdar*(U[i]/(U[i] + kk)) - mu*R[i] + eps * S[i]
# Forces of infection for the next step: sensitive transmission falls
# linearly in omega; resistant falls in omega/20 and is scaled by mean fitness f
lambdasv[i+1] = max(0,(1-omega)/1) * beta * S[i+1] / ( S[i+1] + R[i+1] );
lambdarv[i+1] = f * max(0,(20-omega)/20) * beta * R[i+1] / ( S[i+1] + R[i+1] ); # resistant strain has an MIC of 6
# NOTE(review): the comment above says "MIC of 6" but the code divides by 20
# -- confirm which value is intended.
# NOTE(review): at i == endp the [i+1] writes land one past the preallocated
# length; R silently extends the vectors (dropping the 1-row matrix dim),
# which the cbind below relies on (seq(0,endp) has endp+1 elements).
}
all<-as.data.frame(cbind(seq(0,endp,1)*dt,U,S,R,omega)); colnames(all)<-c("time","U","Susceptible","Resistant","w")
bigall<-rbind(bigall,all)
}
# Combine the homogeneous-model runs (allm, with = 0) with the diversity-model
# results rrm2 computed earlier in the script (with = 1), aligned for plotting.
allm<-melt(bigall[,c("time","Susceptible","Resistant","w")], id.vars=c("w","time"))
allm$nw = allm$w
theme_set(theme_bw(base_size = 34))
colnames(rrm2)<-c("time","variable","w","value")
rrm2n<-rrm2[,c("w","time","variable","value")];
rrm2n$with<-1; rrm2n$nw<-0
# Map the diversity runs' w codes ("05"/"10"/"20") to the numeric omega used
# for facetting, read from fixed rows of allm.
# NOTE(review): rows 12000 / 4000 / 1 assume a fixed length and omega ordering
# of allm (depends on endp and omegav) -- revisit if either changes.
rrm2n[which(rrm2n$w=="05"),"nw"]=allm[12000,"nw"];
rrm2n[which(rrm2n$w=="10"),"nw"]<-allm[4000,"nw"];
rrm2n[which(rrm2n$w=="20"),"nw"]<-allm[1,"nw"]
allm$with<-0; allm$nw<-allm$w;
allmn<-rbind(allm,rrm2n)
# Row index of the homogeneous-model rows only (note: reuses the name `w`,
# which earlier held abx levels)
w<-which(allmn$with == 0)
# Plot the homogeneous model alone (rows with `with == 0`), facetted by omega
p<-ggplot(allmn[w,],aes(x=time,y=value,colour=variable,linetype=factor(with)))+geom_line(size=2) +
scale_x_continuous("Time steps",lim=c(0,endp*dt))
p<-p+scale_colour_manual("Sub-\npopulation",breaks=c("Susceptible","Resistant"), values = c("blue","red")) +
scale_y_continuous("Percentage of population", limits = c(0,100)) + facet_wrap( ~ nw) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
# Linetype legend suppressed here (only one diversity status is shown)
p<-p + scale_linetype_discrete("With\ndiversity",breaks=c(0,1),labels=c("None","With diversity")) + theme(legend.position="none")
p
setwd(plots)
ggsave("Withoutdiversity.pdf",width=12,height=7)
# Same plot but with both models overlaid (solid vs dashed by `with`),
# saved in three variants: with legend, without legend, and zoomed y-axis.
p<-ggplot(allmn,aes(x=time,y=value,colour=variable,linetype=factor(with)))+geom_line(size=2) +
scale_x_continuous("Time steps",lim=c(0,endp*dt))
p<-p+scale_colour_manual("Sub-\npopulation",breaks=c("Susceptible","Resistant"), values = c("blue","red")) +
scale_y_continuous("Percentage of population", limits = c(0,100)) + facet_wrap( ~ nw) +
theme(axis.text.x = element_text(angle = 90, hjust = 1))
p<-p + scale_linetype_discrete("With\ndiversity",breaks=c(0,1),labels=c("None","With diversity"))
p
setwd(plots)
ggsave("WithnWithoutdiversity.pdf",width=12,height=7)
p + theme(legend.position="none")
ggsave("WithnWithoutdiversity_nolegend.pdf",width=12,height=7)
# Re-specifying scale_y_continuous replaces the earlier y scale (ggplot2
# emits a "scale already present" message; the zoomed plot is still correct)
p + scale_y_continuous("Percentage of population",lim=c(0,10))
ggsave("WithnWithoutdiversity_zoom.pdf",width=12,height=7)
## Plot like in Gulberg
# NOTE(review): hard-coded rows 7501:15000 assume a fixed length and
# variable ordering of pp2 (second half of the melted data) -- revisit if
# the sampling in the pp build changes.
pp2n<-pp2[7501:15000,]
# Rows for abx level 5 at five generation snapshots (t = 100..500)
w<-intersect(which(pp2n$w==5),c(which(pp2n$t==100),which(pp2n$t==200),which(pp2n$t==300),which(pp2n$t==400),which(pp2n$t==500)))
# Quick look before the dodged version below
ggplot(pp2n[w,], aes(x=t, y= value,colour=factor(variable))) + geom_point(aes(shape = factor(variable)),size=5)
# Dodge overlapping points horizontally: each variable gets an offset in
# [-width, +width] around its true time, spaced evenly by its position in
# the set of unique variables. Equivalent to assigning adj.time level by
# level in a loop, done here in a single vectorised data.table update.
library(data.table)
drrm2n <- data.table(pp2n[w, ])
factors <- unique(drrm2n[, variable])
nfactors <- length(factors)
width <- 25
drrm2n[, adj.time := t - width + (match(variable, factors) - 1) * (2 * width) / (nfactors - 1)]
# Build and save the Gulberg-style snapshot plot
g <- ggplot(drrm2n, aes(x = adj.time, y = value, colour = factor(variable))) +
  geom_point(aes(shape = factor(variable)), size = 5) +
  theme(legend.position = "top") +
  scale_x_continuous("Generations") +
  scale_y_continuous("Proportion") +
  scale_colour_discrete("") +
  scale_shape_discrete("")
setwd(plots)
ggsave("Gulberg5d.pdf", width = 16, height = 8)
#############********************************************************************************************
#############********************************************************************************************
#############********************************** NEW ACQDISTN *********************************************########
#### change the initial conditions
#############********************************************************************************************
#############********************************************************************************************
# 2: normalise but mean at 0.2
# Bivariate normal acquisition distribution centred at (0.2, 0.2) on the
# existing x/y grid (dmvnorm comes from mvtnorm, loaded earlier).
# NOTE(review): this `f` overwrites the scalar mean-fitness `f` assigned
# earlier for the no-diversity model -- confirm that model is not rerun after
# this point.
f <- function(x, y) dmvnorm(cbind(x, y), mean = c(0.2, 0.2),sigma = diag(2)/20)
z <- outer(x, y, FUN = f); z <- z/sum(z) # Generate and normalise
dev.off(); persp(x, y, z, theta = -30, phi = 30, ticktype = "detailed")
acqdistn<-z
# Project helper: rerun/plot the model under this acquisition distribution (id 2)
plot_diff_acd_output(acqdistn,plots,2)
# 3: flatten to all low resistance but high fitness
f <- function(x, y) dmvnorm(cbind(x, y), mean = c(0.01, 0.01),sigma = diag(2)/20)
z <- outer(x, y, FUN = f); z <- z/sum(z) # Generate and normalise
dev.off(); persp(x, y, z, theta = -30, phi = 30, ticktype = "detailed")
zn<-z
# NOTE(review): assigning the length-5 colSums(z) into zn[1:5,] recycles it
# column-major, i.e. every COLUMN of zn becomes colSums(z). Since z is
# symmetric here (equal means, diagonal sigma) colSums == rowSums, as the
# original comment notes -- but confirm the column-wise fill is intended.
zn[1:5,]<-colSums(z) # Doesn't matter if col or row Sums.
acqdistn<-zn/sum(zn)
plot_diff_acd_output(acqdistn,plots,3)
# 4: flatten to all low resistance but high fitness
f <- function(x, y) dmvnorm(cbind(x, y), mean = c(0.01, 0.01),sigma = diag(2)/20)
z <- outer(x, y, FUN = f); z <- z/sum(z) # Generate and normalise
dev.off(); persp(x, y, z, theta = -30, phi = 30, ticktype = "detailed")
zn<-z
# As in variant 3, colSums(z) is recycled column-major into every column of
# zn; the t() below then flips the flattened axis relative to variant 3.
zn[,1:5]<-colSums(z) # Doesn't matter if col or row Sums.
acqdistn<-t(zn)/sum(zn)
plot_diff_acd_output(acqdistn,plots,4)
# 5: EG: start at 0.8
# Build a 5x5 distribution on a coarse grid, then embed it in a finer
# 10x10 fitness/resistance grid.
x=y=seq(0.2,1,0.2)
f <- function(x, y) dmvnorm(cbind(x, y), mean = c(0.2, 0.2),sigma = diag(2)/20)
z <- outer(x, y, FUN = f); z <- z/sum(z) # Generate and normalise
dev.off(); persp(x, y, z, theta = -30, phi = 30, ticktype = "detailed")
nfit = 10; mres = 10;
# Array of distribution of fitness and resistance c[resistance, fitness]
M0 <- array(0,c(mres,nfit,10))
aa<-M0[,,1]
# Embed z into the lower-left 5x5 corner with the row order reversed
# (z row 1 -> aa row 5, ..., z row 5 -> aa row 1); z already sums to 1,
# so aa needs no renormalisation.
aa[5,1:5]<-z[1,];aa[4,1:5]<-z[2,];aa[3,1:5]<-z[3,];aa[2,1:5]<-z[4,];aa[1,1:5]<-z[5,]
acqdistn<-aa
# Reset the grid to match the 10x10 array for plotting
x=y=seq(0.1,1,0.1)
dev.off(); persp(x, y, aa, theta = -30, phi = 30, ticktype = "detailed")
plot_diff_acd_output(acqdistn,plots,5)
# 6: use standard initial distribution but start at 0.8
# As variant 5 but on a 25x25 grid, placing the mass at high fitness /
# high resistance (~0.8 on the unit grid).
x=y=seq(0.2,1,0.2)
f <- function(x, y) dmvnorm(cbind(x, y), mean = c(0.6, 0.6),sigma = diag(2)/20)
z <- outer(x, y, FUN = f); z <- z/sum(z) # Generate and normalise
dev.off(); persp(x, y, z, theta = -30, phi = 30, ticktype = "detailed")
nfit = 25; mres = 25;
# Array of distribution of fitness and resistance c[resistance, fitness]
M0 <- array(0,c(mres,nfit,10))
aa<-M0[,,1]
# Embed z at rows 20-24, columns 21-25 (top-right of the grid); z already
# sums to 1, so aa needs no renormalisation.
aa[20,21:25]<-z[1,];aa[21,21:25]<-z[2,];aa[22,21:25]<-z[3,];aa[23,21:25]<-z[4,];aa[24,21:25]<-z[5,]
acqdistn<-aa
# Reset the grid to match the 25x25 array for plotting
x=y=seq(1/25,1,1/25)
dev.off(); persp(x, y, aa, theta = -30, phi = 30, ticktype = "detailed")
plot_diff_acd_output(acqdistn,plots,6)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.