content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
#
# renderMutation
# Generating SVG elements of mutation in
# gene and protein
#
# obj: object carrying $mutation.data (data frame with Transcript, Symbol,
#      VariantPos and Tag columns), $transcript.coding (character vector of
#      valid coding transcripts) and $transcript.zoom (data frame with
#      type/length/transcript/symbol columns). Relies on the sibling helper
#      gene2protein() defined elsewhere in the package.
# reverse: kept for interface compatibility; not used in the visible body.
#
# Fixes vs. previous revision: seq_along() instead of 1:length() (safe on
# empty input), TRUE instead of T, vapply() for a type-stable integer
# result, and the "defalut" typo in the warning message.
renderMutation <- function(obj, reverse = TRUE) {
  # Remap any transcript not in the known coding set onto a coding
  # transcript of the same gene symbol (longest first after sorting).
  trans <- as.character(unique(obj$mutation.data$Transcript))
  if (sum(!trans %in% obj$transcript.coding) > 0) {
    message("[WARNING] invalid transcript, use default transcripts instead")
    sub <- obj$transcript.zoom[grep("coding", obj$transcript.zoom$type), ]
    sub <- sub[order(sub$length, decreasing = TRUE), ]
    idx <- !obj$mutation.data$Transcript %in% obj$transcript.coding
    sub.symbol <- obj$mutation.data$Symbol[idx]
    obj$mutation.data$Transcript[idx] <- as.character(sub$transcript[match(sub.symbol, sub$symbol)])
  }
  # Convert genomic coordinates to protein coordinates via package helper.
  pos.pro <- gene2protein(obj)
  obj$mutation.data$convert <- pos.pro
  # Per-transcript mutation profile.
  mark.transcript <- unique(obj$mutation.data$Transcript)
  mutation.gene.svg <- lapply(seq_along(mark.transcript), function(x) {
    sub.name <- mark.transcript[x]
    sub.info <- obj$mutation.data[which(obj$mutation.data$Transcript == sub.name), ]
    sub.info <- sub.info[order(sub.info$convert), ]
    pos.tmp <- table(sub.info$VariantPos)
    # Number of distinct mutation tags observed at each variant position.
    sub.mut.type <- vapply(seq_along(pos.tmp), function(xx) {
      tmp <- sub.info[which(sub.info$VariantPos == names(pos.tmp)[xx]), ]
      length(table(tmp$Tag))
    }, integer(1))
    pos.tmp <- data.frame(
      pos = names(pos.tmp),
      freq = as.numeric(pos.tmp),
      type = sub.mut.type
    )
    # dynamic position
    # NOTE(review): the result of this lapply() is discarded and the
    # function returns `obj` unmodified by this step -- the SVG-generation
    # part appears unfinished; confirm against upstream history.
  })
  return(obj)
}
|
/R/renderMutation.R
|
no_license
|
wangdi2014/gfplots
|
R
| false
| false
| 1,493
|
r
|
#
# renderMutation
# Generating SVG elements of mutation in
# gene and protein
#
# obj: object carrying $mutation.data (data frame with Transcript, Symbol,
#      VariantPos and Tag columns), $transcript.coding (character vector of
#      valid coding transcripts) and $transcript.zoom (data frame with
#      type/length/transcript/symbol columns). Relies on the sibling helper
#      gene2protein() defined elsewhere in the package.
# reverse: kept for interface compatibility; not used in the visible body.
#
# Fixes vs. previous revision: seq_along() instead of 1:length() (safe on
# empty input), TRUE instead of T, vapply() for a type-stable integer
# result, and the "defalut" typo in the warning message.
renderMutation <- function(obj, reverse = TRUE) {
  # Remap any transcript not in the known coding set onto a coding
  # transcript of the same gene symbol (longest first after sorting).
  trans <- as.character(unique(obj$mutation.data$Transcript))
  if (sum(!trans %in% obj$transcript.coding) > 0) {
    message("[WARNING] invalid transcript, use default transcripts instead")
    sub <- obj$transcript.zoom[grep("coding", obj$transcript.zoom$type), ]
    sub <- sub[order(sub$length, decreasing = TRUE), ]
    idx <- !obj$mutation.data$Transcript %in% obj$transcript.coding
    sub.symbol <- obj$mutation.data$Symbol[idx]
    obj$mutation.data$Transcript[idx] <- as.character(sub$transcript[match(sub.symbol, sub$symbol)])
  }
  # Convert genomic coordinates to protein coordinates via package helper.
  pos.pro <- gene2protein(obj)
  obj$mutation.data$convert <- pos.pro
  # Per-transcript mutation profile.
  mark.transcript <- unique(obj$mutation.data$Transcript)
  mutation.gene.svg <- lapply(seq_along(mark.transcript), function(x) {
    sub.name <- mark.transcript[x]
    sub.info <- obj$mutation.data[which(obj$mutation.data$Transcript == sub.name), ]
    sub.info <- sub.info[order(sub.info$convert), ]
    pos.tmp <- table(sub.info$VariantPos)
    # Number of distinct mutation tags observed at each variant position.
    sub.mut.type <- vapply(seq_along(pos.tmp), function(xx) {
      tmp <- sub.info[which(sub.info$VariantPos == names(pos.tmp)[xx]), ]
      length(table(tmp$Tag))
    }, integer(1))
    pos.tmp <- data.frame(
      pos = names(pos.tmp),
      freq = as.numeric(pos.tmp),
      type = sub.mut.type
    )
    # dynamic position
    # NOTE(review): the result of this lapply() is discarded and the
    # function returns `obj` unmodified by this step -- the SVG-generation
    # part appears unfinished; confirm against upstream history.
  })
  return(obj)
}
|
#' Create a local lazy tibble
#'
#' Useful for testing SQL generation without an active database
#' connection. See [simulate_dbi()] for the list of available
#' database simulations.
#'
#' @keywords internal
#' @export
#' @examples
#' library(dplyr)
#' df <- data.frame(x = 1, y = 2)
#'
#' df_sqlite <- tbl_lazy(df, con = simulate_sqlite())
#' df_sqlite %>% summarise(x = sd(x, na.rm = TRUE)) %>% show_query()
tbl_lazy <- function(df, con = NULL, src = NULL) {
  # `src` is the deprecated spelling of `con`; honour it but warn.
  if (!is.null(src)) {
    warn("`src` is deprecated; please use `con` instead")
    con <- src
  }
  # Fall back to the connection of the current SQL context, and failing
  # that to a generic DBI simulation.
  con <- con %||% sql_current_con() %||% simulate_dbi()
  # The S3 dispatch class is derived from the (possibly simulated)
  # connection's primary class.
  tbl_class <- purrr::compact(c(class(con)[[1]], "lazy"))
  dplyr::make_tbl(tbl_class, ops = op_base_local(df), src = src_dbi(con))
}
# Register the S3 class hierarchy with S4 so tbl_lazy objects can take
# part in S4 method dispatch.
setOldClass(c("tbl_lazy", "tbl"))
#' @export
#' @rdname tbl_lazy
lazy_frame <- function(..., con = NULL, src = NULL) {
  con <- con %||% sql_current_con() %||% simulate_dbi()
  # Columns are given directly via ... and wrapped in a tibble.
  tbl_lazy(tibble(...), con = con, src = src)
}
#' @export
dimnames.tbl_lazy <- function(x) {
  # No row names; column names come from the lazy operation tree.
  list(NULL, op_vars(x$ops))
}
#' @export
dim.tbl_lazy <- function(x) {
  # The row count is unknown until the query is executed, hence NA.
  c(NA, length(op_vars(x$ops)))
}
#' @export
print.tbl_lazy <- function(x, ...) {
  # Printing a lazy tbl shows the SQL it would generate.
  show_query(x)
}
#' @export
as.data.frame.tbl_lazy <- function(x, row.names, optional, ...) {
  # Local coercion is deliberately unsupported for lazy tbls.
  stop("Can not coerce `tbl_lazy` to data.frame", call. = FALSE)
}
#' @importFrom dplyr same_src
#' @export
same_src.tbl_lazy <- function(x, y) {
  # Any two lazy tbls are considered to share a source.
  inherits(y, "tbl_lazy")
}
#' @importFrom dplyr tbl_vars
#' @export
tbl_vars.tbl_lazy <- function(x) {
  op_vars(x$ops)
}
#' @importFrom dplyr groups
#' @export
groups.tbl_lazy <- function(x) {
  # Group variables as a list of symbols, mirroring dplyr's API.
  lapply(group_vars(x), as.name)
}
# Manually registered in zzz.R
group_by_drop_default.tbl_lazy <- function(x) {
  TRUE
}
#' @importFrom dplyr group_vars
#' @export
group_vars.tbl_lazy <- function(x) {
  op_grps(x$ops)
}
|
/R/tbl-lazy.R
|
permissive
|
edgararuiz/dbplyr
|
R
| false
| false
| 1,889
|
r
|
# NOTE(review): this span duplicates the tbl-lazy.R content embedded
# earlier in this dataset dump (the dump stores each file twice).
#' Create a local lazy tibble
#'
#' These functions are useful for testing SQL generation without having to
#' have an active database connection. See [simulate_dbi()] for a list
#' available database simulations.
#'
#' @keywords internal
#' @export
#' @examples
#' library(dplyr)
#' df <- data.frame(x = 1, y = 2)
#'
#' df_sqlite <- tbl_lazy(df, con = simulate_sqlite())
#' df_sqlite %>% summarise(x = sd(x, na.rm = TRUE)) %>% show_query()
tbl_lazy <- function(df, con = NULL, src = NULL) {
# `src` is the deprecated spelling of `con`; honour it but warn.
if (!is.null(src)) {
warn("`src` is deprecated; please use `con` instead")
con <- src
}
# Fall back to the current SQL context's connection, then to a generic
# DBI simulation.
con <- con %||% sql_current_con() %||% simulate_dbi()
# Dispatch class comes from the (possibly simulated) connection.
subclass <- class(con)[[1]]
dplyr::make_tbl(
purrr::compact(c(subclass, "lazy")),
ops = op_base_local(df),
src = src_dbi(con)
)
}
# Register the S3 class hierarchy with S4 for S4 method dispatch.
setOldClass(c("tbl_lazy", "tbl"))
#' @export
#' @rdname tbl_lazy
lazy_frame <- function(..., con = NULL, src = NULL) {
con <- con %||% sql_current_con() %||% simulate_dbi()
# Columns are given directly via ... and wrapped in a tibble.
tbl_lazy(tibble(...), con = con, src = src)
}
#' @export
dimnames.tbl_lazy <- function(x) {
# No row names; column names come from the lazy operation tree.
list(NULL, op_vars(x$ops))
}
#' @export
dim.tbl_lazy <- function(x) {
# Row count is unknown until the query runs, hence NA.
c(NA, length(op_vars(x$ops)))
}
#' @export
print.tbl_lazy <- function(x, ...) {
# Printing a lazy tbl shows the SQL it would generate.
show_query(x)
}
#' @export
as.data.frame.tbl_lazy <- function(x, row.names, optional, ...) {
# Local coercion is deliberately unsupported.
stop("Can not coerce `tbl_lazy` to data.frame", call. = FALSE)
}
#' @importFrom dplyr same_src
#' @export
same_src.tbl_lazy <- function(x, y) {
# Any two lazy tbls are considered to share a source.
inherits(y, "tbl_lazy")
}
#' @importFrom dplyr tbl_vars
#' @export
tbl_vars.tbl_lazy <- function(x) {
op_vars(x$ops)
}
#' @importFrom dplyr groups
#' @export
groups.tbl_lazy <- function(x) {
# Group variables as a list of symbols, mirroring dplyr's API.
lapply(group_vars(x), as.name)
}
# Manually registered in zzz.R
group_by_drop_default.tbl_lazy <- function(x) {
TRUE
}
#' @importFrom dplyr group_vars
#' @export
group_vars.tbl_lazy <- function(x) {
op_grps(x$ops)
}
|
## code to place a missing extant taxon into a tree using ML or REML on continuous data
## written by Liam J. Revell 2014, 2018
##
## tree: object of class "phylo".
## X: matrix (or vector coercible to matrix) of continuous traits whose
##    rownames include exactly the taxa of the tree plus the taxon to place.
## ...: optional arguments read via hasArg(): method ("ML"/"REML"),
##      search ("heuristic"/"exhaustive"), plot, quiet, rotate,
##      constraint (exhaustive search only).
locate.yeti <- function(tree, X, ...) {
  if (!inherits(tree, "phylo")) stop("tree should be object of class \"phylo\".")
  method <- if (hasArg(method)) list(...)$method else "ML"
  search <- if (hasArg(search)) list(...)$search else "heuristic"
  plot <- if (hasArg(plot)) list(...)$plot else FALSE
  quiet <- if (hasArg(quiet)) list(...)$quiet else FALSE
  # PC rotation defaults on only for ML (simplified from
  # `if(method=="ML") TRUE else FALSE`).
  rotate <- if (hasArg(rotate)) list(...)$rotate else method == "ML"
  root.node <- Ntip(tree) + 1
  # A user constraint (set of candidate attachment nodes) is only honoured
  # for the exhaustive search; otherwise all edges plus the root are used.
  if (hasArg(constraint)) {
    if (search == "exhaustive") constraint <- list(...)$constraint
    else {
      cat("constraint only works with search==\"exhaustive\"\n")
      constraint <- c(root.node, tree$edge[, 2])
    }
  } else constraint <- c(root.node, tree$edge[, 2])
  if (!is.matrix(X)) X <- as.matrix(X)
  # The taxon to place is the one present in X but absent from the tree.
  tip <- setdiff(rownames(X), tree$tip.label)
  if (method == "ML") {
    mltree <- yetiML(tree, X, quiet, tip, root.node, constraint, plot, search, rotate)
  } else if (method == "REML") {
    mltree <- yetiREML(tree, X, quiet, tip, root.node, constraint, plot, search)
  } else {
    # Bug fix: previously the message was cat()ed to stdout and stop() was
    # raised with an empty condition message; attach the message to the
    # error condition itself so callers/tryCatch can see it.
    stop(paste0("Do not recognize method ", method, "."), call. = FALSE)
  }
  mltree
}
# yetiML: internal workhorse for locate.yeti(), method="ML".
# Finds the best attachment edge (and position along it) for `tip` by
# maximizing a multivariate-normal likelihood (via dmnorm) over candidate
# trees built with bind.tip(). Depends on phytools helpers (phyl.pca,
# phyl.vcv, bind.tip, plotBranchbyTrait, getAncestors, nodeHeights,
# midpoint.root) available in the package namespace.
yetiML<-function(tree,X,quiet,tip,root.node,constraint,plot,search,rotate){
if(!quiet) cat(paste("Optimizing the phylogenetic position of ",tip," using ML. Please wait....\n",sep=""))
# Optionally rotate multivariate data onto phylogenetic principal
# components (centered by the phylogenetic means from phyl.vcv) so the
# per-dimension likelihoods below can be summed.
if(ncol(X)>1&&rotate){
pca<-phyl.pca(tree,X[tree$tip.label,])
obj<-phyl.vcv(X[tree$tip.label,],vcv(tree),1)
X<-(X-matrix(rep(obj$a[,1],nrow(X)),nrow(X),ncol(X),byrow=TRUE))%*%pca$Evec
}
# Heuristic search, stage 1: attach the tip at the midpoint of every edge
# (or at the root with zero-length position) and score each candidate.
if(search=="heuristic"){
trees<-list()
ee<-c(root.node,tree$edge[,2])
for(i in 1:length(ee)) trees[[i]]<-bind.tip(tree,tip,where=ee[i],position=if(ee[i]==root.node) 0 else 0.5*tree$edge.length[i-1])
class(trees)<-"multiPhylo"
# Log-likelihood of a candidate tree, summed over trait dimensions.
lik.edge<-function(tree,XX,rotate){
if(!rotate) XX<-phyl.pca(tree,XX[tree$tip.label,])$S
obj<-phyl.vcv(as.matrix(XX[tree$tip.label,]),vcv(tree),1)
ll<-vector()
for(i in 1:ncol(XX)) ll[i]<-sum(dmnorm(XX[tree$tip.label,i],mean=rep(obj$a[i,1],nrow(XX)),obj$C*obj$R[i,i],log=TRUE))
sum(ll)
}
logL<-sapply(trees,lik.edge,XX=X,rotate=rotate)
# Optional visualization: floor the lower half of the per-edge log
# likelihoods so the colour scale emphasizes the best edges.
if(plot){
ll<-logL[2:length(logL)]
ll[ll<=sort(ll,decreasing=TRUE)[ceiling(nrow(tree$edge)/2)]]<-sort(ll,decreasing=TRUE)[ceiling(nrow(tree$edge)/2)]
layout(matrix(c(1,2),2,1),heights=c(0.95,0.05))
plotBranchbyTrait(tree,ll,mode="edges",title="log(L)",show.tip.label=FALSE)
edgelabels(round(logL[2:length(logL)],1),cex=0.5)
plot.new()
text(paste("Note: logL <=",round(min(ll),2),"set to",round(min(ll),2),"for visualization only"),x=0.5,y=0.5)
}
edge<-ee[which(logL==max(logL))]
}
# Stage 2 objective: likelihood as a function of the attachment position
# along a given edge (or of a new root edge length when edge==rt).
lik.tree<-function(position,tip,tree,edge,XX,rt,rotate){
if(edge==rt) tree<-bind.tip(tree,tip,edge.length=position,where=edge)
else tree<-bind.tip(tree,tip,where=edge,position=position)
if(!rotate) XX<-phyl.pca(tree,XX[tree$tip.label,])$S
obj<-phyl.vcv(as.matrix(XX[tree$tip.label,]),vcv(tree),1)
ll<-vector()
for(i in 1:ncol(XX)) ll[i]<-sum(dmnorm(XX[tree$tip.label,i],mean=rep(obj$a[i,1],nrow(XX)),obj$C*obj$R[i,i],log=TRUE))
sum(ll)
}
# Candidate edges for stage 2: the stage-1 best edge plus its parent and
# children (heuristic), or every edge (exhaustive); intersected with any
# user constraint.
if(search=="heuristic"){
ee<-edge
if(edge!=root.node) ee<-c(ee,getAncestors(tree,node=edge,type="parent"))
if(edge>Ntip(tree)) ee<-c(ee,tree$edge[which(tree$edge[,1]==edge),2])
} else if(search=="exhaustive") ee<-c(root.node,tree$edge[,2])
ee<-intersect(ee,constraint)
fit<-vector(mode="list",length=length(ee))
# Optimize the attachment position along each candidate edge.
for(i in 1:length(ee)){
if(ee[i]==root.node) fit[[i]]<-optimize(lik.tree,interval=c(max(nodeHeights(tree)),10*max(nodeHeights(tree))),tip=tip,tree=tree,
edge=ee[i],XX=X,rt=root.node,rotate=rotate,maximum=TRUE)
else fit[[i]]<-optimize(lik.tree,interval=c(0,tree$edge.length[which(tree$edge[,2]==ee[i])]),tip=tip,tree=tree,edge=ee[i],
XX=X,rt=root.node,rotate=rotate,maximum=TRUE)
}
logL<-sapply(fit,function(x) x$objective)
if(search=="exhaustive"&&plot){
ll<-sapply(fit,function(x) x$objective)
ll<-ll[2:length(ll)]
ll[ll<=sort(ll,decreasing=TRUE)[ceiling(nrow(tree$edge)/2)]]<-sort(ll,decreasing=TRUE)[ceiling(nrow(tree$edge)/2)]
layout(matrix(c(1,2),2,1),heights=c(0.95,0.05))
plotBranchbyTrait(tree,ll,mode="edges",title="log(L)",show.tip.label=FALSE)
edgelabels(round(ll,1),cex=0.5)
plot.new()
text(paste("Note: logL <=",round(min(ll),2),"set to",round(min(ll),2),"for visualization only"),x=0.5,y=0.5)
}
# Keep the best fit; when the tip attaches at the root, re-root the
# resulting tree at its midpoint (midpoint.root).
fit<-fit[[which(logL==max(logL))]]
edge<-ee[which(logL==max(logL))]
mltree<-if(edge==root.node) midpoint.root(bind.tip(tree,tip,where=edge,edge.length=fit$maximum)) else bind.tip(tree,tip,where=edge,position=fit$maximum)
mltree$logL<-fit$objective
if(!quiet) cat("Done.\n")
mltree
}
# yetiREML: internal workhorse for locate.yeti(), method="REML".
# Same two-stage search as yetiML but scores candidate placements with a
# likelihood computed on phylogenetically independent contrasts (pic)
# rather than on the raw tip data. Flagged by the authors as not
# thoroughly tested (see banner below).
yetiREML<-function(tree,X,quiet,tip,root.node,constraint,plot,search){
if(!quiet){
cat("---------------------------------------------------------------\n")
cat("| **Warning: method=\"REML\" has not been thoroughly tested. |\n")
cat("| Use with caution.** |\n")
cat("---------------------------------------------------------------\n\n")
}
if(!quiet) cat(paste("Optimizing the phylogenetic position of ",tip," using REML. Please wait....\n",sep=""))
# Stage 1 (heuristic): attach the tip at each edge midpoint / the root and
# score candidates via contrasts.
if(search=="heuristic"){
trees<-list()
ee<-c(root.node,tree$edge[,2])
for(i in 1:length(ee)) trees[[i]]<-bind.tip(tree,tip,where=ee[i],position=if(ee[i]==root.node) 0 else 0.5*tree$edge.length[i-1])
class(trees)<-"multiPhylo"
# Contrast-based score: project contrasts onto the eigenvectors of their
# covariance and sum univariate normal log densities.
lik.edge<-function(tree,XX){
tree<-multi2di(tree)
YY<-apply(XX[tree$tip.label,],2,pic,phy=tree)
vcv<-t(YY)%*%YY/nrow(YY)
E<-eigen(vcv)$vectors
##a<-apply(XX,2,function(x,tree) ace(x,tree,type="continuous",method="pic")$ace[1],tree=tree)
##S<-(X-matrix(rep(a,nrow(X)),nrow(X),ncol(X),byrow=TRUE))%*%E
##ZZ<-apply(S,2,pic,phy=tree)
ZZ<-YY%*%E
vars<-diag(t(ZZ)%*%ZZ/nrow(ZZ))
ll<-vector()
for(i in 1:ncol(ZZ)) ll[i]<-sum(dnorm(ZZ[,i],mean=0,sd=sqrt(vars[i]),log=TRUE))
sum(ll)
}
logL<-sapply(trees,lik.edge,XX=X)
# Optional visualization with the lower half of scores floored.
if(plot){
ll<-logL[2:length(logL)]
ll[ll<=sort(ll,decreasing=TRUE)[ceiling(nrow(tree$edge)/2)]]<-sort(ll,decreasing=TRUE)[ceiling(nrow(tree$edge)/2)]
layout(matrix(c(1,2),2,1),heights=c(0.95,0.05))
plotBranchbyTrait(tree,ll,mode="edges",title="log(L)",show.tip.label=FALSE)
edgelabels(round(logL[2:length(logL)],1),cex=0.5)
plot.new()
text(paste("Note: logL <=",round(min(ll),2),"set to",round(min(ll),2),"for visualization only"),x=0.5,y=0.5)
}
edge<-ee[which(logL==max(logL))]
}
# Stage 2 objective: score of a given attachment position on an edge,
# using unscaled contrasts and a multivariate normal density (dmnorm).
lik.tree<-function(position,tip,tree,edge,XX,rt){
if(edge==rt) tree<-bind.tip(tree,tip,edge.length=position,where=edge)
else tree<-bind.tip(tree,tip,where=edge,position=position)
tree<-multi2di(tree)
YY<-apply(XX,2,pic,phy=tree,scaled=FALSE)
vcv<-t(YY)%*%YY/nrow(YY)
sum(dmnorm(YY,mean=rep(0,ncol(YY)),varcov=vcv,log=TRUE))
}
# Candidate edges for stage 2 (same scheme as yetiML).
if(search=="heuristic"){
ee<-edge
if(edge!=root.node) ee<-c(ee,getAncestors(tree,node=edge,type="parent"))
if(edge>Ntip(tree)) ee<-c(ee,tree$edge[which(tree$edge[,1]==edge),2])
} else if(search=="exhaustive") ee<-c(root.node,tree$edge[,2])
ee<-intersect(ee,constraint)
fit<-vector(mode="list",length=length(ee))
for(i in 1:length(ee)){
if(ee[i]==root.node) fit[[i]]<-optimize(lik.tree,interval=c(max(nodeHeights(tree)),10*max(nodeHeights(tree))),tip=tip,tree=tree,edge=ee[i],XX=X,rt=root.node,maximum=TRUE)
else fit[[i]]<-optimize(lik.tree,interval=c(0,tree$edge.length[which(tree$edge[,2]==ee[i])]),tip=tip,tree=tree,edge=ee[i],XX=X,rt=root.node,maximum=TRUE)
}
logL<-sapply(fit,function(x) x$objective)
if(search=="exhaustive"&&plot){
ll<-sapply(fit,function(x) x$objective)
ll<-ll[2:length(ll)]
ll[ll<=sort(ll,decreasing=TRUE)[ceiling(nrow(tree$edge)/2)]]<-sort(ll,decreasing=TRUE)[ceiling(nrow(tree$edge)/2)]
layout(matrix(c(1,2),2,1),heights=c(0.95,0.05))
plotBranchbyTrait(tree,ll,mode="edges",title="log(L)",show.tip.label=FALSE)
edgelabels(round(ll,1),cex=0.5)
plot.new()
text(paste("Note: logL <=",round(min(ll),2),"set to",round(min(ll),2),"for visualization only"),x=0.5,y=0.5)
}
# Keep the best fit; midpoint-root when the tip attaches at the root.
fit<-fit[[which(logL==max(logL))]]
edge<-ee[which(logL==max(logL))]
mltree<-if(edge==root.node) midpoint.root(bind.tip(tree,tip,where=edge,edge.length=fit$maximum)) else bind.tip(tree,tip,where=edge,position=fit$maximum)
mltree$logL<-fit$objective
if(!quiet) cat("Done.\n")
mltree
}
|
/R/locate.yeti.R
|
no_license
|
phamasaur/phytools
|
R
| false
| false
| 8,356
|
r
|
## code to place a missing extant taxon into a tree using ML or REML on continuous data
## written by Liam J. Revell 2014, 2018
##
## tree: object of class "phylo".
## X: matrix (or vector coercible to matrix) of continuous traits whose
##    rownames include exactly the taxa of the tree plus the taxon to place.
## ...: optional arguments read via hasArg(): method ("ML"/"REML"),
##      search ("heuristic"/"exhaustive"), plot, quiet, rotate,
##      constraint (exhaustive search only).
locate.yeti <- function(tree, X, ...) {
  if (!inherits(tree, "phylo")) stop("tree should be object of class \"phylo\".")
  method <- if (hasArg(method)) list(...)$method else "ML"
  search <- if (hasArg(search)) list(...)$search else "heuristic"
  plot <- if (hasArg(plot)) list(...)$plot else FALSE
  quiet <- if (hasArg(quiet)) list(...)$quiet else FALSE
  # PC rotation defaults on only for ML (simplified from
  # `if(method=="ML") TRUE else FALSE`).
  rotate <- if (hasArg(rotate)) list(...)$rotate else method == "ML"
  root.node <- Ntip(tree) + 1
  # A user constraint (set of candidate attachment nodes) is only honoured
  # for the exhaustive search; otherwise all edges plus the root are used.
  if (hasArg(constraint)) {
    if (search == "exhaustive") constraint <- list(...)$constraint
    else {
      cat("constraint only works with search==\"exhaustive\"\n")
      constraint <- c(root.node, tree$edge[, 2])
    }
  } else constraint <- c(root.node, tree$edge[, 2])
  if (!is.matrix(X)) X <- as.matrix(X)
  # The taxon to place is the one present in X but absent from the tree.
  tip <- setdiff(rownames(X), tree$tip.label)
  if (method == "ML") {
    mltree <- yetiML(tree, X, quiet, tip, root.node, constraint, plot, search, rotate)
  } else if (method == "REML") {
    mltree <- yetiREML(tree, X, quiet, tip, root.node, constraint, plot, search)
  } else {
    # Bug fix: previously the message was cat()ed to stdout and stop() was
    # raised with an empty condition message; attach the message to the
    # error condition itself so callers/tryCatch can see it.
    stop(paste0("Do not recognize method ", method, "."), call. = FALSE)
  }
  mltree
}
# NOTE(review): this span duplicates the yetiML/yetiREML definitions that
# appear earlier in this dataset dump (the dump stores each file twice).
# yetiML: internal workhorse for locate.yeti(), method="ML". Two-stage
# search: score the tip attached at every edge midpoint, then optimize the
# position along the best edge and its neighbours.
yetiML<-function(tree,X,quiet,tip,root.node,constraint,plot,search,rotate){
if(!quiet) cat(paste("Optimizing the phylogenetic position of ",tip," using ML. Please wait....\n",sep=""))
# Optional rotation onto phylogenetic principal components.
if(ncol(X)>1&&rotate){
pca<-phyl.pca(tree,X[tree$tip.label,])
obj<-phyl.vcv(X[tree$tip.label,],vcv(tree),1)
X<-(X-matrix(rep(obj$a[,1],nrow(X)),nrow(X),ncol(X),byrow=TRUE))%*%pca$Evec
}
# Stage 1: candidate trees with the tip at every edge midpoint / root.
if(search=="heuristic"){
trees<-list()
ee<-c(root.node,tree$edge[,2])
for(i in 1:length(ee)) trees[[i]]<-bind.tip(tree,tip,where=ee[i],position=if(ee[i]==root.node) 0 else 0.5*tree$edge.length[i-1])
class(trees)<-"multiPhylo"
# Multivariate-normal log-likelihood summed over trait dimensions.
lik.edge<-function(tree,XX,rotate){
if(!rotate) XX<-phyl.pca(tree,XX[tree$tip.label,])$S
obj<-phyl.vcv(as.matrix(XX[tree$tip.label,]),vcv(tree),1)
ll<-vector()
for(i in 1:ncol(XX)) ll[i]<-sum(dmnorm(XX[tree$tip.label,i],mean=rep(obj$a[i,1],nrow(XX)),obj$C*obj$R[i,i],log=TRUE))
sum(ll)
}
logL<-sapply(trees,lik.edge,XX=X,rotate=rotate)
if(plot){
ll<-logL[2:length(logL)]
ll[ll<=sort(ll,decreasing=TRUE)[ceiling(nrow(tree$edge)/2)]]<-sort(ll,decreasing=TRUE)[ceiling(nrow(tree$edge)/2)]
layout(matrix(c(1,2),2,1),heights=c(0.95,0.05))
plotBranchbyTrait(tree,ll,mode="edges",title="log(L)",show.tip.label=FALSE)
edgelabels(round(logL[2:length(logL)],1),cex=0.5)
plot.new()
text(paste("Note: logL <=",round(min(ll),2),"set to",round(min(ll),2),"for visualization only"),x=0.5,y=0.5)
}
edge<-ee[which(logL==max(logL))]
}
# Stage 2 objective: likelihood as a function of attachment position.
lik.tree<-function(position,tip,tree,edge,XX,rt,rotate){
if(edge==rt) tree<-bind.tip(tree,tip,edge.length=position,where=edge)
else tree<-bind.tip(tree,tip,where=edge,position=position)
if(!rotate) XX<-phyl.pca(tree,XX[tree$tip.label,])$S
obj<-phyl.vcv(as.matrix(XX[tree$tip.label,]),vcv(tree),1)
ll<-vector()
for(i in 1:ncol(XX)) ll[i]<-sum(dmnorm(XX[tree$tip.label,i],mean=rep(obj$a[i,1],nrow(XX)),obj$C*obj$R[i,i],log=TRUE))
sum(ll)
}
# Candidate edges: best edge plus neighbours, or all edges (exhaustive),
# intersected with any user constraint.
if(search=="heuristic"){
ee<-edge
if(edge!=root.node) ee<-c(ee,getAncestors(tree,node=edge,type="parent"))
if(edge>Ntip(tree)) ee<-c(ee,tree$edge[which(tree$edge[,1]==edge),2])
} else if(search=="exhaustive") ee<-c(root.node,tree$edge[,2])
ee<-intersect(ee,constraint)
fit<-vector(mode="list",length=length(ee))
for(i in 1:length(ee)){
if(ee[i]==root.node) fit[[i]]<-optimize(lik.tree,interval=c(max(nodeHeights(tree)),10*max(nodeHeights(tree))),tip=tip,tree=tree,
edge=ee[i],XX=X,rt=root.node,rotate=rotate,maximum=TRUE)
else fit[[i]]<-optimize(lik.tree,interval=c(0,tree$edge.length[which(tree$edge[,2]==ee[i])]),tip=tip,tree=tree,edge=ee[i],
XX=X,rt=root.node,rotate=rotate,maximum=TRUE)
}
logL<-sapply(fit,function(x) x$objective)
if(search=="exhaustive"&&plot){
ll<-sapply(fit,function(x) x$objective)
ll<-ll[2:length(ll)]
ll[ll<=sort(ll,decreasing=TRUE)[ceiling(nrow(tree$edge)/2)]]<-sort(ll,decreasing=TRUE)[ceiling(nrow(tree$edge)/2)]
layout(matrix(c(1,2),2,1),heights=c(0.95,0.05))
plotBranchbyTrait(tree,ll,mode="edges",title="log(L)",show.tip.label=FALSE)
edgelabels(round(ll,1),cex=0.5)
plot.new()
text(paste("Note: logL <=",round(min(ll),2),"set to",round(min(ll),2),"for visualization only"),x=0.5,y=0.5)
}
# Keep the best fit; midpoint-root when attaching at the root.
fit<-fit[[which(logL==max(logL))]]
edge<-ee[which(logL==max(logL))]
mltree<-if(edge==root.node) midpoint.root(bind.tip(tree,tip,where=edge,edge.length=fit$maximum)) else bind.tip(tree,tip,where=edge,position=fit$maximum)
mltree$logL<-fit$objective
if(!quiet) cat("Done.\n")
mltree
}
# yetiREML: same search as yetiML but scored with phylogenetically
# independent contrasts (pic); flagged by the authors as lightly tested.
yetiREML<-function(tree,X,quiet,tip,root.node,constraint,plot,search){
if(!quiet){
cat("---------------------------------------------------------------\n")
cat("| **Warning: method=\"REML\" has not been thoroughly tested. |\n")
cat("| Use with caution.** |\n")
cat("---------------------------------------------------------------\n\n")
}
if(!quiet) cat(paste("Optimizing the phylogenetic position of ",tip," using REML. Please wait....\n",sep=""))
if(search=="heuristic"){
trees<-list()
ee<-c(root.node,tree$edge[,2])
for(i in 1:length(ee)) trees[[i]]<-bind.tip(tree,tip,where=ee[i],position=if(ee[i]==root.node) 0 else 0.5*tree$edge.length[i-1])
class(trees)<-"multiPhylo"
# Contrast-based score via eigen-decomposition of contrast covariance.
lik.edge<-function(tree,XX){
tree<-multi2di(tree)
YY<-apply(XX[tree$tip.label,],2,pic,phy=tree)
vcv<-t(YY)%*%YY/nrow(YY)
E<-eigen(vcv)$vectors
##a<-apply(XX,2,function(x,tree) ace(x,tree,type="continuous",method="pic")$ace[1],tree=tree)
##S<-(X-matrix(rep(a,nrow(X)),nrow(X),ncol(X),byrow=TRUE))%*%E
##ZZ<-apply(S,2,pic,phy=tree)
ZZ<-YY%*%E
vars<-diag(t(ZZ)%*%ZZ/nrow(ZZ))
ll<-vector()
for(i in 1:ncol(ZZ)) ll[i]<-sum(dnorm(ZZ[,i],mean=0,sd=sqrt(vars[i]),log=TRUE))
sum(ll)
}
logL<-sapply(trees,lik.edge,XX=X)
if(plot){
ll<-logL[2:length(logL)]
ll[ll<=sort(ll,decreasing=TRUE)[ceiling(nrow(tree$edge)/2)]]<-sort(ll,decreasing=TRUE)[ceiling(nrow(tree$edge)/2)]
layout(matrix(c(1,2),2,1),heights=c(0.95,0.05))
plotBranchbyTrait(tree,ll,mode="edges",title="log(L)",show.tip.label=FALSE)
edgelabels(round(logL[2:length(logL)],1),cex=0.5)
plot.new()
text(paste("Note: logL <=",round(min(ll),2),"set to",round(min(ll),2),"for visualization only"),x=0.5,y=0.5)
}
edge<-ee[which(logL==max(logL))]
}
# Stage 2 objective on unscaled contrasts.
lik.tree<-function(position,tip,tree,edge,XX,rt){
if(edge==rt) tree<-bind.tip(tree,tip,edge.length=position,where=edge)
else tree<-bind.tip(tree,tip,where=edge,position=position)
tree<-multi2di(tree)
YY<-apply(XX,2,pic,phy=tree,scaled=FALSE)
vcv<-t(YY)%*%YY/nrow(YY)
sum(dmnorm(YY,mean=rep(0,ncol(YY)),varcov=vcv,log=TRUE))
}
if(search=="heuristic"){
ee<-edge
if(edge!=root.node) ee<-c(ee,getAncestors(tree,node=edge,type="parent"))
if(edge>Ntip(tree)) ee<-c(ee,tree$edge[which(tree$edge[,1]==edge),2])
} else if(search=="exhaustive") ee<-c(root.node,tree$edge[,2])
ee<-intersect(ee,constraint)
fit<-vector(mode="list",length=length(ee))
for(i in 1:length(ee)){
if(ee[i]==root.node) fit[[i]]<-optimize(lik.tree,interval=c(max(nodeHeights(tree)),10*max(nodeHeights(tree))),tip=tip,tree=tree,edge=ee[i],XX=X,rt=root.node,maximum=TRUE)
else fit[[i]]<-optimize(lik.tree,interval=c(0,tree$edge.length[which(tree$edge[,2]==ee[i])]),tip=tip,tree=tree,edge=ee[i],XX=X,rt=root.node,maximum=TRUE)
}
logL<-sapply(fit,function(x) x$objective)
if(search=="exhaustive"&&plot){
ll<-sapply(fit,function(x) x$objective)
ll<-ll[2:length(ll)]
ll[ll<=sort(ll,decreasing=TRUE)[ceiling(nrow(tree$edge)/2)]]<-sort(ll,decreasing=TRUE)[ceiling(nrow(tree$edge)/2)]
layout(matrix(c(1,2),2,1),heights=c(0.95,0.05))
plotBranchbyTrait(tree,ll,mode="edges",title="log(L)",show.tip.label=FALSE)
edgelabels(round(ll,1),cex=0.5)
plot.new()
text(paste("Note: logL <=",round(min(ll),2),"set to",round(min(ll),2),"for visualization only"),x=0.5,y=0.5)
}
fit<-fit[[which(logL==max(logL))]]
edge<-ee[which(logL==max(logL))]
mltree<-if(edge==root.node) midpoint.root(bind.tip(tree,tip,where=edge,edge.length=fit$maximum)) else bind.tip(tree,tip,where=edge,position=fit$maximum)
mltree$logL<-fit$objective
if(!quiet) cat("Done.\n")
mltree
}
|
#############################################################
# Setup working directory in your desktop
# NOTE(review): hard-coded absolute setwd() makes the script
# machine-specific; consider a project-relative path.
setwd("/home/alf/Scrivania/lav_primicerio_final")
#############################################################
# Load dependencies and functions
# Before load library check and install the packages as usual if necessary.
#############################################################
# A Load spatial libraries
library(maptools)
library(spatstat)
library(spdep)
library(rgdal)
library(raster)
library(rgeos)
library(leaflet)
library(Cairo)
library(MASS)
library(rpart.plot)
# citation() calls record the package references used in the paper.
citation("maptools")
citation("spatstat")
citation("spdep")
citation("rgdal")
citation("rpart.plot")
citation("spdep")
citation("rgeos")
citation("MASS")
citation("raster")
#############################################################
# B Load graphical libraries
library(ggplot2)
library(sjPlot)
library(sjmisc)
citation("ggplot2")
citation("sjPlot")
#############################################################
# C Load modeling libraries
library(rpart)
library(plotROC)
citation("rpart")
citation("plotROC")
#############################################################
# Load project-local helper functions (e.g. model_primicerio).
source("auxillary_bulichella.r")
################################################################################################
# Load geo data
mat_bulichella_sp=readRDS("geo_bulichella/mat_bulichella.rds")
buliGEO=brick("raster/buliGEO.tif")
buli_mask=brick("raster/C_mask.tif")
#########################################################################################################################
# Raster processing to compute the field boundary and distances
r_bulichella <- stack(SpatialPixelsDataFrame(mat_bulichella_sp, tolerance = 0.00001, mat_bulichella_sp@data))
proj4string(r_bulichella)=CRS("+init=epsg:4326")
writeRaster(r_bulichella,"raster/r_bulichella.tif",overwrite=TRUE)
# Build an empty mask raster with the same geometry as the "Area" layer.
# Bug fix: the previous `setValues(r_mask_bulichella)=NA` is not valid R --
# raster provides no `setValues<-` replacement function; setValues(x, v)
# returns a new object instead.
r_mask_bulichella=setValues(r_bulichella[["Area"]]*1,NA)
writeRaster(r_mask_bulichella,"raster/r_mask_bulichella.tif",overwrite=TRUE)
# Calculate Dist_bound parameter in unit images
# Convex hull of the plant points and distance of every plant from the
# hull boundary.
qhull_bulichella=gConvexHull(mat_bulichella_sp)
proj4string(qhull_bulichella)=CRS("+init=epsg:4326")
coords_edges_bulichella=as.data.frame(getEdges(qhull_bulichella))
coords_edges_bulichella$id=1
coordinates(coords_edges_bulichella)= ~ x+y
class(coords_edges_bulichella)
bound_line_bulichella=gBoundary(qhull_bulichella)
saveRDS(bound_line_bulichella,"geo_bulichella/bound_line_bulichella.rds")
bound_line_bulichella=readRDS("geo_bulichella/bound_line_bulichella.rds")
proj4string(bound_line_bulichella)=proj4string(mat_bulichella_sp)
dist_bound=gDistance(mat_bulichella_sp,bound_line_bulichella,byid=TRUE)
# Flag plants whose "Area" raster value exceeds the 90th percentile
# (the comparison is `>`, i.e. ABOVE the 90th percentile, despite the
# original comment saying "under").
q_area=quantile(values(r_bulichella[["Area"]]),probs = seq(0, 1, 0.1),na.rm=TRUE)
mat_bulichella_sp$Underperc=extract(r_bulichella[["Area"]]>q_area[10],mat_bulichella_sp)
####################################################################################
# Extract the features of the missing plants
mat_bulichella_miss=mat_bulichella_sp[mat_bulichella_sp$MISSING>0,]
##################################################################################
# Create a spatstat spatial object to visualize missing-plant density
sSp_bulichella_miss <- as(SpatialPoints(mat_bulichella_miss), "ppp") # convert points to ppp class
Dens_bulichella_miss <- density(sSp_bulichella_miss, adjust = 0.2) # create density object
class(Dens_bulichella_miss) # just for interest: it is of pixel-image class
plot(Dens_bulichella_miss) # default plot for spatial density
contour(density(sSp_bulichella_miss, adjust = 0.2), nlevels = 4) # plot as contours - this is where we're heading
Dsg_bulichella_miss <- as(Dens_bulichella_miss, "SpatialGridDataFrame") # convert to spatial grid class
Dim_bulichella_miss <- as.image.SpatialGridDataFrame(Dsg_bulichella_miss) # convert again to an image
Dcl_bulichella_miss <- contourLines(Dim_bulichella_miss, nlevels = 8) # create contour object - change 8 for more/fewer levels
SLDF_bulichella_miss<- ContourLines2SLDF(Dcl_bulichella_miss, CRS("+init=epsg:4326")) # convert to SpatialLinesDataFrame
SLDF_bulichella_miss=SLDF_bulichella_miss[SLDF_bulichella_miss$level!=0,] # drop the zero-level (data boundary) contour
plot(SLDF_bulichella_miss, col = terrain.colors(4))
################################################################################################################
# Export the three overview figures (orthophoto, mask overlay, density).
CairoPNG(filename = "results/plot_bulichella_image.png",res=300)
plotRGB(buliGEO)
plot(bound_line_bulichella,col ='red',add=TRUE)
dev.off()
CairoPNG(filename = "results/plot_bulichella_over_mask.png",res=300)
plotRGB(buliGEO)
plotRGB(buli_mask,alpha=120,colNA='red',add=TRUE)
plot(bound_line_bulichella,col ='red',add=TRUE)
dev.off()
CairoPNG(filename = "results/plot_density_missing.png",res=300)
plotRGB(buliGEO)
plot(bound_line_bulichella,col ='red',add=TRUE)
plot(SLDF_bulichella_miss, col = terrain.colors(4),add=TRUE)
plot(mat_bulichella_miss,pch = 19,cex = .1,col ='brown2',add=TRUE)
dev.off()
saveRDS(SLDF_bulichella_miss,"geo_bulichella/SLDF_bulichella_miss.rds")
####################################################################################
# Local Moran's I calculated with k-nearest-neighbour weight matrices
mat_bulichella_50=nb2listw(knn2nb(knearneigh(mat_bulichella_sp,k=50))) # 50 plants
mat_bulichella_30=nb2listw(knn2nb(knearneigh(mat_bulichella_sp,k=30))) # 30 plants
# Column 1 of localmoran() output is the local Moran Ii statistic.
mat_bulichella_sp@data$L_moran_50=localmoran(mat_bulichella_sp$Area, mat_bulichella_50)[,1]
mat_bulichella_sp@data$L_moran_30=localmoran(mat_bulichella_sp$Area, mat_bulichella_30)[,1]
mat_bulichella_sp@data$L_moran_30_p=localmoran(mat_bulichella_sp$Perimetro, mat_bulichella_30)[,1]
###############################################################################################
# LOESS deviation by line vigour model : model_primicerio function available in auxillary_bulichella.r
# One model per row (FILARE) of the vineyard; plots are saved per row.
mat_bulichella_ls=split(mat_bulichella_sp@data,mat_bulichella_sp@data$FILARE)
res=list()
for (i in seq_along(mat_bulichella_ls)) {
res[[i]]=model_primicerio(mat_bulichella_ls[[i]],
saveplot=TRUE,
titlefig=paste0("Modeling Plant Missing FILARE_",as.character(i)),
namefig=paste0("results/Modeling_Plant_Missing_FILARE ",as.character(i),".png"),
treshshold=100)
}
# Collect per-row residuals back into the full spatial data frame.
ls_model_residuals=lapply(res,function(x) x$model_residuals)
mat_bulichella_sp@data$Line_res=unlist(ls_model_residuals)
#########################################################################################################
# Create a "candidate missing" indicator where the deviation exceeds the threshold
ls_canditate=lapply(res,function(x) x$vector)
mat_bulichella_sp@data$candidate=unlist(ls_canditate)
names(mat_bulichella_sp@data)
saveRDS(mat_bulichella_sp,"mat_bulichella_sp.rds")
#############################################################################################################################
# Modeling steps
mat_bulichella_sp=readRDS("mat_bulichella_sp.rds")
#############################################################################################################################
# Stepwise (AIC) logistic model, dropping identifier/coordinate fields
modelfit_sel=stepAIC(glm(formula=MISSING ~.-FILARE-PIANTA-X-Y-candidate, family=binomial(), data=na.omit(mat_bulichella_sp@data)))
summary(modelfit_sel)
#############################################################################################################################
# Three candidate logistic classifiers (no stepwise selection).
# Bug fix: the third formula was previously assigned to
# `formula_classifier_A2` a second time, silently overwriting the
# "Underperc" model, and `modelfit_A3` reused `formula_classifier_A2` --
# so A2 and A3 were identical and the Underperc model was never fitted.
# The three formulas / three models / three output tables now correspond.
formula_classifier_A1 ="MISSING ~ Area + Roughness + Line_res"
formula_classifier_A2 ="MISSING ~ Area + Roughness + Underperc"
formula_classifier_A3 ="MISSING ~ Area + Roughness + L_moran_50"
modelfit_A1 <- glm(formula=formula_classifier_A1 , family=binomial(), data=na.omit(mat_bulichella_sp@data))
summary(modelfit_A1)
modelfit_A2 <- glm(formula=formula_classifier_A2 , family=binomial(), data=na.omit(mat_bulichella_sp@data))
summary(modelfit_A2)
modelfit_A3 <- glm(formula=formula_classifier_A3 , family=binomial(), data=na.omit(mat_bulichella_sp@data))
summary(modelfit_A3)
# Export comparison and per-model coefficient tables.
sjt.glm(modelfit_A1 , modelfit_A2 , modelfit_A3 ,file="results/table_glm_compare.html")
sjt.glm(modelfit_A1,file="results/table_glm_A1.html")
sjt.glm(modelfit_A2,file="results/table_glm_A2.html")
sjt.glm(modelfit_A3,file="results/table_glm_A3.html")
##########################################################################################################
# ROC curves and confusion tables at several probability thresholds.
observed=mat_bulichella_sp@data$MISSING
prob_A1 <- predict(modelfit_A1, newdata=mat_bulichella_sp@data, type='response')
prob_A2 <- predict(modelfit_A2, newdata=mat_bulichella_sp@data, type='response')
prob_A3 <- predict(modelfit_A3, newdata=mat_bulichella_sp@data, type='response')
# NOTE(review): only A1 and A3 enter the ROC comparison below even though
# prob_A2 is computed -- possibly intentional; confirm with the authors.
roc_data <- data.frame(Obs = observed, Model_A1 = prob_A1, Model_A3 = prob_A3, stringsAsFactors = FALSE)
longtest <- melt_roc(roc_data,"Obs", c("Model_A1","Model_A3"))
names(longtest)[3]="Classifier"
a=ggplot(longtest, aes(d = D, m = M, color = Classifier)) + geom_roc() + style_roc()
a+annotate("text", x = .75, y = .25,
label = paste(" AUC A1=", round(calc_auc(a)$AUC[1], 2),"\n",
"AUC A3=", round(calc_auc(a)$AUC[2], 2)))
ggsave("results/modelfit_ROC_glm.png",dpi=300)
# Confusion tables for model A1 at each threshold.
res=list()
tresh_miss=c(0.05,0.1,0.15,0.2,0.8,0.9)
for ( i in 1:length(tresh_miss)){
prediction=ifelse(prob_A1>tresh_miss[i],1,0)
res[[i]]=as.data.frame.array(table(observed,prediction))
}
res_df=do.call("rbind",lapply(res,function(x) data.frame(hit_plant=x[1,1],nohit_plants=x[1,2],nohit_miss=x[2,1],hit_miss=x[2,2])))
res_df$tresh=tresh_miss
sjt.df(res_df, describe = FALSE,show.rownames = FALSE, file = "results/model_results_glm_A1.html")
# Confusion tables for model A2.
res=list()
tresh_miss=c(0.05,0.1,0.15,0.2,0.8,0.9)
for ( i in 1:length(tresh_miss)){
prediction=ifelse(prob_A2>tresh_miss[i],1,0)
res[[i]]=as.data.frame.array(table(observed,prediction))
}
res_df=do.call("rbind",lapply(res,function(x) data.frame(hit_plant=x[1,1],nohit_plants=x[1,2],nohit_miss=x[2,1],hit_miss=x[2,2])))
res_df$tresh=tresh_miss
sjt.df(res_df, describe = FALSE,show.rownames = FALSE, file = "results/model_results_glm_A2.html")
# Confusion tables for model A3.
res=list()
tresh_miss=c(0.05,0.1,0.15,0.2,0.8,0.9)
for ( i in 1:length(tresh_miss)){
prediction=ifelse(prob_A3>tresh_miss[i],1,0)
res[[i]]=as.data.frame.array(table(observed,prediction))
}
res_df=do.call("rbind",lapply(res,function(x) data.frame(hit_plant=x[1,1],nohit_plants=x[1,2],nohit_miss=x[2,1],hit_miss=x[2,2])))
res_df$tresh=tresh_miss
sjt.df(res_df, describe = FALSE,show.rownames = FALSE, file = "results/model_results_glm_A3.html")
#############################################################################################################################
# decision trees models
# Fit decision-tree counterparts of the three GLMs and save one plot per tree.
treemodel_full_A1 <- rpart(formula(modelfit_A1), data=mat_bulichella_sp@data)
png("results/decision_tree_model_A1.png")
rpart.plot(treemodel_full_A1)
dev.off()
treemodel_full_A2 <- rpart(formula(modelfit_A2), data=mat_bulichella_sp@data)
# BUG FIX: this png() call previously reused the A1 filename, overwriting the
# A1 plot and leaving no file for model A2.
png("results/decision_tree_model_A2.png")
rpart.plot(treemodel_full_A2)
dev.off()
treemodel_full_A3 <- rpart(formula(modelfit_A3), data=mat_bulichella_sp@data)
png("results/decision_tree_model_A3.png")
rpart.plot(treemodel_full_A3)
dev.off()
###############################################################################################################
prob_A1 <- predict(treemodel_full_A1, newdata=mat_bulichella_sp@data)
prob_A2 <- predict(treemodel_full_A2, newdata=mat_bulichella_sp@data)
prob_A3 <- predict(treemodel_full_A3, newdata=mat_bulichella_sp@data)
roc_data <- data.frame(Obs = observed, DTree_A1 = prob_A1, DTree_A3 = prob_A3, stringsAsFactors = FALSE)
longtest <- melt_roc(roc_data,"Obs", c("DTree_A1","DTree_A3"))
names(longtest)[3]="Classifier"
a=ggplot(longtest, aes(d = D, m = M, color = Classifier)) + geom_roc() + style_roc()
a+annotate("text", x = .75, y = .25,
label = paste(" AUC A1=", round(calc_auc(a)$AUC[1], 2),"\n",
"AUC A3=", round(calc_auc(a)$AUC[2], 2)))
ggsave("results/modelfit_ROC_tree.png",dpi=300)
#########################################################################################################################
res=list()
tresh_miss=c(0.05,0.1,0.15,0.2,0.8)
for ( i in 1:length(tresh_miss)){
prediction=ifelse(prob_A1>tresh_miss[i],1,0)
res[[i]]=as.data.frame.array(table(observed,prediction))
}
res_df=do.call("rbind",lapply(res,function(x) data.frame(hit_plant=x[1,1],nohit_plants=x[1,2],nohit_miss=x[2,1],hit_miss=x[2,2])))
res_df$tresh=tresh_miss
sjt.df(res_df, describe = FALSE,show.rownames = FALSE, file = "results/model_results_tree_A1.html")
res=list()
tresh_miss=c(0.05,0.1,0.15,0.2,0.8)
for ( i in 1:length(tresh_miss)){
prediction=ifelse(prob_A2>tresh_miss[i],1,0)
res[[i]]=as.data.frame.array(table(observed,prediction))
}
res_df=do.call("rbind",lapply(res,function(x) data.frame(hit_plant=x[1,1],nohit_plants=x[1,2],nohit_miss=x[2,1],hit_miss=x[2,2])))
res_df$tresh=tresh_miss
sjt.df(res_df, describe = FALSE,show.rownames = FALSE, file = "results/model_results_tree_A2.html")
res=list()
tresh_miss=c(0.05,0.1,0.15,0.2,0.8)
for ( i in 1:length(tresh_miss)){
prediction=ifelse(prob_A3>tresh_miss[i],1,0)
res[[i]]=as.data.frame.array(table(observed,prediction))
}
res_df=do.call("rbind",lapply(res,function(x) data.frame(hit_plant=x[1,1],nohit_plants=x[1,2],nohit_miss=x[2,1],hit_miss=x[2,2])))
res_df$tresh=tresh_miss
sjt.df(res_df, describe = FALSE,show.rownames = FALSE, file = "results/model_results_tree_A3.html")
# A. Baddeley, E. Rubak and R.Turner. Spatial Point Patterns: Methodology and Applications with R. Chapman and Hall/CRC Press, 2015.
|
/primicerio_work_final.code.r
|
no_license
|
alfcrisci/UAVmissingplants
|
R
| false
| false
| 13,933
|
r
|
#############################################################
# Setup working directory in your desktop
setwd("/home/alf/Scrivania/lav_primicerio_final")
#############################################################
# Load dependencies and functions
# Before load library check and install the packages as usual if necessary.
#############################################################
# A Load spatial libraries
library(maptools)
library(spatstat)
library(spdep)
library(rgdal)
library(raster)
library(rgeos)
library(leaflet)
library(Cairo)
library(MASS)
library(rpart.plot)
citation("maptools")
citation("spatstat")
citation("spdep")
citation("rgdal")
citation("rpart.plot")
citation("spdep")
citation("rgeos")
citation("MASS")
citation("raster")
#############################################################
# B Load graphical libraries
library(ggplot2)
library(sjPlot)
library(sjmisc)
citation("ggplot2")
citation("sjPlot")
#############################################################
# C Load modeling libraries
library(rpart)
library(plotROC)
citation("rpart")
citation("plotROC")
#############################################################
# load other function
source("auxillary_bulichella.r")
################################################################################################
# Load geo data
mat_bulichella_sp=readRDS("geo_bulichella/mat_bulichella.rds")
buliGEO=brick("raster/buliGEO.tif")
buli_mask=brick("raster/C_mask.tif")
#########################################################################################################################à
# trattamenti raster per calcolo bound e distanze
r_bulichella <- stack(SpatialPixelsDataFrame(mat_bulichella_sp, tolerance = 0.00001, mat_bulichella_sp@data))
proj4string(r_bulichella)=CRS("+init=epsg:4326")
writeRaster(r_bulichella,"raster/r_bulichella.tif",overwrite=TRUE)
r_mask_bulichella=r_bulichella[["Area"]]*1
setValues(r_mask_bulichella)=NA
writeRaster(r_mask_bulichella,"raster/r_mask_bulichella.tif",overwrite=TRUE)
# Calculate Dist_bound parameter in unit images
# IT Calcolo della forma convessa e della distanza
qhull_bulichella=gConvexHull(mat_bulichella_sp)
proj4string(qhull_bulichella)=CRS("+init=epsg:4326")
coords_edges_bulichella=as.data.frame(getEdges(qhull_bulichella))
coords_edges_bulichella$id=1
coordinates(coords_edges_bulichella)= ~ x+y
class(coords_edges_bulichella)
bound_line_bulichella=gBoundary(qhull_bulichella)
saveRDS(bound_line_bulichella,"geo_bulichella/bound_line_bulichella.rds")
bound_line_bulichella=readRDS("geo_bulichella/bound_line_bulichella.rds")
proj4string(bound_line_bulichella)=proj4string(mat_bulichella_sp)
dist_bound=gDistance(mat_bulichella_sp,bound_line_bulichella,byid=T)
# Calculate factor variable ( 0- 1, False True ) quantile UnderPerc_Area if feature is under the 90th percentile
q_area=quantile(values(r_bulichella[["Area"]]),probs = seq(0, 1, 0.1),na.rm=T)
mat_bulichella_sp$Underperc=extract(r_bulichella[["Area"]]>q_area[10],mat_bulichella_sp)
####################################################################################
# estract missing plants fetatures
mat_bulichella_miss=mat_bulichella_sp[mat_bulichella_sp$MISSING>0,]
##################################################################################
# Create a spatastat spatial object to visualize missing plant density
sSp_bulichella_miss <- as(SpatialPoints(mat_bulichella_miss), "ppp") # convert points to pp class
Dens_bulichella_miss <- density(sSp_bulichella_miss, adjust = 0.2) # create density object
class(Dens_bulichella_miss) # just for interest: it's got it's of pixel image class
plot(Dens_bulichella_miss) # default plot for spatial density
contour(density(sSp_bulichella_miss, adjust = 0.2), nlevels = 4) # plot as contours - this is where we're heading plot of chunk Contour plot
Dsg_bulichella_miss <- as(Dens_bulichella_miss, "SpatialGridDataFrame") # convert to spatial grid class
Dim_bulichella_miss <- as.image.SpatialGridDataFrame(Dsg_bulichella_miss) # convert again to an image
Dcl_bulichella_miss <- contourLines(Dim_bulichella_miss, nlevels = 8) # create contour object - change 8 for more/fewer levels
SLDF_bulichella_miss<- ContourLines2SLDF(Dcl_bulichella_miss, CRS("+init=epsg:4326")) # convert to SpatialLinesDataFrame
SLDF_bulichella_miss=SLDF_bulichella_miss[SLDF_bulichella_miss$level!=0,] # leave data boudary
plot(SLDF_bulichella_miss, col = terrain.colors(4))
################################################################################################################
CairoPNG(filename = "results/plot_bulichella_image.png",res=300)
plotRGB(buliGEO)
plot(bound_line_bulichella,col ='red',add=TRUE)
dev.off()
CairoPNG(filename = "results/plot_bulichella_over_mask.png",res=300)
plotRGB(buliGEO)
plotRGB(buli_mask,alpha=120,colNA='red',add=TRUE)
plot(bound_line_bulichella,col ='red',add=TRUE)
dev.off()
CairoPNG(filename = "results/plot_density_missing.png",res=300)
plotRGB(buliGEO)
plot(bound_line_bulichella,col ='red',add=TRUE)
plot(SLDF_bulichella_miss, col = terrain.colors(4),add=TRUE)
plot(mat_bulichella_miss,pch = 19,cex = .1,col ='brown2',add=TRUE)
dev.off()
saveRDS(SLDF_bulichella_miss,"geo_bulichella/SLDF_bulichella_miss.rds")
####################################################################################
# Local moran calculation by using of N matrix of neighbours
mat_bulichella_50=nb2listw(knn2nb(knearneigh(mat_bulichella_sp,k=50))) # 50 plants
mat_bulichella_30=nb2listw(knn2nb(knearneigh(mat_bulichella_sp,k=30))) # 30 plants
mat_bulichella_sp@data$L_moran_50=localmoran(mat_bulichella_sp$Area, mat_bulichella_50)[,1]
mat_bulichella_sp@data$L_moran_30=localmoran(mat_bulichella_sp$Area, mat_bulichella_30)[,1]
mat_bulichella_sp@data$L_moran_30_p=localmoran(mat_bulichella_sp$Perimetro, mat_bulichella_30)[,1]
###############################################################################################
# LOESS deviation by line vigour model : model_primicerio function available in auxillary_bulichella.r
mat_bulichella_ls=split(mat_bulichella_sp@data,mat_bulichella_sp@data$FILARE)
res=list()
for (i in seq_along(mat_bulichella_ls)) {
res[[i]]=model_primicerio(mat_bulichella_ls[[i]],
saveplot=TRUE,
titlefig=paste0("Modeling Plant Missing FILARE_",as.character(i)),
namefig=paste0("results/Modeling_Plant_Missing_FILARE ",as.character(i),".png"),
treshshold=100)
}
ls_model_residuals=lapply(res,function(x) x$model_residuals)
mat_bulichella_sp@data$Line_res=unlist(ls_model_residuals)
#########################################################################################################
# create guess variable candidate of missing when deviation are higher than treshsold
ls_canditate=lapply(res,function(x) x$vector)
mat_bulichella_sp@data$candidate=unlist(ls_canditate)
names(mat_bulichella_sp@data)
saveRDS(mat_bulichella_sp,"mat_bulichella_sp.rds")
#############################################################################################################################
# Modeling steps
mat_bulichella_sp=readRDS("mat_bulichella_sp.rds")
#############################################################################################################################
# model multilogistic selected eliminating no useful data fields
modelfit_sel=stepAIC(glm(formula=MISSING ~.-FILARE-PIANTA-X-Y-candidate, family=binomial(), data=na.omit(mat_bulichella_sp@data)))
summary(modelfit_sel)
#############################################################################################################################
# model multilogistic NO selection but choices
# Candidate logistic classifiers: three formulas differing only in the third
# predictor (line residual, under-percentile flag, local Moran's I).
# BUG FIX: the third formula was assigned to 'formula_classifier_A2', silently
# overwriting the Underperc model so that modelfit_A2 and modelfit_A3 were
# fitted with the same (L_moran_50) formula. It now has its own variable and
# is used for modelfit_A3.
formula_classifier_A1 <- "MISSING ~ Area + Roughness + Line_res"
formula_classifier_A2 <- "MISSING ~ Area + Roughness + Underperc"
formula_classifier_A3 <- "MISSING ~ Area + Roughness + L_moran_50"
# Fit each binomial GLM on complete cases only.
modelfit_A1 <- glm(formula = formula_classifier_A1, family = binomial(), data = na.omit(mat_bulichella_sp@data))
summary(modelfit_A1)
modelfit_A2 <- glm(formula = formula_classifier_A2, family = binomial(), data = na.omit(mat_bulichella_sp@data))
summary(modelfit_A2)
modelfit_A3 <- glm(formula = formula_classifier_A3, family = binomial(), data = na.omit(mat_bulichella_sp@data))
summary(modelfit_A3)
# Export comparison and per-model coefficient tables (HTML) via sjPlot.
sjt.glm(modelfit_A1, modelfit_A2, modelfit_A3, file = "results/table_glm_compare.html")
sjt.glm(modelfit_A1, file = "results/table_glm_A1.html")
sjt.glm(modelfit_A2, file = "results/table_glm_A2.html")
sjt.glm(modelfit_A3, file = "results/table_glm_A3.html")
##########################################################################################################
observed=mat_bulichella_sp@data$MISSING
prob_A1 <- predict(modelfit_A1, newdata=mat_bulichella_sp@data, type='response')
prob_A2 <- predict(modelfit_A2, newdata=mat_bulichella_sp@data, type='response')
prob_A3 <- predict(modelfit_A3, newdata=mat_bulichella_sp@data, type='response')
roc_data <- data.frame(Obs = observed, Model_A1 = prob_A1, Model_A3 = prob_A3, stringsAsFactors = FALSE)
longtest <- melt_roc(roc_data,"Obs", c("Model_A1","Model_A3"))
names(longtest)[3]="Classifier"
a=ggplot(longtest, aes(d = D, m = M, color = Classifier)) + geom_roc() + style_roc()
a+annotate("text", x = .75, y = .25,
label = paste(" AUC A1=", round(calc_auc(a)$AUC[1], 2),"\n",
"AUC A3=", round(calc_auc(a)$AUC[2], 2)))
ggsave("results/modelfit_ROC_glm.png",dpi=300)
res=list()
tresh_miss=c(0.05,0.1,0.15,0.2,0.8,0.9)
for ( i in 1:length(tresh_miss)){
prediction=ifelse(prob_A1>tresh_miss[i],1,0)
res[[i]]=as.data.frame.array(table(observed,prediction))
}
res_df=do.call("rbind",lapply(res,function(x) data.frame(hit_plant=x[1,1],nohit_plants=x[1,2],nohit_miss=x[2,1],hit_miss=x[2,2])))
res_df$tresh=tresh_miss
sjt.df(res_df, describe = FALSE,show.rownames = FALSE, file = "results/model_results_glm_A1.html")
res=list()
tresh_miss=c(0.05,0.1,0.15,0.2,0.8,0.9)
for ( i in 1:length(tresh_miss)){
prediction=ifelse(prob_A2>tresh_miss[i],1,0)
res[[i]]=as.data.frame.array(table(observed,prediction))
}
res_df=do.call("rbind",lapply(res,function(x) data.frame(hit_plant=x[1,1],nohit_plants=x[1,2],nohit_miss=x[2,1],hit_miss=x[2,2])))
res_df$tresh=tresh_miss
sjt.df(res_df, describe = FALSE,show.rownames = FALSE, file = "results/model_results_glm_A2.html")
res=list()
tresh_miss=c(0.05,0.1,0.15,0.2,0.8,0.9)
for ( i in 1:length(tresh_miss)){
prediction=ifelse(prob_A3>tresh_miss[i],1,0)
res[[i]]=as.data.frame.array(table(observed,prediction))
}
res_df=do.call("rbind",lapply(res,function(x) data.frame(hit_plant=x[1,1],nohit_plants=x[1,2],nohit_miss=x[2,1],hit_miss=x[2,2])))
res_df$tresh=tresh_miss
sjt.df(res_df, describe = FALSE,show.rownames = FALSE, file = "results/model_results_glm_A3.html")
#############################################################################################################################
# decision trees models
# Fit decision-tree counterparts of the three GLMs and save one plot per tree.
treemodel_full_A1 <- rpart(formula(modelfit_A1), data=mat_bulichella_sp@data)
png("results/decision_tree_model_A1.png")
rpart.plot(treemodel_full_A1)
dev.off()
treemodel_full_A2 <- rpart(formula(modelfit_A2), data=mat_bulichella_sp@data)
# BUG FIX: this png() call previously reused the A1 filename, overwriting the
# A1 plot and leaving no file for model A2.
png("results/decision_tree_model_A2.png")
rpart.plot(treemodel_full_A2)
dev.off()
treemodel_full_A3 <- rpart(formula(modelfit_A3), data=mat_bulichella_sp@data)
png("results/decision_tree_model_A3.png")
rpart.plot(treemodel_full_A3)
dev.off()
###############################################################################################################
prob_A1 <- predict(treemodel_full_A1, newdata=mat_bulichella_sp@data)
prob_A2 <- predict(treemodel_full_A2, newdata=mat_bulichella_sp@data)
prob_A3 <- predict(treemodel_full_A3, newdata=mat_bulichella_sp@data)
roc_data <- data.frame(Obs = observed, DTree_A1 = prob_A1, DTree_A3 = prob_A3, stringsAsFactors = FALSE)
longtest <- melt_roc(roc_data,"Obs", c("DTree_A1","DTree_A3"))
names(longtest)[3]="Classifier"
a=ggplot(longtest, aes(d = D, m = M, color = Classifier)) + geom_roc() + style_roc()
a+annotate("text", x = .75, y = .25,
label = paste(" AUC A1=", round(calc_auc(a)$AUC[1], 2),"\n",
"AUC A3=", round(calc_auc(a)$AUC[2], 2)))
ggsave("results/modelfit_ROC_tree.png",dpi=300)
#########################################################################################################################
res=list()
tresh_miss=c(0.05,0.1,0.15,0.2,0.8)
for ( i in 1:length(tresh_miss)){
prediction=ifelse(prob_A1>tresh_miss[i],1,0)
res[[i]]=as.data.frame.array(table(observed,prediction))
}
res_df=do.call("rbind",lapply(res,function(x) data.frame(hit_plant=x[1,1],nohit_plants=x[1,2],nohit_miss=x[2,1],hit_miss=x[2,2])))
res_df$tresh=tresh_miss
sjt.df(res_df, describe = FALSE,show.rownames = FALSE, file = "results/model_results_tree_A1.html")
res=list()
tresh_miss=c(0.05,0.1,0.15,0.2,0.8)
for ( i in 1:length(tresh_miss)){
prediction=ifelse(prob_A2>tresh_miss[i],1,0)
res[[i]]=as.data.frame.array(table(observed,prediction))
}
res_df=do.call("rbind",lapply(res,function(x) data.frame(hit_plant=x[1,1],nohit_plants=x[1,2],nohit_miss=x[2,1],hit_miss=x[2,2])))
res_df$tresh=tresh_miss
sjt.df(res_df, describe = FALSE,show.rownames = FALSE, file = "results/model_results_tree_A2.html")
res=list()
tresh_miss=c(0.05,0.1,0.15,0.2,0.8)
for ( i in 1:length(tresh_miss)){
prediction=ifelse(prob_A3>tresh_miss[i],1,0)
res[[i]]=as.data.frame.array(table(observed,prediction))
}
res_df=do.call("rbind",lapply(res,function(x) data.frame(hit_plant=x[1,1],nohit_plants=x[1,2],nohit_miss=x[2,1],hit_miss=x[2,2])))
res_df$tresh=tresh_miss
sjt.df(res_df, describe = FALSE,show.rownames = FALSE, file = "results/model_results_tree_A3.html")
# A. Baddeley, E. Rubak and R.Turner. Spatial Point Patterns: Methodology and Applications with R. Chapman and Hall/CRC Press, 2015.
|
# library(RHRV)
# setwd("c:/users/kristan/documents/github/trelliscope/data/mimic2db")
# #use string with x and y:
# #ex for x: "a40024"
# #and y: "a40024/"
# hrvextract <- function(x,y){
# hrv.data = CreateHRVData()
# hrv.data = LoadBeatWFDB(hrv.data, x, RecordPath = y, annotator = "qrs")
# hrv.data = BuildNIHR(hrv.data)
# range(hrv.data$Beat$niHR)
# hrv.data$Beat$niHR[!is.finite(hrv.data$Beat$niHR)] <- 300
# hrv.data = FilterNIHR(hrv.data)
# hrv.data=InterpolateNIHR (hrv.data, freqhr = 4)
# hrv.data = CreateTimeAnalysis(hrv.data,size=300,interval = 7.8125)
# HRData <- data.frame(hrv.data$HR ,1)
# names(HRData) <- c("HR", "Segment")
# Segments <- rep(c(1:ceiling(nrow(HRData)/7200)), each=7200)
# HRData$Segment <- Segments[1:length(HRData$Segment)]
# HRData$Shift <- ceiling(HRData$Segment/24)
# HRData$Patient <- x
# HRData$SegmentShiftMean <- ave(HRData$HR, HRData$Shift, HRData$Segment, FUN=mean)
# HRData$SegmentShiftSD <- ave(HRData$HR, HRData$Shift, HRData$Segment, FUN=sd)
# HRData$SegmentShiftlower <- HRData$SegmentShiftMean-(1.96*HRData$SegmentShiftSD)
# HRData$SegmentShiftupper <- HRData$SegmentShiftMean+(1.96*HRData$SegmentShiftSD)
# return(HRData)
# }
#
# ############################################
# a40075 <- hrvextract("a40075", "a40075/")
# a40076 <- hrvextract("a40076", "a40076/")
# a40086 <- hrvextract("a40086", "a40086/")
# #a40109 <- hrvextract("a40109", "a40109/")
# a40075$Seconds <- seq(from=0, to=((nrow(a40075)-1)/4)+.2, by=.25)
# a40076$Seconds <- seq(from=0, to=((nrow(a40076)-1)/4), by=.25)
# a40086$Seconds <- seq(from=0, to=((nrow(a40086)-1)/4), by=.25)
#
#
# alldata <- rbind(a40075,a40076,a40086)
setwd("c:/users/kristan/documents/github/trelliscope/data/mimic2db")
load("workingdata.RData")
library(shiny)
library(datadr); library(trelliscope)
library(reshape2)
library(rCharts)
byPatientShiftSeg <- divide(alldata, by = c("Patient", "Shift", "Segment"), update = TRUE)
vdbConn("MIMIC2DBGraphs/vdb", autoYes = TRUE)
# Cognostics for one heart-rate subset: per-panel summary metrics shown in
# the trelliscope viewer (mean HR, HR range, and count of non-missing
# readings). `x` is a data frame with an HR column.
heartCog <- function(x) {
  hr <- x$HR
  list(
    meanHR = cogMean(hr),
    RangeHR = cogRange(hr),
    nObs = cog(sum(!is.na(hr)), desc = "number of sensor readings")
  )
}
heartCog(byPatientShiftSeg[[1]][[2]])
# make and test panel function
# Panel function: interactive rCharts line plot of HR over time for one
# subset. Returns the hPlot object so trelliscope can render it.
timePanelhc <- function(x) {
  hPlot(HR ~ Seconds, data = x, type = "line", radius = 0)
}
timePanelhc(byPatientShiftSeg[[1]][[2]])
# add display panel and cog function to vdb
makeDisplay(byPatientShiftSeg,
name = "HR_Shift_30Min_Interval",
desc = "Heart Rate Overview, 30 Minute Intervals",
panelFn = timePanelhc, cogFn = heartCog,
width = 400, height = 400)
##############
#with upper and lower bounds
# make and test panel function
# Panel function (base graphics): HR trace with the per-segment 95% band
# (gold, dashed) and fixed alarm cutoffs at 30 and 140 bpm (dark red,
# dotted). Draws onto the current device; no return value of interest.
timePanel <- function(x) {
  secs <- x$Seconds
  plot(x = secs, y = x$HR, type = "l", xlab = "Seconds", ylab = "HR")
  lines(x = secs, y = x$SegmentShiftupper, col = "gold", lty = 2)
  lines(x = secs, y = x$SegmentShiftlower, col = "gold", lty = 2)
  # Horizontal alarm-threshold reference lines.
  for (cutoff in c(30, 140)) {
    lines(x = secs, y = rep(cutoff, length(secs)), col = "darkred", lty = 3)
  }
}
timePanel(byPatientShiftSeg[[1]][[2]])
# add display panel and cog function to vdb
makeDisplay(byPatientShiftSeg,
name = "HR_Shift_30Min_Interval_Cutoffs",
desc = "Heart Rate Overview, 30 Minute Intervals, With Alarm Settings",
panelFn = timePanel, cogFn = heartCog,
width = 400, height = 400)
# view the display
library(shiny)
runApp("../../inst/trelliscopeViewerAlacer", launch.browser=TRUE)
|
/Analyze/data/ecg/mimic2db/analysis.R
|
permissive
|
alacer/renaissance
|
R
| false
| false
| 3,561
|
r
|
# library(RHRV)
# setwd("c:/users/kristan/documents/github/trelliscope/data/mimic2db")
# #use string with x and y:
# #ex for x: "a40024"
# #and y: "a40024/"
# hrvextract <- function(x,y){
# hrv.data = CreateHRVData()
# hrv.data = LoadBeatWFDB(hrv.data, x, RecordPath = y, annotator = "qrs")
# hrv.data = BuildNIHR(hrv.data)
# range(hrv.data$Beat$niHR)
# hrv.data$Beat$niHR[!is.finite(hrv.data$Beat$niHR)] <- 300
# hrv.data = FilterNIHR(hrv.data)
# hrv.data=InterpolateNIHR (hrv.data, freqhr = 4)
# hrv.data = CreateTimeAnalysis(hrv.data,size=300,interval = 7.8125)
# HRData <- data.frame(hrv.data$HR ,1)
# names(HRData) <- c("HR", "Segment")
# Segments <- rep(c(1:ceiling(nrow(HRData)/7200)), each=7200)
# HRData$Segment <- Segments[1:length(HRData$Segment)]
# HRData$Shift <- ceiling(HRData$Segment/24)
# HRData$Patient <- x
# HRData$SegmentShiftMean <- ave(HRData$HR, HRData$Shift, HRData$Segment, FUN=mean)
# HRData$SegmentShiftSD <- ave(HRData$HR, HRData$Shift, HRData$Segment, FUN=sd)
# HRData$SegmentShiftlower <- HRData$SegmentShiftMean-(1.96*HRData$SegmentShiftSD)
# HRData$SegmentShiftupper <- HRData$SegmentShiftMean+(1.96*HRData$SegmentShiftSD)
# return(HRData)
# }
#
# ############################################
# a40075 <- hrvextract("a40075", "a40075/")
# a40076 <- hrvextract("a40076", "a40076/")
# a40086 <- hrvextract("a40086", "a40086/")
# #a40109 <- hrvextract("a40109", "a40109/")
# a40075$Seconds <- seq(from=0, to=((nrow(a40075)-1)/4)+.2, by=.25)
# a40076$Seconds <- seq(from=0, to=((nrow(a40076)-1)/4), by=.25)
# a40086$Seconds <- seq(from=0, to=((nrow(a40086)-1)/4), by=.25)
#
#
# alldata <- rbind(a40075,a40076,a40086)
setwd("c:/users/kristan/documents/github/trelliscope/data/mimic2db")
load("workingdata.RData")
library(shiny)
library(datadr); library(trelliscope)
library(reshape2)
library(rCharts)
byPatientShiftSeg <- divide(alldata, by = c("Patient", "Shift", "Segment"), update = TRUE)
vdbConn("MIMIC2DBGraphs/vdb", autoYes = TRUE)
# Cognostics for one heart-rate subset: per-panel summary metrics shown in
# the trelliscope viewer (mean HR, HR range, and count of non-missing
# readings). `x` is a data frame with an HR column.
heartCog <- function(x) {
  hr <- x$HR
  list(
    meanHR = cogMean(hr),
    RangeHR = cogRange(hr),
    nObs = cog(sum(!is.na(hr)), desc = "number of sensor readings")
  )
}
heartCog(byPatientShiftSeg[[1]][[2]])
# make and test panel function
# Panel function: interactive rCharts line plot of HR over time for one
# subset. Returns the hPlot object so trelliscope can render it.
timePanelhc <- function(x) {
  hPlot(HR ~ Seconds, data = x, type = "line", radius = 0)
}
timePanelhc(byPatientShiftSeg[[1]][[2]])
# add display panel and cog function to vdb
makeDisplay(byPatientShiftSeg,
name = "HR_Shift_30Min_Interval",
desc = "Heart Rate Overview, 30 Minute Intervals",
panelFn = timePanelhc, cogFn = heartCog,
width = 400, height = 400)
##############
#with upper and lower bounds
# make and test panel function
# Panel function (base graphics): HR trace with the per-segment 95% band
# (gold, dashed) and fixed alarm cutoffs at 30 and 140 bpm (dark red,
# dotted). Draws onto the current device; no return value of interest.
timePanel <- function(x) {
  secs <- x$Seconds
  plot(x = secs, y = x$HR, type = "l", xlab = "Seconds", ylab = "HR")
  lines(x = secs, y = x$SegmentShiftupper, col = "gold", lty = 2)
  lines(x = secs, y = x$SegmentShiftlower, col = "gold", lty = 2)
  # Horizontal alarm-threshold reference lines.
  for (cutoff in c(30, 140)) {
    lines(x = secs, y = rep(cutoff, length(secs)), col = "darkred", lty = 3)
  }
}
timePanel(byPatientShiftSeg[[1]][[2]])
# add display panel and cog function to vdb
makeDisplay(byPatientShiftSeg,
name = "HR_Shift_30Min_Interval_Cutoffs",
desc = "Heart Rate Overview, 30 Minute Intervals, With Alarm Settings",
panelFn = timePanel, cogFn = heartCog,
width = 400, height = 400)
# view the display
library(shiny)
runApp("../../inst/trelliscopeViewerAlacer", launch.browser=TRUE)
|
### write_haystack
#' Function to write haystack result data to file.
#'
#' @param res.haystack A 'haystack' result variable
#' @param file A file to write to
#'
#' @export
#'
#' @examples
#' # using the toy example of the singleCellHaystack package
#' # define a logical matrix with detection of each gene (rows) in each cell (columns)
#' dat.detection <- dat.expression > 1
#'
#' # running haystack in default mode
#' res <- haystack(dat.tsne, detection=dat.detection, method = "2D")
#'
#' outfile <- file.path(tempdir(), "output.csv")
#'
#' # write result to file outfile.csv
#' write_haystack(res, file = outfile)
#'
#' # read in result from file
#' res.copy <- read_haystack(file = outfile)
# Write the results table of a 'haystack' object to a CSV file.
# Validates its inputs up front and delegates to utils::write.csv.
write_haystack <- function(res.haystack, file) {
  # check input
  if (missing(res.haystack))
    stop("Parameter 'res.haystack' ('haystack' result) is missing")
  # BUG FIX: use inherits() instead of class(x) != "haystack".  class() may
  # return a vector of length > 1, which makes the comparison unreliable and
  # is an error inside if() on R >= 4.2.
  if (!inherits(res.haystack, "haystack"))
    stop("'res.haystack' must be of class 'haystack'")
  if (is.null(res.haystack$results))
    stop("Results seem to be missing from 'haystack' result. Is 'res.haystack' a valid 'haystack' result?")
  if (missing(file))
    stop("Parameter 'file' is missing")
  write.csv(x = res.haystack$results, file = file)
}
### read_haystack
#' Function to read haystack results from file.
#'
#' @param file A file containing 'haystack' results to read
#'
#' @return An object of class "haystack"
#' @export
#'
#' @examples
#' # using the toy example of the singleCellHaystack package
#' # define a logical matrix with detection of each gene (rows) in each cell (columns)
#' dat.detection <- dat.expression > 1
#'
#' # running haystack in default mode
#' res <- haystack(dat.tsne, detection=dat.detection, method = "2D")
#'
#' outfile <- file.path(tempdir(), "output.csv")
#'
#' # write result to file outfile.csv
#' write_haystack(res, file = outfile)
#'
#' # read in result from file
#' res.copy <- read_haystack(file = outfile)
# Rebuild a 'haystack' object from a CSV file previously written by
# write_haystack(); the first CSV column is restored as row names.
read_haystack <- function(file) {
  if (missing(file)) {
    stop("Parameter 'file' is missing")
  }
  results.table <- read.csv(file = file, row.names = 1)
  # Wrap the table in a classed list so downstream haystack methods dispatch.
  structure(list(results = results.table), class = "haystack")
}
|
/R/haystack_IO.R
|
permissive
|
AmrR101/singleCellHaystack
|
R
| false
| false
| 2,190
|
r
|
### write_haystack
#' Function to write haystack result data to file.
#'
#' @param res.haystack A 'haystack' result variable
#' @param file A file to write to
#'
#' @export
#'
#' @examples
#' # using the toy example of the singleCellHaystack package
#' # define a logical matrix with detection of each gene (rows) in each cell (columns)
#' dat.detection <- dat.expression > 1
#'
#' # running haystack in default mode
#' res <- haystack(dat.tsne, detection=dat.detection, method = "2D")
#'
#' outfile <- file.path(tempdir(), "output.csv")
#'
#' # write result to file outfile.csv
#' write_haystack(res, file = outfile)
#'
#' # read in result from file
#' res.copy <- read_haystack(file = outfile)
# Write the results table of a 'haystack' object to a CSV file.
# Validates its inputs up front and delegates to utils::write.csv.
write_haystack <- function(res.haystack, file) {
  # check input
  if (missing(res.haystack))
    stop("Parameter 'res.haystack' ('haystack' result) is missing")
  # BUG FIX: use inherits() instead of class(x) != "haystack".  class() may
  # return a vector of length > 1, which makes the comparison unreliable and
  # is an error inside if() on R >= 4.2.
  if (!inherits(res.haystack, "haystack"))
    stop("'res.haystack' must be of class 'haystack'")
  if (is.null(res.haystack$results))
    stop("Results seem to be missing from 'haystack' result. Is 'res.haystack' a valid 'haystack' result?")
  if (missing(file))
    stop("Parameter 'file' is missing")
  write.csv(x = res.haystack$results, file = file)
}
### read_haystack
#' Function to read haystack results from file.
#'
#' @param file A file containing 'haystack' results to read
#'
#' @return An object of class "haystack"
#' @export
#'
#' @examples
#' # using the toy example of the singleCellHaystack package
#' # define a logical matrix with detection of each gene (rows) in each cell (columns)
#' dat.detection <- dat.expression > 1
#'
#' # running haystack in default mode
#' res <- haystack(dat.tsne, detection=dat.detection, method = "2D")
#'
#' outfile <- file.path(tempdir(), "output.csv")
#'
#' # write result to file outfile.csv
#' write_haystack(res, file = outfile)
#'
#' # read in result from file
#' res.copy <- read_haystack(file = outfile)
# Rebuild a 'haystack' object from a CSV file previously written by
# write_haystack(); the first CSV column is restored as row names.
read_haystack <- function(file) {
  if (missing(file)) {
    stop("Parameter 'file' is missing")
  }
  results.table <- read.csv(file = file, row.names = 1)
  # Wrap the table in a classed list so downstream haystack methods dispatch.
  structure(list(results = results.table), class = "haystack")
}
|
#' A Cat Function
#'
#' This function reminds you to feed your cats before going to bed.
#' @param feed Have you fed cats? Defaults to TRUE.
#' @keywords cats
#' @export
#' @examples
#' cats_function()
# Print a bedtime message depending on whether the cats have been fed.
# `feed`: logical flag, defaults to TRUE (cats fed).
# Returns (invisibly) the printed string, as print() does.
cats_function <- function(feed = TRUE) {
  # isTRUE() is robust to NA and non-logical input; the original
  # `feed == TRUE` comparison made if() raise an error when `feed` was NA.
  if (isTRUE(feed)) {
    print("Great night!")
  } else {
    print("Be woke up by scratching and meowing.")
  }
}
|
/R/cats_function.R
|
no_license
|
lweicdsor/A-bare-minimum-R-package-CATS
|
R
| false
| false
| 386
|
r
|
#' A Cat Function
#'
#' This function reminds you to feed your cats before going to bed.
#' @param feed Have you fed cats? Defaults to TRUE.
#' @keywords cats
#' @export
#' @examples
#' cats_function()
# Print a bedtime message depending on whether the cats have been fed.
# `feed`: logical flag, defaults to TRUE (cats fed).
# Returns (invisibly) the printed string, as print() does.
cats_function <- function(feed = TRUE) {
  # isTRUE() is robust to NA and non-logical input; the original
  # `feed == TRUE` comparison made if() raise an error when `feed` was NA.
  if (isTRUE(feed)) {
    print("Great night!")
  } else {
    print("Be woke up by scratching and meowing.")
  }
}
|
#' 3D surface-based rendering of volume images.
#'
#' Will use rgl to render a substrate (e.g. anatomical) and overlay image (e.g.
#' functional).
#'
#' @param surfimg Input image to use as rendering substrate.
#' @param funcimg Input list of images to use as functional overlays.
#' @param surfval intensity level that defines isosurface
#' @param basefval intensity level that defines lower threshold for functional
#' image
#' @param offsetfval intensity level that defines upper threshold for
#' functional image
#' @param smoothsval smoothing for the surface image
#' @param smoothfval smoothing for the functional image
#' @param blobrender render a blob as opposed to a surface patch
#' @param alphasurf alpha for the surface contour
#' @param alphafunc alpha value for functional blobs
#' @param outdir output directory
#' @param outfn output file name
#' @param mycol name of color or colors
#' @param physical boolean
#' @param movieDuration in seconds
#' @param zoom magnification factor
#' @return 0 -- Success\cr 1 -- Failure
#' @author Avants B, Kandel B
#' @seealso \code{\link{plotBasicNetwork}}
#' @examples
#' \dontrun{
#' mnit<-getANTsRData("mni")
#' mnit<-antsImageRead(mnit)
#' mnia<-getANTsRData("mnia")
#' mnia<-antsImageRead(mnia)
#' mnit<-thresholdImage( mnit, 1, max(mnit) )
#' mnia<-thresholdImage( mnia, 1, 2 )
#' brain<-renderSurfaceFunction( surfimg =list( mnit ) ,
#' list(mnia), alphasurf=0.1 ,smoothsval = 1.5 )
#' }
#' @export renderSurfaceFunction
renderSurfaceFunction <- function(
  surfimg,
  funcimg,
  surfval = 0.5,
  basefval,
  offsetfval,
  smoothsval = 0,
  smoothfval = 0,
  blobrender = TRUE,
  alphasurf = 1,
  alphafunc = 1,
  outdir = "./",
  outfn = NA,
  mycol,
  physical = TRUE,
  movieDuration = 6,
  zoom = 1.1 ) {
  # Renders isosurfaces of one or more substrate images and, when funcimg is
  # given, additional functional overlay blobs, via misc3d/rgl.  Returns the
  # list of triangle meshes that were drawn.  Depends on ANTsR helpers
  # (usePkg, antsImageClone, smoothImage, antsTransformIndexToPhysicalPoint,
  # lappend) that live elsewhere in the package.
  # NOTE(review): the 'blobrender' argument is accepted but never used in
  # this body — confirm whether it is vestigial.
  if (missing(surfimg)) {
    stop("Check usage: at minimum, you need to call \n renderSurfaceFunction( list(an_ants_image) ) \n ")
  }
  # misc3d is an optional dependency; bail out gracefully when unavailable.
  havemsc3d<-usePkg("misc3d")
  if ( ! havemsc3d ) {
    print("Need misc3d for this")
    return(NA)
  }
  # Recycle the smoothing sigmas to one per substrate image, then smooth each
  # substrate with a positive sigma (clone first: antsImage objects have
  # reference semantics, so smoothing in place would mutate the caller's image).
  smoothsval <- rep(smoothsval, length.out = length(surfimg))
  for (i in 1:length(surfimg)) {
    if (smoothsval[i] > 0) {
      simg <- antsImageClone(surfimg[[i]])
      simg<-smoothImage(simg, smoothsval[i])
      surfimg[[i]] <- simg
    }
  }
  # Recycle per-image isosurface levels and surface alpha values.
  surfval <- rep(surfval, length.out = length(surfimg))
  if (length(alphasurf) != length(surfimg))
    alphasurf <- rep(alphasurf, length.out = length(surfimg))
  mylist <- list()
  # No functional overlays requested: build and draw the substrate surfaces
  # only, then return early.
  if (missing(funcimg)) {
    for (i in 1:length(surfimg)) {
      surf <- as.array(surfimg[[i]])
      # Extract the triangle mesh at the requested iso-level without drawing
      # (draw = FALSE); rendering happens below via drawScene.rgl.
      brain <- misc3d::contour3d(surf, level = c(surfval[i]), alpha = alphasurf[i],
        draw = FALSE, smooth = FALSE, material = "metal", depth = 0.6, color = "white")
      # each point has an ID, 3 points make a triangle , the points are laid out as c(
      # x1 , y1, z1, x2, y2, z2 , ... , xn, yn, zn ) indices are just numbers
      # vertices<-c( brain <- subdivision3d(brain)
      # Map vertex voxel indices to physical (scanner-space) coordinates.
      if (physical == TRUE) {
        brain$v1 <- antsTransformIndexToPhysicalPoint(surfimg[[i]], brain$v1)
        brain$v2 <- antsTransformIndexToPhysicalPoint(surfimg[[i]], brain$v2)
        brain$v3 <- antsTransformIndexToPhysicalPoint(surfimg[[i]], brain$v3)
      }
      mylist[[i]] <- brain
    }
    misc3d::drawScene.rgl(mylist)
    return(mylist)
  }
  # Optionally smooth the functional overlay images (same cloning caveat).
  if (smoothfval > 0) {
    for (i in 1:length(funcimg)) {
      fimg <- antsImageClone(funcimg[[i]])
      fimg<-smoothImage( fimg, smoothfval )
      funcimg[[i]] <- fimg
    }
  }
  # Default overlay colors: one rainbow hue per functional image.
  if (missing(mycol)) {
    mycol <- rainbow(length(funcimg))
  }
  if (length(alphafunc) != length(funcimg))
    alphafunc <- rep(alphafunc, length.out = length(funcimg))
  # Build the substrate meshes (same steps as the early-return branch above).
  for (i in 1:length(surfimg)) {
    surf <- as.array(surfimg[[i]])
    brain <- misc3d::contour3d(surf, level = c(surfval[i]), alpha = alphasurf[i], draw = FALSE,
      smooth = FALSE, material = "metal", depth = 0.6, color = "white")
    if (physical == TRUE) {
      brain$v1 <- antsTransformIndexToPhysicalPoint(surfimg[[i]], brain$v1)
      brain$v2 <- antsTransformIndexToPhysicalPoint(surfimg[[i]], brain$v2)
      brain$v3 <- antsTransformIndexToPhysicalPoint(surfimg[[i]], brain$v3)
    }
    mylist[[i]] <- brain
  }
  # Build one blob mesh per functional image at a data-driven threshold.
  for (i in 1:length(funcimg)) {
    func <- as.array(funcimg[[i]])
    # Magnitudes of the strictly positive voxels drive the default threshold.
    vals <- abs(funcimg[[i]][funcimg[[i]] > 0])
    if (missing(basefval)) {
      # just threshold at mean > 0
      usefval <- mean(vals)
      # print(usefval)
    } else usefval <- basefval
    # NOTE(review): offsetfval is computed here but never used afterwards —
    # confirm whether it was meant to cap the contour level.
    if (missing(offsetfval))
      offsetfval <- sd(vals[vals > usefval])
    # print(paste(i, usefval, alphafunc[i]))
    blob <- misc3d::contour3d(func, level = c(usefval), alpha = alphafunc[i], draw = FALSE,
      smooth = FALSE, material = "metal", depth = 0.6, color = mycol[[i]])
    if (physical == TRUE) {
      blob$v1 <- antsTransformIndexToPhysicalPoint(funcimg[[i]], blob$v1)
      blob$v2 <- antsTransformIndexToPhysicalPoint(funcimg[[i]], blob$v2)
      blob$v3 <- antsTransformIndexToPhysicalPoint(funcimg[[i]], blob$v3)
    }
    mylist <- lappend(mylist, list(blob))
  }
  # s<-scene3d() s$rgl::par3d$windowRect <- c(0, 0, 500, 500) # make the window large
  # 1.5*s$rgl::par3d$windowRect s$par3d$zoom = 1.1 # larger values make the image
  # smaller
  # Draw once, then resize/zoom the rgl window and draw again so the final
  # render reflects the requested window size and zoom factor.
  misc3d::drawScene.rgl(mylist) # surface render
  rgl::par3d(windowRect = c(0, 0, 500, 500)) # make the window large
  rgl::par3d(zoom = zoom ) # larger values make the image smaller
  misc3d::drawScene.rgl(mylist) # surface render
  # Optionally record a spinning movie of the scene to outdir/outfn.
  if (!is.na(outfn))
    rgl::movie3d(rgl::spin3d(), duration = movieDuration,
      dir = outdir, movie = outfn, clean = TRUE )
  return(mylist)
}
# Make a function that will make each facet from data returned from
# surfaceTriangles applied to a function (probably a more elegant way to do
# this?)
# Build the facet section of an ASCII STL file from surface-triangle data.
#
# `data` is a list of three n x 3 matrices (the layout produced by
# misc3d::contour3d / surfaceTriangles): data[[k]][i, ] holds the x/y/z
# coordinates of the k-th vertex of triangle i.
#
# Returns a character vector with seven lines per triangle:
# facet header, loop open, three vertex lines, loop close, facet footer.
#
# Changed from the original: the facet lines are built vectorized instead of
# growing `facetvector` with c() inside a per-triangle loop (which is O(n^2)
# in the number of triangles); the console progress bar and the unused
# start/end line constants (they belong to .makestl) were dropped.
.makefacet <- function(data) {
  tristart1 <- "facet normal 0 0 0"
  tristart2 <- "    outer loop"
  triend1 <- "    endloop"
  triend2 <- "endfacet"
  # paste() coerces the numeric coordinates to character exactly as the
  # former per-element as.character() calls did.
  v1 <- paste("        vertex", data[[1]][, 1], data[[1]][, 2], data[[1]][, 3], sep = " ")
  v2 <- paste("        vertex", data[[2]][, 1], data[[2]][, 2], data[[2]][, 3], sep = " ")
  v3 <- paste("        vertex", data[[3]][, 1], data[[3]][, 2], data[[3]][, 3], sep = " ")
  # rbind() recycles the scalar markers across all n triangles; reading the
  # resulting 7 x n matrix column-major interleaves the lines per triangle
  # in the same order the original loop emitted them.
  facetvector <- as.vector(rbind(tristart1, tristart2, v1, v2, v3, triend1, triend2))
  return(facetvector)
}
# Make a function that puts the facets together with the file headers and writes
# it out
# Write an ASCII STL file: header line(s), the pre-built facet lines from
# .makefacet(), then the footer.
#
# `facetvector` -- character vector of facet lines (see .makefacet).
# `outfile`     -- path of the STL file to (over)write.
#
# Changed from the original: the misc3d availability check was removed --
# nothing in this function uses misc3d (the check was copied from the
# rendering code); the unused per-facet constants were dropped; and the file
# connection is now released via on.exit() even if writeLines() fails.
.makestl <- function(facetvector, outfile) {
  startline1 <- "+"
  startline2 <- "    solid LAURA"
  endline <- "    endsolid LAURA"
  myout <- c(startline1, startline2, facetvector, endline)
  fileConn <- file(outfile)
  # Guarantee the connection object is destroyed even on write failure.
  on.exit(close(fileConn), add = TRUE)
  writeLines(myout, fileConn)
}
############################ to use this do ############################ ############################
############################ source('R/renderSurfaceFunction.R')
############################ fn<-'/Users/stnava/Downloads/resimplerenderingexample/wmss.nii.gz'
############################ img<-antsImageRead(fn,3) brain<-renderSurfaceFunction( img )
############################ fv<-.makefacet(brain[[1]]) .makestl(fv,'/tmp/temp.stl')
# vtri <- surfaceTriangles(vertices[,1], vertices[,2], vertices[,3] ,
# color='red') drawScene(updateTriangles(vtri, material = 'default', smooth = 3)
# )
|
/R/renderSurfaceFunction.R
|
permissive
|
ANTsX/ANTsR
|
R
| false
| false
| 8,438
|
r
|
#' 3D surface-based rendering of volume images.
#'
#' Will use rgl to render a substrate (e.g. anatomical) and overlay image (e.g.
#' functional).
#'
#' @param surfimg Input image to use as rendering substrate.
#' @param funcimg Input list of images to use as functional overlays.
#' @param surfval intensity level that defines isosurface
#' @param basefval intensity level that defines lower threshold for functional
#' image
#' @param offsetfval intensity level that defines upper threshold for
#' functional image
#' @param smoothsval smoothing for the surface image
#' @param smoothfval smoothing for the functional image
#' @param blobrender render a blob as opposed to a surface patch
#' @param alphasurf alpha for the surface contour
#' @param alphafunc alpha value for functional blobs
#' @param outdir output directory
#' @param outfn output file name
#' @param mycol name of color or colors
#' @param physical boolean
#' @param movieDuration in seconds
#' @param zoom magnification factor
#' @return 0 -- Success\cr 1 -- Failure
#' @author Avants B, Kandel B
#' @seealso \code{\link{plotBasicNetwork}}
#' @examples
#' \dontrun{
#' mnit<-getANTsRData("mni")
#' mnit<-antsImageRead(mnit)
#' mnia<-getANTsRData("mnia")
#' mnia<-antsImageRead(mnia)
#' mnit<-thresholdImage( mnit, 1, max(mnit) )
#' mnia<-thresholdImage( mnia, 1, 2 )
#' brain<-renderSurfaceFunction( surfimg =list( mnit ) ,
#' list(mnia), alphasurf=0.1 ,smoothsval = 1.5 )
#' }
#' @export renderSurfaceFunction
renderSurfaceFunction <- function(
  surfimg,
  funcimg,
  surfval = 0.5,
  basefval,
  offsetfval,
  smoothsval = 0,
  smoothfval = 0,
  blobrender = TRUE,
  alphasurf = 1,
  alphafunc = 1,
  outdir = "./",
  outfn = NA,
  mycol,
  physical = TRUE,
  movieDuration = 6,
  zoom = 1.1 ) {
  # NOTE(review): `blobrender` is accepted but never consulted anywhere in
  # this body -- confirm whether it is dead or meant to gate the blob loop.
  # The substrate image list is the only strictly required argument.
  if (missing(surfimg)) {
    stop("Check usage: at minimum, you need to call \n renderSurfaceFunction( list(an_ants_image) ) \n ")
  }
  # misc3d provides contour3d()/drawScene.rgl(); bail out if it is absent.
  havemsc3d<-usePkg("misc3d")
  if ( ! havemsc3d ) {
    print("Need misc3d for this")
    return(NA)
  }
  # Recycle the smoothing level to one value per substrate image, then smooth
  # each one on a clone so the caller's images are not modified in place.
  smoothsval <- rep(smoothsval, length.out = length(surfimg))
  for (i in 1:length(surfimg)) {
    if (smoothsval[i] > 0) {
      simg <- antsImageClone(surfimg[[i]])
      simg<-smoothImage(simg, smoothsval[i])
      surfimg[[i]] <- simg
    }
  }
  # Recycle the isosurface level and surface alpha to one value per substrate.
  surfval <- rep(surfval, length.out = length(surfimg))
  if (length(alphasurf) != length(surfimg))
    alphasurf <- rep(alphasurf, length.out = length(surfimg))
  mylist <- list()
  # Case 1: no functional overlays -- build and draw the isosurfaces, return.
  if (missing(funcimg)) {
    for (i in 1:length(surfimg)) {
      surf <- as.array(surfimg[[i]])
      brain <- misc3d::contour3d(surf, level = c(surfval[i]), alpha = alphasurf[i],
        draw = FALSE, smooth = FALSE, material = "metal", depth = 0.6, color = "white")
      # each point has an ID, 3 points make a triangle , the points are laid out as c(
      # x1 , y1, z1, x2, y2, z2 , ... , xn, yn, zn ) indices are just numbers
      # vertices<-c( brain <- subdivision3d(brain)
      if (physical == TRUE) {
        # Map the triangle vertices from voxel indices to physical space.
        brain$v1 <- antsTransformIndexToPhysicalPoint(surfimg[[i]], brain$v1)
        brain$v2 <- antsTransformIndexToPhysicalPoint(surfimg[[i]], brain$v2)
        brain$v3 <- antsTransformIndexToPhysicalPoint(surfimg[[i]], brain$v3)
      }
      mylist[[i]] <- brain
    }
    misc3d::drawScene.rgl(mylist)
    return(mylist)
  }
  # Case 2: functional overlays supplied. Optionally smooth them first
  # (single smoothing level for all overlays, applied to clones).
  if (smoothfval > 0) {
    for (i in 1:length(funcimg)) {
      fimg <- antsImageClone(funcimg[[i]])
      fimg<-smoothImage( fimg, smoothfval )
      funcimg[[i]] <- fimg
    }
  }
  # Default overlay colors: one rainbow hue per functional image.
  if (missing(mycol)) {
    mycol <- rainbow(length(funcimg))
  }
  if (length(alphafunc) != length(funcimg))
    alphafunc <- rep(alphafunc, length.out = length(funcimg))
  # Build the substrate isosurfaces (same as case 1, but drawing is deferred
  # until the overlay blobs have been added).
  for (i in 1:length(surfimg)) {
    surf <- as.array(surfimg[[i]])
    brain <- misc3d::contour3d(surf, level = c(surfval[i]), alpha = alphasurf[i], draw = FALSE,
      smooth = FALSE, material = "metal", depth = 0.6, color = "white")
    if (physical == TRUE) {
      brain$v1 <- antsTransformIndexToPhysicalPoint(surfimg[[i]], brain$v1)
      brain$v2 <- antsTransformIndexToPhysicalPoint(surfimg[[i]], brain$v2)
      brain$v3 <- antsTransformIndexToPhysicalPoint(surfimg[[i]], brain$v3)
    }
    mylist[[i]] <- brain
  }
  # Append one contour blob per functional image.
  for (i in 1:length(funcimg)) {
    func <- as.array(funcimg[[i]])
    # Positive voxel intensities drive the default threshold below.
    vals <- abs(funcimg[[i]][funcimg[[i]] > 0])
    if (missing(basefval)) {
      # just threshold at mean > 0
      usefval <- mean(vals)
      # print(usefval)
    } else usefval <- basefval
    # NOTE(review): offsetfval is computed here but never used afterwards --
    # presumably intended as an upper display threshold; confirm and wire up.
    if (missing(offsetfval))
      offsetfval <- sd(vals[vals > usefval])
    # print(paste(i, usefval, alphafunc[i]))
    blob <- misc3d::contour3d(func, level = c(usefval), alpha = alphafunc[i], draw = FALSE,
      smooth = FALSE, material = "metal", depth = 0.6, color = mycol[[i]])
    if (physical == TRUE) {
      blob$v1 <- antsTransformIndexToPhysicalPoint(funcimg[[i]], blob$v1)
      blob$v2 <- antsTransformIndexToPhysicalPoint(funcimg[[i]], blob$v2)
      blob$v3 <- antsTransformIndexToPhysicalPoint(funcimg[[i]], blob$v3)
    }
    mylist <- lappend(mylist, list(blob))
  }
  # s<-scene3d() s$rgl::par3d$windowRect <- c(0, 0, 500, 500) # make the window large
  # 1.5*s$rgl::par3d$windowRect s$par3d$zoom = 1.1 # larger values make the image
  # smaller
  # Draw once, then resize/zoom the rgl window and redraw at the final size.
  misc3d::drawScene.rgl(mylist) # surface render
  rgl::par3d(windowRect = c(0, 0, 500, 500)) # make the window large
  rgl::par3d(zoom = zoom ) # larger values make the image smaller
  misc3d::drawScene.rgl(mylist) # surface render
  # Optionally record a spinning-camera movie of the rendered scene.
  if (!is.na(outfn))
    rgl::movie3d(rgl::spin3d(), duration = movieDuration,
      dir = outdir, movie = outfn, clean = TRUE )
  # Return the triangle meshes: substrates first, then one blob per overlay.
  return(mylist)
}
# Make a function that will make each facet from data returned from
# surfaceTriangles applied to a function (probably a more elegant way to do
# this?)
# Build the facet section of an ASCII STL file from surface-triangle data.
#
# `data` is a list of three n x 3 matrices (the layout produced by
# misc3d::contour3d / surfaceTriangles): data[[k]][i, ] holds the x/y/z
# coordinates of the k-th vertex of triangle i.
#
# Returns a character vector with seven lines per triangle:
# facet header, loop open, three vertex lines, loop close, facet footer.
#
# Changed from the original: the facet lines are built vectorized instead of
# growing `facetvector` with c() inside a per-triangle loop (which is O(n^2)
# in the number of triangles); the console progress bar and the unused
# start/end line constants (they belong to .makestl) were dropped.
.makefacet <- function(data) {
  tristart1 <- "facet normal 0 0 0"
  tristart2 <- "    outer loop"
  triend1 <- "    endloop"
  triend2 <- "endfacet"
  # paste() coerces the numeric coordinates to character exactly as the
  # former per-element as.character() calls did.
  v1 <- paste("        vertex", data[[1]][, 1], data[[1]][, 2], data[[1]][, 3], sep = " ")
  v2 <- paste("        vertex", data[[2]][, 1], data[[2]][, 2], data[[2]][, 3], sep = " ")
  v3 <- paste("        vertex", data[[3]][, 1], data[[3]][, 2], data[[3]][, 3], sep = " ")
  # rbind() recycles the scalar markers across all n triangles; reading the
  # resulting 7 x n matrix column-major interleaves the lines per triangle
  # in the same order the original loop emitted them.
  facetvector <- as.vector(rbind(tristart1, tristart2, v1, v2, v3, triend1, triend2))
  return(facetvector)
}
# Make a function that puts the facets together with the file headers and writes
# it out
# Write an ASCII STL file: header line(s), the pre-built facet lines from
# .makefacet(), then the footer.
#
# `facetvector` -- character vector of facet lines (see .makefacet).
# `outfile`     -- path of the STL file to (over)write.
#
# Changed from the original: the misc3d availability check was removed --
# nothing in this function uses misc3d (the check was copied from the
# rendering code); the unused per-facet constants were dropped; and the file
# connection is now released via on.exit() even if writeLines() fails.
.makestl <- function(facetvector, outfile) {
  startline1 <- "+"
  startline2 <- "    solid LAURA"
  endline <- "    endsolid LAURA"
  myout <- c(startline1, startline2, facetvector, endline)
  fileConn <- file(outfile)
  # Guarantee the connection object is destroyed even on write failure.
  on.exit(close(fileConn), add = TRUE)
  writeLines(myout, fileConn)
}
############################ to use this do ############################ ############################
############################ source('R/renderSurfaceFunction.R')
############################ fn<-'/Users/stnava/Downloads/resimplerenderingexample/wmss.nii.gz'
############################ img<-antsImageRead(fn,3) brain<-renderSurfaceFunction( img )
############################ fv<-.makefacet(brain[[1]]) .makestl(fv,'/tmp/temp.stl')
# vtri <- surfaceTriangles(vertices[,1], vertices[,2], vertices[,3] ,
# color='red') drawScene(updateTriangles(vtri, material = 'default', smooth = 3)
# )
|
# Auto-generated fuzzing fixture (AFL/RcppDeepState style): most list slots
# are NULL or zero-length integer vectors; person_id carries the raw fuzzed
# byte pattern reinterpreted as integers.
testlist <- list(nmod = NULL, id = NULL, score = NULL, rsp = NULL, id = NULL, score = NULL, nbr = NULL, id = NULL, bk_nmod = integer(0), booklet_id = integer(0), booklet_score = integer(0), include_rsp = integer(0), item_id = integer(0), item_score = integer(0), module_nbr = integer(0), person_id = c(16777216L, 0L, 1409351680L, 682962941L, 1615462481L, 167774546L, 1801886528L, -1519479597L, -158300141L, 1701913732L, 1152883163L, 35860266L, 1969689444L, -1318203443L, -2131865434L, 1632280887L, 637082149L, 260799231L, 1754027460L, -1055514020L, -1311932986L, -203530874L, -428367857L, -1995603215L, 1192022832L, -996667132L, 432518541L, 815996035L, 1157250652L, 751417555L, 116882132L, 1085030516L, 1202941484L, 15623892L, -1661386009L, 45373390L, 426686228L, 1254131289L, 749806690L, -1501899956L, -1876835267L, 574719753L, 12138419L, -194575513L, 1795807422L, -1377650222L, 453135142L, -1780159831L, 992888811L, -1345548849L, -449112064L, 903217161L, 1678998078L, 759393453L, 786045775L, 1752098800L, 455895826L, -1331816706L, 391475866L, 1748544614L, 19691586L, 1176953756L, 349411874L, 2121585973L, -301177052L, 1082896916L, -450872028L, -636931467L, -53289638L, 1570865300L, -1237972204L, -2096910335L, 1674636960L, 729445123L, 1415150763L, -611049483L, 0L, 0L, 0L, 0L, 0L, 0L))
# Call the unexported (deliberately unchecked) C++ wrapper with the fuzzed
# arguments and print the structure of whatever it returns.
result <- do.call(dexterMST:::make_booklets_unsafe,testlist)
str(result)
|
/dexterMST/inst/testfiles/make_booklets_unsafe/AFL_make_booklets_unsafe/make_booklets_unsafe_valgrind_files/1615943159-test.R
|
no_license
|
akhikolla/updatedatatype-list1
|
R
| false
| false
| 1,455
|
r
|
# Auto-generated fuzzing fixture (AFL/RcppDeepState style): most list slots
# are NULL or zero-length integer vectors; person_id carries the raw fuzzed
# byte pattern reinterpreted as integers.
testlist <- list(nmod = NULL, id = NULL, score = NULL, rsp = NULL, id = NULL, score = NULL, nbr = NULL, id = NULL, bk_nmod = integer(0), booklet_id = integer(0), booklet_score = integer(0), include_rsp = integer(0), item_id = integer(0), item_score = integer(0), module_nbr = integer(0), person_id = c(16777216L, 0L, 1409351680L, 682962941L, 1615462481L, 167774546L, 1801886528L, -1519479597L, -158300141L, 1701913732L, 1152883163L, 35860266L, 1969689444L, -1318203443L, -2131865434L, 1632280887L, 637082149L, 260799231L, 1754027460L, -1055514020L, -1311932986L, -203530874L, -428367857L, -1995603215L, 1192022832L, -996667132L, 432518541L, 815996035L, 1157250652L, 751417555L, 116882132L, 1085030516L, 1202941484L, 15623892L, -1661386009L, 45373390L, 426686228L, 1254131289L, 749806690L, -1501899956L, -1876835267L, 574719753L, 12138419L, -194575513L, 1795807422L, -1377650222L, 453135142L, -1780159831L, 992888811L, -1345548849L, -449112064L, 903217161L, 1678998078L, 759393453L, 786045775L, 1752098800L, 455895826L, -1331816706L, 391475866L, 1748544614L, 19691586L, 1176953756L, 349411874L, 2121585973L, -301177052L, 1082896916L, -450872028L, -636931467L, -53289638L, 1570865300L, -1237972204L, -2096910335L, 1674636960L, 729445123L, 1415150763L, -611049483L, 0L, 0L, 0L, 0L, 0L, 0L))
# Call the unexported (deliberately unchecked) C++ wrapper with the fuzzed
# arguments and print the structure of whatever it returns.
result <- do.call(dexterMST:::make_booklets_unsafe,testlist)
str(result)
|
# install.packages('rvest')
# install.packages('tidyverse')
# install.packages('tidyquant')
# install.packages('ggthemes')

# Scrape the Wikipedia polling tables for the 2018 Italian general election,
# build a tidy per-coalition time series, plot a poll-of-polls, and export it.
library('rvest')
library('dplyr')
library('ggplot2')
library('tidyquant')
library('ggthemes')
library('reshape2')

url <- 'https://en.wikipedia.org/wiki/Opinion_polling_for_the_Italian_general_election,_2018'

# --- Polls conducted in 2018 (table 7 on the page) ---------------------------
table_2018 <- url %>%
  read_html() %>%
  html_node(xpath='/html/body/div[3]/div[3]/div[4]/div/table[7]') %>%
  html_table(fill = TRUE) %>%
  setNames(c('date', 'firm','centre-left','centre-right','m5s','leu','others','lead')) %>%
  tail(-1)
# Coerce the percentage columns (everything after date/firm) to numeric.
for (i in 3:ncol(table_2018)) {
  table_2018[,i] <- as.numeric(as.character(table_2018[,i]))
}
# Keep the last day of each fieldwork range, parse it, and melt to long form.
data_2018 <- table_2018 %>%
  group_by(date) %>%
  mutate(cut_date = paste(tail(strsplit(date, "–")[[1]], n=1), " 2018")) %>%
  mutate(clean_date = as.Date(cut_date, format="%d %b %Y")) %>%
  ungroup() %>%
  select(-date, -firm, -lead, -cut_date) %>%
  melt(id="clean_date")
ggplot(data = data_2018, aes(clean_date, value, color=variable)) +
  geom_point() +
  geom_ma(ma_fun = SMA, n = 3)

# --- Polls conducted in 2017 (table 8 on the page) ---------------------------
one_year <- url %>%
  read_html() %>%
  html_node(xpath='/html/body/div[3]/div[3]/div[4]/div/table[8]') %>%
  html_table(fill = TRUE) %>%
  setNames(c('date', 'firm','centre-left','centre-right','m5s','leu','others','lead')) %>%
  tail(-1)
for (i in 3:ncol(one_year)) {
  one_year[,i] <- as.numeric(as.character(one_year[,i]))
}
data_2017 <- one_year %>% group_by(date) %>%
  mutate(cut_date = paste(tail(strsplit(date, "–")[[1]], n=1), " 2017")) %>%
  mutate(clean_date = as.Date(cut_date, format="%d %b %Y")) %>%
  ungroup() %>%
  select(-date, -firm, -lead, -cut_date) %>%
  melt(id="clean_date")
ggplot(data = data_2017, aes(clean_date, value, color=variable)) +
  geom_point(aes(shape="21",alpha=1/100)) +
  geom_ma(ma_fun = SMA, n = 10)

# --- Older polls (table 6): only date/firm/lead are kept ---------------------
two_years <- url %>%
  read_html() %>%
  html_node(xpath='/html/body/div[3]/div[3]/div[4]/div/table[6]') %>%
  html_table(fill = TRUE) %>%
  setNames(c('date', 'firm', 'lead')) %>%
  tail(-2)
# BUG FIX: the original index was `c(3:13:ncol(two_years))`, which R parses
# as `(3:13):ncol(...)` and silently uses only the first element (with a
# warning); the intent was every column after date/firm.
for (i in 3:ncol(two_years)) {
  two_years[,i] <- as.numeric(as.character(two_years[,i]))
}

#########################
# merge data together now
data <- merge(data_2018, data_2017, all=TRUE)
# Rename and compute a right-aligned 20-poll rolling mean per coalition.
# rollapply() comes from zoo (attached via tidyquant's dependencies) --
# TODO confirm it is on the search path in a fresh session.
data <- data %>% rename(date = clean_date, coalition = variable) %>%
  group_by(coalition) %>%
  mutate(mean20_missing = rollapply(value, width = 20, fill = NA, partial = TRUE,
                                    FUN=function(x) mean(x, na.rm=TRUE), align = "right"))
ggplot(data,
       aes(date, color=coalition)) +
  geom_point(aes(y=value, shape="21", alpha=1/100)) +
  geom_line(aes(y=mean20_missing, color=coalition))
# BUG FIX: write_csv() belongs to readr, which is never attached here, so the
# original line errored at runtime; use base write.csv instead.
write.csv(data, "data/coalition.csv", row.names = FALSE)
|
/coalitions.R
|
no_license
|
basilesimon/italian-election-2018-poll-of-polls
|
R
| false
| false
| 2,818
|
r
|
# install.packages('rvest')
# install.packages('tidyverse')
# install.packages('tidyquant')
# install.packages('ggthemes')

# Scrape the Wikipedia polling tables for the 2018 Italian general election,
# build a tidy per-coalition time series, plot a poll-of-polls, and export it.
library('rvest')
library('dplyr')
library('ggplot2')
library('tidyquant')
library('ggthemes')
library('reshape2')

url <- 'https://en.wikipedia.org/wiki/Opinion_polling_for_the_Italian_general_election,_2018'

# --- Polls conducted in 2018 (table 7 on the page) ---------------------------
table_2018 <- url %>%
  read_html() %>%
  html_node(xpath='/html/body/div[3]/div[3]/div[4]/div/table[7]') %>%
  html_table(fill = TRUE) %>%
  setNames(c('date', 'firm','centre-left','centre-right','m5s','leu','others','lead')) %>%
  tail(-1)
# Coerce the percentage columns (everything after date/firm) to numeric.
for (i in 3:ncol(table_2018)) {
  table_2018[,i] <- as.numeric(as.character(table_2018[,i]))
}
# Keep the last day of each fieldwork range, parse it, and melt to long form.
data_2018 <- table_2018 %>%
  group_by(date) %>%
  mutate(cut_date = paste(tail(strsplit(date, "–")[[1]], n=1), " 2018")) %>%
  mutate(clean_date = as.Date(cut_date, format="%d %b %Y")) %>%
  ungroup() %>%
  select(-date, -firm, -lead, -cut_date) %>%
  melt(id="clean_date")
ggplot(data = data_2018, aes(clean_date, value, color=variable)) +
  geom_point() +
  geom_ma(ma_fun = SMA, n = 3)

# --- Polls conducted in 2017 (table 8 on the page) ---------------------------
one_year <- url %>%
  read_html() %>%
  html_node(xpath='/html/body/div[3]/div[3]/div[4]/div/table[8]') %>%
  html_table(fill = TRUE) %>%
  setNames(c('date', 'firm','centre-left','centre-right','m5s','leu','others','lead')) %>%
  tail(-1)
for (i in 3:ncol(one_year)) {
  one_year[,i] <- as.numeric(as.character(one_year[,i]))
}
data_2017 <- one_year %>% group_by(date) %>%
  mutate(cut_date = paste(tail(strsplit(date, "–")[[1]], n=1), " 2017")) %>%
  mutate(clean_date = as.Date(cut_date, format="%d %b %Y")) %>%
  ungroup() %>%
  select(-date, -firm, -lead, -cut_date) %>%
  melt(id="clean_date")
ggplot(data = data_2017, aes(clean_date, value, color=variable)) +
  geom_point(aes(shape="21",alpha=1/100)) +
  geom_ma(ma_fun = SMA, n = 10)

# --- Older polls (table 6): only date/firm/lead are kept ---------------------
two_years <- url %>%
  read_html() %>%
  html_node(xpath='/html/body/div[3]/div[3]/div[4]/div/table[6]') %>%
  html_table(fill = TRUE) %>%
  setNames(c('date', 'firm', 'lead')) %>%
  tail(-2)
# BUG FIX: the original index was `c(3:13:ncol(two_years))`, which R parses
# as `(3:13):ncol(...)` and silently uses only the first element (with a
# warning); the intent was every column after date/firm.
for (i in 3:ncol(two_years)) {
  two_years[,i] <- as.numeric(as.character(two_years[,i]))
}

#########################
# merge data together now
data <- merge(data_2018, data_2017, all=TRUE)
# Rename and compute a right-aligned 20-poll rolling mean per coalition.
# rollapply() comes from zoo (attached via tidyquant's dependencies) --
# TODO confirm it is on the search path in a fresh session.
data <- data %>% rename(date = clean_date, coalition = variable) %>%
  group_by(coalition) %>%
  mutate(mean20_missing = rollapply(value, width = 20, fill = NA, partial = TRUE,
                                    FUN=function(x) mean(x, na.rm=TRUE), align = "right"))
ggplot(data,
       aes(date, color=coalition)) +
  geom_point(aes(y=value, shape="21", alpha=1/100)) +
  geom_line(aes(y=mean20_missing, color=coalition))
# BUG FIX: write_csv() belongs to readr, which is never attached here, so the
# original line errored at runtime; use base write.csv instead.
write.csv(data, "data/coalition.csv", row.names = FALSE)
|
# Data Sample -------------------------------------------------------------
# Load the processed college-scorecard data, impute a sentinel for NAs in
# low-cardinality double columns, and split it 70/15/15 into
# train / model-selection / performance-holdout sets.
library(tidyverse)
library(rsample)
set.seed(36802911)
colleges <- read_rds('../data/processed/colleges.rds')
colleges <- colleges %>% filter(!is.na(compl_rpy_5yr_rt))

# Replace NAs with the sentinel -1, but only in double columns with fewer
# than 6 distinct values (category-like numeric codes); every other column
# is returned unchanged.
#
# @param array A data-frame column (any type).
# @return The column, NA-imputed when it is a low-cardinality double.
noNA <- function(array) {
  # check the type of array
  type <- typeof(array)
  n <- array %>% n_distinct()
  # BUG FIX: the original used the vectorized `&` inside if(); `&&` is the
  # correct scalar, short-circuiting operator (and `&` on a length > 1
  # condition errors under R >= 4.3).
  if (type == "double" && n < 6) {
    naless <- array %>% replace_na(-1)
  } else {
    naless <- array
  }
  return(naless)
}
# check what the function is doing
# save data
colleges <- colleges %>%
  map_df(noNA)
# looking at the data
# colleges %>%
#  map_df(n_distinct) %>%
#  pivot_longer( -unitid, names_to = "var", values_to = "n") %>%
#  arrange(n) %>% view()
terms <- read_rds('../data/processed/terms.rds')

# Splitting all the data ----------------------------------------------------------
# ---------- Create modeling data set: 70%
colleges_split_initial <- colleges %>% initial_split(7/10)
# Training dataset for performance
colleges_train <- colleges_split_initial %>% training()
# ----------- Create (model selection) holdout dataset: 15%
colleges_split_holdouts <- colleges_split_initial %>% testing() %>% initial_split(1/2)
# Validation dataset for model selection (half of the 30% holdout)
colleges_select <- colleges_split_holdouts %>% training()
# Performance holdout (the remaining 15%)
colleges_perform <- colleges_split_holdouts %>% testing()
|
/documents/sample_md.R
|
no_license
|
dxre-v3/college-scorecard
|
R
| false
| false
| 1,508
|
r
|
# Data Sample -------------------------------------------------------------
# Load the processed college-scorecard data, impute a sentinel for NAs in
# low-cardinality double columns, and split it 70/15/15 into
# train / model-selection / performance-holdout sets.
library(tidyverse)
library(rsample)
set.seed(36802911)
colleges <- read_rds('../data/processed/colleges.rds')
colleges <- colleges %>% filter(!is.na(compl_rpy_5yr_rt))

# Replace NAs with the sentinel -1, but only in double columns with fewer
# than 6 distinct values (category-like numeric codes); every other column
# is returned unchanged.
#
# @param array A data-frame column (any type).
# @return The column, NA-imputed when it is a low-cardinality double.
noNA <- function(array) {
  # check the type of array
  type <- typeof(array)
  n <- array %>% n_distinct()
  # BUG FIX: the original used the vectorized `&` inside if(); `&&` is the
  # correct scalar, short-circuiting operator (and `&` on a length > 1
  # condition errors under R >= 4.3).
  if (type == "double" && n < 6) {
    naless <- array %>% replace_na(-1)
  } else {
    naless <- array
  }
  return(naless)
}
# check what the function is doing
# save data
colleges <- colleges %>%
  map_df(noNA)
# looking at the data
# colleges %>%
#  map_df(n_distinct) %>%
#  pivot_longer( -unitid, names_to = "var", values_to = "n") %>%
#  arrange(n) %>% view()
terms <- read_rds('../data/processed/terms.rds')

# Splitting all the data ----------------------------------------------------------
# ---------- Create modeling data set: 70%
colleges_split_initial <- colleges %>% initial_split(7/10)
# Training dataset for performance
colleges_train <- colleges_split_initial %>% training()
# ----------- Create (model selection) holdout dataset: 15%
colleges_split_holdouts <- colleges_split_initial %>% testing() %>% initial_split(1/2)
# Validation dataset for model selection (half of the 30% holdout)
colleges_select <- colleges_split_holdouts %>% training()
# Performance holdout (the remaining 15%)
colleges_perform <- colleges_split_holdouts %>% testing()
|
#mRNA-protein dataset merging
# This script will
# 1. map Protein groups to ENSG identifiers
# 2. merge genentech, ccle and sanger ExpressionAtlas datasets with proteomics matrix
# 3. create a separate df for each cell line to perform mRNA-protein correlations
# 4. final plots
require(reshape)
require(data.table)
### #first run script to prepare protein data: "generate_protein_quant_files.R"
################## 1. ########################################################
# load the mapping reference file - this is provided by UniProt but can be any reference file
# (column 1 = UniProt accession, column 19 = semicolon-separated Ensembl gene IDs)
##
xxy.map <- read.table( "HUMAN_9606_idmapping_selected.tab", header = F, sep = "\t", fill = T, stringsAsFactors = FALSE)
colnames(xxy.map) <- c("UniProtKB.AC","UniProtKB.ID","GeneID..EntrezGene.","RefSeq","GI", "PDB","GO","UniRef100","UniRef90","UniRef50","UniParc","PIR","NCBI.taxon","MIM","UniGene", "PubMed","EMBL","EMBL.CDS","Ensembl","Ensembl_TRS","Ensembl_PRO","Additional.PubMed")
uniprot.map <- xxy.map[ , c(1, 19)]
rm(xxy.map)
# # using cl data frame - prepared by "generate_protein_quant_files.R" script
# cl is the cell lines matrix
# NOTE(review): `cl` is assumed to already exist in the workspace -- this
# script errors if generate_protein_quant_files.R has not been run first.
x <- cl
x <- 2^x
xx <- melt(t(x))
colnames(xx) <- c("ID", "Variable", "Value")
#
# NOTE(review): this cast/transposed `xx` is only inspected with table()
# below and is then overwritten later (xx <- data.to.map) -- looks like
# leftover exploratory code.
xx <- cast(xx, ID~Variable, value = "Value", mean)
row.names(xx) <- xx$ID
xx <- xx[ , -1]
xx <- as.data.frame(xx)
xx <- as.data.frame(t(xx))
table(colnames(xx))
table(colnames(cl))
##### perform the protein group to gene mapping
c.l.cl <- colnames(cl)
data.to.map <- as.data.frame(cl) # xx
colnames(data.to.map) <- c.l.cl
data.to.map$Majority.protein.IDs <- row.names(data.to.map)
##
# For each protein group, split the ";"-separated accessions, extract the
# canonical UniProt accession with a regex, look up the Ensembl gene IDs,
# and store the unique set back as a ";"-separated string.
data.to.map$ENSG <- "NA"
for(i in 1:nrow(data.to.map)){
  x <- data.frame(strsplit(data.to.map[ i, "Majority.protein.IDs"], split = ";"), stringsAsFactors = FALSE)
  colnames(x) <- "prot"
  # extract canonical UniProt protein ID
  # NOTE(review): regmatches() with regexpr() returns only the matching
  # elements, so this assignment errors if any accession fails the regex --
  # confirm all IDs are well-formed UniProt accessions.
  x[,1] <- regmatches(x[,1],regexpr("[OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2}", x[,1]))
  x <- merge(x, uniprot.map, by.x = "prot", by.y = "UniProtKB.AC" )
  all.genes <- sub(" ","", unlist(strsplit(x[ ,2], ";" )))
  data.to.map[ i, "ENSG"] <- paste( unique(all.genes), collapse = ";")
}
######################### match the protein ids with the uniprot data ###############################################
xx <- data.to.map
# remove protein groups that have no mapping to an ENSG gene ID
xx <- xx[ xx$ENSG != "" , ]
# remove all protein groups that map to multiple ENSG gene IDs (this is quite a lot) - the reasoning here is that we cannot establish for sure which gene is contributing the signal to protein; all genes contribute equally or one gene is a majority?
xx <- xx[ grep(";", xx$ENSG, invert = TRUE) , ]
# for genes that map to multiple proteins, in order to determine the amount of protein that gene is producing - sum the protein quantification values
# NOTE(review): the grouping column is named "ESNG" here (typo for ENSG);
# the downstream code (xx$ESNG, prot$ESNG) relies on this exact spelling,
# so the typo is load-bearing -- rename with care.
xx.Majority.protein.IDs <- aggregate(xx$Majority.protein.IDs, list(ESNG = xx$ENSG ), function(x) paste0( (x) ) )
xx <- aggregate(xx[ , 1:(ncol(xx)-2)], list(ENSG = xx$ENSG), sum, na.rm =TRUE)
#
# Bind the protein-group labels (ESNG, x) in front of the summed quant matrix.
xx <- cbind(xx.Majority.protein.IDs, xx)
###########################################################################
##################################################################################
# Load the three Expression Atlas FPKM matrices (Genentech, CCLE, Sanger).
genentech <- read.table("E-MTAB-2706-query-results.fpkms.tsv", header = T, sep = "\t", fill = T, stringsAsFactors = FALSE)# , check.names = FALSE
ccle <- read.table("E-MTAB-2770-query-results.fpkms.tsv", header = T, sep = "\t", fill = T, stringsAsFactors = FALSE, comment.char = "#")
sanger <- read.table("E-MTAB-3983-query-results.fpkms.tsv", header = T, sep = "\t", fill = T, stringsAsFactors = FALSE, comment.char = "#")
#######
# Full outer join of two expression tables on the Ensembl gene ID.
MyMerge <- function(x, y){
  df <- merge(x, y, by= "Gene.ID", all.x= TRUE, all.y= TRUE)
  return(df)
}
cgs <- Reduce(MyMerge, list(ccle, genentech, sanger))
cgs.cols <- colnames(cgs)
row.names(cgs) <- cgs$Gene.ID
# Keep only genes that survived the protein-group mapping (note: xx$ESNG --
# the typo column name created above).
ind <- cgs$Gene.ID %in% xx$ESNG
rna <- cgs[ ind , ]
# Normalize cell-line names: strip the "..assay" suffix, uppercase, drop
# "NCI." prefixes, dots, and the leading "X" that make.names() adds.
rna.cells <- sapply( strsplit( colnames(rna), "\\.\\."), "[",1 )
rna.cells <- toupper(rna.cells)
rna.cells <- sub("NCI\\.", "", rna.cells)
rna.cells <- gsub("\\.", "", rna.cells)
rna.cells <- gsub("^X", "", rna.cells)
rna.cells
# Drop the Gene.ID column and any "GENENAM*" columns, then relabel.
cgs <- cgs[ ind , -c(1, grep("GENENAM.*" , rna.cells)) ]
colnames(cgs) <- rna.cells[-c(1, grep("GENENAM.*" , rna.cells))]
cgs <- melt(t(cgs))
#
# Average duplicate (cell line, gene) measurements across the three atlases
# via data.table, then pivot back to a cell-line x gene data frame.
colnames(cgs) <- c("ID", "Variable", "Value")
cgs$ID <- as.character(cgs$ID)
cgs$Variable <- as.character(cgs$Variable)
cgs <- as.data.table(cgs)
cgs <- cgs[, mean(Value, na.rm = T ), by = c("ID", "Variable")]
cgs <- dcast(cgs, ID~Variable, value.var = "V1")
row.names(cgs) <- cgs$ID
# NOTE(review): cgs.cols is reassigned here, discarding the column names
# saved right after the merge above.
cgs.cols <- cgs$ID
cgs <- as.data.frame(cgs[ , -1])
cgs <- as.data.frame(t(cgs))
colnames(cgs) <- cgs.cols
#########################################################################################
#########################################################################################
prot <- xx
#########################################################################################
#########################################################################################
# Gene x cell-line protein matrix: rows keyed by the (typo'd) ESNG column,
# first three label columns dropped, columns relabelled with cell-line names.
row.names(prot) <- prot$ESNG
prot <- prot[ , -c(1:3)]
colnames(prot) <- c.l.cl
##############################
# NOTE(review): l.prot and l.rna are created but never filled anywhere in
# this script -- apparently dead.
l.prot <- list()
l.rna <- list()
# Per-cell-line Spearman correlation between mRNA (cgs) and protein (prot)
# over the genes present (complete cases) in both.
cors <- vector(mode = "numeric", length = length(colnames(prot)))
names(cors) <- colnames(prot)
cors.on.how.many.genes <- c()
for(i in 1:length(colnames(prot))){
  print(names(prot)[i])
  rna.1 <- cgs[, colnames(cgs) %in% colnames(prot)[i], drop = FALSE]
  prot.1 <- prot[ , i, drop = FALSE]
  # Merge the two single-column frames on the gene ID carried as row names.
  rna.1$prot <- row.names(rna.1)
  prot.1$prot <- row.names(prot.1)
  rna.prot <- merge(rna.1, prot.1, by="prot", all= F)
  rna.prot <- rna.prot[ complete.cases(rna.prot), ]
  print( nrow(rna.prot) )
  # ncol == 2 means the cell line has no RNA column -> no correlation.
  if(ncol(rna.prot) == 2){
    cors[i] <- NA
    cors.on.how.many.genes[i] <- NA
  } else {
    cor.1 <- cor( rna.prot[ ,2], rna.prot[,3],use="pairwise.complete.obs", method = "spearman")
    cors[i] <- cor.1
    cors.on.how.many.genes[i] <- nrow(rna.prot)
  }
}
names(cors)
cors <- data.frame(cors)
cors$c.l <- c.l.cl
# NOTE(review): assay.names is not defined anywhere in this script -- it is
# assumed to be left in the workspace by an earlier script; confirm.
cors$assay.names <- assay.names
##
mean(cors.on.how.many.genes, na.rm = T )
# Histogram of per-cell-line correlations, with the median marked.
hist(cors[,1], xlim = c(0.4,0.75), col = "#fa9fb5")#breaks = 147,
abline(v = median(cors[,1], na.rm = T), lwd = 2, col = "black", lty = 2.5)
summary(cors$cors)
# NOTE(review): round() on the whole data frame will fail on the character
# columns added above -- presumably round(cors$cors, 2) was intended.
table(round(cors, 2))
median(cors[,1], na.rm = T)
#
############
############
#
# Attach lineage metadata to the per-cell-line correlations and test for
# lineage / batch effects.
# NOTE(review): `meta` keeps the first metadata file, but cell.metadata is
# immediately replaced by the second read -- only the COSMIC file is used.
meta <- cell.metadata <- read.table( "Supplementary-Table-1-samples-linegae-metadata_FINAL.txt", quote = "\"", header = TRUE, sep = "\t", stringsAsFactors = FALSE, na.strings = c(NA, "NA", "NaN"), strip.white = T)
cell.metadata <- read.table( "cell-lines-metadata.complete.cosmic.txt", sep ="\t", header = TRUE, stringsAsFactors = FALSE)
cell.metadata <- cell.metadata[ !duplicated(cell.metadata$my.cells), ]
lineage <- as.character(cell.metadata$Lineage[match((cors$c.l), cell.metadata$my.cells)])
lineage.stats <- table(lineage)
lineage <- gsub("large_intestine" , "colorectal", lineage)
# Capitalize the first letter of each lineage label.
lineage <- paste0(toupper(substr(lineage , 1, 1)), substr(lineage , 2, nchar(lineage )))
cors <- cbind(cors, lineage)
table(cors$lineage)
cors <- cors[ complete.cases(cors), ]
# Keep only lineages represented by at least 3 cell lines.
cors <- cors[cors$lineage %in% names(which(table(cors$lineage) > 2)), ]
cors <- droplevels(cors)
# NOTE(review): cors$batch does not exist yet at this point (it is created
# further down from assay.names), so this factors NULL -- a latent ordering
# bug; move this line below the batch assignment.
cors$batch <- factor(cors$batch)
cors$lineage <- factor(cors$lineage)
boxplot(cors$cors ~ cors$lineage, las = 2)
aggregate(cors ~ as.factor(lineage), data = cors, median)
# Order lineages by decreasing median correlation for plotting.
bymedian.cors <- with(cors, reorder(lineage, -cors, median))
op <- par(mar=c(7,4,4,1))
boxplot(cors ~ bymedian.cors, data = cors,
        ylab = "Correlation",
        main = "", varwidth = F,
        col = "#fde0dd", las = 2,outline=FALSE, ylim = c(min(cors$cors, na.rm = T), max(cors$cors, na.rm = T)),frame.plot = FALSE ) #xlab = "Cell line lineage",
stripchart(cors ~ bymedian.cors, data = cors, vertical=T, method="jitter", pch=19,add=TRUE, col = "#737373", cex = 1)
rm(op)#
# Derive the TMT batch from the third "_"-separated token of the assay name.
cors$batch <- sapply(strsplit(cors$assay.names, "_"), "[" , 3)
bymedian.cors <- with(cors, reorder(batch, -cors, median))
boxplot(cors ~ bymedian.cors, data = cors,
        ylab = "Correlation",
        main = "", varwidth = F,
        col = "#fde0dd", las = 2,outline=FALSE, ylim = c(min(cors$cors, na.rm = T), max(cors$cors, na.rm = T)),frame.plot = FALSE ) #xlab = "Cell line lineage",
stripchart(cors ~ bymedian.cors, data = cors, vertical=T, method="jitter", pch=19,add=TRUE, col = "#737373", cex = 1)
# Nonparametric and linear-model tests for lineage and batch effects.
kruskal.test(cors$cors ~ cors$lineage)
kruskal.test(cors$cors ~ as.factor(cors$batch))
aov1 <- aov(cors$cors ~ cors$lineage * as.factor(cors$batch))
summary(aov1)
glm1 <- glm(cors$cors ~ cors$lineage * cors$batch)
summary(glm1)
#
two.way <- aov(cors$cors ~ cors$lineage + cors$batch)
summary(two.way)
plot(two.way)
tukey.two.way<-TukeyHSD(two.way)
x <- tukey.two.way$`cors$lineage`
x.x <-tukey.two.way$`cors$batch`
plot(tukey.two.way, las = 1)
boxplot(tukey.two.way)
##
# Pairwise Wilcoxon tests, BH-adjusted; the second call overwrites the first.
tes.result <- pairwise.wilcox.test(cors$cors , cors$lineage,
                                   p.adjust.method = "BH")
tes.result <- pairwise.wilcox.test(cors$cors , as.factor(cors$batch),
                                   p.adjust.method = "BH")
tes.result
sum(tes.result$p.value < 0.05, na.rm = T)
# NOTE(review): after this reassignment tes.result is a plain vector, so the
# tes.result$p.value accesses below return NULL -- the corrplot call likely
# fails; keep the htest object in a separate variable instead.
tes.result <- tes.result[["p.value"]][tes.result$p.value < 0.05]
require(plyr)
library(corrplot)
plot.table(as.matrix(tes.result$p.value))
# , smain='Cohort(users)', highlight = TRUE, colorbar = TRUE)
M <- as.matrix(tes.result$p.value)
corrplot(M, is.cor = FALSE, typ = "lower", tl.col = "black", method = "number", col = "red",p.mat = tes.result$p.value, sig.level = c(.05), order = "original", insig = "blank", cl.pos = "n", number.digits = 3 )
|
/main-analysis-scripts/Fig4-Fig5-get-mRNAA-protein-correlations.R
|
no_license
|
J-Andy/Protein-expression-in-human-cancer
|
R
| false
| false
| 9,604
|
r
|
#mRNA-protein dataset merging
# This script will
# 1. map Protein groups to ENSG identifiers
# 2. merge genentech, ccle and sanger ExpressionAtlas datasets with proteomics matrix
# 3. create a separate df for each cell line to perform mRNA-protein correlations
# 4. final plots
require(reshape)
require(data.table)
### first run the script that prepares the protein data: "generate_protein_quant_files.R"
### NOTE(review): it must leave the protein matrix `cl` in the workspace -- confirm.
################## 1. map protein groups to ENSG identifiers ################
# Load the UniProt "idmapping_selected" reference table (tab-separated, no
# header): column 1 = UniProt accession, column 19 = Ensembl gene ID(s).
xxy.map <- read.table("HUMAN_9606_idmapping_selected.tab", header = FALSE, sep = "\t", fill = TRUE, stringsAsFactors = FALSE)
colnames(xxy.map) <- c("UniProtKB.AC","UniProtKB.ID","GeneID..EntrezGene.","RefSeq","GI", "PDB","GO","UniRef100","UniRef90","UniRef50","UniParc","PIR","NCBI.taxon","MIM","UniGene", "PubMed","EMBL","EMBL.CDS","Ensembl","Ensembl_TRS","Ensembl_PRO","Additional.PubMed")
# keep only the accession -> Ensembl gene mapping
uniprot.map <- xxy.map[, c(1, 19)]
rm(xxy.map)
# # using cl data frame - prepared by "generate_protein_quant_files.R" script
# cl is the cell lines matrix (rows = protein groups, columns = cell lines)
x <- cl
x <- 2^x  # back-transform from log2 scale
xx <- melt(t(x))
colnames(xx) <- c("ID", "Variable", "Value")
# collapse duplicated (ID, Variable) pairs by their mean
xx <- cast(xx, ID ~ Variable, value = "Value", mean)
row.names(xx) <- xx$ID
xx <- xx[, -1]
xx <- as.data.frame(xx)
xx <- as.data.frame(t(xx))
table(colnames(xx))
table(colnames(cl))
##### perform the protein group to gene mapping
c.l.cl <- colnames(cl)
data.to.map <- as.data.frame(cl) # xx
colnames(data.to.map) <- c.l.cl
data.to.map$Majority.protein.IDs <- row.names(data.to.map)
# For each protein group (";"-separated UniProt IDs) collect the Ensembl gene
# IDs it maps to, stored as one ";"-separated string per group.
data.to.map$ENSG <- "NA"
for (i in seq_len(nrow(data.to.map))) {  # seq_len() is safe for 0-row inputs
  x <- data.frame(strsplit(data.to.map[i, "Majority.protein.IDs"], split = ";"), stringsAsFactors = FALSE)
  colnames(x) <- "prot"
  # extract canonical UniProt protein ID
  x[, 1] <- regmatches(x[, 1], regexpr("[OPQ][0-9][A-Z0-9]{3}[0-9]|[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2}", x[, 1]))
  x <- merge(x, uniprot.map, by.x = "prot", by.y = "UniProtKB.AC")
  all.genes <- sub(" ", "", unlist(strsplit(x[, 2], ";")))
  data.to.map[i, "ENSG"] <- paste(unique(all.genes), collapse = ";")
}
######################### match the protein ids with the uniprot data ###############################################
xx <- data.to.map
# remove protein groups that have no mapping to an ENSG gene ID
xx <- xx[xx$ENSG != "", ]
# remove all protein groups that map to multiple ENSG gene IDs (quite a lot):
# we cannot establish which gene contributes the protein signal.
xx <- xx[grep(";", xx$ENSG, invert = TRUE), ]
# for genes that map to multiple protein groups, sum the quantification values
# NOTE(review): the aggregate key is spelled "ESNG" here and the rest of the
# script depends on that exact (misspelled) column name -- do not "fix" it alone.
xx.Majority.protein.IDs <- aggregate(xx$Majority.protein.IDs, list(ESNG = xx$ENSG), function(x) paste0((x)))
xx <- aggregate(xx[, 1:(ncol(xx) - 2)], list(ENSG = xx$ENSG), sum, na.rm = TRUE)
xx <- cbind(xx.Majority.protein.IDs, xx)
###########################################################################
##################################################################################
# Expression Atlas FPKM tables (mRNA) for the three cell-line panels
genentech <- read.table("E-MTAB-2706-query-results.fpkms.tsv", header = TRUE, sep = "\t", fill = TRUE, stringsAsFactors = FALSE) # , check.names = FALSE
ccle <- read.table("E-MTAB-2770-query-results.fpkms.tsv", header = TRUE, sep = "\t", fill = TRUE, stringsAsFactors = FALSE, comment.char = "#")
sanger <- read.table("E-MTAB-3983-query-results.fpkms.tsv", header = TRUE, sep = "\t", fill = TRUE, stringsAsFactors = FALSE, comment.char = "#")
#######
# Full outer join of two expression tables on their shared "Gene.ID" column;
# used with Reduce() to stack the three Expression Atlas panels.
MyMerge <- function(x, y){
  merge(x, y, by = "Gene.ID", all.x = TRUE, all.y = TRUE)
}
# Full outer join of the three mRNA tables on Gene.ID
cgs <- Reduce(MyMerge, list(ccle, genentech, sanger))
cgs.cols <- colnames(cgs)
row.names(cgs) <- cgs$Gene.ID
# keep only genes that survived the protein-group mapping above
ind <- cgs$Gene.ID %in% xx$ESNG
rna <- cgs[ind, ]
# Normalise cell-line names: strip the ".." assay suffix, the "NCI." prefix,
# all dots and the leading "X" added by make.names(), then upper-case.
rna.cells <- sapply(strsplit(colnames(rna), "\\.\\."), "[", 1)
rna.cells <- toupper(rna.cells)
rna.cells <- sub("NCI\\.", "", rna.cells)
rna.cells <- gsub("\\.", "", rna.cells)
rna.cells <- gsub("^X", "", rna.cells)
rna.cells
# drop the Gene.ID column and any "GENE NAME"-style annotation columns
cgs <- cgs[ind, -c(1, grep("GENENAM.*", rna.cells))]
colnames(cgs) <- rna.cells[-c(1, grep("GENENAM.*", rna.cells))]
cgs <- melt(t(cgs))
colnames(cgs) <- c("ID", "Variable", "Value")
cgs$ID <- as.character(cgs$ID)
cgs$Variable <- as.character(cgs$Variable)
cgs <- as.data.table(cgs)
# average replicate columns that collapsed onto the same cell-line name
cgs <- cgs[, mean(Value, na.rm = TRUE), by = c("ID", "Variable")]
cgs <- dcast(cgs, ID ~ Variable, value.var = "V1")
row.names(cgs) <- cgs$ID
cgs.cols <- cgs$ID
cgs <- as.data.frame(cgs[, -1])
cgs <- as.data.frame(t(cgs))
colnames(cgs) <- cgs.cols
#########################################################################################
prot <- xx
#########################################################################################
# first three columns are the ESNG key / protein-ID bookkeeping columns
row.names(prot) <- prot$ESNG
prot <- prot[, -c(1:3)]
colnames(prot) <- c.l.cl
##############################
# Per-cell-line Spearman correlation between mRNA (FPKM) and protein quantity.
l.prot <- list()
l.rna <- list()
n.cells <- length(colnames(prot))
cors <- vector(mode = "numeric", length = n.cells)
names(cors) <- colnames(prot)
# preallocate instead of growing the vector inside the loop
cors.on.how.many.genes <- rep(NA_real_, n.cells)
for (i in seq_len(n.cells)) {
  print(names(prot)[i])
  rna.1 <- cgs[, colnames(cgs) %in% colnames(prot)[i], drop = FALSE]
  prot.1 <- prot[, i, drop = FALSE]
  rna.1$prot <- row.names(rna.1)
  prot.1$prot <- row.names(prot.1)
  # inner join on gene ID, then keep complete (mRNA, protein) pairs only
  rna.prot <- merge(rna.1, prot.1, by = "prot", all = FALSE)
  rna.prot <- rna.prot[complete.cases(rna.prot), ]
  print(nrow(rna.prot))
  if (ncol(rna.prot) == 2) {
    # no matching mRNA column for this cell line -> no correlation possible
    cors[i] <- NA
    cors.on.how.many.genes[i] <- NA
  } else {
    cor.1 <- cor(rna.prot[, 2], rna.prot[, 3], use = "pairwise.complete.obs", method = "spearman")
    cors[i] <- cor.1
    cors.on.how.many.genes[i] <- nrow(rna.prot)
  }
}
names(cors)
cors <- data.frame(cors)
cors$c.l <- c.l.cl
# NOTE(review): `assay.names` is not created in this script; it must already
# exist in the workspace (from the upstream protein script) -- confirm.
cors$assay.names <- assay.names
# BUG FIX: parse the batch identifier (third "_"-separated token of the assay
# name) here, BEFORE it is used. The original called factor(cors$batch) below
# while the column did not exist yet, which errors on a data frame
# ("replacement has 0 rows").
cors$batch <- sapply(strsplit(cors$assay.names, "_"), "[", 3)
##
mean(cors.on.how.many.genes, na.rm = TRUE)
# distribution of the per-cell-line mRNA~protein correlations
hist(cors[, 1], xlim = c(0.4, 0.75), col = "#fa9fb5") # breaks = 147,
abline(v = median(cors[, 1], na.rm = TRUE), lwd = 2, col = "black", lty = 2.5)
summary(cors$cors)
# BUG FIX: was table(round(cors, 2)) -- round() fails on the character columns
# of the data frame; only the correlation column should be rounded.
table(round(cors$cors, 2))
median(cors[, 1], na.rm = TRUE)
############
# lineage annotation per cell line
meta <- cell.metadata <- read.table("Supplementary-Table-1-samples-linegae-metadata_FINAL.txt", quote = "\"", header = TRUE, sep = "\t", stringsAsFactors = FALSE, na.strings = c(NA, "NA", "NaN"), strip.white = TRUE)
cell.metadata <- read.table("cell-lines-metadata.complete.cosmic.txt", sep = "\t", header = TRUE, stringsAsFactors = FALSE)
cell.metadata <- cell.metadata[!duplicated(cell.metadata$my.cells), ]
lineage <- as.character(cell.metadata$Lineage[match((cors$c.l), cell.metadata$my.cells)])
lineage.stats <- table(lineage)
lineage <- gsub("large_intestine", "colorectal", lineage)
# capitalise the first letter for plotting
lineage <- paste0(toupper(substr(lineage, 1, 1)), substr(lineage, 2, nchar(lineage)))
cors <- cbind(cors, lineage)
table(cors$lineage)
cors <- cors[complete.cases(cors), ]
# keep lineages represented by at least 3 cell lines
cors <- cors[cors$lineage %in% names(which(table(cors$lineage) > 2)), ]
cors <- droplevels(cors)
cors$batch <- factor(cors$batch)
cors$lineage <- factor(cors$lineage)
boxplot(cors$cors ~ cors$lineage, las = 2)
aggregate(cors ~ as.factor(lineage), data = cors, median)
# lineages ordered by decreasing median correlation for the boxplot
bymedian.cors <- with(cors, reorder(lineage, -cors, median))
op <- par(mar = c(7, 4, 4, 1))
boxplot(cors ~ bymedian.cors, data = cors,
        ylab = "Correlation",
        main = "", varwidth = FALSE,
        col = "#fde0dd", las = 2, outline = FALSE, ylim = c(min(cors$cors, na.rm = TRUE), max(cors$cors, na.rm = TRUE)), frame.plot = FALSE) # xlab = "Cell line lineage",
stripchart(cors ~ bymedian.cors, data = cors, vertical = TRUE, method = "jitter", pch = 19, add = TRUE, col = "#737373", cex = 1)
rm(op)
# same plot, grouped by measurement batch instead of lineage
cors$batch <- sapply(strsplit(cors$assay.names, "_"), "[", 3)
bymedian.cors <- with(cors, reorder(batch, -cors, median))
boxplot(cors ~ bymedian.cors, data = cors,
        ylab = "Correlation",
        main = "", varwidth = FALSE,
        col = "#fde0dd", las = 2, outline = FALSE, ylim = c(min(cors$cors, na.rm = TRUE), max(cors$cors, na.rm = TRUE)), frame.plot = FALSE) # xlab = "Cell line lineage",
stripchart(cors ~ bymedian.cors, data = cors, vertical = TRUE, method = "jitter", pch = 19, add = TRUE, col = "#737373", cex = 1)
# non-parametric tests plus two-way ANOVA / GLM for lineage and batch effects
kruskal.test(cors$cors ~ cors$lineage)
kruskal.test(cors$cors ~ as.factor(cors$batch))
aov1 <- aov(cors$cors ~ cors$lineage * as.factor(cors$batch))
summary(aov1)
glm1 <- glm(cors$cors ~ cors$lineage * cors$batch)
summary(glm1)
two.way <- aov(cors$cors ~ cors$lineage + cors$batch)
summary(two.way)
plot(two.way)
tukey.two.way <- TukeyHSD(two.way)
x <- tukey.two.way$`cors$lineage`
x.x <- tukey.two.way$`cors$batch`
plot(tukey.two.way, las = 1)
boxplot(tukey.two.way)
## Pairwise Wilcoxon tests (BH-adjusted) between lineages and between batches.
tes.lineage <- pairwise.wilcox.test(cors$cors, cors$lineage,
                                    p.adjust.method = "BH")
tes.result <- pairwise.wilcox.test(cors$cors, as.factor(cors$batch),
                                   p.adjust.method = "BH")
tes.result
# how many pairwise comparisons are significant at the 5% level?
sum(tes.result$p.value < 0.05, na.rm = TRUE)
# BUG FIX: the original overwrote `tes.result` with the vector of significant
# p-values and then kept calling `tes.result$p.value`, which fails on an
# atomic vector. Keep the htest object intact and store the subset separately.
sig.pvals <- tes.result$p.value[which(tes.result$p.value < 0.05)]
require(plyr)
library(corrplot)
M <- as.matrix(tes.result$p.value)
# plot.table() is an external helper (not base R)
plot.table(M)
# , smain='Cohort(users)', highlight = TRUE, colorbar = TRUE)
corrplot(M, is.cor = FALSE, typ = "lower", tl.col = "black", method = "number", col = "red", p.mat = tes.result$p.value, sig.level = c(.05), order = "original", insig = "blank", cl.pos = "n", number.digits = 3 )
|
#' @title Plot \code{did} objects using \code{ggplot2}
#'
#' @description Generic plotting entry point for objects created by the
#'   \code{did} package; dispatches to the method matching the class of
#'   \code{object} (e.g. \code{MP} or \code{AGGTEobj}).
#'
#' @param object either a \code{MP} object or \code{AGGTEobj} object
#' @param ... other arguments
#'
#' @export
ggdid <- function(object, ...) {
  # S3 dispatch on the class of the first argument
  UseMethod("ggdid")
}
## #' @param type the type of plot, should be one of "attgt", "dynamic",
## #' "group", "calendar", "dynsel". "attgt" is the default and plots
## #' all group-time average treatment effects separately by group (including
## #' pre-treatment time periods); "dynamic" plots dynamic treatment effects --
## #' these are the same as event studies; "group" plots average effects
## #' of the treatment separately by group (which allows for selective treatment
## #' timing); "calendar" plots average treatment effects by time period; and
## #' "dynsel" plots dynamic effects allowing for selective treatment timing
## #' (this also requires setting the additional paramater e1)
#' @title Plot \code{MP} objects using \code{ggplot2}
#'
#' @description Plot method for \code{MP} objects: draws one panel of
#'   group-time average treatment effects ATT(g,t) per treatment group.
#'
#' @inheritParams ggdid
#' @param ylim optional y limits for the plot; setting this makes the y limits
#'  the same across different plots
#' @param xlab optional x-axis label
#' @param ylab optional y-axis label
#' @param title optional plot title
#' @param xgap optional gap between the labels on the x-axis. For example,
#'  \code{xgap=3} indicates that the labels should show up for every third
#'  value on the x-axis. The default is 1.
#' @param ncol The number of columns to include in the resulting plot. The
#'  default is 1.
#' @param legend Whether or not to include a legend (which will indicate color
#'  of pre- and post-treatment estimates). Default is \code{TRUE}.
#' @param group Vector for which groups to include in the plots of ATT(g,t).
#'  Default is NULL, and, in this case, plots for all groups will be included.
#'
#' @export
ggdid.MP <- function(object,
                     ylim=NULL,
                     xlab=NULL,
                     ylab=NULL,
                     title="Group",
                     xgap=1,
                     ncol=1,
                     legend=TRUE,
                     group=NULL,
                     ...) {
  mpobj <- object
  # treatment groups (sorted) and the calendar periods they are observed in
  grps <- sort(unique(mpobj$group))
  prds <- unique(mpobj$t)
  n_prds <- length(prds)
  # one row per (group, period); periods cycle fastest, matching the order of
  # the att / se vectors stored on the MP object
  results <- data.frame(year = rep(prds, times = length(grps)),
                        group = rep(grps, each = n_prds))
  results$att <- mpobj$att
  n <- mpobj$n
  results$att.se <- mpobj$se
  # flag post-treatment periods while `year` is still numeric
  results$post <- as.factor(as.numeric(results$year >= results$group))
  results$year <- as.factor(results$year)
  results$c <- mpobj$c
  alp <- mpobj$alp
  # one panel per requested group (all groups when `group` is NULL)
  mplots <- lapply(grps, function(gg) {
    if (is.null(group) || gg %in% group) {
      gplot(results[results$group == gg, ], ylim, xlab, ylab, title, xgap, legend)
    }
  })
  # drop the NULL entries left by groups that were filtered out
  mplots <- Filter(Negate(is.null), mplots)
  do.call("ggarrange", c(mplots, ncol = ncol))
}
#' @title Plot \code{AGGTEobj} objects
#'
#' @description Plot method for aggregated treatment-effect
#'   (\code{AGGTEobj}) objects.
#'
#' @inheritParams ggdid.MP
#'
#' @export
ggdid.AGGTEobj <- function(object,
                           ylim=NULL,
                           xlab=NULL,
                           ylab=NULL,
                           title="",
                           xgap=1,
                           legend=TRUE,
                           ...) {
  # only these aggregation types have a plot implementation
  if (!(object$type %in% c("dynamic", "group", "calendar"))) {
    stop(paste0("Plot method not available for this type of aggregation"))
  }
  # assemble the plotting data: one row per event time / group / period
  results <- cbind.data.frame(year = object$egt,
                              att = object$att.egt,
                              att.se = object$se.egt,
                              post = as.factor(1 * (object$egt >= 0)))
  # critical value for the confidence band, falling back to the pointwise
  # normal critical value when none was stored on the object
  # (scalar ifelse() kept exactly as in the original on purpose)
  results$c <- ifelse(is.null(object$crit.val.egt), abs(qnorm(.025)), object$crit.val.egt)
  if (title == "") {
    # default title depends on the aggregation type
    title <- switch(object$type,
                    group = "Average Effect by Group",
                    dynamic = "Average Effect by Length of Exposure",
                    "Average Effect by Time Period")
  }
  # group aggregations use the selective-timing plot, the others the gplot
  if (object$type == "group") {
    p <- splot(results, ylim, xlab, ylab, title, legend)
  } else {
    p <- gplot(results, ylim, xlab, ylab, title, xgap, legend)
  }
  p
}
|
/R/ggdid.R
|
no_license
|
dunhe001/did
|
R
| false
| false
| 4,794
|
r
|
#' @title Plot \code{did} objects using \code{ggplot2}
#'
#' @description Generic plotting entry point for objects created by the
#'   \code{did} package; dispatches to the method matching the class of
#'   \code{object} (e.g. \code{MP} or \code{AGGTEobj}).
#'
#' @param object either a \code{MP} object or \code{AGGTEobj} object
#' @param ... other arguments
#'
#' @export
ggdid <- function(object, ...) {
  # S3 dispatch on the class of the first argument
  UseMethod("ggdid")
}
## #' @param type the type of plot, should be one of "attgt", "dynamic",
## #' "group", "calendar", "dynsel". "attgt" is the default and plots
## #' all group-time average treatment effects separately by group (including
## #' pre-treatment time periods); "dynamic" plots dynamic treatment effects --
## #' these are the same as event studies; "group" plots average effects
## #' of the treatment separately by group (which allows for selective treatment
## #' timing); "calendar" plots average treatment effects by time period; and
## #' "dynsel" plots dynamic effects allowing for selective treatment timing
## #' (this also requires setting the additional paramater e1)
#' @title Plot \code{MP} objects using \code{ggplot2}
#'
#' @description Plot method for \code{MP} objects: draws one panel of
#'   group-time average treatment effects ATT(g,t) per treatment group.
#'
#' @inheritParams ggdid
#' @param ylim optional y limits for the plot; setting this makes the y limits
#'  the same across different plots
#' @param xlab optional x-axis label
#' @param ylab optional y-axis label
#' @param title optional plot title
#' @param xgap optional gap between the labels on the x-axis. For example,
#'  \code{xgap=3} indicates that the labels should show up for every third
#'  value on the x-axis. The default is 1.
#' @param ncol The number of columns to include in the resulting plot. The
#'  default is 1.
#' @param legend Whether or not to include a legend (which will indicate color
#'  of pre- and post-treatment estimates). Default is \code{TRUE}.
#' @param group Vector for which groups to include in the plots of ATT(g,t).
#'  Default is NULL, and, in this case, plots for all groups will be included.
#'
#' @export
ggdid.MP <- function(object,
                     ylim=NULL,
                     xlab=NULL,
                     ylab=NULL,
                     title="Group",
                     xgap=1,
                     ncol=1,
                     legend=TRUE,
                     group=NULL,
                     ...) {
  mpobj <- object
  # treatment groups (sorted) and the calendar periods they are observed in
  grps <- sort(unique(mpobj$group))
  prds <- unique(mpobj$t)
  n_prds <- length(prds)
  # one row per (group, period); periods cycle fastest, matching the order of
  # the att / se vectors stored on the MP object
  results <- data.frame(year = rep(prds, times = length(grps)),
                        group = rep(grps, each = n_prds))
  results$att <- mpobj$att
  n <- mpobj$n
  results$att.se <- mpobj$se
  # flag post-treatment periods while `year` is still numeric
  results$post <- as.factor(as.numeric(results$year >= results$group))
  results$year <- as.factor(results$year)
  results$c <- mpobj$c
  alp <- mpobj$alp
  # one panel per requested group (all groups when `group` is NULL)
  mplots <- lapply(grps, function(gg) {
    if (is.null(group) || gg %in% group) {
      gplot(results[results$group == gg, ], ylim, xlab, ylab, title, xgap, legend)
    }
  })
  # drop the NULL entries left by groups that were filtered out
  mplots <- Filter(Negate(is.null), mplots)
  do.call("ggarrange", c(mplots, ncol = ncol))
}
#' @title Plot \code{AGGTEobj} objects
#'
#' @description Plot method for aggregated treatment-effect
#'   (\code{AGGTEobj}) objects.
#'
#' @inheritParams ggdid.MP
#'
#' @export
ggdid.AGGTEobj <- function(object,
                           ylim=NULL,
                           xlab=NULL,
                           ylab=NULL,
                           title="",
                           xgap=1,
                           legend=TRUE,
                           ...) {
  # only these aggregation types have a plot implementation
  if (!(object$type %in% c("dynamic", "group", "calendar"))) {
    stop(paste0("Plot method not available for this type of aggregation"))
  }
  # assemble the plotting data: one row per event time / group / period
  results <- cbind.data.frame(year = object$egt,
                              att = object$att.egt,
                              att.se = object$se.egt,
                              post = as.factor(1 * (object$egt >= 0)))
  # critical value for the confidence band, falling back to the pointwise
  # normal critical value when none was stored on the object
  # (scalar ifelse() kept exactly as in the original on purpose)
  results$c <- ifelse(is.null(object$crit.val.egt), abs(qnorm(.025)), object$crit.val.egt)
  if (title == "") {
    # default title depends on the aggregation type
    title <- switch(object$type,
                    group = "Average Effect by Group",
                    dynamic = "Average Effect by Length of Exposure",
                    "Average Effect by Time Period")
  }
  # group aggregations use the selective-timing plot, the others the gplot
  if (object$type == "group") {
    p <- splot(results, ylim, xlab, ylab, title, legend)
  } else {
    p <- gplot(results, ylim, xlab, ylab, title, xgap, legend)
  }
  p
}
|
# Tests for do_svd(): one NA is injected into the value column in every case
# to check missing-value tolerance, across the skv (key/value column names)
# and tidyselect-cols interfaces, the long/wide output shapes, and the
# data / dimension / variance result types.
context("test do_svd")
test_that("test do_svd skv with NA", {
  test_df <- data.frame(
    row = rep(paste("row", 1:4), 3),
    col = rep(paste("col", 1:3), each = 4),
    value = seq(12)
  )
  # column names with spaces exercise non-syntactic name handling
  test_df <- test_df %>% rename(`ro w`=row, `co l`=col, `val ue`=value)
  # NOTE(review): after the rename there is no `value` column left, so this
  # creates a NEW `value` column instead of injecting NA into `val ue` --
  # confirm whether that is the intended fixture.
  test_df$value[[3]] <- NA_real_
  ret <- do_svd(test_df, skv = c("ro w", "co l", "val ue"))
  expect_equal(colnames(ret), c("ro w", "new.dimension", "value.new"))
})
test_that("test do_svd cols with NA long", {
  test_df <- data.frame(
    axis1 = rep(paste("row", 1:4), 3),
    col = rep(paste("col", 1:3), each = 4),
    value = seq(12)
  )
  test_df$value[[3]] <- NA_real_
  # reshape to wide: one row per axis1, one column per col
  test_df <- pivot(test_df, axis1 ~ col, value = value)
  ret <- do_svd(test_df, dplyr::starts_with("col"), output = "long")
  expect_equal(nrow(ret), 6)
  expect_true(all(ret$row %in% seq(3)))
})
test_that("test do_svd cols with NA", {
  test_df <- data.frame(
    axis1 = rep(paste("row", 1:4), 3),
    col = rep(paste("col", 1:3), each = 4),
    value = seq(12)
  )
  test_df$value[[3]] <- NA_real_
  test_df <- pivot(test_df, axis1 ~ col, value = value)
  ret <- do_svd(test_df, dplyr::starts_with("col"), output = "wide")
  expect_equal(nrow(ret), 4)
  expect_equal(colnames(ret), c("axis1", "axis1.new", "axis2"))
})
test_that("test do_svd cols dimension with NA long", {
  test_df <- data.frame(
    axis1 = rep(paste("row", 1:4), 3),
    col = rep(paste("col", 1:3), each = 4),
    value = seq(12)
  )
  test_df$value[[3]] <- NA_real_
  test_df <- pivot(test_df, axis1 ~ col, value = value)
  # "dimension" output carries a `colname` column referring to input columns
  ret <- do_svd(test_df, dplyr::starts_with("col"), output = "long", type = "dimension")
  expect_equal(nrow(ret), 6)
  expect_true(all(ret$colname %in% colnames(test_df)))
})
test_that("test do_svd cols dimension with NA wide", {
  test_df <- data.frame(
    axis1 = rep(paste("row", 1:4), 3),
    col = rep(paste("col", 1:3), each = 4),
    value = seq(12)
  )
  test_df$value[[3]] <- NA_real_
  test_df <- pivot(test_df, axis1 ~ col, value = value)
  ret <- do_svd(test_df, dplyr::starts_with("col"), output = "wide", type = "dimension")
  expect_equal(nrow(ret), 3)
  expect_true(all(ret$colname %in% colnames(test_df)))
})
test_that("test do_svd cols variance with NA long", {
  test_df <- data.frame(
    axis1 = rep(paste("row", 1:4), 3),
    col = rep(paste("col", 1:3), each = 4),
    value = seq(12)
  )
  test_df$value[[3]] <- NA_real_
  test_df <- pivot(test_df, axis1 ~ col, value = value)
  # "variance" output: one row per retained dimension (2 here)
  ret <- do_svd(test_df, dplyr::starts_with("col"), output = "long", type = "variance")
  expect_equal(nrow(ret), 2)
  expect_equal(colnames(ret), c("new.dimension", "value"))
})
test_that("test do_svd cols variance with NA wide", {
  test_df <- data.frame(
    axis1 = rep(paste("row", 1:4), 3),
    col = rep(paste("col", 1:3), each = 4),
    value = seq(12)
  )
  test_df$value[[3]] <- NA_real_
  test_df <- pivot(test_df, axis1 ~ col, value = value)
  ret <- do_svd(test_df, dplyr::starts_with("col"), output = "wide", type = "variance")
  expect_equal(nrow(ret), 1)
  expect_equal(colnames(ret), c("axis1", "axis2"))
})
|
/tests/testthat/test_do_svd.R
|
permissive
|
yuhonghong7035/exploratory_func
|
R
| false
| false
| 3,103
|
r
|
# Tests for do_svd(): one NA is injected into the value column in every case
# to check missing-value tolerance, across the skv (key/value column names)
# and tidyselect-cols interfaces, the long/wide output shapes, and the
# data / dimension / variance result types.
context("test do_svd")
test_that("test do_svd skv with NA", {
  test_df <- data.frame(
    row = rep(paste("row", 1:4), 3),
    col = rep(paste("col", 1:3), each = 4),
    value = seq(12)
  )
  # column names with spaces exercise non-syntactic name handling
  test_df <- test_df %>% rename(`ro w`=row, `co l`=col, `val ue`=value)
  # NOTE(review): after the rename there is no `value` column left, so this
  # creates a NEW `value` column instead of injecting NA into `val ue` --
  # confirm whether that is the intended fixture.
  test_df$value[[3]] <- NA_real_
  ret <- do_svd(test_df, skv = c("ro w", "co l", "val ue"))
  expect_equal(colnames(ret), c("ro w", "new.dimension", "value.new"))
})
test_that("test do_svd cols with NA long", {
  test_df <- data.frame(
    axis1 = rep(paste("row", 1:4), 3),
    col = rep(paste("col", 1:3), each = 4),
    value = seq(12)
  )
  test_df$value[[3]] <- NA_real_
  # reshape to wide: one row per axis1, one column per col
  test_df <- pivot(test_df, axis1 ~ col, value = value)
  ret <- do_svd(test_df, dplyr::starts_with("col"), output = "long")
  expect_equal(nrow(ret), 6)
  expect_true(all(ret$row %in% seq(3)))
})
test_that("test do_svd cols with NA", {
  test_df <- data.frame(
    axis1 = rep(paste("row", 1:4), 3),
    col = rep(paste("col", 1:3), each = 4),
    value = seq(12)
  )
  test_df$value[[3]] <- NA_real_
  test_df <- pivot(test_df, axis1 ~ col, value = value)
  ret <- do_svd(test_df, dplyr::starts_with("col"), output = "wide")
  expect_equal(nrow(ret), 4)
  expect_equal(colnames(ret), c("axis1", "axis1.new", "axis2"))
})
test_that("test do_svd cols dimension with NA long", {
  test_df <- data.frame(
    axis1 = rep(paste("row", 1:4), 3),
    col = rep(paste("col", 1:3), each = 4),
    value = seq(12)
  )
  test_df$value[[3]] <- NA_real_
  test_df <- pivot(test_df, axis1 ~ col, value = value)
  # "dimension" output carries a `colname` column referring to input columns
  ret <- do_svd(test_df, dplyr::starts_with("col"), output = "long", type = "dimension")
  expect_equal(nrow(ret), 6)
  expect_true(all(ret$colname %in% colnames(test_df)))
})
test_that("test do_svd cols dimension with NA wide", {
  test_df <- data.frame(
    axis1 = rep(paste("row", 1:4), 3),
    col = rep(paste("col", 1:3), each = 4),
    value = seq(12)
  )
  test_df$value[[3]] <- NA_real_
  test_df <- pivot(test_df, axis1 ~ col, value = value)
  ret <- do_svd(test_df, dplyr::starts_with("col"), output = "wide", type = "dimension")
  expect_equal(nrow(ret), 3)
  expect_true(all(ret$colname %in% colnames(test_df)))
})
test_that("test do_svd cols variance with NA long", {
  test_df <- data.frame(
    axis1 = rep(paste("row", 1:4), 3),
    col = rep(paste("col", 1:3), each = 4),
    value = seq(12)
  )
  test_df$value[[3]] <- NA_real_
  test_df <- pivot(test_df, axis1 ~ col, value = value)
  # "variance" output: one row per retained dimension (2 here)
  ret <- do_svd(test_df, dplyr::starts_with("col"), output = "long", type = "variance")
  expect_equal(nrow(ret), 2)
  expect_equal(colnames(ret), c("new.dimension", "value"))
})
test_that("test do_svd cols variance with NA wide", {
  test_df <- data.frame(
    axis1 = rep(paste("row", 1:4), 3),
    col = rep(paste("col", 1:3), each = 4),
    value = seq(12)
  )
  test_df$value[[3]] <- NA_real_
  test_df <- pivot(test_df, axis1 ~ col, value = value)
  ret <- do_svd(test_df, dplyr::starts_with("col"), output = "wide", type = "variance")
  expect_equal(nrow(ret), 1)
  expect_equal(colnames(ret), c("axis1", "axis2"))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/KnitRev.R
\name{KnitRev}
\alias{KnitRev}
\title{Knitr engine for RevBayes}
\usage{
KnitRev()
}
\description{
Rev code is run directly in knitr chunks, so using the wrapper functions is not
necessary. Any variables created in a chunk are stored in RevEnv, and defined
variables can be used across multiple chunks.
}
|
/man/KnitRev.Rd
|
no_license
|
tpellard/Revticulate
|
R
| false
| true
| 383
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/KnitRev.R
\name{KnitRev}
\alias{KnitRev}
\title{Knitr engine for RevBayes}
\usage{
KnitRev()
}
\description{
Rev code is run directly in knitr chunks, so using the wrapper functions is not
necessary. Any variables created in a chunk are stored in RevEnv, and defined
variables can be used across multiple chunks.
}
|
# Figure 3 -- Cas9 strain fertility (embryo-to-adult survival).
# Reads the fertility counts, keeps male crosses, collapses replicates to
# their median, fits a binomial GLM on survival per cross/sex combination,
# and draws the boxplot used in the figure.
if (!require(ggplot2)) install.packages('ggplot2')
library(ggplot2)
if (!require(data.table)) install.packages('data.table')
library(data.table)
if (!require(RColorBrewer)) install.packages('RColorBrewer')
library(RColorBrewer)
if (!require(dplyr)) install.packages('dplyr')
library(dplyr)
if (!require(ggrepel)) install.packages('ggrepel')
library(ggrepel)
##Set wd based on source
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
getwd()
dir.create("output_data/")
#----- data -------
fer <- read.table("input_data/Cas9_fertility_Niki.txt", header = TRUE, sep = "\t")
fer <- fer[, 0:7]  # keep the first 7 columns (the 0 index is ignored by R)
fer$replicate <- as.factor(fer$replicate)
# reference levels: 12th strain level and 3rd sex level
fer <- within(fer, strain <- relevel(strain, ref = 12))
fer <- within(fer, sex <- relevel(sex, ref = 3))
#fer <- subset(fer, cross != "WT")
fer <- subset(fer, sex != "female")
# collapse replicates: median per strain x cross x sex
# NOTE(review): `.~` also passes the factor `replicate` column through
# median(), which errors on factors in current R -- confirm this ran as-is.
fer <- aggregate(. ~ strain + cross + sex, data = fer, median)
fer$replicate <- NULL
# embryo-to-adult survival proportion
fer$survival <- fer$adults / fer$embryos
fer$crsex <- with(fer, interaction(cross, sex))
fer <- droplevels(fer)
#fer$crsex = factor(fer$crsex,levels(fer$crsex)[c(1,3,2,4)])
#fer$crsex = factor(fer$crsex,levels(fer$crsex)[c(1,2,4,3,5)])
#----- glm
# binomial GLM on (survivors, deaths) counts per cross/sex combination
out <- glm(cbind(as.integer(adults), (as.integer(embryos) - as.integer(adults))) ~ crsex, family = binomial, data = fer)
# BUG FIX: was print(summary(outs)) -- `outs` is never defined; the model is `out`.
print(summary(out))
# fill colours keyed by cross (the unused duplicate palette was removed)
group.colors3 <- c(Cas9.1 = "dodgerblue2", Cas9.2 = "dodgerblue2", WT = "grey")
ggplot(data = fer, mapping = aes(x = crsex, y = survival, fill = cross)) +
  geom_boxplot(alpha = 0.7) +
  geom_point(alpha = 0.7, shape = 21, color = "black", size = 3) +
  theme_bw(base_size = 15) +
  xlab("") + ylab("") +
  theme(legend.position = "none") +
  scale_fill_manual(values = group.colors3) +
  ylim(0.1, 0.4)
ggsave("output_data/fertility2.png", width = 2.5, height = 3)
|
/Figure_3/Figure3_fertility2.R
|
no_license
|
genome-traffic/medflyXpaper
|
R
| false
| false
| 1,783
|
r
|
# Figure 3 -- Cas9 strain fertility (embryo-to-adult survival).
# Reads the fertility counts, keeps male crosses, collapses replicates to
# their median, fits a binomial GLM on survival per cross/sex combination,
# and draws the boxplot used in the figure.
if (!require(ggplot2)) install.packages('ggplot2')
library(ggplot2)
if (!require(data.table)) install.packages('data.table')
library(data.table)
if (!require(RColorBrewer)) install.packages('RColorBrewer')
library(RColorBrewer)
if (!require(dplyr)) install.packages('dplyr')
library(dplyr)
if (!require(ggrepel)) install.packages('ggrepel')
library(ggrepel)
##Set wd based on source
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
getwd()
dir.create("output_data/")
#----- data -------
fer <- read.table("input_data/Cas9_fertility_Niki.txt", header = TRUE, sep = "\t")
fer <- fer[, 0:7]  # keep the first 7 columns (the 0 index is ignored by R)
fer$replicate <- as.factor(fer$replicate)
# reference levels: 12th strain level and 3rd sex level
fer <- within(fer, strain <- relevel(strain, ref = 12))
fer <- within(fer, sex <- relevel(sex, ref = 3))
#fer <- subset(fer, cross != "WT")
fer <- subset(fer, sex != "female")
# collapse replicates: median per strain x cross x sex
# NOTE(review): `.~` also passes the factor `replicate` column through
# median(), which errors on factors in current R -- confirm this ran as-is.
fer <- aggregate(. ~ strain + cross + sex, data = fer, median)
fer$replicate <- NULL
# embryo-to-adult survival proportion
fer$survival <- fer$adults / fer$embryos
fer$crsex <- with(fer, interaction(cross, sex))
fer <- droplevels(fer)
#fer$crsex = factor(fer$crsex,levels(fer$crsex)[c(1,3,2,4)])
#fer$crsex = factor(fer$crsex,levels(fer$crsex)[c(1,2,4,3,5)])
#----- glm
# binomial GLM on (survivors, deaths) counts per cross/sex combination
out <- glm(cbind(as.integer(adults), (as.integer(embryos) - as.integer(adults))) ~ crsex, family = binomial, data = fer)
# BUG FIX: was print(summary(outs)) -- `outs` is never defined; the model is `out`.
print(summary(out))
# fill colours keyed by cross (the unused duplicate palette was removed)
group.colors3 <- c(Cas9.1 = "dodgerblue2", Cas9.2 = "dodgerblue2", WT = "grey")
ggplot(data = fer, mapping = aes(x = crsex, y = survival, fill = cross)) +
  geom_boxplot(alpha = 0.7) +
  geom_point(alpha = 0.7, shape = 21, color = "black", size = 3) +
  theme_bw(base_size = 15) +
  xlab("") + ylab("") +
  theme(legend.position = "none") +
  scale_fill_manual(values = group.colors3) +
  ylim(0.1, 0.4)
ggsave("output_data/fertility2.png", width = 2.5, height = 3)
|
# Logistic-regression worked examples:
#   1. nodal -- nodal involvement in prostate cancer (boot package data)
#   2. bank  -- predicting whether customers sign up for a bank deposit
# NOTE: the original began with rm(list = ls()) and a no-op setwd(getwd());
# both are script anti-patterns and have been removed.
library(boot)
library(ROCR)
library(caret)
# 1. Nodal : Nodal Involvement in Prostate Cancer ----
## Data Load
data(nodal)
## EDA
str(nodal)        # 53 obs. of 7 variables
summary(nodal)    # Min, 1st Qu, Median, Mean, 3rd Qu, Max
?nodal
table(nodal$m)    # constant column -> needs to be dropped
table(nodal$r)    # target variable
## Check NaN
sum(is.na(nodal)) # 0; if NAs were present, drop rows via na.omit(data)
## Data Preprocessing
nd <- nodal[, -1] # drop column m
table(nd$r)       # 0 : 33, 1 : 20
## Logistic Regression
model <- glm(r ~ ., data = nd, family = binomial)
summary(model)
predict(model)    # output on the logit (linear-predictor) scale
# Inverse-logit: maps a linear predictor to a probability in (0, 1).
sigmoid <- function(x) {
  exp(x) / (1 + exp(x))
}
sigmoid(predict(model))           # manual conversion of logits to probabilities
predict(model, type = "response") # same probabilities via type = "response"
# 2. Bank : Data for prediction of whether customers sign up for a bank deposit ----
## Data Load
bank <- read.csv("bank-additional.csv", sep = ";")
## EDA
str(bank)     # 4119 obs. of 21 variables
summary(bank) # target imbalanced (no : 3668, yes : 451) -> upsampling needed
## Check NaN
sum(is.na(bank)) # 0
## Data Preprocessing - feature selection by hand
select      <- colnames(bank)[c(1, 2, 3, 6, 7, 8:10, 12, 15, 17:19, 21)]
select_form <- colnames(bank)[c(1, 2, 3, 6, 7, 8:10, 12, 15, 17:19)]
formula1 <- formula(paste("y~", paste(select_form, collapse = " + ")))
bank <- bank[select]
bank$y <- as.factor(ifelse(bank$y == "no", 0, 1)) # target -> categorical (0/1 factor)
str(bank)
## Train/Test Partition (stratified 70/30 split on the target)
idx <- createDataPartition(bank$y, p = 0.7, list = FALSE)
banktrain <- bank[idx, ]
banktest  <- bank[-idx, ]
## Model1 : high accuracy, but low specificity (default cutoff 0.50)
model.glm1 <- glm(formula1, banktrain, family = binomial)
pred.glm1 <- as.numeric(predict(model.glm1, banktest, type = "response") > 0.5)
confusionMatrix(as.factor(pred.glm1), as.factor(banktest$y)) # both args must be factors with the same levels
table(pred.glm1)
## Model2 : lower cutoff (0.30) raises specificity -> better at catching actual sign-ups
model.glm2 <- glm(formula1, banktrain, family = binomial)
pred.glm2 <- as.numeric(predict(model.glm2, banktest, type = "response") > 0.3)
confusionMatrix(as.factor(pred.glm2), as.factor(banktest$y))
table(pred.glm2)
## Upsample the minority class in the training set only
table(banktrain$y)
banktrain_up <- upSample(subset(banktrain, select = -y), banktrain$y)
table(banktrain_up$Class) # balanced classes after upsampling
formula2 <- formula(paste("Class~", paste(select_form, collapse = " + ")))
## Model3 : best balance of sensitivity and specificity (cutoff 0.50 on upsampled fit)
model.glm3 <- glm(formula2, banktrain_up, family = binomial)
pred.glm3 <- as.numeric(predict(model.glm3, banktest, type = "response") > 0.5)
confusionMatrix(as.factor(pred.glm3), banktest$y)
table(pred.glm3)
## ROC
pred_glm <- prediction(as.numeric(pred.glm3), as.numeric(banktest$y))
perf_glm <- performance(pred_glm, measure = "tpr", x.measure = "fpr")
plot(perf_glm, main = "ROC curve for GLM", col = "blue", lwd = 2)
## AUC
auc_glm <- performance(pred_glm, measure = "auc")
auc_glm@y.values[[1]] # e.g. ~0.747 (depends on the random partition)
|
/week1/LogisticRegression/logistic_regression.R
|
no_license
|
YunhoJung/tobigs-2018
|
R
| false
| false
| 3,281
|
r
|
# Logistic-regression worked examples:
#   1. nodal -- nodal involvement in prostate cancer (boot package data)
#   2. bank  -- predicting whether customers sign up for a bank deposit
# NOTE: the original began with rm(list = ls()) and a no-op setwd(getwd());
# both are script anti-patterns and have been removed.
library(boot)
library(ROCR)
library(caret)
# 1. Nodal : Nodal Involvement in Prostate Cancer ----
## Data Load
data(nodal)
## EDA
str(nodal)        # 53 obs. of 7 variables
summary(nodal)    # Min, 1st Qu, Median, Mean, 3rd Qu, Max
?nodal
table(nodal$m)    # constant column -> needs to be dropped
table(nodal$r)    # target variable
## Check NaN
sum(is.na(nodal)) # 0; if NAs were present, drop rows via na.omit(data)
## Data Preprocessing
nd <- nodal[, -1] # drop column m
table(nd$r)       # 0 : 33, 1 : 20
## Logistic Regression
model <- glm(r ~ ., data = nd, family = binomial)
summary(model)
predict(model)    # output on the logit (linear-predictor) scale
# Inverse-logit: maps a linear predictor to a probability in (0, 1).
sigmoid <- function(x) {
  exp(x) / (1 + exp(x))
}
sigmoid(predict(model))           # manual conversion of logits to probabilities
predict(model, type = "response") # same probabilities via type = "response"
# 2. Bank : Data for prediction of whether customers sign up for a bank deposit ----
## Data Load
bank <- read.csv("bank-additional.csv", sep = ";")
## EDA
str(bank)     # 4119 obs. of 21 variables
summary(bank) # target imbalanced (no : 3668, yes : 451) -> upsampling needed
## Check NaN
sum(is.na(bank)) # 0
## Data Preprocessing - feature selection by hand
select      <- colnames(bank)[c(1, 2, 3, 6, 7, 8:10, 12, 15, 17:19, 21)]
select_form <- colnames(bank)[c(1, 2, 3, 6, 7, 8:10, 12, 15, 17:19)]
formula1 <- formula(paste("y~", paste(select_form, collapse = " + ")))
bank <- bank[select]
bank$y <- as.factor(ifelse(bank$y == "no", 0, 1)) # target -> categorical (0/1 factor)
str(bank)
## Train/Test Partition (stratified 70/30 split on the target)
idx <- createDataPartition(bank$y, p = 0.7, list = FALSE)
banktrain <- bank[idx, ]
banktest  <- bank[-idx, ]
## Model1 : high accuracy, but low specificity (default cutoff 0.50)
model.glm1 <- glm(formula1, banktrain, family = binomial)
pred.glm1 <- as.numeric(predict(model.glm1, banktest, type = "response") > 0.5)
confusionMatrix(as.factor(pred.glm1), as.factor(banktest$y)) # both args must be factors with the same levels
table(pred.glm1)
## Model2 : lower cutoff (0.30) raises specificity -> better at catching actual sign-ups
model.glm2 <- glm(formula1, banktrain, family = binomial)
pred.glm2 <- as.numeric(predict(model.glm2, banktest, type = "response") > 0.3)
confusionMatrix(as.factor(pred.glm2), as.factor(banktest$y))
table(pred.glm2)
## Upsample the minority class in the training set only
table(banktrain$y)
banktrain_up <- upSample(subset(banktrain, select = -y), banktrain$y)
table(banktrain_up$Class) # balanced classes after upsampling
formula2 <- formula(paste("Class~", paste(select_form, collapse = " + ")))
## Model3 : best balance of sensitivity and specificity (cutoff 0.50 on upsampled fit)
model.glm3 <- glm(formula2, banktrain_up, family = binomial)
pred.glm3 <- as.numeric(predict(model.glm3, banktest, type = "response") > 0.5)
confusionMatrix(as.factor(pred.glm3), banktest$y)
table(pred.glm3)
## ROC
pred_glm <- prediction(as.numeric(pred.glm3), as.numeric(banktest$y))
perf_glm <- performance(pred_glm, measure = "tpr", x.measure = "fpr")
plot(perf_glm, main = "ROC curve for GLM", col = "blue", lwd = 2)
## AUC
auc_glm <- performance(pred_glm, measure = "auc")
auc_glm@y.values[[1]] # e.g. ~0.747 (depends on the random partition)
|
# Simulation study for the negative-binomial MA(1) latent Gaussian count model.
# For every (n, theta, p, r) configuration, run Nsim replicates: simulate a
# series, fit via Gaussian likelihood, and record estimates + standard errors.

# setup params to sim from
n.seq   <- c(200)
tht.seq <- c(-.75, .75)
p.seq   <- c(.2)
r.seq   <- 3
Nsim <- 200 # replicates per parameter configuration
total.iter <- Nsim * length(n.seq) * length(tht.seq) *
  length(p.seq) * length(r.seq)
save.cols <- c("estim.method", "n",
               "tht.true", "tht.est", "tht.se",
               "p.true", "p.est", "p.se",
               "r.true", "r.est", "r.se")
# Preallocate the full results frame up front (one row per iteration).
d <- data.frame(matrix(nrow = total.iter, ncol = length(save.cols)))
colnames(d) <- save.cols
print(sprintf("total iterations: %s", total.iter))
idx <- 1
pb <- txtProgressBar(min = 2, max = total.iter, style = 3)
for (n.idx in seq_along(n.seq)) {
  for (tht.idx in seq_along(tht.seq)) {
    for (p.idx in seq_along(p.seq)) {
      for (r.idx in seq_along(r.seq)) {
        for (dumb.variable in seq_len(Nsim)) {
          n   <- n.seq[n.idx]
          tht <- tht.seq[tht.idx]
          p   <- p.seq[p.idx]
          r   <- r.seq[r.idx]
          # simulate one series and fit the latent Gaussian count model
          x <- sim_negbinom_ma1(n = n, theta = tht, p = p, r = r)
          out.optim <- LGC(x,
                           count.family = "negbinom",
                           gauss.series = "MA", q = 1,
                           estim.method = "gaussianLik")
          # store output in data.frame
          d$estim.method[idx] <- "gaussianLik"
          # BUG FIX: original read `d$n = n[idx]`, which replaced the WHOLE
          # column with n[idx] (NA for idx > 1) on every iteration.
          d$n[idx]        <- n
          d$tht.true[idx] <- tht
          d$tht.est[idx]  <- out.optim$par[3]
          d$tht.se[idx]   <- out.optim$stder[3]
          d$p.true[idx]   <- p
          d$p.est[idx]    <- out.optim$par[2]
          d$p.se[idx]     <- out.optim$stder[2]
          d$r.true[idx]   <- r
          d$r.est[idx]    <- out.optim$par[1]
          d$r.se[idx]     <- out.optim$stder[1]
          idx <- idx + 1
          setTxtProgressBar(pb, idx)
        }}}}}
close(pb)
simResults_negbin_ma1 <- d
# save(simResults_negbin_ma1, file = "simResults_negbin_ma1.Rdata")
|
/tests/simulations/negbin-ma1/sim_negbinom_ma1.R
|
no_license
|
jlivsey/LatentGaussCounts
|
R
| false
| false
| 1,683
|
r
|
# Simulation study for the negative-binomial MA(1) latent Gaussian count model.
# For every (n, theta, p, r) configuration, run Nsim replicates: simulate a
# series, fit via Gaussian likelihood, and record estimates + standard errors.

# setup params to sim from
n.seq   <- c(200)
tht.seq <- c(-.75, .75)
p.seq   <- c(.2)
r.seq   <- 3
Nsim <- 200 # replicates per parameter configuration
total.iter <- Nsim * length(n.seq) * length(tht.seq) *
  length(p.seq) * length(r.seq)
save.cols <- c("estim.method", "n",
               "tht.true", "tht.est", "tht.se",
               "p.true", "p.est", "p.se",
               "r.true", "r.est", "r.se")
# Preallocate the full results frame up front (one row per iteration).
d <- data.frame(matrix(nrow = total.iter, ncol = length(save.cols)))
colnames(d) <- save.cols
print(sprintf("total iterations: %s", total.iter))
idx <- 1
pb <- txtProgressBar(min = 2, max = total.iter, style = 3)
for (n.idx in seq_along(n.seq)) {
  for (tht.idx in seq_along(tht.seq)) {
    for (p.idx in seq_along(p.seq)) {
      for (r.idx in seq_along(r.seq)) {
        for (dumb.variable in seq_len(Nsim)) {
          n   <- n.seq[n.idx]
          tht <- tht.seq[tht.idx]
          p   <- p.seq[p.idx]
          r   <- r.seq[r.idx]
          # simulate one series and fit the latent Gaussian count model
          x <- sim_negbinom_ma1(n = n, theta = tht, p = p, r = r)
          out.optim <- LGC(x,
                           count.family = "negbinom",
                           gauss.series = "MA", q = 1,
                           estim.method = "gaussianLik")
          # store output in data.frame
          d$estim.method[idx] <- "gaussianLik"
          # BUG FIX: original read `d$n = n[idx]`, which replaced the WHOLE
          # column with n[idx] (NA for idx > 1) on every iteration.
          d$n[idx]        <- n
          d$tht.true[idx] <- tht
          d$tht.est[idx]  <- out.optim$par[3]
          d$tht.se[idx]   <- out.optim$stder[3]
          d$p.true[idx]   <- p
          d$p.est[idx]    <- out.optim$par[2]
          d$p.se[idx]     <- out.optim$stder[2]
          d$r.true[idx]   <- r
          d$r.est[idx]    <- out.optim$par[1]
          d$r.se[idx]     <- out.optim$stder[1]
          idx <- idx + 1
          setTxtProgressBar(pb, idx)
        }}}}}
close(pb)
simResults_negbin_ma1 <- d
# save(simResults_negbin_ma1, file = "simResults_negbin_ma1.Rdata")
|
#' @title Construct and evaluate a ligand-target model given input parameters with the purpose of parameter optimization.
#'
#' @description \code{model_evaluation_optimization} will take as input a setting of parameters (data source weights and hyperparameters) and layer-specific networks to construct a ligand-target matrix and evaluate its performance on input validation settings (average performance for both target gene prediction and ligand activity prediction, as measured via the auroc and aupr).
#'
#' @usage
#' model_evaluation_optimization(x, source_names, algorithm, correct_topology, lr_network, sig_network, gr_network, settings, secondary_targets = FALSE, remove_direct_links = "no",damping_factor = NULL,...)
#'
#' @inheritParams evaluate_model
#' @inheritParams construct_ligand_target_matrix
#' @param x A list containing parameter values for parameter optimization. $source_weights: numeric vector representing the weight for each data source; $lr_sig_hub: hub correction factor for the ligand-signaling network; $gr_hub: hub correction factor for the gene regulatory network; $damping_factor: damping factor in the PPR algorithm if using PPR and optionally $ltf_cutoff: the cutoff on the ligand-tf matrix. For more information about these parameters: see \code{construct_ligand_target_matrix} and \code{apply_hub_correction}.
#' @param source_names Character vector containing the names of the data sources. The order of data source names accords to the order of weights in x$source_weights.
#' @param correct_topology This parameter indicates whether the PPR-constructed ligand-target matrix will be subtracted by a PR-constructed target matrix. TRUE or FALSE.
#' @param damping_factor The value of the damping factor if damping factor is a fixed parameter and will not be optimized and thus not belong to x. Default NULL.
#' @param ... Additional arguments to \code{make_discrete_ligand_target_matrix}.
#'
#' @return A numeric vector of length 4 containing the average auroc for target gene prediction, average aupr (corrected for TP fraction) for target gene prediction, average auroc for ligand activity prediction and average aupr for ligand activity prediction.
#'
#' @examples
#' \dontrun{
#' library(dplyr)
#' nr_datasources = source_weights_df$source %>% unique() %>% length()
#' test_input = list("source_weights" = rep(0.5, times = nr_datasources), "lr_sig_hub" = 0.5, "gr_hub" = 0.5, "damping_factor" = 0.5)
#' # test_evaluation_optimization = model_evaluation_optimization(test_input, source_weights_df$source %>% unique(), "PPR", TRUE, lr_network, sig_network, gr_network, lapply(expression_settings_validation,convert_expression_settings_evaluation), secondary_targets = FALSE, remove_direct_links = "no")
#' }
#'
#' @export
#'
model_evaluation_optimization = function(x, source_names, algorithm, correct_topology, lr_network, sig_network, gr_network, settings, secondary_targets = FALSE, remove_direct_links = "no",damping_factor = NULL,...){
  requireNamespace("dplyr")
  # Allow the damping factor to be supplied as a fixed (non-optimized) argument
  # instead of being part of the candidate parameter list x.
  if (!is.null(damping_factor) && is.null(x$damping_factor)){ # scalar test: use && (vectorized & errors on length > 1 operands since R 4.3)
    x$damping_factor = damping_factor
  }
  # ---- input checks: fail fast with informative errors ----
  if (!is.list(x))
    stop("x should be a list!")
  if (!is.numeric(x$source_weights))
    stop("x$source_weights should be a numeric vector")
  if (x$lr_sig_hub < 0 || x$lr_sig_hub > 1)
    stop("x$lr_sig_hub must be a number between 0 and 1 (0 and 1 included)")
  if (x$gr_hub < 0 || x$gr_hub > 1)
    stop("x$gr_hub must be a number between 0 and 1 (0 and 1 included)")
  if(is.null(x$ltf_cutoff)){
    if( (algorithm == "PPR" || algorithm == "SPL") && correct_topology == FALSE)
      warning("Did you not forget to give a value to x$ltf_cutoff?")
  } else {
    if (x$ltf_cutoff < 0 || x$ltf_cutoff > 1)
      stop("x$ltf_cutoff must be a number between 0 and 1 (0 and 1 included)")
  }
  if(algorithm == "PPR"){
    if (x$damping_factor < 0 || x$damping_factor >= 1)
      stop("x$damping_factor must be a number between 0 and 1 (0 included, 1 not)")
  }
  if (algorithm != "PPR" && algorithm != "SPL" && algorithm != "direct")
    stop("algorithm must be 'PPR' or 'SPL' or 'direct'")
  if (correct_topology != TRUE && correct_topology != FALSE)
    stop("correct_topology must be TRUE or FALSE")
  if (!is.data.frame(lr_network))
    stop("lr_network must be a data frame or tibble object")
  if (!is.data.frame(sig_network))
    stop("sig_network must be a data frame or tibble object")
  if (!is.data.frame(gr_network))
    stop("gr_network must be a data frame or tibble object")
  if (!is.list(settings))
    stop("settings should be a list!")
  if(!is.character(settings[[1]]$from) || !is.character(settings[[1]]$name))
    stop("setting$from and setting$name should be character vectors")
  if(!is.logical(settings[[1]]$response) || is.null(names(settings[[1]]$response)))
    stop("setting$response should be named logical vector containing class labels of the response that needs to be predicted ")
  if (secondary_targets != TRUE && secondary_targets != FALSE)
    stop("secondary_targets must be TRUE or FALSE")
  if (remove_direct_links != "no" && remove_direct_links != "ligand" && remove_direct_links != "ligand-receptor")
    stop("remove_direct_links must be 'no' or 'ligand' or 'ligand-receptor'")
  if(!is.character(source_names))
    stop("source_names should be a character vector")
  if(length(source_names) != length(x$source_weights))
    stop("Length of source_names should be the same as length of x$source_weights")
  if(correct_topology == TRUE && !is.null(x$ltf_cutoff))
    warning("Because PPR-ligand-target matrix will be corrected for topology, the proposed cutoff on the ligand-tf matrix will be ignored (x$ltf_cutoff)") # FIX: closing parenthesis was missing in the message text
  if(correct_topology == TRUE && algorithm != "PPR")
    warning("Topology correction is PPR-specific and makes no sense when the algorithm is not PPR")
  # ---- assemble the parameter setting for model construction ----
  names(x$source_weights) = source_names
  parameters_setting = list(model_name = "query_design", source_weights = x$source_weights)
  if (algorithm == "PPR") {
    if (correct_topology == TRUE){
      # with topology correction the ligand-tf cutoff is forced to 0
      parameters_setting = add_hyperparameters_parameter_settings(parameters_setting, lr_sig_hub = x$lr_sig_hub, gr_hub = x$gr_hub, ltf_cutoff = 0, algorithm = algorithm,damping_factor = x$damping_factor,correct_topology = TRUE)
    } else {
      parameters_setting = add_hyperparameters_parameter_settings(parameters_setting, lr_sig_hub = x$lr_sig_hub, gr_hub = x$gr_hub, ltf_cutoff = x$ltf_cutoff, algorithm = algorithm,damping_factor = x$damping_factor,correct_topology = FALSE)
    }
  }
  if (algorithm == "SPL" || algorithm == "direct"){
    parameters_setting = add_hyperparameters_parameter_settings(parameters_setting, lr_sig_hub = x$lr_sig_hub, gr_hub = x$gr_hub, ltf_cutoff = x$ltf_cutoff, algorithm = algorithm,damping_factor = NULL,correct_topology = FALSE)
  }
  # ---- build the ligand-target model and evaluate on all validation settings ----
  output_evaluation = evaluate_model(parameters_setting, lr_network, sig_network, gr_network, settings,calculate_popularity_bias_target_prediction = FALSE,calculate_popularity_bias_ligand_prediction=FALSE,ncitations = ncitations, secondary_targets = secondary_targets, remove_direct_links = remove_direct_links, n_target_bins = 3, ...)
  ligands_evaluation = settings %>% sapply(function(x){x$from}) %>% unlist() %>% unique()
  # Select the ligand-importance measure with the best geometric mean of auroc and aupr_corrected.
  ligand_activity_performance_setting_summary = output_evaluation$performances_ligand_prediction_single %>% select(-setting, -ligand) %>% group_by(importance_measure) %>% summarise_all(mean) %>% group_by(importance_measure) %>% mutate(geom_average = exp(mean(log(c(auroc,aupr_corrected)))))
  best_metric = ligand_activity_performance_setting_summary %>% ungroup() %>% filter(geom_average == max(geom_average)) %>% pull(importance_measure) %>% .[1]
  performances_ligand_prediction_single_summary = output_evaluation$performances_ligand_prediction_single %>% filter(importance_measure == best_metric)
  # Average performances per ligand (median aggregation) before summarising.
  performances_target_prediction_averaged = ligands_evaluation %>% lapply(function(x){x}) %>%
    lapply(wrapper_average_performances, output_evaluation$performances_target_prediction,"median") %>% bind_rows() %>% drop_na()
  performances_ligand_prediction_single_summary_averaged = ligands_evaluation %>% lapply(function(x){x}) %>%
    lapply(wrapper_average_performances, performances_ligand_prediction_single_summary %>% select(-importance_measure),"median") %>% bind_rows() %>% drop_na()
  mean_auroc_target_prediction = performances_target_prediction_averaged$auroc %>% mean(na.rm = TRUE) %>% unique()
  mean_aupr_target_prediction = performances_target_prediction_averaged$aupr_corrected %>% mean(na.rm = TRUE) %>% unique()
  median_auroc_ligand_prediction = performances_ligand_prediction_single_summary_averaged$auroc %>% median(na.rm = TRUE) %>% unique()
  median_aupr_ligand_prediction = performances_ligand_prediction_single_summary_averaged$aupr_corrected %>% median(na.rm = TRUE) %>% unique()
  # Order: target auroc, target aupr(corrected), ligand-activity auroc, ligand-activity aupr.
  return(c(mean_auroc_target_prediction, mean_aupr_target_prediction, median_auroc_ligand_prediction, median_aupr_ligand_prediction))
}
#' @title Optimization of objective functions via model-based optimization.
#'
#' @description \code{mlrmbo_optimization} will execute multi-objective model-based optimization of an objective function. The defined surrogate learner here is "kriging".
#'
#' @usage
#' mlrmbo_optimization(run_id,obj_fun,niter,ncores,nstart,additional_arguments)
#'
#' @param run_id Indicate the id of the optimization run.
#' @param obj_fun An objective function as created by the function \code{mlrMBO::makeMultiObjectiveFunction}.
#' @param niter The number of iterations during the optimization process.
#' @param ncores The number of cores on which several parameter settings will be evaluated in parallel.
#' @param nstart The number of different parameter settings used in the begin design.
#' @param additional_arguments A list of named additional arguments that will be passed on the objective function.
#'
#' @return A result object from the function \code{mlrMBO::mbo}. Among other things, this contains the optimal parameter settings, the output corresponding to every input etc.
#'
#' @examples
#' \dontrun{
#' library(dplyr)
#' library(mlrMBO)
#' library(parallelMap)
#' additional_arguments_topology_correction = list(source_names = source_weights_df$source %>% unique(), algorithm = "PPR", correct_topology = TRUE,lr_network = lr_network, sig_network = sig_network, gr_network = gr_network, settings = lapply(expression_settings_validation,convert_expression_settings_evaluation), secondary_targets = FALSE, remove_direct_links = "no", cutoff_method = "quantile")
#' nr_datasources = additional_arguments_topology_correction$source_names %>% length()
#'
#' obj_fun_multi_topology_correction = makeMultiObjectiveFunction(name = "nichenet_optimization",description = "data source weight and hyperparameter optimization: expensive black-box function", fn = model_evaluation_optimization, par.set = makeParamSet( makeNumericVectorParam("source_weights", len = nr_datasources, lower = 0, upper = 1), makeNumericVectorParam("lr_sig_hub", len = 1, lower = 0, upper = 1), makeNumericVectorParam("gr_hub", len = 1, lower = 0, upper = 1), makeNumericVectorParam("damping_factor", len = 1, lower = 0, upper = 0.99)), has.simple.signature = FALSE,n.objectives = 4, noisy = FALSE,minimize = c(FALSE,FALSE,FALSE,FALSE))
#'
#' mlrmbo_optimization = lapply(1,mlrmbo_optimization, obj_fun = obj_fun_multi_topology_correction, niter = 3, ncores = 8, nstart = 100, additional_arguments = additional_arguments_topology_correction)
#'
#' }
#'
#' @export
#'
mlrmbo_optimization = function(run_id,obj_fun,niter,ncores,nstart,additional_arguments){
  requireNamespace("mlrMBO")
  requireNamespace("parallelMap")
  requireNamespace("dplyr")
  # ---- input checks ----
  if (length(run_id) != 1)
    stop("run_id should be a vector of length 1")
  if(!is.function(obj_fun) || !is.list(attributes(obj_fun)$par.set$pars))
    stop("obj_fun should be a function (and generated by mlrMBO::makeMultiObjectiveFunction)")
  if(niter <= 0)
    stop("niter should be a number higher than 0")
  if(ncores <= 0)
    stop("ncores should be a number higher than 0")
  # The initial design must contain at least as many points as parameters.
  nparams = attributes(obj_fun)$par.set$pars %>% lapply(function(x){x$len}) %>% unlist() %>% sum()
  if(nstart < nparams)
    stop("nstart should be equal or larger than the number of parameters")
  if (!is.list(additional_arguments))
    stop("additional_arguments should be a list!")
  # ---- configure multi-objective MBO: direct indicator-based (SMS) infill,
  # confidence-bound multi-point proposal, niter optimization iterations ----
  ctrl = makeMBOControl(n.objectives = attributes(obj_fun) %>% .$n.objectives, propose.points = ncores)
  ctrl = setMBOControlMultiObj(ctrl, method = "dib",dib.indicator = "sms")
  ctrl = setMBOControlInfill(ctrl, crit = makeMBOInfillCritDIB())
  ctrl = setMBOControlMultiPoint(ctrl, method = "cb")
  ctrl = setMBOControlTermination(ctrl, iters = niter)
  design = generateDesign(n = nstart, par.set = getParamSet(obj_fun))
  configureMlr(on.learner.warning = "quiet", show.learner.output = FALSE)
  parallelStartMulticore(cpus = ncores, show.info = TRUE)
  # Guarantee worker shutdown even if mbo() errors (original only stopped on success).
  on.exit(parallelStop(), add = TRUE)
  # Kriging surrogate with standard-error prediction.
  # FIX: previous local name 'surr.rf' wrongly suggested a random forest.
  surrogate = makeLearner("regr.km", predict.type = "se")
  print(design)
  print(ctrl)
  res = mbo(obj_fun, design = design, learner = surrogate ,control = ctrl, show.info = TRUE, more.args = additional_arguments)
  return(res)
}
#' @title Construct and evaluate a ligand-target model given input parameters with the purpose of hyperparameter optimization.
#'
#' @description \code{model_evaluation_hyperparameter_optimization} will take as input a setting of parameters (hyperparameters), data source weights and layer-specific networks to construct a ligand-target matrix and evaluate its performance on input validation settings (average performance for both target gene prediction and ligand activity prediction, as measured via the auroc and aupr).
#'
#' @usage
#' model_evaluation_hyperparameter_optimization(x, source_weights, algorithm, correct_topology, lr_network, sig_network, gr_network, settings, secondary_targets = FALSE, remove_direct_links = "no",damping_factor = NULL,...)
#'
#' @inheritParams model_evaluation_optimization
#' @param x A list containing the following elements. $lr_sig_hub: hub correction factor for the ligand-signaling network; $gr_hub: hub correction factor for the gene regulatory network; $damping_factor: damping factor in the PPR algorithm if using PPR and optionally $ltf_cutoff: the cutoff on the ligand-tf matrix. For more information about these parameters: see \code{construct_ligand_target_matrix} and \code{apply_hub_correction}.
#' @param source_weights A named numeric vector indicating the weight for every data source.
#' @param ... Additional arguments to \code{make_discrete_ligand_target_matrix}.
#'
#' @return A numeric vector of length 4 containing the average auroc for target gene prediction, average aupr (corrected for TP fraction) for target gene prediction, average auroc for ligand activity prediction and average aupr for ligand activity prediction.
#'
#' @examples
#' \dontrun{
#' library(dplyr)
#' nr_datasources = source_weights_df$source %>% unique() %>% length()
#' test_input = list("lr_sig_hub" = 0.5, "gr_hub" = 0.5, "damping_factor" = 0.5)
#' source_weights = source_weights_df$weight
#' names(source_weights) = source_weights_df$source
#' # test_evaluation_optimization = model_evaluation_hyperparameter_optimization(test_input, source_weights, "PPR", TRUE, lr_network, sig_network, gr_network, lapply(expression_settings_validation,convert_expression_settings_evaluation), secondary_targets = FALSE, remove_direct_links = "no")
#' }
#'
#' @export
#'
model_evaluation_hyperparameter_optimization = function(x, source_weights, algorithm, correct_topology, lr_network, sig_network, gr_network, settings, secondary_targets = FALSE, remove_direct_links = "no",damping_factor = NULL,...){
  # Construct a ligand-target model for one hyperparameter setting (data source
  # weights are fixed here) and evaluate it on the input validation settings.
  # Returns a numeric vector of length 4:
  #   c(mean auroc target prediction, mean aupr-corrected target prediction,
  #     median auroc ligand activity prediction, median aupr ligand activity prediction)
  requireNamespace("dplyr")
  if (!is.null(damping_factor) && is.null(x$damping_factor)){ # for the case damping factor is a fixed parameter
    x$damping_factor = damping_factor
  }
  # input checks (scalar conditions: use && / || instead of the vectorized & / |)
  if (!is.list(x))
    stop("x should be a list!")
  if (x$lr_sig_hub < 0 || x$lr_sig_hub > 1)
    stop("x$lr_sig_hub must be a number between 0 and 1 (0 and 1 included)")
  if (x$gr_hub < 0 || x$gr_hub > 1)
    stop("x$gr_hub must be a number between 0 and 1 (0 and 1 included)")
  if(is.null(x$ltf_cutoff)){
    if( (algorithm == "PPR" || algorithm == "SPL") && correct_topology == FALSE)
      warning("Did you not forget to give a value to x$ltf_cutoff?")
  } else {
    if (x$ltf_cutoff < 0 || x$ltf_cutoff > 1)
      stop("x$ltf_cutoff must be a number between 0 and 1 (0 and 1 included)")
  }
  if (!is.numeric(source_weights) || is.null(names(source_weights)))
    stop("source_weights should be a named numeric vector")
  if(algorithm == "PPR"){
    if (x$damping_factor < 0 || x$damping_factor >= 1)
      stop("x$damping_factor must be a number between 0 and 1 (0 included, 1 not)")
  }
  if (algorithm != "PPR" && algorithm != "SPL" && algorithm != "direct")
    stop("algorithm must be 'PPR' or 'SPL' or 'direct'")
  if (correct_topology != TRUE && correct_topology != FALSE)
    stop("correct_topology must be TRUE or FALSE")
  if (!is.data.frame(lr_network))
    stop("lr_network must be a data frame or tibble object")
  if (!is.data.frame(sig_network))
    stop("sig_network must be a data frame or tibble object")
  if (!is.data.frame(gr_network))
    stop("gr_network must be a data frame or tibble object")
  if (!is.list(settings))
    stop("settings should be a list!")
  if(!is.character(settings[[1]]$from) || !is.character(settings[[1]]$name))
    stop("setting$from and setting$name should be character vectors")
  if(!is.logical(settings[[1]]$response) || is.null(names(settings[[1]]$response)))
    stop("setting$response should be named logical vector containing class labels of the response that needs to be predicted ")
  if (secondary_targets != TRUE && secondary_targets != FALSE)
    stop("secondary_targets must be TRUE or FALSE")
  if (remove_direct_links != "no" && remove_direct_links != "ligand" && remove_direct_links != "ligand-receptor")
    stop("remove_direct_links must be 'no' or 'ligand' or 'ligand-receptor'")
  if(correct_topology == TRUE && !is.null(x$ltf_cutoff))
    warning("Because PPR-ligand-target matrix will be corrected for topology, the proposed cutoff on the ligand-tf matrix will be ignored (x$ltf_cutoff)") # fixed unbalanced parenthesis in message
  if(correct_topology == TRUE && algorithm != "PPR")
    warning("Topology correction is PPR-specific and makes no sense when the algorithm is not PPR")
  # assemble the parameter setting used for model construction
  parameters_setting = list(model_name = "query_design", source_weights = source_weights)
  if (algorithm == "PPR") {
    if (correct_topology == TRUE){
      # with topology correction the ltf cutoff is forced to 0: the PR-based correction replaces it
      parameters_setting = add_hyperparameters_parameter_settings(parameters_setting, lr_sig_hub = x$lr_sig_hub, gr_hub = x$gr_hub, ltf_cutoff = 0, algorithm = algorithm,damping_factor = x$damping_factor,correct_topology = TRUE)
    } else {
      parameters_setting = add_hyperparameters_parameter_settings(parameters_setting, lr_sig_hub = x$lr_sig_hub, gr_hub = x$gr_hub, ltf_cutoff = x$ltf_cutoff, algorithm = algorithm,damping_factor = x$damping_factor,correct_topology = FALSE)
    }
  }
  if (algorithm == "SPL" || algorithm == "direct"){
    parameters_setting = add_hyperparameters_parameter_settings(parameters_setting, lr_sig_hub = x$lr_sig_hub, gr_hub = x$gr_hub, ltf_cutoff = x$ltf_cutoff, algorithm = algorithm,damping_factor = NULL,correct_topology = FALSE)
  }
  # construct the model and evaluate target gene prediction + ligand activity prediction
  output_evaluation = evaluate_model(parameters_setting, lr_network, sig_network, gr_network, settings,calculate_popularity_bias_target_prediction = FALSE,calculate_popularity_bias_ligand_prediction=FALSE,ncitations = ncitations, secondary_targets = secondary_targets, remove_direct_links = remove_direct_links, n_target_bins = 3, ...)
  ligands_evaluation = settings %>% sapply(function(x){x$from}) %>% unlist() %>% unique()
  # pick the single importance measure with the best geometric mean of auroc and aupr_corrected
  ligand_activity_performance_setting_summary = output_evaluation$performances_ligand_prediction_single %>% select(-setting, -ligand) %>% group_by(importance_measure) %>% summarise_all(mean) %>% group_by(importance_measure) %>% mutate(geom_average = exp(mean(log(c(auroc,aupr_corrected)))))
  best_metric = ligand_activity_performance_setting_summary %>% ungroup() %>% filter(geom_average == max(geom_average)) %>% pull(importance_measure) %>% .[1]
  performances_ligand_prediction_single_summary = output_evaluation$performances_ligand_prediction_single %>% filter(importance_measure == best_metric)
  # average performances per ligand (median over settings), dropping ligands without scores
  performances_target_prediction_averaged = ligands_evaluation %>% lapply(function(x){x}) %>%
    lapply(wrapper_average_performances, output_evaluation$performances_target_prediction,"median") %>% bind_rows() %>% drop_na()
  performances_ligand_prediction_single_summary_averaged = ligands_evaluation %>% lapply(function(x){x}) %>%
    lapply(wrapper_average_performances, performances_ligand_prediction_single_summary %>% select(-importance_measure),"median") %>% bind_rows() %>% drop_na()
  mean_auroc_target_prediction = performances_target_prediction_averaged$auroc %>% mean(na.rm = TRUE) %>% unique()
  mean_aupr_target_prediction = performances_target_prediction_averaged$aupr_corrected %>% mean(na.rm = TRUE) %>% unique()
  median_auroc_ligand_prediction = performances_ligand_prediction_single_summary_averaged$auroc %>% median(na.rm = TRUE) %>% unique()
  median_aupr_ligand_prediction = performances_ligand_prediction_single_summary_averaged$aupr_corrected %>% median(na.rm = TRUE) %>% unique()
  return(c(mean_auroc_target_prediction, mean_aupr_target_prediction, median_auroc_ligand_prediction, median_aupr_ligand_prediction))
}
#' @title Process the output of mlrmbo multi-objective optimization to extract optimal parameter values.
#'
#' @description \code{process_mlrmbo_nichenet_optimization} will process the output of multi-objective mlrmbo optimization. As a result, a list containing the optimal parameter values for model construction will be returned.
#'
#' @usage
#' process_mlrmbo_nichenet_optimization(optimization_results,source_names,parameter_set_index = NULL)
#'
#' @param optimization_results A list generated as output from multi-objective optimization by mlrMBO. Should contain the elements $pareto.front, $pareto.set See \code{mlrmbo_optimization}.
#' @param source_names Character vector containing the names of the data sources. The order of data source names accords to the order of weights in x$source_weights.
#' @param parameter_set_index Number indicating which of the proposed solutions must be selected to extract optimal parameters. If NULL: the solution with the highest geometric mean will be selected. Default: NULL.
#'
#' @return A list containing the parameter values leading to maximal performance and thus with the following elements: $source_weight_df, $lr_sig_hub, $gr_hub, $ltf_cutoff, $damping_factor
#'
#' @examples
#' \dontrun{
#' library(dplyr)
#' library(mlrMBO)
#' library(parallelMap)
#' additional_arguments_topology_correction = list(source_names = source_weights_df$source %>% unique(), algorithm = "PPR", correct_topology = TRUE,lr_network = lr_network, sig_network = sig_network, gr_network = gr_network, settings = lapply(expression_settings_validation,convert_expression_settings_evaluation), secondary_targets = FALSE, remove_direct_links = "no", cutoff_method = "quantile")
#' nr_datasources = additional_arguments_topology_correction$source_names %>% length()
#'
#' obj_fun_multi_topology_correction = makeMultiObjectiveFunction(name = "nichenet_optimization",description = "data source weight and hyperparameter optimization: expensive black-box function", fn = model_evaluation_optimization, par.set = makeParamSet( makeNumericVectorParam("source_weights", len = nr_datasources, lower = 0, upper = 1), makeNumericVectorParam("lr_sig_hub", len = 1, lower = 0, upper = 1), makeNumericVectorParam("gr_hub", len = 1, lower = 0, upper = 1), makeNumericVectorParam("damping_factor", len = 1, lower = 0, upper = 0.99)), has.simple.signature = FALSE,n.objectives = 4, noisy = FALSE,minimize = c(FALSE,FALSE,FALSE,FALSE))
#'
#' mlrmbo_optimization_result = lapply(1,mlrmbo_optimization, obj_fun = obj_fun_multi_topology_correction, niter = 3, ncores = 8, nstart = 100, additional_arguments = additional_arguments_topology_correction)
#' optimized_parameters = process_mlrmbo_nichenet_optimization(mlrmbo_optimization_result[[1]],additional_arguments_topology_correction$source_names)
#'
#' }
#'
#' @export
#'
process_mlrmbo_nichenet_optimization = function(optimization_results,source_names,parameter_set_index = NULL){
  # Extract optimal parameter values (data source weights + hyperparameters) from
  # the output of a multi-objective mlrMBO optimization run.
  requireNamespace("dplyr")
  requireNamespace("tibble")
  # mlrmbo_optimization is typically run via lapply: unwrap a length-1 list
  if(length(optimization_results) == 1){
    optimization_results = optimization_results[[1]]
  }
  # input checks
  if (!is.list(optimization_results))
    stop("optimization_results should be a list!")
  if (!is.list(optimization_results$pareto.set))
    stop("optimization_results$pareto.set should be a list! Are you sure you provided the output of mlrMBO::mbo (multi-objective)?")
  if (!is.matrix(optimization_results$pareto.front))
    stop("optimization_results$pareto.front should be a matrix! Are you sure you provided the output of mlrMBO::mbo (multi-objective)?") # fixed unbalanced parenthesis in message
  if (!is.character(source_names))
    stop("source_names should be a character vector")
  if(!is.numeric(parameter_set_index) && !is.null(parameter_set_index))
    stop("parameter_set_index should be a number or NULL")
  # winning parameter set: by default the pareto-optimal solution with the highest
  # geometric mean of the objective function results
  if(is.null(parameter_set_index)){
    # NOTE: tbl_df() is deprecated in recent dplyr versions (as_tibble() is the replacement)
    parameter_set_index = optimization_results$pareto.front %>% tbl_df() %>% mutate(average = apply(.,1,function(x){exp(mean(log(x)))}), index = seq(nrow(.))) %>% filter(average == max(average)) %>% .$index %>% .[1] # in case of ties, take the first best solution (a multi-element index would break pareto.set[[...]])
  }
  if(parameter_set_index > nrow(optimization_results$pareto.front))
    stop("parameter_set_index may not be a number higher than the total number of proposed solutions")
  parameter_set = optimization_results$pareto.set[[parameter_set_index]]
  # data source weight model parameters
  source_weights = parameter_set$source_weights
  names(source_weights) = source_names
  # hyperparameters
  lr_sig_hub = parameter_set$lr_sig_hub
  gr_hub = parameter_set$gr_hub
  ltf_cutoff = parameter_set$ltf_cutoff
  damping_factor = parameter_set$damping_factor
  source_weight_df = tibble(source = names(source_weights), weight = source_weights)
  output_optimization = list(source_weight_df = source_weight_df, lr_sig_hub = lr_sig_hub, gr_hub = gr_hub,ltf_cutoff = ltf_cutoff, damping_factor = damping_factor)
  return(output_optimization)
}
#' @title Construct and evaluate a ligand-target model given input parameters with the purpose of parameter optimization for multi-ligand application.
#'
#' @description \code{model_evaluation_optimization_application} will take as input a setting of parameters (data source weights and hyperparameters) and layer-specific networks to construct a ligand-target matrix and evaluate its performance on input application settings (average performance for target gene prediction, as measured via the auroc and aupr).
#'
#' @usage
#' model_evaluation_optimization_application(x, source_names, algorithm, correct_topology, lr_network, sig_network, gr_network, settings, secondary_targets = FALSE, remove_direct_links = "no",classification_algorithm = "lda",damping_factor = NULL,...)
#'
#' @inheritParams model_evaluation_optimization
#' @param classification_algorithm The name of the classification algorithm to be applied. Should be supported by the caret package. Examples of algorithms we recommend: with embedded feature selection: "rf","glm","fda","glmnet","sdwd","gam","glmboost", "pls" (load "pls" package before!); without: "lda","naive_bayes", "pcaNNet". Please notice that not all these algorithms work when the features (i.e. ligand vectors) are categorical (i.e. discrete class assignments).
#' @param ... Additional arguments to \code{evaluate_multi_ligand_target_prediction}.
#'
#' @return A numeric vector of length 2 containing the average auroc and aupr for target gene prediction.
#'
#' @examples
#' \dontrun{
#' library(dplyr)
#' nr_datasources = source_weights_df$source %>% unique() %>% length()
#' test_input = list("source_weights" = rep(0.5, times = nr_datasources), "lr_sig_hub" = 0.5, "gr_hub" = 0.5, "damping_factor" = 0.5)
#' # test_evaluation_optimization = model_evaluation_optimization_application(test_input, source_weights_df$source %>% unique(), algorithm = "PPR", TRUE, lr_network, sig_network, gr_network, list(convert_expression_settings_evaluation(expression_settings_validation$TGFB_IL6_timeseries)), secondary_targets = FALSE, remove_direct_links = "no", classification_algorithm = "lda", var_imps = FALSE, cv_number = 5, cv_repeats = 4)
#' }
#'
#' @export
#'
model_evaluation_optimization_application = function(x, source_names, algorithm, correct_topology, lr_network, sig_network, gr_network, settings, secondary_targets = FALSE, remove_direct_links = "no",classification_algorithm = "lda",damping_factor = NULL,...){
  # Construct a ligand-target model for one parameter setting and evaluate it on
  # application settings via multi-ligand target gene prediction.
  # Returns c(mean auroc target prediction, mean aupr-corrected target prediction).
  requireNamespace("dplyr")
  if (!is.null(damping_factor) && is.null(x$damping_factor)){ # for the case damping factor is a fixed parameter
    x$damping_factor = damping_factor
  }
  # input checks (scalar conditions: use && / || instead of the vectorized & / |)
  if (!is.list(x))
    stop("x should be a list!")
  if (!is.numeric(x$source_weights))
    stop("x$source_weights should be a numeric vector")
  if (x$lr_sig_hub < 0 || x$lr_sig_hub > 1)
    stop("x$lr_sig_hub must be a number between 0 and 1 (0 and 1 included)")
  if (x$gr_hub < 0 || x$gr_hub > 1)
    stop("x$gr_hub must be a number between 0 and 1 (0 and 1 included)")
  if(is.null(x$ltf_cutoff)){
    if( (algorithm == "PPR" || algorithm == "SPL") && correct_topology == FALSE)
      warning("Did you not forget to give a value to x$ltf_cutoff?")
  } else {
    if (x$ltf_cutoff < 0 || x$ltf_cutoff > 1)
      stop("x$ltf_cutoff must be a number between 0 and 1 (0 and 1 included)")
  }
  if(algorithm == "PPR"){
    if (x$damping_factor < 0 || x$damping_factor >= 1)
      stop("x$damping_factor must be a number between 0 and 1 (0 included, 1 not)")
  }
  if (algorithm != "PPR" && algorithm != "SPL" && algorithm != "direct")
    stop("algorithm must be 'PPR' or 'SPL' or 'direct'")
  if (correct_topology != TRUE && correct_topology != FALSE)
    stop("correct_topology must be TRUE or FALSE")
  if (!is.data.frame(lr_network))
    stop("lr_network must be a data frame or tibble object")
  if (!is.data.frame(sig_network))
    stop("sig_network must be a data frame or tibble object")
  if (!is.data.frame(gr_network))
    stop("gr_network must be a data frame or tibble object")
  if (!is.list(settings))
    stop("settings should be a list!")
  if(!is.character(settings[[1]]$from) || !is.character(settings[[1]]$name))
    stop("setting$from and setting$name should be character vectors")
  if(!is.logical(settings[[1]]$response) || is.null(names(settings[[1]]$response)))
    stop("setting$response should be named logical vector containing class labels of the response that needs to be predicted ")
  if (secondary_targets != TRUE && secondary_targets != FALSE)
    stop("secondary_targets must be TRUE or FALSE")
  if (remove_direct_links != "no" && remove_direct_links != "ligand" && remove_direct_links != "ligand-receptor")
    stop("remove_direct_links must be 'no' or 'ligand' or 'ligand-receptor'")
  if(!is.character(source_names))
    stop("source_names should be a character vector")
  if(length(source_names) != length(x$source_weights))
    stop("Length of source_names should be the same as length of x$source_weights")
  if(correct_topology == TRUE && !is.null(x$ltf_cutoff))
    warning("Because PPR-ligand-target matrix will be corrected for topology, the proposed cutoff on the ligand-tf matrix will be ignored (x$ltf_cutoff)") # fixed unbalanced parenthesis in message
  if(correct_topology == TRUE && algorithm != "PPR")
    warning("Topology correction is PPR-specific and makes no sense when the algorithm is not PPR")
  if(!is.character(classification_algorithm))
    stop("classification_algorithm should be a character vector of length 1")
  # assemble the parameter setting used for model construction
  names(x$source_weights) = source_names
  parameters_setting = list(model_name = "query_design", source_weights = x$source_weights)
  if (algorithm == "PPR") {
    if (correct_topology == TRUE){
      # with topology correction the ltf cutoff is forced to 0: the PR-based correction replaces it
      parameters_setting = add_hyperparameters_parameter_settings(parameters_setting, lr_sig_hub = x$lr_sig_hub, gr_hub = x$gr_hub, ltf_cutoff = 0, algorithm = algorithm,damping_factor = x$damping_factor,correct_topology = TRUE)
    } else {
      parameters_setting = add_hyperparameters_parameter_settings(parameters_setting, lr_sig_hub = x$lr_sig_hub, gr_hub = x$gr_hub, ltf_cutoff = x$ltf_cutoff, algorithm = algorithm,damping_factor = x$damping_factor,correct_topology = FALSE)
    }
  }
  if (algorithm == "SPL" || algorithm == "direct"){
    parameters_setting = add_hyperparameters_parameter_settings(parameters_setting, lr_sig_hub = x$lr_sig_hub, gr_hub = x$gr_hub, ltf_cutoff = x$ltf_cutoff, algorithm = algorithm,damping_factor = NULL,correct_topology = FALSE)
  }
  # build the model and evaluate multi-ligand target gene prediction on the application settings
  output_evaluation = evaluate_model_application_multi_ligand(parameters_setting, lr_network, sig_network, gr_network, settings, secondary_targets = secondary_targets, remove_direct_links = remove_direct_links, classification_algorithm = classification_algorithm,...)
  mean_auroc_target_prediction = output_evaluation$performances_target_prediction$auroc %>% mean()
  mean_aupr_target_prediction = output_evaluation$performances_target_prediction$aupr_corrected %>% mean()
  return(c(mean_auroc_target_prediction, mean_aupr_target_prediction))
}
#' @title Estimate data source weights of data sources of interest based on leave-one-in and leave-one-out characterization performances.
#'
#' @description \code{estimate_source_weights_characterization} will estimate data source weights of data sources of interest based on a model that was trained to predict weights of data sources based on leave-one-in and leave-one-out characterization performances.
#'
#' @usage
#' estimate_source_weights_characterization(loi_performances,loo_performances,source_weights_df, sources_oi, random_forest =FALSE)
#'
#' @param loi_performances Performances of models in which a particular data source of interest was the only data source in or the ligand-signaling or the gene regulatory network.
#' @param loo_performances Performances of models in which a particular data source of interest was removed from the ligand-signaling or the gene regulatory network before model construction.
#' @param source_weights_df A data frame / tibble containing the weights associated to each individual data source. Sources with higher weights will contribute more to the final model performance (required columns: source, weight). Note that only interactions described by sources included here, will be retained during model construction.
#' @param sources_oi The names of the data sources of which data source weights should be estimated based on leave-one-in and leave-one-out performances.
#' @param random_forest Indicate whether for the regression between leave-one-in + leave-one-out performances and data source weights a random forest model should be trained (TRUE) or a linear model (FALSE). Default: FALSE
#'
#' @return A list containing two elements. $source_weights_df (the input source_weights_df extended by the estimated source_weighs for data sources of interest) and $model (model object of the regression between leave-one-in, leave-one-out performances and data source weights).
#'
#' @importFrom purrr reduce
#' @importFrom randomForest randomForest
#'
#' @examples
#' \dontrun{
#' library(dplyr)
#' # run characterization loi
#' settings = lapply(expression_settings_validation[1:4], convert_expression_settings_evaluation)
#' weights_settings_loi = prepare_settings_leave_one_in_characterization(lr_network = lr_network, sig_network = sig_network, gr_network = gr_network, source_weights_df)
#' weights_settings_loi = lapply(weights_settings_loi,add_hyperparameters_parameter_settings, lr_sig_hub = 0.25,gr_hub = 0.5,ltf_cutoff = 0,algorithm = "PPR", damping_factor = 0.2, correct_topology = TRUE)
#' doMC::registerDoMC(cores = 4)
#' job_characterization_loi = parallel::mclapply(weights_settings_loi[1:4], evaluate_model,lr_network = lr_network, sig_network = sig_network, gr_network = gr_network, settings,calculate_popularity_bias_target_prediction = FALSE, calculate_popularity_bias_ligand_prediction = FALSE, ncitations, mc.cores = 4)
#' loi_performances = process_characterization_target_prediction_average(job_characterization_loi)
#' # run characterization loo
#' weights_settings_loo = prepare_settings_leave_one_out_characterization(lr_network = lr_network, sig_network = sig_network, gr_network = gr_network, source_weights_df)
#' weights_settings_loo = lapply(weights_settings_loo,add_hyperparameters_parameter_settings, lr_sig_hub = 0.25,gr_hub = 0.5,ltf_cutoff = 0,algorithm = "PPR", damping_factor = 0.2, correct_topology = TRUE)
#' doMC::registerDoMC(cores = 4)
#' job_characterization_loo = parallel::mclapply(weights_settings_loo[1:4], evaluate_model,lr_network = lr_network, sig_network = sig_network, gr_network = gr_network, settings,calculate_popularity_bias_target_prediction = FALSE, calculate_popularity_bias_ligand_prediction = FALSE,ncitations,mc.cores = 4)
#' loo_performances = process_characterization_target_prediction_average(job_characterization_loo)
#' # run the regression
#' sources_oi = c("kegg_cytokines")
#' output = estimate_source_weights_characterization(loi_performances,loo_performances,source_weights_df %>% filter(source != "kegg_cytokines"), sources_oi, random_forest =FALSE)
#' }
#'
#' @export
#'
estimate_source_weights_characterization = function(loi_performances,loo_performances,source_weights_df, sources_oi, random_forest =FALSE){
  # Fit a regression model (linear model or random forest) mapping leave-one-in /
  # leave-one-out characterization performances onto data source weights, then use
  # that model to assign weights to the data sources of interest.
  requireNamespace("dplyr")
  requireNamespace("tibble")
  # -- input validation --------------------------------------------------------
  if (!is.data.frame(loi_performances)) {
    stop("loi_performances should be a data frame")
  }
  if (!is.character(loi_performances$model_name)) {
    stop("loi_performances$model_name should be a character vector")
  }
  if (!is.data.frame(loo_performances)) {
    stop("loo_performances should be a data frame")
  }
  if (!is.character(loo_performances$model_name)) {
    stop("loo_performances$model_name should be a character vector")
  }
  if (!is.data.frame(source_weights_df) || sum((source_weights_df$weight > 1)) != 0) {
    stop("source_weights_df must be a data frame or tibble object and no data source weight may be higher than 1")
  }
  if (!is.character(sources_oi)) {
    stop("sources_oi should be a character vector")
  }
  if (random_forest != TRUE & random_forest != FALSE) {
    stop("random_forest should be TRUE or FALSE")
  }
  # -- split performances ------------------------------------------------------
  # train on every data source except the ones whose weight must be estimated
  training_loi = loi_performances %>% filter(!(model_name %in% sources_oi))
  training_loo = loo_performances %>% filter(!(model_name %in% sources_oi))
  # test set: the complete model plus the sources of interest
  testing_loi = loi_performances %>% filter(model_name == "complete_model" | (model_name %in% sources_oi))
  testing_loo = loo_performances %>% filter(model_name == "complete_model" | (model_name %in% sources_oi))
  # -- fit the regression and predict weights for the sources of interest ------
  regression_model = regression_characterization_optimization(training_loi, training_loo, source_weights_df, random_forest = random_forest)
  updated_weights_df = assign_new_weight(testing_loi, testing_loo, regression_model, source_weights_df)
  return(list(source_weights_df = updated_weights_df, model = regression_model))
}
#' @title Construct and evaluate a ligand-target model given input parameters with the purpose of evaluating cross-validation models.
#'
#' @description \code{evaluate_model_cv} will take as input a setting of parameters (data source weights and hyperparameters) and layer-specific networks to construct a ligand-target matrix and calculate the model's performance in target gene prediction and feature importance scores for ligand prediction).
#'
#' @usage
#' evaluate_model_cv(parameters_setting, lr_network, sig_network, gr_network, settings, secondary_targets = FALSE, remove_direct_links = "no",...)
#'
#' @inheritParams evaluate_model
#'
#' @return A list containing following elements: $performances_target_prediction, $importances_ligand_prediction.
#'
#' @importFrom tibble tibble
#'
#' @examples
#' \dontrun{
#' library(dplyr)
#' settings = lapply(expression_settings_validation[1:4], convert_expression_settings_evaluation)
#' weights_settings_loi = prepare_settings_leave_one_in_characterization(lr_network,sig_network, gr_network, source_weights_df)
#' weights_settings_loi = lapply(weights_settings_loi,add_hyperparameters_parameter_settings, lr_sig_hub = 0.25,gr_hub = 0.5,ltf_cutoff = 0,algorithm = "PPR",damping_factor = 0.8,correct_topology = TRUE)
#' doMC::registerDoMC(cores = 8)
#' output_characterization = parallel::mclapply(weights_settings_loi[1:3],evaluate_model_cv,lr_network,sig_network, gr_network,settings,calculate_popularity_bias_target_prediction = TRUE, calculate_popularity_bias_ligand_prediction = TRUE, ncitations, mc.cores = 3)
#' }
#'
#' @export
#'
evaluate_model_cv = function(parameters_setting, lr_network, sig_network, gr_network, settings,secondary_targets = FALSE, remove_direct_links = "no", ...){
  # Construct a ligand-target model for one parameter setting and evaluate it for
  # cross-validation purposes. Returns a list with:
  #   $performances_target_prediction: per-setting target gene prediction performances
  #   $importances_ligand_prediction: per-ligand importance scores (NA-free measures only)
  #   $performances_ligand_prediction_single: ligand activity performances per single importance measure
  requireNamespace("dplyr")
  # input checks (scalar conditions: use && / || instead of the vectorized & / |)
  if (!is.list(parameters_setting))
    stop("parameters_setting should be a list!")
  if (!is.character(parameters_setting$model_name))
    stop("parameters_setting$model_name should be a character vector")
  if (!is.numeric(parameters_setting$source_weights) || is.null(names(parameters_setting$source_weights)))
    stop("parameters_setting$source_weights should be a named numeric vector")
  if (parameters_setting$lr_sig_hub < 0 || parameters_setting$lr_sig_hub > 1)
    stop("parameters_setting$lr_sig_hub must be a number between 0 and 1 (0 and 1 included)")
  if (parameters_setting$gr_hub < 0 || parameters_setting$gr_hub > 1)
    stop("parameters_setting$gr_hub must be a number between 0 and 1 (0 and 1 included)")
  if(is.null(parameters_setting$ltf_cutoff)){
    if( parameters_setting$algorithm == "PPR" || parameters_setting$algorithm == "SPL" )
      warning("Did you not forget to give a value to parameters_setting$ltf_cutoff?")
  } else {
    if (parameters_setting$ltf_cutoff < 0 || parameters_setting$ltf_cutoff > 1)
      stop("parameters_setting$ltf_cutoff must be a number between 0 and 1 (0 and 1 included)")
  }
  if (parameters_setting$algorithm != "PPR" && parameters_setting$algorithm != "SPL" && parameters_setting$algorithm != "direct")
    stop("parameters_setting$algorithm must be 'PPR' or 'SPL' or 'direct'")
  if(parameters_setting$algorithm == "PPR"){
    if (parameters_setting$damping_factor < 0 || parameters_setting$damping_factor >= 1)
      stop("parameters_setting$damping_factor must be a number between 0 and 1 (0 included, 1 not)")
  }
  if (parameters_setting$correct_topology != TRUE && parameters_setting$correct_topology != FALSE)
    stop("parameters_setting$correct_topology must be TRUE or FALSE")
  if (!is.data.frame(lr_network))
    stop("lr_network must be a data frame or tibble object")
  if (!is.data.frame(sig_network))
    stop("sig_network must be a data frame or tibble object")
  if (!is.data.frame(gr_network))
    stop("gr_network must be a data frame or tibble object")
  if (!is.list(settings))
    stop("settings should be a list!")
  if(!is.character(settings[[1]]$from) || !is.character(settings[[1]]$name))
    stop("setting$from and setting$name should be character vectors")
  if(!is.logical(settings[[1]]$response) || is.null(names(settings[[1]]$response)))
    stop("setting$response should be named logical vector containing class labels of the response that needs to be predicted ")
  if (secondary_targets != TRUE && secondary_targets != FALSE)
    stop("secondary_targets must be TRUE or FALSE")
  if (remove_direct_links != "no" && remove_direct_links != "ligand" && remove_direct_links != "ligand-receptor")
    stop("remove_direct_links must be 'no' or 'ligand' or 'ligand-receptor'")
  # construct the ligand-target model
  ligands = extract_ligands_from_settings(settings)
  output_model_construction = construct_model(parameters_setting, lr_network, sig_network, gr_network, ligands, secondary_targets = secondary_targets, remove_direct_links = remove_direct_links)
  ligand_target_matrix = output_model_construction$model
  # detect ligands for which every target score is zero
  ligands_zero = ligand_target_matrix %>% colnames() %>% sapply(function(ligand){sum(ligand_target_matrix[,ligand]) == 0}) %>% .[. == TRUE]
  if (length(ligands_zero) > 0){ # fixed: was length(ligands_zero > 0), which only worked by accident
    noisy_target_scores = runif(nrow(ligand_target_matrix), min = 0, max = min(ligand_target_matrix[ligand_target_matrix>0])) # give ligands not in model a very low noisy random score; why not all 0 --> ties --> problem aupr calculation
    ligand_target_matrix[,names(ligands_zero)] = noisy_target_scores
  }
  # transcriptional response evaluation: target gene prediction per setting
  performances_target_prediction = bind_rows(lapply(settings,evaluate_target_prediction, ligand_target_matrix))
  # ligand activity state prediction: importance scores per single ligand
  all_ligands = unlist(extract_ligands_from_settings(settings, combination = FALSE))
  settings_ligand_pred = convert_settings_ligand_prediction(settings, all_ligands, validation = TRUE, single = TRUE)
  ligand_importances = bind_rows(lapply(settings_ligand_pred, get_single_ligand_importances, ligand_target_matrix[, all_ligands]))
  # NA correlations (e.g. from constant predictions) are treated as zero importance
  ligand_importances$pearson[is.na(ligand_importances$pearson)] = 0
  ligand_importances$spearman[is.na(ligand_importances$spearman)] = 0
  ligand_importances$pearson_log_pval[is.na(ligand_importances$pearson_log_pval)] = 0
  ligand_importances$spearman_log_pval[is.na(ligand_importances$spearman_log_pval)] = 0
  # keep only importance measures without any missing values
  all_importances = ligand_importances %>% select_if(.predicate = function(x){sum(is.na(x)) == 0})
  # evaluate ligand activity prediction separately per setting, per single importance measure
  performances_ligand_prediction_single = all_importances$setting %>% unique() %>% lapply(function(x){x}) %>%
    lapply(wrapper_evaluate_single_importances_ligand_prediction,all_importances) %>%
    bind_rows() %>% inner_join(all_importances %>% distinct(setting,ligand))
  return(list(performances_target_prediction = performances_target_prediction, importances_ligand_prediction = all_importances, performances_ligand_prediction_single = performances_ligand_prediction_single))
}
|
/R/parameter_optimization.R
|
no_license
|
rsggsr/nichenetr
|
R
| false
| false
| 47,775
|
r
|
#' @title Construct and evaluate a ligand-target model given input parameters with the purpose of parameter optimization.
#'
#' @description \code{model_evaluation_optimization} will take as input a setting of parameters (data source weights and hyperparameters) and layer-specific networks to construct a ligand-target matrix and evaluate its performance on input validation settings (average performance for both target gene prediction and ligand activity prediction, as measured via the auroc and aupr).
#'
#' @usage
#' model_evaluation_optimization(x, source_names, algorithm, correct_topology, lr_network, sig_network, gr_network, settings, secondary_targets = FALSE, remove_direct_links = "no",damping_factor = NULL,...)
#'
#' @inheritParams evaluate_model
#' @inheritParams construct_ligand_target_matrix
#' @param x A list containing parameter values for parameter optimization. $source_weights: numeric vector representing the weight for each data source; $lr_sig_hub: hub correction factor for the ligand-signaling network; $gr_hub: hub correction factor for the gene regulatory network; $damping_factor: damping factor in the PPR algorithm if using PPR and optionally $ltf_cutoff: the cutoff on the ligand-tf matrix. For more information about these parameters: see \code{construct_ligand_target_matrix} and \code{apply_hub_correction}.
#' @param source_names Character vector containing the names of the data sources. The order of data source names accords to the order of weights in x$source_weights.
#' @param correct_topology This parameter indicates whether the PPR-constructed ligand-target matrix will be subtracted by a PR-constructed target matrix. TRUE or FALSE.
#' @param damping_factor The value of the damping factor if damping factor is a fixed parameter and will not be optimized and thus not belong to x. Default NULL.
#' @param ... Additional arguments to \code{make_discrete_ligand_target_matrix}.
#'
#' @return A numeric vector of length 4 containing the average auroc for target gene prediction, average aupr (corrected for TP fraction) for target gene prediction, average auroc for ligand activity prediction and average aupr for ligand activity prediction.
#'
#' @examples
#' \dontrun{
#' library(dplyr)
#' nr_datasources = source_weights_df$source %>% unique() %>% length()
#' test_input = list("source_weights" = rep(0.5, times = nr_datasources), "lr_sig_hub" = 0.5, "gr_hub" = 0.5, "damping_factor" = 0.5)
#' # test_evaluation_optimization = model_evaluation_optimization(test_input, source_weights_df$source %>% unique(), "PPR", TRUE, lr_network, sig_network, gr_network, lapply(expression_settings_validation,convert_expression_settings_evaluation), secondary_targets = FALSE, remove_direct_links = "no")
#' }
#'
#' @export
#'
model_evaluation_optimization = function(x, source_names, algorithm, correct_topology, lr_network, sig_network, gr_network, settings, secondary_targets = FALSE, remove_direct_links = "no",damping_factor = NULL,...){
  requireNamespace("dplyr")
  # When the damping factor is a fixed (non-optimized) parameter, it is passed
  # separately and injected into the parameter list x here.
  if (!is.null(damping_factor) && is.null(x$damping_factor)){
    x$damping_factor = damping_factor
  }
  # input checks: all conditions below compare scalars, so the short-circuiting
  # && / || operators are used instead of the elementwise & / |
  if (!is.list(x))
    stop("x should be a list!")
  if (!is.numeric(x$source_weights))
    stop("x$source_weights should be a numeric vector")
  if (x$lr_sig_hub < 0 || x$lr_sig_hub > 1)
    stop("x$lr_sig_hub must be a number between 0 and 1 (0 and 1 included)")
  if (x$gr_hub < 0 || x$gr_hub > 1)
    stop("x$gr_hub must be a number between 0 and 1 (0 and 1 included)")
  if(is.null(x$ltf_cutoff)){
    if( (algorithm == "PPR" || algorithm == "SPL") && correct_topology == FALSE)
      warning("Did you forget to give a value to x$ltf_cutoff?")
  } else {
    if (x$ltf_cutoff < 0 || x$ltf_cutoff > 1)
      stop("x$ltf_cutoff must be a number between 0 and 1 (0 and 1 included)")
  }
  if(algorithm == "PPR"){
    if (x$damping_factor < 0 || x$damping_factor >= 1)
      stop("x$damping_factor must be a number between 0 and 1 (0 included, 1 not)")
  }
  if (algorithm != "PPR" && algorithm != "SPL" && algorithm != "direct")
    stop("algorithm must be 'PPR' or 'SPL' or 'direct'")
  if (correct_topology != TRUE && correct_topology != FALSE)
    stop("correct_topology must be TRUE or FALSE")
  if (!is.data.frame(lr_network))
    stop("lr_network must be a data frame or tibble object")
  if (!is.data.frame(sig_network))
    stop("sig_network must be a data frame or tibble object")
  if (!is.data.frame(gr_network))
    stop("gr_network must be a data frame or tibble object")
  if (!is.list(settings))
    stop("settings should be a list!")
  if(!is.character(settings[[1]]$from) || !is.character(settings[[1]]$name))
    stop("setting$from and setting$name should be character vectors")
  if(!is.logical(settings[[1]]$response) || is.null(names(settings[[1]]$response)))
    stop("setting$response should be named logical vector containing class labels of the response that needs to be predicted ")
  if (secondary_targets != TRUE && secondary_targets != FALSE)
    stop("secondary_targets must be TRUE or FALSE")
  if (remove_direct_links != "no" && remove_direct_links != "ligand" && remove_direct_links != "ligand-receptor")
    stop("remove_direct_links must be 'no' or 'ligand' or 'ligand-receptor'")
  if(!is.character(source_names))
    stop("source_names should be a character vector")
  if(length(source_names) != length(x$source_weights))
    stop("Length of source_names should be the same as length of x$source_weights")
  if(correct_topology == TRUE && !is.null(x$ltf_cutoff))
    warning("Because PPR-ligand-target matrix will be corrected for topology, the proposed cutoff on the ligand-tf matrix will be ignored (x$ltf_cutoff)")
  if(correct_topology == TRUE && algorithm != "PPR")
    warning("Topology correction is PPR-specific and makes no sense when the algorithm is not PPR")
  # couple every weight to its data source name (order of source_names is assumed
  # to correspond to the order of x$source_weights)
  names(x$source_weights) = source_names
  parameters_setting = list(model_name = "query_design", source_weights = x$source_weights)
  if (algorithm == "PPR") {
    if (correct_topology == TRUE){
      # with topology correction, no cutoff is applied on the ligand-tf matrix (ltf_cutoff = 0)
      parameters_setting = add_hyperparameters_parameter_settings(parameters_setting, lr_sig_hub = x$lr_sig_hub, gr_hub = x$gr_hub, ltf_cutoff = 0, algorithm = algorithm,damping_factor = x$damping_factor,correct_topology = TRUE)
    } else {
      parameters_setting = add_hyperparameters_parameter_settings(parameters_setting, lr_sig_hub = x$lr_sig_hub, gr_hub = x$gr_hub, ltf_cutoff = x$ltf_cutoff, algorithm = algorithm,damping_factor = x$damping_factor,correct_topology = FALSE)
    }
  }
  if (algorithm == "SPL" || algorithm == "direct"){
    parameters_setting = add_hyperparameters_parameter_settings(parameters_setting, lr_sig_hub = x$lr_sig_hub, gr_hub = x$gr_hub, ltf_cutoff = x$ltf_cutoff, algorithm = algorithm,damping_factor = NULL,correct_topology = FALSE)
  }
  # build the ligand-target model with these parameters and evaluate both target
  # gene prediction and ligand activity prediction on the validation settings
  output_evaluation = evaluate_model(parameters_setting, lr_network, sig_network, gr_network, settings,calculate_popularity_bias_target_prediction = FALSE,calculate_popularity_bias_ligand_prediction=FALSE,ncitations = ncitations, secondary_targets = secondary_targets, remove_direct_links = remove_direct_links, n_target_bins = 3, ...)
  ligands_evaluation = settings %>% sapply(function(x){x$from}) %>% unlist() %>% unique()
  # per importance measure: average the ligand activity performance over all settings,
  # then keep the measure with the best geometric mean of auroc and aupr_corrected
  ligand_activity_performance_setting_summary = output_evaluation$performances_ligand_prediction_single %>% select(-setting, -ligand) %>% group_by(importance_measure) %>% summarise_all(mean) %>% group_by(importance_measure) %>% mutate(geom_average = exp(mean(log(c(auroc,aupr_corrected)))))
  best_metric = ligand_activity_performance_setting_summary %>% ungroup() %>% filter(geom_average == max(geom_average)) %>% pull(importance_measure) %>% .[1]
  performances_ligand_prediction_single_summary = output_evaluation$performances_ligand_prediction_single %>% filter(importance_measure == best_metric)
  # average performances per ligand (median over settings) so ligands occurring in
  # several validation settings do not dominate the summary
  performances_target_prediction_averaged = ligands_evaluation %>% lapply(function(x){x}) %>%
    lapply(wrapper_average_performances, output_evaluation$performances_target_prediction,"median") %>% bind_rows() %>% drop_na()
  performances_ligand_prediction_single_summary_averaged = ligands_evaluation %>% lapply(function(x){x}) %>%
    lapply(wrapper_average_performances, performances_ligand_prediction_single_summary %>% select(-importance_measure),"median") %>% bind_rows() %>% drop_na()
  mean_auroc_target_prediction = performances_target_prediction_averaged$auroc %>% mean(na.rm = TRUE) %>% unique()
  mean_aupr_target_prediction = performances_target_prediction_averaged$aupr_corrected %>% mean(na.rm = TRUE) %>% unique()
  median_auroc_ligand_prediction = performances_ligand_prediction_single_summary_averaged$auroc %>% median(na.rm = TRUE) %>% unique()
  median_aupr_ligand_prediction = performances_ligand_prediction_single_summary_averaged$aupr_corrected %>% median(na.rm = TRUE) %>% unique()
  # the four objectives that are maximized during multi-objective optimization
  return(c(mean_auroc_target_prediction, mean_aupr_target_prediction, median_auroc_ligand_prediction, median_aupr_ligand_prediction))
}
#' @title Optimization of objective functions via model-based optimization.
#'
#' @description \code{mlrmbo_optimization} will execute multi-objective model-based optimization of an objective function. The defined surrogate learner here is "kriging".
#'
#' @usage
#' mlrmbo_optimization(run_id,obj_fun,niter,ncores,nstart,additional_arguments)
#'
#' @param run_id Indicate the id of the optimization run.
#' @param obj_fun An objective function as created by the function \code{mlrMBO::makeMultiObjectiveFunction}.
#' @param niter The number of iterations during the optimization process.
#' @param ncores The number of cores on which several parameter settings will be evaluated in parallel.
#' @param nstart The number of different parameter settings used in the begin design.
#' @param additional_arguments A list of named additional arguments that will be passed on the objective function.
#'
#' @return A result object from the function \code{mlrMBO::mbo}. Among other things, this contains the optimal parameter settings, the output corresponding to every input etc.
#'
#' @examples
#' \dontrun{
#' library(dplyr)
#' library(mlrMBO)
#' library(parallelMap)
#' additional_arguments_topology_correction = list(source_names = source_weights_df$source %>% unique(), algorithm = "PPR", correct_topology = TRUE,lr_network = lr_network, sig_network = sig_network, gr_network = gr_network, settings = lapply(expression_settings_validation,convert_expression_settings_evaluation), secondary_targets = FALSE, remove_direct_links = "no", cutoff_method = "quantile")
#' nr_datasources = additional_arguments_topology_correction$source_names %>% length()
#'
#' obj_fun_multi_topology_correction = makeMultiObjectiveFunction(name = "nichenet_optimization",description = "data source weight and hyperparameter optimization: expensive black-box function", fn = model_evaluation_optimization, par.set = makeParamSet( makeNumericVectorParam("source_weights", len = nr_datasources, lower = 0, upper = 1), makeNumericVectorParam("lr_sig_hub", len = 1, lower = 0, upper = 1), makeNumericVectorParam("gr_hub", len = 1, lower = 0, upper = 1), makeNumericVectorParam("damping_factor", len = 1, lower = 0, upper = 0.99)), has.simple.signature = FALSE,n.objectives = 4, noisy = FALSE,minimize = c(FALSE,FALSE,FALSE,FALSE))
#'
#' mlrmbo_optimization = lapply(1,mlrmbo_optimization, obj_fun = obj_fun_multi_topology_correction, niter = 3, ncores = 8, nstart = 100, additional_arguments = additional_arguments_topology_correction)
#'
#' }
#'
#' @export
#'
mlrmbo_optimization = function(run_id,obj_fun,niter,ncores,nstart,additional_arguments){
  requireNamespace("mlrMBO")
  requireNamespace("parallelMap")
  requireNamespace("dplyr")
  # input checks (scalar conditions: use short-circuit || instead of elementwise |)
  if (length(run_id) != 1)
    stop("run_id should be a vector of length 1")
  if(!is.function(obj_fun) || !is.list(attributes(obj_fun)$par.set$pars))
    stop("obj_fun should be a function (and generated by mlrMBO::makeMultiObjectiveFunction)")
  if(niter <= 0)
    stop("niter should be a number higher than 0")
  if(ncores <= 0)
    stop("ncores should be a number higher than 0")
  # total number of scalar parameters: sum of lengths of all (possibly vector-valued) parameters
  nparams = attributes(obj_fun)$par.set$pars %>% lapply(function(x){x$len}) %>% unlist() %>% sum()
  if(nstart < nparams)
    stop("nstart should be equal or larger than the number of parameters")
  if (!is.list(additional_arguments))
    stop("additional_arguments should be a list!")
  # multi-objective MBO control: direct indicator-based infill ("dib") with the
  # SMS indicator, ncores points proposed per iteration (evaluated in parallel)
  ctrl = makeMBOControl(n.objectives = attributes(obj_fun) %>% .$n.objectives, propose.points = ncores)
  ctrl = setMBOControlMultiObj(ctrl, method = "dib",dib.indicator = "sms")
  ctrl = setMBOControlInfill(ctrl, crit = makeMBOInfillCritDIB())
  ctrl = setMBOControlMultiPoint(ctrl, method = "cb")
  ctrl = setMBOControlTermination(ctrl, iters = niter)
  # initial design of nstart parameter settings covering the parameter space
  design = generateDesign(n = nstart, par.set = getParamSet(obj_fun))
  configureMlr(on.learner.warning = "quiet", show.learner.output = FALSE)
  parallelStartMulticore(cpus = ncores, show.info = TRUE)
  # make sure the parallel backend is stopped even when mbo() errors
  on.exit(parallelStop(), add = TRUE)
  # kriging surrogate learner with standard-error prediction (required by the infill criterion)
  surrogate_learner = makeLearner("regr.km", predict.type = "se")
  print(design)
  print(ctrl)
  res = mbo(obj_fun, design = design, learner = surrogate_learner ,control = ctrl, show.info = TRUE, more.args = additional_arguments)
  return(res)
}
#' @title Construct and evaluate a ligand-target model given input parameters with the purpose of hyperparameter optimization.
#'
#' @description \code{model_evaluation_hyperparameter_optimization} will take as input a setting of parameters (hyperparameters), data source weights and layer-specific networks to construct a ligand-target matrix and evaluate its performance on input validation settings (average performance for both target gene prediction and ligand activity prediction, as measured via the auroc and aupr).
#'
#' @usage
#' model_evaluation_hyperparameter_optimization(x, source_weights, algorithm, correct_topology, lr_network, sig_network, gr_network, settings, secondary_targets = FALSE, remove_direct_links = "no",damping_factor = NULL,...)
#'
#' @inheritParams model_evaluation_optimization
#' @param x A list containing the following elements. $lr_sig_hub: hub correction factor for the ligand-signaling network; $gr_hub: hub correction factor for the gene regulatory network; $damping_factor: damping factor in the PPR algorithm if using PPR and optionally $ltf_cutoff: the cutoff on the ligand-tf matrix. For more information about these parameters: see \code{construct_ligand_target_matrix} and \code{apply_hub_correction}.
#' @param source_weights A named numeric vector indicating the weight for every data source.
#' @param ... Additional arguments to \code{make_discrete_ligand_target_matrix}.
#'
#' @return A numeric vector of length 4 containing the average auroc for target gene prediction, average aupr (corrected for TP fraction) for target gene prediction, average auroc for ligand activity prediction and average aupr for ligand activity prediction.
#'
#' @examples
#' \dontrun{
#' library(dplyr)
#' nr_datasources = source_weights_df$source %>% unique() %>% length()
#' test_input = list("lr_sig_hub" = 0.5, "gr_hub" = 0.5, "damping_factor" = 0.5)
#' source_weights = source_weights_df$weight
#' names(source_weights) = source_weights_df$source
#' # test_evaluation_optimization = model_evaluation_hyperparameter_optimization(test_input, source_weights, "PPR", TRUE, lr_network, sig_network, gr_network, lapply(expression_settings_validation,convert_expression_settings_evaluation), secondary_targets = FALSE, remove_direct_links = "no")
#' }
#'
#' @export
#'
model_evaluation_hyperparameter_optimization = function(x, source_weights, algorithm, correct_topology, lr_network, sig_network, gr_network, settings, secondary_targets = FALSE, remove_direct_links = "no",damping_factor = NULL,...){
  requireNamespace("dplyr")
  # When the damping factor is a fixed (non-optimized) parameter, it is passed
  # separately and injected into the parameter list x here.
  if (!is.null(damping_factor) && is.null(x$damping_factor)){
    x$damping_factor = damping_factor
  }
  # input checks: all conditions below compare scalars, so the short-circuiting
  # && / || operators are used instead of the elementwise & / |
  if (!is.list(x))
    stop("x should be a list!")
  if (x$lr_sig_hub < 0 || x$lr_sig_hub > 1)
    stop("x$lr_sig_hub must be a number between 0 and 1 (0 and 1 included)")
  if (x$gr_hub < 0 || x$gr_hub > 1)
    stop("x$gr_hub must be a number between 0 and 1 (0 and 1 included)")
  if(is.null(x$ltf_cutoff)){
    if( (algorithm == "PPR" || algorithm == "SPL") && correct_topology == FALSE)
      warning("Did you forget to give a value to x$ltf_cutoff?")
  } else {
    if (x$ltf_cutoff < 0 || x$ltf_cutoff > 1)
      stop("x$ltf_cutoff must be a number between 0 and 1 (0 and 1 included)")
  }
  if (!is.numeric(source_weights) || is.null(names(source_weights)))
    stop("source_weights should be a named numeric vector")
  if(algorithm == "PPR"){
    if (x$damping_factor < 0 || x$damping_factor >= 1)
      stop("x$damping_factor must be a number between 0 and 1 (0 included, 1 not)")
  }
  if (algorithm != "PPR" && algorithm != "SPL" && algorithm != "direct")
    stop("algorithm must be 'PPR' or 'SPL' or 'direct'")
  if (correct_topology != TRUE && correct_topology != FALSE)
    stop("correct_topology must be TRUE or FALSE")
  if (!is.data.frame(lr_network))
    stop("lr_network must be a data frame or tibble object")
  if (!is.data.frame(sig_network))
    stop("sig_network must be a data frame or tibble object")
  if (!is.data.frame(gr_network))
    stop("gr_network must be a data frame or tibble object")
  if (!is.list(settings))
    stop("settings should be a list!")
  if(!is.character(settings[[1]]$from) || !is.character(settings[[1]]$name))
    stop("setting$from and setting$name should be character vectors")
  if(!is.logical(settings[[1]]$response) || is.null(names(settings[[1]]$response)))
    stop("setting$response should be named logical vector containing class labels of the response that needs to be predicted ")
  if (secondary_targets != TRUE && secondary_targets != FALSE)
    stop("secondary_targets must be TRUE or FALSE")
  if (remove_direct_links != "no" && remove_direct_links != "ligand" && remove_direct_links != "ligand-receptor")
    stop("remove_direct_links must be 'no' or 'ligand' or 'ligand-receptor'")
  if(correct_topology == TRUE && !is.null(x$ltf_cutoff))
    warning("Because PPR-ligand-target matrix will be corrected for topology, the proposed cutoff on the ligand-tf matrix will be ignored (x$ltf_cutoff)")
  if(correct_topology == TRUE && algorithm != "PPR")
    warning("Topology correction is PPR-specific and makes no sense when the algorithm is not PPR")
  # here the data source weights are fixed; only the hyperparameters in x are optimized
  parameters_setting = list(model_name = "query_design", source_weights = source_weights)
  if (algorithm == "PPR") {
    if (correct_topology == TRUE){
      # with topology correction, no cutoff is applied on the ligand-tf matrix (ltf_cutoff = 0)
      parameters_setting = add_hyperparameters_parameter_settings(parameters_setting, lr_sig_hub = x$lr_sig_hub, gr_hub = x$gr_hub, ltf_cutoff = 0, algorithm = algorithm,damping_factor = x$damping_factor,correct_topology = TRUE)
    } else {
      parameters_setting = add_hyperparameters_parameter_settings(parameters_setting, lr_sig_hub = x$lr_sig_hub, gr_hub = x$gr_hub, ltf_cutoff = x$ltf_cutoff, algorithm = algorithm,damping_factor = x$damping_factor,correct_topology = FALSE)
    }
  }
  if (algorithm == "SPL" || algorithm == "direct"){
    parameters_setting = add_hyperparameters_parameter_settings(parameters_setting, lr_sig_hub = x$lr_sig_hub, gr_hub = x$gr_hub, ltf_cutoff = x$ltf_cutoff, algorithm = algorithm,damping_factor = NULL,correct_topology = FALSE)
  }
  # build the ligand-target model with these parameters and evaluate both target
  # gene prediction and ligand activity prediction on the validation settings
  output_evaluation = evaluate_model(parameters_setting, lr_network, sig_network, gr_network, settings,calculate_popularity_bias_target_prediction = FALSE,calculate_popularity_bias_ligand_prediction=FALSE,ncitations = ncitations, secondary_targets = secondary_targets, remove_direct_links = remove_direct_links, n_target_bins = 3, ...)
  ligands_evaluation = settings %>% sapply(function(x){x$from}) %>% unlist() %>% unique()
  # per importance measure: average the ligand activity performance over all settings,
  # then keep the measure with the best geometric mean of auroc and aupr_corrected
  ligand_activity_performance_setting_summary = output_evaluation$performances_ligand_prediction_single %>% select(-setting, -ligand) %>% group_by(importance_measure) %>% summarise_all(mean) %>% group_by(importance_measure) %>% mutate(geom_average = exp(mean(log(c(auroc,aupr_corrected)))))
  best_metric = ligand_activity_performance_setting_summary %>% ungroup() %>% filter(geom_average == max(geom_average)) %>% pull(importance_measure) %>% .[1]
  performances_ligand_prediction_single_summary = output_evaluation$performances_ligand_prediction_single %>% filter(importance_measure == best_metric)
  # average performances per ligand (median over settings) so ligands occurring in
  # several validation settings do not dominate the summary
  performances_target_prediction_averaged = ligands_evaluation %>% lapply(function(x){x}) %>%
    lapply(wrapper_average_performances, output_evaluation$performances_target_prediction,"median") %>% bind_rows() %>% drop_na()
  performances_ligand_prediction_single_summary_averaged = ligands_evaluation %>% lapply(function(x){x}) %>%
    lapply(wrapper_average_performances, performances_ligand_prediction_single_summary %>% select(-importance_measure),"median") %>% bind_rows() %>% drop_na()
  mean_auroc_target_prediction = performances_target_prediction_averaged$auroc %>% mean(na.rm = TRUE) %>% unique()
  mean_aupr_target_prediction = performances_target_prediction_averaged$aupr_corrected %>% mean(na.rm = TRUE) %>% unique()
  median_auroc_ligand_prediction = performances_ligand_prediction_single_summary_averaged$auroc %>% median(na.rm = TRUE) %>% unique()
  median_aupr_ligand_prediction = performances_ligand_prediction_single_summary_averaged$aupr_corrected %>% median(na.rm = TRUE) %>% unique()
  # the four objectives that are maximized during multi-objective optimization
  return(c(mean_auroc_target_prediction, mean_aupr_target_prediction, median_auroc_ligand_prediction, median_aupr_ligand_prediction))
}
#' @title Process the output of mlrmbo multi-objective optimization to extract optimal parameter values.
#'
#' @description \code{process_mlrmbo_nichenet_optimization} will process the output of multi-objective mlrmbo optimization. As a result, a list containing the optimal parameter values for model construction will be returned.
#'
#' @usage
#' process_mlrmbo_nichenet_optimization(optimization_results,source_names,parameter_set_index = NULL)
#'
#' @param optimization_results A list generated as output from multi-objective optimization by mlrMBO. Should contain the elements $pareto.front, $pareto.set See \code{mlrmbo_optimization}.
#' @param source_names Character vector containing the names of the data sources. The order of data source names accords to the order of weights in x$source_weights.
#' @param parameter_set_index Number indicating which of the proposed solutions must be selected to extract optimal parameters. If NULL: the solution with the highest geometric mean will be selected. Default: NULL.
#'
#' @return A list containing the parameter values leading to maximal performance and thus with the following elements: $source_weight_df, $lr_sig_hub, $gr_hub, $ltf_cutoff, $damping_factor
#'
#' @examples
#' \dontrun{
#' library(dplyr)
#' library(mlrMBO)
#' library(parallelMap)
#' additional_arguments_topology_correction = list(source_names = source_weights_df$source %>% unique(), algorithm = "PPR", correct_topology = TRUE,lr_network = lr_network, sig_network = sig_network, gr_network = gr_network, settings = lapply(expression_settings_validation,convert_expression_settings_evaluation), secondary_targets = FALSE, remove_direct_links = "no", cutoff_method = "quantile")
#' nr_datasources = additional_arguments_topology_correction$source_names %>% length()
#'
#' obj_fun_multi_topology_correction = makeMultiObjectiveFunction(name = "nichenet_optimization",description = "data source weight and hyperparameter optimization: expensive black-box function", fn = model_evaluation_optimization, par.set = makeParamSet( makeNumericVectorParam("source_weights", len = nr_datasources, lower = 0, upper = 1), makeNumericVectorParam("lr_sig_hub", len = 1, lower = 0, upper = 1), makeNumericVectorParam("gr_hub", len = 1, lower = 0, upper = 1), makeNumericVectorParam("damping_factor", len = 1, lower = 0, upper = 0.99)), has.simple.signature = FALSE,n.objectives = 4, noisy = FALSE,minimize = c(FALSE,FALSE,FALSE,FALSE))
#'
#' mlrmbo_optimization_result = lapply(1,mlrmbo_optimization, obj_fun = obj_fun_multi_topology_correction, niter = 3, ncores = 8, nstart = 100, additional_arguments = additional_arguments_topology_correction)
#' optimized_parameters = process_mlrmbo_nichenet_optimization(mlrmbo_optimization_result[[1]],additional_arguments_topology_correction$source_names)
#'
#' }
#'
#' @export
#'
process_mlrmbo_nichenet_optimization = function(optimization_results,source_names,parameter_set_index = NULL){
  requireNamespace("dplyr")
  requireNamespace("tibble")
  # allow passing the raw output of lapply(..., mlrmbo_optimization): unwrap a length-1 list
  if(length(optimization_results) == 1){
    optimization_results = optimization_results[[1]]
  }
  # input checks
  if (!is.list(optimization_results))
    stop("optimization_results should be a list!")
  if (!is.list(optimization_results$pareto.set))
    stop("optimization_results$pareto.set should be a list! Are you sure you provided the output of mlrMBO::mbo (multi-objective)?")
  if (!is.matrix(optimization_results$pareto.front))
    stop("optimization_results$pareto.front should be a matrix! Are you sure you provided the output of mlrMBO::mbo (multi-objective)?")
  if (!is.character(source_names))
    stop("source_names should be a character vector")
  if(!is.numeric(parameter_set_index) && !is.null(parameter_set_index))
    stop("parameter_set_index should be a number or NULL")
  # winning parameter set: by default the pareto-front solution with the highest
  # geometric mean of the objective values; take the first one in case of ties
  # (tbl_df() is deprecated in tibble, hence as_tibble())
  if(is.null(parameter_set_index)){
    parameter_set_index = optimization_results$pareto.front %>% as_tibble() %>% mutate(average = apply(.,1,function(x){exp(mean(log(x)))}), index = seq(nrow(.))) %>% filter(average == max(average)) %>% .$index %>% .[1]
  }
  if(parameter_set_index > nrow(optimization_results$pareto.front))
    stop("parameter_set_index may not be a number higher than the total number of proposed solutions")
  parameter_set = optimization_results$pareto.set[[parameter_set_index]]
  # data source weight model parameter: couple weights back to the source names
  source_weights = parameter_set$source_weights
  names(source_weights) = source_names
  # "hyperparameters"
  lr_sig_hub = parameter_set$lr_sig_hub
  gr_hub = parameter_set$gr_hub
  ltf_cutoff = parameter_set$ltf_cutoff
  damping_factor = parameter_set$damping_factor
  source_weight_df = tibble(source = names(source_weights), weight = source_weights)
  output_optimization = list(source_weight_df = source_weight_df, lr_sig_hub = lr_sig_hub, gr_hub = gr_hub,ltf_cutoff = ltf_cutoff, damping_factor = damping_factor)
  return(output_optimization)
}
#' @title Construct and evaluate a ligand-target model given input parameters with the purpose of parameter optimization for multi-ligand application.
#'
#' @description \code{model_evaluation_optimization_application} will take as input a setting of parameters (data source weights and hyperparameters) and layer-specific networks to construct a ligand-target matrix and evaluate its performance on input application settings (average performance for target gene prediction, as measured via the auroc and aupr).
#'
#' @usage
#' model_evaluation_optimization_application(x, source_names, algorithm, correct_topology, lr_network, sig_network, gr_network, settings, secondary_targets = FALSE, remove_direct_links = "no",classification_algorithm = "lda",damping_factor = NULL,...)
#'
#' @inheritParams model_evaluation_optimization
#' @param classification_algorithm The name of the classification algorithm to be applied. Should be supported by the caret package. Examples of algorithms we recommend: with embedded feature selection: "rf","glm","fda","glmnet","sdwd","gam","glmboost", "pls" (load "pls" package before!); without: "lda","naive_bayes", "pcaNNet". Please notice that not all these algorithms work when the features (i.e. ligand vectors) are categorical (i.e. discrete class assignments).
#' @param ... Additional arguments to \code{evaluate_multi_ligand_target_prediction}.
#'
#' @return A numeric vector of length 2 containing the average auroc and aupr for target gene prediction.
#'
#' @examples
#' \dontrun{
#' library(dplyr)
#' nr_datasources = source_weights_df$source %>% unique() %>% length()
#' test_input = list("source_weights" = rep(0.5, times = nr_datasources), "lr_sig_hub" = 0.5, "gr_hub" = 0.5, "damping_factor" = 0.5)
#' # test_evaluation_optimization = model_evaluation_optimization_application(test_input, source_weights_df$source %>% unique(), algorithm = "PPR", TRUE, lr_network, sig_network, gr_network, list(convert_expression_settings_evaluation(expression_settings_validation$TGFB_IL6_timeseries)), secondary_targets = FALSE, remove_direct_links = "no", classification_algorithm = "lda", var_imps = FALSE, cv_number = 5, cv_repeats = 4)
#' }
#'
#' @export
#'
model_evaluation_optimization_application = function(x, source_names, algorithm, correct_topology, lr_network, sig_network, gr_network, settings, secondary_targets = FALSE, remove_direct_links = "no",classification_algorithm = "lda",damping_factor = NULL,...){
  requireNamespace("dplyr")
  # When the damping factor is a fixed (non-optimized) parameter, it is passed
  # separately and injected into the parameter list x here.
  if (!is.null(damping_factor) && is.null(x$damping_factor)){
    x$damping_factor = damping_factor
  }
  # input checks: all conditions below compare scalars, so the short-circuiting
  # && / || operators are used instead of the elementwise & / |
  if (!is.list(x))
    stop("x should be a list!")
  if (!is.numeric(x$source_weights))
    stop("x$source_weights should be a numeric vector")
  if (x$lr_sig_hub < 0 || x$lr_sig_hub > 1)
    stop("x$lr_sig_hub must be a number between 0 and 1 (0 and 1 included)")
  if (x$gr_hub < 0 || x$gr_hub > 1)
    stop("x$gr_hub must be a number between 0 and 1 (0 and 1 included)")
  if(is.null(x$ltf_cutoff)){
    if( (algorithm == "PPR" || algorithm == "SPL") && correct_topology == FALSE)
      warning("Did you forget to give a value to x$ltf_cutoff?")
  } else {
    if (x$ltf_cutoff < 0 || x$ltf_cutoff > 1)
      stop("x$ltf_cutoff must be a number between 0 and 1 (0 and 1 included)")
  }
  if(algorithm == "PPR"){
    if (x$damping_factor < 0 || x$damping_factor >= 1)
      stop("x$damping_factor must be a number between 0 and 1 (0 included, 1 not)")
  }
  if (algorithm != "PPR" && algorithm != "SPL" && algorithm != "direct")
    stop("algorithm must be 'PPR' or 'SPL' or 'direct'")
  if (correct_topology != TRUE && correct_topology != FALSE)
    stop("correct_topology must be TRUE or FALSE")
  if (!is.data.frame(lr_network))
    stop("lr_network must be a data frame or tibble object")
  if (!is.data.frame(sig_network))
    stop("sig_network must be a data frame or tibble object")
  if (!is.data.frame(gr_network))
    stop("gr_network must be a data frame or tibble object")
  if (!is.list(settings))
    stop("settings should be a list!")
  if(!is.character(settings[[1]]$from) || !is.character(settings[[1]]$name))
    stop("setting$from and setting$name should be character vectors")
  if(!is.logical(settings[[1]]$response) || is.null(names(settings[[1]]$response)))
    stop("setting$response should be named logical vector containing class labels of the response that needs to be predicted ")
  if (secondary_targets != TRUE && secondary_targets != FALSE)
    stop("secondary_targets must be TRUE or FALSE")
  if (remove_direct_links != "no" && remove_direct_links != "ligand" && remove_direct_links != "ligand-receptor")
    stop("remove_direct_links must be 'no' or 'ligand' or 'ligand-receptor'")
  if(!is.character(source_names))
    stop("source_names should be a character vector")
  if(length(source_names) != length(x$source_weights))
    stop("Length of source_names should be the same as length of x$source_weights")
  if(correct_topology == TRUE && !is.null(x$ltf_cutoff))
    warning("Because PPR-ligand-target matrix will be corrected for topology, the proposed cutoff on the ligand-tf matrix will be ignored (x$ltf_cutoff)")
  if(correct_topology == TRUE && algorithm != "PPR")
    warning("Topology correction is PPR-specific and makes no sense when the algorithm is not PPR")
  if(!is.character(classification_algorithm))
    stop("classification_algorithm should be a character vector of length 1")
  # couple every weight to its data source name (order of source_names is assumed
  # to correspond to the order of x$source_weights)
  names(x$source_weights) = source_names
  parameters_setting = list(model_name = "query_design", source_weights = x$source_weights)
  if (algorithm == "PPR") {
    if (correct_topology == TRUE){
      # with topology correction, no cutoff is applied on the ligand-tf matrix (ltf_cutoff = 0)
      parameters_setting = add_hyperparameters_parameter_settings(parameters_setting, lr_sig_hub = x$lr_sig_hub, gr_hub = x$gr_hub, ltf_cutoff = 0, algorithm = algorithm,damping_factor = x$damping_factor,correct_topology = TRUE)
    } else {
      parameters_setting = add_hyperparameters_parameter_settings(parameters_setting, lr_sig_hub = x$lr_sig_hub, gr_hub = x$gr_hub, ltf_cutoff = x$ltf_cutoff, algorithm = algorithm,damping_factor = x$damping_factor,correct_topology = FALSE)
    }
  }
  if (algorithm == "SPL" || algorithm == "direct"){
    parameters_setting = add_hyperparameters_parameter_settings(parameters_setting, lr_sig_hub = x$lr_sig_hub, gr_hub = x$gr_hub, ltf_cutoff = x$ltf_cutoff, algorithm = algorithm,damping_factor = NULL,correct_topology = FALSE)
  }
  # build the ligand-target model and evaluate multi-ligand target gene prediction
  # with the requested caret classification algorithm
  output_evaluation = evaluate_model_application_multi_ligand(parameters_setting, lr_network, sig_network, gr_network, settings, secondary_targets = secondary_targets, remove_direct_links = remove_direct_links, classification_algorithm = classification_algorithm,...)
  mean_auroc_target_prediction = output_evaluation$performances_target_prediction$auroc %>% mean()
  mean_aupr_target_prediction = output_evaluation$performances_target_prediction$aupr_corrected %>% mean()
  # the two objectives that are maximized during multi-objective optimization
  return(c(mean_auroc_target_prediction, mean_aupr_target_prediction))
}
#' @title Estimate data source weights of data sources of interest based on leave-one-in and leave-one-out characterization performances.
#'
#' @description \code{estimate_source_weights_characterization} will estimate data source weights of data sources of interest based on a model that was trained to predict weights of data sources based on leave-one-in and leave-one-out characterization performances.
#'
#' @usage
#' estimate_source_weights_characterization(loi_performances,loo_performances,source_weights_df, sources_oi, random_forest =FALSE)
#'
#' @param loi_performances Performances of models in which a particular data source of interest was the only data source in or the ligand-signaling or the gene regulatory network.
#' @param loo_performances Performances of models in which a particular data source of interest was removed from the ligand-signaling or the gene regulatory network before model construction.
#' @param source_weights_df A data frame / tibble containing the weights associated to each individual data source. Sources with higher weights will contribute more to the final model performance (required columns: source, weight). Note that only interactions described by sources included here, will be retained during model construction.
#' @param sources_oi The names of the data sources of which data source weights should be estimated based on leave-one-in and leave-one-out performances.
#' @param random_forest Indicate whether for the regression between leave-one-in + leave-one-out performances and data source weights a random forest model should be trained (TRUE) or a linear model (FALSE). Default: FALSE
#'
#' @return A list containing two elements. $source_weights_df (the input source_weights_df extended by the estimated source_weighs for data sources of interest) and $model (model object of the regression between leave-one-in, leave-one-out performances and data source weights).
#'
#' @importFrom purrr reduce
#' @importFrom randomForest randomForest
#'
#' @examples
#' \dontrun{
#' library(dplyr)
#' # run characterization loi
#' settings = lapply(expression_settings_validation[1:4], convert_expression_settings_evaluation)
#' weights_settings_loi = prepare_settings_leave_one_in_characterization(lr_network = lr_network, sig_network = sig_network, gr_network = gr_network, source_weights_df)
#' weights_settings_loi = lapply(weights_settings_loi,add_hyperparameters_parameter_settings, lr_sig_hub = 0.25,gr_hub = 0.5,ltf_cutoff = 0,algorithm = "PPR", damping_factor = 0.2, correct_topology = TRUE)
#' doMC::registerDoMC(cores = 4)
#' job_characterization_loi = parallel::mclapply(weights_settings_loi[1:4], evaluate_model,lr_network = lr_network, sig_network = sig_network, gr_network = gr_network, settings,calculate_popularity_bias_target_prediction = FALSE, calculate_popularity_bias_ligand_prediction = FALSE, ncitations, mc.cores = 4)
#' loi_performances = process_characterization_target_prediction_average(job_characterization_loi)
#' # run characterization loo
#' weights_settings_loo = prepare_settings_leave_one_out_characterization(lr_network = lr_network, sig_network = sig_network, gr_network = gr_network, source_weights_df)
#' weights_settings_loo = lapply(weights_settings_loo,add_hyperparameters_parameter_settings, lr_sig_hub = 0.25,gr_hub = 0.5,ltf_cutoff = 0,algorithm = "PPR", damping_factor = 0.2, correct_topology = TRUE)
#' doMC::registerDoMC(cores = 4)
#' job_characterization_loo = parallel::mclapply(weights_settings_loo[1:4], evaluate_model,lr_network = lr_network, sig_network = sig_network, gr_network = gr_network, settings,calculate_popularity_bias_target_prediction = FALSE, calculate_popularity_bias_ligand_prediction = FALSE,ncitations,mc.cores = 4)
#' loo_performances = process_characterization_target_prediction_average(job_characterization_loo)
#' # run the regression
#' sources_oi = c("kegg_cytokines")
#' output = estimate_source_weights_characterization(loi_performances,loo_performances,source_weights_df %>% filter(source != "kegg_cytokines"), sources_oi, random_forest =FALSE)
#' }
#'
#' @export
#'
estimate_source_weights_characterization = function(loi_performances,loo_performances,source_weights_df, sources_oi, random_forest =FALSE){
  requireNamespace("dplyr")
  requireNamespace("tibble")
  # input checks (scalar conditions use && ; any() replaces the sum(...) != 0 idiom)
  if(!is.data.frame(loi_performances))
    stop("loi_performances should be a data frame")
  if(!is.character(loi_performances$model_name))
    stop("loi_performances$model_name should be a character vector")
  if(!is.data.frame(loo_performances))
    stop("loo_performances should be a data frame")
  if(!is.character(loo_performances$model_name))
    stop("loo_performances$model_name should be a character vector")
  if (!is.data.frame(source_weights_df) || any(source_weights_df$weight > 1))
    stop("source_weights_df must be a data frame or tibble object and no data source weight may be higher than 1")
  if(!is.character(sources_oi))
    stop("sources_oi should be a character vector")
  if(random_forest != TRUE && random_forest != FALSE)
    stop("random_forest should be TRUE or FALSE")
  # train the regression (weights ~ loi + loo performances) on all data sources
  # except the sources of interest ...
  loi_performances_train = loi_performances %>% filter((model_name %in% sources_oi) == FALSE)
  loo_performances_train = loo_performances %>% filter((model_name %in% sources_oi) == FALSE)
  # ... and predict new weights for the sources of interest (+ the complete model as reference)
  loi_performances_test = loi_performances %>% filter(model_name == "complete_model" | (model_name %in% sources_oi))
  loo_performances_test = loo_performances %>% filter(model_name == "complete_model" | (model_name %in% sources_oi))
  output_regression_model = regression_characterization_optimization(loi_performances_train, loo_performances_train, source_weights_df, random_forest = random_forest)
  new_source_weight_df = assign_new_weight(loi_performances_test, loo_performances_test,output_regression_model,source_weights_df)
  return(list(source_weights_df = new_source_weight_df, model = output_regression_model))
}
#' @title Construct and evaluate a ligand-target model given input parameters with the purpose of evaluating cross-validation models.
#'
#' @description \code{evaluate_model_cv} will take as input a setting of parameters (data source weights and hyperparameters) and layer-specific networks to construct a ligand-target matrix and calculate the model's performance in target gene prediction and feature importance scores for ligand prediction).
#'
#' @usage
#' evaluate_model_cv(parameters_setting, lr_network, sig_network, gr_network, settings, secondary_targets = FALSE, remove_direct_links = "no",...)
#'
#' @inheritParams evaluate_model
#'
#' @return A list containing following elements: $performances_target_prediction, $importances_ligand_prediction.
#'
#' @importFrom tibble tibble
#'
#' @examples
#' \dontrun{
#' library(dplyr)
#' settings = lapply(expression_settings_validation[1:4], convert_expression_settings_evaluation)
#' weights_settings_loi = prepare_settings_leave_one_in_characterization(lr_network,sig_network, gr_network, source_weights_df)
#' weights_settings_loi = lapply(weights_settings_loi,add_hyperparameters_parameter_settings, lr_sig_hub = 0.25,gr_hub = 0.5,ltf_cutoff = 0,algorithm = "PPR",damping_factor = 0.8,correct_topology = TRUE)
#' doMC::registerDoMC(cores = 8)
#' output_characterization = parallel::mclapply(weights_settings_loi[1:3],evaluate_model_cv,lr_network,sig_network, gr_network,settings,calculate_popularity_bias_target_prediction = TRUE, calculate_popularity_bias_ligand_prediction = TRUE, ncitations, mc.cores = 3)
#' }
#'
#' @export
#'
# Construct a ligand-target model for one parameter setting and evaluate it
# (cross-validation variant): target gene prediction performance, per-ligand
# feature importances, and single-importance ligand activity prediction
# performance.  See the roxygen header above for argument details.
evaluate_model_cv = function(parameters_setting, lr_network, sig_network, gr_network, settings,secondary_targets = FALSE, remove_direct_links = "no", ...){
  requireNamespace("dplyr")
  # input check (all conditions below are scalar, hence && / ||)
  if (!is.list(parameters_setting))
    stop("parameters_setting should be a list!")
  if (!is.character(parameters_setting$model_name))
    stop("parameters_setting$model_name should be a character vector")
  if (!is.numeric(parameters_setting$source_weights) || is.null(names(parameters_setting$source_weights)))
    stop("parameters_setting$source_weights should be a named numeric vector")
  if (parameters_setting$lr_sig_hub < 0 || parameters_setting$lr_sig_hub > 1)
    stop("parameters_setting$lr_sig_hub must be a number between 0 and 1 (0 and 1 included)")
  if (parameters_setting$gr_hub < 0 || parameters_setting$gr_hub > 1)
    stop("parameters_setting$gr_hub must be a number between 0 and 1 (0 and 1 included)")
  if(is.null(parameters_setting$ltf_cutoff)){
    # PPR and SPL normally expect a ligand-tf cutoff; warn when it is missing
    if( parameters_setting$algorithm == "PPR" || parameters_setting$algorithm == "SPL" )
      warning("parameters_setting$ltf_cutoff is NULL; did you forget to give it a value?")
  } else {
    if (parameters_setting$ltf_cutoff < 0 || parameters_setting$ltf_cutoff > 1)
      stop("parameters_setting$ltf_cutoff must be a number between 0 and 1 (0 and 1 included)")
  }
  if (parameters_setting$algorithm != "PPR" && parameters_setting$algorithm != "SPL" && parameters_setting$algorithm != "direct")
    stop("parameters_setting$algorithm must be 'PPR' or 'SPL' or 'direct'")
  if(parameters_setting$algorithm == "PPR"){
    if (parameters_setting$damping_factor < 0 || parameters_setting$damping_factor >= 1)
      stop("parameters_setting$damping_factor must be a number between 0 and 1 (0 included, 1 not)")
  }
  if (parameters_setting$correct_topology != TRUE && parameters_setting$correct_topology != FALSE)
    stop("parameters_setting$correct_topology must be TRUE or FALSE")
  if (!is.data.frame(lr_network))
    stop("lr_network must be a data frame or tibble object")
  if (!is.data.frame(sig_network))
    stop("sig_network must be a data frame or tibble object")
  if (!is.data.frame(gr_network))
    stop("gr_network must be a data frame or tibble object")
  if (!is.list(settings))
    stop("settings should be a list!")
  if(!is.character(settings[[1]]$from) || !is.character(settings[[1]]$name))
    stop("setting$from and setting$name should be character vectors")
  if(!is.logical(settings[[1]]$response) || is.null(names(settings[[1]]$response)))
    stop("setting$response should be named logical vector containing class labels of the response that needs to be predicted ")
  if (secondary_targets != TRUE && secondary_targets != FALSE)
    stop("secondary_targets must be TRUE or FALSE")
  if (remove_direct_links != "no" && remove_direct_links != "ligand" && remove_direct_links != "ligand-receptor")
    stop("remove_direct_links must be 'no' or 'ligand' or 'ligand-receptor'")
  # construct model
  ligands = extract_ligands_from_settings(settings)
  output_model_construction = construct_model(parameters_setting, lr_network, sig_network, gr_network, ligands, secondary_targets = secondary_targets, remove_direct_links = remove_direct_links)
  model_name = output_model_construction$model_name
  ligand_target_matrix = output_model_construction$model
  # ligand_target_matrix_discrete = ligand_target_matrix %>% make_discrete_ligand_target_matrix(...)
  ## if in ligand-target matrix: all targets are zero for some ligands
  ligands_zero = ligand_target_matrix %>% colnames() %>% sapply(function(ligand){sum(ligand_target_matrix[,ligand]) == 0}) %>% .[. == TRUE]
  # fixed: parenthesis was misplaced as length(ligands_zero > 0)
  if (length(ligands_zero) > 0){
    noisy_target_scores = runif(nrow(ligand_target_matrix), min = 0, max = min(ligand_target_matrix[ligand_target_matrix>0])) # give ligands not in model a very low noisy random score; why not all 0 --> ties --> problem aupr calculation
    ligand_target_matrix[,names(ligands_zero)] = noisy_target_scores
  }
  # transcriptional response evaluation
  performances_target_prediction = bind_rows(lapply(settings,evaluate_target_prediction, ligand_target_matrix))
  # performances_target_prediction_discrete = bind_rows(lapply(settings,evaluate_target_prediction,ligand_target_matrix_discrete))
  # performances_target_prediction = performances_target_prediction %>% full_join(performances_target_prediction_discrete, by = c("setting", "ligand"))
  # ligand activity state prediction: importance of each candidate ligand
  # for every validation setting, one ligand at a time
  all_ligands = unlist(extract_ligands_from_settings(settings, combination = FALSE))
  settings_ligand_pred = convert_settings_ligand_prediction(settings, all_ligands, validation = TRUE, single = TRUE)
  ligand_importances = bind_rows(lapply(settings_ligand_pred, get_single_ligand_importances, ligand_target_matrix[, all_ligands]))
  # ligand_importances_discrete = bind_rows(lapply(settings_ligand_pred, get_single_ligand_importances, ligand_target_matrix_discrete[, all_ligands]))
  # settings_ligand_pred = convert_settings_ligand_prediction(settings, all_ligands, validation = TRUE, single = FALSE)
  # ligand_importances_glm = bind_rows(lapply(settings_ligand_pred, get_multi_ligand_importances, ligand_target_matrix[,all_ligands], algorithm = "glm", cv = FALSE)) %>% rename(glm_imp = importance)
  # all_importances = full_join(ligand_importances, ligand_importances_glm, by = c("setting","test_ligand","ligand")) %>% full_join(ligand_importances_discrete, by = c("setting","test_ligand", "ligand"))
  # correlation-based importances can be NA (e.g. constant predictions);
  # treat those as zero importance
  ligand_importances$pearson[is.na(ligand_importances$pearson)] = 0
  ligand_importances$spearman[is.na(ligand_importances$spearman)] = 0
  ligand_importances$pearson_log_pval[is.na(ligand_importances$pearson_log_pval)] = 0
  ligand_importances$spearman_log_pval[is.na(ligand_importances$spearman_log_pval)] = 0
  # keep only importance measures without any remaining NA
  all_importances = ligand_importances %>% select_if(.predicate = function(x){sum(is.na(x)) == 0})
  performances_ligand_prediction_single = all_importances$setting %>% unique() %>% lapply(function(x){x}) %>%
    lapply(wrapper_evaluate_single_importances_ligand_prediction,all_importances) %>%
    bind_rows() %>% inner_join(all_importances %>% distinct(setting,ligand))
  # performances_ligand_prediction_single = evaluate_single_importances_ligand_prediction(all_importances, "median")
  return(list(performances_target_prediction = performances_target_prediction, importances_ligand_prediction = all_importances, performances_ligand_prediction_single = performances_ligand_prediction_single))
}
|
/P5/P_5.R
|
no_license
|
JoseAngelGarcia/SimulacionNano
|
R
| false
| false
| 1,691
|
r
| ||
library(data.table)
library(dplyr)
library(Matrix)
library(BuenColors)
library(stringr)
library(cowplot)
library(irlba)
#-----------
# Parameters
#-----------
# n_cells            Number of cells from each group to be simulated; either a number (all the same) or a vector
#                    of length(which_celltypes)
# which_celltypes    Character vector of cell-type names (columns of `bulk`) to simulate cells from
# n_frags_per_cell   number of fragments in peaks to be simulated per single cell
# rate_noise         number between 0 (perfect downsample) and 1 (nonsense) for noise
# seed               random seed for reproducibility
# shuffle            Randomly order the resulting cells and peaks
# Bulk fragment counts per peak (rows) for each hematopoietic cell type
# (columns); read once as a global and consumed by simulate_scatac() below.
# NOTE(review): assumes one row per peak in exp100-bulk.counts.txt -- confirm.
bulk <- data.matrix(data.frame(fread("../data/exp100-bulk.counts.txt")))
colnames(bulk) <- c("B", "CD4", "CD8", "CLP", "CMP", "Ery", "GMP", "GMP-A", "GMP-B", "GMP-C",
                    "HSC", "LMPP", "MCP", "mDC", "Mega", "MEP", "Mono", "MPP",
                    "NK", "pDC", "GMPunknown")
# Simulate a peaks-by-cells scATAC count matrix by downsampling the global
# `bulk` counts matrix for the requested cell types.
# Returns a sparse Matrix (peaks x cells) with columns named "<celltype>_<i>",
# entries in {0, 1, 2} (sum of two simulated haplotypes).
# NOTE(review): the `shuffle` argument is accepted but never used -- cells
# and peaks are returned in deterministic order.
simulate_scatac <- function(n_cells, which_celltypes, n_frags_per_cell = 1000,
                            rate_noise = 0, seed = 100, shuffle = FALSE){
  # Reproducibility
  set.seed(seed)
  which_celltypes <- sort(which_celltypes)
  stopifnot(rate_noise < 1)
  stopifnot(n_frags_per_cell > 100)
  n_peaks <- dim(bulk)[1]
  #--
  # Set up cell labels
  #--
  if(length(n_cells) > 1){
    stopifnot(length(which_celltypes) == length(n_cells))
    # Generate cell labels: n_cells[i] copies of each (sorted) cell type
    cell_labels <- sapply(1:length(which_celltypes), function(i){
      rep(which_celltypes[i], n_cells[i])
    }) %>% unlist() %>% sort()
  } else {
    n_groups <- length(which_celltypes)
    # NOTE(review): this replicates each cell type n_cells * n_groups times
    # (e.g. 50 * 6 = 300 cells per type), not n_cells times as the header
    # comment suggests -- verify this is intended.
    cell_labels <- sort(rep(which_celltypes, n_cells*n_groups))
  }
  final_names <- paste0(cell_labels, "_", as.character(1:length(cell_labels)))
  #-------------------
  # Simulate true data
  #-------------------
  # Generate cell-type specific peaks: one block of cells per cell type,
  # drawn from that type's bulk peak profile
  lapply(which_celltypes, function(celltype){
    # Apply different rates per cell depending on group label for generating cell-type specific peaks
    n_cells_this_celltype <- sum(cell_labels == celltype)
    counts_celltype <- bulk[,celltype]
    # Define probabilities
    # Probability of observing a fragment in each peak: bulk-proportional
    # signal scaled to the expected per-cell fragment count, plus a uniform
    # noise floor; the 0.5 factors account for the two alleles added below
    prob_per_peaks <- counts_celltype/sum(counts_celltype) * (n_frags_per_cell*0.5 * (1-rate_noise)) + ((rate_noise*n_frags_per_cell)/n_peaks*0.5)
    # Cap probabilities at something sensible
    prob_per_peaks <- ifelse(prob_per_peaks > 0.9, 0.9, prob_per_peaks)
    # Represent the two haplotypes as two random draws
    mat1 <- (matrix(rbinom(n_peaks*n_cells_this_celltype, size = 1, prob = prob_per_peaks),
                    ncol = n_cells_this_celltype, byrow = FALSE) )
    mat2 <- (matrix(rbinom(n_peaks*n_cells_this_celltype, size = 1, prob = prob_per_peaks),
                    ncol = n_cells_this_celltype, byrow = FALSE) )
    mat <- mat1 + mat2
    Matrix(mat)
  }) %>% do.call(what = "cbind") -> sparse_matrix
  colnames(sparse_matrix) <- final_names
  sparse_matrix
}
# Here, we call the function above to simulate data: the same 6-cell-type
# design at high noise (rate_noise = 0.8) and with no noise, for comparison.
simulated_noisy <- simulate_scatac(50, c("Ery", "CMP", "CD8", "HSC", "CD4", "NK"), rate_noise = 0.8)
simulated_clean <- simulate_scatac(50, c("Ery", "CMP", "CD8", "HSC", "CD4", "NK"), rate_noise = 0)
# Do a basic LSI embedding to assess
# Latent semantic indexing (TF-IDF followed by truncated SVD) of a
# peaks-by-cells matrix.
#
# x:      matrix (typically sparse) of fragment counts, peaks x cells.
# n_dims: number of singular vectors to keep; defaults to 3, matching the
#         previous hard-coded behaviour, so existing callers are unaffected.
# Returns a cells x n_dims matrix of LSI coordinates, with each column
# scaled by the corresponding singular value.
compute_LSI <- function(x, n_dims = 3){
  # Term frequency: normalise each cell (column) to sum to 1.
  nfreqs <- t(t(x) / Matrix::colSums(x))
  # Inverse document frequency per peak.
  idf <- as(log(1 + ncol(x) / Matrix::rowSums(x)), "sparseVector")
  tf_idf_counts <- as(Diagonal(x=as.vector(idf)), "sparseMatrix") %*% nfreqs
  # Truncated SVD keeping the top n_dims components.
  SVD_x <- irlba(tf_idf_counts, n_dims, n_dims)
  d_diag = matrix(0, nrow=length(SVD_x$d), ncol=length(SVD_x$d))
  diag(d_diag) = SVD_x$d
  # Scale the right singular vectors by the singular values.
  LSI_x_final = t(d_diag %*% t(SVD_x$v))
  LSI_x_final
}
# Function to do LSI and then create the corresponding data frame
# Run LSI on a simulated counts matrix and assemble a plotting data frame
# holding LSI components 2 and 3, the cell-type label (the part of each
# column name before the first underscore), and the full cell id.
makeLSI_df <- function(simulated){
  cell_ids <- colnames(simulated)
  embedding <- compute_LSI(simulated)
  data.frame(
    LSI_2 = embedding[, 2],
    LSI_3 = embedding[, 3],
    celltype = str_split_fixed(cell_ids, "_", 2)[, 1],
    cell_id = cell_ids
  )
}
# Create two LSI dfs to compare
LSI_df_noise <- makeLSI_df(simulated_noisy)
LSI_df_clean <- makeLSI_df(simulated_clean)
# Scatter plots of LSI components 2 vs 3, coloured by cell type.
# NOTE(review): shuf() is presumably BuenColors' row shuffler (to reduce
# over-plotting bias) -- confirm.
p1 <- ggplot(shuf(LSI_df_clean), aes(x = LSI_2, y = LSI_3, color = celltype)) +
  geom_point(size = 1) + scale_color_manual(values = jdb_color_maps) +
  ggtitle("clean - simulated")
p2 <- ggplot(shuf(LSI_df_noise), aes(x = LSI_2, y = LSI_3, color = celltype)) +
  geom_point(size = 1) + scale_color_manual(values = jdb_color_maps) +
  ggtitle("noisy - simulated")
# Save the two panels side by side.
cowplot::ggsave(cowplot::plot_grid(p1, p2, nrow = 1),
                filename = "../output/simulated_comparison.pdf", width = 9, height = 4)
|
/bonemarrow/code/00_simulate_functions.R
|
no_license
|
caleblareau/simulate_singlecell_frombulk
|
R
| false
| false
| 4,928
|
r
|
library(data.table)
library(dplyr)
library(Matrix)
library(BuenColors)
library(stringr)
library(cowplot)
library(irlba)
#-----------
# Parameters
#-----------
# n_cells            Number of cells from each group to be simulated; either a number (all the same) or a vector
#                    of length(which_celltypes)
# which_celltypes    Character vector of cell-type names (columns of `bulk`) to simulate cells from
# n_frags_per_cell   number of fragments in peaks to be simulated per single cell
# rate_noise         number between 0 (perfect downsample) and 1 (nonsense) for noise
# seed               random seed for reproducibility
# shuffle            Randomly order the resulting cells and peaks
bulk <- data.matrix(data.frame(fread("../data/exp100-bulk.counts.txt")))
colnames(bulk) <- c("B", "CD4", "CD8", "CLP", "CMP", "Ery", "GMP", "GMP-A", "GMP-B", "GMP-C",
"HSC", "LMPP", "MCP", "mDC", "Mega", "MEP", "Mono", "MPP",
"NK", "pDC", "GMPunknown")
# Simulate a peaks-by-cells scATAC count matrix by downsampling the global
# `bulk` counts matrix for the requested cell types.
# Returns a sparse Matrix (peaks x cells) with columns named "<celltype>_<i>",
# entries in {0, 1, 2} (sum of two simulated haplotypes).
# NOTE(review): the `shuffle` argument is accepted but never used -- cells
# and peaks are returned in deterministic order.
simulate_scatac <- function(n_cells, which_celltypes, n_frags_per_cell = 1000,
                            rate_noise = 0, seed = 100, shuffle = FALSE){
  # Reproducibility
  set.seed(seed)
  which_celltypes <- sort(which_celltypes)
  stopifnot(rate_noise < 1)
  stopifnot(n_frags_per_cell > 100)
  n_peaks <- dim(bulk)[1]
  #--
  # Set up cell labels
  #--
  if(length(n_cells) > 1){
    stopifnot(length(which_celltypes) == length(n_cells))
    # Generate cell labels: n_cells[i] copies of each (sorted) cell type
    cell_labels <- sapply(1:length(which_celltypes), function(i){
      rep(which_celltypes[i], n_cells[i])
    }) %>% unlist() %>% sort()
  } else {
    n_groups <- length(which_celltypes)
    # NOTE(review): this replicates each cell type n_cells * n_groups times
    # (e.g. 50 * 6 = 300 cells per type), not n_cells times as the header
    # comment suggests -- verify this is intended.
    cell_labels <- sort(rep(which_celltypes, n_cells*n_groups))
  }
  final_names <- paste0(cell_labels, "_", as.character(1:length(cell_labels)))
  #-------------------
  # Simulate true data
  #-------------------
  # Generate cell-type specific peaks: one block of cells per cell type,
  # drawn from that type's bulk peak profile
  lapply(which_celltypes, function(celltype){
    # Apply different rates per cell depending on group label for generating cell-type specific peaks
    n_cells_this_celltype <- sum(cell_labels == celltype)
    counts_celltype <- bulk[,celltype]
    # Define probabilities
    # Probability of observing a fragment in each peak: bulk-proportional
    # signal scaled to the expected per-cell fragment count, plus a uniform
    # noise floor; the 0.5 factors account for the two alleles added below
    prob_per_peaks <- counts_celltype/sum(counts_celltype) * (n_frags_per_cell*0.5 * (1-rate_noise)) + ((rate_noise*n_frags_per_cell)/n_peaks*0.5)
    # Cap probabilities at something sensible
    prob_per_peaks <- ifelse(prob_per_peaks > 0.9, 0.9, prob_per_peaks)
    # Represent the two haplotypes as two random draws
    mat1 <- (matrix(rbinom(n_peaks*n_cells_this_celltype, size = 1, prob = prob_per_peaks),
                    ncol = n_cells_this_celltype, byrow = FALSE) )
    mat2 <- (matrix(rbinom(n_peaks*n_cells_this_celltype, size = 1, prob = prob_per_peaks),
                    ncol = n_cells_this_celltype, byrow = FALSE) )
    mat <- mat1 + mat2
    Matrix(mat)
  }) %>% do.call(what = "cbind") -> sparse_matrix
  colnames(sparse_matrix) <- final_names
  sparse_matrix
}
# Here, we call the function above to simulate data
simulated_noisy <- simulate_scatac(50, c("Ery", "CMP", "CD8", "HSC", "CD4", "NK"), rate_noise = 0.8)
simulated_clean <- simulate_scatac(50, c("Ery", "CMP", "CD8", "HSC", "CD4", "NK"), rate_noise = 0)
# Do a basic LSI embedding to assess
# Latent semantic indexing (TF-IDF followed by truncated SVD) of a
# peaks-by-cells matrix; returns a cells x 3 matrix of LSI coordinates,
# each column scaled by the corresponding singular value.
compute_LSI <- function(x){
  # Term frequency: normalise each cell (column) to sum to 1.
  nfreqs <- t(t(x) / Matrix::colSums(x))
  # Inverse document frequency per peak.
  idf <- as(log(1 + ncol(x) / Matrix::rowSums(x)), "sparseVector")
  tf_idf_counts <- as(Diagonal(x=as.vector(idf)), "sparseMatrix") %*% nfreqs
  # Truncated SVD keeping the top 3 components.
  SVD_x <- irlba(tf_idf_counts, 3, 3)
  d_diag = matrix(0, nrow=length(SVD_x$d), ncol=length(SVD_x$d))
  diag(d_diag) = SVD_x$d
  # Scale the right singular vectors by the singular values.
  LSI_x_final = t(d_diag %*% t(SVD_x$v))
  LSI_x_final
}
# Function to do LSI and then create the corresponding data frame
# Run compute_LSI() on a simulated counts matrix and build a data frame for
# plotting: LSI components 2 and 3, the cell-type label (text before the
# first "_" in the column name), and the full cell id.
makeLSI_df <- function(simulated){
  # Compute LSI and extract cell types from previous simulation
  LSI_dims <- compute_LSI(simulated)
  celltypes <- str_split_fixed(colnames(simulated), "_", 2)[,1]
  # Make one data frame for plotting
  LSI_df <- data.frame(
    LSI_2 = LSI_dims[,2],
    LSI_3 = LSI_dims[,3],
    celltype = celltypes,
    cell_id = colnames(simulated)
  )
  LSI_df
}
# Create two LSI dfs to compare
LSI_df_noise <- makeLSI_df(simulated_noisy)
LSI_df_clean <- makeLSI_df(simulated_clean)
p1 <- ggplot(shuf(LSI_df_clean), aes(x = LSI_2, y = LSI_3, color = celltype)) +
geom_point(size = 1) + scale_color_manual(values = jdb_color_maps) +
ggtitle("clean - simulated")
p2 <- ggplot(shuf(LSI_df_noise), aes(x = LSI_2, y = LSI_3, color = celltype)) +
geom_point(size = 1) + scale_color_manual(values = jdb_color_maps) +
ggtitle("noisy - simulated")
cowplot::ggsave(cowplot::plot_grid(p1, p2, nrow = 1),
filename = "../output/simulated_comparison.pdf", width = 9, height = 4)
|
# Longitudinal analysis of banana palm data: linear mixed models of the
# yearly number of banana bunches (bn) per palm (palmId) over time.
# Import the long-format data.
# NOTE(review): setwd() with an absolute user-specific path makes this
# script non-portable; prefer relative paths.
setwd("/Users/epellegrin/Desktop/data-science/MIASHS/Analyse des données de Panels/TP")
long.data = read.csv2("long_data.csv",sep = ";")
#
dim(long.data)
head(long.data)
library(ggplot2)
ggplot(long.data, aes(time,bn))+geom_point()
ggplot(long.data, aes(time,bn, group=palmId))+geom_point()+geom_line()
ggplot(long.data, aes(time,bn, group=palmId))+geom_point()+geom_line()+geom_smooth(method ="lm", se= FALSE)
## Per-palm evolution of the number of bunches over time
## bn: number of bunches as a function of time
ggplot(long.data, aes(time,bn))+geom_point()+geom_line()+ facet_wrap(~palmId)
## Simple linear regression of the number of banana bunches
# model: y_ij = beta1 + beta2*x_i + epsilon_ij
mod <-lm(bn ~ time, data = long.data)
res <- resid(mod)
summary(mod)
plot(mod)
ggplot(long.data, aes(palmId,res) )+geom_point()
# Reorder palms by their residuals
ggplot(long.data, aes(reorder(palmId,res),res) )+geom_point()+geom_hline(yintercept = 0,colour=2)
## Some palms have consistently negative residuals, others consistently positive
# intercept: 34 (34 bunches per year)
# time = -4: a decrease of 4 bunches per year
## Introduce a random effect on the intercept
# model: y_ij = beta1 + beta2*x_i + b_j + epsilon_ij
library(lme4)
mod1 <- lmer(bn ~ time + (1|palmId) , data = long.data)
summary(mod1)
summary(mod1)$varcor # variances associated with the random effects
VarCorr(mod1)# variances
fixef(mod1) # extract beta1 and beta2 (fixed effects)
ranef(mod1) # extract the b_j (random effects)
# estimated variance components:
#Groups Name Variance Std.Dev.
#palmId (Intercept) 2.08 1.442
#Residual 16.33 4.042
## Alternative model: random slope only
# model: y_ij = beta1 + beta2*x_i + a_j*x_i + epsilon_ij
# i = 0..4
# j = 1..72
mod2 <- lmer(bn ~ time + (0+time | palmId) , data = long.data)
summary(mod2)
summary(mod2)$varcor # variances associated with the random effects
VarCorr(mod2)# variance
fixef(mod2) # extract beta1 and beta2 (fixed effects)
ranef(mod2) # extract the a_j (random effects)
## in lmer: time and 1+time are equivalent
mod3 <- lmer(bn ~ time + (1+time | palmId) , data = long.data)
mod3 <- lmer(bn ~ time + (time | palmId) , data = long.data)
# model: y_ij = beta1 + beta2*x_i + b_j + a_j*x_i + epsilon_ij
summary(mod3)
summary(mod3)$varcor # variances associated with the random effects
VarCorr(mod3)# variance
# note the covariance between the random intercept and the random slope
fixef(mod3) # extract beta1 and beta2 (fixed effects)
ranef(mod3) # extract the b_j and a_j (random effects)
# model with independent random intercept and random slope
mod4 <- lmer(bn ~ time + (1 | palmId) + (0+time | palmId), data = long.data)
## equivalent formulation
mod4 <- lmer(bn ~ time + (0+time || palmId), data = long.data)
# model: y_ij = beta1 + beta2*x_i + b_j + a_j*x_i + epsilon_ij, with b_j and a_j independent
summary(mod4)
summary(mod4)$varcor # variances associated with the random effects
VarCorr(mod4)# variance
# here the intercept and slope random effects are uncorrelated by construction
fixef(mod4) # extract beta1 and beta2 (fixed effects)
ranef(mod4) # extract the b_j and a_j (random effects)
## model comparison
res <- anova(mod3, mod4) # refitting model with ML
print(res)
# p-value 0.9023 => keep the simpler model (mod4)
## test random slope / random intercept
res <- anova(mod4, mod2) # refitting model with ML
print(res)
# p-value 0.4085 => keep the simpler model
res <- anova(mod4, mod1) # refitting model with ML
print(res)
# p-value < 0.05 => keep the fuller model
## conditional predictions (random effects included) vs marginal (fixed effects only)
y1 <- predict(mod1)
head(y1)
## marginal expectation
y1b <- predict(mod1, re.form=NA)
head(y1b)
y2 <- predict(mod2)
head(y2)
## marginal expectation
# add re.form = NA to drop the random effects from the prediction
y2b <- predict(mod2, re.form=NA)
head(y2b)
y3 <- predict(mod3)
head(y3)
## marginal expectation
y3b <- predict(mod3, re.form=NA)
head(y3b)
long.data$y1 <- y1
long.data$y2 <- y2
long.data$y3 <- y3
long.data$y1b <- y1b
# NOTE(review): `sub` is only created a few lines below -- run top to bottom,
# the next four calls fail (base::sub is a function, not a data frame).
ggplot(sub,aes(time,y1, group = palmId)) + geom_line(colour = "red")
ggplot(sub,aes(time,y1b, group = palmId)) + geom_line(colour = "black")
ggplot(sub,aes(time,y2, group = palmId)) + geom_line(colour = "blue")
# NOTE(review): probably meant y2b here (the marginal prediction) -- verify.
ggplot(sub,aes(time,y2, group = palmId)) + geom_line(colour = "forestgreen")
library(gridExtra)
sub <- subset(long.data, palmId %in% c ("57_11", "37_5", "56_13", "34_7", "54_13","37_6","37_23"))
sub <- droplevels(sub)
##
p1 <- ggplot(sub, aes(time,y1, group= palmId))+ geom_line(colour="red")
p2 <- ggplot(sub, aes(time,y2, group= palmId))+ geom_line( colour="blue")
p3 <- ggplot(sub, aes(time,y3, group= palmId))+ geom_line(colour="forestgreen")
# the same (marginal) prediction for every palm
p4 <- ggplot(sub, aes(time,y1b, group= palmId))+ geom_line(colour="black")
grid.arrange(p1,p2,p3,p4)
|
/TP/Long_data.R
|
no_license
|
mordor-ai/M1-Analyse-des-donnees-de-Panels
|
R
| false
| false
| 4,807
|
r
|
# Longitudinal analysis of banana palm data: linear mixed models of the
# yearly number of banana bunches (bn) per palm (palmId) over time.
# Import the long-format data.
# NOTE(review): setwd() with an absolute user-specific path makes this
# script non-portable; prefer relative paths.
setwd("/Users/epellegrin/Desktop/data-science/MIASHS/Analyse des données de Panels/TP")
long.data = read.csv2("long_data.csv",sep = ";")
#
dim(long.data)
head(long.data)
library(ggplot2)
ggplot(long.data, aes(time,bn))+geom_point()
ggplot(long.data, aes(time,bn, group=palmId))+geom_point()+geom_line()
ggplot(long.data, aes(time,bn, group=palmId))+geom_point()+geom_line()+geom_smooth(method ="lm", se= FALSE)
## Per-palm evolution of the number of bunches over time
## bn: number of bunches as a function of time
ggplot(long.data, aes(time,bn))+geom_point()+geom_line()+ facet_wrap(~palmId)
## Simple linear regression of the number of banana bunches
# model: y_ij = beta1 + beta2*x_i + epsilon_ij
mod <-lm(bn ~ time, data = long.data)
res <- resid(mod)
summary(mod)
plot(mod)
ggplot(long.data, aes(palmId,res) )+geom_point()
# Reorder palms by their residuals
ggplot(long.data, aes(reorder(palmId,res),res) )+geom_point()+geom_hline(yintercept = 0,colour=2)
## Some palms have consistently negative residuals, others consistently positive
# intercept: 34 (34 bunches per year)
# time = -4: a decrease of 4 bunches per year
## Introduce a random effect on the intercept
# model: y_ij = beta1 + beta2*x_i + b_j + epsilon_ij
library(lme4)
mod1 <- lmer(bn ~ time + (1|palmId) , data = long.data)
summary(mod1)
summary(mod1)$varcor # variances associated with the random effects
VarCorr(mod1)# variances
fixef(mod1) # extract beta1 and beta2 (fixed effects)
ranef(mod1) # extract the b_j (random effects)
# estimated variance components:
#Groups Name Variance Std.Dev.
#palmId (Intercept) 2.08 1.442
#Residual 16.33 4.042
## Alternative model: random slope only
# model: y_ij = beta1 + beta2*x_i + a_j*x_i + epsilon_ij
# i = 0..4
# j = 1..72
mod2 <- lmer(bn ~ time + (0+time | palmId) , data = long.data)
summary(mod2)
summary(mod2)$varcor # variances associated with the random effects
VarCorr(mod2)# variance
fixef(mod2) # extract beta1 and beta2 (fixed effects)
ranef(mod2) # extract the a_j (random effects)
## in lmer: time and 1+time are equivalent
mod3 <- lmer(bn ~ time + (1+time | palmId) , data = long.data)
mod3 <- lmer(bn ~ time + (time | palmId) , data = long.data)
# model: y_ij = beta1 + beta2*x_i + b_j + a_j*x_i + epsilon_ij
summary(mod3)
summary(mod3)$varcor # variances associated with the random effects
VarCorr(mod3)# variance
# note the covariance between the random intercept and the random slope
fixef(mod3) # extract beta1 and beta2 (fixed effects)
ranef(mod3) # extract the b_j and a_j (random effects)
# model with independent random intercept and random slope
mod4 <- lmer(bn ~ time + (1 | palmId) + (0+time | palmId), data = long.data)
## equivalent formulation
mod4 <- lmer(bn ~ time + (0+time || palmId), data = long.data)
# model: y_ij = beta1 + beta2*x_i + b_j + a_j*x_i + epsilon_ij, with b_j and a_j independent
summary(mod4)
summary(mod4)$varcor # variances associated with the random effects
VarCorr(mod4)# variance
# here the intercept and slope random effects are uncorrelated by construction
fixef(mod4) # extract beta1 and beta2 (fixed effects)
ranef(mod4) # extract the b_j and a_j (random effects)
## model comparison
res <- anova(mod3, mod4) # refitting model with ML
print(res)
# p-value 0.9023 => keep the simpler model (mod4)
## test random slope / random intercept
res <- anova(mod4, mod2) # refitting model with ML
print(res)
# p-value 0.4085 => keep the simpler model
res <- anova(mod4, mod1) # refitting model with ML
print(res)
# p-value < 0.05 => keep the fuller model
## conditional predictions (random effects included) vs marginal (fixed effects only)
y1 <- predict(mod1)
head(y1)
## marginal expectation
y1b <- predict(mod1, re.form=NA)
head(y1b)
y2 <- predict(mod2)
head(y2)
## marginal expectation
# add re.form = NA to drop the random effects from the prediction
y2b <- predict(mod2, re.form=NA)
head(y2b)
y3 <- predict(mod3)
head(y3)
## marginal expectation
y3b <- predict(mod3, re.form=NA)
head(y3b)
long.data$y1 <- y1
long.data$y2 <- y2
long.data$y3 <- y3
long.data$y1b <- y1b
# NOTE(review): `sub` is only created a few lines below -- run top to bottom,
# the next four calls fail (base::sub is a function, not a data frame).
ggplot(sub,aes(time,y1, group = palmId)) + geom_line(colour = "red")
ggplot(sub,aes(time,y1b, group = palmId)) + geom_line(colour = "black")
ggplot(sub,aes(time,y2, group = palmId)) + geom_line(colour = "blue")
# NOTE(review): probably meant y2b here (the marginal prediction) -- verify.
ggplot(sub,aes(time,y2, group = palmId)) + geom_line(colour = "forestgreen")
library(gridExtra)
sub <- subset(long.data, palmId %in% c ("57_11", "37_5", "56_13", "34_7", "54_13","37_6","37_23"))
sub <- droplevels(sub)
##
p1 <- ggplot(sub, aes(time,y1, group= palmId))+ geom_line(colour="red")
p2 <- ggplot(sub, aes(time,y2, group= palmId))+ geom_line( colour="blue")
p3 <- ggplot(sub, aes(time,y3, group= palmId))+ geom_line(colour="forestgreen")
# the same (marginal) prediction for every palm
p4 <- ggplot(sub, aes(time,y1b, group= palmId))+ geom_line(colour="black")
grid.arrange(p1,p2,p3,p4)
|
# Function to extract relevant data from output files.
#
# `segment` is a list of length `numCore` (global); each element holds the
# lavaan.mi fit objects produced on that core, with fits stored at the odd
# indices. For every fit, one row is produced containing:
#   * factor loadings and error variances (with their standard errors),
#   * counts of converged imputations, usable SEs, and Heywood cases,
#   * three pooled LRT statistics: Meng-Rubin (D3), Li et al. (D2), and
#     D2 pooling robust test statistics.
# Relies on the globals numCore, condition, and imputationMI.
extract <- function(segment)
{
  # Pool a likelihood-ratio test across imputations and return a named
  # 12-element vector. Errors become NA; a length-7 result (perfect model
  # fit) becomes the sentinel string "Perfect".
  pool_lrt <- function(fit, stat_names, suffix, ...)
  {
    out <- tryCatch(
      lavTestLRT.mi(fit, asymptotic = TRUE, ...),
      error = function(e) rep(NA, 12)
    )
    # lavTestLRT.mi returns only 7 values when the model fit is perfect
    if (length(out) == 7)
    {
      out <- rep("Perfect", 12)
    }
    names(out) <- paste(stat_names, suffix, sep = ".")
    out
  }
  # Shared leading statistic names; the last two entries differ per test.
  base_stats <- c("chisq", "df", "p", "ariv", "fmi", "npar", "ntotal",
                  "chisq.scaled", "df.scaled", "p.scaled")
  df <- data.frame()
  # loop through each core
  for (core in seq_len(numCore))
  {
    if (length(segment[[core]]) == 0)
    {
      print(paste("Error in Condition ",
                  condition,
                  ", Core ",
                  core,
                  sep = ""))
    } else
    {
      # fits live at the odd indices of each core's result list
      for (a in seq(1, length(segment[[core]]), by = 2))
      {
        fit <- segment[[core]][[a]]
        # save a summary of the lavaan results
        summ <- summary(fit)
        # factor loadings
        loadVec <- summ$est[grep("=~", summ$op)]
        # error variances, excluding the factor's own variance (listed last)
        errorVec <- summ$est[grep("~~", summ$op)]
        errorVec <- errorVec[-length(errorVec)]
        # standard errors of loadings and error variances, again excluding
        # the factor variance
        seVec <- summ$se[grep("~~|=~", summ$op)]
        seVec <- seVec[-length(seVec)]
        # observed-variable names and derived column names
        nameVec <- fit@Model@dimNames[[2]][[1]]
        errornameVec <- paste(nameVec, "error", sep = ".")
        senameVec <- paste(nameVec, "SE", sep = ".")
        errorsenameVec <- paste(nameVec, "errorSE", sep = ".")
        # @convergence holds 4 flags per imputation, in order:
        # converged, SE ok, Heywood (latent), Heywood (observed)
        temp <- unlist(fit@convergence)
        numConverge <- sum(temp[seq(from = 1, to = imputationMI * 4, by = 4)], na.rm = TRUE)
        numSE <- sum(temp[seq(from = 2, to = imputationMI * 4, by = 4)], na.rm = TRUE)
        numHeylv <- sum(temp[seq(from = 3, to = imputationMI * 4, by = 4)], na.rm = TRUE)
        numHeyov <- sum(temp[seq(from = 4, to = imputationMI * 4, by = 4)], na.rm = TRUE)
        # All Convergence/Heywood counts in one row
        oneRow <- c(numConverge, numSE, numHeylv, numHeyov)
        nameConvVec <- c("Convergence", "SETrue", "Heywood.lv", "Heywood.ov")
        # pooled LRTs (previously three copy-pasted tryCatch blocks)
        mengrubin <- pool_lrt(fit,
                              c(base_stats,
                                "chisq.scale.factor",
                                "chisq.shift.parameters"),
                              "mengrubin",
                              test = "D3")
        Lirobust <- pool_lrt(fit,
                             c(base_stats,
                               "ariv.scaled",
                               "fmi.scaled"),
                             "lirobust",
                             test = "D2", pool.robust = TRUE)
        Li <- pool_lrt(fit,
                       c(base_stats,
                         "chisq.scaling.factor",
                         "chisq.shift.parameters"),
                       "li",
                       test = "D2", pool.robust = FALSE)
        # Bind all vectors into one one-row data frame
        allstats <- c(loadVec,
                      errorVec,
                      seVec,
                      oneRow,
                      mengrubin,
                      Li,
                      Lirobust)
        allstats <- data.frame(t(allstats),
                               stringsAsFactors = FALSE)
        names(allstats) <- c(nameVec,
                             errornameVec,
                             senameVec,
                             errorsenameVec,
                             nameConvVec,
                             names(mengrubin),
                             names(Li),
                             names(Lirobust))
        # NOTE: growing df by rbind is O(n^2) but acceptable for the small
        # number of fits handled per condition
        df <- rbind(df,
                    allstats,
                    make.row.names = FALSE,
                    row.names = NULL)
        names(df) <- names(allstats)
        # progress message (the original built this string but never
        # printed it -- the value was silently discarded)
        print(paste("Core ",
                    core,
                    " Number ",
                    a,
                    " Done",
                    sep = ""))
      }
    }
  }
  return(df)
}
|
/Extract_WLSMV.R
|
no_license
|
Aaron0696/FIML_MI_JOC_MISSINGDATA
|
R
| false
| false
| 6,913
|
r
|
# Function to extract relevant data from output files
# For every fitted lavaan.mi object stored in `segment` (a list of
# per-core result lists), this pulls factor loadings, error variances,
# standard errors, convergence/Heywood counts, and three pooled
# likelihood-ratio tests (D3 "Meng-Rubin", robust D2, and naive D2),
# then stacks one row per model into a data.frame.
# NOTE(review): relies on the globals numCore, condition and
# imputationMI being defined in the calling script -- confirm.
extract <- function(segment)
{
  df <- data.frame()
  # loop through each core
  for (core in 1:numCore)
  {
    if (length(segment[[core]]) == 0)
    {
      print(paste("Error in Condition ",
                  condition,
                  ", Core ",
                  core,
                  sep = ""))
    } else
    {
      # loop through the lavaan objects within each core
      # (stored in pairs, hence the step of 2 -- NOTE(review): the even
      # slots presumably hold companion objects; confirm upstream)
      for (a in seq(1,length(segment[[core]]), by = 2))
      {
        # save a summary of the lavaan results
        summ <- summary(segment[[core]][[a]])
        # factor loadings
        loadVec <- summ$est[grep("=~", summ$op)]
        # error variances
        errorVec <- summ$est[grep("~~", summ$op)]
        # exclude the error variance of the factor
        errorVec <- errorVec[-length(errorVec)]
        # standard error
        seVec <- summ$se[grep("~~|=~", summ$op)]
        # exclude se of the error variance of the factor
        seVec <- seVec[-length(seVec)]
        # name vectors
        nameVec <- segment[[core]][[a]]@Model@dimNames[[2]][[1]]
        errornameVec <- paste(nameVec,
                              "error",
                              sep = ".")
        senameVec <- paste(nameVec,
                           "SE",
                           sep = ".")
        errorsenameVec <- paste(nameVec,
                                "errorSE",
                                sep = ".")
        # per-imputation convergence diagnostics come in groups of 4
        # (converged, SE, Heywood.lv, Heywood.ov) per imputation
        temp <- unlist(segment[[core]][[a]]@convergence)
        # Number of Convergence
        tempConverge <- temp[seq(from = 1, to = imputationMI*4, by = 4)]
        numConverge <- sum(tempConverge, na.rm = TRUE)
        # Number of SE = TRUE
        tempSE <- temp[seq(from = 2, to = imputationMI*4, by = 4)]
        numSE <- sum(tempSE, na.rm = TRUE)
        # Number of Heywood.lv
        tempHeylv <- temp[seq(from = 3, to = imputationMI*4, by = 4)]
        numHeylv <- sum(tempHeylv, na.rm = TRUE)
        # Number of Heywood.ov
        tempHeyov <- temp[seq(from = 4, to = imputationMI*4, by = 4)]
        numHeyov <- sum(tempHeyov, na.rm = TRUE)
        # All Convergence/Heywood counts in one row
        oneRow <- c(numConverge, numSE, numHeylv, numHeyov)
        # Name for the above vectors
        nameConvVec <- c("Convergence", "SETrue", "Heywood.lv", "Heywood.ov")
        # Pooled LRT via D3 (Meng-Rubin); falls back to an NA row on error
        mengrubin <- tryCatch({
          lavTestLRT.mi(segment[[core]][[a]],
                        asymptotic = TRUE,
                        test = "D3")
        }, error = function(e){
          rep(NA,12)
        })
        # in the case that the model fit is perfect
        if (length(mengrubin) == 7)
        {
          mengrubin <- rep("Perfect",12)
        }
        names(mengrubin) <- paste(c("chisq",
                                    "df",
                                    "p",
                                    "ariv",
                                    "fmi",
                                    "npar",
                                    "ntotal",
                                    "chisq.scaled",
                                    "df.scaled",
                                    "p.scaled",
                                    "chisq.scale.factor",
                                    "chisq.shift.parameters"),
                                  "mengrubin", sep = ".")
        # Pooled LRT via D2 using the robust per-imputation statistics
        Lirobust <- tryCatch({
          lavTestLRT.mi(segment[[core]][[a]],
                        asymptotic = TRUE,
                        test = "D2",
                        pool.robust = TRUE)
        }, error = function(e){
          rep(NA,12)
        })
        # in the case that the model fit is perfect
        if (length(Lirobust) == 7)
        {
          Lirobust <- rep("Perfect", 12)
        }
        names(Lirobust) <- paste(c("chisq",
                                   "df",
                                   "p",
                                   "ariv",
                                   "fmi",
                                   "npar",
                                   "ntotal",
                                   "chisq.scaled",
                                   "df.scaled",
                                   "p.scaled",
                                   "ariv.scaled",
                                   "fmi.scaled"),
                                 "lirobust", sep = ".")
        # Pooled LRT via D2 on the naive (non-robust) statistics
        Li <- tryCatch({
          lavTestLRT.mi(segment[[core]][[a]],
                        asymptotic = TRUE,
                        test = "D2",
                        pool.robust = FALSE)
        }, error = function(e){
          rep(NA,12)
        })
        # in the case that the model fit is perfect
        if (length(Li) == 7)
        {
          Li <- rep("Perfect", 12)
        }
        names(Li) <- paste(c("chisq",
                             "df",
                             "p",
                             "ariv",
                             "fmi",
                             "npar",
                             "ntotal",
                             "chisq.scaled",
                             "df.scaled",
                             "p.scaled",
                             "chisq.scaling.factor",
                             "chisq.shift.parameters"),
                           "li", sep = ".")
        # NOTE(review): "chisq.scale.factor" (mengrubin block above) vs
        # "chisq.scaling.factor" (here) -- inconsistent column names;
        # likely a typo in one of the two. Confirm before analysis code
        # selects columns by name.
        # Bind them all vectors into one row
        # (seVec covers both the loading SEs and the error-variance SEs,
        # matching senameVec followed by errorsenameVec below)
        allstats <- c(loadVec,
                      errorVec,
                      seVec,
                      oneRow,
                      mengrubin,
                      Li,
                      Lirobust)
        allstats <- data.frame(t(allstats),
                               stringsAsFactors = FALSE)
        names(allstats) <- c(nameVec,
                             errornameVec,
                             senameVec,
                             errorsenameVec,
                             nameConvVec,
                             names(mengrubin),
                             names(Li),
                             names(Lirobust))
        df <- rbind(df,
                    allstats,
                    make.row.names = FALSE,
                    row.names = NULL)
        names(df) <- c(nameVec,
                       errornameVec,
                       senameVec,
                       errorsenameVec,
                       nameConvVec,
                       names(mengrubin),
                       names(Li),
                       names(Lirobust))
        # NOTE(review): this paste() value is discarded (never printed or
        # messaged); wrap in message()/print() if progress output was
        # intended.
        paste("Core ",
              core,
              " Number ",
              a,
              " Done",
              sep = "")
      }
    }
  }
  return(df)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/popular.R
\name{popular}
\alias{popular}
\alias{ny_popular_emailed}
\alias{ny_popular_shared}
\alias{ny_popular_shared_type}
\alias{ny_popular_viewed}
\title{Popular}
\usage{
ny_popular_emailed(period = 30)
ny_popular_shared(period = 30)
ny_popular_shared_type(period = 30, type = c("email", "facebook", "twitter"))
ny_popular_viewed(period = 30)
}
\arguments{
\item{period}{Time period: 1, 7, or 30 days.}
\item{type}{Share type: \code{email}, \code{facebook}, or \code{twitter}.}
}
\description{
Provides services for getting the most popular articles on NYTimes.com based on emails, shares, or views.
}
\section{Functions}{
\itemize{
\item{\code{ny_popular_emailed} Returns an array of the most emailed articles on NYTimes.com for specified period of time.}
\item{\code{ny_popular_shared} Returns an array of the most shared articles on NYTimes.com for specified period of time.}
\item{\code{ny_popular_shared_type} Returns an array of the most shared articles by share type on NYTimes.com for specified period of time.}
\item{\code{ny_popular_viewed} Returns an array of the most viewed articles on NYTimes.com for specified period of time.}
}
}
\examples{
\dontrun{
nytimes_key("xXXxxXxXxXXx")
emailed <- ny_popular_emailed(7)
}
}
|
/man/popular.Rd
|
permissive
|
news-r/nytimes
|
R
| false
| true
| 1,329
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/popular.R
\name{popular}
\alias{popular}
\alias{ny_popular_emailed}
\alias{ny_popular_shared}
\alias{ny_popular_shared_type}
\alias{ny_popular_viewed}
\title{Popular}
\usage{
ny_popular_emailed(period = 30)
ny_popular_shared(period = 30)
ny_popular_shared_type(period = 30, type = c("email", "facebook", "twitter"))
ny_popular_viewed(period = 30)
}
\arguments{
\item{period}{Time period: 1, 7, or 30 days.}
\item{type}{Share type: \code{email}, \code{facebook}, or \code{twitter}.}
}
\description{
Provides services for getting the most popular articles on NYTimes.com based on emails, shares, or views.
}
\section{Functions}{
\itemize{
\item{\code{ny_popular_emailed} Returns an array of the most emailed articles on NYTimes.com for specified period of time.}
\item{\code{ny_popular_shared} Returns an array of the most shared articles on NYTimes.com for specified period of time.}
\item{\code{ny_popular_shared_type} Returns an array of the most shared articles by share type on NYTimes.com for specified period of time.}
\item{\code{ny_popular_viewed} Returns an array of the most viewed articles on NYTimes.com for specified period of time.}
}
}
\examples{
\dontrun{
nytimes_key("xXXxxXxXxXXx")
emailed <- ny_popular_emailed(7)
}
}
|
# Persist the fitted LDA model components to disk as RDS files.
# `topics`, `vocab`, `topics_filename` and `vocabulary_filename` are
# expected to be defined by the caller (e.g. the Python driver that
# sources this script) before it runs.
# library() errors immediately if the package is missing; require()
# would only return FALSE and let the script continue in a broken state.
library("lda")
saveRDS(topics, topics_filename)
saveRDS(vocab, vocabulary_filename)
|
/pyslda/saveModel.R
|
permissive
|
LeJit/PythonSLDA
|
R
| false
| false
| 82
|
r
|
require("lda")
saveRDS(topics,topics_filename)
saveRDS(vocab,vocabulary_filename)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ks_test.R
\name{ks_test}
\alias{ks_test}
\title{Weighted KS Test}
\usage{
ks_test(x, y, thresh = 0.05, w_x = rep(1, length(x)), w_y = rep(1, length(y)))
}
\arguments{
\item{x}{Vector of values sampled from the first distribution}
\item{y}{Vector of values sampled from the second distribution}
\item{thresh}{The threshold needed to clear between the two cumulative distributions}
\item{w_x}{The observation weights for x}
\item{w_y}{The observation weights for y}
}
\value{
A list with class \code{"htest"} containing the following components:
\itemize{
\item \emph{statistic} the value of the test statistic.
\item \emph{p.value} the p-value of the test.
\item \emph{alternative} a character string describing the alternative hypothesis.
\item \emph{method} a character string indicating what type of test was performed.
\item \emph{data.name} a character string giving the name(s) of the data.
}
}
\description{
Weighted Kolmogorov-Smirnov Two-Sample Test with threshold
}
\details{
The usual Kolmogorov-Smirnov test for two vectors \strong{X} and \strong{Y}, of size m
and n rely on the empirical cdfs \eqn{E_x} and \eqn{E_y} and the test statistic
\deqn{D = sup_{t\in (X, Y)} |E_x(x) - E_y(x)|}.
This modified Kolmogorov-Smirnov test relies on two modifications.
\itemize{
\item Using observation weights for both vectors \strong{X} and \strong{Y}: Those
weights are used in two places, while modifying the usual KS test. First, the
empirical cdfs are updates to account for the weights. Secondly, the effective
sample sizes are also modified. This is inspired from
\url{https://stackoverflow.com/a/55664242/13768995}, using Monahan (2011).
\item Testing against a threshold: the test statistic is thresholded such
that \eqn{D = max(D - thresh, 0)}. Since \eqn{0\le D\le 1}, the value of
the threshold is also between 0 and 1, representing an effect size for the
difference.
}
}
\examples{
x <- runif(100)
y <- runif(100, min = .5, max = .5)
ks_test(x, y, thresh = .001)
}
\references{
Monahan, J. (2011). \emph{Numerical Methods of Statistics} (2nd ed.,
Cambridge Series in Statistical and Probabilistic Mathematics). Cambridge:
Cambridge University Press. doi:10.1017/CBO9780511977176
}
|
/man/ks_test.Rd
|
permissive
|
rubak/Ecume
|
R
| false
| true
| 2,279
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ks_test.R
\name{ks_test}
\alias{ks_test}
\title{Weighted KS Test}
\usage{
ks_test(x, y, thresh = 0.05, w_x = rep(1, length(x)), w_y = rep(1, length(y)))
}
\arguments{
\item{x}{Vector of values sampled from the first distribution}
\item{y}{Vector of values sampled from the second distribution}
\item{thresh}{The threshold needed to clear between the two cumulative distributions}
\item{w_x}{The observation weights for x}
\item{w_y}{The observation weights for y}
}
\value{
A list with class \code{"htest"} containing the following components:
\itemize{
\item \emph{statistic} the value of the test statistic.
\item \emph{p.value} the p-value of the test.
\item \emph{alternative} a character string describing the alternative hypothesis.
\item \emph{method} a character string indicating what type of test was performed.
\item \emph{data.name} a character string giving the name(s) of the data.
}
}
\description{
Weighted Kolmogorov-Smirnov Two-Sample Test with threshold
}
\details{
The usual Kolmogorov-Smirnov test for two vectors \strong{X} and \strong{Y}, of size m
and n rely on the empirical cdfs \eqn{E_x} and \eqn{E_y} and the test statistic
\deqn{D = sup_{t\in (X, Y)} |E_x(x) - E_y(x)|}.
This modified Kolmogorov-Smirnov test relies on two modifications.
\itemize{
\item Using observation weights for both vectors \strong{X} and \strong{Y}: Those
weights are used in two places, while modifying the usual KS test. First, the
empirical cdfs are updated to account for the weights. Secondly, the effective
sample sizes are also modified. This is inspired from
\url{https://stackoverflow.com/a/55664242/13768995}, using Monahan (2011).
\item Testing against a threshold: the test statistic is thresholded such
that \eqn{D = max(D - thresh, 0)}. Since \eqn{0\le D\le 1}, the value of
the threshold is also between 0 and 1, representing an effect size for the
difference.
}
}
\examples{
x <- runif(100)
y <- runif(100, min = .5, max = .5)
ks_test(x, y, thresh = .001)
}
\references{
Monahan, J. (2011). \emph{Numerical Methods of Statistics} (2nd ed.,
Cambridge Series in Statistical and Probabilistic Mathematics). Cambridge:
Cambridge University Press. doi:10.1017/CBO9780511977176
}
|
# staticdocs site configuration: returns the grouped function index
# rendered on the generated documentation home page. The description
# below is a single multi-line string literal, so its internal line
# breaks and indentation are intentional.
list(
  index = list(
    sd_section("Settings functions",
      "These functions are used in `staticdocs.r` to configure various
       settings that staticdocs uses when rendering a package. This is
       particularly useful for generating an index page that groups functions
       into useful categories",
      c(
        "sd_icon",
        "sd_item",
        "sd_section"
      )
    )
  )
)
|
/staticdocs/index.r
|
no_license
|
jcheng5/staticdocs
|
R
| false
| false
| 402
|
r
|
list(
index = list(
sd_section("Settings functions",
"These functions are used in `staticdocs.r` to configure various
settings that staticdocs uses when rendering a package. This is
particularly useful for generating an index page that groups functions
into useful categories",
c(
"sd_icon",
"sd_item",
"sd_section"
)
)
)
)
|
fitYP41 <- function(Y, d, Z, beta1=1, beta2= -1, maxiter=60){
  ## Given the data and fixed (beta1, beta2), this routine does two things:
  ##   (1) computes the baseline hazard that maximizes the empirical
  ##       likelihood (the NPMLE), and
  ##   (2) evaluates the (log) empirical likelihood at that baseline.
  ## There is no alpha here, so Z is a vector or an n x 1 matrix.
  fit <- YP41(y = Y, d = d, Z = Z, b1 = beta1, b2 = beta2, k = maxiter)
  logEL <- ELcomp(Haz = fit$Hazw, Sur = fit$Survival, gam = fit$gam)
  list(EmpLik = logEL, BaselineH = fit$Hazw)
}
|
/R/fitYP41.R
|
no_license
|
cran/ELYP
|
R
| false
| false
| 496
|
r
|
fitYP41 <- function(Y, d, Z, beta1=1, beta2= -1, maxiter=60){
#### Do two things: (1) for the data, and given beta1, beta2;
#### compute the baseline that max the EL. i.e. NPMLE.
#### (2) Given the baseline and the 2 betas, compute the (log) EL value.
#### there is no alpha, So Z is a vector or nx1 matrix.
temp1 <- YP41(y=Y, d=d, Z=Z, b1=beta1, b2=beta2, k=maxiter)
ELval <- ELcomp(Haz=temp1$Hazw, Sur=temp1$Survival, gam=temp1$gam)
list(EmpLik=ELval, BaselineH=temp1$Hazw)
}
|
#' Makes a Growth Concentration Curve Function out of two income vectors
#'
#' @param x0 A vector of incomes for group/time 0
#' @param x1 A vector of incomes for group/time 1
#' @param w0 (optional) A vector of sample weights for group/time 0
#' @param w1 (optional) A vector of sample weights for group/time 1
#' @param gridIntegration (optional) A grid of class 'NIGrid' for multivariate numerical integration (mvQuad package)
#'
#' @return Returns a function which takes a vector of probabilities as inputs (p) and gives points at the Growth Concentration Curve as outputs
#'
#' @import mvQuad
#'
#' @export
make_growthConcCurve = function(x0, x1, w0 = NULL, w1 = NULL,
                                gridIntegration = NULL){
        # Default to unit weights when none are supplied
        if(is.null(w0)){
                w0 = rep(1, length(x0))
        }
        if(is.null(w1)){
                w1 = rep(1, length(x1))
        }
        if(is.null(gridIntegration)){
                # NOTE(review): this default grid is constructed but never
                # used below -- make_genLorenz() is called without it.
                # Likely dead code or a missing argument; confirm against
                # make_genLorenz()'s signature before removing.
                gridIntegration = mvQuad::createNIGrid(dim = 1, type = "GLe", level = 2000)
        }
        # Generalized Lorenz curves for each period
        genLorenz0 = make_genLorenz(x = x0, w = w0)
        genLorenz1 = make_genLorenz(x = x1, w = w1)
        # Period means: the generalized Lorenz ordinate at p = 1
        mu0 = genLorenz0(1)
        mu1 = genLorenz1(1)
        # Returned closure: change in cumulated income at quantile p,
        # normalized by the overall change in the mean
        function(p){
                (genLorenz1(p) - genLorenz0(p))/(mu1 - mu0)
        }
}
#' Makes a Growth Concentration Curve Function out of two quantile functions
#'
#' @param qf0 A quantile function for group/time 0
#' @param qf1 A quantile function for group/time 1
#' @param gridIntegration (optional) A grid of class 'NIGrid' for multivariate numerical integration (mvQuad package)
#'
#' @return Returns a function which takes a vector of probabilities as inputs (p) and gives points at the Growth Concentration Curve as outputs
#'
#' @import mvQuad
#'
#' @export
make_growthConcCurve_fromQuantile = function(qf0, qf1,
                                             gridIntegration = NULL){
        # Fall back to a 2000-node Gauss-Legendre grid when none is given
        if(is.null(gridIntegration)){
                gridIntegration = mvQuad::createNIGrid(dim = 1, type = "GLe", level = 2000)
        }
        # Generalized Lorenz curves for each period, built from the
        # supplied quantile functions and the integration grid
        gl0 = make_genLorenz_fromQuantile(qf0, gridIntegration = gridIntegration)
        gl1 = make_genLorenz_fromQuantile(qf1, gridIntegration = gridIntegration)
        # Period means equal the generalized Lorenz ordinate at p = 1
        mean0 = gl0(1)
        mean1 = gl1(1)
        # Growth concentration curve: change in cumulated income at each
        # quantile p, normalized by the change in the overall mean
        function(p){
                (gl1(p) - gl0(p))/(mean1 - mean0)
        }
}
|
/R/growthConcCurve.R
|
no_license
|
antrologos/inequalityTools
|
R
| false
| false
| 2,401
|
r
|
#' Makes a Growth Concentration Curve Function out of two income vectors
#'
#' @param x0 A vector of incomes for group/time 0
#' @param x1 A vector of incomes for group/time 1
#' @param w0 (optional) A vector of sample weights for group/time 0
#' @param w1 (optional) A vector of sample weights for group/time 1
#' @param gridIntegration (optional) A grid of class 'NIGrid' for multivariate numerical integration (mvQuad package)
#'
#' @return Returns a function which takes a vector of probabilities as inputs (p) and gives points at the Growth Concentration Curve as outputs
#'
#' @import mvQuad
#'
#' @export
make_growthConcCurve = function(x0, x1, w0 = NULL, w1 = NULL,
gridIntegration = NULL){
if(is.null(w0)){
w0 = rep(1, length(x0))
}
if(is.null(w1)){
w1 = rep(1, length(x1))
}
if(is.null(gridIntegration)){
gridIntegration = mvQuad::createNIGrid(dim = 1, type = "GLe", level = 2000)
}
genLorenz0 = make_genLorenz(x = x0, w = w0)
genLorenz1 = make_genLorenz(x = x1, w = w1)
mu0 = genLorenz0(1)
mu1 = genLorenz1(1)
function(p){
(genLorenz1(p) - genLorenz0(p))/(mu1 - mu0)
}
}
#' Makes a Growth Concentration Curve Function out of two quantile functions
#'
#' @param qf0 A quantile function for group/time 0
#' @param qf1 A quantile function for group/time 1
#' @param gridIntegration (optional) A grid of class 'NIGrid' for multivariate numerical integration (mvQuad package)
#'
#' @return Returns a function which takes a vector of probabilities as inputs (p) and gives points at the Growth Concentration Curve as outputs
#'
#' @import mvQuad
#'
#' @export
make_growthConcCurve_fromQuantile = function(qf0, qf1,
gridIntegration = NULL){
if(is.null(gridIntegration)){
gridIntegration = mvQuad::createNIGrid(dim = 1, type = "GLe", level = 2000)
}
genLorenz0 = make_genLorenz_fromQuantile(qf0, gridIntegration = gridIntegration)
genLorenz1 = make_genLorenz_fromQuantile(qf1, gridIntegration = gridIntegration)
mu0 = genLorenz0(1)
mu1 = genLorenz1(1)
function(p){
(genLorenz1(p) - genLorenz0(p))/(mu1 - mu0)
}
}
|
#' Construct full paths to a group of raw input files
#'
#' For a group of samples this function creates the list of paths to the raw
#' input files which can then be used in [loadCoverage]. The raw input
#' files are either BAM files or BigWig files.
#'
#' @param datadir The main directory where each of the `sampledirs` is a
#' sub-directory of `datadir`.
#' @param sampledirs A character vector with the names of the sample
#' directories. If `datadir` is `NULL` it is then assumed that
#' `sampledirs` specifies the full path to each sample.
#' @param samplepatt If specified and `sampledirs` is set to `NULL`,
#' then the directories matching this pattern in `datadir` (set to
#' `.` if it's set to `NULL`) are used as the sample directories.
#' @param fileterm Name of the BAM or BigWig file used in each sample. By
#' default it is set to `accepted_hits.bam` since that is the automatic
#' name generated when aligning with TopHat. If `NULL` it is then ignored
#' when reading the rawfiles. This can be useful if all the raw files are
#' stored in a single directory.
#'
#' @return A vector with the full paths to the raw files and sample names
#' stored as the vector names.
#'
#' @details This function can also be used to identify a set of BigWig files.
#'
#' @author Leonardo Collado-Torres
#' @export
#' @seealso [loadCoverage]
#' @examples
#' ## Get list of BAM files included in derfinder
#' datadir <- system.file("extdata", "genomeData", package = "derfinder")
#' files <- rawFiles(
#'     datadir = datadir, samplepatt = "*accepted_hits.bam$",
#'     fileterm = NULL
#' )
#' files
rawFiles <- function(datadir = NULL, sampledirs = NULL, samplepatt = NULL,
    fileterm = "accepted_hits.bam") {
    ## Determine the full paths to the sample directories
    if (!is.null(sampledirs)) {
        if (!is.null(datadir)) {
            ## Using sampledirs with datadir.
            ## file.path() is vectorized, so no per-element sapply() loop
            ## is needed.
            files <- file.path(datadir, sampledirs)
        } else {
            ## Using only the sampledirs since datadir is NULL
            files <- sampledirs
        }
        names(files) <- sampledirs
    } else if (!is.null(samplepatt)) {
        if (is.null(datadir)) {
            ## This case assumes that the datadir is the current directory
            datadir <- "."
        }
        ## Identify the directories with this pattern
        files <- dir(path = datadir, pattern = samplepatt, full.names = TRUE)
        names(files) <- dir(
            path = datadir, pattern = samplepatt,
            full.names = FALSE
        )
    } else {
        stop("Either 'samplepatt' or 'sampledirs' must be non-NULL.")
    }
    ## Append the per-sample file name (e.g. the BAM file) if requested
    if (!is.null(fileterm)) {
        tmp <- file.path(files, fileterm)
        names(tmp) <- names(files)
        files <- tmp
    }
    ## Done
    return(files)
}
|
/R/rawFiles.R
|
no_license
|
fallinwind/derfinder
|
R
| false
| false
| 2,917
|
r
|
#' Construct full paths to a group of raw input files
#'
#' For a group of samples this function creates the list of paths to the raw
#' input files which can then be used in [loadCoverage]. The raw input
#' files are either BAM files or BigWig files.
#'
#' @param datadir The main directory where each of the `sampledirs` is a
#' sub-directory of `datadir`.
#' @param sampledirs A character vector with the names of the sample
#' directories. If `datadir` is `NULL` it is then assumed that
#' `sampledirs` specifies the full path to each sample.
#' @param samplepatt If specified and `sampledirs` is set to `NULL`,
#' then the directories matching this pattern in `datadir` (set to
#' `.` if it's set to `NULL`) are used as the sample directories.
#' @param fileterm Name of the BAM or BigWig file used in each sample. By
#' default it is set to `accepted_hits.bam` since that is the automatic
#' name generated when aligning with TopHat. If `NULL` it is then ignored
#' when reading the rawfiles. This can be useful if all the raw files are
#' stored in a single directory.
#'
#' @return A vector with the full paths to the raw files and sample names
#' stored as the vector names.
#'
#' @details This function can also be used to identify a set of BigWig files.
#'
#' @author Leonardo Collado-Torres
#' @export
#' @seealso [loadCoverage]
#' @examples
#' ## Get list of BAM files included in derfinder
#' datadir <- system.file("extdata", "genomeData", package = "derfinder")
#' files <- rawFiles(
#' datadir = datadir, samplepatt = "*accepted_hits.bam$",
#' fileterm = NULL
#' )
#' files
rawFiles <- function(datadir = NULL, sampledirs = NULL, samplepatt = NULL,
fileterm = "accepted_hits.bam") {
## Determine the full paths to the sample directories
if (!is.null(sampledirs)) {
if (!is.null(datadir)) {
## Using sampledirs with datadir
files <- sapply(sampledirs, function(x) {
file.path(datadir, x)
})
names(files) <- sampledirs
} else {
## Using only the sampledirs since datadir is NULL
files <- sampledirs
names(files) <- sampledirs
}
} else if (!is.null(samplepatt)) {
if (is.null(datadir)) {
## This case assumes that the datadir is the current directory
datadir <- "."
}
## Identify the directories with this pattern
files <- dir(path = datadir, pattern = samplepatt, full.names = TRUE)
names(files) <- dir(
path = datadir, pattern = samplepatt,
full.names = FALSE
)
} else {
stop("Either 'samplepatt' or 'sampledirs' must be non-NULL.")
}
## Tell R which are the BAM files
if (!is.null(fileterm)) {
tmp <- file.path(files, fileterm)
names(tmp) <- names(files)
files <- tmp
}
## Done
return(files)
}
|
# Set paths
# NOTE(review): gdrivepath, installif() and lib() are expected to come
# from the user's profile / bootstrap script -- confirm. setwd() in a
# sourced script changes global state for the whole session.
path = paste(gdrivepath,'research/proj_010_trump/',sep='')
datpath = paste(path,'data/',sep='')
rawpath = paste(path,'data/raw/',sep='')
respath = paste(path,'results/',sep='')
setwd(path)
# Load libraries
pkgs = c('choroplethr','stringr','choroplethrMaps','lubridate','stargazer','ggplot2','googlesheets'
         ,'knitr')
installif(pkgs)
lib(pkgs)
# Log into google sheets, store sheet ids
gs_ls()
gskey.varguide = '1F9WgoFMqd0yHVtc7ux1mAtntdOhAx6igw9ylo-PGxGs'
# Store custom functions
# Reset the workspace and re-enter this project (helpers defined elsewhere)
resetproj = function() {
  reboot()
  setproj(10)
}
# Load every shared and project-specific .Rdata file into the caller's
# environment. The original version repeated the same load() call a
# dozen times; the file lists below preserve the exact same paths and
# load order.
loadall = function() {
  # Capture the caller's frame once so every load() targets it, even
  # from inside the loops below (parent.frame() inside an anonymous
  # function would point elsewhere).
  env = parent.frame()
  # Cross-project reference data under research/data/
  for (f in c('lookup/USA lookups', 'cntyfacts/geoarea', 'cntyfacts/pop')) {
    load(paste(gdrivepath, 'research/data/', f, '.Rdata', sep = ''),
         envir = env)
  }
  # Project datasets under research/proj_010_trump/data/
  projdat = c('acs5cnty', 'cdc_mort', 'crash', 'primaries', 'taa',
              'genelec', 'john', 'relig', 'main', 'cces')
  for (f in projdat) {
    load(paste(gdrivepath, 'research/proj_010_trump/data/', f, '.Rdata', sep = ''),
         envir = env)
  }
}
# Load only the core analysis datasets (main + cces) into the caller's
# environment.
loadmain = function() {
  load(paste(datpath,'main.Rdata',sep=''),envir=parent.frame())
  #load(paste(gdrivepath,'research/proj_010_trump/data/cces.Rdata',sep='')
  #      ,envir=parent.frame())
  load(paste(datpath,'cces.Rdata',sep=''),envir=parent.frame())
}
# Pull the regression-variable guide from the Google Sheet and keep the
# rows flagged for inclusion (and not flagged as dummy-omitted/excluded).
# Relies on data.table's NSE subsetting and the googlesheets package.
gsgetregvars = function() {
  gsdat = data.table(gs_read(gs_key(gskey.varguide),ws='ccesplus guide'))
  return(gsdat[!is.na(include)&is.na(omit.dummy)&is.na(exclude),.(varname.raw,tags,include)])
}
# Other stuff
source('fCntyToolbox.R')
|
/backup/10_16_2018/proj.R
|
no_license
|
eastnile/proj_010_trump
|
R
| false
| false
| 2,480
|
r
|
# Set paths
path = paste(gdrivepath,'research/proj_010_trump/',sep='')
datpath = paste(path,'data/',sep='')
rawpath = paste(path,'data/raw/',sep='')
respath = paste(path,'results/',sep='')
setwd(path)
# Load libraries
pkgs = c('choroplethr','stringr','choroplethrMaps','lubridate','stargazer','ggplot2','googlesheets'
,'knitr')
installif(pkgs)
lib(pkgs)
# Log into google sheets, store sheet ids
gs_ls()
gskey.varguide = '1F9WgoFMqd0yHVtc7ux1mAtntdOhAx6igw9ylo-PGxGs'
# Store custom functions
resetproj = function() {
reboot()
setproj(10)
}
loadall = function() {
load(paste(gdrivepath,'research/data/lookup/USA lookups.Rdata',sep='')
,envir=parent.frame())
load(paste(gdrivepath,'research/data/cntyfacts/geoarea.Rdata',sep='')
,envir=parent.frame())
load(paste(gdrivepath,'research/data/cntyfacts/pop.Rdata',sep='')
,envir=parent.frame())
load(paste(gdrivepath,'research/proj_010_trump/data/acs5cnty.Rdata',sep='')
,envir=parent.frame())
load(paste(gdrivepath,'research/proj_010_trump/data/cdc_mort.Rdata',sep='')
,envir=parent.frame())
load(paste(gdrivepath,'research/proj_010_trump/data/crash.Rdata',sep='')
,envir=parent.frame())
load(paste(gdrivepath,'research/proj_010_trump/data/primaries.Rdata',sep='')
,envir=parent.frame())
load(paste(gdrivepath,'research/proj_010_trump/data/taa.Rdata',sep='')
,envir=parent.frame())
load(paste(gdrivepath,'research/proj_010_trump/data/genelec.Rdata',sep='')
,envir=parent.frame())
load(paste(gdrivepath,'research/proj_010_trump/data/john.Rdata',sep='')
,envir=parent.frame())
load(paste(gdrivepath,'research/proj_010_trump/data/relig.Rdata',sep='')
,envir=parent.frame())
load(paste(gdrivepath,'research/proj_010_trump/data/main.Rdata',sep='')
,envir=parent.frame())
load(paste(gdrivepath,'research/proj_010_trump/data/cces.Rdata',sep='')
,envir=parent.frame())
}
loadmain = function() {
load(paste(datpath,'main.Rdata',sep=''),envir=parent.frame())
#load(paste(gdrivepath,'research/proj_010_trump/data/cces.Rdata',sep='')
# ,envir=parent.frame())
load(paste(datpath,'cces.Rdata',sep=''),envir=parent.frame())
}
gsgetregvars = function() {
gsdat = data.table(gs_read(gs_key(gskey.varguide),ws='ccesplus guide'))
return(gsdat[!is.na(include)&is.na(omit.dummy)&is.na(exclude),.(varname.raw,tags,include)])
}
# Other stuff
source('fCntyToolbox.R')
|
# Fuzzer-generated regression input for grattan:::anyOutside():
# all-zero bounds (a, b) with a single large value at the head of x.
testlist <- list(a = 0L, b = 0L, x = c(175570943L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
# Invoke the internal routine with the generated argument list.
result <- do.call(grattan:::anyOutside,testlist)
# Print the structure so the valgrind/libFuzzer harness captures output.
str(result)
|
/grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610055759-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 315
|
r
|
testlist <- list(a = 0L, b = 0L, x = c(175570943L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result)
|
# simulated Dail Madsen count data
# assume Gains are result of local population size, apparent survival function of habitat
# assume single stream segment, no confluences

# Simulate first-year removal (depletion) counts for a Dail-Madsen model.
#
# Args:
#   nsites:  number of stream sites
#   surveys: number of removal passes per site
#   alpha0, alpha1: intercept and slope of the log-linear abundance model
#   cmin, cmax: range of the uniform site covariate (sorted ascending)
#   p: per-pass detection probability (new optional argument; defaults to
#      the previously hard-coded 0.4, so existing calls are unchanged)
#
# Returns: list(y = nsites x surveys matrix of removal counts,
#               N = true site abundances)
data.fn <- function(nsites, surveys, alpha0, alpha1, cmin, cmax, p = 0.4){
  y <- array(dim=c(nsites,surveys))
  # Sorted covariate, so expected abundance varies monotonically along the stream
  X <- sort(runif(n=nsites,min=cmin,max=cmax))
  lam <- exp(alpha0 + alpha1*X)
  N <- rpois(n=nsites, lambda=lam)
  # generate observations for first year: each pass removes a binomial
  # draw from the fish remaining after previous passes
  tmp <- N
  for(i in seq_len(surveys)){  # seq_len() is safe when surveys == 0
    y[,i] <- rbinom(n=nsites, size=tmp, prob=p)
    tmp <- tmp-y[,i]
  }
  return(list(y=y, N=N))
}
# Simulate second-year population dynamics (survival S + gains G) and
# removal counts for the Dail-Madsen simulation.
#
# Args:
#   N:       vector of first-year site abundances
#   surveys: number of removal passes
#
# Returns: list(N2 = S + G, G = gains, S = survivors, N = input
#          abundances, y = length(N) x surveys matrix of removal counts)
popdy <- function(N, surveys){
  # generate observations for second year with S & G
  len <- length(N)
  # BUG FIX: S must be preallocated before indexed assignment
  # (previously `S[i] <- ...` errored because S did not exist).
  S <- numeric(len)
  for(i in seq_len(len)){
    S[i] <- sum(rbinom(n=N[i], size=1, prob=.8)) # constant apparent survival across sites
  }
  # Gains are Binomial(10, 0.5) per site. NOTE(review): despite the
  # original comment, only length(N) mattered in rbinom(n = N, ...) --
  # gains do NOT depend on local abundance; made that explicit here.
  G <- rbinom(n=len, size=10, prob=.5)
  N2 <- S + G
  y <- array(dim=c(len, surveys))
  # NOTE(review): depletion sampling below draws from the year-1
  # abundance N, not N2 -- confirm that is intended for "second year"
  # observations (kept as in the original).
  tmp <- N
  for(i in seq_len(surveys)){
    # BUG FIX: was rbinom(n = nsites, ...), which read a global variable;
    # the correct count is len (the number of sites in this call).
    y[,i] <- rbinom(n=len, size=tmp, prob=.4)
    tmp <- tmp-y[,i]
  }
  return(list(N2=N2, G=G, S=S, N=N, y=y))
}
# Fix the RNG seed so the simulated data set below is reproducible
set.seed(10)
sim.data <- data.fn(nsites=20,surveys=3,alpha0=1, alpha1=1.1, cmin=-3, cmax=3)
# Inspect the simulated removal counts (nsites x surveys matrix)
sim.data$y
# simulate gains and survival
# Draft multi-stage JAGS model sketch. NOTE(review): this cat() has no
# file= argument, so the (unfinished) model text is only printed to the
# console; it appears to be superseded by the complete model written to
# "mod1.txt" below. The string content is left byte-identical.
cat("
model{
  # define priors for parameters
  # gamma = recruitment
  gamma ~ dgamma(0.001, 0.001)
  # omega[i] = survival probability of stage i
  # p[i] = detection probability of i;
  # lambda[i] = initial population size of i
  for(i in 1:nstages){
    omega[i] ~ dbeta(1,1)
    p[i] ~ dbeta(1, 1)
    lambda[i] ~ dgamma(0.001, 0.001)
  }
  # phi = transition probability from juveniles to adults
  phi ~ dbeta(1, 1)
  # degine the stage transition matrix
  # TransMat(i_new, i_old) - probability of transitioning to stage i_new from i_old, conditional on survival
  # stage
}
")
# Full Dail-Madsen open-population JAGS model, written to "mod1.txt".
# The long run of hand-written G[...] lines inside the model string
# encodes the stream-network neighborhood (two segments up/downstream
# plus tributary junctions) feeding the Poisson gains term, which the
# authors note cannot be expressed as a single loop. The model string
# itself is left byte-identical (it is runtime text, not R code).
cat("
model {
  ### priors
  # initial abundance: negative bionomial parameters
  r ~ dunif(0,50)
  p ~ dunif(0,1)
  # survival
  for(i in 1:nSites){
    omega[i] <- 1/(1+exp(-omegaX[i]))
    omegaX[i] <- alpha + beta1*poolRatioSt[i] + beta2*meanDepthSt[i] + beta3*tempSt[i]
  }
  # Jeffery's prior for survival coefficients
  alpha ~ dnorm(0, 0.37); beta1 ~ dnorm(0, 0.37);beta2 ~ dnorm(0, 0.37); beta3 ~ dnorm(0, 0.37)
  # recruitment
  gamma ~ dunif(0,10)
  # fixed detection probability based on three-pass depletion
  Q ~ dunif(0.63, 0.65)
  ### Dail-Madsen model
  # loop across sites
  for(i in 1:nSites) {
    # Year 1 - initial abundance
    N[i,1] ~ dnegbin(p,r)
    # Detection model
    for(r in 1:nReps){
      y[i,1,r] ~ dbin(Q, N[i,1])
    }
    # Year 2
    for(t in 2:nYears) {
      # Estimate survival
      S[i,t-1] ~ dbin(omega[i], N[i,t-1])
    }
  }
  # Estimate gains: including two sites upstream & downstream
  # Due to locations of tributaries and study area boundaries, this section cannot be completely looped
  # resulting in a lengthy code
  for(t in 2:nYears) {
    # Jefferson Hill Brook
    G[1,t-1] ~ dpois(gamma*(N[1,t-1] + N[2,t-1] + N[3,t-1] + N[73,t-1] + N[74,t-1] + N[75,t-1] + N[76,t-1]))
    G[2,t-1] ~ dpois(gamma*(N[1,t-1] + N[2,t-1] + N[3,t-1] + N[4,t-1] + N[74,t-1] + N[75,t-1]))
    for(i in 3:8){
      G[i,t-1] ~ dpois(gamma*(N[i-2,t-1] + N[i-1,t-1] + N[i,t-1] + N[i+1,t-1] + N[i+2,t-1]))
    }
    G[9,t-1] ~ dpois(gamma*(N[7,t-1] + N[8,t-1] + N[9,t-1] + N[10,t-1] + N[11,t-1] + N[51,t-1]))
    G[10,t-1] ~ dpois(gamma*(N[8,t-1] + N[9,t-1] + N[10,t-1] + N[11,t-1] + N[12,t-1] + N[51,t-1] + N[52,t-1]))
    G[11,t-1] ~ dpois(gamma*(N[9,t-1] + N[10,t-1] + N[11,t-1] + N[12,t-1] + N[13,t-1] + N[51,t-1] + N[52,t-1]))
    G[12,t-1] ~ dpois(gamma*(N[10,t-1] + N[11,t-1] + N[12,t-1] + N[13,t-1] + N[14,t-1] + N[51,t-1]))
    for(i in 13:33){
      G[i,t-1] ~ dpois(gamma*(N[i-2,t-1] + N[i-1,t-1] + N[i,t-1] + N[i+1,t-1] + N[i+2,t-1]))
    }
    G[34,t-1] ~ dpois(gamma*(N[32,t-1] + N[33,t-1] + N[34,t-1] + N[35,t-1] + N[36,t-1] + N[59,t-1]))
    G[35,t-1] ~ dpois(gamma*(N[33,t-1] + N[34,t-1] + N[35,t-1] + N[36,t-1] + N[37,t-1] + N[59,t-1] + N[60,t-1]))
    G[36,t-1] ~ dpois(gamma*(N[34,t-1] + N[35,t-1] + N[36,t-1] + N[37,t-1] + N[38,t-1] + N[59,t-1] + N[60,t-1]))
    G[37,t-1] ~ dpois(gamma*(N[35,t-1] + N[36,t-1] + N[37,t-1] + N[38,t-1] + N[39,t-1] + N[59,t-1]))
    for(i in 38:46){
      G[i,t-1] ~ dpois(gamma*(N[i-2,t-1] + N[i-1,t-1] + N[i,t-1] + N[i+1,t-1] + N[i+2,t-1]))
    }
    G[47,t-1] ~ dpois(gamma*(N[45,t-1] + N[46,t-1] + N[47,t-1] + N[48,t-1] + N[49,t-1] + N[63,t-1]))
    G[48,t-1] ~ dpois(gamma*(N[46,t-1] + N[47,t-1] + N[48,t-1] + N[49,t-1] + N[50,t-1] + N[63,t-1] + N[64,t-1]))
    G[49,t-1] ~ dpois(gamma*(N[47,t-1] + N[48,t-1] + N[49,t-1] + N[50,t-1] + N[63,t-1] + N[64,t-1]))
    G[50,t-1] ~ dpois(gamma*(N[48,t-1] + N[49,t-1] + N[50,t-1] + N[38,t-1] + N[63,t-1]))
    G[51,t-1] ~ dpois(gamma*(N[9,t-1] + N[10,t-1] + N[11,t-1] + N[12,t-1] + N[51,t-1] + N[52,t-1] + N[53,t-1]))
    G[52,t-1] ~ dpois(gamma*(N[10,t-1] + N[11,t-1] + N[51,t-1] + N[52,t-1] + N[53,t-1] + N[54,t-1]))
    for(i in 53:56){
      G[i,t-1] ~ dpois(gamma*(N[i-2,t-1] + N[i-1,t-1] + N[i,t-1] + N[i+1,t-1] + N[i+2,t-1]))
    }
    G[57,t-1] ~ dpois(gamma*(N[55,t-1] + N[56,t-1] + N[57,t-1] + N[58,t-1]))
    G[58,t-1] ~ dpois(gamma*(N[56,t-1] + N[57,t-1] + N[58,t-1]))
    G[59,t-1] ~ dpois(gamma*(N[34,t-1] + N[35,t-1] + N[36,t-1] + N[37,t-1] + N[59,t-1] + N[60,t-1] + N[61,t-1]))
    G[60,t-1] ~ dpois(gamma*(N[35,t-1] + N[36,t-1] + N[59,t-1] + N[60,t-1] + N[61,t-1] + N[62,t-1]))
    G[61,t-1] ~ dpois(gamma*(N[59,t-1] + N[60,t-1] + N[61,t-1] + N[62,t-1]))
    G[62,t-1] ~ dpois(gamma*(N[60,t-1] + N[61,t-1] + N[62,t-1]))
    G[63,t-1] ~ dpois(gamma*(N[47,t-1] + N[48,t-1] + N[49,t-1] + N[50,t-1] + N[63,t-1] + N[64,t-1] + N[65,t-1]))
    G[64,t-1] ~ dpois(gamma*(N[48,t-1] + N[49,t-1] + N[63,t-1] + N[64,t-1] + N[65,t-1] + N[66,t-1]))
    G[65,t-1] ~ dpois(gamma*(N[63,t-1] + N[64,t-1] + N[65,t-1] + N[66,t-1]))
    G[66,t-1] ~ dpois(gamma*(N[64,t-1] + N[65,t-1] + N[66,t-1]))
    # Spruce Brook
    G[67,t-1] ~ dpois(gamma*(N[67,t-1] + N[68,t-1] + N[69,t-1]))
    G[68,t-1] ~ dpois(gamma*(N[67,t-1] + N[68,t-1] + N[69,t-1] + N[70,t-1]))
    for(i in 69:72){
      G[i,t-1] ~ dpois(gamma*(N[i-2,t-1] + N[i-1,t-1] + N[i,t-1] + N[i+1,t-1] + N[i+2,t-1]))
    }
    G[73,t-1] ~ dpois(gamma*(N[1,t-1] + N[71,t-1] + N[72,t-1] + N[73,t-1] + N[74,t-1] + N[75,t-1]))
    G[74,t-1] ~ dpois(gamma*(N[1,t-1] + N[2,t-1] + N[72,t-1] + N[73,t-1] + N[74,t-1] + N[75,t-1] + N[76,t-1]))
    G[75,t-1] ~ dpois(gamma*(N[1,t-1] + N[2,t-1] + N[73,t-1] + N[74,t-1] + N[75,t-1] + N[76,t-1] + N[77,t-1]))
    G[76,t-1] ~ dpois(gamma*(N[1,t-1] + N[74,t-1] + N[75,t-1] + N[76,t-1] + N[77,t-1] + N[78,t-1]))
    for(i in 77:144){
      G[i,t-1] ~ dpois(gamma*(N[i-2,t-1] + N[i-1,t-1] + N[i,t-1] + N[i+1,t-1] + N[i+2,t-1]))
    }
    G[145,t-1] ~ dpois(gamma*(N[143,t-1] + N[144,t-1] + N[145,t-1] + N[146,t-1] + N[147,t-1] + N[150,t-1]))
    G[146,t-1] ~ dpois(gamma*(N[144,t-1] + N[145,t-1] + N[146,t-1] + N[147,t-1] + N[148,t-1] + N[150,t-1] + N[151,t-1]))
    G[147,t-1] ~ dpois(gamma*(N[145,t-1] + N[146,t-1] + N[147,t-1] + N[148,t-1] + N[149,t-1] + N[150,t-1] + N[151,t-1]))
    G[148,t-1] ~ dpois(gamma*(N[146,t-1] + N[147,t-1] + N[148,t-1] + N[149,t-1] + N[150,t-1]))
    G[149,t-1] ~ dpois(gamma*(N[147,t-1] + N[148,t-1] + N[149,t-1]))
    G[150,t-1] ~ dpois(gamma*(N[145,t-1] + N[146,t-1] + N[147,t-1] + N[148,t-1] + N[150,t-1] + N[151,t-1] + N[152,t-1]))
    G[151,t-1] ~ dpois(gamma*(N[146,t-1] + N[147,t-1] + N[150,t-1] + N[151,t-1] + N[152,t-1]))
    G[152,t-1] ~ dpois(gamma*(N[150,t-1] + N[151,t-1] + N[152,t-1]))
  }
  #Sum survival and gain to get total N at each site i in each year t
  for(i in 1:nSites) {
    for(t in 2:nYears){
      N[i,t] <- S[i,t-1] + G[i,t-1]
      #Detection model
      for(r in 1:nReps){
        y[i,t,r] ~ dbin(Q, N[i,t])
      }
    }
  }
}
",fill=TRUE,file="mod1.txt")
|
/simdata1.r
|
no_license
|
openfields/Occupancy
|
R
| false
| false
| 8,146
|
r
|
# simulated Dail Madsen count data
# assume Gains are result of local population size, apparent survival function of habitat
# assume single stream segment, no confluences
#' Simulate year-1 removal-count data for a Dail-Madsen style model.
#'
#' Site-level expected abundance follows log(lambda) = alpha0 + alpha1 * X,
#' where X is a sorted Uniform(cmin, cmax) covariate. True abundance N is
#' Poisson(lambda); the `surveys` removal passes each detect remaining
#' individuals with probability `p` (depletion sampling).
#'
#' @param nsites  number of sites
#' @param surveys number of removal passes per site
#' @param alpha0,alpha1 intercept and slope on the log-abundance scale
#' @param cmin,cmax range of the uniform site covariate
#' @param p per-pass detection probability (default 0.4, the value that was
#'   previously hard-coded; now a parameter for reuse)
#' @return list(y = nsites x surveys count matrix, N = true abundances)
data.fn <- function(nsites, surveys, alpha0, alpha1, cmin, cmax, p = 0.4){
  y <- array(dim = c(nsites, surveys))
  X <- sort(runif(n = nsites, min = cmin, max = cmax))
  lam <- exp(alpha0 + alpha1 * X)
  N <- rpois(n = nsites, lambda = lam)
  # generate observations for first year: deplete the unobserved remainder
  # on every pass so total detections can never exceed N
  tmp <- N
  for(i in seq_len(surveys)){
    y[, i] <- rbinom(n = nsites, size = tmp, prob = p)
    tmp <- tmp - y[, i]
  }
  return(list(y = y, N = N))
}
#' Project the population one year forward and simulate year-2 counts.
#'
#' Survivors S[i] ~ Binomial(N[i], omega); gains G[i] ~ Binomial(gain_size,
#' gain_prob); the year-2 abundance is N2 = S + G. Removal-style observations
#' y are then drawn by depleting N2 over `surveys` passes with per-pass
#' detection probability `p`.
#'
#' Fixes relative to the original:
#'   * S was indexed (`S[i] <- ...`) without ever being created, which errors
#'     unless a global `S` happens to exist -- replaced by a vectorized
#'     rbinom, which is distributionally identical to summing N[i]
#'     Bernoulli(omega) trials.
#'   * the observation loop sampled with `n = nsites` (an undefined global)
#'     instead of the local `len`.
#'   * observations for the *second* year were depleted from the year-1
#'     abundance N rather than N2, contradicting the stated intent.
#'
#' @param N integer vector of site-level abundances in year 1
#' @param surveys number of removal passes per site
#' @param omega per-individual apparent survival probability
#' @param gain_size,gain_prob Binomial parameters for site-level gains
#' @param p per-pass detection probability
#' @return list(N2, G, S, N, y) with y a length(N) x surveys count matrix
popdy <- function(N, surveys, omega = 0.8, gain_size = 10, gain_prob = 0.5,
                  p = 0.4){
  len <- length(N)
  # constant apparent survival across sites
  S <- rbinom(n = len, size = N, prob = omega)
  G <- rbinom(n = len, size = gain_size, prob = gain_prob)
  N2 <- S + G
  # removal sampling from the year-2 abundance
  y <- array(dim = c(len, surveys))
  remaining <- N2
  for(i in seq_len(surveys)){
    y[, i] <- rbinom(n = len, size = remaining, prob = p)
    remaining <- remaining - y[, i]
  }
  return(list(N2 = N2, G = G, S = S, N = N, y = y))
}
# Reproducible example: simulate year-1 counts at 20 sites with 3 removal passes.
set.seed(10)
sim.data <- data.fn(nsites=20,surveys=3,alpha0=1, alpha1=1.1, cmin=-3, cmax=3)
# Inspect the simulated observation matrix (sites x surveys).
sim.data$y
# simulate gains and survival
# NOTE(review): this cat() only prints a partial JAGS model sketch to the
# console (no file= argument). The quoted model text below is runtime output
# and is left byte-identical.
cat("
model{
# define priors for parameters
# gamma = recruitment
gamma ~ dgamma(0.001, 0.001)
# omega[i] = survival probability of stage i
# p[i] = detection probability of i;
# lambda[i] = initial population size of i
for(i in 1:nstages){
omega[i] ~ dbeta(1,1)
p[i] ~ dbeta(1, 1)
lambda[i] ~ dgamma(0.001, 0.001)
}
# phi = transition probability from juveniles to adults
phi ~ dbeta(1, 1)
# degine the stage transition matrix
# TransMat(i_new, i_old) - probability of transitioning to stage i_new from i_old, conditional on survival
# stage
}
")
# Write the full Dail-Madsen JAGS model specification to "mod1.txt" (see the
# file= argument at the close of this call). Everything inside the quotes is
# the model text itself and is left byte-identical.
cat("
model {
### priors
# initial abundance: negative bionomial parameters
r ~ dunif(0,50)
p ~ dunif(0,1)
# survival
for(i in 1:nSites){
omega[i] <- 1/(1+exp(-omegaX[i]))
omegaX[i] <- alpha + beta1*poolRatioSt[i] + beta2*meanDepthSt[i] + beta3*tempSt[i]
}
# Jeffery's prior for survival coefficients
alpha ~ dnorm(0, 0.37); beta1 ~ dnorm(0, 0.37);beta2 ~ dnorm(0, 0.37); beta3 ~ dnorm(0, 0.37)
# recruitment
gamma ~ dunif(0,10)
# fixed detection probability based on three-pass depletion
Q ~ dunif(0.63, 0.65)
### Dail-Madsen model
# loop across sites
for(i in 1:nSites) {
# Year 1 - initial abundance
N[i,1] ~ dnegbin(p,r)
# Detection model
for(r in 1:nReps){
y[i,1,r] ~ dbin(Q, N[i,1])
}
# Year 2
for(t in 2:nYears) {
# Estimate survival
S[i,t-1] ~ dbin(omega[i], N[i,t-1])
}
}
# Estimate gains: including two sites upstream & downstream
# Due to locations of tributaries and study area boundaries, this section cannot be completely looped
# resulting in a lengthy code
for(t in 2:nYears) {
# Jefferson Hill Brook
G[1,t-1] ~ dpois(gamma*(N[1,t-1] + N[2,t-1] + N[3,t-1] + N[73,t-1] + N[74,t-1] + N[75,t-1] + N[76,t-1]))
G[2,t-1] ~ dpois(gamma*(N[1,t-1] + N[2,t-1] + N[3,t-1] + N[4,t-1] + N[74,t-1] + N[75,t-1]))
for(i in 3:8){
G[i,t-1] ~ dpois(gamma*(N[i-2,t-1] + N[i-1,t-1] + N[i,t-1] + N[i+1,t-1] + N[i+2,t-1]))
}
G[9,t-1] ~ dpois(gamma*(N[7,t-1] + N[8,t-1] + N[9,t-1] + N[10,t-1] + N[11,t-1] + N[51,t-1]))
G[10,t-1] ~ dpois(gamma*(N[8,t-1] + N[9,t-1] + N[10,t-1] + N[11,t-1] + N[12,t-1] + N[51,t-1] + N[52,t-1]))
G[11,t-1] ~ dpois(gamma*(N[9,t-1] + N[10,t-1] + N[11,t-1] + N[12,t-1] + N[13,t-1] + N[51,t-1] + N[52,t-1]))
G[12,t-1] ~ dpois(gamma*(N[10,t-1] + N[11,t-1] + N[12,t-1] + N[13,t-1] + N[14,t-1] + N[51,t-1]))
for(i in 13:33){
G[i,t-1] ~ dpois(gamma*(N[i-2,t-1] + N[i-1,t-1] + N[i,t-1] + N[i+1,t-1] + N[i+2,t-1]))
}
G[34,t-1] ~ dpois(gamma*(N[32,t-1] + N[33,t-1] + N[34,t-1] + N[35,t-1] + N[36,t-1] + N[59,t-1]))
G[35,t-1] ~ dpois(gamma*(N[33,t-1] + N[34,t-1] + N[35,t-1] + N[36,t-1] + N[37,t-1] + N[59,t-1] + N[60,t-1]))
G[36,t-1] ~ dpois(gamma*(N[34,t-1] + N[35,t-1] + N[36,t-1] + N[37,t-1] + N[38,t-1] + N[59,t-1] + N[60,t-1]))
G[37,t-1] ~ dpois(gamma*(N[35,t-1] + N[36,t-1] + N[37,t-1] + N[38,t-1] + N[39,t-1] + N[59,t-1]))
for(i in 38:46){
G[i,t-1] ~ dpois(gamma*(N[i-2,t-1] + N[i-1,t-1] + N[i,t-1] + N[i+1,t-1] + N[i+2,t-1]))
}
G[47,t-1] ~ dpois(gamma*(N[45,t-1] + N[46,t-1] + N[47,t-1] + N[48,t-1] + N[49,t-1] + N[63,t-1]))
G[48,t-1] ~ dpois(gamma*(N[46,t-1] + N[47,t-1] + N[48,t-1] + N[49,t-1] + N[50,t-1] + N[63,t-1] + N[64,t-1]))
G[49,t-1] ~ dpois(gamma*(N[47,t-1] + N[48,t-1] + N[49,t-1] + N[50,t-1] + N[63,t-1] + N[64,t-1]))
G[50,t-1] ~ dpois(gamma*(N[48,t-1] + N[49,t-1] + N[50,t-1] + N[38,t-1] + N[63,t-1]))
G[51,t-1] ~ dpois(gamma*(N[9,t-1] + N[10,t-1] + N[11,t-1] + N[12,t-1] + N[51,t-1] + N[52,t-1] + N[53,t-1]))
G[52,t-1] ~ dpois(gamma*(N[10,t-1] + N[11,t-1] + N[51,t-1] + N[52,t-1] + N[53,t-1] + N[54,t-1]))
for(i in 53:56){
G[i,t-1] ~ dpois(gamma*(N[i-2,t-1] + N[i-1,t-1] + N[i,t-1] + N[i+1,t-1] + N[i+2,t-1]))
}
G[57,t-1] ~ dpois(gamma*(N[55,t-1] + N[56,t-1] + N[57,t-1] + N[58,t-1]))
G[58,t-1] ~ dpois(gamma*(N[56,t-1] + N[57,t-1] + N[58,t-1]))
G[59,t-1] ~ dpois(gamma*(N[34,t-1] + N[35,t-1] + N[36,t-1] + N[37,t-1] + N[59,t-1] + N[60,t-1] + N[61,t-1]))
G[60,t-1] ~ dpois(gamma*(N[35,t-1] + N[36,t-1] + N[59,t-1] + N[60,t-1] + N[61,t-1] + N[62,t-1]))
G[61,t-1] ~ dpois(gamma*(N[59,t-1] + N[60,t-1] + N[61,t-1] + N[62,t-1]))
G[62,t-1] ~ dpois(gamma*(N[60,t-1] + N[61,t-1] + N[62,t-1]))
G[63,t-1] ~ dpois(gamma*(N[47,t-1] + N[48,t-1] + N[49,t-1] + N[50,t-1] + N[63,t-1] + N[64,t-1] + N[65,t-1]))
G[64,t-1] ~ dpois(gamma*(N[48,t-1] + N[49,t-1] + N[63,t-1] + N[64,t-1] + N[65,t-1] + N[66,t-1]))
G[65,t-1] ~ dpois(gamma*(N[63,t-1] + N[64,t-1] + N[65,t-1] + N[66,t-1]))
G[66,t-1] ~ dpois(gamma*(N[64,t-1] + N[65,t-1] + N[66,t-1]))
# Spruce Brook
G[67,t-1] ~ dpois(gamma*(N[67,t-1] + N[68,t-1] + N[69,t-1]))
G[68,t-1] ~ dpois(gamma*(N[67,t-1] + N[68,t-1] + N[69,t-1] + N[70,t-1]))
for(i in 69:72){
G[i,t-1] ~ dpois(gamma*(N[i-2,t-1] + N[i-1,t-1] + N[i,t-1] + N[i+1,t-1] + N[i+2,t-1]))
}
G[73,t-1] ~ dpois(gamma*(N[1,t-1] + N[71,t-1] + N[72,t-1] + N[73,t-1] + N[74,t-1] + N[75,t-1]))
G[74,t-1] ~ dpois(gamma*(N[1,t-1] + N[2,t-1] + N[72,t-1] + N[73,t-1] + N[74,t-1] + N[75,t-1] + N[76,t-1]))
G[75,t-1] ~ dpois(gamma*(N[1,t-1] + N[2,t-1] + N[73,t-1] + N[74,t-1] + N[75,t-1] + N[76,t-1] + N[77,t-1]))
G[76,t-1] ~ dpois(gamma*(N[1,t-1] + N[74,t-1] + N[75,t-1] + N[76,t-1] + N[77,t-1] + N[78,t-1]))
for(i in 77:144){
G[i,t-1] ~ dpois(gamma*(N[i-2,t-1] + N[i-1,t-1] + N[i,t-1] + N[i+1,t-1] + N[i+2,t-1]))
}
G[145,t-1] ~ dpois(gamma*(N[143,t-1] + N[144,t-1] + N[145,t-1] + N[146,t-1] + N[147,t-1] + N[150,t-1]))
G[146,t-1] ~ dpois(gamma*(N[144,t-1] + N[145,t-1] + N[146,t-1] + N[147,t-1] + N[148,t-1] + N[150,t-1] + N[151,t-1]))
G[147,t-1] ~ dpois(gamma*(N[145,t-1] + N[146,t-1] + N[147,t-1] + N[148,t-1] + N[149,t-1] + N[150,t-1] + N[151,t-1]))
G[148,t-1] ~ dpois(gamma*(N[146,t-1] + N[147,t-1] + N[148,t-1] + N[149,t-1] + N[150,t-1]))
G[149,t-1] ~ dpois(gamma*(N[147,t-1] + N[148,t-1] + N[149,t-1]))
G[150,t-1] ~ dpois(gamma*(N[145,t-1] + N[146,t-1] + N[147,t-1] + N[148,t-1] + N[150,t-1] + N[151,t-1] + N[152,t-1]))
G[151,t-1] ~ dpois(gamma*(N[146,t-1] + N[147,t-1] + N[150,t-1] + N[151,t-1] + N[152,t-1]))
G[152,t-1] ~ dpois(gamma*(N[150,t-1] + N[151,t-1] + N[152,t-1]))
}
#Sum survival and gain to get total N at each site i in each year t
for(i in 1:nSites) {
for(t in 2:nYears){
N[i,t] <- S[i,t-1] + G[i,t-1]
#Detection model
for(r in 1:nReps){
y[i,t,r] ~ dbin(Q, N[i,t])
}
}
}
}
",fill=TRUE,file="mod1.txt")
|
devtools::install_github("rensa/ggflags")
library(tidyverse)
library(ggflags)
#library(ggrepel)
tuesdata <- tidytuesdayR::tt_load(2020, week = 36)
#Fertilizer
fertilizer <- tuesdata$cereal_crop_yield_vs_fertilizer_application %>%
filter(!is.na(Code)) %>%
select(Entity,Code,Year,fertilizer=`Nitrogen fertilizer use (kilograms per hectare)`,
crop_yield=`Cereal yield (tonnes per hectare)`)
#Get the population from the land use dataset and filter out countries with fewer than
#10MM inhabitants (the filter below keeps population > 10,000,000)
land_use <- tuesdata$land_use_vs_yield_change_in_cereal_production %>%
filter(!is.na(Code),!is.na(`Cereal yield index`)) %>%
group_by(Entity,Code) %>%
summarise(population=last(`Total population (Gapminder)`)) %>%
ungroup() %>%
filter(population>10000000)
#Calculate the before and after, using the average of two years to be slightly
#more robust against outliers
final <- fertilizer %>% group_by(Entity,Code) %>%
summarise(fertilizer_n=sum(!is.na(fertilizer)),
fertilizer=sum(fertilizer,na.rm=T),
before=max((crop_yield[Year==2001]+crop_yield[Year==2000])/2),
after=max((crop_yield[Year==2016]+crop_yield[Year==2017])/2)) %>%
mutate(perc_change_crop_yield=(after/before)-1) %>%
ungroup() %>%
filter(!is.nan(perc_change_crop_yield),!is.na(perc_change_crop_yield),
fertilizer_n==16) %>%
inner_join(land_use,by=c("Code","Entity"))
#Visualization
textcol <- "midnightblue"
final %>%
mutate(code_icons=case_when(Entity=="Chile" ~ "cl",
Entity=="Netherlands" ~ "nl",
Entity=="Egypt" ~ "eg",
Entity=="France" ~ "fr",
Entity=="Germany" ~ "de",
Entity=="United Kingdom" ~ "gb",
Entity=="United States" ~ "us",
#Entity=="Peru" ~ "pe",
Entity=="South Korea" ~ "kr",
Entity=="China" ~ "cn",
Entity=="Colombia"~"co",
Entity=="Bangladesh"~"bd",
Entity=="Japan"~"jp",
Entity=="Vietnam"~"vn")) %>%
ggplot(aes(x=fertilizer,y=after))+
geom_point(size=2)+
geom_segment(aes(xend=fertilizer,yend=before))+
geom_flag(aes(country=code_icons),size=8)+
#geom_text_repel(aes(label=Entity))+
labs(x="Nitrogen fertilizer use (kg per hectare)",y="Crop yield (tonnes per hectare)",
title="Fertilizers and their effect on crop yield",
subtitle="How do crop yields change between 2002 and 2017 depending on the amount of fertilizers used?",
caption="Data from Our World In Data")+
theme(plot.background = element_rect(fill = "ivory"),
panel.background = element_rect(fill="ivory2"),
axis.title = element_text(family = "sans" ,size=14,colour=textcol),
axis.text = element_text(family = "sans" ,size=14,colour=textcol),
plot.title = element_text(family = "sans", face = "bold", size = 20, colour = textcol),
plot.subtitle = element_text(family = "sans" ,size=16, colour = textcol))
##Other ideas
# The potato map
library(tmap)
data(World)
potatoes <- key_crop_yields %>%
filter(Year>=2008,!is.na(Code)) %>%
group_by(Code,Entity) %>%
summarise(potato_tph=mean(`Potatoes (tonnes per hectare)`,na.rm=T))
World2 <- World %>%
left_join(potatoes,by=c("iso_a3"="Code"))
tm_shape(World2,projection=4326)+
tm_polygons(col="potato_tph",palette="BuGn")
World2 %>% sf::st_transform(4326) %>%
ggplot()+geom_sf(aes(fill=potato_tph))
## Corrmorant
library(corrmorant)
potatoes <- key_crop_yields %>%
filter(Year>=2008,!is.na(Code)) %>%
group_by(Code,Entity) %>%
summarise(potato_tph=mean(`Potatoes (tonnes per hectare)`,na.rm=T))
tractors <- tuesdata$cereal_yields_vs_tractor_inputs_in_agriculture %>%
filter(!is.na(Code),Year>1980,!is.na(`Tractors per 100 sq km arable land`)) %>%
group_by(Code,Entity) %>%
summarise(tractors=last(`Tractors per 100 sq km arable land`))
World %>%
sf::st_drop_geometry() %>%
left_join(potatoes,by=c("iso_a3"="Code")) %>%
left_join(tractors,by=c("iso_a3"="Code")) %>%
select(income_grp,life_exp,potato_tph,tractors) %>%
filter(!is.na(life_exp),!is.na(potato_tph),!is.na(tractors)) %>%
ggcorrm(aes(col=income_grp,fill=income_grp))+
lotri(geom_point(alpha = 0.5)) +
utri_corrtext(nrow = 2, squeeze = 0.6) +
dia_names(y_pos = 0.15, size = 3) +
dia_density(lower = 0.3, color = 1)
## Chile Profile
country <- "Chile"
key_crop_yields <- tuesdata$key_crop_yields %>% filter(Entity==country)
arable_land <- tuesdata$arable_land_pin %>% filter(Entity==country) %>%
rename(arable_land_needed=`Arable land needed to produce a fixed quantity of crops ((1.0 = 1961))`)
fertilizer <- tuesdata$cereal_crop_yield_vs_fertilizer_application %>% filter(Entity==country) %>%
rename(nitrogen=`Nitrogen fertilizer use (kilograms per hectare)`,
yield=`Cereal yield (tonnes per hectare)`)
land_use <- tuesdata$land_use_vs_yield_change_in_cereal_production %>% filter(Entity==country)
tractors <- tuesdata$cereal_yields_vs_tractor_inputs_in_agriculture %>% filter(Entity==country)
#Chile
crop_long <- key_crop_yields %>%
pivot_longer(cols=contains("tonnes"),names_to="crop",values_to="tonnes_per_hectar") %>%
separate(crop,into = "crop",sep = " ")
library(directlabels)
crop_long %>%
ggplot(aes(x=Year,y=tonnes_per_hectar,col=crop))+
geom_line(size=1)+
geom_dl(aes(label=crop),method="smart.grid")+
theme(legend.position = "none")
arable_land %>%
ggplot(aes(x=Year,
y=arable_land_needed,
group=1)) +
geom_line()+
theme(legend.position = "none")
fertilizer %>% filter(Entity%in%c("Chile")) %>%
ggplot(aes(x=Year,y=nitrogen,group=Entity))+geom_line()
|
/2020/Week 36 - Global Crop Yields/Crops.R
|
no_license
|
Rohan4201/tidy-tuesdays
|
R
| false
| false
| 5,897
|
r
|
devtools::install_github("rensa/ggflags")
library(tidyverse)
library(ggflags)
#library(ggrepel)
tuesdata <- tidytuesdayR::tt_load(2020, week = 36)
#Fertilizer
fertilizer <- tuesdata$cereal_crop_yield_vs_fertilizer_application %>%
filter(!is.na(Code)) %>%
select(Entity,Code,Year,fertilizer=`Nitrogen fertilizer use (kilograms per hectare)`,
crop_yield=`Cereal yield (tonnes per hectare)`)
#Get the population from the land use dataset and filter out countries with fewer than
#10MM inhabitants (the filter below keeps population > 10,000,000)
land_use <- tuesdata$land_use_vs_yield_change_in_cereal_production %>%
filter(!is.na(Code),!is.na(`Cereal yield index`)) %>%
group_by(Entity,Code) %>%
summarise(population=last(`Total population (Gapminder)`)) %>%
ungroup() %>%
filter(population>10000000)
#Calculate the before and after, using the average of two years to be slightly
#more robust against outliers
final <- fertilizer %>% group_by(Entity,Code) %>%
summarise(fertilizer_n=sum(!is.na(fertilizer)),
fertilizer=sum(fertilizer,na.rm=T),
before=max((crop_yield[Year==2001]+crop_yield[Year==2000])/2),
after=max((crop_yield[Year==2016]+crop_yield[Year==2017])/2)) %>%
mutate(perc_change_crop_yield=(after/before)-1) %>%
ungroup() %>%
filter(!is.nan(perc_change_crop_yield),!is.na(perc_change_crop_yield),
fertilizer_n==16) %>%
inner_join(land_use,by=c("Code","Entity"))
#Visualization
textcol <- "midnightblue"
final %>%
mutate(code_icons=case_when(Entity=="Chile" ~ "cl",
Entity=="Netherlands" ~ "nl",
Entity=="Egypt" ~ "eg",
Entity=="France" ~ "fr",
Entity=="Germany" ~ "de",
Entity=="United Kingdom" ~ "gb",
Entity=="United States" ~ "us",
#Entity=="Peru" ~ "pe",
Entity=="South Korea" ~ "kr",
Entity=="China" ~ "cn",
Entity=="Colombia"~"co",
Entity=="Bangladesh"~"bd",
Entity=="Japan"~"jp",
Entity=="Vietnam"~"vn")) %>%
ggplot(aes(x=fertilizer,y=after))+
geom_point(size=2)+
geom_segment(aes(xend=fertilizer,yend=before))+
geom_flag(aes(country=code_icons),size=8)+
#geom_text_repel(aes(label=Entity))+
labs(x="Nitrogen fertilizer use (kg per hectare)",y="Crop yield (tonnes per hectare)",
title="Fertilizers and their effect on crop yield",
subtitle="How do crop yields change between 2002 and 2017 depending on the amount of fertilizers used?",
caption="Data from Our World In Data")+
theme(plot.background = element_rect(fill = "ivory"),
panel.background = element_rect(fill="ivory2"),
axis.title = element_text(family = "sans" ,size=14,colour=textcol),
axis.text = element_text(family = "sans" ,size=14,colour=textcol),
plot.title = element_text(family = "sans", face = "bold", size = 20, colour = textcol),
plot.subtitle = element_text(family = "sans" ,size=16, colour = textcol))
##Other ideas
# The potato map
library(tmap)
data(World)
potatoes <- key_crop_yields %>%
filter(Year>=2008,!is.na(Code)) %>%
group_by(Code,Entity) %>%
summarise(potato_tph=mean(`Potatoes (tonnes per hectare)`,na.rm=T))
World2 <- World %>%
left_join(potatoes,by=c("iso_a3"="Code"))
tm_shape(World2,projection=4326)+
tm_polygons(col="potato_tph",palette="BuGn")
World2 %>% sf::st_transform(4326) %>%
ggplot()+geom_sf(aes(fill=potato_tph))
## Corrmorant
library(corrmorant)
potatoes <- key_crop_yields %>%
filter(Year>=2008,!is.na(Code)) %>%
group_by(Code,Entity) %>%
summarise(potato_tph=mean(`Potatoes (tonnes per hectare)`,na.rm=T))
tractors <- tuesdata$cereal_yields_vs_tractor_inputs_in_agriculture %>%
filter(!is.na(Code),Year>1980,!is.na(`Tractors per 100 sq km arable land`)) %>%
group_by(Code,Entity) %>%
summarise(tractors=last(`Tractors per 100 sq km arable land`))
World %>%
sf::st_drop_geometry() %>%
left_join(potatoes,by=c("iso_a3"="Code")) %>%
left_join(tractors,by=c("iso_a3"="Code")) %>%
select(income_grp,life_exp,potato_tph,tractors) %>%
filter(!is.na(life_exp),!is.na(potato_tph),!is.na(tractors)) %>%
ggcorrm(aes(col=income_grp,fill=income_grp))+
lotri(geom_point(alpha = 0.5)) +
utri_corrtext(nrow = 2, squeeze = 0.6) +
dia_names(y_pos = 0.15, size = 3) +
dia_density(lower = 0.3, color = 1)
## Chile Profile
country <- "Chile"
key_crop_yields <- tuesdata$key_crop_yields %>% filter(Entity==country)
arable_land <- tuesdata$arable_land_pin %>% filter(Entity==country) %>%
rename(arable_land_needed=`Arable land needed to produce a fixed quantity of crops ((1.0 = 1961))`)
fertilizer <- tuesdata$cereal_crop_yield_vs_fertilizer_application %>% filter(Entity==country) %>%
rename(nitrogen=`Nitrogen fertilizer use (kilograms per hectare)`,
yield=`Cereal yield (tonnes per hectare)`)
land_use <- tuesdata$land_use_vs_yield_change_in_cereal_production %>% filter(Entity==country)
tractors <- tuesdata$cereal_yields_vs_tractor_inputs_in_agriculture %>% filter(Entity==country)
#Chile
crop_long <- key_crop_yields %>%
pivot_longer(cols=contains("tonnes"),names_to="crop",values_to="tonnes_per_hectar") %>%
separate(crop,into = "crop",sep = " ")
library(directlabels)
crop_long %>%
ggplot(aes(x=Year,y=tonnes_per_hectar,col=crop))+
geom_line(size=1)+
geom_dl(aes(label=crop),method="smart.grid")+
theme(legend.position = "none")
arable_land %>%
ggplot(aes(x=Year,
y=arable_land_needed,
group=1)) +
geom_line()+
theme(legend.position = "none")
fertilizer %>% filter(Entity%in%c("Chile")) %>%
ggplot(aes(x=Year,y=nitrogen,group=Entity))+geom_line()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GRAFitSBprofile.R
\name{GRAFitSBprofile}
\alias{GRAFitSBprofile}
\title{GRAFit: Pixel-by-pixel Surface Brightness Profile Plot.}
\usage{
GRAFitSBprofile(
image = image,
main_source = main_src,
model = NULL,
segim = segim,
comp = c("bd", "b", "d"),
centerPos = c(optim_xcen1, optim_ycen1),
pixel_scale = 0.03,
zeropoint = NULL,
plot = TRUE,
modelPlot = TRUE,
title = NULL,
col = NULL,
legend = TRUE,
legend_lab = c("Data", "Model+Noise", "Bulge", "Disk"),
legend_col = c("grey", "green", "red", "blue")
)
}
\arguments{
\item{image}{Image matrix; required, the galaxy image we want to fit a model to. The galaxy should be approximately central within this image.}
\item{main_source}{A list containing the specification of the main source in the cutout. This should be generated by the \code{ProFound}.}
\item{model}{The matrix of the model.}
\item{segim}{Segmentation matrix; optional, the full segmentation map of the image. If region is not provided then value of the central pixel is used to select the segmented pixels of the galaxy we want to fit. The log-likelihood is then computed using only these pixels. This matrix *must* be the same dimensions as image.}
\item{comp}{The components to be plotted; \code{bd}: bulge+disk, \code{b}: bulge only, \code{d}: disk only.}
\item{centerPos}{The position of the center; i.e., c(x,y)}
\item{zeropoint}{Numeric scalar; the magnitude zero point.}
\item{plot}{Logical; should the plot be generated? Otherwise only the values (SBs) will be returned.}
\item{modelPlot}{Logical; should the model profile be plotted?}
\item{title}{String; plot's title.}
\item{col}{A vector of colours.}
\item{legend}{Should a legend be annotated to the plot?}
\item{legend_lab}{A vector of legends.}
\item{legend_col}{A vector of legend's colours.}
\item{pixel_scale}{Pixel scale in units of arcseconds/pixel.}
}
\value{
The pixel-by-pixel SB plot.
}
\description{
This high-level function calculates/plots the pixel-by-pixel surface brightness (SB).
}
\examples{
-
}
\seealso{
\code{\link[GRAFit]{GRAFitEllipsePlot}}
}
\author{
Hosein Hashemizadeh
}
|
/man/GRAFitSBprofile.Rd
|
no_license
|
HoseinHashemi/GRAFit
|
R
| false
| true
| 2,196
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GRAFitSBprofile.R
\name{GRAFitSBprofile}
\alias{GRAFitSBprofile}
\title{GRAFit: Pixel-by-pixel Surface Brightness Profile Plot.}
\usage{
GRAFitSBprofile(
image = image,
main_source = main_src,
model = NULL,
segim = segim,
comp = c("bd", "b", "d"),
centerPos = c(optim_xcen1, optim_ycen1),
pixel_scale = 0.03,
zeropoint = NULL,
plot = TRUE,
modelPlot = TRUE,
title = NULL,
col = NULL,
legend = TRUE,
legend_lab = c("Data", "Model+Noise", "Bulge", "Disk"),
legend_col = c("grey", "green", "red", "blue")
)
}
\arguments{
\item{image}{Image matrix; required, the galaxy image we want to fit a model to. The galaxy should be approximately central within this image.}
\item{main_source}{A list containing the specification of the main source in the cutout. This should be generated by the \code{ProFound}.}
\item{model}{The matrix of the model.}
\item{segim}{Segmentation matrix; optional, the full segmentation map of the image. If region is not provided then value of the central pixel is used to select the segmented pixels of the galaxy we want to fit. The log-likelihood is then computed using only these pixels. This matrix *must* be the same dimensions as image.}
\item{comp}{The components to be plotted; \code{bd}: bulge+disk, \code{b}: bulge only, \code{d}: disk only.}
\item{centerPos}{The position of the center; i.e., c(x,y)}
\item{zeropoint}{Numeric scalar; the magnitude zero point.}
\item{plot}{Logical; should the plot be generated? Otherwise only the values (SBs) will be returned.}
\item{modelPlot}{Logical; should the model profile be plotted?}
\item{title}{String; plot's title.}
\item{col}{A vector of colours.}
\item{legend}{Should a legend be annotated to the plot?}
\item{legend_lab}{A vector of legends.}
\item{legend_col}{A vector of legend's colours.}
\item{pixel_scale}{Pixel scale in units of arcseconds/pixel.}
}
\value{
The pixel-by-pixel SB plot.
}
\description{
This high-level function calculates/plots the pixel-by-pixel surface brightness (SB).
}
\examples{
-
}
\seealso{
\code{\link[GRAFit]{GRAFitEllipsePlot}}
}
\author{
Hosein Hashemizadeh
}
|
## Utility methods for adding components to plot
#' Adding plot components to iheatmapr
#'
#' These are generic methods for adding new plot components to an
#' \code{link{Iheatmap-class}} object. Not intended for end users; exported for
#' developers seeking to create new Iheatmap subplots.
#' @name add_component
#' @rdname add_component
#' @param p \code{\link{Iheatmap-class}} object
#' @param name internal name
#' @docType methods
#' @aliases add_annotation,Iheatmap,IheatmapAnnotation-method
#' add_axis,IheatmapHorizontal,IheatmapX-method
#' add_axis,IheatmapHorizontal,IheatmapY-method
#' add_axis,IheatmapVertical,IheatmapX-method
#' add_axis,IheatmapVertical,IheatmapY-method
#' add_colorbar,Iheatmap,ContinuousColorbar-method
#' add_colorbar,Iheatmap,DiscreteColorbar-method
#' add_plot,Iheatmap,IheatmapPlot-method
#' add_shape,Iheatmap,IheatmapShape-method
#' @keywords internal
NULL
#' @rdname add_component
#' @param new_axis new \code{\link{IheatmapAxis-class}} object
#' @export
setGeneric("add_axis", function(p, new_axis, ...) standardGeneric("add_axis"))
#' @rdname add_component
#' @param new_colorbar new \code{\link{IheatmapColorbar-class}} object
#' @export
setGeneric("add_colorbar", function(p, new_colorbar, ...)
standardGeneric("add_colorbar"))
#' @rdname add_component
#' @param new_plot new \code{\link{IheatmapPlot-class}} object
#' @export
setGeneric("add_plot", function(p, new_plot, ...) standardGeneric("add_plot"))
#' @rdname add_component
#' @param new_shape new \code{\link{IheatmapShape-class}} object
#' @export
setGeneric("add_shape",
function(p, new_shape, ...) standardGeneric("add_shape"))
#' @rdname add_component
#' @param new_anno new \code{\link{IheatmapAnnotation-class}} object
#' @export
setGeneric("add_annotation",
function(p, new_anno, ...) standardGeneric("add_annotation"))
### Adding New Sub-plots ------------------------------------------------------
### Documented in method definitions
setGeneric("iheatmap", function(data, ...) standardGeneric("iheatmap"))
setGeneric("add_iheatmap",
function(p, data, ...) standardGeneric("add_iheatmap"))
setGeneric("main_heatmap", function(data, ...) standardGeneric("main_heatmap"))
setGeneric("add_main_heatmap",
function(p, data, ...) standardGeneric("add_main_heatmap"))
setGeneric("add_row_signal",
function(p, signal, ...) standardGeneric("add_row_signal"))
setGeneric("add_col_signal",
function(p, signal, ...) standardGeneric("add_col_signal"))
setGeneric("add_row_groups",
function(p, groups, ...) standardGeneric("add_row_groups"))
setGeneric("add_col_groups",
function(p, groups, ...) standardGeneric("add_col_groups"))
setGeneric("add_row_clusters",
function(p, clusters, ...) standardGeneric("add_row_clusters"))
setGeneric("add_col_clusters",
function(p, clusters, ...) standardGeneric("add_col_clusters"))
setGeneric("add_row_clustering",
function(p, ...) standardGeneric("add_row_clustering"))
setGeneric("add_col_clustering",
function(p, ...) standardGeneric("add_col_clustering"))
setGeneric("add_row_annotation",
function(p, ...) standardGeneric("add_row_annotation"))
setGeneric("add_col_annotation",
function(p, ...) standardGeneric("add_col_annotation"))
setGeneric("add_row_dendro",
function(p, dendro, ...) standardGeneric("add_row_dendro"))
setGeneric("add_col_dendro",
function(p, dendro, ...) standardGeneric("add_col_dendro"))
setGeneric("add_row_plot",
function(p, ...) standardGeneric("add_row_plot"))
setGeneric("add_col_plot",
function(p, ...) standardGeneric("add_col_plot"))
setGeneric("add_row_barplot",
function(p, ...) standardGeneric("add_row_barplot"))
setGeneric("add_col_barplot",
function(p, ...) standardGeneric("add_col_barplot"))
setGeneric("add_row_summary",
function(p, ...) standardGeneric("add_row_summary"))
setGeneric("add_col_summary",
function(p, ...) standardGeneric("add_col_summary"))
# Generics for adding titles, axis labels, and generic subplots to an
# Iheatmap object. (The duplicated second declaration of "add_row_labels"
# that previously appeared here was redundant -- re-running setGeneric with
# an identical definition has no effect -- and has been removed.)
setGeneric("add_col_title", function(p, ...) standardGeneric("add_col_title"))
setGeneric("add_row_title", function(p, ...) standardGeneric("add_row_title"))
setGeneric("add_col_labels", function(p, ...) standardGeneric("add_col_labels"))
setGeneric("add_row_labels", function(p, ...) standardGeneric("add_row_labels"))
setGeneric("add_subplot_horizontal",
           function(p, ...) standardGeneric("add_subplot_horizontal"))
setGeneric("add_subplot_vertical",
           function(p, ...) standardGeneric("add_subplot_vertical"))
setGeneric("add_subplot", function(p, ...) standardGeneric("add_subplot"))
# Generics for reordering heatmap rows/columns by an explicit ordering vector.
setGeneric("reorder_rows",
function(p, row_order, ...) standardGeneric("reorder_rows"))
setGeneric("reorder_cols",
function(p, col_order,...) standardGeneric("reorder_cols"))
## Methods for converting to plotly object ------------------------------------
#' Convert iheatmapr subcomponents to plotly format
#'
#' These are generic methods for converting \code{link{Iheatmap-class}} plot
#' components to plotly lists. Not intended for end users; exported for
#' developers seeking to create new Iheatmap subplots. Any new
#' \code{link{IheatmapPlot}}, \code{link{IheatmapShape}},
#' \code{link{IheatmapAnnotation}}, or \code{link{IheatmapColorbar}} child class
#' should have one of these methods.
#' @name make_component
#' @rdname make_component
#' @param x \code{\link{IheatmapPlot-class}}, \code{\link{IheatmapShape-class}},
#' or \code{\link{IheatmapAnnotation-class}} object
#' @param ... additional arguments specific to component
#' @docType methods
#' @aliases make_trace,MainHeatmap-method
#' make_trace,RowAnnotation-method
#' make_trace,ColumnAnnotation-method
#' make_trace,RowPlot-method
#' make_trace,ColumnPlot-method
#' make_trace,GenericPlot-method
#' make_shapes,Dendrogram-method
#' make_annotations,RowTitle-method
#' make_annotations,ColumnTitle-method
#' make_annotations,RowLabels-method
#' make_annotations,ColumnLabels-method
#' make_colorbar,ContinuousColorbar,IheatmapColorbarGrid-method
#' make_colorbar,DiscreteColorbar,IheatmapColorbarGrid-method
#' @keywords internal
NULL
#' @rdname make_component
#' @export
setGeneric("make_trace", function(x, ...) standardGeneric("make_trace"))
#' @rdname make_component
#' @export
setGeneric("make_shapes", function(x, ...) standardGeneric("make_shapes"))
#' @rdname make_component
#' @export
setGeneric("make_annotations",
function(x, ...) standardGeneric("make_annotations"))
#' @rdname make_component
#' @export
setGeneric("make_colorbar", function(cb, grid) standardGeneric("make_colorbar"))
setGeneric("get_layout", function(x, ...) standardGeneric("get_layout"))
setGeneric("modify_layout", function(x, ...) standardGeneric("modify_layout"))
#' @export
setGeneric("to_widget", function(p, ...) standardGeneric("to_widget"))
setGeneric("save_iheatmap",
function(p, filename, ...) standardGeneric("save_iheatmap"))
## Axis utility methods -------------------------------------------------------
setGeneric("domain_start", function(x) standardGeneric("domain_start"))
setGeneric("domain_end", function(x) standardGeneric("domain_end"))
setGeneric("id", function(x) standardGeneric("id"))
setGeneric("domain_start<-",
function(x, value) standardGeneric("domain_start<-"))
setGeneric("domain_end<-", function(x, value) standardGeneric("domain_end<-"))
setGeneric("yaxis_name", function(x, ...) standardGeneric("yaxis_name"))
setGeneric("xaxis_name", function(x, ...) standardGeneric("xaxis_name"))
setGeneric("axis_text", function(x, ...) standardGeneric("axis_text"))
# Accessors for per-axis metadata (tick values and display order); the
# replacement forms ("<-") allow in-place reordering of an axis.
setGeneric("axis_values", function(x, ...) standardGeneric("axis_values"))
setGeneric("axis_order", function(x, ...) standardGeneric("axis_order"))
setGeneric("axis_order<-", function(x, value) standardGeneric("axis_order<-"))
# Getters/setters for the collections of y and x axes attached to a plot.
setGeneric("yaxes", function(p, ...) standardGeneric("yaxes"))
setGeneric("xaxes", function(p, ...) standardGeneric("xaxes"))
setGeneric("yaxes<-", function(p, value) standardGeneric("yaxes<-"))
setGeneric("xaxes<-", function(p, value) standardGeneric("xaxes<-"))
setGeneric("buffers", function(x) standardGeneric("buffers"))
# "Current" axes: presumably the axes new subplots attach to by default --
# TODO(review) confirm against the add_axis method implementations.
setGeneric("current_xaxis", function(x) standardGeneric("current_xaxis"))
setGeneric("current_xaxis<-",
           function(x, value) standardGeneric("current_xaxis<-"))
setGeneric("current_yaxis", function(x) standardGeneric("current_yaxis"))
setGeneric("current_yaxis<-",
           function(x, value) standardGeneric("current_yaxis<-"))
## Plot utility methods -------------------------------------------------------
setGeneric("plots", function(x) standardGeneric("plots"))
setGeneric("plots<-", function(x, value) standardGeneric("plots<-"))
setGeneric("get_data", function(x, ...) standardGeneric("get_data"))
setGeneric("get_title", function(x, ...) standardGeneric("get_title"))
setGeneric("colorbar", function(x, ...) standardGeneric("colorbar"))
setGeneric("get_heatmap", function(p, ...) standardGeneric("get_heatmap"))
setGeneric("get_col_groups", function(p, ...) standardGeneric("get_col_groups"))
setGeneric("get_row_groups", function(p, ...) standardGeneric("get_row_groups"))
## Shapes utility methods ------------------------------------------------------
setGeneric("shapes", function(x) standardGeneric("shapes"))
setGeneric("shapes<-", function(x, value) standardGeneric("shapes<-"))
## Annotations utility methods -------------------------------------------------
setGeneric("annotations", function(x) standardGeneric("annotations"))
setGeneric("annotations<-", function(x, value) standardGeneric("annotations<-"))
## Colorbar Methods ----------------------------------------------------------
setGeneric("colorscale", function(colorbar, ...) standardGeneric("colorscale"))
setGeneric("colorbars", function(x, ...) standardGeneric("colorbars"))
setGeneric("colorbars<-", function(x, value) standardGeneric("colorbars<-"))
# zmin/zmax: likely the lower/upper bounds of the color mapping -- confirm in
# the colorbar method definitions.
setGeneric("zmin", function(x) standardGeneric("zmin"))
setGeneric("zmax", function(x) standardGeneric("zmax"))
setGeneric("color_palette", function(x, ...) standardGeneric("color_palette"))
setGeneric("get_colorbar_position",
           function(x, ...) standardGeneric("get_colorbar_position"))
setGeneric("get_legend_position",
           function(x, ...) standardGeneric("get_legend_position"))
|
/R/AllGenerics.R
|
permissive
|
ropensci/iheatmapr
|
R
| false
| false
| 10,619
|
r
|
## Utility methods for adding components to plot
#' Adding plot components to iheatmapr
#'
#' These are generic methods for adding new plot components to an
#' \code{link{Iheatmap-class}} object. Not intended for end users; exported for
#' developers seeking to create new Iheatmap subplots.
#' @name add_component
#' @rdname add_component
#' @param p \code{\link{Iheatmap-class}} object
#' @param name internal name
#' @docType methods
#' @aliases add_annotation,Iheatmap,IheatmapAnnotation-method
#' add_axis,IheatmapHorizontal,IheatmapX-method
#' add_axis,IheatmapHorizontal,IheatmapY-method
#' add_axis,IheatmapVertical,IheatmapX-method
#' add_axis,IheatmapVertical,IheatmapY-method
#' add_colorbar,Iheatmap,ContinuousColorbar-method
#' add_colorbar,Iheatmap,DiscreteColorbar-method
#' add_plot,Iheatmap,IheatmapPlot-method
#' add_shape,Iheatmap,IheatmapShape-method
#' @keywords internal
NULL
#' @rdname add_component
#' @param new_axis new \code{\link{IheatmapAxis-class}} object
#' @export
setGeneric("add_axis", function(p, new_axis, ...) standardGeneric("add_axis"))
#' @rdname add_component
#' @param new_colorbar new \code{\link{IheatmapColorbar-class}} object
#' @export
setGeneric("add_colorbar", function(p, new_colorbar, ...)
  standardGeneric("add_colorbar"))
#' @rdname add_component
#' @param new_plot new \code{\link{IheatmapPlot-class}} object
#' @export
setGeneric("add_plot", function(p, new_plot, ...) standardGeneric("add_plot"))
#' @rdname add_component
#' @param new_shape new \code{\link{IheatmapShape-class}} object
#' @export
setGeneric("add_shape",
           function(p, new_shape, ...) standardGeneric("add_shape"))
#' @rdname add_component
#' @param new_anno new \code{\link{IheatmapAnnotation-class}} object
#' @export
setGeneric("add_annotation",
           function(p, new_anno, ...) standardGeneric("add_annotation"))
### Adding New Sub-plots ------------------------------------------------------
### Documented in method definitions
# User-facing generics for building up a heatmap figure; each has its
# roxygen documentation attached to the corresponding setMethod() call.
setGeneric("iheatmap", function(data, ...) standardGeneric("iheatmap"))
setGeneric("add_iheatmap",
           function(p, data, ...) standardGeneric("add_iheatmap"))
setGeneric("main_heatmap", function(data, ...) standardGeneric("main_heatmap"))
setGeneric("add_main_heatmap",
           function(p, data, ...) standardGeneric("add_main_heatmap"))
setGeneric("add_row_signal",
           function(p, signal, ...) standardGeneric("add_row_signal"))
setGeneric("add_col_signal",
           function(p, signal, ...) standardGeneric("add_col_signal"))
setGeneric("add_row_groups",
           function(p, groups, ...) standardGeneric("add_row_groups"))
setGeneric("add_col_groups",
           function(p, groups, ...) standardGeneric("add_col_groups"))
setGeneric("add_row_clusters",
           function(p, clusters, ...) standardGeneric("add_row_clusters"))
setGeneric("add_col_clusters",
           function(p, clusters, ...) standardGeneric("add_col_clusters"))
setGeneric("add_row_clustering",
           function(p, ...) standardGeneric("add_row_clustering"))
setGeneric("add_col_clustering",
           function(p, ...) standardGeneric("add_col_clustering"))
setGeneric("add_row_annotation",
           function(p, ...) standardGeneric("add_row_annotation"))
setGeneric("add_col_annotation",
           function(p, ...) standardGeneric("add_col_annotation"))
setGeneric("add_row_dendro",
           function(p, dendro, ...) standardGeneric("add_row_dendro"))
setGeneric("add_col_dendro",
           function(p, dendro, ...) standardGeneric("add_col_dendro"))
setGeneric("add_row_plot",
           function(p, ...) standardGeneric("add_row_plot"))
setGeneric("add_col_plot",
           function(p, ...) standardGeneric("add_col_plot"))
setGeneric("add_row_barplot",
           function(p, ...) standardGeneric("add_row_barplot"))
setGeneric("add_col_barplot",
           function(p, ...) standardGeneric("add_col_barplot"))
setGeneric("add_row_summary",
           function(p, ...) standardGeneric("add_row_summary"))
setGeneric("add_col_summary",
           function(p, ...) standardGeneric("add_col_summary"))
## Titles, axis labels, and generic sub-plot insertion.
setGeneric("add_col_title", function(p, ...) standardGeneric("add_col_title"))
setGeneric("add_row_title", function(p, ...) standardGeneric("add_row_title"))
setGeneric("add_col_labels", function(p, ...) standardGeneric("add_col_labels"))
# Note: the original file declared add_row_labels twice; the redundant
# second (identical) setGeneric call has been removed.
setGeneric("add_row_labels", function(p, ...) standardGeneric("add_row_labels"))
setGeneric("add_subplot_horizontal",
           function(p, ...) standardGeneric("add_subplot_horizontal"))
setGeneric("add_subplot_vertical",
           function(p, ...) standardGeneric("add_subplot_vertical"))
setGeneric("add_subplot", function(p, ...) standardGeneric("add_subplot"))
# Reordering of heatmap rows/columns after construction.
setGeneric("reorder_rows",
           function(p, row_order, ...) standardGeneric("reorder_rows"))
setGeneric("reorder_cols",
           function(p, col_order,...) standardGeneric("reorder_cols"))
## Methods for converting to plotly object ------------------------------------
#' Convert iheatmapr subcomponents to plotly format
#'
#' These are generic methods for converting \code{link{Iheatmap-class}} plot
#' components to plotly lists. Not intended for end users; exported for
#' developers seeking to create new Iheatmap subplots. Any new
#' \code{link{IheatmapPlot}}, \code{link{IheatmapShape}},
#' \code{link{IheatmapAnnotation}}, or \code{link{IheatmapColorbar}} child class
#' should have one of these methods.
#' @name make_component
#' @rdname make_component
#' @param x \code{\link{IheatmapPlot-class}}, \code{\link{IheatmapShape-class}},
#' or \code{\link{IheatmapAnnotation-class}} object
#' @param ... additional arguments specific to component
#' @docType methods
#' @aliases make_trace,MainHeatmap-method
#' make_trace,RowAnnotation-method
#' make_trace,ColumnAnnotation-method
#' make_trace,RowPlot-method
#' make_trace,ColumnPlot-method
#' make_trace,GenericPlot-method
#' make_shapes,Dendrogram-method
#' make_annotations,RowTitle-method
#' make_annotations,ColumnTitle-method
#' make_annotations,RowLabels-method
#' make_annotations,ColumnLabels-method
#' make_colorbar,ContinuousColorbar,IheatmapColorbarGrid-method
#' make_colorbar,DiscreteColorbar,IheatmapColorbarGrid-method
#' @keywords internal
NULL
#' @rdname make_component
#' @export
setGeneric("make_trace", function(x, ...) standardGeneric("make_trace"))
#' @rdname make_component
#' @export
setGeneric("make_shapes", function(x, ...) standardGeneric("make_shapes"))
#' @rdname make_component
#' @export
setGeneric("make_annotations",
           function(x, ...) standardGeneric("make_annotations"))
#' @rdname make_component
#' @export
setGeneric("make_colorbar", function(cb, grid) standardGeneric("make_colorbar"))
# Layout extraction/modification for the assembled plotly figure.
setGeneric("get_layout", function(x, ...) standardGeneric("get_layout"))
setGeneric("modify_layout", function(x, ...) standardGeneric("modify_layout"))
#' @export
setGeneric("to_widget", function(p, ...) standardGeneric("to_widget"))
setGeneric("save_iheatmap",
           function(p, filename, ...) standardGeneric("save_iheatmap"))
## Axis utility methods -------------------------------------------------------
# Accessors for axis placement (plot-fraction domains) and identity.
setGeneric("domain_start", function(x) standardGeneric("domain_start"))
setGeneric("domain_end", function(x) standardGeneric("domain_end"))
setGeneric("id", function(x) standardGeneric("id"))
setGeneric("domain_start<-",
           function(x, value) standardGeneric("domain_start<-"))
setGeneric("domain_end<-", function(x, value) standardGeneric("domain_end<-"))
setGeneric("yaxis_name", function(x, ...) standardGeneric("yaxis_name"))
setGeneric("xaxis_name", function(x, ...) standardGeneric("xaxis_name"))
setGeneric("axis_text", function(x, ...) standardGeneric("axis_text"))
setGeneric("axis_values", function(x, ...) standardGeneric("axis_values"))
setGeneric("axis_order", function(x, ...) standardGeneric("axis_order"))
setGeneric("axis_order<-", function(x, value) standardGeneric("axis_order<-"))
setGeneric("yaxes", function(p, ...) standardGeneric("yaxes"))
setGeneric("xaxes", function(p, ...) standardGeneric("xaxes"))
setGeneric("yaxes<-", function(p, value) standardGeneric("yaxes<-"))
setGeneric("xaxes<-", function(p, value) standardGeneric("xaxes<-"))
setGeneric("buffers", function(x) standardGeneric("buffers"))
# "Current" axes: presumably the axes new subplots attach to by default --
# TODO(review) confirm against the add_axis method implementations.
setGeneric("current_xaxis", function(x) standardGeneric("current_xaxis"))
setGeneric("current_xaxis<-",
           function(x, value) standardGeneric("current_xaxis<-"))
setGeneric("current_yaxis", function(x) standardGeneric("current_yaxis"))
setGeneric("current_yaxis<-",
           function(x, value) standardGeneric("current_yaxis<-"))
## Plot utility methods -------------------------------------------------------
setGeneric("plots", function(x) standardGeneric("plots"))
setGeneric("plots<-", function(x, value) standardGeneric("plots<-"))
setGeneric("get_data", function(x, ...) standardGeneric("get_data"))
setGeneric("get_title", function(x, ...) standardGeneric("get_title"))
setGeneric("colorbar", function(x, ...) standardGeneric("colorbar"))
setGeneric("get_heatmap", function(p, ...) standardGeneric("get_heatmap"))
setGeneric("get_col_groups", function(p, ...) standardGeneric("get_col_groups"))
setGeneric("get_row_groups", function(p, ...) standardGeneric("get_row_groups"))
## Shapes utility methods ------------------------------------------------------
setGeneric("shapes", function(x) standardGeneric("shapes"))
setGeneric("shapes<-", function(x, value) standardGeneric("shapes<-"))
## Annotations utility methods -------------------------------------------------
setGeneric("annotations", function(x) standardGeneric("annotations"))
setGeneric("annotations<-", function(x, value) standardGeneric("annotations<-"))
## Colorbar Methods ----------------------------------------------------------
setGeneric("colorscale", function(colorbar, ...) standardGeneric("colorscale"))
setGeneric("colorbars", function(x, ...) standardGeneric("colorbars"))
setGeneric("colorbars<-", function(x, value) standardGeneric("colorbars<-"))
# zmin/zmax: likely the lower/upper bounds of the color mapping -- confirm in
# the colorbar method definitions.
setGeneric("zmin", function(x) standardGeneric("zmin"))
setGeneric("zmax", function(x) standardGeneric("zmax"))
setGeneric("color_palette", function(x, ...) standardGeneric("color_palette"))
setGeneric("get_colorbar_position",
           function(x, ...) standardGeneric("get_colorbar_position"))
setGeneric("get_legend_position",
           function(x, ...) standardGeneric("get_legend_position"))
|
library(shiny)
library(DT)
library(ggplot2)
library(ggdendro)
library(datasets)
library(rhandsontable)
library(caret)
library(psych)
library(rpart)
library(randomForest)
library(logging)
basicConfig()
# Shiny UI for "Projectissimo": a six-tab data-analysis workflow
# (CSV upload, cleaning, column selection, clustering, PCA, tree classifiers).
# Widget input/output IDs must stay in sync with the matching server code.
ui <- fluidPage(
  titlePanel("Projectissimo"),
  tabsetPanel(
    # Tab 1: upload a CSV, choose parsing options, re-type columns.
    tabPanel("CSV upload",
             sidebarLayout(
               sidebarPanel(width = 3,
                            fileInput("file1", "Please choose a CSV file:", multiple = FALSE,
                                      accept = c("text/csv",
                                                 "text/comma-separated-values,text/plain",
                                                 ".csv")),
                            tags$hr(),
                            checkboxInput("header", "Header", TRUE),
                            radioButtons("sep", "Separators",
                                         choices = c("Comma" = ",", "Semi" = ";", "Tab" = "\t"),
                                         selected = ","),
                            radioButtons("quo", "Quote",
                                         choices = c("None" = "", "Double Quote" = '"', "Single Quote" = "'"),
                                         selected = '"'),
                            tags$hr(),
                            # Editable grid used to re-type columns after upload.
                            rHandsontableOutput("datatypechange"),
                            # Only show the apply button once the grid exists.
                            conditionalPanel(
                              condition = "output.datatypechange",
                              tags$hr(),
                              actionButton("change.apply", "Change data type", icon = icon("rocket"))
                            )),
               mainPanel(dataTableOutput("textfile"))
             )),
    # Tab 2: impute NAs and treat outliers, with diagnostic plots.
    tabPanel("Data treatment",
             sidebarLayout(
               sidebarPanel(width = 6,
                            fluidRow(
                              #need conditional panel
                              column(width = 6,
                                     tags$h3("NA treatment"),
                                     rHandsontableOutput("impNA"),
                                     actionButton("impute.NA", "Apply", icon = icon("rocket"))),
                              column(width = 6,
                                     tags$h3("Outliers treatment"),
                                     rHandsontableOutput("impMinmax"),
                                     actionButton("impute.minmax", "Apply", icon = icon("rocket"))))),
               mainPanel(width = 6,
                         fluidRow(
                           column(width = 6,
                                  selectInput("coldisp", "Column to plot", choices = NULL, selected = NULL)),
                           column(width = 6,
                                  selectInput("plotdisp", "Type of plot",
                                              choices = c("Boxplot", "Histogram", "Stripchart"),
                                              selected = "Boxplot"))),
                         fluidRow(
                           plotOutput("vdisp")))
             )),
    # Tab 3: pick which columns feed the downstream analyses.
    tabPanel(
      "Column selection",
      dataTableOutput("ColSelect")
    ),
    # Tab 4: K-Means or hierarchical clustering; method-specific controls
    # are rendered server-side via uiOutput("controls").
    tabPanel("Clustering",
             sidebarLayout(
               sidebarPanel(width = 2,
                            selectInput("ctype", "Please select the clustering method",
                                        choices = c("K-Means", "Hierarchical"),
                                        selected = "K-Means"),
                            tags$hr(),
                            uiOutput("controls"),
                            tags$hr(),
                            actionButton("clupdate", "Run Clustering", icon = icon("rocket"))),
               mainPanel(
                 plotOutput("Plotidze"),
                 # Axis pickers only make sense for the K-Means scatter plot.
                 conditionalPanel(
                   condition = "output.Plotidze && input.ctype == 'K-Means'",
                   selectInput("col1", "Please select column 1",
                               choices = NULL),
                   selectInput("col2", "Please select column 2",
                               choices = NULL)
                 )
               )
             )),
    # Tab 5: principal component analysis with optional rotation.
    tabPanel("PCA",
             sidebarLayout(
               sidebarPanel(width = 2,
                            numericInput("pcnum", "Number of Principal Components", value = 2, min = 2, max = 20),
                            # "quartimax" (was misspelled "quatimax") matches the
                            # rotate options accepted by psych::principal().
                            selectInput("pcrotate", "Rotation",
                                        choices = c("none", "varimax", "quartimax", "promax"),
                                        selected = "varimax"),
                            numericInput("pcafilter", "Ignore values less than:", value = 0, min = 0, max = 1),
                            actionButton("pcarun", "Run PCA", icon = icon("rocket"))),
               mainPanel(
                 dataTableOutput("eigen"),
                 dataTableOutput("loadings")
               )
             )),
    # Tab 6: decision-tree / random-forest classification on a train/test split.
    tabPanel("Tree-based classification",
             sidebarLayout(
               sidebarPanel(width = 2,
                            numericInput("train.percent.sel", "Please select % in train", value = 75, min = 1, max = 100),
                            selectInput("preval", "Please select predict value: (factors only)", choices = NULL),
                            # Label typo fixed: "preffered" -> "preferred".
                            selectInput("treechoose", "Please select preferred method",
                                        choices = c("Decision Trees", "Random Forest"),
                                        selected = "Decision Trees"),
                            # Random-forest hyperparameters are hidden for plain trees.
                            conditionalPanel(
                              condition = "input.treechoose == 'Random Forest'",
                              numericInput("ntree.sel", "Ntree", value = 500, min = 10, max = 1000),
                              numericInput("mtry.sel", "Mtry", value = 2, min = 1, max = 15)
                            ),
                            actionButton("run.tree", "Apply", icon = icon("rocket"))),
               mainPanel(
                 column(width = 4,
                        tags$h1("Train prediction"),
                        rHandsontableOutput("train.prediction"),
                        textOutput("accutrain")),
                 column(width = 4,
                        tags$h1("Test prediction"),
                        rHandsontableOutput("test.prediction"),
                        textOutput("accutest"))
               )
             ))
  ))
|
/ui.R
|
no_license
|
olegshlykov/projectissimo
|
R
| false
| false
| 8,425
|
r
|
library(shiny)
library(DT)
library(ggplot2)
library(ggdendro)
library(datasets)
library(rhandsontable)
library(caret)
library(psych)
library(rpart)
library(randomForest)
library(logging)
basicConfig()
# Shiny UI for "Projectissimo": a six-tab data-analysis workflow
# (CSV upload, cleaning, column selection, clustering, PCA, tree classifiers).
# Widget input/output IDs must stay in sync with the matching server code.
ui <- fluidPage(
  titlePanel("Projectissimo"),
  tabsetPanel(
    # Tab 1: upload a CSV, choose parsing options, re-type columns.
    tabPanel("CSV upload",
             sidebarLayout(
               sidebarPanel(width = 3,
                            fileInput("file1", "Please choose a CSV file:", multiple = FALSE,
                                      accept = c("text/csv",
                                                 "text/comma-separated-values,text/plain",
                                                 ".csv")),
                            tags$hr(),
                            checkboxInput("header", "Header", TRUE),
                            radioButtons("sep", "Separators",
                                         choices = c("Comma" = ",", "Semi" = ";", "Tab" = "\t"),
                                         selected = ","),
                            radioButtons("quo", "Quote",
                                         choices = c("None" = "", "Double Quote" = '"', "Single Quote" = "'"),
                                         selected = '"'),
                            tags$hr(),
                            # Editable grid used to re-type columns after upload.
                            rHandsontableOutput("datatypechange"),
                            # Only show the apply button once the grid exists.
                            conditionalPanel(
                              condition = "output.datatypechange",
                              tags$hr(),
                              actionButton("change.apply", "Change data type", icon = icon("rocket"))
                            )),
               mainPanel(dataTableOutput("textfile"))
             )),
    # Tab 2: impute NAs and treat outliers, with diagnostic plots.
    tabPanel("Data treatment",
             sidebarLayout(
               sidebarPanel(width = 6,
                            fluidRow(
                              #need conditional panel
                              column(width = 6,
                                     tags$h3("NA treatment"),
                                     rHandsontableOutput("impNA"),
                                     actionButton("impute.NA", "Apply", icon = icon("rocket"))),
                              column(width = 6,
                                     tags$h3("Outliers treatment"),
                                     rHandsontableOutput("impMinmax"),
                                     actionButton("impute.minmax", "Apply", icon = icon("rocket"))))),
               mainPanel(width = 6,
                         fluidRow(
                           column(width = 6,
                                  selectInput("coldisp", "Column to plot", choices = NULL, selected = NULL)),
                           column(width = 6,
                                  selectInput("plotdisp", "Type of plot",
                                              choices = c("Boxplot", "Histogram", "Stripchart"),
                                              selected = "Boxplot"))),
                         fluidRow(
                           plotOutput("vdisp")))
             )),
    # Tab 3: pick which columns feed the downstream analyses.
    tabPanel(
      "Column selection",
      dataTableOutput("ColSelect")
    ),
    # Tab 4: K-Means or hierarchical clustering; method-specific controls
    # are rendered server-side via uiOutput("controls").
    tabPanel("Clustering",
             sidebarLayout(
               sidebarPanel(width = 2,
                            selectInput("ctype", "Please select the clustering method",
                                        choices = c("K-Means", "Hierarchical"),
                                        selected = "K-Means"),
                            tags$hr(),
                            uiOutput("controls"),
                            tags$hr(),
                            actionButton("clupdate", "Run Clustering", icon = icon("rocket"))),
               mainPanel(
                 plotOutput("Plotidze"),
                 # Axis pickers only make sense for the K-Means scatter plot.
                 conditionalPanel(
                   condition = "output.Plotidze && input.ctype == 'K-Means'",
                   selectInput("col1", "Please select column 1",
                               choices = NULL),
                   selectInput("col2", "Please select column 2",
                               choices = NULL)
                 )
               )
             )),
    # Tab 5: principal component analysis with optional rotation.
    tabPanel("PCA",
             sidebarLayout(
               sidebarPanel(width = 2,
                            numericInput("pcnum", "Number of Principal Components", value = 2, min = 2, max = 20),
                            # "quartimax" (was misspelled "quatimax") matches the
                            # rotate options accepted by psych::principal().
                            selectInput("pcrotate", "Rotation",
                                        choices = c("none", "varimax", "quartimax", "promax"),
                                        selected = "varimax"),
                            numericInput("pcafilter", "Ignore values less than:", value = 0, min = 0, max = 1),
                            actionButton("pcarun", "Run PCA", icon = icon("rocket"))),
               mainPanel(
                 dataTableOutput("eigen"),
                 dataTableOutput("loadings")
               )
             )),
    # Tab 6: decision-tree / random-forest classification on a train/test split.
    tabPanel("Tree-based classification",
             sidebarLayout(
               sidebarPanel(width = 2,
                            numericInput("train.percent.sel", "Please select % in train", value = 75, min = 1, max = 100),
                            selectInput("preval", "Please select predict value: (factors only)", choices = NULL),
                            # Label typo fixed: "preffered" -> "preferred".
                            selectInput("treechoose", "Please select preferred method",
                                        choices = c("Decision Trees", "Random Forest"),
                                        selected = "Decision Trees"),
                            # Random-forest hyperparameters are hidden for plain trees.
                            conditionalPanel(
                              condition = "input.treechoose == 'Random Forest'",
                              numericInput("ntree.sel", "Ntree", value = 500, min = 10, max = 1000),
                              numericInput("mtry.sel", "Mtry", value = 2, min = 1, max = 15)
                            ),
                            actionButton("run.tree", "Apply", icon = icon("rocket"))),
               mainPanel(
                 column(width = 4,
                        tags$h1("Train prediction"),
                        rHandsontableOutput("train.prediction"),
                        textOutput("accutrain")),
                 column(width = 4,
                        tags$h1("Test prediction"),
                        rHandsontableOutput("test.prediction"),
                        textOutput("accutest"))
               )
             ))
  ))
|
utils::globalVariables("ssenv")
#' @title Substitute new values into the input object
#'
#' @description
#' Replaces existing values found in one object with new values
#'
#' @param x A character vector of the form "name=value"
#' @param ssparams A character vector with arbitrary lines,
#' currently imagined to be .ss.params
#'
#' @details
#' For each line of x, the function: 1) finds the "name" and the "value"
#' 2) checks to see whether the "name" exists in ssparams; if not, prints a warning
#' but if so, replaces the existing line of ssparams with that line of x.
#'
#' Not expected to be used directly.
#'
#' @return The modified ssparams.
subin = function (x, ssparams) {
  # "name=" prefixes of the existing parameter lines; these are the keys we
  # match against. Updating a line never changes its prefix (only text after
  # the first "=" is replaced), so this can safely be computed once.
  prefixes = substr(ssparams, 1, regexpr("=", ssparams))
  # seq_along() (not 1:length(x)) so a zero-length x does nothing.
  for (i in seq_along(x)) {
    eq = regexpr("=", x[i])
    inprm = substr(x[i], 1, eq)                # "name=" ("" when no "=" present)
    indef = substr(x[i], eq + 1, nchar(x[i]))  # everything after the "="
    hit = which(prefixes == inprm)
    if (length(hit) == 0)
      # No such parameter (also reached when the input line lacks an "=").
      warning('Trouble! There is no parameter "', substr(inprm, 1, nchar(inprm) - 1),
              '"', call. = FALSE)
    else {
      # Replace every matching line (duplicates, if any, are all updated).
      ssparams[hit] = paste0(inprm, indef)
    }
  }
  return(ssparams)
}
# test whether this works appropriately when there is no = in an input line
# test whether it works if "name = value", as well as "name=value".
# most likely I should re-do to extract the = from inorm and remove trailing blanks
#' @title Change list version of parameters into char vector
#'
#' @description
#' Turns a list of options into a charvar of options
#'
#' @details
#' The resulting charvar has values such as "name=value" where "name" was the named item
#' of the list.
#'
#' @return
#' A character vector
#'
#' Not expected to be used directly.
#'
#' @param x A list.
#'
charlistopts = function (x) {
  # Collapse list(name = value, ...) into the c("name=value", ...) form that
  # subin() and ss.options() consume.
  paste(names(x), unlist(x), sep = "=")
}
#Huge ups to http://digitheadslabnotebook.blogspot.com/2011/06/environments-in-r.html
#which helped me get the scoping to play out correctly.
#ss.options will: 1) return the current values of .ss.params, if no invals
# 2) Reset the values of .ss.params, if reset==TRUE
# 3) change the values of the listed parameters, if a) invals =
# c("param=value","param=value") or list(param="value")
#' @title Set or reset parameters to be used by SaTScan
#'
#' @description Set or reset parameters to be used by SaTScan
#'
#' @details \code{ss.options()} is intended to function like \code{par()} or
#' \code{options()}. There is a default set of parameter settings that resembles
#' the one used by SaTScan, except that it produces all possible output files and
#' makes them as .dbf files instead of text.
#'
#' @param invals A list with entries of the form name=value, where value should be
#' in quotes unless it is a number. Alternatively, may be a character vector whose
#' entries are of the form "name=value". The "name" in either case should be a
#' valid SaTScan parameter name; unrecognized names will generate a warning and will
#' do nothing.
#' @param reset If TRUE, will restore the default parameter values described in
#' the "Details" section.
#' @return If \code{invals == NULL}, returns the current parameter set,
#' as altered by previous
#' calls to \code{ss.options()} since the last call with \code{reset=TRUE}. Otherwise
#' returns modified parameter set invisibly. The side effect, if \code{invals != NULL}, is to
#' set the current values of the parameters per the value of \code{invals}
#' and \code{reset}.
#'
#' @export
#'
#' @examples
#' \dontrun{
#' head(ss.options(),3)
#' ss.options(list(CaseFile="NYCfever.cas"))
#' head(ss.options(),3)
#'
#' # reset; shows whole parameter file without invisible()
#' invisible(ss.options(reset=TRUE))
#' head(ss.options(),3)
#' }
#'
ss.options = function (invals=NULL, reset=FALSE) {
  # Restore defaults FIRST, so reset = TRUE combined with invals applies the
  # new values on top of the defaults. (Previously the pre-reset parameters
  # were captured before resetting, silently discarding the reset.)
  if (isTRUE(reset)) ssenv$.ss.params = ssenv$.ss.params.defaults
  inparms = ssenv$.ss.params
  # No new values: just report the current parameter set.
  if (is.null(invals)) {return(ssenv$.ss.params)}
  else {
    # Accept either list(name = value) or c("name=value") input forms.
    if (is.list(invals)) invals = charlistopts(invals)
    ssenv$.ss.params = subin(invals, inparms)
    invisible(ssenv$.ss.params)
  }
}
# review the help text for logic-- matches function??
#I need to think about how this will work when called by another function.
# Do I need to re-think this? There is
# a [Multiple Data Sets] line already...
#' @title Add lines to the current SaTScan parameter list
#'
#' @description Allows you to add arbitrary lines to the current set
#' of SaTScan parameters
#'
#' @details For certain SaTScan models or inputs (multiple data sets,
#' Polygon),
#' SaTScan allows a variable number of parameters; these
#' parameters are not used/allowed for other models or inputs.
#' This function allows the user to add
#' arbitray lines to the current list of
#' parameters. In addition to the options mentioned, it could also be
#' used to add comments to the parameter file.
#'
#' @param invals A character vector, which will be added to the end of the
#' current parameter list.
#'
#' @return Nothing.
ss.options.extra = function(invals=NULL) {
  if (is.null(invals)) stop("This function doesn't do anything when there is no input")
  # is.character() instead of class() comparison (robust and idiomatic).
  if (!is.character(invals)) stop("Please input a character vector")
  # Lines (e.g. [Polygon] entries or comments) are appended verbatim.
  ssenv$.ss.params = c(ssenv$.ss.params, invals)
  invisible()
}
# for help page: examples of [Polygon] and Multiple Data Sets
# Functions to write out the param file
# Probably a really bad idea to make matchout = FALSE-- only useful to write file
# from R but examine output manually
#' @title Write the SaTScan parameter file
#'
#' @description Writes the current set of SaTScan parameters to a
#' specified location in the OS.
#'
#' @details The current SaTScan options can be reset or modified
#' \code{ss.options()} and/or \code{ss.options.extra()}. Once
#' they are set as desired, they can be written to the OS
#' using this function.
#'
#' @param location A directory location, excluding the trailing "/".
#' @param filename The name of the file to be written to the OS;
#' The extension ".prm" will be appended.
#' @param matchout If false, the ResultsFile parameter will not
#' be touched; note that this will likely result in undesirable
#' performance from calls to \code{satscan()} using the parameter file.
#' If true, the ResultsFile is reset to share the filename given here.
#'
#' @return Nothing. (Invisibly.) Side effect is to write a file
#' in the OS.
#'
#'
#' @examples
#' \dontrun{
#' ## Would write the current ss.options() to c:/temp/NYCfever.prm
#' write.ss.prm("c:/tmp","NYCfever")
#' }
#'
#'
#'
#' @export
#' @seealso \code{\link{ss.options}}, \code{\link{ss.options.extra}}
#'
#
# I should change this to detect and deal with the trailing /.
# change docs to cross-link.
write.ss.prm = function(location, filename, matchout = TRUE) {
  # Mirror the file name into the ResultsFile parameter so a later satscan()
  # run writes its results alongside this parameter file.
  if (matchout) ss.options(list(ResultsFile=paste0(filename,".txt")))
  # Tolerate a trailing "/" on the directory (e.g. "c:/tmp/" and "c:/tmp").
  location = sub("/+$", "", location)
  fileconn = file(file.path(location, paste0(filename, ".prm")))
  # Guarantee the connection is closed even if writeLines() fails.
  on.exit(close(fileconn), add = TRUE)
  writeLines(ssenv$.ss.params, fileconn)
  invisible()
}
#Testing
#ss.options(c("CaseFile=blue","ControlFile=red"))
#ss.options("CaseFile=orange")
#head(.ss.params)
#check = ss.options(reset=TRUE)
#head(check)
|
/rsatscan/R/params.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 7,468
|
r
|
utils::globalVariables("ssenv")
#' @title Substitute new values into the input object
#'
#' @description
#' Replaces existing values found in one object with new values
#'
#' @param x A character vector of the form "name=value"
#' @param ssparams A character vector with arbitrary lines,
#' currently imagined to be .ss.params
#'
#' @details
#' For each line of x, the function: 1) finds the "name" and the "value"
#' 2) checks to see whether the "name" exists in ssparams; if not, prints a warning
#' but if so, replaces the existing line of ssparams with that line of x.
#'
#' Not expected to be used directly.
#'
#' @return The modified ssparams.
subin = function (x, ssparams) {
  # "name=" prefixes of the existing parameter lines; these are the keys we
  # match against. Updating a line never changes its prefix (only text after
  # the first "=" is replaced), so this can safely be computed once.
  prefixes = substr(ssparams, 1, regexpr("=", ssparams))
  # seq_along() (not 1:length(x)) so a zero-length x does nothing.
  for (i in seq_along(x)) {
    eq = regexpr("=", x[i])
    inprm = substr(x[i], 1, eq)                # "name=" ("" when no "=" present)
    indef = substr(x[i], eq + 1, nchar(x[i]))  # everything after the "="
    hit = which(prefixes == inprm)
    if (length(hit) == 0)
      # No such parameter (also reached when the input line lacks an "=").
      warning('Trouble! There is no parameter "', substr(inprm, 1, nchar(inprm) - 1),
              '"', call. = FALSE)
    else {
      # Replace every matching line (duplicates, if any, are all updated).
      ssparams[hit] = paste0(inprm, indef)
    }
  }
  return(ssparams)
}
# test whether this works appropriately when there is no = in an input line
# test whether it works if "name = value", as well as "name=value".
# most likely I should re-do to extract the = from inorm and remove trailing blanks
#' @title Change list version of parameters into char vector
#'
#' @description
#' Turns a list of options into a charvar of options
#'
#' @details
#' The resulting charvar has values such as "name=value" where "name" was the named item
#' of the list.
#'
#' @return
#' A character vector
#'
#' Not expected to be used directly.
#'
#' @param x A list.
#'
charlistopts = function (x) {
  # Collapse list(name = value, ...) into the c("name=value", ...) form that
  # subin() and ss.options() consume.
  paste(names(x), unlist(x), sep = "=")
}
#Huge ups to http://digitheadslabnotebook.blogspot.com/2011/06/environments-in-r.html
#which helped me get the scoping to play out correctly.
#ss.options will: 1) return the current values of .ss.params, if no invals
# 2) Reset the values of .ss.params, if reset==TRUE
# 3) change the values of the listed parameters, if a) invals =
# c("param=value","param=value") or list(param="value")
#' @title Set or reset parameters to be used by SaTScan
#'
#' @description Set or reset parameters to be used by SaTScan
#'
#' @details \code{ss.options()} is intended to function like \code{par()} or
#' \code{options()}. There is a default set of parameter settings that resembles
#' the one used by SaTScan, except that it produces all possible output files and
#' makes them as .dbf files instead of text.
#'
#' @param invals A list with entries of the form name=value, where value should be
#' in quotes unless it is a number. Alternatively, may be a character vector whose
#' entries are of the form "name=value". The "name" in either case should be a
#' valid SaTScan parameter name; unrecognized names will generate a warning and will
#' do nothing.
#' @param reset If TRUE, will restore the default parameter values described in
#' the "Details" section.
#' @return If \code{invals == NULL}, returns the current parameter set,
#' as altered by previous
#' calls to \code{ss.options()} since the last call with \code{reset=TRUE}. Otherwise
#' returns modified parameter set invisibly. The side effect, if \code{invals != NULL}, is to
#' set the current values of the parameters per the value of \code{invals}
#' and \code{reset}.
#'
#' @export
#'
#' @examples
#' \dontrun{
#' head(ss.options(),3)
#' ss.options(list(CaseFile="NYCfever.cas"))
#' head(ss.options(),3)
#'
#' # reset; shows whole parameter file without invisible()
#' invisible(ss.options(reset=TRUE))
#' head(ss.options(),3)
#' }
#'
ss.options = function (invals=NULL, reset=FALSE) {
  # Restore defaults FIRST, so reset = TRUE combined with invals applies the
  # new values on top of the defaults. (Previously the pre-reset parameters
  # were captured before resetting, silently discarding the reset.)
  if (isTRUE(reset)) ssenv$.ss.params = ssenv$.ss.params.defaults
  inparms = ssenv$.ss.params
  # No new values: just report the current parameter set.
  if (is.null(invals)) {return(ssenv$.ss.params)}
  else {
    # Accept either list(name = value) or c("name=value") input forms.
    if (is.list(invals)) invals = charlistopts(invals)
    ssenv$.ss.params = subin(invals, inparms)
    invisible(ssenv$.ss.params)
  }
}
# review the help text for logic-- matches function??
#I need to think about how this will work when called by another function.
# Do I need to re-think this? There is
# a [Multiple Data Sets] line already...
#' @title Add lines to the current SaTScan parameter list
#'
#' @description Allows you to add arbitrary lines to the current set
#' of SaTScan parameters
#'
#' @details For certain SaTScan models or inputs (multiple data sets,
#' Polygon),
#' SaTScan allows a variable number of parameters; these
#' parameters are not used/allowed for other models or inputs.
#' This function allows the user to add
#' arbitray lines to the current list of
#' parameters. In addition to the options mentioned, it could also be
#' used to add comments to the parameter file.
#'
#' @param invals A character vector, which will be added to the end of the
#' current parameter list.
#'
#' @return Nothing.
ss.options.extra = function(invals=NULL) {
  if (is.null(invals)) stop("This function doesn't do anything when there is no input")
  # is.character() instead of class() comparison (robust and idiomatic).
  if (!is.character(invals)) stop("Please input a character vector")
  # Lines (e.g. [Polygon] entries or comments) are appended verbatim.
  ssenv$.ss.params = c(ssenv$.ss.params, invals)
  invisible()
}
# for help page: examples of [Polygon] and Multiple Data Sets
# Functions to write out the param file
# Probably a really bad idea to make matchout = FALSE-- only useful to write file
# from R but examine output manually
#' @title Write the SaTScan parameter file
#'
#' @description Writes the current set of SaTScan parameters to a
#' specified location in the OS.
#'
#' @details The current SaTScan options can be reset or modified
#' \code{ss.options()} and/or \code{ss.options.extra()}. Once
#' they are set as desired, they can be written to the OS
#' using this function.
#'
#' @param location A directory location, excluding the trailing "/".
#' @param filename The name of the file to be written to the OS;
#' The extension ".prm" will be appended.
#' @param matchout If false, the ResultsFile parameter will not
#' be touched; note that this will likely result in undesirable
#' performance from calls to \code{satscan()} using the parameter file.
#' If true, the ResultsFile is reset to share the filename given here.
#'
#' @return Nothing. (Invisibly.) Side effect is to write a file
#' in the OS.
#'
#'
#' @examples
#' \dontrun{
#' ## Would write the current ss.options() to c:/temp/NYCfever.prm
#' write.ss.prm("c:/tmp","NYCfever")
#' }
#'
#'
#'
#' @export
#' @seealso \code{\link{ss.options}}, \code{\link{ss.options.extra}}
#'
#
# I should change this to detect and deal with the trailing /.
# change docs to cross-link.
write.ss.prm = function(location, filename, matchout = TRUE) {
  # Mirror the file name into the ResultsFile parameter so a later satscan()
  # run writes its results alongside this parameter file.
  if (matchout) ss.options(list(ResultsFile=paste0(filename,".txt")))
  # Tolerate a trailing "/" on the directory (e.g. "c:/tmp/" and "c:/tmp").
  location = sub("/+$", "", location)
  fileconn = file(file.path(location, paste0(filename, ".prm")))
  # Guarantee the connection is closed even if writeLines() fails.
  on.exit(close(fileconn), add = TRUE)
  writeLines(ssenv$.ss.params, fileconn)
  invisible()
}
#Testing
#ss.options(c("CaseFile=blue","ControlFile=red"))
#ss.options("CaseFile=orange")
#head(.ss.params)
#check = ss.options(reset=TRUE)
#head(check)
|
# --- Command-line parsing: locate the required -dir argument -----------------
options <- commandArgs(trailingOnly = T)
# which() returns integer(0) when no argument matches "-dir".
check_for_default <- integer(0)
directory_name <- which(grepl("-dir", options))
quitNow <- 0
# Missing or duplicated -dir flag: print the message, then exit below.
if(length(directory_name) > 1 | identical(check_for_default, directory_name)){
print("Metapop requires the name of the directory where the assembly and aligned files exist. This must be a single directory. Metapop will exit after checking for other required params.")
flush.console()
quitNow <- 1
}else{
# The directory path is the value immediately following the -dir flag.
directory_name <- options[directory_name + 1]
}
if(quitNow == 1){
quit(save="no")
}
# Work inside the user-supplied project directory; all paths below are relative.
setwd(directory_name)
#dev
#setwd("C:/Users/Kenji/Desktop/Metapop_Test_Cases/old_mock")
# Run settings were written by an earlier Metapop step as a two-column TSV
# (parameter, setting); coerce both columns to character for safe lookups.
run_parameters <- read.csv("metapop_run_settings/run_settings.tsv", sep = "\t")
run_parameters$parameter <- as.character(run_parameters$parameter)
run_parameters$setting <- as.character(run_parameters$setting)
library_location <- run_parameters$setting[run_parameters$parameter == "Library Location"]
threads <- as.numeric(run_parameters$setting[run_parameters$parameter == "Threads"])
genes_file <- run_parameters$setting[run_parameters$parameter == "Genes"]
#dev
#library_location <- .libPaths()
suppressMessages(suppressWarnings(library(doParallel, lib.loc = library_location)))
suppressMessages(suppressWarnings(library(data.table, lib.loc = library_location)))
suppressMessages(suppressWarnings(library(ggplot2, lib.loc = library_location)))
suppressMessages(suppressWarnings(library(Biostrings, lib.loc = library_location)))
suppressMessages(suppressWarnings(library(cowplot, lib.loc = library_location)))
# One coverage/depth TSV per sample; keep contigs passing both thresholds.
passing_contigs <- list.files(path = "metapop_cov_and_depth/", pattern = ".tsv", full.names = T)
min_cov <- as.numeric(run_parameters$setting[run_parameters$parameter=="Coverage"])
min_dep <- as.numeric(run_parameters$setting[run_parameters$parameter=="Depth"])
# Read the per-sample tables in parallel; never spawn more workers than files.
cl <- makeCluster(min(detectCores(), threads, length(passing_contigs)))
clusterExport(cl, varlist=c("passing_contigs", "min_cov", "min_dep", "library_location"))
clusterEvalQ(cl, suppressMessages(suppressWarnings(library(data.table, lib.loc = library_location))))
registerDoParallel(cl)
# Headerless TSV columns: V1 = contig name, V3 = coverage, V4 = depth.
passing_contigs <- unique(foreach(i=passing_contigs, .combine=c) %dopar% {
tmp <- fread(i, sep="\t")
return(tmp$V1[tmp$V3>=min_cov & tmp$V4 >= min_dep])
})
stopCluster(cl)
#Already per contig
# Per-gene IQR/mean summary from the codon bias step, restricted to contigs
# that passed the coverage/depth filter above.
codon_bias_iqr <- fread(list.files(full.names = T, path = "metapop_codon_bias/", pattern="gene_IQR_and_mean.tsv"), sep = "\t")
codon_bias_iqr <- codon_bias_iqr[codon_bias_iqr$parent_contig %in% passing_contigs,]
# Gene FASTA headers carry name/start/end/strand fields separated by "#"
# (looks like Prodigal-style output — TODO confirm upstream gene caller).
genes <- readDNAStringSet(genes_file)
s <- strsplit(names(genes), "[# \t]+") # split names by tab/space
genes <- data.table(matrix(unlist(s), ncol=5, byrow=T))
names(genes)[1:4] = c("contig_gene", "start", "end", "OC")
genes$start <- as.numeric((genes$start))
genes$end <- as.numeric((genes$end))
genes <- genes[,-5]
# Figure out what contig they come from, mostly for cleaning purposes
genes$parent_contig <- gsub("_\\d+$", "", genes$contig_gene)
genes <- genes[genes$parent_contig %in% passing_contigs,]
# Attach gene coordinates/strand to the euclidean-distance table by gene name.
codon_bias_genes <- fread(list.files(full.names = T, path = "metapop_codon_bias/", pattern="gene_euclidean_distances.tsv"), sep = "\t")
codon_bias_genes <- codon_bias_genes[codon_bias_genes$parent_contig %in% passing_contigs,]
codon_bias_genes$start <- genes$start[match(codon_bias_genes$gene, genes$contig_gene)]
codon_bias_genes$end <- genes$end[match(codon_bias_genes$gene, genes$contig_gene)]
codon_bias_genes$strand <- genes$OC[match(codon_bias_genes$gene, genes$contig_gene)]
rm(genes)
#presplit into contigs
# Split the gene table into a named list, one data.table per parent contig.
namesave = unique(codon_bias_genes$parent_contig)
codon_bias_genes <- codon_bias_genes[, list(list(.SD)), by = parent_contig]$V1
names(codon_bias_genes) = namesave
# Rescale each contig's gene distances onto [2, 4] — the radial band used by
# the ring plot (2 = inner edge, 4 = outer edge of the ring).
codon_bias_genes <- lapply(codon_bias_genes, function(x){
l <- min(x$euc_dist)
u <- max(x$euc_dist)
x$relative_dist <- 2+(((x$euc_dist - l)/(u-l))*2)
return(x)
})
# Contigs with no predicted genes cannot be plotted; drop them with a notice.
if(!all(passing_contigs %in% names(codon_bias_genes))){
print("There's some contigs in the samples that weren't found in the genes file.")
print("There may be cases where contigs had no predicted genes, or where the codon bias of a set of genes could not be calculated completely.")
print("Only contigs with predicted genes can/will be plotted.")
passing_contigs <- passing_contigs[passing_contigs %in% names(codon_bias_genes)]
}
#If I normalize the gene distance on 0-1, then I can make a consistent width plot
# Throwaway plot whose only purpose is to supply a shared two-entry legend
# (typical vs. abnormal codon use), extracted below with get_legend().
color_legend_plot <- ggplot(data = data.table(dots = c(1,2)), aes(x = dots, fill = factor(dots))) +
geom_bar()+
scale_fill_manual(name = "Gene Codon Bias", values = alpha(c("#2ca9e1", "#FF0000"), 1), labels = c("Typical Codon Use", "Abnormal Codon Use"))+
theme(legend.text = element_text(size = 14),
legend.title = element_text(size = 14))
color_leg <- get_legend(color_legend_plot)
# Circular ("ring") plot of per-gene codon usage bias for a single contig.
#
# contig       - contig name used in the plot title.
# gene_profile - data.table for this contig's genes with columns start, end,
#                euc_dist, relative_dist (euc_dist rescaled onto [2, 4]) and
#                outlier_status (drives the two-color fill).
# thresholds   - numeric summary stats for this contig; an all-zero vector
#                means the contig could not be scored, so NA is returned and
#                the caller filters the plot out.
# leg          - shared color legend grob (built once above as color_leg).
#
# Returns a cowplot grid (plot + legend), or NA when thresholds are all zero.
codon_bias_ringplot <- function(contig, gene_profile, thresholds, leg = color_leg){
  #iqr <- round(thresholds[1], 2)
  #mu <- round(thresholds[2], 2)
  #outlier_bound <- round(thresholds[3], 2)
  if(all(thresholds == 0)){
    return(NA)
  }
  p <- ggplot(gene_profile, aes(fill=factor(outlier_status), xmin=2, xmax=relative_dist, ymin=start, ymax=end))+
    # Grey backdrop ring spanning the full radial band and contig length.
    annotate("rect", xmin = 1.992, xmax = 4, ymin=0, ymax = max(gene_profile$end), fill = "grey65", color = "black") +
    geom_rect() +
    coord_polar(theta="y") +
    xlim(c(0, 4)) +
    # Leave a gap in the circle (1/3 of the contig span) for the annotations.
    ylim(c(0, max(gene_profile$end*4/3))) +
    theme(panel.background = element_blank(),
          axis.text.y = element_blank(),
          axis.title.y = element_blank(),
          axis.ticks.y = element_blank(),
          axis.text.x = element_blank(),
          axis.title.x = element_blank(),
          axis.ticks.x = element_blank(),
          axis.line = element_blank())+
    guides(fill = FALSE) +  # was `F`: always spell out FALSE (F is reassignable)
    ggtitle(paste0(contig, "\nCodon Usage Bias Distances")) +
    scale_fill_manual(values = c("#2ca9e1", "#FF0000")) +
    annotate("text", y = 0, x = 0, hjust = 0.5, label = paste("Contig Length\n", max(gene_profile$end), " bp", sep = ""), vjust = 0.5)+
    annotate("text", x = 3.1, y = max(gene_profile$end)*1.065, label = "Gene\nEuclidean\nDistance", vjust = 0, hjust = 0.5) +
    #tick labels
    annotate("text", y = max(gene_profile$end)*1.014, x = 4, label = paste0(round(max(gene_profile$euc_dist), 3), ""), vjust = 1, hjust = 0, angle = 90)+
    annotate("text", y = max(gene_profile$end)*1.030, x = 2, label = paste0(round(min(gene_profile$euc_dist), 3), ""), vjust = 0, hjust = 0, angle = 90)+
    #baseline
    #annotate("segment", x = 2, xend = 4, y = max(gene_profile$end)*1.025, yend = max(gene_profile$end)*1.014) +
    #ticks
    annotate("segment", x = 1.992, xend = 1.996, y = max(gene_profile$end), yend = max(gene_profile$end)*1.016) +
    annotate("segment", x = 2.66, xend = 2.66, y = max(gene_profile$end), yend = max(gene_profile$end)*1.012) +
    annotate("segment", x = 3.33, xend = 3.33, y = max(gene_profile$end), yend = max(gene_profile$end)*1.010) +
    annotate("segment", x = 4, xend = 4, y = max(gene_profile$end), yend = max(gene_profile$end)*1.008)
  # Pad left/right and place the shared legend beside the ring.
  p <- plot_grid(NULL, p, leg, NULL, ncol = 4, rel_widths = c(.3, .8, .15, .2))
  return(p)
}
# Batch contigs so each rendering pass hands roughly one contig per worker.
# NOTE(review): 1:length(passing_contigs) misbehaves for an empty vector;
# seq_along() would be safer — confirm passing_contigs is always non-empty.
groups <- (1:length(passing_contigs))%/%(min(threads, detectCores()))
unique_groups <- unique(groups)
if(!dir.exists("metapop_visualizations")){
dir.create("metapop_visualizations")
}
cl <- makeCluster(min(threads, detectCores()))
clusterExport(cl, varlist = c("library_location", "groups", "unique_groups", "color_leg"), envir = environment())
clusterEvalQ(cl, expr = suppressMessages(suppressWarnings(library(data.table, lib.loc = library_location))))
clusterEvalQ(cl, expr = suppressMessages(suppressWarnings(library(ggplot2, lib.loc = library_location))))
clusterEvalQ(cl, expr = suppressMessages(suppressWarnings(library(cowplot, lib.loc = library_location))))
# All plots go into a single multi-page PDF opened on the master process.
pdf("metapop_visualizations/codon_bias_plots.pdf", height = 9, width = 12)
registerDoParallel(cl)
# Render one batch of contigs per iteration: plots are built in parallel,
# then printed serially into the open PDF device.
for(k in unique_groups){
  CB_plots <- foreach(i = passing_contigs[groups == k]) %dopar% {
    # BUGFIX: `leg = color_leg` was previously misplaced inside the
    # as.numeric() call instead of being passed to codon_bias_ringplot().
    codon_bias_ringplot(contig = i,
                        gene_profile = codon_bias_genes[[which(names(codon_bias_genes) == i)]],
                        thresholds = as.numeric(codon_bias_iqr[which(codon_bias_iqr$parent_contig == i), 2:4]),
                        leg = color_leg)
  }
  # Drop contigs whose thresholds were all zero (ringplot returned NA).
  CB_plots <- CB_plots[!is.na(CB_plots)]
  #prevents superfluous outputs
  if(length(CB_plots) > 0){
    for(i in CB_plots){
      print(i)
    }
  }
}
stopCluster(cl)
dev.off()
|
/MetaPop_Codon_Bias_Viz.R
|
no_license
|
Thexiyang/metapop
|
R
| false
| false
| 8,649
|
r
|
# --- Command-line parsing: locate the required -dir argument -----------------
options <- commandArgs(trailingOnly = T)
# which() returns integer(0) when no argument matches "-dir".
check_for_default <- integer(0)
directory_name <- which(grepl("-dir", options))
quitNow <- 0
# Missing or duplicated -dir flag: print the message, then exit below.
if(length(directory_name) > 1 | identical(check_for_default, directory_name)){
print("Metapop requires the name of the directory where the assembly and aligned files exist. This must be a single directory. Metapop will exit after checking for other required params.")
flush.console()
quitNow <- 1
}else{
# The directory path is the value immediately following the -dir flag.
directory_name <- options[directory_name + 1]
}
if(quitNow == 1){
quit(save="no")
}
# Work inside the user-supplied project directory; all paths below are relative.
setwd(directory_name)
#dev
#setwd("C:/Users/Kenji/Desktop/Metapop_Test_Cases/old_mock")
# Run settings were written by an earlier Metapop step as a two-column TSV
# (parameter, setting); coerce both columns to character for safe lookups.
run_parameters <- read.csv("metapop_run_settings/run_settings.tsv", sep = "\t")
run_parameters$parameter <- as.character(run_parameters$parameter)
run_parameters$setting <- as.character(run_parameters$setting)
library_location <- run_parameters$setting[run_parameters$parameter == "Library Location"]
threads <- as.numeric(run_parameters$setting[run_parameters$parameter == "Threads"])
genes_file <- run_parameters$setting[run_parameters$parameter == "Genes"]
#dev
#library_location <- .libPaths()
suppressMessages(suppressWarnings(library(doParallel, lib.loc = library_location)))
suppressMessages(suppressWarnings(library(data.table, lib.loc = library_location)))
suppressMessages(suppressWarnings(library(ggplot2, lib.loc = library_location)))
suppressMessages(suppressWarnings(library(Biostrings, lib.loc = library_location)))
suppressMessages(suppressWarnings(library(cowplot, lib.loc = library_location)))
# One coverage/depth TSV per sample; keep contigs passing both thresholds.
passing_contigs <- list.files(path = "metapop_cov_and_depth/", pattern = ".tsv", full.names = T)
min_cov <- as.numeric(run_parameters$setting[run_parameters$parameter=="Coverage"])
min_dep <- as.numeric(run_parameters$setting[run_parameters$parameter=="Depth"])
# Read the per-sample tables in parallel; never spawn more workers than files.
cl <- makeCluster(min(detectCores(), threads, length(passing_contigs)))
clusterExport(cl, varlist=c("passing_contigs", "min_cov", "min_dep", "library_location"))
clusterEvalQ(cl, suppressMessages(suppressWarnings(library(data.table, lib.loc = library_location))))
registerDoParallel(cl)
# Headerless TSV columns: V1 = contig name, V3 = coverage, V4 = depth.
passing_contigs <- unique(foreach(i=passing_contigs, .combine=c) %dopar% {
tmp <- fread(i, sep="\t")
return(tmp$V1[tmp$V3>=min_cov & tmp$V4 >= min_dep])
})
stopCluster(cl)
#Already per contig
# Per-gene IQR/mean summary from the codon bias step, restricted to contigs
# that passed the coverage/depth filter above.
codon_bias_iqr <- fread(list.files(full.names = T, path = "metapop_codon_bias/", pattern="gene_IQR_and_mean.tsv"), sep = "\t")
codon_bias_iqr <- codon_bias_iqr[codon_bias_iqr$parent_contig %in% passing_contigs,]
# Gene FASTA headers carry name/start/end/strand fields separated by "#"
# (looks like Prodigal-style output — TODO confirm upstream gene caller).
genes <- readDNAStringSet(genes_file)
s <- strsplit(names(genes), "[# \t]+") # split names by tab/space
genes <- data.table(matrix(unlist(s), ncol=5, byrow=T))
names(genes)[1:4] = c("contig_gene", "start", "end", "OC")
genes$start <- as.numeric((genes$start))
genes$end <- as.numeric((genes$end))
genes <- genes[,-5]
# Figure out what contig they come from, mostly for cleaning purposes
genes$parent_contig <- gsub("_\\d+$", "", genes$contig_gene)
genes <- genes[genes$parent_contig %in% passing_contigs,]
# Attach gene coordinates/strand to the euclidean-distance table by gene name.
codon_bias_genes <- fread(list.files(full.names = T, path = "metapop_codon_bias/", pattern="gene_euclidean_distances.tsv"), sep = "\t")
codon_bias_genes <- codon_bias_genes[codon_bias_genes$parent_contig %in% passing_contigs,]
codon_bias_genes$start <- genes$start[match(codon_bias_genes$gene, genes$contig_gene)]
codon_bias_genes$end <- genes$end[match(codon_bias_genes$gene, genes$contig_gene)]
codon_bias_genes$strand <- genes$OC[match(codon_bias_genes$gene, genes$contig_gene)]
rm(genes)
#presplit into contigs
# Split the gene table into a named list, one data.table per parent contig.
namesave = unique(codon_bias_genes$parent_contig)
codon_bias_genes <- codon_bias_genes[, list(list(.SD)), by = parent_contig]$V1
names(codon_bias_genes) = namesave
# Rescale each contig's gene distances onto [2, 4] — the radial band used by
# the ring plot (2 = inner edge, 4 = outer edge of the ring).
codon_bias_genes <- lapply(codon_bias_genes, function(x){
l <- min(x$euc_dist)
u <- max(x$euc_dist)
x$relative_dist <- 2+(((x$euc_dist - l)/(u-l))*2)
return(x)
})
# Contigs with no predicted genes cannot be plotted; drop them with a notice.
if(!all(passing_contigs %in% names(codon_bias_genes))){
print("There's some contigs in the samples that weren't found in the genes file.")
print("There may be cases where contigs had no predicted genes, or where the codon bias of a set of genes could not be calculated completely.")
print("Only contigs with predicted genes can/will be plotted.")
passing_contigs <- passing_contigs[passing_contigs %in% names(codon_bias_genes)]
}
#If I normalize the gene distance on 0-1, then I can make a consistent width plot
# Throwaway plot whose only purpose is to supply a shared two-entry legend
# (typical vs. abnormal codon use), extracted below with get_legend().
color_legend_plot <- ggplot(data = data.table(dots = c(1,2)), aes(x = dots, fill = factor(dots))) +
geom_bar()+
scale_fill_manual(name = "Gene Codon Bias", values = alpha(c("#2ca9e1", "#FF0000"), 1), labels = c("Typical Codon Use", "Abnormal Codon Use"))+
theme(legend.text = element_text(size = 14),
legend.title = element_text(size = 14))
color_leg <- get_legend(color_legend_plot)
# Circular ("ring") plot of per-gene codon usage bias for a single contig.
#
# contig       - contig name used in the plot title.
# gene_profile - data.table for this contig's genes with columns start, end,
#                euc_dist, relative_dist (euc_dist rescaled onto [2, 4]) and
#                outlier_status (drives the two-color fill).
# thresholds   - numeric summary stats for this contig; an all-zero vector
#                means the contig could not be scored, so NA is returned and
#                the caller filters the plot out.
# leg          - shared color legend grob (built once above as color_leg).
#
# Returns a cowplot grid (plot + legend), or NA when thresholds are all zero.
codon_bias_ringplot <- function(contig, gene_profile, thresholds, leg = color_leg){
  #iqr <- round(thresholds[1], 2)
  #mu <- round(thresholds[2], 2)
  #outlier_bound <- round(thresholds[3], 2)
  if(all(thresholds == 0)){
    return(NA)
  }
  p <- ggplot(gene_profile, aes(fill=factor(outlier_status), xmin=2, xmax=relative_dist, ymin=start, ymax=end))+
    # Grey backdrop ring spanning the full radial band and contig length.
    annotate("rect", xmin = 1.992, xmax = 4, ymin=0, ymax = max(gene_profile$end), fill = "grey65", color = "black") +
    geom_rect() +
    coord_polar(theta="y") +
    xlim(c(0, 4)) +
    # Leave a gap in the circle (1/3 of the contig span) for the annotations.
    ylim(c(0, max(gene_profile$end*4/3))) +
    theme(panel.background = element_blank(),
          axis.text.y = element_blank(),
          axis.title.y = element_blank(),
          axis.ticks.y = element_blank(),
          axis.text.x = element_blank(),
          axis.title.x = element_blank(),
          axis.ticks.x = element_blank(),
          axis.line = element_blank())+
    guides(fill = FALSE) +  # was `F`: always spell out FALSE (F is reassignable)
    ggtitle(paste0(contig, "\nCodon Usage Bias Distances")) +
    scale_fill_manual(values = c("#2ca9e1", "#FF0000")) +
    annotate("text", y = 0, x = 0, hjust = 0.5, label = paste("Contig Length\n", max(gene_profile$end), " bp", sep = ""), vjust = 0.5)+
    annotate("text", x = 3.1, y = max(gene_profile$end)*1.065, label = "Gene\nEuclidean\nDistance", vjust = 0, hjust = 0.5) +
    #tick labels
    annotate("text", y = max(gene_profile$end)*1.014, x = 4, label = paste0(round(max(gene_profile$euc_dist), 3), ""), vjust = 1, hjust = 0, angle = 90)+
    annotate("text", y = max(gene_profile$end)*1.030, x = 2, label = paste0(round(min(gene_profile$euc_dist), 3), ""), vjust = 0, hjust = 0, angle = 90)+
    #baseline
    #annotate("segment", x = 2, xend = 4, y = max(gene_profile$end)*1.025, yend = max(gene_profile$end)*1.014) +
    #ticks
    annotate("segment", x = 1.992, xend = 1.996, y = max(gene_profile$end), yend = max(gene_profile$end)*1.016) +
    annotate("segment", x = 2.66, xend = 2.66, y = max(gene_profile$end), yend = max(gene_profile$end)*1.012) +
    annotate("segment", x = 3.33, xend = 3.33, y = max(gene_profile$end), yend = max(gene_profile$end)*1.010) +
    annotate("segment", x = 4, xend = 4, y = max(gene_profile$end), yend = max(gene_profile$end)*1.008)
  # Pad left/right and place the shared legend beside the ring.
  p <- plot_grid(NULL, p, leg, NULL, ncol = 4, rel_widths = c(.3, .8, .15, .2))
  return(p)
}
# Batch contigs so each rendering pass hands roughly one contig per worker.
# NOTE(review): 1:length(passing_contigs) misbehaves for an empty vector;
# seq_along() would be safer — confirm passing_contigs is always non-empty.
groups <- (1:length(passing_contigs))%/%(min(threads, detectCores()))
unique_groups <- unique(groups)
if(!dir.exists("metapop_visualizations")){
dir.create("metapop_visualizations")
}
cl <- makeCluster(min(threads, detectCores()))
clusterExport(cl, varlist = c("library_location", "groups", "unique_groups", "color_leg"), envir = environment())
clusterEvalQ(cl, expr = suppressMessages(suppressWarnings(library(data.table, lib.loc = library_location))))
clusterEvalQ(cl, expr = suppressMessages(suppressWarnings(library(ggplot2, lib.loc = library_location))))
clusterEvalQ(cl, expr = suppressMessages(suppressWarnings(library(cowplot, lib.loc = library_location))))
# All plots go into a single multi-page PDF opened on the master process.
pdf("metapop_visualizations/codon_bias_plots.pdf", height = 9, width = 12)
registerDoParallel(cl)
# Render one batch of contigs per iteration: plots are built in parallel,
# then printed serially into the open PDF device.
for(k in unique_groups){
  CB_plots <- foreach(i = passing_contigs[groups == k]) %dopar% {
    # BUGFIX: `leg = color_leg` was previously misplaced inside the
    # as.numeric() call instead of being passed to codon_bias_ringplot().
    codon_bias_ringplot(contig = i,
                        gene_profile = codon_bias_genes[[which(names(codon_bias_genes) == i)]],
                        thresholds = as.numeric(codon_bias_iqr[which(codon_bias_iqr$parent_contig == i), 2:4]),
                        leg = color_leg)
  }
  # Drop contigs whose thresholds were all zero (ringplot returned NA).
  CB_plots <- CB_plots[!is.na(CB_plots)]
  #prevents superfluous outputs
  if(length(CB_plots) > 0){
    for(i in CB_plots){
      print(i)
    }
  }
}
stopCluster(cl)
dev.off()
|
# Example usage for nat's im3d pixel<->physical coordinate helpers.
library(nat)
### Name: im3d-coords
### Title: Interconvert pixel and physical coordinates
### Aliases: im3d-coords xyzpos ijkpos
### ** Examples
# make an empty im3d (no data; dimensions, origin and voxel sizes only)
d=im3d(,dim=c(20,30,40),origin=c(10,20,30),voxdims=c(1,2,3))
# check round trip for origin: ijkpos(xyzpos(.)) should be the identity
stopifnot(all.equal(ijkpos(d,xyzpos(d,c(1,1,1))), c(1,1,1)))
|
/data/genthat_extracted_code/nat/examples/im3d-coords.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 324
|
r
|
# Example usage for nat's im3d pixel<->physical coordinate helpers.
library(nat)
### Name: im3d-coords
### Title: Interconvert pixel and physical coordinates
### Aliases: im3d-coords xyzpos ijkpos
### ** Examples
# make an empty im3d (no data; dimensions, origin and voxel sizes only)
d=im3d(,dim=c(20,30,40),origin=c(10,20,30),voxdims=c(1,2,3))
# check round trip for origin: ijkpos(xyzpos(.)) should be the identity
stopifnot(all.equal(ijkpos(d,xyzpos(d,c(1,1,1))), c(1,1,1)))
|
#' Print a console summary of a scSpectra screening result: the first
#' `ncases` rows of the estimates table followed by the screening settings.
#'
#' @param object An object of class "scSpectra".
#' @param ncases Number of leading mass spectra rows to display (default 10).
#' @param ... Unused; present for S3 summary() method compatibility.
#' @return NULL, invisibly; called for its printed output.
summary.scSpectra <- function(object,ncases=10, ...){
  # !inherits() is the idiomatic class test (robust to multi-class objects).
  if (!inherits(object, "scSpectra")) {
    stop("object must be a scSpectra class object")
  }
  # is.numeric() accepts double AND integer; the original
  # inherits(ncases, "numeric") check wrongly rejected integers such as 10L.
  if (!is.numeric(ncases)) {
    stop("ncases must be a valid number")
  }
  cat(paste("(",ncases," first mass spectra) \n",sep=""))
  print(head(object$est.table,ncases))
  cat("\n");cat("----------------------------")
  cat("\n\n")
  cat(paste("Scale estimator:",object$estimator,"\n"))
  cat(paste("Method:",object$met,"\n"))
  cat(paste("Threshold:",object$threshold,"\n"))
  cat(paste("Limits: [",round(object$lower,4),",",round(object$upper,4),"] \n",sep=""))
  cat(paste("Deriv. order:",object$nd,"\n"))
  cat(paste("Lambda:",object$lambda,"\n"))
  cat(paste("No. potentially faulty spectra: ",object$cfailure," (",object$prop*100," %)",sep=""))
}
|
/R/summary.scSpectra.R
|
no_license
|
sgibb/MALDIrppa
|
R
| false
| false
| 833
|
r
|
#' Print a console summary of a scSpectra screening result: the first
#' `ncases` rows of the estimates table followed by the screening settings.
#'
#' @param object An object of class "scSpectra".
#' @param ncases Number of leading mass spectra rows to display (default 10).
#' @param ... Unused; present for S3 summary() method compatibility.
#' @return NULL, invisibly; called for its printed output.
summary.scSpectra <- function(object,ncases=10, ...){
  # !inherits() is the idiomatic class test (robust to multi-class objects).
  if (!inherits(object, "scSpectra")) {
    stop("object must be a scSpectra class object")
  }
  # is.numeric() accepts double AND integer; the original
  # inherits(ncases, "numeric") check wrongly rejected integers such as 10L.
  if (!is.numeric(ncases)) {
    stop("ncases must be a valid number")
  }
  cat(paste("(",ncases," first mass spectra) \n",sep=""))
  print(head(object$est.table,ncases))
  cat("\n");cat("----------------------------")
  cat("\n\n")
  cat(paste("Scale estimator:",object$estimator,"\n"))
  cat(paste("Method:",object$met,"\n"))
  cat(paste("Threshold:",object$threshold,"\n"))
  cat(paste("Limits: [",round(object$lower,4),",",round(object$upper,4),"] \n",sep=""))
  cat(paste("Deriv. order:",object$nd,"\n"))
  cat(paste("Lambda:",object$lambda,"\n"))
  cat(paste("No. potentially faulty spectra: ",object$cfailure," (",object$prop*100," %)",sep=""))
}
|
# Auto-generated SEMOSS temp script: inside the frame environment, set ROOT to
# the dataset version directory and replace empty DATA_COLLECTION_TIME strings
# with 0. NOTE(review): if the column is character (the == "" test suggests
# so), the assigned 0 is coerced to the string "0" — confirm intended.
with(a91d0cfc218694ba49ae274bb5810d7bc, {ROOT <- 'D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/1c4fa71c-191c-4da9-8102-b247ffddc5d3';FRAME909970$DATA_COLLECTION_TIME[FRAME909970$DATA_COLLECTION_TIME == ""] <- 0;});
|
/1c4fa71c-191c-4da9-8102-b247ffddc5d3/R/Temp/aY5QjNeONCzH2.R
|
no_license
|
ayanmanna8/test
|
R
| false
| false
| 277
|
r
|
# Auto-generated SEMOSS temp script: inside the frame environment, set ROOT to
# the dataset version directory and replace empty DATA_COLLECTION_TIME strings
# with 0. NOTE(review): if the column is character (the == "" test suggests
# so), the assigned 0 is coerced to the string "0" — confirm intended.
with(a91d0cfc218694ba49ae274bb5810d7bc, {ROOT <- 'D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/1c4fa71c-191c-4da9-8102-b247ffddc5d3';FRAME909970$DATA_COLLECTION_TIME[FRAME909970$DATA_COLLECTION_TIME == ""] <- 0;});
|
## plot1.R - histogram of Global Active Power for 2007-02-01/02.
# NOTE(review): getwd()/ls() results are discarded; rm(list=ls()) and a
# hard-coded setwd() in a script clobber the caller's session and are
# discouraged outside personal one-off use.
getwd()
ls()
rm(list=ls())
setwd("F:/Rujuta/DataExploration")
# Data Download, unzip file
fileurl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileurl, destfile="./Assignment1/Data.zip")
unzip("./Assignment1/Data.zip")
# for doing the assignment in the most efficient way, i would be using the following r packages
# lubridate for date and time, readr for reading data, dplyr for data manipulation
library(dplyr)
library(readr)
HHData6 <- read.table("household_power_consumption.txt", header=TRUE, sep=";")
# Combined date-time string kept alongside the parsed Date column.
HHData7 <- mutate(HHData6, datetime= paste(Date,Time,sep=" "))
HHData7$Date <- as.Date(HHData7$Date, format="%d/%m/%Y")
# Keep only the two target days.
HHData8 <- filter(HHData7, (Date=="2007-02-02") | (Date=="2007-02-01"))
HHData8$Time <- strptime(HHData8$Time, format="%H:%M:%S")
HHData8$datetime <- strptime(HHData8$datetime, format="%d/%m/%Y %H:%M:%S")
HHData9 <- HHData8
# as.character first so factor levels (e.g. "?") convert by label, not code.
HHData9$Global_active_power <- as.numeric(as.character(HHData9$Global_active_power))
# for Chart 1
hist(HHData9$Global_active_power, col="red", main="Global Active Power", xlab="Global active power(in Kilowatts)")
|
/plot1.R
|
no_license
|
RujutaJ1/ExData_Plotting1
|
R
| false
| false
| 1,125
|
r
|
## plot1.R - histogram of Global Active Power for 2007-02-01/02.
# NOTE(review): getwd()/ls() results are discarded; rm(list=ls()) and a
# hard-coded setwd() in a script clobber the caller's session and are
# discouraged outside personal one-off use.
getwd()
ls()
rm(list=ls())
setwd("F:/Rujuta/DataExploration")
# Data Download, unzip file
fileurl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileurl, destfile="./Assignment1/Data.zip")
unzip("./Assignment1/Data.zip")
# for doing the assignment in the most efficient way, i would be using the following r packages
# lubridate for date and time, readr for reading data, dplyr for data manipulation
library(dplyr)
library(readr)
HHData6 <- read.table("household_power_consumption.txt", header=TRUE, sep=";")
# Combined date-time string kept alongside the parsed Date column.
HHData7 <- mutate(HHData6, datetime= paste(Date,Time,sep=" "))
HHData7$Date <- as.Date(HHData7$Date, format="%d/%m/%Y")
# Keep only the two target days.
HHData8 <- filter(HHData7, (Date=="2007-02-02") | (Date=="2007-02-01"))
HHData8$Time <- strptime(HHData8$Time, format="%H:%M:%S")
HHData8$datetime <- strptime(HHData8$datetime, format="%d/%m/%Y %H:%M:%S")
HHData9 <- HHData8
# as.character first so factor levels (e.g. "?") convert by label, not code.
HHData9$Global_active_power <- as.numeric(as.character(HHData9$Global_active_power))
# for Chart 1
hist(HHData9$Global_active_power, col="red", main="Global Active Power", xlab="Global active power(in Kilowatts)")
|
library(miceadds)
### Name: NMIwaldtest
### Title: Wald Test for Nested Multiply Imputed Datasets
### Aliases: NMIwaldtest create.designMatrices.waldtest summary.NMIwaldtest
### MIwaldtest summary.MIwaldtest
### Keywords: Nested multiple imputation summary
### ** Examples
## Not run:
##D #############################################################################
##D # EXAMPLE 1: Nested multiple imputation and Wald test | TIMSS data
##D #############################################################################
##D
##D library(BIFIEsurvey)
##D data(data.timss2, package="BIFIEsurvey" )
##D datlist <- data.timss2
##D # remove first four variables
##D M <- length(datlist)
##D for (ll in 1:M){
##D datlist[[ll]] <- datlist[[ll]][, -c(1:4) ]
##D }
##D
##D #***************
##D # (1) nested multiple imputation using mice
##D imp1 <- miceadds::mice.nmi( datlist, m=3, maxit=2 )
##D summary(imp1)
##D
##D #**** Model 1: Linear regression with interaction effects
##D res1 <- with( imp1, stats::lm( likesc ~ female*migrant + female*books ) )
##D pres1 <- miceadds::pool.mids.nmi( res1 )
##D summary(pres1)
##D
##D # test whether both interaction effects equals zero
##D pars <- dimnames(pres1$qhat)[[3]]
##D des <- miceadds::create.designMatrices.waldtest( pars=pars, k=2)
##D Cdes <- des$Cdes
##D rdes <- des$rdes
##D Cdes[1, "female:migrant"] <- 1
##D Cdes[2, "female:books"] <- 1
##D wres1 <- miceadds::NMIwaldtest( qhat=pres1$qhat, u=pres1$u, Cdes=Cdes, rdes=rdes )
##D summary(wres1)
##D
##D # a simpler specification is the use of "testnull"
##D testnull <- c("female:migrant", "female:books")
##D wres1b <- miceadds::NMIwaldtest( qhat=qhat, u=u, testnull=testnull )
##D summary(wres1b)
##D
##D #**** Model 2: Multivariate linear regression
##D res2 <- with( imp1, stats::lm( cbind( ASMMAT, ASSSCI ) ~
##D 0 + I(1*(female==1)) + I(1*(female==0)) ) )
##D pres2 <- miceadds::pool.mids.nmi( res2 )
##D summary(pres2)
##D
##D # test whether both gender differences equals -10 points
##D pars <- dimnames(pres2$qhat)[[3]]
##D ## > pars
##D ## [1] "ASMMAT:I(1 * (female==1))" "ASMMAT:I(1 * (female==0))"
##D ## [3] "ASSSCI:I(1 * (female==1))" "ASSSCI:I(1 * (female==0))"
##D
##D des <- miceadds::create.designMatrices.waldtest( pars=pars, k=2)
##D Cdes <- des$Cdes
##D rdes <- c(-10,-10)
##D Cdes[1, "ASMMAT:I(1*(female==1))"] <- 1
##D Cdes[1, "ASMMAT:I(1*(female==0))"] <- -1
##D Cdes[2, "ASSSCI:I(1*(female==1))"] <- 1
##D Cdes[2, "ASSSCI:I(1*(female==0))"] <- -1
##D
##D wres2 <- miceadds::NMIwaldtest( qhat=pres2$qhat, u=pres2$u, Cdes=Cdes, rdes=rdes )
##D summary(wres2)
##D
##D # test only first hypothesis
##D wres2b <- miceadds::NMIwaldtest( qhat=pres2$qhat, u=pres2$u, Cdes=Cdes[1,,drop=FALSE],
##D rdes=rdes[1] )
##D summary(wres2b)
##D
##D #############################################################################
##D # EXAMPLE 2: Multiple imputation and Wald test | TIMSS data
##D #############################################################################
##D
##D library(BIFIEsurvey)
##D data(data.timss2, package="BIFIEsurvey" )
##D dat <- data.timss2[[1]]
##D dat <- dat[, - c(1:4) ]
##D
##D # perform multiple imputation
##D imp <- mice::mice( dat, m=6, maxit=3 )
##D
##D # define analysis model
##D res1 <- with( imp, lm( likesc ~ female*migrant + female*books ) )
##D pres1 <- mice::pool( res1 )
##D summary(pres1)
##D
##D # Wald test for zero interaction effects
##D qhat <- pres1$qhat
##D u <- pres1$u
##D pars <- dimnames(pres1$qhat)[[2]]
##D des <- miceadds::create.designMatrices.waldtest( pars=pars, k=2)
##D Cdes <- des$Cdes
##D rdes <- des$rdes
##D Cdes[1, "female:migrant"] <- 1
##D Cdes[2, "female:books"] <- 1
##D
##D # apply MIwaldtest function
##D wres1 <- miceadds::MIwaldtest( qhat, u, Cdes, rdes )
##D summary(wres1)
##D
##D # use again "testnull"
##D testnull <- c("female:migrant", "female:books")
##D wres1b <- miceadds::MIwaldtest( qhat=qhat, u=u, testnull=testnull )
##D summary(wres1b)
##D
##D #***** linear regression with cluster robust standard errors
##D
##D # convert object of class mids into a list object
##D datlist_imp <- miceadds::mids2datlist( imp )
##D # define cluster
##D idschool <- as.numeric( substring( data.timss2[[1]]$IDSTUD, 1, 5 ) )
##D # linear regression
##D res2 <- lapply( datlist_imp, FUN=function(data){
##D miceadds::lm.cluster( data=data, formula=likesc ~ female*migrant + female*books,
##D cluster=idschool ) } )
##D # extract parameters and covariance matrix
##D qhat <- lapply( res2, FUN=function(rr){ coef(rr) } )
##D u <- lapply( res2, FUN=function(rr){ vcov(rr) } )
##D # perform Wald test
##D wres2 <- miceadds::MIwaldtest( qhat, u, Cdes, rdes )
##D summary(wres2)
## End(Not run)
|
/data/genthat_extracted_code/miceadds/examples/NMIwaldtest.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 4,823
|
r
|
library(miceadds)
### Name: NMIwaldtest
### Title: Wald Test for Nested Multiply Imputed Datasets
### Aliases: NMIwaldtest create.designMatrices.waldtest summary.NMIwaldtest
### MIwaldtest summary.MIwaldtest
### Keywords: Nested multiple imputation summary
### ** Examples
## Not run:
##D #############################################################################
##D # EXAMPLE 1: Nested multiple imputation and Wald test | TIMSS data
##D #############################################################################
##D
##D library(BIFIEsurvey)
##D data(data.timss2, package="BIFIEsurvey" )
##D datlist <- data.timss2
##D # remove first four variables
##D M <- length(datlist)
##D for (ll in 1:M){
##D datlist[[ll]] <- datlist[[ll]][, -c(1:4) ]
##D }
##D
##D #***************
##D # (1) nested multiple imputation using mice
##D imp1 <- miceadds::mice.nmi( datlist, m=3, maxit=2 )
##D summary(imp1)
##D
##D #**** Model 1: Linear regression with interaction effects
##D res1 <- with( imp1, stats::lm( likesc ~ female*migrant + female*books ) )
##D pres1 <- miceadds::pool.mids.nmi( res1 )
##D summary(pres1)
##D
##D # test whether both interaction effects equals zero
##D pars <- dimnames(pres1$qhat)[[3]]
##D des <- miceadds::create.designMatrices.waldtest( pars=pars, k=2)
##D Cdes <- des$Cdes
##D rdes <- des$rdes
##D Cdes[1, "female:migrant"] <- 1
##D Cdes[2, "female:books"] <- 1
##D wres1 <- miceadds::NMIwaldtest( qhat=pres1$qhat, u=pres1$u, Cdes=Cdes, rdes=rdes )
##D summary(wres1)
##D
##D # a simpler specification is the use of "testnull"
##D testnull <- c("female:migrant", "female:books")
##D wres1b <- miceadds::NMIwaldtest( qhat=qhat, u=u, testnull=testnull )
##D summary(wres1b)
##D
##D #**** Model 2: Multivariate linear regression
##D res2 <- with( imp1, stats::lm( cbind( ASMMAT, ASSSCI ) ~
##D 0 + I(1*(female==1)) + I(1*(female==0)) ) )
##D pres2 <- miceadds::pool.mids.nmi( res2 )
##D summary(pres2)
##D
##D # test whether both gender differences equals -10 points
##D pars <- dimnames(pres2$qhat)[[3]]
##D ## > pars
##D ## [1] "ASMMAT:I(1 * (female==1))" "ASMMAT:I(1 * (female==0))"
##D ## [3] "ASSSCI:I(1 * (female==1))" "ASSSCI:I(1 * (female==0))"
##D
##D des <- miceadds::create.designMatrices.waldtest( pars=pars, k=2)
##D Cdes <- des$Cdes
##D rdes <- c(-10,-10)
##D Cdes[1, "ASMMAT:I(1*(female==1))"] <- 1
##D Cdes[1, "ASMMAT:I(1*(female==0))"] <- -1
##D Cdes[2, "ASSSCI:I(1*(female==1))"] <- 1
##D Cdes[2, "ASSSCI:I(1*(female==0))"] <- -1
##D
##D wres2 <- miceadds::NMIwaldtest( qhat=pres2$qhat, u=pres2$u, Cdes=Cdes, rdes=rdes )
##D summary(wres2)
##D
##D # test only first hypothesis
##D wres2b <- miceadds::NMIwaldtest( qhat=pres2$qhat, u=pres2$u, Cdes=Cdes[1,,drop=FALSE],
##D rdes=rdes[1] )
##D summary(wres2b)
##D
##D #############################################################################
##D # EXAMPLE 2: Multiple imputation and Wald test | TIMSS data
##D #############################################################################
##D
##D library(BIFIEsurvey)
##D data(data.timss2, package="BIFIEsurvey" )
##D dat <- data.timss2[[1]]
##D dat <- dat[, - c(1:4) ]
##D
##D # perform multiple imputation
##D imp <- mice::mice( dat, m=6, maxit=3 )
##D
##D # define analysis model
##D res1 <- with( imp, lm( likesc ~ female*migrant + female*books ) )
##D pres1 <- mice::pool( res1 )
##D summary(pres1)
##D
##D # Wald test for zero interaction effects
##D qhat <- pres1$qhat
##D u <- pres1$u
##D pars <- dimnames(pres1$qhat)[[2]]
##D des <- miceadds::create.designMatrices.waldtest( pars=pars, k=2)
##D Cdes <- des$Cdes
##D rdes <- des$rdes
##D Cdes[1, "female:migrant"] <- 1
##D Cdes[2, "female:books"] <- 1
##D
##D # apply MIwaldtest function
##D wres1 <- miceadds::MIwaldtest( qhat, u, Cdes, rdes )
##D summary(wres1)
##D
##D # use again "testnull"
##D testnull <- c("female:migrant", "female:books")
##D wres1b <- miceadds::MIwaldtest( qhat=qhat, u=u, testnull=testnull )
##D summary(wres1b)
##D
##D #***** linear regression with cluster robust standard errors
##D
##D # convert object of class mids into a list object
##D datlist_imp <- miceadds::mids2datlist( imp )
##D # define cluster
##D idschool <- as.numeric( substring( data.timss2[[1]]$IDSTUD, 1, 5 ) )
##D # linear regression
##D res2 <- lapply( datlist_imp, FUN=function(data){
##D miceadds::lm.cluster( data=data, formula=likesc ~ female*migrant + female*books,
##D cluster=idschool ) } )
##D # extract parameters and covariance matrix
##D qhat <- lapply( res2, FUN=function(rr){ coef(rr) } )
##D u <- lapply( res2, FUN=function(rr){ vcov(rr) } )
##D # perform Wald test
##D wres2 <- miceadds::MIwaldtest( qhat, u, Cdes, rdes )
##D summary(wres2)
## End(Not run)
|
#' Class GMQLDataset
#'
#' Abstract class representing a GMQL dataset; instances wrap a single
#' character identifier in the `value` slot.
#'
#' @slot value value associated to GMQL dataset
#' @name GMQLDataset-class
#' @rdname GMQLDataset-class
#' @noRd
#' @return instance of GMQL dataset
#'
setClass("GMQLDataset", representation(value = "character"))
#' GMQLDataset constructor
#'
#' Builds a GMQLDataset instance wrapping the supplied value.
#'
#' @name GMQLDataset
#' @importFrom methods new
#'
#' @param value value associated to GMQL dataset
#' @rdname GMQLDataset-class
#' @noRd
GMQLDataset <- function(value) {
  new("GMQLDataset", value = value)
}
# show method: pretty-print a GMQLDataset and its value at the console.
setMethod("show", "GMQLDataset", function(object) {
cat("GMQL Dataset \n")
cat(" value :",paste(object@value))
})
# Accessor generic/method returning the dataset's `value` slot.
setGeneric("value", function(.dataset) standardGeneric("value"))
setMethod("value", "GMQLDataset", function(.dataset) .dataset@value)
|
/R/AllClasses.R
|
no_license
|
DEIB-GECO/RGMQL
|
R
| false
| false
| 870
|
r
|
#' Class GMQLDataset
#'
#' Abstract class representing a GMQL dataset; instances wrap a single
#' character identifier in the `value` slot.
#'
#' @slot value value associated to GMQL dataset
#' @name GMQLDataset-class
#' @rdname GMQLDataset-class
#' @noRd
#' @return instance of GMQL dataset
#'
setClass("GMQLDataset", representation(value = "character"))
#' GMQLDataset constructor
#'
#' Builds a GMQLDataset instance wrapping the supplied value.
#'
#' @name GMQLDataset
#' @importFrom methods new
#'
#' @param value value associated to GMQL dataset
#' @rdname GMQLDataset-class
#' @noRd
GMQLDataset <- function(value) {
  new("GMQLDataset", value = value)
}
# show method: pretty-print a GMQLDataset and its value at the console.
setMethod("show", "GMQLDataset", function(object) {
cat("GMQL Dataset \n")
cat(" value :",paste(object@value))
})
# Accessor generic/method returning the dataset's `value` slot.
setGeneric("value", function(.dataset) standardGeneric("value"))
setMethod("value", "GMQLDataset", function(.dataset) .dataset@value)
|
## plot2.R - line plot of Global Active Power over 2007-02-01/02.
# Read the full dataset; "?" marks missing values in the source file.
data <- read.table("household_power_consumption.txt",
                   header=TRUE,
                   sep=";",
                   colClasses=c("character", "character",
                                rep("numeric",7)), na="?")
# Build full datetimes BEFORE Date is converted away from character.
data$Time <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
data$Date <- as.Date(data$Date, "%d/%m/%Y")
# Keep only the two target days.
dates <- as.Date(c("2007-02-01", "2007-02-02"), "%Y-%m-%d")
data <- subset(data, Date %in% dates)
## plot 2
png("plot2.png", width=480, height=480, units="px")
plot(data$Time, data$Global_active_power,
     type="l",
     ylab="Global Active Power (kilowatts)",  # fixed label typo "kilowats"
     xlab="")
dev.off()
|
/plot2.R
|
no_license
|
balima78/ExData_Plotting1
|
R
| false
| false
| 642
|
r
|
## plot2.R - line plot of Global Active Power over 2007-02-01/02.
# Read the full dataset; "?" marks missing values in the source file.
data <- read.table("household_power_consumption.txt",
                   header=TRUE,
                   sep=";",
                   colClasses=c("character", "character",
                                rep("numeric",7)), na="?")
# Build full datetimes BEFORE Date is converted away from character.
data$Time <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
data$Date <- as.Date(data$Date, "%d/%m/%Y")
# Keep only the two target days.
dates <- as.Date(c("2007-02-01", "2007-02-02"), "%Y-%m-%d")
data <- subset(data, Date %in% dates)
## plot 2
png("plot2.png", width=480, height=480, units="px")
plot(data$Time, data$Global_active_power,
     type="l",
     ylab="Global Active Power (kilowatts)",  # fixed label typo "kilowats"
     xlab="")
dev.off()
|
\name{yq_breaks}
\alias{yq_breaks}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Function to generate a sequence of breaks for scale_x_yearqtr, displaying all quarters in the series
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
yq_breaks(data, yq_col = yq)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{
The data frame from which to extract the minimum and maximum yearqtr values
}
\item{yq_col}{
Name of the column containing the yearqtr values
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (x)
{
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory (show via RShowDoc("KEYWORDS")):
% \keyword{ ~kwd1 }
% \keyword{ ~kwd2 }
% Use only one keyword per line.
% For non-standard keywords, use \concept instead of \keyword:
% \concept{ ~cpt1 }
% \concept{ ~cpt2 }
% Use only one concept per line.
|
/twViz/man/yq_breaks.Rd
|
no_license
|
dchiuten/twtr
|
R
| false
| false
| 1,614
|
rd
|
\name{yq_breaks}
\alias{yq_breaks}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Function to generate a sequence of breaks for scale_x_yearqtr, displaying all quarters in the series
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
yq_breaks(data, yq_col = yq)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{
The data frame from which to extract the minimum and maximum yearqtr values
}
  \item{yq_col}{
Name of the column containing the yearqtr values
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (x)
{
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory (show via RShowDoc("KEYWORDS")):
% \keyword{ ~kwd1 }
% \keyword{ ~kwd2 }
% Use only one keyword per line.
% For non-standard keywords, use \concept instead of \keyword:
% \concept{ ~cpt1 }
% \concept{ ~cpt2 }
% Use only one concept per line.
|
# Autotest driver: knit the student's R Markdown submission, then check the
# expected `sum` results with testthat expectations.
library("knitr")
knit("submission.Rmd")
# NOTE(review): test_that()/expect_equal() come from testthat, which is not
# attached here -- presumably the autotest harness loads it before sourcing.
test_that("positive integers can be added", {
  expect_equal(sum(5, 5), 10)
  expect_equal(sum(8, 2), 10)
  expect_equal(sum(1, 1), 2)
})
test_that("negative integers can be added", {
  expect_equal(sum(15, -5), 10)
  expect_equal(sum(-8, -6), -14)
  expect_equal(sum(-10, 20), 10)
})
|
/db/data/autotest_files/r/script_files/test_rmd.R
|
permissive
|
MarkUsProject/Markus
|
R
| false
| false
| 327
|
r
|
# Autotest driver: knit the student's R Markdown submission, then check the
# expected `sum` results with testthat expectations.
library("knitr")
knit("submission.Rmd")
# NOTE(review): test_that()/expect_equal() come from testthat, which is not
# attached here -- presumably the autotest harness loads it before sourcing.
test_that("positive integers can be added", {
  expect_equal(sum(5, 5), 10)
  expect_equal(sum(8, 2), 10)
  expect_equal(sum(1, 1), 2)
})
test_that("negative integers can be added", {
  expect_equal(sum(15, -5), 10)
  expect_equal(sum(-8, -6), -14)
  expect_equal(sum(-10, 20), 10)
})
|
# Load and prepare the 2020 election polling data for downstream analysis.
library(tidyverse)
polldata <- read_csv("data/ElectionPolling2020_Mod.csv")
# spread = green minus blue support; parse the day/month/year date strings.
polldata <- polldata %>%
  mutate(spread = green - blue,
         startdate = as.Date(startdate, "%d/%m/%Y"),
         enddate = as.Date(enddate, "%d/%m/%Y"))
# Cache the cleaned data frame for the report.
save(polldata, file = "rda/polldata.rda")
|
/wrangle-data.R
|
no_license
|
abentsui/twelection2020
|
R
| false
| false
| 276
|
r
|
# Load and prepare the 2020 election polling data for downstream analysis.
library(tidyverse)
polldata <- read_csv("data/ElectionPolling2020_Mod.csv")
# spread = green minus blue support; parse the day/month/year date strings.
polldata <- polldata %>%
  mutate(spread = green - blue,
         startdate = as.Date(startdate, "%d/%m/%Y"),
         enddate = as.Date(enddate, "%d/%m/%Y"))
# Cache the cleaned data frame for the report.
save(polldata, file = "rda/polldata.rda")
|
seed <- 903
log.wt <- 0.0
penalty <- 2.8115950178536287e-8
intervals.send <- c()
intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400)
dev.null <- 358759.0022669336
df.null <- 35567
dev.resid <- 225399.74946176852
df.resid <- 35402
df <- 165
coefs <- c(6.623182165829688, 5.962729617623979, 5.669752958540023, 5.3338781734941465, 5.1564010851905175, 4.948991808243085, 4.860323769655147, 4.6184920214467455, 4.408727420713806, 4.296599607098982, 4.290358678180333, 4.165773464979817, 4.0413749957112355, 3.963323797292847, 3.787252704187191, 3.5423488381502164, 3.309618983671571, 2.953114438589659, 2.5099433933885416, 2.070652616953682, 1.6075958700338953, 0.8994539405868023, 0.893740269424074, 0.334437681413972, 0.4325809337230635, -0.8320568018156138, -0.45532712832946703, 0.967551076088582, 1.0891093344089693, -1.1251755189965604, -2.2545030639440355, -2.2593050850523255, -9.572042508661865e-2, 0.7380341088879862, 1.1264584425539659, -0.5834901257003801, -2.126761925372439e-3, -0.6422907028060159, 0.27933705469718884, -0.5313908289125288, 0.9456641940310901, 0.5102183270198773, -0.8152012804600272, -1.4684928597062328, -0.5964202033284318, -0.787871473917516, -0.12998165808082007, 0.4515068949392667, 0.32040851214446264, -0.6794651366226729, -0.19884789520926133, 0.7131800489930574, -1.9799819353884056, 1.668436516870391, 0.7731072090286674, 1.1522919235099318, -1.462105997104614, -8.754433204763898e-2, -0.6071934492718842, 0.7313756854950122, 0.8506891251975106, 0.5905560532698337, -1.67578267963747, -1.1865450519765313, -0.557179936241881, 0.13664721693445617, 0.6931447796682365, -0.6337465178984961, -1.7645084699699825, -0.5900735712714842, -2.3274023977002685, -0.3314599993633034, 0.24403410177971, 0.8885460949254688, 0.6851851545291074, -0.5991433605920636, -1.5839512213868239, -0.9428841444352526, -5.705141994724461e-2, 0.7029478788721704, 1.125135391191842, 4.448750587452047e-2, 0.25216694609825785, -1.4731397029302962, -0.12080540840611517, 0.35446692892680676, 1.1757709136823296, 0.5622109927047533, 0.8703172843940981, -1.8562650024973972, 0.569224125500101, 0.605091182036558, 0.6327937766971077, 0.3696609267989361, 0.15424036428016732, 1.2839984663937467, -0.32632685011050117, 
0.2411513602090531, -0.10499784448876741, 2.013945310950528e-2, 0.22788945824735427, -0.31069528442535693, 0.8749776342780102, 0.31875986087496283, 0.7516776688843293, 0.8184230361599981, 1.0546513482426276, -0.9894753292042514, -0.5877220750340338, -1.3478001254852792, 0.4247625864330418, 0.4030254899771654, 1.5968663606122848, -0.9622652228968471, -0.3891179382080225, -1.509826297542044, 0.7439421966360855, -0.3344284137081303, 0.41827211245026097, 0.664113140213679, -0.7508928943918919, -0.5496991376406823, -1.1404726286572542, -0.45785430310213576, 0.316458017170139, 0.8706270349566859, -0.12205666294824896, 0.9103555671772638, -0.8487347904280618, -0.3839453706877467, 0.4404741918124203, 0.886004645098731, 0.8382540267150478, 0.4550561720057889, -1.1429448299484713e-2, 1.1937482740412027, -0.4125534871691698, 1.000987694198938, 0.671828944546465, 0.9445756472984729, 0.6473672683564149, -0.7244831954392201, -1.3383258440325363, 0.5717967185104065, 0.2562245334933188, 0.5531201862754737, -0.13901077515897906, -0.7962763959094482, -2.013971633930299, 1.2756118117213413, 0.18888704317130528, 1.1920719139014317, -0.22947784480142305, 3.828683876329796e-2, 6.289967422371767e-2, -1.5456680748666127, -1.1543172431851902, 0.9684151087578475, 1.1608126904389642, -0.2886453944215093, 1.501333565252508, -0.2652434284503002, -0.13323937068522224, 4.527678652819918e-2, 1.0960514289533556)
|
/analysis/boot/boot903.R
|
no_license
|
patperry/interaction-proc
|
R
| false
| false
| 3,744
|
r
|
seed <- 903
log.wt <- 0.0
penalty <- 2.8115950178536287e-8
intervals.send <- c()
intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400)
dev.null <- 358759.0022669336
df.null <- 35567
dev.resid <- 225399.74946176852
df.resid <- 35402
df <- 165
coefs <- c(6.623182165829688, 5.962729617623979, 5.669752958540023, 5.3338781734941465, 5.1564010851905175, 4.948991808243085, 4.860323769655147, 4.6184920214467455, 4.408727420713806, 4.296599607098982, 4.290358678180333, 4.165773464979817, 4.0413749957112355, 3.963323797292847, 3.787252704187191, 3.5423488381502164, 3.309618983671571, 2.953114438589659, 2.5099433933885416, 2.070652616953682, 1.6075958700338953, 0.8994539405868023, 0.893740269424074, 0.334437681413972, 0.4325809337230635, -0.8320568018156138, -0.45532712832946703, 0.967551076088582, 1.0891093344089693, -1.1251755189965604, -2.2545030639440355, -2.2593050850523255, -9.572042508661865e-2, 0.7380341088879862, 1.1264584425539659, -0.5834901257003801, -2.126761925372439e-3, -0.6422907028060159, 0.27933705469718884, -0.5313908289125288, 0.9456641940310901, 0.5102183270198773, -0.8152012804600272, -1.4684928597062328, -0.5964202033284318, -0.787871473917516, -0.12998165808082007, 0.4515068949392667, 0.32040851214446264, -0.6794651366226729, -0.19884789520926133, 0.7131800489930574, -1.9799819353884056, 1.668436516870391, 0.7731072090286674, 1.1522919235099318, -1.462105997104614, -8.754433204763898e-2, -0.6071934492718842, 0.7313756854950122, 0.8506891251975106, 0.5905560532698337, -1.67578267963747, -1.1865450519765313, -0.557179936241881, 0.13664721693445617, 0.6931447796682365, -0.6337465178984961, -1.7645084699699825, -0.5900735712714842, -2.3274023977002685, -0.3314599993633034, 0.24403410177971, 0.8885460949254688, 0.6851851545291074, -0.5991433605920636, -1.5839512213868239, -0.9428841444352526, -5.705141994724461e-2, 0.7029478788721704, 1.125135391191842, 4.448750587452047e-2, 0.25216694609825785, -1.4731397029302962, -0.12080540840611517, 0.35446692892680676, 1.1757709136823296, 0.5622109927047533, 0.8703172843940981, -1.8562650024973972, 0.569224125500101, 0.605091182036558, 0.6327937766971077, 0.3696609267989361, 0.15424036428016732, 1.2839984663937467, -0.32632685011050117, 
0.2411513602090531, -0.10499784448876741, 2.013945310950528e-2, 0.22788945824735427, -0.31069528442535693, 0.8749776342780102, 0.31875986087496283, 0.7516776688843293, 0.8184230361599981, 1.0546513482426276, -0.9894753292042514, -0.5877220750340338, -1.3478001254852792, 0.4247625864330418, 0.4030254899771654, 1.5968663606122848, -0.9622652228968471, -0.3891179382080225, -1.509826297542044, 0.7439421966360855, -0.3344284137081303, 0.41827211245026097, 0.664113140213679, -0.7508928943918919, -0.5496991376406823, -1.1404726286572542, -0.45785430310213576, 0.316458017170139, 0.8706270349566859, -0.12205666294824896, 0.9103555671772638, -0.8487347904280618, -0.3839453706877467, 0.4404741918124203, 0.886004645098731, 0.8382540267150478, 0.4550561720057889, -1.1429448299484713e-2, 1.1937482740412027, -0.4125534871691698, 1.000987694198938, 0.671828944546465, 0.9445756472984729, 0.6473672683564149, -0.7244831954392201, -1.3383258440325363, 0.5717967185104065, 0.2562245334933188, 0.5531201862754737, -0.13901077515897906, -0.7962763959094482, -2.013971633930299, 1.2756118117213413, 0.18888704317130528, 1.1920719139014317, -0.22947784480142305, 3.828683876329796e-2, 6.289967422371767e-2, -1.5456680748666127, -1.1543172431851902, 0.9684151087578475, 1.1608126904389642, -0.2886453944215093, 1.501333565252508, -0.2652434284503002, -0.13323937068522224, 4.527678652819918e-2, 1.0960514289533556)
|
# Extracted example from the comato package documentation.
library(comato)
### Name: analyze.similarity
### Title: Analyzing graph similarity.
### Aliases: analyze.similarity
### ** Examples
require("igraph")
# Two random Erdos-Renyi graphs (p = 0.7) on the same 15 named vertices.
g1 = set.vertex.attribute(erdos.renyi.game(15, 0.7, type="gnp"), "name", value=1:15)
g2 = set.vertex.attribute(erdos.renyi.game(15, 0.7, type="gnp"), "name", value=1:15)
# Compare the two concept maps -- presumably returns a similarity score;
# see ?analyze.similarity for the exact semantics.
analyze.similarity(conceptmap(g1), conceptmap(g2))
|
/data/genthat_extracted_code/comato/examples/analyze.similarity.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 378
|
r
|
# Extracted example from the comato package documentation.
library(comato)
### Name: analyze.similarity
### Title: Analyzing graph similarity.
### Aliases: analyze.similarity
### ** Examples
require("igraph")
# Two random Erdos-Renyi graphs (p = 0.7) on the same 15 named vertices.
g1 = set.vertex.attribute(erdos.renyi.game(15, 0.7, type="gnp"), "name", value=1:15)
g2 = set.vertex.attribute(erdos.renyi.game(15, 0.7, type="gnp"), "name", value=1:15)
# Compare the two concept maps -- presumably returns a similarity score;
# see ?analyze.similarity for the exact semantics.
analyze.similarity(conceptmap(g1), conceptmap(g2))
|
## Put comments here that give an overall description of what your
## functions do
## These two functions are inverting a matrix.
#If the matrix has been already inverted, so the answer is taken from cashe
## Write a short comment describing this function
## This function is making an access to inverted matrix from cashe
## Create a caching wrapper around a matrix.
##
## Returns a list of four accessor closures that share this function's
## environment: set/get for the matrix itself, setmean/getmean for the
## cached inverse. The historical "mean" element names are preserved so
## existing callers such as cacheSolve() keep working.
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  replace <- function(value) {
    x <<- value
    cached <<- NULL  # any previously cached inverse is now stale
  }
  fetch <- function() {
    x
  }
  store_inverse <- function(inv) {
    cached <<- inv
  }
  fetch_inverse <- function() {
    cached
  }
  list(
    set = replace,
    get = fetch,
    setmean = store_inverse,
    getmean = fetch_inverse
  )
}
## Write a short comment describing this function
#This function is pulling out the inverting matrix from cashe if it is possible#
## Return the inverse of the matrix held in a makeCacheMatrix() wrapper.
##
## The first call computes the inverse with solve() (extra arguments are
## forwarded) and stores it in the wrapper's cache; subsequent calls emit a
## "getting cached data" message and return the cached copy unchanged.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getmean()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setmean(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
/cachematrix.R
|
no_license
|
Rus1886/1
|
R
| false
| false
| 991
|
r
|
## Put comments here that give an overall description of what your
## functions do
## These two functions are inverting a matrix.
#If the matrix has been already inverted, so the answer is taken from cashe
## Write a short comment describing this function
## This function is making an access to inverted matrix from cashe
## Build a caching wrapper around a matrix: returns a list of closures
## (set/get for the matrix, setmean/getmean for the cached inverse) that
## all share this call's environment via <<-.
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
# Replace the wrapped matrix and invalidate the cached inverse.
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
# Despite the "mean" names, these store/retrieve the solve() result.
setmean <- function(solve) m <<- solve
getmean <- function() m
list(set = set, get = get,
setmean = setmean,
getmean = getmean)
}
## Write a short comment describing this function
#This function is pulling out the inverting matrix from cashe if it is possible#
## Return the inverse of the matrix held in a makeCacheMatrix() wrapper,
## computing it with solve() on first use and reusing the cached copy after.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getmean()
# Cache hit: announce and return the stored inverse without recomputing.
if(!is.null(m)) {
message("getting cached data")
return(m)
}
# Cache miss: invert the wrapped matrix and store the result.
data <- x$get()
m <- solve(data, ...)
x$setmean(m)
m
}
|
#' @title Simulate the Confidence Interval for the Ratio of Two Variances
#' @description Repeatedly draws two normal samples and builds the F-based
#'   confidence interval for sig1^2 / sig2^2, optionally plotting every
#'   interval and reporting how often the true ratio falls outside.
#' @usage civar2.sim(n1, n2, sig1, sig2, alp = 0.05, N = 100, seed = 9857, dig = 4, plot = TRUE)
#'
#' @param n1 Sample size of population1
#' @param n2 Sample size of population2
#' @param sig1 Standard deviation of population1
#' @param sig2 Standard deviation of population2
#' @param alp Level of significance, Default: 0.05
#' @param N Number of iterations, Default: 100
#' @param seed Seed value for generating random numbers, Default: 9857
#' @param dig Number of digits below the decimal point for the printed
#'   non-coverage proportions, Default: 4
#' @param plot Logical value for plot, Default: TRUE
#'
#' @return None (called for its plot and printed summary).
#' @examples
#' civar2.sim(n1 = 25, n2 = 16, sig1 = sqrt(8), sig2 = 2)
#' civar2.sim(n1 = 25, n2 = 16, sig1 = sqrt(8), sig2 = 2, N = 10000, plot = F)
#' @export
civar2.sim <- function(n1, n2, sig1, sig2, alp = 0.05, N = 100, seed = 9857,
                       dig = 4, plot = TRUE) {
  vr0 <- sig1^2 / sig2^2                # true variance ratio
  ci <- matrix(0, nrow = N, ncol = 3)   # columns: LCL, point estimate, UCL
  ir <- 1:N
  # F quantiles with (n1-1, n2-1) degrees of freedom
  fv1 <- qf(alp / 2, n1 - 1, n2 - 1)
  fv2 <- qf(1 - alp / 2, n1 - 1, n2 - 1)
  set.seed(seed)
  for (i in ir) {
    x <- rnorm(n1, 0, sig1)
    y <- rnorm(n2, 0, sig2)
    xm <- var(x) / var(y)               # sample variance ratio
    ci[i, ] <- c(xm / fv2, xm, xm / fv1)
  }
  if (plot) {
    # Bug fix: win.graph() only exists on Windows; use dev.new() elsewhere.
    if (exists("win.graph")) win.graph(7, 4) else dev.new(width = 7, height = 4)
    plot(ir, ci[, 2],
      type = "p", pch = 19, cex = 0.6,
      col = 1, ylim = c(min(ci), max(ci)),
      main = "Confidence Intervals for Ratio of Population Variances",
      ylab = "Confidence Interval", xlab = "Iteration"
    )
    abline(h = vr0, col = 2)
    # Draw each interval; red when it misses the true ratio, blue otherwise.
    arrows(ir, ci[, 1], ir, ci[, 3],
      length = 0.03, code = 3, angle = 90, lwd = 1.5,
      col = ifelse(ci[, 1] > vr0 | ci[, 3] < vr0, 2, 4)
    )
  }
  # Empirical non-coverage on each side, rounded to `dig` digits.
  # (Bug fix: the `dig` argument was previously accepted but never used.)
  nup <- sum(ci[, 1] > vr0)
  nlow <- sum(ci[, 3] < vr0)
  cat(paste0(
    "P(LCL > ", vr0, ") = ", nup, "/", N, " = ", round(nup / N, dig),
    "\t P(UCL < ", vr0, ") = ", nlow, "/", N, " = ", round(nlow / N, dig)
  ), "\n")
}
|
/R/civar2.sim.R
|
permissive
|
jhk0530/Rstat
|
R
| false
| false
| 2,061
|
r
|
#' @title Simulate the Confidence Interval for a the ratio of Variances
#' @description Simulate the Confidence Interval for a the ratio of Variances
#' @usage civar2.sim(n1, n2, sig1, sig2, alp = 0.05, N = 100, seed = 9857, dig = 4, plot = TRUE)
#'
#' @param n1 Sample size of population1
#' @param n2 Sample size of population2
#' @param sig1 Standard deviation of population1
#' @param sig2 Standard deviation of population2
#' @param alp Level of significance, Default: 0.05
#' @param N Number of iterations, Default: 100
#' @param seed Seed value for generating random numbers, Default: 9857
#' @param dig Number of digits below the decimal point, Default: 4
#' @param plot Logical value for plot, Default: TRUE
#'
#' @return None.
#' @examples
#' civar2.sim(n1 = 25, n2 = 16, sig1 = sqrt(8), sig2 = 2)
#' civar2.sim(n1 = 25, n2 = 16, sig1 = sqrt(8), sig2 = 2, N = 10000, plot = F)
#' @export
civar2.sim <- function(n1, n2, sig1, sig2, alp = 0.05, N = 100, seed = 9857,
                       dig = 4, plot = TRUE) {
  # True ratio of population variances and an N x 3 result matrix
  # (columns: lower CL, point estimate, upper CL).
  vr0 <- sig1^2 / sig2^2
  ci <- matrix(0, nrow = N, ncol = 3)
  ir <- 1:N
  # F quantiles with (n1-1, n2-1) degrees of freedom.
  fv1 <- qf(alp / 2, n1 - 1, n2 - 1)
  fv2 <- qf(1 - alp / 2, n1 - 1, n2 - 1)
  set.seed(seed)
  for (i in ir) {
    x <- rnorm(n1, 0, sig1)
    y <- rnorm(n2, 0, sig2)
    xv <- var(x)
    yv <- var(y)
    xm <- xv / yv
    lcl <- xm / fv2
    ucl <- xm / fv1
    ci[i, ] <- c(lcl, xm, ucl)
  }
  if (plot) {
    # NOTE(review): win.graph() exists only on Windows; this errors on
    # other platforms -- consider dev.new() as a fallback.
    win.graph(7, 4)
    plot(ir, ci[, 2],
      type = "p", pch = 19, cex = 0.6,
      col = 1, ylim = c(min(ci), max(ci)), main = "Confidence Intervals for Ratio of Population Variances",
      ylab = "Confidence Interval", xlab = "Iteration"
    )
    abline(h = vr0, col = 2)
    # Each interval in red when it misses vr0, blue otherwise.
    arrows(ir, ci[, 1], ir, ci[, 3],
      length = 0.03, code = 3,
      angle = 90, lwd = 1.5, col = ifelse((ci[, 1] > vr0 |
        ci[, 3] < vr0), 2, 4)
    )
  }
  # Empirical non-coverage counts on each side.
  # NOTE(review): the `dig` argument is accepted but never used here.
  nup <- sum(ci[, 1] > vr0)
  nlow <- sum(ci[, 3] < vr0)
  cat(paste0(
    "P(LCL > ", vr0, ") = ", nup, "/",
    N, " = ", nup / N, "\t P(UCL < ", vr0, ") = ",
    nlow, "/", N, " = ", nlow / N
  ), "\n")
}
|
#' plotPacific
#' A ggplot basemap that is Pacific-centered. Defaults to full North Pacific
#' @param ew.breaks in 180-degree centered coordinates, provide your E/W limits and breaks
#' @param ns.breaks in 0-degree centered coordinates, provide your N/S limits and breaks; 0 is minimum
#' @param fillcol color to fill countries
#' @param bordercol color to outline countries
#' @param alpha optional transparency of the country polygons, defaults to 1
#' @return a ggplot object that further layers can be added onto
plotPacific <- function(ew.breaks = c(seq(-120,-170,-10),180,seq(170,80,-10)),
                        ns.breaks = seq(0,60,10),
                        fillcol = "grey88",
                        bordercol = "skyblue3",
                        alpha = 1) {
  ## load shapefiles, shifted so the Pacific is centered; drop Antarctica
  WorldData <-
    map_data('world',
             wrap = c(-25, 335),
             orientation = c(20, 225)) %>% filter(region != 'Antarctica')
  WorldData <- fortify(WorldData)
  ## customize the x,y labels to have the degree symbol
  ewlbls <-
    unlist(lapply(ew.breaks, function(x)
      ifelse(
        x < 0, paste(-x, "°E"), ifelse(x > 0, paste(x, "°W"), x)
      )))
  ew.lims <- c(abs(ew.breaks)[1], last(ew.breaks)+200)
  ns.lims <- c(ns.breaks[1],last(ns.breaks))
  nslbls <- unlist(lapply(ns.breaks, function(x)
    paste(x, "°N")))
  p <- ggplot() +
    theme_bw() +
    theme(
      legend.title = element_blank(),
      axis.title.x = element_blank(),
      axis.title.y = element_blank(),
      panel.grid = element_blank(),
      legend.position = 'bottom'
    ) +
    ## add map (bug fix: `alpha` was documented but never applied)
    geom_map(
      data = WorldData,
      map = WorldData,
      aes(
        x = long,
        y = lat,
        group = group,
        map_id = region
      ),
      fill = fillcol,
      colour = bordercol,
      alpha = alpha,
      size = 0.5
    ) +
    scale_x_continuous(
      limits = ew.lims,
      breaks = seq(ew.lims[1],ew.lims[2],10),
      labels = ewlbls,
      position = 'top'
    ) +
    scale_y_continuous(
      limits = ns.lims,
      breaks = ns.breaks,
      labels = nslbls,
      position = 'top'
    )
  return(p)
}
|
/R/plotPacific.R
|
no_license
|
mkapur/kaputils
|
R
| false
| false
| 2,104
|
r
|
#' plotPacific
#' A ggplot basemap that is Pacific-centered. Defaults to full North Pacific
#' @param ew.breaks in 180-degree centered coordinates, provide your E/W limits and breaks
#' @param ns.breaks in 0-degree centered coordinates, provide your E/W limits and breaks; 0 is minimum
#' @param fillcol color to fill countries
#' @param bordercol color to outline countries
#' @param alpha optional transparency, defaults to 1
plotPacific <- function(ew.breaks = c(seq(-120,-170,-10),180,seq(170,80,-10)),
                        ns.breaks = seq(0,60,10),
                        fillcol = "grey88",
                        bordercol = "skyblue3",
                        alpha = 1) {
  ## load shapefiles
  # Shift longitudes so the Pacific is centered; drop Antarctica.
  WorldData <-
    map_data('world',
             wrap = c(-25, 335),
             orientation = c(20, 225)) %>% filter(region != 'Antarctica')
  WorldData <- fortify(WorldData)
  ## customize the x,y labels to have the degree symbol
  ewlbls <-
    unlist(lapply(ew.breaks, function(x)
      ifelse(
        x < 0, paste(-x, "°E"), ifelse(x > 0, paste(x, "°W"), x)
      )))
  ew.lims <- c(abs(ew.breaks)[1], last(ew.breaks)+200)
  ns.lims <- c(ns.breaks[1],last(ns.breaks))
  nslbls <- unlist(lapply(ns.breaks, function(x)
    paste(x, "°N")))
  # NOTE(review): the `alpha` argument is documented but never passed to any
  # layer below -- presumably it was meant for geom_map(); confirm intent.
  p <- ggplot() +
    theme_bw() +
    theme(
      legend.title = element_blank(),
      axis.title.x = element_blank(),
      axis.title.y = element_blank(),
      panel.grid = element_blank(),
      legend.position = 'bottom'
    ) +
    ## add map
    geom_map(
      data = WorldData,
      map = WorldData,
      aes(
        x = long,
        y = lat,
        group = group,
        map_id = region
      ),
      fill = fillcol,
      colour = bordercol,
      size = 0.5
    ) +
    scale_x_continuous(
      limits = ew.lims,
      breaks = seq(ew.lims[1],ew.lims[2],10),
      labels = ewlbls,
      position = 'top'
    ) +
    scale_y_continuous(
      limits = ns.lims,
      breaks = ns.breaks,
      labels = nslbls,
      position = 'top'
    )
  return(p)
}
|
# Recreate plot 2 of the assignment: Global Active Power over time, PNG output.
source("getPowerConsumptionData.R")
getPowerConsumptionData()
png("plot2.png")
# NOTE(review): as.POSIXlt() on the Date column assumes
# getPowerConsumptionData() left full timestamps there -- confirm upstream.
plot(as.POSIXlt(power_consumption$Date), power_consumption$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
/plot2.R
|
no_license
|
billwebb/ExData_Plotting1
|
R
| false
| false
| 228
|
r
|
# Recreate plot 2 of the assignment: Global Active Power over time, PNG output.
source("getPowerConsumptionData.R")
getPowerConsumptionData()
png("plot2.png")
# NOTE(review): as.POSIXlt() on the Date column assumes
# getPowerConsumptionData() left full timestamps there -- confirm upstream.
plot(as.POSIXlt(power_consumption$Date), power_consumption$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
# Teaching script: building ts() objects at yearly, quarterly, monthly and
# daily frequencies from random data, and slicing them with window().
# create a time series data
#first create a vector of numerical values
# 36 observations
set.seed(1234)
(sales = round(runif(36, 0,100)))
length(sales)
#This data can be daily, weekly, monthly, quarter, yearly data
#create yearly time series : start year 1980
#Yearly----
(ysales = ts(sales, frequency = 1))
(yearlysales = ts(sales, start=c(1980), frequency=1))
plot(yearlysales)
(yearlysales1 = ts(sales, start=c(1980,3), frequency=1))
# 3rd yr from 1980
plot(yearlysales)
#find the year when sales was > 50
yearlysales1[ yearlysales1 > 50]
class(yearlysales1)
methods(class=ts)
yearlysales1
(w1= window(yearlysales1, start=1983, end=1990))
plot(w1)
#Quarterly -----
12/4 # freq=4
(qtrsales = ts(sales, start=c(1980), frequency=4))
plot(qtrsales)
#list data from Qtr3 1980 to 1985
window(qtrsales, start=c(1980, 3), end=c(1985, 2))
#Monthly -----
12/12 # freq=12 start month=Apr/ 1990
(monsales = ts(sales, start=c(1990,4), frequency=12))
plot(monsales)
window(monsales, start=c(1991, 3))
#create data from Feb 2000 to Nov 2002
(monsales1 = ts(sales, start=c(2000,2), end=c(2003,3), frequency=12)) #recycling of elements beyond given sales value
monsales1
str(monsales1)
length(monsales1)
#see subset of sales data : May 2000 to Aug 2001
window(monsales1, start=c(2000, 5), end=c(2001, 8))
#Monthly TS
# Daily series: 365 normal draws, one per day of the year.
sales2 = ceiling(rnorm(365, mean=100, sd=10))
sales2
#YYYY,day
(dailysales = ts(sales2, start=c(2017,10), frequency=365))
window(dailysales, start=c(2017,50), end=c(2017,100))
mean(window(dailysales, start=c(2017,50), end=c(2017,100)))
head(sales2)
plot(dailysales)
class(dailysales)
#quarterly
sales3 = floor(rnorm(16, mean=200, sd = 12))
(qtrsales = ts(sales3, start = c(2018,1), frequency = 4))
plot(qtrsales)
#weekly
|
/92-wksp2/6b1-ts-data.R
|
no_license
|
DUanalytics/rAnalytics
|
R
| false
| false
| 1,745
|
r
|
# Teaching script: building ts() objects at yearly, quarterly, monthly and
# daily frequencies from random data, and slicing them with window().
# create a time series data
#first create a vector of numerical values
# 36 observations
set.seed(1234)
(sales = round(runif(36, 0,100)))
length(sales)
#This data can be daily, weekly, monthly, quarter, yearly data
#create yearly time series : start year 1980
#Yearly----
(ysales = ts(sales, frequency = 1))
(yearlysales = ts(sales, start=c(1980), frequency=1))
plot(yearlysales)
(yearlysales1 = ts(sales, start=c(1980,3), frequency=1))
# 3rd yr from 1980
plot(yearlysales)
#find the year when sales was > 50
yearlysales1[ yearlysales1 > 50]
class(yearlysales1)
methods(class=ts)
yearlysales1
(w1= window(yearlysales1, start=1983, end=1990))
plot(w1)
#Quarterly -----
12/4 # freq=4
(qtrsales = ts(sales, start=c(1980), frequency=4))
plot(qtrsales)
#list data from Qtr3 1980 to 1985
window(qtrsales, start=c(1980, 3), end=c(1985, 2))
#Monthly -----
12/12 # freq=12 start month=Apr/ 1990
(monsales = ts(sales, start=c(1990,4), frequency=12))
plot(monsales)
window(monsales, start=c(1991, 3))
#create data from Feb 2000 to Nov 2002
(monsales1 = ts(sales, start=c(2000,2), end=c(2003,3), frequency=12)) #recycling of elements beyond given sales value
monsales1
str(monsales1)
length(monsales1)
#see subset of sales data : May 2000 to Aug 2001
window(monsales1, start=c(2000, 5), end=c(2001, 8))
#Monthly TS
sales2 = ceiling(rnorm(365, mean=100, sd=10))
sales2
#YYYY,day
(dailysales = ts(sales2, start=c(2017,10), frequency=365))
window(dailysales, start=c(2017,50), end=c(2017,100))
mean(window(dailysales, start=c(2017,50), end=c(2017,100)))
head(sales2)
plot(dailysales)
class(dailysales)
#quarterly
sales3 = floor(rnorm(16, mean=200, sd = 12))
(qtrsales = ts(sales3, start = c(2018,1), frequency = 4))
plot(qtrsales)
#weekly
|
/3. Модуль 3/3.6. Разработка telegram ботов на языке R/tg_inline_keyboard_urls.R
|
no_license
|
selesnow/r_for_marketing
|
R
| false
| false
| 928
|
r
| ||
#' Define a new pipe
#'
#' All pipes of the package, including `%>%` and `%<>%`, are defined using this
#' general approach.
#'
#' @param format_fun a function taking an argument `call`, which will be fed the quoted
#' rhs, and should return the quoted expression of what the pipe should do.
#' @param compound_fun either `NULL` or a function taking arguments `lhs` and `res`
#' which are respectively the quoted first element of the pipe chain and the result of
#' the pipe chain, and should return a quoted expression to execute in the
#' parent environment.
#'
#' This pipe constructor is best understood by examples below and by the use of the
#' `%B>%` pipe.
#'
#' @examples
#' # let's build a standard pipe (it's the code used to create `%>%`)
#' `%>>>%` <- new_pipe(
#'   function(call){
#'     # add explicit dots at the right place in rhs
#'     call <- insert_dot(call)
#'     # the new dot should be equal to the call
#'     bquote(. <- .(call))
#'   })
#' iris %>>>% head() %>>>% dim()
#' # let's build a compound pipe (it's the code used to create `%<>%`)
#' `%<>>>%` <- new_pipe(
#'   function(call){
#'     call <- insert_dot(call)
#'     bquote(. <- .(call))
#'   },
#'   function(lhs, res){
#'     substitute(lhs <- res)
#'   })
#' x <- iris
#' x %<>>>% head() %>>>% dim()
#' x
#' @export
new_pipe <- function(format_fun, compound_fun = NULL) {
  # Validate that format_fun is a function whose only formal is `call`.
  if(!is.function(format_fun) || !isTRUE(all.equal(
    formals(format_fun), as.pairlist(alist(call=)))))
    stop("`format_fun` must be a function using a unique argument named `call`")
  # compound_fun, when given, must take exactly `lhs` and `res`.
  if(!is.null(compound_fun) && (!is.function(compound_fun) || !isTRUE(all.equal(
    formals(compound_fun), as.pairlist(alist(lhs=, res=))))))
    stop("`compound_fun` must be NULL or a function using arguments `lhs` and `res`")
  # copy the pipe (pipe_op is the shared operator template defined elsewhere)
  p <- pipe_op
  # assign the relevant class
  class(p) <- c(
    "pipe",
    if(is.null(compound_fun)) "standard_pipe" else "compound_pipe")
  # assign the format_fun
  attr(p, "format_fun") <- format_fun
  attr(p, "compound_fun") <- compound_fun
  p
}
|
/R/pipe.R
|
no_license
|
trinker/pipe
|
R
| false
| false
| 2,062
|
r
|
#' Define a new pipe
#'
#' All pipes of the package, including `%>%` and `%<>%`, are defined using this
#' general approach.
#'
#' @param format_fun a function taking an argument `call`, which will be fed the quoted
#' rhs, and should return the quoted expression of what the pipe should do.
#' @param compound_fun either `NULL` or a function taking arguments `lhs` and `res`
#' which are respectively the quoted first element of the pipe chain and the result of
#' the pipe chain, and should return a quoted expression to execute in the
#' parent environment.
#'
#' This pipe constructor is best understood by examples below and by the use of the
#' `%B>%` pipe.
#'
#' @examples
#' # let's build a standard pipe (it's the code used to create `%>%`)
#' `%>>>%` <- new_pipe(
#'   function(call){
#'     # add explicit dots at the right place in rhs
#'     call <- insert_dot(call)
#'     # the new dot should be equal to the call
#'     bquote(. <- .(call))
#'   })
#' iris %>>>% head() %>>>% dim()
#' # let's build a compound pipe (it's the code used to create `%<>%`)
#' `%<>>>%` <- new_pipe(
#'   function(call){
#'     call <- insert_dot(call)
#'     bquote(. <- .(call))
#'   },
#'   function(lhs, res){
#'     substitute(lhs <- res)
#'   })
#' x <- iris
#' x %<>>>% head() %>>>% dim()
#' x
#' @export
new_pipe <- function(format_fun, compound_fun = NULL) {
  # Validate that format_fun is a function whose only formal is `call`.
  if(!is.function(format_fun) || !isTRUE(all.equal(
    formals(format_fun), as.pairlist(alist(call=)))))
    stop("`format_fun` must be a function using a unique argument named `call`")
  # compound_fun, when given, must take exactly `lhs` and `res`.
  if(!is.null(compound_fun) && (!is.function(compound_fun) || !isTRUE(all.equal(
    formals(compound_fun), as.pairlist(alist(lhs=, res=))))))
    stop("`compound_fun` must be NULL or a function using arguments `lhs` and `res`")
  # copy the pipe (pipe_op is the shared operator template defined elsewhere)
  p <- pipe_op
  # assign the relevant class
  class(p) <- c(
    "pipe",
    if(is.null(compound_fun)) "standard_pipe" else "compound_pipe")
  # assign the format_fun
  attr(p, "format_fun") <- format_fun
  attr(p, "compound_fun") <- compound_fun
  p
}
|
#
# This test file has been generated by kwb.test::create_test_files()
#
# Calling trees_to_script_info() with no arguments must error, because the
# required argument `x` has no default.
test_that("trees_to_script_info() works", {
  expect_error(
    kwb.code:::trees_to_script_info()
    # argument "x" is missing, with no default
  )
})
|
/tests/testthat/test-function-trees_to_script_info.R
|
permissive
|
KWB-R/kwb.code
|
R
| false
| false
| 229
|
r
|
#
# This test file has been generated by kwb.test::create_test_files()
#
# Calling trees_to_script_info() with no arguments must error, because the
# required argument `x` has no default.
test_that("trees_to_script_info() works", {
  expect_error(
    kwb.code:::trees_to_script_info()
    # argument "x" is missing, with no default
  )
})
|
## logistic plot for individual in delta value
# Fit a per-subject logistic regression of choice (choseright) on the value
# difference between options, collecting slope (beta) and its p-value.
# NOTE(review): indexing an empty data.frame with [1:length(id.filtered),]
# pre-creates NA rows to be filled by the loop below -- fragile but works.
choice.beta = data.frame(ID=double(),
beta=double(),
p=double(),
stringsAsFactors=FALSE)[1:length(id.filtered),]
for (i in 1:length(id.filtered)) {
d = choice %>% filter(ID == id.filtered[i], is.na(choseright)==FALSE)
logistic = glm(choseright ~ 1 + delta.value, data = d, family = "binomial")
beta = summary(logistic)[['coefficients']][2,1]
p = summary(logistic)[['coefficients']][2,4]
choice.beta$ID[i] = id.filtered[i]
choice.beta$beta[i] = beta
choice.beta$p[i] = p
}
# Keep subjects with a significantly positive value slope (plus IDs 10, 56).
filtered.b = choice.beta %>% filter(beta>0, p<0.05)
#filtered.b = choice.beta %>% filter(p<0.05, beta>0)
choice = filter(choice, (ID %in% filtered.b$ID | ID %in% c(10, 56)))
# Per-subject logistic choice curves over delta.value.
choice %>% group_by(ID) %>% ggplot(aes(x = delta.value, y = choseright, group = ID)) +
geom_point() +
stat_smooth(method="glm", method.args=list(family="binomial"), se=FALSE)+
facet_wrap(~ID)
## Plot trends of their rating task
rid = rating %>% filter(ID == choice.rtid$ID[1])
# NOTE(review): '-s' is not a valid `type` for plot() in R (looks like
# MATLAB syntax) -- presumably type = 's' (steps) was intended; confirm.
plot(rid$trial_index, rid$response, '-s')
rating = rating %>% mutate(response = as.numeric(response))
rating %>% group_by(ID) %>% ggplot(aes(x = trial_index, y = response, group = ID)) +
geom_point(color = "steelblue", size = 0.7) + geom_line(color = "steelblue3", alpha = 0.5) +
facet_wrap(~ID)
##test plot for old subjects
rat.old = read.csv("food-choice-batch-2-12.csv") %>% filter(ttype == 'rating_task')
rat.old$ID = cumsum(!duplicated(rat.old['run_id']))
rat.old %>% group_by(ID) %>% ggplot(aes(x = trial_index, y = response, group = ID)) +
geom_point(color = "steelblue", size = 0.7) + geom_line(color = "steelblue3", alpha = 0.5) +
facet_wrap(~ID)
rating %>% group_by(ID) %>% dplyr::summarise(rt = mean(as.numeric(rt)))
# try z-scored ratings
rating = rating %>% group_by(ID) %>% mutate(z = scale(response))
choice = choice %>% dplyr::group_by(ID) %>% dplyr::mutate(z.delta.value = scale(delta.value))
choice %>% group_by(ID) %>% ggplot(aes(x = z.delta.value, y = choseright, group = ID)) +
geom_point() +
stat_smooth(method="glm", method.args=list(family="binomial"), se=FALSE)+
facet_wrap(~ID)
## test the distribution of positions for more valuable items
# posi: 1 = right item more valuable, 0 = left more valuable, 2 = tie.
position = choice %>% dplyr::group_by(ID) %>% dplyr::mutate(posi = case_when(delta.value>0 ~ 1, delta.value<0 ~ 0, delta.value ==0 ~ 2))
x = position %>% dplyr::group_by(ID) %>% filter(posi != 2) %>% dplyr::summarize(p = mean(posi))
position %>% dplyr::group_by(ID) %>% dplyr::filter(posi == 2) %>% dplyr::summarize(p = mean(choseright))
### multilevel model mixed-effects
m1 = glmer(choseright~1+delta.mem+delta.value+(1+delta.mem+delta.value|ID),
data = choice, family = "binomial")
summary(m1)
m2 = glmer(choseright~1+delta.mem+(1+delta.mem|ID),
data = choice, family = "binomial")
|
/logistic_update_2_24.R
|
no_license
|
christineli0330/Choice_task_analysis
|
R
| false
| false
| 2,909
|
r
|
## logistic plot for individual in delta value
choice.beta = data.frame(ID=double(),
beta=double(),
p=double(),
stringsAsFactors=FALSE)[1:length(id.filtered),]
for (i in 1:length(id.filtered)) {
d = choice %>% filter(ID == id.filtered[i], is.na(choseright)==FALSE)
logistic = glm(choseright ~ 1 + delta.value, data = d, family = "binomial")
beta = summary(logistic)[['coefficients']][2,1]
p = summary(logistic)[['coefficients']][2,4]
choice.beta$ID[i] = id.filtered[i]
choice.beta$beta[i] = beta
choice.beta$p[i] = p
}
filtered.b = choice.beta %>% filter(beta>0, p<0.05)
#filtered.b = choice.beta %>% filter(p<0.05, beta>0)
choice = filter(choice, (ID %in% filtered.b$ID | ID %in% c(10, 56)))
choice %>% group_by(ID) %>% ggplot(aes(x = delta.value, y = choseright, group = ID)) +
geom_point() +
stat_smooth(method="glm", method.args=list(family="binomial"), se=FALSE)+
facet_wrap(~ID)
## Plot trends of their rating task
rid = rating %>% filter(ID == choice.rtid$ID[1])
plot(rid$trial_index, rid$response, '-s')
rating = rating %>% mutate(response = as.numeric(response))
rating %>% group_by(ID) %>% ggplot(aes(x = trial_index, y = response, group = ID)) +
geom_point(color = "steelblue", size = 0.7) + geom_line(color = "steelblue3", alpha = 0.5) +
facet_wrap(~ID)
##test plot for old subjects
rat.old = read.csv("food-choice-batch-2-12.csv") %>% filter(ttype == 'rating_task')
rat.old$ID = cumsum(!duplicated(rat.old['run_id']))
rat.old %>% group_by(ID) %>% ggplot(aes(x = trial_index, y = response, group = ID)) +
geom_point(color = "steelblue", size = 0.7) + geom_line(color = "steelblue3", alpha = 0.5) +
facet_wrap(~ID)
rating %>% group_by(ID) %>% dplyr::summarise(rt = mean(as.numeric(rt)))
# try z-scored ratings
rating = rating %>% group_by(ID) %>% mutate(z = scale(response))
choice = choice %>% dplyr::group_by(ID) %>% dplyr::mutate(z.delta.value = scale(delta.value))
choice %>% group_by(ID) %>% ggplot(aes(x = z.delta.value, y = choseright, group = ID)) +
geom_point() +
stat_smooth(method="glm", method.args=list(family="binomial"), se=FALSE)+
facet_wrap(~ID)
## test the distribution of positions for more valuable items
position = choice %>% dplyr::group_by(ID) %>% dplyr::mutate(posi = case_when(delta.value>0 ~ 1, delta.value<0 ~ 0, delta.value ==0 ~ 2))
x = position %>% dplyr::group_by(ID) %>% filter(posi != 2) %>% dplyr::summarize(p = mean(posi))
position %>% dplyr::group_by(ID) %>% dplyr::filter(posi == 2) %>% dplyr::summarize(p = mean(choseright))
### multilevel model mixed-effects
m1 = glmer(choseright~1+delta.mem+delta.value+(1+delta.mem+delta.value|ID),
data = choice, family = "binomial")
summary(m1)
m2 = glmer(choseright~1+delta.mem+(1+delta.mem|ID),
data = choice, family = "binomial")
|
library(AnnuityRIR)
### Name: PV_post_mood_pm
### Title: Compute the present expected value of an n-payment annuity, with
###   payments of 1 unit each made at the end of every year
###   (annuity-immediate), valued at the rate X, with the method of Mood
###   _et al._ using some positive moments of the distribution.
### Aliases: PV_post_mood_pm
### ** Examples
#example 1
# Historical sample of annual interest rates (already expressed as fractions).
data=c(0.298,0.255,0.212,0.180,0.165,0.163,0.167,0.161,0.154,
0.128,0.079,0.059,0.042,-0.008,-0.012,-0.002)
PV_post_mood_pm(data)
# example 2
# Simulated rates: 30 draws from a normal with mean 3% and sd 1%.
data<-rnorm(n=30,m=0.03,sd=0.01)
PV_post_mood_pm(data)
# example 3
# Rates quoted in percent; converted to fractions before pricing.
data = c(1.77,1.85,1.85,1.84,1.84,1.83,1.85,1.85,1.88,1.85,1.80,1.84,1.91,1.85,1.84,1.85,
1.86,1.85,1.88,1.86)
data=data/100
PV_post_mood_pm(data)
|
/data/genthat_extracted_code/AnnuityRIR/examples/PV_post_mood_pm.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 740
|
r
|
library(AnnuityRIR)
### Name: PV_post_mood_pm
### Title: Compute the present expected value of an n-payment annuity, with
### payments of 1 unit each made at the end of every year
### (annuity-immediate), valued at the rate X, with the method of Mood
### _et al._ using some positive moments of the distribution.
### Aliases: PV_post_mood_pm
### ** Examples
#example 1
data=c(0.298,0.255,0.212,0.180,0.165,0.163,0.167,0.161,0.154,
0.128,0.079,0.059,0.042,-0.008,-0.012,-0.002)
PV_post_mood_pm(data)
# example 2
data<-rnorm(n=30,m=0.03,sd=0.01)
PV_post_mood_pm(data)
# example 3
data = c(1.77,1.85,1.85,1.84,1.84,1.83,1.85,1.85,1.88,1.85,1.80,1.84,1.91,1.85,1.84,1.85,
1.86,1.85,1.88,1.86)
data=data/100
PV_post_mood_pm(data)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/types.R
\name{type-predicates}
\alias{type-predicates}
\alias{is_list}
\alias{is_atomic}
\alias{is_vector}
\alias{is_integer}
\alias{is_double}
\alias{is_character}
\alias{is_logical}
\alias{is_raw}
\alias{is_bytes}
\alias{is_null}
\title{Type predicates}
\usage{
is_list(x, n = NULL)
is_atomic(x, n = NULL)
is_vector(x, n = NULL)
is_integer(x, n = NULL)
is_double(x, n = NULL)
is_character(x, n = NULL, encoding = NULL)
is_logical(x, n = NULL)
is_raw(x, n = NULL)
is_bytes(x, n = NULL)
is_null(x)
}
\arguments{
\item{x}{Object to be tested.}
\item{n}{Expected length of a vector.}
\item{encoding}{Expected encoding of a string or character
vector. One of \code{UTF-8}, \code{latin1}, or \code{unknown}.}
}
\description{
These type predicates aim to make type testing in R more
consistent. They are wrappers around \code{\link[base:typeof]{base::typeof()}}, so operate
at a level beneath S3/S4 etc.
}
\details{
Compared to base R functions:
\itemize{
\item The predicates for vectors include the \code{n} argument for
pattern-matching on the vector length.
\item Unlike \code{is.atomic()}, \code{is_atomic()} does not return \code{TRUE} for
\code{NULL}.
\item Unlike \code{is.vector()}, \code{is_vector()} tests whether an object is an
atomic vector or a list. \code{is.vector} checks for the presence of
attributes (other than names).
}
}
\seealso{
\link{bare-type-predicates} \link{scalar-type-predicates}
}
|
/man/type-predicates.Rd
|
no_license
|
EdwinTh/rlang
|
R
| false
| true
| 1,492
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/types.R
\name{type-predicates}
\alias{type-predicates}
\alias{is_list}
\alias{is_atomic}
\alias{is_vector}
\alias{is_integer}
\alias{is_double}
\alias{is_character}
\alias{is_logical}
\alias{is_raw}
\alias{is_bytes}
\alias{is_null}
\title{Type predicates}
\usage{
is_list(x, n = NULL)
is_atomic(x, n = NULL)
is_vector(x, n = NULL)
is_integer(x, n = NULL)
is_double(x, n = NULL)
is_character(x, n = NULL, encoding = NULL)
is_logical(x, n = NULL)
is_raw(x, n = NULL)
is_bytes(x, n = NULL)
is_null(x)
}
\arguments{
\item{x}{Object to be tested.}
\item{n}{Expected length of a vector.}
\item{encoding}{Expected encoding of a string or character
vector. One of \code{UTF-8}, \code{latin1}, or \code{unknown}.}
}
\description{
These type predicates aim to make type testing in R more
consistent. They are wrappers around \code{\link[base:typeof]{base::typeof()}}, so operate
at a level beneath S3/S4 etc.
}
\details{
Compared to base R functions:
\itemize{
\item The predicates for vectors include the \code{n} argument for
pattern-matching on the vector length.
\item Unlike \code{is.atomic()}, \code{is_atomic()} does not return \code{TRUE} for
\code{NULL}.
\item Unlike \code{is.vector()}, \code{is_vector()} tests whether an object is an
atomic vector or a list. \code{is.vector} checks for the presence of
attributes (other than names).
}
}
\seealso{
\link{bare-type-predicates} \link{scalar-type-predicates}
}
|
#' Shiny UI module for flagging samples
#'
#' NOTE(review): the visible body only creates the namespacing function; the
#' remaining arguments are never used, so this looks like a stub -- confirm.
#'
#' @param id Module id used to namespace the UI elements.
#' @param label Display label for the module UI (default "qcViolin").
#' @param markers Markers to offer in the UI (currently unused).
#' @param sortConditions Conditions available for sorting (currently unused).
#' @param colorConditions Conditions available for coloring (currently unused).
#' @param annotation Sample annotation data (currently unused).
#'
#' @return The namespace function produced by \code{NS(id)} (value of the
#'   final assignment, returned invisibly).
#' @export
#'
#' @examples
flagModuleUI <- function(id, label = "qcViolin", markers, sortConditions,
                         colorConditions, annotation) {
  # Create a namespace function using the provided id
  ns <- NS(id)
}
#' Shiny server module for flagging samples
#'
#' NOTE(review): only a data-validation reactive skeleton exists here; the
#' flagged result hinted at by the trailing comment is not yet computed --
#' confirm whether this module is still under construction.
#'
#' @param input,output,session Standard Shiny module server arguments.
#' @param data Data to be checked and flagged.
#' @param annotation Sample annotation to be joined against \code{data}.
#' @param idColumn Name of the annotation id column (default "patientID").
#' @param subsetCondition Optional condition used to subset the data.
#' @param subsetChoices Optional choices for the subset condition.
#' @param sortConditions Conditions available for sorting.
#' @param markers Markers to consider.
#' @param colorConditions Conditions available for coloring.
#' @param mapVar Named character vector mapping the id variable to the data
#'   column (default \code{c("idVar" = "FCSFiles")}).
#'
#' @return The \code{checkedData} reactive (value of the final assignment,
#'   returned invisibly); presumably meant to return flagged samples (TODO).
#' @export
#'
#' @examples
flagModuleOutput <- function(input, output, session, data, annotation,
                             idColumn = "patientID", subsetCondition=NULL,
                             subsetChoices=NULL, sortConditions, markers,
                             colorConditions, mapVar = c("idVar"="FCSFiles")) {
  checkedData <- reactive({
    #verify that data maps to annotation, otherwise return NULL
    #each module should verify joins
  })
  #return flagged
}
|
/R/flagSamples.R
|
permissive
|
laderast/flowDashboard
|
R
| false
| false
| 1,122
|
r
|
#' Title
#'
#' @param id
#' @param label
#' @param markers
#' @param sortConditions
#' @param colorConditions
#' @param annotation
#'
#' @return
#' @export
#'
#' @examples
flagModuleUI <- function(id, label = "qcViolin", markers, sortConditions,
colorConditions, annotation) {
# Create a namespace function using the provided id
ns <- NS(id)
}
#' Title
#'
#' @param input
#' @param output
#' @param session
#' @param data
#' @param annotation
#' @param idColumn
#' @param subsetCondition
#' @param subsetChoices
#' @param sortConditions
#' @param markers
#' @param colorConditions
#' @param mapVar
#'
#' @return
#' @export
#'
#' @examples
flagModuleOutput <- function(input, output, session, data, annotation,
idColumn = "patientID", subsetCondition=NULL,
subsetChoices=NULL, sortConditions, markers,
colorConditions, mapVar = c("idVar"="FCSFiles")) {
checkedData <- reactive({
#verify that data maps to annotation, otherwise return NULL
#each module should verify joins
})
#return flagged
}
|
\name{get.raxml.treeLikelihoods}
\alias{get.raxml.treeLikelihoods}
\title{Extract likelihoods from a RAxML info file}
\description{Reads the info file from a RAxML site-likelihood analysis with multiple input trees.
Probably not often needed on its own, but used in \code{match.lnL.to.trees}.}
\usage{
get.raxml.treeLikelihoods(x, logfile = NA)
}
\arguments{
\item{x}{file name of a RAxML .info file from site-likelihood analysis}
\item{logfile}{name of a log file, useful for recording any files that were not successfully
read in}
}
\value{A named vector of class \code{double} with tree likelihoods, where the names are character
equivalents of the tree numbers; or, if the file had no trees in it, the character vector
"FAIL"}
\author{Andrew Hipp}
\seealso{
\code{\link{match.lnL.to.trees}},
\code{\link{get.raxml.siteLikelihoods}}
}
\keyword{IO}
|
/man/get.raxml.treeLikelihoods.Rd
|
no_license
|
andrew-hipp/RADami
|
R
| false
| false
| 877
|
rd
|
\name{get.raxml.treeLikelihoods}
\alias{get.raxml.treeLikelihoods}
\title{Extract likelihoods from a RAxML info file}
\description{Reads the info file from a RAxML site-likelihood analysis with multiple input trees.
Probably not often needed on its own, but used in \code{match.lnL.to.trees}.}
\usage{
get.raxml.treeLikelihoods(x, logfile = NA)
}
\arguments{
\item{x}{file name of a RAxML .info file from site-likelihood analysis}
\item{logfile}{name of a log file, useful for recording any files that were not successfully
read in}
}
\value{A named vector of class \code{double} with tree likelihoods, where the names are character
equivalents of the tree numbers; or, if the file had no trees in it, the character vector
"FAIL"}
\author{Andrew Hipp}
\seealso{
\code{\link{match.lnL.to.trees}},
\code{\link{get.raxml.siteLikelihoods}}
}
\keyword{IO}
|
# Chunked run of the exon-model workflow over all transcripts, with results
# checkpointed to disk after each chunk.
# NOTE(review): assumes `eset2` already exists in the workspace -- confirm.
library(ExonModelStrainXmap)
mapConnect(dbPackage="xmapcore")
trs <- getAllTranscripts()
# Smoke-test runs on small transcript slices before the full sweep.
res <- RunExonModelWorkflow(eset2, idlist=trs[1643:1650], analysisUnit="probeset")
res <- RunExonModelWorkflow(eset2, idlist=trs[500:510], analysisUnit="probeset")
resmulti <- NULL
ressingles <- NULL
i <- 1
while(i < length(trs)){
  # NOTE(review): the final chunk i:(i+500) can index past length(trs),
  # yielding NA ids -- confirm RunExonModelWorkflow tolerates that.
  minires <- RunExonModelWorkflow(eset2, idlist=trs[i:(i+500)], analysisUnit="probeset")
  resmulti <- rbind(resmulti, minires$multi)
  ressingles <- rbind(ressingles, minires$singles)
  i <- i + 501
  # Checkpoint after every chunk so partial results survive a crash.
  # NOTE(review): row.name=F relies on partial matching and the T/F aliases;
  # prefer row.names = FALSE in new code.
  write.table(resmulti, file="probeset-model-multi.txt", quote=F, row.name=F, sep="\t")
  write.table(ressingles, file="probeset-model-singles.txt", quote=F, row.name=F, sep="\t")
}
# NOTE(review): `out2` is not defined anywhere in this script (its creation
# is commented out below), so this line will fail -- confirm intent.
write.table(out2, file="probeset-model-multi.txt", quote=F, row.name=F, sep="\t")
#out2 <- xmap_annotate(out)
#write.table(out2, file="probeset-model-multi.txt", quote=F, row.name=F, sep="\t")
|
/tests/tests2.R
|
no_license
|
laderast/ExonModelStrain
|
R
| false
| false
| 922
|
r
|
library(ExonModelStrainXmap)
mapConnect(dbPackage="xmapcore")
trs <- getAllTranscripts()
res <- RunExonModelWorkflow(eset2, idlist=trs[1643:1650], analysisUnit="probeset")
res <- RunExonModelWorkflow(eset2, idlist=trs[500:510], analysisUnit="probeset")
resmulti <- NULL
ressingles <- NULL
i <- 1
while(i < length(trs)){
minires <- RunExonModelWorkflow(eset2, idlist=trs[i:(i+500)], analysisUnit="probeset")
resmulti <- rbind(resmulti, minires$multi)
ressingles <- rbind(ressingles, minires$singles)
i <- i + 501
write.table(resmulti, file="probeset-model-multi.txt", quote=F, row.name=F, sep="\t")
write.table(ressingles, file="probeset-model-singles.txt", quote=F, row.name=F, sep="\t")
}
write.table(out2, file="probeset-model-multi.txt", quote=F, row.name=F, sep="\t")
#out2 <- xmap_annotate(out)
#write.table(out2, file="probeset-model-multi.txt", quote=F, row.name=F, sep="\t")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/articles.R
\docType{data}
\name{suffix_lowercase}
\alias{suffix_lowercase}
\title{Catalan articles in lowercase, separated by comma}
\format{
An object of class \code{character} of length 11.
}
\usage{
suffix_lowercase
}
\description{
Catalan articles in lowercase, separated by comma
}
\keyword{datasets}
|
/man/suffix_lowercase.Rd
|
permissive
|
jmones/catmunalias
|
R
| false
| true
| 384
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/articles.R
\docType{data}
\name{suffix_lowercase}
\alias{suffix_lowercase}
\title{Catalan articles in lowercase, separated by comma}
\format{
An object of class \code{character} of length 11.
}
\usage{
suffix_lowercase
}
\description{
Catalan articles in lowercase, separated by comma
}
\keyword{datasets}
|
# Load required packages
library("VGAM")
library("BMS")
library("expectreg")
library("fGarch")
library("numDeriv")
library("rootSolve")
library("evd")
library("MASS")
library("stabledist")
# Uncomment to change your working directory setwd()
# Set parameters
RiskLevel = 0.01        # Predetermined Value at Risk level
Contamination   = 0.2   # between 0 and 1, 0 = Normal, 1 = Laplace
windowsize = 250        # Estimation window size for the rolling window
distribution = "StableDist"     # Distribution used in TERES methodology
# Convert CRIX price levels to log-returns.
Crix = read.csv("crix.csv")
y = Crix[, 2]
y = diff(log(y))
y = na.omit(y)
# Pre-white data with a GARCH model
GARCHvola = garchFit(~garch(1, 1), data = y)
ywhite = y/volatility(GARCHvola)
yclean = ywhite - mean(ywhite)
# Estimation of the index parameter alpha (stable distributions) for each of the moving windows
# One alpha per rolling window; alpha.indparam[i] is consumed later by tau()
# via the global loop index `i`.
alpha.indparam = matrix(data = NA, nrow = (length(y) - windowsize + 1), ncol = 1)
for (i in (1:(length(y) - windowsize + 1))) {
    ywindow = yclean[(i):(i + windowsize - 1)]
    Fit = stableFit(ywindow, alpha = 1.75, beta = 0, gamma = 1, delta = 0,
                    type = "mle", doplot = FALSE, control = list(),
                    trace = FALSE, title = NULL, description = NULL)
    alpha.indparam[i] = Fit@fit$estimate[1]
}
# Helpful funtions
# Map a quantile level `alpha` to the corresponding tau level under a
# contaminated distribution: F = (1 - delta) * Normal + delta * (Laplace or
# stable), per the TERES methodology (presumably the matching expectile
# level -- confirm against the TERES paper).
#
# WARNING(review): in the "StableDist" branch this function reads BOTH
# `alpha.indparam` and the loop index `i` from the global environment, so it
# is only valid when called from inside the rolling-window loop below.
#
# Steps: build the mixture CDF F, differentiate it numerically (numDeriv::grad)
# to get the density f, invert F by root finding to get the alpha-quantile q,
# integrate x*f(x) up to q (lower partial moment), and return
# tmp / (2*tmp + q) where tmp = LPM(q) - q*alpha.
tau = function(alpha, delta = 0, distribution) {
    # Degenerate level: treat (numerically) zero alpha as tau = 0.
    if (alpha < 1e-27) {
        return(0)
    }
    switch(distribution, Laplace = {
        F = function(x) {
            (1 - delta) * pnorm(x) + delta * plaplace(x)
        }
    }, StableDist = {
        F = function(x) {
            (1 - delta) * pnorm(x) + delta * pstable(x, alpha = alpha.indparam[i], beta = 1)
        }
    })
    # Density obtained by numerical differentiation of the mixture CDF.
    f = function(x) {
        grad(F, x)
    }
    # Numerical inverse of F on [-100, 100] via uniroot.
    inverse = function(f, lower = -100, upper = 100) {
        function(y) uniroot((function(x) f(x) - y), lower = lower, upper = upper)[1]
    }
    quantileFun = inverse(F)
    q = as.numeric(quantileFun(alpha))
    # Lower partial moment integrand and its integral up to x.
    LPM = function(x) {
        x * (f(x))
    }
    LPMq = function(x) {
        integrate(LPM, -Inf, x)
    }
    tmp = as.numeric(LPMq(q)[1]) - q * alpha
    return(tmp/(2 * tmp + q))
}
# TERES Expected Shortfall of a (standardized) sample at level `alpha` with
# contamination `delta`. Uses the sibling tau() and the global `distribution`;
# the ES is the empirical alpha-quantile adjusted by the tau-based factor.
ES = function(delta, alpha, sample) {
    funtau = sapply(alpha, tau, delta, distribution)
    etau = quantile(sample, alpha)
    return(etau + (etau - mean(sample))/(1 - 2 * funtau) * (funtau/alpha))
}
# EVT (peaks-over-threshold) Expected Shortfall benchmark: fit a GPD to losses
# above their 90% quantile with evd::fpot and plug the fitted scale/shape into
# the closed-form GPD ES. Returns c(ES estimate, GPD scale, GPD shape);
# the ES is negated back to return (not loss) units.
ES.EVT = function(x, alpha) {
    L = -x
    zq = quantile(L, 1 - alpha)
    # meplot(L, xlim = c(0,5))
    thr = quantile(L, 0.9)
    fitty = fpot(L, thr, model = "gpd", std.err = F)
    scale = as.numeric(fitty$scale)
    shape = as.numeric(fitty$param[2])
    evtES = -(zq/(1 - shape) + (scale - shape * thr)/(1 - shape))
    return(c(evtES, scale, shape))
}
# Actual estimation, this can take up to a minute
ESresults = matrix(data = NA, nrow = (length(y) - windowsize + 1), ncol = 4)
colnames(ESresults) = c("TERES", "EVT", "scale", "shape")
for (i in (1:(length(y) - windowsize + 1))) {
    # NOTE: tau() (called via ES) reads this loop index `i` from the global
    # environment to select alpha.indparam[i]; do not rename the index.
    ywindow = yclean[(i):(i + windowsize - 1)]
    # Rescale standardized ES back to return units with the fitted GARCH
    # volatility of the last day in the window.
    vola = volatility(GARCHvola)[i + windowsize - 1]
    ESresults[i, 1] = ES(Contamination, RiskLevel, ywindow) * vola
    # FIX: ES.EVT was previously called twice per window with identical
    # inputs; compute it once and reuse the result (fpot is deterministic).
    evt = ES.EVT(ywindow, RiskLevel)
    ESresults[i, 2] = evt[1] * vola
    ESresults[i, 3:4] = evt[2:3]
}
# Plot the results
plot(ESresults[, 1], ylab = "Expected Shortfall", type = "l", lwd = 0.8, col = "blue")
plot(ESresults[, 2], ylab = "Expected Shortfall", type = "l", lwd = 0.8, col = "green")
plot(ESresults[, 1] - ESresults[, 2], ylab = "Expected Shortfall", type = "l", lwd = 2, col = "red")
# uncomment to save the results write.table(ESresults, file = 'ESfromRollingWindow.csv', sep = ',')
# write.table(yclean, file = 'StandardizedReturns.csv', sep = ',')
|
/TERES_EVT.R
|
no_license
|
Ver2307/CRIX---TERES-EVT
|
R
| false
| false
| 3,603
|
r
|
# Load required packages
library("VGAM")
library("BMS")
library("expectreg")
library("fGarch")
library("numDeriv")
library("rootSolve")
library("evd")
library("MASS")
library("stabledist")
# Uncomment to change your working directory setwd()
# Set parameters
RiskLevel = 0.01 # Predetermined Value at Risk level
Contamination = 0.2 # between 0 and 1, 0 = Normal, 1 = Laplace
windowsize = 250 # Estimation window size for the rolling window
distribution = "StableDist" # Distribution used in TERES methodology
Crix = read.csv("crix.csv")
y = Crix[, 2]
y = diff(log(y))
y = na.omit(y)
# Pre-white data with a GARCH model
GARCHvola = garchFit(~garch(1, 1), data = y)
ywhite = y/volatility(GARCHvola)
yclean = ywhite - mean(ywhite)
# Estimation of the index parameter alpha (stable distributions) for each of the moving windows
alpha.indparam = matrix(data = NA, nrow = (length(y) - windowsize + 1), ncol = 1)
for (i in (1:(length(y) - windowsize + 1))) {
ywindow = yclean[(i):(i + windowsize - 1)]
Fit = stableFit(ywindow, alpha = 1.75, beta = 0, gamma = 1, delta = 0,
type = "mle", doplot = FALSE, control = list(),
trace = FALSE, title = NULL, description = NULL)
alpha.indparam[i] = Fit@fit$estimate[1]
}
# Helpful funtions
tau = function(alpha, delta = 0, distribution) {
if (alpha < 1e-27) {
return(0)
}
switch(distribution, Laplace = {
F = function(x) {
(1 - delta) * pnorm(x) + delta * plaplace(x)
}
}, StableDist = {
F = function(x) {
(1 - delta) * pnorm(x) + delta * pstable(x, alpha = alpha.indparam[i], beta = 1)
}
})
f = function(x) {
grad(F, x)
}
inverse = function(f, lower = -100, upper = 100) {
function(y) uniroot((function(x) f(x) - y), lower = lower, upper = upper)[1]
}
quantileFun = inverse(F)
q = as.numeric(quantileFun(alpha))
LPM = function(x) {
x * (f(x))
}
LPMq = function(x) {
integrate(LPM, -Inf, x)
}
tmp = as.numeric(LPMq(q)[1]) - q * alpha
return(tmp/(2 * tmp + q))
}
ES = function(delta, alpha, sample) {
funtau = sapply(alpha, tau, delta, distribution)
etau = quantile(sample, alpha)
return(etau + (etau - mean(sample))/(1 - 2 * funtau) * (funtau/alpha))
}
ES.EVT = function(x, alpha) {
L = -x
zq = quantile(L, 1 - alpha)
# meplot(L, xlim = c(0,5))
thr = quantile(L, 0.9)
fitty = fpot(L, thr, model = "gpd", std.err = F)
scale = as.numeric(fitty$scale)
shape = as.numeric(fitty$param[2])
evtES = -(zq/(1 - shape) + (scale - shape * thr)/(1 - shape))
return(c(evtES, scale, shape))
}
# Actual estimation, this can take up to a minute
ESresults = matrix(data = NA, nrow = (length(y) - windowsize + 1), ncol = 4)
colnames(ESresults) = c("TERES", "EVT", "scale", "shape")
for (i in (1:(length(y) - windowsize + 1))) {
ywindow = yclean[(i):(i + windowsize - 1)]
ESresults[i, 1] = ES(Contamination, RiskLevel, ywindow) * volatility(GARCHvola)[i + windowsize - 1]
ESresults[i, 2] = ES.EVT(ywindow, RiskLevel)[1] * volatility(GARCHvola)[i + windowsize - 1]
ESresults[i, 3:4] = ES.EVT(ywindow, RiskLevel)[2:3]
}
# Plot the results
plot(ESresults[, 1], ylab = "Expected Shortfall", type = "l", lwd = 0.8, col = "blue")
plot(ESresults[, 2], ylab = "Expected Shortfall", type = "l", lwd = 0.8, col = "green")
plot(ESresults[, 1] - ESresults[, 2], ylab = "Expected Shortfall", type = "l", lwd = 2, col = "red")
# uncomment to save the results write.table(ESresults, file = 'ESfromRollingWindow.csv', sep = ',')
# write.table(yclean, file = 'StandardizedReturns.csv', sep = ',')
|
#' Flip x,y to y,x, and vice versa
#'
#' @export
#' @param input Feature of features
#' @template lint
#' @return a \code{\link{data-Feature}} or \code{\link{data-FeatureCollection}}
#' @examples
#' # a point
#' serbia <- '{
#' "type": "Feature",
#' "properties": {"color": "red"},
#' "geometry": {
#' "type": "Point",
#' "coordinates": [20.566406, 43.421008]
#' }
#' }'
#' lawn_flip(serbia)
#'
#' # a featurecollection
#' pts <- lawn_random("points")
#' lawn_flip(pts)
#' @examples \dontrun{
#' lawn_data$points_average %>% view
#' lawn_flip(lawn_data$points_average) %>% view
#' lawn_data$polygons_average %>% view
#' lawn_flip(lawn_data$polygons_average) %>% view
#' }
lawn_flip <- function(input, lint = FALSE) {
  # Normalize the input to a GeoJSON string and optionally lint it.
  geojson <- convert(input)
  lawnlint(geojson, lint)
  # Evaluate turf.flip() in the embedded JS context, then pull the result
  # back into R, classing it by its (lower-cased) GeoJSON type.
  js_call <- sprintf("var flp = turf.flip(%s);", geojson)
  ct$eval(js_call)
  flipped <- ct$get("flp")
  structure(flipped, class = tolower(ct$get("flp.type")))
}
|
/lawn/R/flip.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 904
|
r
|
#' Flip x,y to y,x, and vice versa
#'
#' @export
#' @param input Feature of features
#' @template lint
#' @return a \code{\link{data-Feature}} or \code{\link{data-FeatureCollection}}
#' @examples
#' # a point
#' serbia <- '{
#' "type": "Feature",
#' "properties": {"color": "red"},
#' "geometry": {
#' "type": "Point",
#' "coordinates": [20.566406, 43.421008]
#' }
#' }'
#' lawn_flip(serbia)
#'
#' # a featurecollection
#' pts <- lawn_random("points")
#' lawn_flip(pts)
#' @examples \dontrun{
#' lawn_data$points_average %>% view
#' lawn_flip(lawn_data$points_average) %>% view
#' lawn_data$polygons_average %>% view
#' lawn_flip(lawn_data$polygons_average) %>% view
#' }
lawn_flip <- function(input, lint = FALSE) {
  # Normalize the input to a GeoJSON string and optionally lint it.
  geojson <- convert(input)
  lawnlint(geojson, lint)
  # Evaluate turf.flip() in the embedded JS context, then pull the result
  # back into R, classing it by its (lower-cased) GeoJSON type.
  js_call <- sprintf("var flp = turf.flip(%s);", geojson)
  ct$eval(js_call)
  flipped <- ct$get("flp")
  structure(flipped, class = tolower(ct$get("flp.type")))
}
|
#' Run replicate HMM simulation studies with a parallel-tempered Gibbs sampler
#'
#' Simulates `N` replicate data sets under the design selected by `SimID`,
#' fits each with `gibbsHMM_PT_wDist_LYRAfinally` in parallel, and summarises
#' the posterior number of alive states and L1 distances per replicate.
#' NOTE(review): the previous roxygen header (title, keywords, example) was
#' copy-pasted from an unrelated Dirichlet-density function and has been
#' replaced here.
#'
#' @param N number of replicate data sets to simulate and fit.
#' @param n sample size passed to the simulation function.
#' @param Kfit number of hidden states fitted (default 10).
#' @param SimID simulation design selector; must be 1, 2 or 3 (other values
#'   silently yield NULL -- TODO confirm/validate).
#' @param ITERATIONS number of MCMC iterations per fit.
#' @param BURN number of initial iterations discarded when summarising.
#' @param AMAX forwarded as `alphaMAX` to the sampler.
#' @param PRIOR_TYPE forwarded as `type` to the sampler.
#' @param PTchain number of parallel-tempering chains (default 20).
#' @return A data.frame with one row per replicate and columns Run, MedianK0,
#'   MeanDist, MeanDist_MERGED and WorstMixChain.
#' @export
ReplicateSimer2.old<-function( N, n, Kfit=10, SimID, ITERATIONS,BURN, AMAX, PRIOR_TYPE, PTchain=20){
    # STORE SIMULATIONS in a list
    # Select the data-generating function for the chosen design.
    simFunctionMorpher<-function(SimNumber){
        if( SimNumber==1){ return(FunkSim1)
        }else if (SimNumber==2){ return(FunkSim3)
        }else if (SimNumber==3){ return(FunkSim4)
        } }
    MorphingSIMULATE<-simFunctionMorpher(SimID)
    SIMS<-lapply( rep(n,N), MorphingSIMULATE )
    # Compute density for L1 norm and store in a list
    simDensityMorpher<-function(SimNumber){
        if( SimNumber==1){ return( SimDensity1)
        }else if (SimNumber==2){ return(SimDensity3)
        }else if (SimNumber==3){ return(SimDensity4)
        } }
    MorphineDENSITY<-simDensityMorpher(SimID)
    SIM_DENSITY_TRUE<-lapply(SIMS, MorphineDENSITY)
    NumCores<-min(parallel::detectCores(), N)
    # Clean up Gibbs for lyra...
    # NOTE(review): mclapply forks the R process; on Windows mc.cores > 1 is
    # not supported. Loading a package inside a function is a side effect.
    library(parallel)
    Result<-mclapply(c(1:N), function(x) {
        gibbsHMM_PT_wDist_LYRAfinally(YZ=SIMS[[x]],K=Kfit, densTrue=SIM_DENSITY_TRUE[[x]], M=ITERATIONS, alphaMAX=AMAX, type= PRIOR_TYPE, alphaMin=0.001, J=PTchain, SuppressAll="TRUE")
    } , mc.cores = NumCores)
    print(NumCores)
    # combine results!
    #Alive<-sapply(Result, function(x) median(x$K0[-c(1:BURN)]))
    # Summarise each replicate's chain after discarding the burn-in.
    Fin<-data.frame("Run"=rep(0,N), "MedianK0"=rep(0, N), "MeanDist"=rep(0,N), "MeanDist_MERGED"=rep(0,N), "WorstMixChain"=rep(0,N))
    for(i in 1:N){
        Fin[i,1]<-i
        .result<-Result[[i]]
        # print(head(.result))
        Fin[i,2]<- median(.result$K0[-c(1:BURN)])
        Fin[i,3]<-mean(.result$f2Dist[-c(1:BURN)])
        Fin[i,4]<-mean(.result$f2Dist_Merged[-c(1:BURN)])
        Fin[i,5]<-.result$WorstMixProp[1]
    }
    #Alive<-sapply(Result, function(x) median( x[[]]$K0[-c(1:BURN)]))
    # Alive<-sapply(Result, function(x) median( x$K0[-c(1:BURN)]))
    # L1norm<-sapply(Result, function(x) mean(x[['f2Dist']][-c(1:BURN)]))
    # SmallResults<-data.frame("AliveStates"=Alive, "L1norm"=L1norm)
    return(Fin)
}
|
/R/ReplicateSimer2.old.R
|
no_license
|
zoevanhavre/Zhmm.0
|
R
| false
| false
| 2,080
|
r
|
#' ReplicateSimer2.old
#'
#' Simulates N replicate data sets, fits each with a parallel-tempered Gibbs
#' HMM sampler (run in parallel across cores), and returns per-replicate
#' posterior summaries.
#' NOTE(review): the original roxygen header was copied from a different
#' function (gibbsHMM_PT / dDirichlet); rewritten here to match this code.
#'
#' @param N number of replicate data sets to simulate and fit
#' @param n sample size passed to the simulation function for each replicate
#' @param Kfit number of states fitted by the sampler (default 10)
#' @param SimID simulation scenario: 1, 2 or 3 (selects FunkSim1/FunkSim3/FunkSim4
#'   and the matching SimDensity1/SimDensity3/SimDensity4 density function)
#' @param ITERATIONS number of MCMC iterations (M in the sampler call)
#' @param BURN number of initial iterations discarded when summarising chains
#' @param AMAX maximum alpha of the prior (alphaMAX in the sampler call)
#' @param PRIOR_TYPE prior type, forwarded unchanged to the sampler
#' @param PTchain number of parallel-tempering chains (J), default 20
#' @return data.frame with one row per replicate and columns Run, MedianK0,
#'   MeanDist, MeanDist_MERGED and WorstMixChain
#' @export
ReplicateSimer2.old<-function( N, n, Kfit=10, SimID, ITERATIONS,BURN, AMAX, PRIOR_TYPE, PTchain=20){
# STORE SIMULATIONS in a list
# Map the scenario id to its data-simulation function.
simFunctionMorpher<-function(SimNumber){
if( SimNumber==1){ return(FunkSim1)
}else if (SimNumber==2){ return(FunkSim3)
}else if (SimNumber==3){ return(FunkSim4)
} }
MorphingSIMULATE<-simFunctionMorpher(SimID)
SIMS<-lapply( rep(n,N), MorphingSIMULATE )
# Compute density for L1 norm and store in a list
# Map the scenario id to the matching true-density function.
simDensityMorpher<-function(SimNumber){
if( SimNumber==1){ return( SimDensity1)
}else if (SimNumber==2){ return(SimDensity3)
}else if (SimNumber==3){ return(SimDensity4)
} }
MorphineDENSITY<-simDensityMorpher(SimID)
SIM_DENSITY_TRUE<-lapply(SIMS, MorphineDENSITY)
# Never request more workers than there are replicates.
NumCores<-min(parallel::detectCores(), N)
# Clean up Gibbs for lyra...
# NOTE(review): library() inside a function is discouraged in package code;
# parallel:: is already used above, so mclapply could be namespaced instead.
library(parallel)
# Fit each simulated data set independently (one worker per replicate).
Result<-mclapply(c(1:N), function(x) {
gibbsHMM_PT_wDist_LYRAfinally(YZ=SIMS[[x]],K=Kfit, densTrue=SIM_DENSITY_TRUE[[x]], M=ITERATIONS, alphaMAX=AMAX, type= PRIOR_TYPE, alphaMin=0.001, J=PTchain, SuppressAll="TRUE")
} , mc.cores = NumCores)
print(NumCores)
# combine results!
#Alive<-sapply(Result, function(x) median(x$K0[-c(1:BURN)]))
# Post-burn-in summaries per replicate (assumes BURN >= 1 -- TODO confirm
# callers never pass BURN = 0).
Fin<-data.frame("Run"=rep(0,N), "MedianK0"=rep(0, N), "MeanDist"=rep(0,N), "MeanDist_MERGED"=rep(0,N), "WorstMixChain"=rep(0,N))
for(i in 1:N){
Fin[i,1]<-i
.result<-Result[[i]]
# print(head(.result))
Fin[i,2]<- median(.result$K0[-c(1:BURN)])
Fin[i,3]<-mean(.result$f2Dist[-c(1:BURN)])
Fin[i,4]<-mean(.result$f2Dist_Merged[-c(1:BURN)])
Fin[i,5]<-.result$WorstMixProp[1]
}
#Alive<-sapply(Result, function(x) median( x[[]]$K0[-c(1:BURN)]))
# Alive<-sapply(Result, function(x) median( x$K0[-c(1:BURN)]))
# L1norm<-sapply(Result, function(x) mean(x[['f2Dist']][-c(1:BURN)]))
# SmallResults<-data.frame("AliveStates"=Alive, "L1norm"=L1norm)
return(Fin)
}
|
library(minque)
### Name: brate
### Title: Cotton boll retention rate data
### Aliases: brate
### Keywords: datasets cotton boll retention linear mixed model MINQUE REML
### resampling jackknife
### ** Examples
library(minque)
data(brate)
head(brate)
brate$Geno=factor(brate$Geno)
brate$Pos=factor(brate$Pos)
brate$Rep=factor(brate$Rep)
res=lmm(Brate~1|Geno*Pos+Rep,data=brate)
res[[1]]$Var
res[[1]]$FixedEffect
res[[1]]$RandomEffect
res=lmm.jack(Brate~1|Geno*Pos+Rep,data=brate,JacNum=10,JacRep=1,ALPHA=0.05)
res[[1]]$Var
res[[1]]$PVar
res[[1]]$FixedEffect
res[[1]]$RandomEffect
## end
|
/data/genthat_extracted_code/minque/examples/brate.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 617
|
r
|
# Example usage of the `brate` (cotton boll retention rate) data set from the
# minque package: fits a linear mixed model via MINQUE, then re-estimates it
# with jackknife resampling.
library(minque)
### Name: brate
### Title: Cotton boll retention rate data
### Aliases: brate
### Keywords: datasets cotton boll retention linear mixed model MINQUE REML
### resampling jackknife
### ** Examples
library(minque)
data(brate)
head(brate)
# Convert the design variables to factors so lmm() treats them as
# classification effects rather than numeric covariates.
brate$Geno=factor(brate$Geno)
brate$Pos=factor(brate$Pos)
brate$Rep=factor(brate$Rep)
# Mixed model for Brate with effects Geno, Pos, their interaction, and Rep.
res=lmm(Brate~1|Geno*Pos+Rep,data=brate)
res[[1]]$Var           # estimated variance components
res[[1]]$FixedEffect   # fixed-effect estimates
res[[1]]$RandomEffect  # predicted random effects
# Same model, estimated with jackknife resampling (10 groups, 1 repetition,
# alpha = 0.05 for the intervals).
res=lmm.jack(Brate~1|Geno*Pos+Rep,data=brate,JacNum=10,JacRep=1,ALPHA=0.05)
res[[1]]$Var
res[[1]]$PVar          # NOTE(review): presumably jackknife tests/intervals for the variances -- confirm in minque docs
res[[1]]$FixedEffect
res[[1]]$RandomEffect
## end
|
## By default, do not run the tests
## which also means do not run on CRAN
runTests <- FALSE
## Use the Travis / GitHub integrations as we set this
## environment variable to "yes" in .travis.yml
if (Sys.getenv("RunGgplot2Tests=yes") == "yes") runTests <- TRUE
## Also run the tests when building on Dean's machine
if (isTRUE(unname(Sys.info()["user"]) == "Dean")) runTests <- TRUE
if (runTests) {
# Wrap up the ggMarginal visual tests in a function runMarginalTests so that
# it's easy to test under multiple versions of ggplot2
runMarginalTests <- function(ggplot2Version) {
context <- paste("ggMarginal under ggplot2 version", ggplot2Version)
context(context)
test_that("ggMarginal can produce basic marginal plots" , {
sapply(c("basic density", "basic histogram", "basic boxplot",
"scatter plot from data"), function(x)
expectDopp2(funName = x, ggplot2Version = ggplot2Version))
})
test_that("ggMarginal's other params work" , {
sapply(c("only x margin", "smaller marginal plots", "both hists red col",
"top hist red col and fill"), function(x)
expectDopp2(funName = x, ggplot2Version = ggplot2Version))
})
test_that("Misc. issues are solved" , {
sapply(c("theme bw", "legend and title",
"flipped coord where x is drat and y is wt"), function(x)
expectDopp2(funName = x, ggplot2Version = ggplot2Version))
})
}
# Function to run all visual regression tests across all ggplot2 versions
runMarginalTestsApply <- function(ggplot2Versions) {
sapply(ggplot2Versions, function(ggplot2Version) {
withGGplot2Version(ggplot2Version, {
runMarginalTests(ggplot2Version)
})
})
}
runMarginalTestsApply(c("2.2.0", "2.2.1", "latest"))
}
|
/tests/testthat/test-ggMarginal.R
|
permissive
|
2533245542/ggExtra
|
R
| false
| false
| 1,825
|
r
|
## By default, do not run the tests
## which also means do not run on CRAN
runTests <- FALSE
## Use the Travis / GitHub integrations as we set this
## environment variable to "yes" in .travis.yml
## FIX: Sys.getenv() expects the variable *name*; the original queried a
## variable literally called "RunGgplot2Tests=yes", which can never exist,
## so the CI branch was dead code and the visual tests never ran on CI.
if (Sys.getenv("RunGgplot2Tests") == "yes") runTests <- TRUE
## Also run the tests when building on Dean's machine
if (isTRUE(unname(Sys.info()["user"]) == "Dean")) runTests <- TRUE
if (runTests) {
  # Wrap up the ggMarginal visual tests in a function runMarginalTests so that
  # it's easy to test under multiple versions of ggplot2
  runMarginalTests <- function(ggplot2Version) {
    # Renamed local from `context` to avoid shadowing testthat::context()
    # with a character vector of the same name.
    contextLabel <- paste("ggMarginal under ggplot2 version", ggplot2Version)
    context(contextLabel)
    test_that("ggMarginal can produce basic marginal plots", {
      sapply(c("basic density", "basic histogram", "basic boxplot",
               "scatter plot from data"), function(x)
        expectDopp2(funName = x, ggplot2Version = ggplot2Version))
    })
    test_that("ggMarginal's other params work", {
      sapply(c("only x margin", "smaller marginal plots", "both hists red col",
               "top hist red col and fill"), function(x)
        expectDopp2(funName = x, ggplot2Version = ggplot2Version))
    })
    test_that("Misc. issues are solved", {
      sapply(c("theme bw", "legend and title",
               "flipped coord where x is drat and y is wt"), function(x)
        expectDopp2(funName = x, ggplot2Version = ggplot2Version))
    })
  }
  # Run all visual regression tests across all requested ggplot2 versions.
  runMarginalTestsApply <- function(ggplot2Versions) {
    sapply(ggplot2Versions, function(ggplot2Version) {
      withGGplot2Version(ggplot2Version, {
        runMarginalTests(ggplot2Version)
      })
    })
  }
  runMarginalTestsApply(c("2.2.0", "2.2.1", "latest"))
}
|
# Hausaufgabe 5
# Diffusionsprozesse.
setwd("\\\\fs.univie.ac.at\\homedirs\\a1277687\\Desktop\\NA für finanzmathematik")
ITO_SIM_PR <- function( iter = 100, anz_perioden = 50,
anfang = 1.05, anz_schritte=900, sigma=0.05, mlt= 0.7){
random_vals <- matrix(rnorm(iter*anz_schritte*anz_perioden), ncol=iter)
X <- matrix(NA_real_, ncol = iter, nrow = anz_schritte*anz_perioden + 1)
X[1,] <- anfang
for(i in 2:nrow(X)) {
X[i, ] <- X[i-1, ] +(random_vals[i-1, ]*sqrt(X[(i-1),])/sqrt(anz_schritte))*sigma+mlt*(6-2*(X[(i-1),]))/anz_schritte
}
return(X)
}
# Jetzt simulieren wir unseren Prozess mit verschieden Parametern
png("ITO_Prozess_mit_Parametern_sigma=0.05anfang=3, mlt=0.7.png", width= 1280, height= 980)
idp<- ITO_SIM_PR( anfang=3, sigma =0.05, mlt= 0.7 )
matplot(seq(0,10, le=45001), idp, col=rainbow(100), type="l", lty=1, main= " ITO Prozess mit Parametern: sigma=0.05, anfang=3, mlt=0.7" ,xlab= "T", ylab= "Wert", cex=4)
dev.off()
png("ITO_Prozessmit_Parametern_sigma=0.5anfang=3.png", width= 1280, height= 980)
idp<- ITO_SIM_PR( anfang=3, sigma =0.5, mlt= 0.7)
matplot(seq(0,10, le=45001), idp, col=rainbow(100), type="l", lty=1, main= "ITO Prozess mit Parametern: sigma=0.5, anfang=3mlt= 0.7" ,xlab= "T", ylab= "Wert", cex=4)
dev.off()
png("ITO_Prozessmit_Parametern_sigma=0.8_anfang=3,mlt= 0.7.png", width= 1280, height= 980)
idp<- ITO_SIM_PR( anfang=3, sigma =0.8,mlt= 0.7)
matplot(seq(0,10, le=45001), idp, col=rainbow(100), type="l", lty=1, main= "ITO Prozess mit Parametern: sigma=0.8, anfang=3, mlt= 0.7" ,xlab= "T", ylab= "Wert", cex=4)
dev.off()
png("ITO_Prozessmit_Parametern_sigma=1_anfang=3.png", width= 1280, height= 980)
idp<- ITO_SIM_PR( anfang=3, sigma =1, mlt= 0.7)
matplot(seq(0,10, le=45001), idp, col=rainbow(100), type="l", lty=1, main= "ITO Prozess mit Parametern: sigma=1, anfang=3, mlt= 0.7" ,xlab= "T", ylab= "Wert", cex=4)
dev.off()
png("ITO_Prozessmit_Parametern_sigma=2_anfang=3.png", width= 1280, height= 980)
idp<- ITO_SIM_PR( anfang=3, sigma =2, mlt= 0.7)
matplot(seq(0,10, le=45001), idp, col=rainbow(100), type="l", lty=1, main= "ITO Prozess mit Parametern: sigma=2, anfang=3, mlt= 0.7" ,xlab= "T", ylab= "Wert", cex=4)
dev.off()
x11(1280, 960)
ITO_SIM_PR <- function( iter = 100, anz_perioden = 50,
anfang = 1.05,anz_schritte=900, sigma=0.05, Multiplikator=0.7){
random_vals <- matrix(rnorm(iter*anz_schritte*anz_perioden), ncol=iter)
X <- matrix(NA_real_, ncol = iter, nrow = anz_schritte*anz_perioden + 1)
X[1,] <- anfang
for(i in 2:nrow(X)) {
X[i, ] <- X[i-1, ] + random_vals[i-1, ]*sqrt(X[(i-1),])/sqrt(anz_schritte)*sigma+Multiplikator*(6-2*X[(i-1),]+cos((i-1)/anz_schritte))/anz_schritte
}
return(X)
}
# Jetzt simulieren wir unseren Prozess mit verschieden Parametern
png("ITO_Prozessmit_Parametern2_sigma=1_anfang=3_MP=0.7.png", width= 1280, height= 980)
idp<- ITO_SIM_PR(anfang =3,sigma=1)
matplot(seq(0,50, le=45001), idp, col=topo.colors(100), type="l", lty=1, main= " ITO Prozess mit Parametern: sigma=1, anfang=3, Multiplikator=0.7.",xlab= "T", ylab= "Wert", cex=4)
lines(seq(0,50, le=45001), rowMeans(idp), col = "red")
dev.off()
png("ITO_Prozessmit_Parametern2_sigma=1.5_anfang=3_MP=0.7.png",width= 1280, height= 980)
idp<- ITO_SIM_PR(anfang =3,sigma=1.5)
matplot(seq(0,50, le=45001), idp, col=topo.colors(100), type="l", lty=1, main= " ITO Prozess mit Parametern: sigma=1.5, anfang=3,Multiplikator=0.7.",xlab= "T", ylab= "Wert", cex=4)
lines(seq(0,50, le=45001), rowMeans(idp), col = "red")
dev.off()
png("ITO_Prozessmit_Parametern2_sigma=0.2_anfang=3_MP=0.7.png", width= 1280, height= 980)
idp<- ITO_SIM_PR(anfang =3,sigma=0.2)
matplot(seq(0,50, le=45001), idp, col=topo.colors(100), type="l", lty=1, main= " ITO Prozess mit Parametern: sigma=0.2, anfang=3,Multiplikator=0.7.",xlab= "T", ylab= "Wert", cex=4)
lines(seq(0,50, le=45001), rowMeans(idp), col = "red")
dev.off()
png("ITO_Prozessmit_Parametern2_sigma=0.05_anfang=3_MP=0.7.png", width= 1280, height= 980)
idp<- ITO_SIM_PR(anfang =3,sigma=0.05)
matplot(seq(0,50, le=45001), idp, col=topo.colors(100), type="l", lty=1, main= " ITO Prozess mit Parametern: sigma=0.05, anfang=3, Multiplikator=0.7.",xlab= "T", ylab= "Wert", cex=4)
lines(seq(0,50, le=45001), rowMeans(idp), col = "red")
dev.off()
png("ITO_Prozessmit_Parametern2_sigma=1_anfang=3_MP=10.png", width= 1280, height= 980)
idp<- ITO_SIM_PR(anfang =3,sigma=1, Multiplikator=10)
matplot(seq(0,50, le=45001), idp, col=topo.colors(100), type="l", lty=1, main= " ITO Prozess mit Parametern: sigma=1, anfang=3, Multiplikator=1.",xlab= "T", ylab= "Wert", cex=4)
lines(seq(0,50, le=45001), rowMeans(idp), col = "red")
dev.off()
png("ITO_Prozessmit_Parametern2_sigma=1_anfang=3_MP=2.png", width= 1280, height= 980)
idp<- ITO_SIM_PR(anfang =3,sigma=1, Multiplikator=2)
matplot(seq(0,50, le=45001), idp, col=topo.colors(100), type="l", lty=1, main= " ITO Prozess mit Parametern: sigma=1, anfang=3, Multiplikator=2.",xlab= "T", ylab= "Wert", cex=4)
lines(seq(0,50, le=45001), rowMeans(idp), col = "red")
dev.off()
png("ITO_Prozessmit_Parametern2_sigma=1_anfang=3_MP=0.1.png", width= 1280, height= 980)
idp<- ITO_SIM_PR(anfang =3,sigma=1, Multiplikator=0.1)
matplot(seq(0,50, le=45001), idp, col=topo.colors(100), type="l", lty=1, main= " ITO Prozess mit Parametern: sigma=1, anfang=3, Multiplikator=0.1.",xlab= "T", ylab= "Wert", cex=4)
lines(seq(0,50, le=45001), rowMeans(idp), col = "red")
dev.off()
png("ITO_Prozessmit_Parametern2_sigma=1_anfang=3_MP=0.2.png",width= 1280, height= 980)
idp<- ITO_SIM_PR(anfang =3,sigma=1, Multiplikator=0.2)
matplot(seq(0,50, le=45001), idp, col=topo.colors(100), type="l", lty=1, main= " ITO Prozess mit Parametern: sigma=1, anfang=3, Multiplikator=0.2.",xlab= "T", ylab= "Wert", cex=4)
lines(seq(0,50, le=45001), rowMeans(idp), col = "red")
dev.off()
|
/Simulation eines Diffusionsprozesses_Timur_Sudak_a1277687.r
|
no_license
|
Timsud/Finanzmathematik
|
R
| false
| false
| 6,137
|
r
|
# Hausaufgabe 5
# Diffusionsprozesse.
setwd("\\\\fs.univie.ac.at\\homedirs\\a1277687\\Desktop\\NA für finanzmathematik")
# Simulates `iter` independent paths of an Ito diffusion on a grid of
# anz_schritte steps per period over anz_perioden periods (Euler-type scheme).
# Per-step update:
#   X_i = X_{i-1} + sigma*sqrt(X_{i-1})*Z/sqrt(anz_schritte)
#                 + mlt*(6 - 2*X_{i-1})/anz_schritte
# i.e. a square-root diffusion with drift mean-reverting towards 3
# (2*mlt*(3 - X)). NOTE(review): sqrt(X) yields NaN if a path goes negative;
# no positivity safeguard is applied -- confirm the parameters keep paths > 0.
#
# Args:
#   iter          number of simulated paths (columns of the result)
#   anz_perioden  number of periods simulated
#   anfang        common starting value X[1, ] for all paths
#   anz_schritte  time steps per period (scales both drift and diffusion)
#   sigma         diffusion coefficient
#   mlt           drift multiplier
# Returns: (anz_schritte*anz_perioden + 1) x iter matrix of simulated paths.
ITO_SIM_PR <- function( iter = 100, anz_perioden = 50,
anfang = 1.05, anz_schritte=900, sigma=0.05, mlt= 0.7){
random_vals <- matrix(rnorm(iter*anz_schritte*anz_perioden), ncol=iter)
X <- matrix(NA_real_, ncol = iter, nrow = anz_schritte*anz_perioden + 1)
X[1,] <- anfang
for(i in 2:nrow(X)) {
X[i, ] <- X[i-1, ] +(random_vals[i-1, ]*sqrt(X[(i-1),])/sqrt(anz_schritte))*sigma+mlt*(6-2*(X[(i-1),]))/anz_schritte
}
return(X)
}
# Jetzt simulieren wir unseren Prozess mit verschieden Parametern
png("ITO_Prozess_mit_Parametern_sigma=0.05anfang=3, mlt=0.7.png", width= 1280, height= 980)
idp<- ITO_SIM_PR( anfang=3, sigma =0.05, mlt= 0.7 )
matplot(seq(0,10, le=45001), idp, col=rainbow(100), type="l", lty=1, main= " ITO Prozess mit Parametern: sigma=0.05, anfang=3, mlt=0.7" ,xlab= "T", ylab= "Wert", cex=4)
dev.off()
png("ITO_Prozessmit_Parametern_sigma=0.5anfang=3.png", width= 1280, height= 980)
idp<- ITO_SIM_PR( anfang=3, sigma =0.5, mlt= 0.7)
matplot(seq(0,10, le=45001), idp, col=rainbow(100), type="l", lty=1, main= "ITO Prozess mit Parametern: sigma=0.5, anfang=3mlt= 0.7" ,xlab= "T", ylab= "Wert", cex=4)
dev.off()
png("ITO_Prozessmit_Parametern_sigma=0.8_anfang=3,mlt= 0.7.png", width= 1280, height= 980)
idp<- ITO_SIM_PR( anfang=3, sigma =0.8,mlt= 0.7)
matplot(seq(0,10, le=45001), idp, col=rainbow(100), type="l", lty=1, main= "ITO Prozess mit Parametern: sigma=0.8, anfang=3, mlt= 0.7" ,xlab= "T", ylab= "Wert", cex=4)
dev.off()
png("ITO_Prozessmit_Parametern_sigma=1_anfang=3.png", width= 1280, height= 980)
idp<- ITO_SIM_PR( anfang=3, sigma =1, mlt= 0.7)
matplot(seq(0,10, le=45001), idp, col=rainbow(100), type="l", lty=1, main= "ITO Prozess mit Parametern: sigma=1, anfang=3, mlt= 0.7" ,xlab= "T", ylab= "Wert", cex=4)
dev.off()
png("ITO_Prozessmit_Parametern_sigma=2_anfang=3.png", width= 1280, height= 980)
idp<- ITO_SIM_PR( anfang=3, sigma =2, mlt= 0.7)
matplot(seq(0,10, le=45001), idp, col=rainbow(100), type="l", lty=1, main= "ITO Prozess mit Parametern: sigma=2, anfang=3, mlt= 0.7" ,xlab= "T", ylab= "Wert", cex=4)
dev.off()
x11(1280, 960)
ITO_SIM_PR <- function( iter = 100, anz_perioden = 50,
anfang = 1.05,anz_schritte=900, sigma=0.05, Multiplikator=0.7){
random_vals <- matrix(rnorm(iter*anz_schritte*anz_perioden), ncol=iter)
X <- matrix(NA_real_, ncol = iter, nrow = anz_schritte*anz_perioden + 1)
X[1,] <- anfang
for(i in 2:nrow(X)) {
X[i, ] <- X[i-1, ] + random_vals[i-1, ]*sqrt(X[(i-1),])/sqrt(anz_schritte)*sigma+Multiplikator*(6-2*X[(i-1),]+cos((i-1)/anz_schritte))/anz_schritte
}
return(X)
}
# Jetzt simulieren wir unseren Prozess mit verschieden Parametern
png("ITO_Prozessmit_Parametern2_sigma=1_anfang=3_MP=0.7.png", width= 1280, height= 980)
idp<- ITO_SIM_PR(anfang =3,sigma=1)
matplot(seq(0,50, le=45001), idp, col=topo.colors(100), type="l", lty=1, main= " ITO Prozess mit Parametern: sigma=1, anfang=3, Multiplikator=0.7.",xlab= "T", ylab= "Wert", cex=4)
lines(seq(0,50, le=45001), rowMeans(idp), col = "red")
dev.off()
png("ITO_Prozessmit_Parametern2_sigma=1.5_anfang=3_MP=0.7.png",width= 1280, height= 980)
idp<- ITO_SIM_PR(anfang =3,sigma=1.5)
matplot(seq(0,50, le=45001), idp, col=topo.colors(100), type="l", lty=1, main= " ITO Prozess mit Parametern: sigma=1.5, anfang=3,Multiplikator=0.7.",xlab= "T", ylab= "Wert", cex=4)
lines(seq(0,50, le=45001), rowMeans(idp), col = "red")
dev.off()
png("ITO_Prozessmit_Parametern2_sigma=0.2_anfang=3_MP=0.7.png", width= 1280, height= 980)
idp<- ITO_SIM_PR(anfang =3,sigma=0.2)
matplot(seq(0,50, le=45001), idp, col=topo.colors(100), type="l", lty=1, main= " ITO Prozess mit Parametern: sigma=0.2, anfang=3,Multiplikator=0.7.",xlab= "T", ylab= "Wert", cex=4)
lines(seq(0,50, le=45001), rowMeans(idp), col = "red")
dev.off()
png("ITO_Prozessmit_Parametern2_sigma=0.05_anfang=3_MP=0.7.png", width= 1280, height= 980)
idp<- ITO_SIM_PR(anfang =3,sigma=0.05)
matplot(seq(0,50, le=45001), idp, col=topo.colors(100), type="l", lty=1, main= " ITO Prozess mit Parametern: sigma=0.05, anfang=3, Multiplikator=0.7.",xlab= "T", ylab= "Wert", cex=4)
lines(seq(0,50, le=45001), rowMeans(idp), col = "red")
dev.off()
png("ITO_Prozessmit_Parametern2_sigma=1_anfang=3_MP=10.png", width= 1280, height= 980)
idp<- ITO_SIM_PR(anfang =3,sigma=1, Multiplikator=10)
matplot(seq(0,50, le=45001), idp, col=topo.colors(100), type="l", lty=1, main= " ITO Prozess mit Parametern: sigma=1, anfang=3, Multiplikator=1.",xlab= "T", ylab= "Wert", cex=4)
lines(seq(0,50, le=45001), rowMeans(idp), col = "red")
dev.off()
png("ITO_Prozessmit_Parametern2_sigma=1_anfang=3_MP=2.png", width= 1280, height= 980)
idp<- ITO_SIM_PR(anfang =3,sigma=1, Multiplikator=2)
matplot(seq(0,50, le=45001), idp, col=topo.colors(100), type="l", lty=1, main= " ITO Prozess mit Parametern: sigma=1, anfang=3, Multiplikator=2.",xlab= "T", ylab= "Wert", cex=4)
lines(seq(0,50, le=45001), rowMeans(idp), col = "red")
dev.off()
png("ITO_Prozessmit_Parametern2_sigma=1_anfang=3_MP=0.1.png", width= 1280, height= 980)
idp<- ITO_SIM_PR(anfang =3,sigma=1, Multiplikator=0.1)
matplot(seq(0,50, le=45001), idp, col=topo.colors(100), type="l", lty=1, main= " ITO Prozess mit Parametern: sigma=1, anfang=3, Multiplikator=0.1.",xlab= "T", ylab= "Wert", cex=4)
lines(seq(0,50, le=45001), rowMeans(idp), col = "red")
dev.off()
png("ITO_Prozessmit_Parametern2_sigma=1_anfang=3_MP=0.2.png",width= 1280, height= 980)
idp<- ITO_SIM_PR(anfang =3,sigma=1, Multiplikator=0.2)
matplot(seq(0,50, le=45001), idp, col=topo.colors(100), type="l", lty=1, main= " ITO Prozess mit Parametern: sigma=1, anfang=3, Multiplikator=0.2.",xlab= "T", ylab= "Wert", cex=4)
lines(seq(0,50, le=45001), rowMeans(idp), col = "red")
dev.off()
|
## These functions compute the inverse of a matrix. To speed
## up computation time, the inverse of the matrix is
## cached to avoid unnecessary computations.
## makeCacheMatrix creates a special "matrix": a list of four closures that
## share the matrix `x` and its cached inverse `inv` through their enclosing
## environment (hence the `<<-` assignments):
##   1. set(y)    -- replace the matrix and invalidate the cached inverse
##   2. get()     -- return the current matrix
##   3. setinv(i) -- store a computed inverse in the cache
##   4. getinv()  -- return the cached inverse (NULL if not yet computed)
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL  # a new matrix invalidates any previously cached inverse
}
get <- function() x
setinv <-function(solve){ inv <<- solve}  # NOTE(review): parameter name `solve` shadows base::solve inside this closure
getinv <- function(){inv}
list(set=set, get=get, setinv=setinv, getinv=getinv)
}
## This function calculates the inverse of the special "matrix" created by
## makeCacheMatrix above. If the inverse has already been computed it is
## retrieved from the cache; otherwise it is computed with solve() and
## stored via setinv() for future calls.
##
## FIX: the original wrapped `x` in a *new* makeCacheMatrix() on every
## access, so each lookup consulted a freshly created (empty) cache and the
## computed inverse was stored on a throwaway object -- the cache never
## worked. `x` is already a makeCacheMatrix object and must be used directly.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of the matrix stored in `x`.
  inv <- x$getinv()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)
  x$setinv(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
mwalter4/ProgrammingAssignment2
|
R
| false
| false
| 1,422
|
r
|
##These functions compute the inverse of a matrix. To speed
## up computation time, the inverse of the matrix is
## cached to avoid unnecessary computations.
##This function creates a "matrix", which
## is really just a list which contains a function
## to 1. set the value of the matrix
## 2. Get the value of the matrix
## 3. Set the value of the inverse
## 4. Get the value of the inverse
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse of `x`; NULL means "not computed yet". Shared with the
  # closures below through this function's environment.
  cached_inverse <- NULL

  # Replace the stored matrix; any previously cached inverse is now stale.
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }

  # Return the stored matrix.
  get <- function() {
    x
  }

  # Store a freshly computed inverse in the cache.
  setinv <- function(solve) {
    cached_inverse <<- solve
  }

  # Return the cached inverse, or NULL when none has been stored.
  getinv <- function() {
    cached_inverse
  }

  # Expose the four accessors as a named list (the "special matrix").
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## This function calculates the inverse of the special "matrix" created by
## makeCacheMatrix above. If the inverse has already been computed it is
## retrieved from the cache; otherwise it is computed with solve() and
## stored via setinv() for future calls.
##
## FIX: the original wrapped `x` in a *new* makeCacheMatrix() on every
## access, so each lookup consulted a freshly created (empty) cache and the
## computed inverse was stored on a throwaway object -- the cache never
## worked. `x` is already a makeCacheMatrix object and must be used directly.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of the matrix stored in `x`.
  inv <- x$getinv()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)
  x$setinv(inv)
  inv
}
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "sonar")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "Class")
lrn = makeLearner("classif.gamboost", par.vals = list(baselearner = "bbs", Binomial.link = "probit", risk = "none"), predict.type = "prob")
#:# hash
#:# 56b1798283c9122b42eeebdaf9ee0b32
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
/models/openml_sonar/classification_Class/56b1798283c9122b42eeebdaf9ee0b32/code.R
|
no_license
|
pysiakk/CaseStudies2019S
|
R
| false
| false
| 741
|
r
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# Fixed seed so the resampling folds (and hence the audit metrics) are reproducible.
set.seed(1)
#:# data
# Download the "sonar" data set from OpenML.
dataset <- getOMLDataSet(data.name = "sonar")
head(dataset$data)
#:# preprocessing
# No preprocessing is applied; the raw data is used as-is.
head(dataset$data)
#:# model
# Binary classification task (target: Class) with a boosted GAM learner
# (bbs base-learners, probit link), predicting class probabilities.
task = makeClassifTask(id = "task", data = dataset$data, target = "Class")
lrn = makeLearner("classif.gamboost", par.vals = list(baselearner = "bbs", Binomial.link = "probit", risk = "none"), predict.type = "prob")
#:# hash
#:# 56b1798283c9122b42eeebdaf9ee0b32
# Digest of (task, learner) uniquely identifies this model configuration.
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation with a panel of classification metrics.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr  # NOTE(review): despite the name, this holds ALL aggregated measures, not only accuracy
ACC
#:# session info
# Record the R session (package versions) used for this run.
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/int.R
\name{integrateRegCurve}
\alias{integrateRegCurve}
\title{Integrate the area over/under the regularization path of a penalized regression model}
\usage{
integrateRegCurve(fit, weighted = FALSE)
}
\arguments{
\item{fit}{A regularized regression model fitted using glmnet}
\item{weighted}{Should the regularization curve be weighted by the corresponding lambda (as higher lambda pushes coefficients to zero)}
}
\value{
Integrated area over or under a regularization curve using the trapezoid method from the pracma-package
}
\description{
This function evaluates the overall significance of a regularized regression coefficient in a penalized Cox model. It takes into account the whole range of lambda-penalization parameter, and computes the area over or under the regularization curve. This gives more insight into the importance of a regression coefficient over the whole range of lambda, instead of evaluating it at a single optimal lambda point determined typically using cross-validation.
}
\examples{
# Exemplify one PSP of the readily fitted ensembles
data(ePCRmodels)
RegAUC <- cbind(
integrateRegCurve(fit = DREAM@PSPs[[1]]@fit),
integrateRegCurve(fit = DREAM@PSPs[[2]]@fit),
integrateRegCurve(fit = DREAM@PSPs[[3]]@fit)
)
SortRegAUC <- RegAUC[order(apply(RegAUC, MARGIN=1,
FUN=function(z) abs(mean(z)) ), decreasing=TRUE),]
colnames(SortRegAUC) <- c(DREAM@PSPs[[1]]@description,
DREAM@PSPs[[2]]@description,
DREAM@PSPs[[3]]@description)
SortRegAUC[1:10,] # Top 10 coefficients according to (absolute) regularization curve auc
}
|
/man/integrateRegCurve.Rd
|
no_license
|
Syksy/ePCR
|
R
| false
| true
| 1,624
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/int.R
\name{integrateRegCurve}
\alias{integrateRegCurve}
\title{Integrate the area over/under the regularization path of a penalized regression model}
\usage{
integrateRegCurve(fit, weighted = FALSE)
}
\arguments{
\item{fit}{A regularized regression model fitted using glmnet}
\item{weighted}{Should the regularization curve be weighted by the corresponding lambda (as higher lambda pushes coefficients to zero)}
}
\value{
Integrated area over or under a regularization curve using the trapezoid method from the pracma-package
}
\description{
This function evaluates the overall significance of a regularized regression coefficient in a penalized Cox model. It takes into account the whole range of lambda-penalization parameter, and computes the area over or under the regularization curve. This gives more insight into the importance of a regression coefficient over the whole range of lambda, instead of evaluating it at a single optimal lambda point determined typically using cross-validation.
}
\examples{
# Exemplify one PSP of the readily fitted ensembles
data(ePCRmodels)
RegAUC <- cbind(
integrateRegCurve(fit = DREAM@PSPs[[1]]@fit),
integrateRegCurve(fit = DREAM@PSPs[[2]]@fit),
integrateRegCurve(fit = DREAM@PSPs[[3]]@fit)
)
SortRegAUC <- RegAUC[order(apply(RegAUC, MARGIN=1,
FUN=function(z) abs(mean(z)) ), decreasing=TRUE),]
colnames(SortRegAUC) <- c(DREAM@PSPs[[1]]@description,
DREAM@PSPs[[2]]@description,
DREAM@PSPs[[3]]@description)
SortRegAUC[1:10,] # Top 10 coefficients according to (absolute) regularization curve auc
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getK.R
\name{getK}
\alias{getK}
\title{GetK function}
\usage{
getK(..., timeVECT, typeOF = FALSE)
}
\arguments{
\item{...}{One or more getMPN or getK objects}
\item{timeVECT}{vector containing the time/dose points (necessary if getMPN as objects)}
\item{typeOF}{FALSE for getMPN combination and TRUE for getK combination.}
}
\description{
GetK function
}
|
/man/getK.Rd
|
no_license
|
mverbyla/viralEXP
|
R
| false
| true
| 435
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getK.R
\name{getK}
\alias{getK}
\title{GetK function}
\usage{
getK(..., timeVECT, typeOF = FALSE)
}
\arguments{
\item{...}{One or more getMPN or getK objects}
\item{timeVECT}{vector containing the time/dose points (necessary if getMPN as objects)}
\item{typeOF}{FALSE for getMPN combination and TRUE for getK combination.}
}
\description{
GetK function
}
|
#' @title Random Assignment Generator for a Factorial Experiment with Many Conditions
#'
#' @description This function provides a list of random numbers that can be used to assign
#' participants to conditions (cells) in an experiment with many conditions, such as a factorial
#' experiment. The randomization is restricted as follows: if the number of participants
#' available is a multiple of the number of conditions, then cell sizes will be
#' balanced; otherwise, they will be as near balanced as possible.
#'
#' @param N The total number of participants to be randomized.
#' @param C The total number of conditions for the experiment you are planning. Note that
#' for a complete factorial experiment having k factors, there will be 2^k conditions.
#'
#' @return A dataframe with 1 variable ranList with N observations; each observation of
#' ranList provides a random number (from 1 to C) for one participant. For example, if the
#' 4th number in the list is 7, the 4th subject is randomly assigned to experiment
#' condition 7. Numbers are generated so that the experiment is as balanced as possible.
#'
#' @export RandomAssignmentGenerator
#' @examples
#' result <- RandomAssignmentGenerator(35,17)
#' print(result)
RandomAssignmentGenerator <- function(N,
                                      C){
  numloop <- N %/% C  # number of complete, balanced passes over all C conditions
  size1 <- N %% C     # leftover participants after the balanced passes
  ranList <- integer(0)
  # FIX: seq_len(numloop) instead of 1:numloop. When N < C, numloop is 0 and
  # 1:0 iterates twice (i = 1, then 0), silently appending 2*C spurious
  # assignments so the result had more rows than participants.
  for (i in seq_len(numloop)) {
    ranList <- append(ranList, sample(1:C, size = C, replace = FALSE))
  }
  # The remaining N %% C participants each get a distinct, randomly chosen
  # condition, keeping cell sizes as near balanced as possible.
  ranList <- append(ranList, sample(1:C, size = size1, replace = FALSE))
  result <- data.frame(ranList)
  return(result)
}
|
/R/random_assignment_generator.R
|
no_license
|
cran/MOST
|
R
| false
| false
| 1,764
|
r
|
#' @title Random Assignment Generator for a Factorial Experiment with Many Conditions
#'
#' @description This function provides a list of random numbers that can be used to assign
#' participants to conditions (cells) in an experiment with many conditions, such as a factorial
#' experiment. The randomization is restricted as follows: if the number of participants
#' available is a multiple of the number of conditions, then cell sizes will be
#' balanced; otherwise, they will be as near balanced as possible.
#'
#'
#' @param N The total number of participants to be randomized.
#' @param C The total number of conditions for the experiment you are planning. Note that
#' for a complete factorial experiment having k factors, there will be 2^k conditions.
#'
#' @return A dataframe with 1 variable ranList with N observations, each observation of ranList
#' provides a random number for each participant. This will be a number from 1 to C.
#' For example, if the 4th number in the list is 7, the 4th subject is randomly assigned
#' to experiment condition 7. Random numbers will be generated so that the experiment is
#' approximately balanced.
#'
#' @export RandomAssignmentGenerator
#' @examples
#' result <- RandomAssignmentGenerator(35,17)
#' print(result)
RandomAssignmentGenerator <- function(N,
C){
numloop <- N %/% C;   # number of complete balanced passes over the C conditions
size1 <- N %% C;      # leftover participants after the balanced passes
ranList <- c();
# NOTE(review): when N < C, numloop is 0 and `1:numloop` evaluates to c(1, 0),
# so this loop runs twice instead of zero times and appends 2*C spurious
# assignments; seq_len(numloop) would be the safe idiom -- confirm and fix.
for(i in 1:numloop) {
tmp <- sample(1:C,size=C,replace=FALSE);
ranList <- append(ranList,tmp);
}
# The remaining N %% C participants get a random subset of distinct conditions.
tmp <- sample(1:C,size=size1,replace=FALSE);
ranList <- append(ranList,tmp);
#print(ranList);
result <- data.frame(ranList);
return(result);
}
|
#' @export
# Translate shiny input-widget values into a ggplot2 theme() argument string.
#
# `this`  : a one-element named list describing a theme element tree; depth 2
#           for simple elements and depth 3 for nested ones.  The last entry
#           of each element list is its constructor name under key 'call'.
# `p.new` : NOTE(review): accepted but never read in this body -- confirm
#           whether callers rely on it before removing.
# `input` : the shiny `input` object; widget ids are assembled as
#           paste0("pop", item[, item1], subitem).
#
# Returns a single comma-separated string of "name=element_xxx(...)" (or bare
# value) fragments, ready to be spliced into a theme() call elsewhere.
themeNewVal <- function(this,p.new,input){
  out=list()
  if(themeListDepth(this)==2){
    # Simple (depth-2) element: emit one constructor call for the item.
    item=names(this)
    newtxt=c()
    for(subitem in head(names(this[[1]]),-1)){
      newval=input[[paste0("pop",item,subitem)]]
      # Character-class values must be quoted inside the generated code string.
      if(this[[1]][[subitem]]['class']=='character') newval=paste0("'",newval,"'")
      newtxt=c(newtxt,paste0(this[[1]][[subitem]]['name'],"=",newval))
    }
    out=c(out,paste0(item,"=",this[[1]][['call']],"(",paste0(newtxt,collapse=','),")"))
  }else{
    # Nested (depth-3) element: one constructor call per sub-element item1.
    item=names(this)
    for(item1 in names(this[[1]])){
      newtxt=c()
      for(subitem in head(names(this[[1]][[item1]]),-1)){
        # Skip fields the user left empty in the UI.
        check=input[[paste0("pop",item,item1,subitem)]]
        if(!(check==''||is.null(check))){
          # NOTE(review): chained assignment -- this sets BOTH subitem.class
          # and the stored ...['class'] field to 'NULL', so the %in% test on
          # the next line is always TRUE and the else branch below is
          # unreachable.  Looks unintentional; confirm against history.
          subitem.class=this[[1]][[item1]][[subitem]]['class']='NULL'
          if(this[[1]][[item1]][[subitem]]['class']%in%c('NULL')){
            # Fall back to the default class table for this subitem.
            subitem.class=ThemeDefaultClass$class[ThemeDefaultClass$item==subitem]
          }else{
            subitem.class=this[[1]][[item1]][[subitem]]['class']
          }
          # Sizes of nested text sub-elements are emitted as rel() values.
          if(item!='text'&item1=='text'&subitem=='size') subitem.class='rel'
          newval=input[[paste0("pop",item,item1,subitem)]]
          if(subitem.class=='character') newval=paste0("'",newval,"'")
          if(subitem.class=='rel') newval=paste0("rel(",newval,")")
          newtxt=c(newtxt,paste0(this[[1]][[item1]][[subitem]]['name'],"=",newval))
        }
      }
      # Only emit this sub-element when at least one field was set.  Note the
      # brace-less outer if: the entire if/else below is its single statement.
      if(paste0(newtxt,collapse=',')!='')
      # legend.position / legend.justification take a bare value rather than
      # an element_*() call.  (Wrapping the logical in paste0() still works
      # because R accepts if("TRUE") / if("FALSE").)
      if(paste0(paste(item,item1,sep='.')%in%c('legend.position','legend.justification'))){
        if(!grepl('c\\(',newval)){
          out=c(out,paste0(paste(item,item1,sep='.'),"=",newval))
        }else{
          # Vector values such as c(0,1) must not carry quotes.
          out=c(out,paste0(paste(item,item1,sep='.'),"=",gsub("'","",newval)))
        }
      }else{
        out=c(out,paste0(paste(item,item1,sep='.'),"=",this[[1]][[item1]][['call']],"(",paste0(newtxt,collapse=','),")"))
      }
    }
  }
  # Flatten all fragments into a single comma-separated string.
  out=paste0(unlist(out),collapse=',')
  return(out)
}
|
/ggedit/R/themeNewVal.R
|
no_license
|
rpodcast/ggedit
|
R
| false
| false
| 2,068
|
r
|
#' @export
themeNewVal <- function(this,p.new,input){
out=list()
if(themeListDepth(this)==2){
item=names(this)
newtxt=c()
for(subitem in head(names(this[[1]]),-1)){
newval=input[[paste0("pop",item,subitem)]]
if(this[[1]][[subitem]]['class']=='character') newval=paste0("'",newval,"'")
newtxt=c(newtxt,paste0(this[[1]][[subitem]]['name'],"=",newval))
}
out=c(out,paste0(item,"=",this[[1]][['call']],"(",paste0(newtxt,collapse=','),")"))
}else{
item=names(this)
for(item1 in names(this[[1]])){
newtxt=c()
for(subitem in head(names(this[[1]][[item1]]),-1)){
check=input[[paste0("pop",item,item1,subitem)]]
if(!(check==''||is.null(check))){
subitem.class=this[[1]][[item1]][[subitem]]['class']='NULL'
if(this[[1]][[item1]][[subitem]]['class']%in%c('NULL')){
subitem.class=ThemeDefaultClass$class[ThemeDefaultClass$item==subitem]
}else{
subitem.class=this[[1]][[item1]][[subitem]]['class']
}
if(item!='text'&item1=='text'&subitem=='size') subitem.class='rel'
newval=input[[paste0("pop",item,item1,subitem)]]
if(subitem.class=='character') newval=paste0("'",newval,"'")
if(subitem.class=='rel') newval=paste0("rel(",newval,")")
newtxt=c(newtxt,paste0(this[[1]][[item1]][[subitem]]['name'],"=",newval))
}
}
if(paste0(newtxt,collapse=',')!='')
if(paste0(paste(item,item1,sep='.')%in%c('legend.position','legend.justification'))){
if(!grepl('c\\(',newval)){
out=c(out,paste0(paste(item,item1,sep='.'),"=",newval))
}else{
out=c(out,paste0(paste(item,item1,sep='.'),"=",gsub("'","",newval)))
}
}else{
out=c(out,paste0(paste(item,item1,sep='.'),"=",this[[1]][[item1]][['call']],"(",paste0(newtxt,collapse=','),")"))
}
}
}
out=paste0(unlist(out),collapse=',')
return(out)
}
|
[heading "FirstName" data [FirstName] format [info 100 edge [size: 1x1] left]]
[heading "LastName" data [LastName] format [info 100 edge [size: 1x1] left]]
[heading "Address" data [to-human "Address"] format [info 180 edge [size: 1x1] left]]
[heading "Email" data [Email] format [info 120 edge [size: 1x1] left]]
[heading "Company" data [to-human "Company"] format [info 180 edge [size: 1x1] left]]
[heading "Phone" data [Phone] format [info 90 edge [size: 1x1] left]]
|
/db-overlays/billing_complete/person/listing-layout.r
|
permissive
|
mikeyaunish/DB-Rider
|
R
| false
| false
| 474
|
r
|
[heading "FirstName" data [FirstName] format [info 100 edge [size: 1x1] left]]
[heading "LastName" data [LastName] format [info 100 edge [size: 1x1] left]]
[heading "Address" data [to-human "Address"] format [info 180 edge [size: 1x1] left]]
[heading "Email" data [Email] format [info 120 edge [size: 1x1] left]]
[heading "Company" data [to-human "Company"] format [info 180 edge [size: 1x1] left]]
[heading "Phone" data [Phone] format [info 90 edge [size: 1x1] left]]
|
#' Get or set \code{gene_id_type} from a SingleCellExperiment object
#' @rdname gene_id_type
#' @param object A \code{\link{SingleCellExperiment}} object.
#' @param value Value to be assigned to corresponding object.
#'
#' @return gene id type string
#' @author Luyi Tian
#'
#' @export
#'
#' @examples
#' data("sc_sample_data")
#' data("sc_sample_qc")
#' sce = SingleCellExperiment(assays = list(counts =as.matrix(sc_sample_data)))
#' organism(sce) = "mmusculus_gene_ensembl"
#' gene_id_type(sce) = "ensembl_gene_id"
#' QC_metrics(sce) = sc_sample_qc
#' demultiplex_info(sce) = cell_barcode_matching
#' UMI_dup_info(sce) = UMI_duplication
#'
#' gene_id_type(sce)
#'
# Accessor implementation: the gene id type string is kept under the
# object's Biomart metadata entry.
gene_id_type.sce <- function(object) {
    return(object@metadata$Biomart$gene_id_type)
}
#' @rdname gene_id_type
#' @aliases gene_id_type
#' @export
setMethod("gene_id_type", signature(object = "SingleCellExperiment"),
          gene_id_type.sce)
#' @aliases gene_id_type
#' @rdname gene_id_type
#' @export
setReplaceMethod("gene_id_type",signature="SingleCellExperiment",
                 function(object, value) {
                     # NULL and the literal string "NA" both mean "no gene id
                     # type recorded"; normalise them to NA.  identical() is
                     # used instead of the previous `value == "NA"`, which
                     # errored when callers passed NA itself (if(NA) is an
                     # error) and, on R >= 4.3, on length > 1 vectors.
                     if (is.null(value) || identical(value, "NA")) {
                         object@metadata$Biomart$gene_id_type <- NA
                     } else {
                         object@metadata$Biomart$gene_id_type <- value
                     }
                     return(object)
                 })
#' @param ntop numeric scalar indicating the number of most variable features to
#' use for the t-SNE Default is \code{500}, but any \code{ntop} argument is
#' overrided if the \code{feature_set} argument is non-NULL.
#' @param exprs_values character string indicating which values should be used
#' as the expression values for this plot. Valid arguments are \code{"tpm"}
#' (transcripts per million), \code{"norm_tpm"} (normalised TPM
#' values), \code{"fpkm"} (FPKM values), \code{"norm_fpkm"} (normalised FPKM
#' values), \code{"counts"} (counts for each feature), \code{"norm_counts"},
#' \code{"cpm"} (counts-per-million), \code{"norm_cpm"} (normalised
#' counts-per-million), \code{"logcounts"} (log-transformed count data; default),
#' \code{"norm_exprs"} (normalised
#' expression values) or \code{"stand_exprs"} (standardised expression values),
#' or any other named element of the \code{assayData} slot of the \code{SingleCellExperiment}
#' object that can be accessed with the \code{assay} function.
#' @param feature_set character, numeric or logical vector indicating a set of
#' features to use for the t-SNE calculation. If character, entries must all be
#' in \code{featureNames(object)}. If numeric, values are taken to be indices for
#' features. If logical, vector is used to index features and should have length
#' equal to \code{nrow(object)}.
#' @param use_dimred character(1), use named reduced dimension representation of cells
#' stored in \code{SingleCellExperiment} object instead of recomputing (e.g. "PCA").
#' Default is \code{NULL}, no reduced dimension values are provided to \code{Rtsne}.
#' @param n_dimred integer(1), number of components of the reduced dimension slot
#' to use. Default is \code{NULL}, in which case (if \code{use_dimred} is not \code{NULL})
#' all components of the reduced dimension slot are used.
#' @param scale_features logical, should the expression values be standardised
#' so that each feature has unit variance? Default is \code{TRUE}.
#' @param rand_seed (optional) numeric scalar that can be passed to
#' \code{set.seed} to make plots reproducible.
#' @param perplexity numeric scalar value defining the "perplexity parameter"
#' for the t-SNE plot. Passed to \code{\link[Rtsne]{Rtsne}} - see documentation
#' for that package for more details.
#'
#' @rdname plotTSNE
#' @export
runTSNE <- function(object, ntop = 500, ncomponents = 2, exprs_values = "logcounts",
    feature_set = NULL, use_dimred = NULL, n_dimred = NULL, scale_features = TRUE,
    rand_seed = NULL, perplexity = floor(ncol(object) / 5), ...) {
    # Computes a t-SNE embedding of the cells and stores it under
    # reducedDim(object, "TSNE").  Two input modes:
    #   * use_dimred given: run Rtsne on an existing reduced-dimension matrix
    #     (ntop / feature_set / exprs_values / scale_features are ignored);
    #   * otherwise: build an expression matrix, select features, drop
    #     near-zero-variance rows, optionally standardise, and let Rtsne run
    #     its own internal PCA first.
    if (!is.null(use_dimred)) {
        ## Use existing dimensionality reduction results (turning off PCA)
        dr <- reducedDim(object, use_dimred)
        if (!is.null(n_dimred)) {
            # Restrict to the first n_dimred components.
            dr <- dr[,seq_len(n_dimred),drop = FALSE]
        }
        vals <- dr
        do_pca <- FALSE
        pca_dims <- ncol(vals)
    } else {
        ## Define an expression matrix depending on which values we're
        ## using
        exprs_mat <- assay(object, i = exprs_values)
        ## Define features to use: either ntop, or if a set of features is
        ## defined, then those
        if ( is.null(feature_set) ) {
            # Pick the ntop most variable features (capped at the number of
            # features actually present).
            rv <- .general_rowVars(exprs_mat)
            ntop <- min(ntop, length(rv))
            feature_set <- order(rv, decreasing = TRUE)[seq_len(ntop)]
        }
        ## Drop any features with zero variance
        # (threshold is 0.001 rather than exactly zero; NA variances are
        # treated as droppable)
        vals <- exprs_mat[feature_set,,drop = FALSE]
        keep_feature <- .general_rowVars(vals) > 0.001
        keep_feature[is.na(keep_feature)] <- FALSE
        vals <- vals[keep_feature,,drop = FALSE]
        ## Standardise expression if stand_exprs(object) is null
        # Transpose to cells x features, the orientation Rtsne expects.
        vals <- t(vals)
        if (scale_features) {
            vals <- scale(vals, scale = TRUE)
        }
        do_pca <- TRUE
        # NOTE(review): max(50, ncol(object)) *raises* initial_dims with the
        # number of cells; a cap (i.e. min) may have been intended -- confirm.
        pca_dims <- max(50, ncol(object))
    }
    # Actually running the Rtsne step.
    # Optional seeding for reproducibility (t-SNE is stochastic).
    if ( !is.null(rand_seed) )
        set.seed(rand_seed)
    tsne_out <- Rtsne::Rtsne(vals, initial_dims = pca_dims, pca = do_pca,
        perplexity = perplexity, dims = ncomponents,...)
    reducedDim(object, "TSNE") <- tsne_out$Y
    return(object)
}
#' Plot t-SNE for an SingleCellExperiment object
#'
#' Produce a t-distributed stochastic neighbour embedding (t-SNE) plot of two
#' components for an \code{SingleCellExperiment} dataset.
#'
#' @param object an \code{SingleCellExperiment} object
#' @param ncomponents numeric scalar indicating the number of t-SNE
#' components to plot, starting from the first t-SNE component. Default is
#' 2. If \code{ncomponents} is 2, then a scatterplot of component 1 vs component
#' 2 is produced. If \code{ncomponents} is greater than 2, a pairs plots for the
#' top components is produced. NB: computing more than two components for t-SNE
#' can become very time consuming.
#' @param colour_by character string defining the column of \code{pData(object)} to
#' be used as a factor by which to colour the points in the plot. Alternatively,
#' a data frame with one column containing values to map to colours for all cells.
#' @param shape_by character string defining the column of \code{pData(object)} to
#' be used as a factor by which to define the shape of the points in the plot.
#' Alternatively, a data frame with one column containing values to map to shapes.
#' @param size_by character string defining the column of \code{pData(object)} to
#' be used as a factor by which to define the size of points in the plot.
#' Alternatively, a data frame with one column containing values to map to sizes.
#' @param return_SCE logical, should the function return an \code{SingleCellExperiment}
#' object with principal component values for cells in the
#' \code{reducedDims} slot. Default is \code{FALSE}, in which case a
#' \code{ggplot} object is returned.
#' @param rerun logical, should PCA be recomputed even if \code{object} contains a
#' "PCA" element in the \code{reducedDims} slot?
#' @param draw_plot logical, should the plot be drawn on the current graphics
#' device? Only used if \code{return_SCE} is \code{TRUE}, otherwise the plot
#' is always produced.
#' @param theme_size numeric scalar giving default font size for plotting theme
#' (default is 10).
#' @param legend character, specifying how the legend(s) be shown? Default is
#' \code{"auto"}, which hides legends that have only one level and shows others.
#' Alternatives are "all" (show all legends) or "none" (hide all legends).
#' @param ... further arguments passed to \code{\link[Rtsne]{Rtsne}}
#'
#' @details The function \code{\link[Rtsne]{Rtsne}} is used internally to
#' compute the t-SNE. Note that the algorithm is not deterministic, so different
#' runs of the function will produce differing plots (see \code{\link{set.seed}}
#' to set a random seed for replicable results). The value of the
#' \code{perplexity} parameter can have a large effect on the resulting plot, so
#' it can often be worthwhile to try multiple values to find the most appealing
#' visualisation.
#'
#' @return If \code{return_SCE} is \code{TRUE}, then the function returns a
#' \code{SingleCellExperiment} object, otherwise it returns a \code{ggplot} object.
#' @name plotTSNE
#' @rdname plotTSNE
#' @aliases plotTSNE plotTSNE,SingleCellExperiment-method
#'
#' @export
#' @seealso
#' \code{\link[Rtsne]{Rtsne}}
#' @references
#' L.J.P. van der Maaten. Barnes-Hut-SNE. In Proceedings of the International
#' Conference on Learning Representations, 2013.
#'
#' @examples
#' ## Set up an example SingleCellExperiment
#' data("sc_example_counts")
#' data("sc_example_cell_info")
#' example_sce <- SingleCellExperiment(
#' assays = list(counts = sc_example_counts), colData = sc_example_cell_info)
#' example_sce <- normalize(example_sce)
#' drop_genes <- apply(exprs(example_sce), 1, function(x) {var(x) == 0})
#' example_sce <- example_sce[!drop_genes, ]
#'
#' ## Examples plotting t-SNE
#' plotTSNE(example_sce, perplexity = 10)
#' plotTSNE(example_sce, colour_by = "Cell_Cycle", perplexity = 10)
#' plotTSNE(example_sce, colour_by = "Cell_Cycle", shape_by = "Treatment",
#' size_by = "Mutation_Status", perplexity = 10)
#' plotTSNE(example_sce, shape_by = "Treatment", size_by = "Mutation_Status",
#' perplexity = 5)
#' plotTSNE(example_sce, feature_set = 1:100, colour_by = "Treatment",
#' shape_by = "Mutation_Status", perplexity = 5)
#'
#' plotTSNE(example_sce, shape_by = "Treatment", return_SCE = TRUE,
#' perplexity = 10)
#'
#'
plotTSNE <- function(object, colour_by = NULL, shape_by = NULL, size_by = NULL,
                     return_SCE = FALSE, draw_plot = TRUE,
                     theme_size = 10, legend = "auto",
                     rerun = FALSE, ncomponents = 2, ...) {
    # Compute the t-SNE embedding unless a cached one may be reused.
    tsne_cached <- "TSNE" %in% names(reducedDims(object))
    if (rerun || !tsne_cached) {
        object <- runTSNE(object, ncomponents = ncomponents, ...)
    }
    # Delegate the drawing to the generic reduced-dimension plotter.
    tsne_plot <- plotReducedDim(object,
                                ncomponents = ncomponents,
                                use_dimred = "TSNE",
                                colour_by = colour_by,
                                shape_by = shape_by,
                                size_by = size_by,
                                theme_size = theme_size,
                                legend = legend)
    if (!return_SCE) {
        return(tsne_plot)
    }
    # Caller asked for the object back; optionally draw the plot first.
    if (draw_plot) {
        print(tsne_plot)
    }
    return(object)
}
|
/R-raw/scGPS_methods.R
|
no_license
|
quanaibn/scGPS
|
R
| false
| false
| 10,580
|
r
|
#' Get or set \code{gene_id_type} from a SingleCellExperiment object
#' @rdname gene_id_type
#' @param object A \code{\link{SingleCellExperiment}} object.
#' @param value Value to be assigned to corresponding object.
#'
#' @return gene id type string
#' @author Luyi Tian
#'
#' @export
#'
#' @examples
#' data("sc_sample_data")
#' data("sc_sample_qc")
#' sce = SingleCellExperiment(assays = list(counts =as.matrix(sc_sample_data)))
#' organism(sce) = "mmusculus_gene_ensembl"
#' gene_id_type(sce) = "ensembl_gene_id"
#' QC_metrics(sce) = sc_sample_qc
#' demultiplex_info(sce) = cell_barcode_matching
#' UMI_dup_info(sce) = UMI_duplication
#'
#' gene_id_type(sce)
#'
# Accessor implementation: the gene id type string is kept under the
# object's Biomart metadata entry.
gene_id_type.sce <- function(object) {
    return(object@metadata$Biomart$gene_id_type)
}
#' @rdname gene_id_type
#' @aliases gene_id_type
#' @export
setMethod("gene_id_type", signature(object = "SingleCellExperiment"),
          gene_id_type.sce)
#' @aliases gene_id_type
#' @rdname gene_id_type
#' @export
setReplaceMethod("gene_id_type",signature="SingleCellExperiment",
                 function(object, value) {
                     # NULL and the literal string "NA" both mean "no gene id
                     # type recorded"; normalise them to NA.  identical() is
                     # used instead of the previous `value == "NA"`, which
                     # errored when callers passed NA itself (if(NA) is an
                     # error) and, on R >= 4.3, on length > 1 vectors.
                     if (is.null(value) || identical(value, "NA")) {
                         object@metadata$Biomart$gene_id_type <- NA
                     } else {
                         object@metadata$Biomart$gene_id_type <- value
                     }
                     return(object)
                 })
#' @param ntop numeric scalar indicating the number of most variable features to
#' use for the t-SNE Default is \code{500}, but any \code{ntop} argument is
#' overrided if the \code{feature_set} argument is non-NULL.
#' @param exprs_values character string indicating which values should be used
#' as the expression values for this plot. Valid arguments are \code{"tpm"}
#' (transcripts per million), \code{"norm_tpm"} (normalised TPM
#' values), \code{"fpkm"} (FPKM values), \code{"norm_fpkm"} (normalised FPKM
#' values), \code{"counts"} (counts for each feature), \code{"norm_counts"},
#' \code{"cpm"} (counts-per-million), \code{"norm_cpm"} (normalised
#' counts-per-million), \code{"logcounts"} (log-transformed count data; default),
#' \code{"norm_exprs"} (normalised
#' expression values) or \code{"stand_exprs"} (standardised expression values),
#' or any other named element of the \code{assayData} slot of the \code{SingleCellExperiment}
#' object that can be accessed with the \code{assay} function.
#' @param feature_set character, numeric or logical vector indicating a set of
#' features to use for the t-SNE calculation. If character, entries must all be
#' in \code{featureNames(object)}. If numeric, values are taken to be indices for
#' features. If logical, vector is used to index features and should have length
#' equal to \code{nrow(object)}.
#' @param use_dimred character(1), use named reduced dimension representation of cells
#' stored in \code{SingleCellExperiment} object instead of recomputing (e.g. "PCA").
#' Default is \code{NULL}, no reduced dimension values are provided to \code{Rtsne}.
#' @param n_dimred integer(1), number of components of the reduced dimension slot
#' to use. Default is \code{NULL}, in which case (if \code{use_dimred} is not \code{NULL})
#' all components of the reduced dimension slot are used.
#' @param scale_features logical, should the expression values be standardised
#' so that each feature has unit variance? Default is \code{TRUE}.
#' @param rand_seed (optional) numeric scalar that can be passed to
#' \code{set.seed} to make plots reproducible.
#' @param perplexity numeric scalar value defining the "perplexity parameter"
#' for the t-SNE plot. Passed to \code{\link[Rtsne]{Rtsne}} - see documentation
#' for that package for more details.
#'
#' @rdname plotTSNE
#' @export
runTSNE <- function(object, ntop = 500, ncomponents = 2, exprs_values = "logcounts",
feature_set = NULL, use_dimred = NULL, n_dimred = NULL, scale_features = TRUE,
rand_seed = NULL, perplexity = floor(ncol(object) / 5), ...) {
if (!is.null(use_dimred)) {
## Use existing dimensionality reduction results (turning off PCA)
dr <- reducedDim(object, use_dimred)
if (!is.null(n_dimred)) {
dr <- dr[,seq_len(n_dimred),drop = FALSE]
}
vals <- dr
do_pca <- FALSE
pca_dims <- ncol(vals)
} else {
## Define an expression matrix depending on which values we're
## using
exprs_mat <- assay(object, i = exprs_values)
## Define features to use: either ntop, or if a set of features is
## defined, then those
if ( is.null(feature_set) ) {
rv <- .general_rowVars(exprs_mat)
ntop <- min(ntop, length(rv))
feature_set <- order(rv, decreasing = TRUE)[seq_len(ntop)]
}
## Drop any features with zero variance
vals <- exprs_mat[feature_set,,drop = FALSE]
keep_feature <- .general_rowVars(vals) > 0.001
keep_feature[is.na(keep_feature)] <- FALSE
vals <- vals[keep_feature,,drop = FALSE]
## Standardise expression if stand_exprs(object) is null
vals <- t(vals)
if (scale_features) {
vals <- scale(vals, scale = TRUE)
}
do_pca <- TRUE
pca_dims <- max(50, ncol(object))
}
# Actually running the Rtsne step.
if ( !is.null(rand_seed) )
set.seed(rand_seed)
tsne_out <- Rtsne::Rtsne(vals, initial_dims = pca_dims, pca = do_pca,
perplexity = perplexity, dims = ncomponents,...)
reducedDim(object, "TSNE") <- tsne_out$Y
return(object)
}
#' Plot t-SNE for an SingleCellExperiment object
#'
#' Produce a t-distributed stochastic neighbour embedding (t-SNE) plot of two
#' components for an \code{SingleCellExperiment} dataset.
#'
#' @param object an \code{SingleCellExperiment} object
#' @param ncomponents numeric scalar indicating the number of t-SNE
#' components to plot, starting from the first t-SNE component. Default is
#' 2. If \code{ncomponents} is 2, then a scatterplot of component 1 vs component
#' 2 is produced. If \code{ncomponents} is greater than 2, a pairs plots for the
#' top components is produced. NB: computing more than two components for t-SNE
#' can become very time consuming.
#' @param colour_by character string defining the column of \code{pData(object)} to
#' be used as a factor by which to colour the points in the plot. Alternatively,
#' a data frame with one column containing values to map to colours for all cells.
#' @param shape_by character string defining the column of \code{pData(object)} to
#' be used as a factor by which to define the shape of the points in the plot.
#' Alternatively, a data frame with one column containing values to map to shapes.
#' @param size_by character string defining the column of \code{pData(object)} to
#' be used as a factor by which to define the size of points in the plot.
#' Alternatively, a data frame with one column containing values to map to sizes.
#' @param return_SCE logical, should the function return an \code{SingleCellExperiment}
#' object with principal component values for cells in the
#' \code{reducedDims} slot. Default is \code{FALSE}, in which case a
#' \code{ggplot} object is returned.
#' @param rerun logical, should PCA be recomputed even if \code{object} contains a
#' "PCA" element in the \code{reducedDims} slot?
#' @param draw_plot logical, should the plot be drawn on the current graphics
#' device? Only used if \code{return_SCE} is \code{TRUE}, otherwise the plot
#' is always produced.
#' @param theme_size numeric scalar giving default font size for plotting theme
#' (default is 10).
#' @param legend character, specifying how the legend(s) be shown? Default is
#' \code{"auto"}, which hides legends that have only one level and shows others.
#' Alternatives are "all" (show all legends) or "none" (hide all legends).
#' @param ... further arguments passed to \code{\link[Rtsne]{Rtsne}}
#'
#' @details The function \code{\link[Rtsne]{Rtsne}} is used internally to
#' compute the t-SNE. Note that the algorithm is not deterministic, so different
#' runs of the function will produce differing plots (see \code{\link{set.seed}}
#' to set a random seed for replicable results). The value of the
#' \code{perplexity} parameter can have a large effect on the resulting plot, so
#' it can often be worthwhile to try multiple values to find the most appealing
#' visualisation.
#'
#' @return If \code{return_SCE} is \code{TRUE}, then the function returns a
#' \code{SingleCellExperiment} object, otherwise it returns a \code{ggplot} object.
#' @name plotTSNE
#' @rdname plotTSNE
#' @aliases plotTSNE plotTSNE,SingleCellExperiment-method
#'
#' @export
#' @seealso
#' \code{\link[Rtsne]{Rtsne}}
#' @references
#' L.J.P. van der Maaten. Barnes-Hut-SNE. In Proceedings of the International
#' Conference on Learning Representations, 2013.
#'
#' @examples
#' ## Set up an example SingleCellExperiment
#' data("sc_example_counts")
#' data("sc_example_cell_info")
#' example_sce <- SingleCellExperiment(
#' assays = list(counts = sc_example_counts), colData = sc_example_cell_info)
#' example_sce <- normalize(example_sce)
#' drop_genes <- apply(exprs(example_sce), 1, function(x) {var(x) == 0})
#' example_sce <- example_sce[!drop_genes, ]
#'
#' ## Examples plotting t-SNE
#' plotTSNE(example_sce, perplexity = 10)
#' plotTSNE(example_sce, colour_by = "Cell_Cycle", perplexity = 10)
#' plotTSNE(example_sce, colour_by = "Cell_Cycle", shape_by = "Treatment",
#' size_by = "Mutation_Status", perplexity = 10)
#' plotTSNE(example_sce, shape_by = "Treatment", size_by = "Mutation_Status",
#' perplexity = 5)
#' plotTSNE(example_sce, feature_set = 1:100, colour_by = "Treatment",
#' shape_by = "Mutation_Status", perplexity = 5)
#'
#' plotTSNE(example_sce, shape_by = "Treatment", return_SCE = TRUE,
#' perplexity = 10)
#'
#'
plotTSNE <- function(object, colour_by = NULL, shape_by = NULL, size_by = NULL,
                     return_SCE = FALSE, draw_plot = TRUE,
                     theme_size = 10, legend = "auto",
                     rerun = FALSE, ncomponents = 2, ...) {
    # Compute the t-SNE embedding unless a cached one may be reused.
    tsne_cached <- "TSNE" %in% names(reducedDims(object))
    if (rerun || !tsne_cached) {
        object <- runTSNE(object, ncomponents = ncomponents, ...)
    }
    # Delegate the drawing to the generic reduced-dimension plotter.
    tsne_plot <- plotReducedDim(object,
                                ncomponents = ncomponents,
                                use_dimred = "TSNE",
                                colour_by = colour_by,
                                shape_by = shape_by,
                                size_by = size_by,
                                theme_size = theme_size,
                                legend = legend)
    if (!return_SCE) {
        return(tsne_plot)
    }
    # Caller asked for the object back; optionally draw the plot first.
    if (draw_plot) {
        print(tsne_plot)
    }
    return(object)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print_ROCit.R
\name{print.rocit}
\alias{print.rocit}
\title{Print \code{rocit} Object}
\usage{
\method{print}{rocit}(x, ... = NULL)
}
\arguments{
\item{x}{An object of class \code{"rocit"},
returned by \code{\link{rocit}} function.}
\item{...}{\code{NULL}. Used for S3 generic/method consistency.}
}
\description{
Print \code{rocit} Object
}
\examples{
data("Diabetes")
roc_empirical <- rocit(score = Diabetes$chol, class = Diabetes$dtest,
negref = "-") # default method empirical
roc_binormal <- rocit(score = Diabetes$chol, class = Diabetes$dtest,
negref = "-", method = "bin")
# ---------------------
print(roc_empirical)
print(roc_binormal)
}
\seealso{
\code{\link{rocit}}, \code{\link{summary.rocit}}
}
|
/man/print.rocit.Rd
|
no_license
|
cran/ROCit
|
R
| false
| true
| 834
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print_ROCit.R
\name{print.rocit}
\alias{print.rocit}
\title{Print \code{rocit} Object}
\usage{
\method{print}{rocit}(x, ... = NULL)
}
\arguments{
\item{x}{An object of class \code{"rocit"},
returned by \code{\link{rocit}} function.}
\item{...}{\code{NULL}. Used for S3 generic/method consistency.}
}
\description{
Print \code{rocit} Object
}
\examples{
data("Diabetes")
roc_empirical <- rocit(score = Diabetes$chol, class = Diabetes$dtest,
negref = "-") # default method empirical
roc_binormal <- rocit(score = Diabetes$chol, class = Diabetes$dtest,
negref = "-", method = "bin")
# ---------------------
print(roc_empirical)
print(roc_binormal)
}
\seealso{
\code{\link{rocit}}, \code{\link{summary.rocit}}
}
|
###
### $Id: testRPPASpatialParams.R 956 2015-01-26 01:40:28Z proebuck $
###
# Negative tests for the RPPASpatialParams constructor: every call below is
# expected to raise an exception (wrong type, out-of-range value, or a vector
# where a scalar is required).
options(warn=1)
library(SuperCurve)
# checkFuncs defines the checkException() helper used throughout -- presumably
# an RUnit-style wrapper; confirm its location relative to this test file.
source("checkFuncs")
###########################
## tests of cutoff
# cutoff: single numeric; -1 and 2 are rejected as out of range
checkException(RPPASpatialParams(cutoff="bogus"),
               msg="invalid character value should fail")
checkException(RPPASpatialParams(cutoff=-1),
               msg="invalid value (too small) should fail")
checkException(RPPASpatialParams(cutoff=2),
               msg="invalid value (too large) should fail")
checkException(RPPASpatialParams(cutoff=1:10),
               msg="numeric vector should fail")
###########################
## tests of k
# k: single finite numeric; 1 is rejected as too small
checkException(RPPASpatialParams(k="bogus"),
               msg="invalid character value should fail")
checkException(RPPASpatialParams(k=Inf),
               msg="invalid value (infinite) should fail")
checkException(RPPASpatialParams(k=1),
               msg="invalid value (too small) should fail")
checkException(RPPASpatialParams(k=1:10),
               msg="numeric vector should fail")
###########################
## tests of gamma
# gamma: single finite numeric; -1 and 3 are rejected as out of range
checkException(RPPASpatialParams(gamma="bogus"),
               msg="invalid character value should fail")
checkException(RPPASpatialParams(gamma=Inf),
               msg="invalid value (infinite) should fail")
checkException(RPPASpatialParams(gamma=-1),
               msg="invalid value (too small) should fail")
checkException(RPPASpatialParams(gamma=3),
               msg="invalid value (too large) should fail")
checkException(RPPASpatialParams(gamma=1:10),
               msg="numeric vector should fail")
###########################
## tests of plotSurface
# plotSurface: single logical (TRUE/FALSE); numeric 1 is not accepted
checkException(RPPASpatialParams(plotSurface="bogus"),
               msg="invalid character value should fail")
checkException(RPPASpatialParams(plotSurface=1),
               msg="invalid logical value should fail")
checkException(RPPASpatialParams(plotSurface=c(TRUE, FALSE)),
               msg="logical vector should fail")
|
/tests/testRPPASpatialParams.R
|
no_license
|
rmylonas/SuperCurvePAF
|
R
| false
| false
| 1,981
|
r
|
###
### $Id: testRPPASpatialParams.R 956 2015-01-26 01:40:28Z proebuck $
###
options(warn=1)
library(SuperCurve)
source("checkFuncs")
###########################
## tests of cutoff
checkException(RPPASpatialParams(cutoff="bogus"),
msg="invalid character value should fail")
checkException(RPPASpatialParams(cutoff=-1),
msg="invalid value (too small) should fail")
checkException(RPPASpatialParams(cutoff=2),
msg="invalid value (too large) should fail")
checkException(RPPASpatialParams(cutoff=1:10),
msg="numeric vector should fail")
###########################
## tests of k
checkException(RPPASpatialParams(k="bogus"),
msg="invalid character value should fail")
checkException(RPPASpatialParams(k=Inf),
msg="invalid value (infinite) should fail")
checkException(RPPASpatialParams(k=1),
msg="invalid value (too small) should fail")
checkException(RPPASpatialParams(k=1:10),
msg="numeric vector should fail")
###########################
## tests of gamma
checkException(RPPASpatialParams(gamma="bogus"),
msg="invalid character value should fail")
checkException(RPPASpatialParams(gamma=Inf),
msg="invalid value (infinite) should fail")
checkException(RPPASpatialParams(gamma=-1),
msg="invalid value (too small) should fail")
checkException(RPPASpatialParams(gamma=3),
msg="invalid value (too large) should fail")
checkException(RPPASpatialParams(gamma=1:10),
msg="numeric vector should fail")
###########################
## tests of plotSurface
checkException(RPPASpatialParams(plotSurface="bogus"),
msg="invalid character value should fail")
checkException(RPPASpatialParams(plotSurface=1),
msg="invalid logical value should fail")
checkException(RPPASpatialParams(plotSurface=c(TRUE, FALSE)),
msg="logical vector should fail")
|
\name{MultiMeasure-class}
\alias{MultiMeasure-class}
\docType{class}
\title{Multi-platform genomic measurements across the same samples - class}
\description{
An S4 class that stores normalised matched genomic data from multiple platforms and/or laboratory conditions (e.g. from microarrays, RNA-Seq and other sequencing assays).
}
\section{List Components}{
This class has two slots, \code{names} and \code{data}.
\describe{
\item{\code{names}:}{ A character vector containing the names of each data type (e.g. RNA-Seq, Agilent etc.). Must be the same \code{length} as \code{data}.}
\item{\code{data}:}{ A list of numeric matrices of identical \code{dim}, \code{rownames} and \code{colnames}, where each matrix contains the measurements from the platform/condition described in \code{names}. Rows of each matrix correspond to genomic features and columns to samples. Must be the same length as \code{names}.}
}
}
\seealso{
\code{\link{MultiMeasure}} constructs MultiMeasure objects.
}
\section{Methods}{
\code{MultiMeasure} objects have a \code{show} method that describes the dimensions of the data, in the form: \code{MultiMeasure object with i platforms/conditions, j samples and k measured loci}.
}
\author{Tim Peters <t.peters@garvan.org.au>}
\keyword{classes}
|
/man/MultiMeasure-class.Rd
|
permissive
|
timpeters82/consensus
|
R
| false
| false
| 1,302
|
rd
|
\name{MultiMeasure-class}
\alias{MultiMeasure-class}
\docType{class}
\title{Multi-platform genomic measurements across the same samples - class}
\description{
An S4 class that stores normalised matched genomic data from multiple platforms and/or laboratory conditions (e.g. from microarrays, RNA-Seq and other sequencing assays).
}
\section{List Components}{
This class has two slots, \code{names} and \code{data}.
\describe{
\item{\code{names}:}{ A character vector containing the names of each data type (e.g. RNA-Seq, Agilent etc.). Must be the same \code{length} as \code{data}.}
\item{\code{data}:}{ A list of numeric matrices of identical \code{dim}, \code{rownames} and \code{colnames}, where each matrix contains the measurements from the platform/condition described in \code{names}. Rows of each matrix correspond to genomic features and columns to samples. Must be the same length as \code{names}.}
}
}
\seealso{
\code{\link{MultiMeasure}} constructs MultiMeasure objects.
}
\section{Methods}{
\code{MultiMeasure} objects have a \code{show} method that describes the dimensions of the data, in the form: \code{MultiMeasure object with i platforms/conditions, j samples and k measured loci}.
}
\author{Tim Peters <t.peters@garvan.org.au>}
\keyword{classes}
|
#!/usr/bin/env Rscript
#---------------
# initialization
#---------------
source('pascal/lib/init.R')
source('pascal/lib/muts.R')
# additional packages
suppressMessages(pacman::p_load(yaml))
sysprefix="umask 002 && unset PYTHONPATH && source /home/bermans/miniconda2/envs/pyclone/bin/activate /home/bermans/miniconda2/envs/pyclone >/dev/null 2>&1 && "
# create necessary directories
system("mkdir pyclone pyclone/config pyclone/events pyclone/priors pyclone/tables pyclone/plots &>/dev/null")
#-----------
# processing
#-----------
cat(green("\n-") %+% " reading input\n")
# Read the patient -> sample mapping.  sample_sets.txt is space-separated with
# no header: first field is the patient id, remaining fields are sample names.
# Rows starting with "#" are treated as commented out and dropped; empty cells
# are normalised to the literal string "NA" (string sentinel, not R's NA).
# NOTE(review): summarise_each()/funs() are deprecated dplyr verbs (superseded
# by across()); behaviour kept as-is here.
patients <- read.delim("sample_sets.txt",sep=" ",stringsAsFactors=FALSE,header=FALSE) %>%
setNames(c("patient",1:(ncol(.)-1))) %>%
group_by(patient) %>%
filter(!grepl("#",patient)) %>%
summarise_each(funs(ifelse(.=="","NA",.)))
# Same format/cleaning for subsets.txt: first field is the subset name,
# remaining fields are the tumour samples belonging to that subset.
subsets <- read.delim("subsets.txt",sep=" ",stringsAsFactors=FALSE,header=FALSE) %>%
setNames(c("subset",1:(ncol(.)-1))) %>%
group_by(subset) %>%
filter(!grepl("#",subset)) %>%
summarise_each(funs(ifelse(.=="","NA",.)))
# All tumour sample names across every subset, with the "NA" padding removed.
sample.t <- subsets %>%
select(-subset) %>%
unlist %>%
list.filter(.!="NA") %>%
sort
# For each tumour sample, look up the patient row containing it and take the
# last non-"NA" entry of that row as its matched normal.
# NOTE(review): this assumes the matched normal is always listed last on the
# patient's row in sample_sets.txt, and that `contains()` here resolves to a
# row-membership test (not dplyr's select helper) -- confirm which package
# provides it.
sample.n <- sample.t %>%
lapply(.,function(patient)
patients[which(apply(patients,1,function(sample)contains(sample,patient))),] %>%
list.filter(.!="NA") %>%
unlist %>%
tail(n=1)
) %>%
unlist
# Lookup table pairing each tumour sample with its matched normal.
sample.tn <- data.frame(normal=sample.n,tumor=sample.t)
# read & format mutations
muts.vcf <- read.delim("recurrent_mutations/sufam/all_sufam.txt",stringsAsFactors=FALSE,sep="\t") %>%
select(sample.name=sample,chrom,pos,alt=val_alt,cov,maf=val_maf) %>%
tbl_df
muts.suf <- read.delim("recurrent_mutations/sufam/all_mutations.vcf",stringsAsFactors=FALSE,sep="\t") %>%
select(chrom=`X.CHROM`,pos=POS,gene=`ANN....GENE`,alt=ALT,effect=`ANN....EFFECT`) %>%
tbl_df
muts <- muts.vcf %>% full_join(muts.suf, by=c("chrom","pos","alt")) %>%
rowwise() %>%
mutate(gene=str_split(gene,"\\|") %>% unlist %>% head(1)) %>%
mutate(effect=str_split(effect,"\\|") %>% unlist %>% tail(n=1)) %>%
ungroup() %>%
mutate(effect=
ifelse(effect%in%c("STOP_GAINED","Nonsense_Mutation","stop_gained&splice_region_variant","stop_gained"), "truncating snv",
ifelse(effect%in%c("FRAME_SHIFT","FRAME_SHIFT","Frame_Shift_Del","Frame_Shift_Ins","frameshift_variant","frameshift_variant&stop_gained","frameshift_variant&splice_region_variant","frameshift_variant&splice_acceptor_variant&splice_region_variant&splice_region_variant&intron_variant"), "frameshift indel",
ifelse(effect%in%c("NON_SYNONYMOUS_CODING","STOP_LOST","Missense_Mutation","missense_variant","missense_variant&splice_region_variant","missense_variant|missense_variant"),"missense snv",
ifelse(effect%in%c("CODON_CHANGE_PLUS_CODON_DELETION","CODON_DELETION","CODON_INSERTION","In_Frame_Ins","In_Frame_Del","disruptive_inframe_deletion","disruptive_inframe_insertion","inframe_deletion","inframe_insertion","disruptive_inframe_deletion&splice_region_variant","inframe_deletion&splice_region_variant"), "inframe indel",
ifelse(effect%in%c("SPLICE_SITE_DONOR","SPLICE_SITE_ACCEPTOR","SPLICE_SITE_REGION","Splice_Site","splice_donor_variant&intron_variant","splice_acceptor_variant&intron_variant","splicing","splice_donor_variant&splice_region_variant&intron_variant","splice_donor_variant&disruptive_inframe_deletion&splice_region_variant&splice_region_variant&intron_variant","splice_region_variant&intron_variant","frameshift_variant&splice_acceptor_variant&splice_region_variant&intron_variant"), "splice site variant",
ifelse(effect%in%c("STOP_LOST","START_LOST","START_GAINED","UTR_5_PRIME","start_lost","stop_lost"), "start/stop codon change",
#ifelse(effect%in%c("Amplification","Homozygous Deletion"),X #"CNA",
ifelse(effect%in%c("synonymous_variant","splice_region_variant&synonymous_variant","non_coding_exon_variant","upstream_gene_variant","downstream_gene_variant","intron_variant","frameshift_variant&splice_donor_variant&splice_region_variant&splice_region_variant&intron_variant","non_coding_exon_variant|synonymous_variant","SYNONYMOUS_CODING","synonymous_variant|synonymous_variant","splice_region_variant&synonymous_variant|splice_region_variant&non_coding_exon_variant","intragenic_variant","intergenic_region","3_prime_UTR_variant","5_prime_UTR_premature_start_codon_gain_variant","5_prime_UTR_variant","intergenic_region"), "silent", # synonymous/noncoding/up/downstream/intragenic
NA)))))))) %>%
distinct %>%
select(-c(alt)) %>%
mutate(chrom=ifelse(chrom=="X",23,ifelse(chrom=="Y",23,chrom))) %>%
mutate(chrom=as.numeric(chrom))
segfiles<-list.files("facets",pattern="*cncf.txt")
setwd("pyclone") # for some reason PyClone needs to be run from the root directory
#---------------------------------
# variables for yaml configuration
#---------------------------------
# Values serialised below (via as.yaml) into each subset's PyClone
# configuration file (config/<subset>.config.yaml).
# Number of MCMC iterations for PyClone's sampler.
num_iters <- as.integer(50000)
# Beta base-measure hyperparameters of the Dirichlet process.
base_measure_params <- list(alpha=as.integer(1),beta=as.integer(1))
# DP concentration parameter: initial value plus a Gamma(shape, rate) prior.
concentration <- list(value=as.integer(1),prior=list(shape=1.0,rate=0.001))
# Emission density used by PyClone for allele counts.
density <- "pyclone_beta_binomial"
# Beta-binomial precision: initial value, Gamma prior, and MH proposal width.
beta_binomial_precision_params <- list(value=as.integer(1000),prior=list(shape=1.0,rate=0.0001),proposal=list(precision=0.01))
# Paths in the YAML are relative to this directory (the pyclone/ dir after
# the setwd() above).
working_dir <- getwd()
#------------------------------
# main loop over sample subsets
#------------------------------
for (subnum in 1:nrow(subsets)){
# input processing
line <- as.vector(subsets[subnum,])
subsamples <- line[line!="NA"][-1]
subname <- line[[1]][1]
cat(blue("\n--------------------------------\n PYCLONE beginning subset ",subname,"\n--------------------------------\n",sep=""))
system(str_c("mkdir ",subname," &>/dev/null"))
#----------------------------
# run-specific yaml variables
#----------------------------
samples <- lapply(subsamples,function(sample)
list(
mutations_file=str_c("priors/",sample,".priors.yaml"),
tumour_content=list(value=1.0),
error_rate=0.001)
) %>% setNames(subsamples)
#----------------
# write yaml file
#----------------
cat(green("\n-") %+% " building configuration file:\n config/",subname,".config.yaml\n",sep="")
sink(file=str_c("config/",subname,".config.yaml"))
cat(as.yaml(list(
num_iters=num_iters,
base_measure_params=base_measure_params,
concentration=concentration,
density=density,
beta_binomial_precision_params=beta_binomial_precision_params,
working_dir=working_dir,
trace_dir=subname,
samples=samples)))
sink()
#-------------------
# build event tables
#-------------------
subevents=list()
for (samplenum in 1:length(subsamples)) {
sample.t <- subsamples[samplenum] %>% unlist
sample.n <- sample.tn[which(sample.tn$tumor==sample.t),"normal"] %>% as.character
seg <- read.delim(str_c("../facets/",grep(str_c(sample.t,"_",sep=""),segfiles,value=TRUE))) %>%
select(chrom,start=loc.start,end=loc.end,tcn.em,lcn.em) %>%
filter(!is.na(tcn.em)&!is.na(lcn.em)) %>% # remove all rows with unassigned CN so midpoint assignment will find next closest segment
rowwise %>%
mutate(mid=(start+end)/2.0) %>%
select(-c(start,end))
# assign muts to their nearest CN segment
submuts <- filter(muts,sample.name==sample.t) %>%
mutate(id=str_c(chrom,pos,gene,effect,sep=":")) %>%
rename_("cov.t"="cov") %>%
left_join(seg,by="chrom") %>%
group_by(id) %>%
slice(which.min(abs(mid-pos))) %>%
ungroup %>%
mutate(minor=ifelse(lcn.em==0,1,0)) %>%
mutate(major=ifelse(lcn.em==0,tcn.em-1,tcn.em)) %>%
bind_cols(data.frame(cov.n=filter(muts,sample.name==sample.n)$cov)) %>%
filter(maf>0.05 & cov.t>10)
# create events table
events <- data.frame(
mutation_id=submuts$id,
ref_counts=round(submuts$cov.n),
var_counts=round(submuts$cov.t),
normal_cn=rep(2,nrow(submuts)),
minor_cn=submuts$lcn.em,
major_cn=submuts$tcn.em-submuts$lcn.em
)
subevents<-c(subevents,list(events))
}
#-----------------------------------------------------------
# remove events with ref=0 & var=0 depth accross all samples
#-----------------------------------------------------------
rmrows <- subevents %>% lapply(., function(t) which(t$ref_counts==0 & t$var_counts==0)) %>% unlist
if(length(rmrows)>0){
subevents <- lapply(subevents,function(t) t[-rmrows,])
}
#-----------------------------
# build event & mutation files
#-----------------------------
for (samplenum in 1:length(subsamples)){
sample <- subsamples[samplenum] %>% unlist
cat(green("\n-") %+% " building input files for sample ",sample,":",sep="")
cat("\n events/",sample,".events.tsv",sep="")
write.table(subevents[samplenum],file=str_c("events/",sample,".events.tsv"),row.names=FALSE,quote=FALSE,sep="\t")
cat("\n priors/",sample,".priors.yaml\n",sep="")
system(str_c(sysprefix,"PyClone build_mutations_file --in_file events/",sample,".events.tsv --out_file priors/",sample,".priors.yaml"))
}
#-----------------
# pyclone analysis
#-----------------
cat(green("\n-") %+% " running MCMC simulation:\n")
system(str_c(sysprefix,"PyClone run_analysis --config_file config/",subname,".config.yaml"))
#-------------
# build tables
#-------------
cat(green("\n-") %+% " building analysis tables:\n tables/",subname,".loci.tsv",sep="")
system(str_c(sysprefix,"PyClone build_table --config_file config/",subname,".config.yaml --out_file tables/",subname,".loci.tsv --table_type loci"))
cat("\n tables/",subname,".cluster.tsv\n",sep="")
system(str_c(sysprefix,"PyClone build_table --config_file config/",subname,".config.yaml --out_file tables/",subname,".cluster.tsv --table_type cluster"))
#---------
# plotting
#---------
cat(green("\n-") %+% " plotting results:\n plots/",subname,".loci.pdf",sep="")
system(str_c(sysprefix,"xvfb-run PyClone plot_loci --config_file config/",subname,".config.yaml --plot_file plots/",subname,".loci.pdf --plot_type density"))
cat("\n plots/",subname,".cluster.pdf\n",sep="")
system(str_c(sysprefix,"xvfb-run PyClone plot_clusters --config_file config/",subname,".config.yaml --plot_file plots/",subname,".cluster.pdf --plot_type density"))
}
#-------------
# PostPy paths
#-------------
#"/ifs/e63data/reis-filho/usr/PostPy/interval_analyser.py"
#"/ifs/e63data/reis-filho/usr/PostPy/CI_filter.py"
#"/ifs/e63data/reis-filho/usr/PostPy/pyclone_files_updater.py"
|
/R/clonality/pyclone.R
|
permissive
|
lmexj/pascal
|
R
| false
| false
| 10,248
|
r
|
#!/usr/bin/env Rscript
#---------------
# initialization
#---------------
source('pascal/lib/init.R')
source('pascal/lib/muts.R')
# additional packages
suppressMessages(pacman::p_load(yaml))
sysprefix="umask 002 && unset PYTHONPATH && source /home/bermans/miniconda2/envs/pyclone/bin/activate /home/bermans/miniconda2/envs/pyclone >/dev/null 2>&1 && "
# create necessary directories
system("mkdir pyclone pyclone/config pyclone/events pyclone/priors pyclone/tables pyclone/plots &>/dev/null")
#-----------
# processing
#-----------
cat(green("\n-") %+% " reading input\n")
patients <- read.delim("sample_sets.txt",sep=" ",stringsAsFactors=FALSE,header=FALSE) %>%
setNames(c("patient",1:(ncol(.)-1))) %>%
group_by(patient) %>%
filter(!grepl("#",patient)) %>%
summarise_each(funs(ifelse(.=="","NA",.)))
subsets <- read.delim("subsets.txt",sep=" ",stringsAsFactors=FALSE,header=FALSE) %>%
setNames(c("subset",1:(ncol(.)-1))) %>%
group_by(subset) %>%
filter(!grepl("#",subset)) %>%
summarise_each(funs(ifelse(.=="","NA",.)))
sample.t <- subsets %>%
select(-subset) %>%
unlist %>%
list.filter(.!="NA") %>%
sort
sample.n <- sample.t %>%
lapply(.,function(patient)
patients[which(apply(patients,1,function(sample)contains(sample,patient))),] %>%
list.filter(.!="NA") %>%
unlist %>%
tail(n=1)
) %>%
unlist
sample.tn <- data.frame(normal=sample.n,tumor=sample.t)
# read & format mutations
muts.vcf <- read.delim("recurrent_mutations/sufam/all_sufam.txt",stringsAsFactors=FALSE,sep="\t") %>%
select(sample.name=sample,chrom,pos,alt=val_alt,cov,maf=val_maf) %>%
tbl_df
muts.suf <- read.delim("recurrent_mutations/sufam/all_mutations.vcf",stringsAsFactors=FALSE,sep="\t") %>%
select(chrom=`X.CHROM`,pos=POS,gene=`ANN....GENE`,alt=ALT,effect=`ANN....EFFECT`) %>%
tbl_df
muts <- muts.vcf %>% full_join(muts.suf, by=c("chrom","pos","alt")) %>%
rowwise() %>%
mutate(gene=str_split(gene,"\\|") %>% unlist %>% head(1)) %>%
mutate(effect=str_split(effect,"\\|") %>% unlist %>% tail(n=1)) %>%
ungroup() %>%
mutate(effect=
ifelse(effect%in%c("STOP_GAINED","Nonsense_Mutation","stop_gained&splice_region_variant","stop_gained"), "truncating snv",
ifelse(effect%in%c("FRAME_SHIFT","FRAME_SHIFT","Frame_Shift_Del","Frame_Shift_Ins","frameshift_variant","frameshift_variant&stop_gained","frameshift_variant&splice_region_variant","frameshift_variant&splice_acceptor_variant&splice_region_variant&splice_region_variant&intron_variant"), "frameshift indel",
ifelse(effect%in%c("NON_SYNONYMOUS_CODING","STOP_LOST","Missense_Mutation","missense_variant","missense_variant&splice_region_variant","missense_variant|missense_variant"),"missense snv",
ifelse(effect%in%c("CODON_CHANGE_PLUS_CODON_DELETION","CODON_DELETION","CODON_INSERTION","In_Frame_Ins","In_Frame_Del","disruptive_inframe_deletion","disruptive_inframe_insertion","inframe_deletion","inframe_insertion","disruptive_inframe_deletion&splice_region_variant","inframe_deletion&splice_region_variant"), "inframe indel",
ifelse(effect%in%c("SPLICE_SITE_DONOR","SPLICE_SITE_ACCEPTOR","SPLICE_SITE_REGION","Splice_Site","splice_donor_variant&intron_variant","splice_acceptor_variant&intron_variant","splicing","splice_donor_variant&splice_region_variant&intron_variant","splice_donor_variant&disruptive_inframe_deletion&splice_region_variant&splice_region_variant&intron_variant","splice_region_variant&intron_variant","frameshift_variant&splice_acceptor_variant&splice_region_variant&intron_variant"), "splice site variant",
ifelse(effect%in%c("STOP_LOST","START_LOST","START_GAINED","UTR_5_PRIME","start_lost","stop_lost"), "start/stop codon change",
#ifelse(effect%in%c("Amplification","Homozygous Deletion"),X #"CNA",
ifelse(effect%in%c("synonymous_variant","splice_region_variant&synonymous_variant","non_coding_exon_variant","upstream_gene_variant","downstream_gene_variant","intron_variant","frameshift_variant&splice_donor_variant&splice_region_variant&splice_region_variant&intron_variant","non_coding_exon_variant|synonymous_variant","SYNONYMOUS_CODING","synonymous_variant|synonymous_variant","splice_region_variant&synonymous_variant|splice_region_variant&non_coding_exon_variant","intragenic_variant","intergenic_region","3_prime_UTR_variant","5_prime_UTR_premature_start_codon_gain_variant","5_prime_UTR_variant","intergenic_region"), "silent", # synonymous/noncoding/up/downstream/intragenic
NA)))))))) %>%
distinct %>%
select(-c(alt)) %>%
mutate(chrom=ifelse(chrom=="X",23,ifelse(chrom=="Y",23,chrom))) %>%
mutate(chrom=as.numeric(chrom))
segfiles<-list.files("facets",pattern="*cncf.txt")
setwd("pyclone") # for some reason PyClone needs to be run from the root directory
#---------------------------------
# variables for yaml configuration
#---------------------------------
num_iters <- as.integer(50000)
base_measure_params <- list(alpha=as.integer(1),beta=as.integer(1))
concentration <- list(value=as.integer(1),prior=list(shape=1.0,rate=0.001))
density <- "pyclone_beta_binomial"
beta_binomial_precision_params <- list(value=as.integer(1000),prior=list(shape=1.0,rate=0.0001),proposal=list(precision=0.01))
working_dir <- getwd()
#------------------------------
# main loop over sample subsets
#------------------------------
for (subnum in 1:nrow(subsets)){
# input processing
line <- as.vector(subsets[subnum,])
subsamples <- line[line!="NA"][-1]
subname <- line[[1]][1]
cat(blue("\n--------------------------------\n PYCLONE beginning subset ",subname,"\n--------------------------------\n",sep=""))
system(str_c("mkdir ",subname," &>/dev/null"))
#----------------------------
# run-specific yaml variables
#----------------------------
samples <- lapply(subsamples,function(sample)
list(
mutations_file=str_c("priors/",sample,".priors.yaml"),
tumour_content=list(value=1.0),
error_rate=0.001)
) %>% setNames(subsamples)
#----------------
# write yaml file
#----------------
cat(green("\n-") %+% " building configuration file:\n config/",subname,".config.yaml\n",sep="")
sink(file=str_c("config/",subname,".config.yaml"))
cat(as.yaml(list(
num_iters=num_iters,
base_measure_params=base_measure_params,
concentration=concentration,
density=density,
beta_binomial_precision_params=beta_binomial_precision_params,
working_dir=working_dir,
trace_dir=subname,
samples=samples)))
sink()
#-------------------
# build event tables
#-------------------
subevents=list()
for (samplenum in 1:length(subsamples)) {
sample.t <- subsamples[samplenum] %>% unlist
sample.n <- sample.tn[which(sample.tn$tumor==sample.t),"normal"] %>% as.character
seg <- read.delim(str_c("../facets/",grep(str_c(sample.t,"_",sep=""),segfiles,value=TRUE))) %>%
select(chrom,start=loc.start,end=loc.end,tcn.em,lcn.em) %>%
filter(!is.na(tcn.em)&!is.na(lcn.em)) %>% # remove all rows with unassigned CN so midpoint assignment will find next closest segment
rowwise %>%
mutate(mid=(start+end)/2.0) %>%
select(-c(start,end))
# assign muts to their nearest CN segment
submuts <- filter(muts,sample.name==sample.t) %>%
mutate(id=str_c(chrom,pos,gene,effect,sep=":")) %>%
rename_("cov.t"="cov") %>%
left_join(seg,by="chrom") %>%
group_by(id) %>%
slice(which.min(abs(mid-pos))) %>%
ungroup %>%
mutate(minor=ifelse(lcn.em==0,1,0)) %>%
mutate(major=ifelse(lcn.em==0,tcn.em-1,tcn.em)) %>%
bind_cols(data.frame(cov.n=filter(muts,sample.name==sample.n)$cov)) %>%
filter(maf>0.05 & cov.t>10)
# create events table
events <- data.frame(
mutation_id=submuts$id,
ref_counts=round(submuts$cov.n),
var_counts=round(submuts$cov.t),
normal_cn=rep(2,nrow(submuts)),
minor_cn=submuts$lcn.em,
major_cn=submuts$tcn.em-submuts$lcn.em
)
subevents<-c(subevents,list(events))
}
#-----------------------------------------------------------
# remove events with ref=0 & var=0 depth accross all samples
#-----------------------------------------------------------
rmrows <- subevents %>% lapply(., function(t) which(t$ref_counts==0 & t$var_counts==0)) %>% unlist
if(length(rmrows)>0){
subevents <- lapply(subevents,function(t) t[-rmrows,])
}
#-----------------------------
# build event & mutation files
#-----------------------------
for (samplenum in 1:length(subsamples)){
sample <- subsamples[samplenum] %>% unlist
cat(green("\n-") %+% " building input files for sample ",sample,":",sep="")
cat("\n events/",sample,".events.tsv",sep="")
write.table(subevents[samplenum],file=str_c("events/",sample,".events.tsv"),row.names=FALSE,quote=FALSE,sep="\t")
cat("\n priors/",sample,".priors.yaml\n",sep="")
system(str_c(sysprefix,"PyClone build_mutations_file --in_file events/",sample,".events.tsv --out_file priors/",sample,".priors.yaml"))
}
#-----------------
# pyclone analysis
#-----------------
cat(green("\n-") %+% " running MCMC simulation:\n")
system(str_c(sysprefix,"PyClone run_analysis --config_file config/",subname,".config.yaml"))
#-------------
# build tables
#-------------
cat(green("\n-") %+% " building analysis tables:\n tables/",subname,".loci.tsv",sep="")
system(str_c(sysprefix,"PyClone build_table --config_file config/",subname,".config.yaml --out_file tables/",subname,".loci.tsv --table_type loci"))
cat("\n tables/",subname,".cluster.tsv\n",sep="")
system(str_c(sysprefix,"PyClone build_table --config_file config/",subname,".config.yaml --out_file tables/",subname,".cluster.tsv --table_type cluster"))
#---------
# plotting
#---------
cat(green("\n-") %+% " plotting results:\n plots/",subname,".loci.pdf",sep="")
system(str_c(sysprefix,"xvfb-run PyClone plot_loci --config_file config/",subname,".config.yaml --plot_file plots/",subname,".loci.pdf --plot_type density"))
cat("\n plots/",subname,".cluster.pdf\n",sep="")
system(str_c(sysprefix,"xvfb-run PyClone plot_clusters --config_file config/",subname,".config.yaml --plot_file plots/",subname,".cluster.pdf --plot_type density"))
}
#-------------
# PostPy paths
#-------------
#"/ifs/e63data/reis-filho/usr/PostPy/interval_analyser.py"
#"/ifs/e63data/reis-filho/usr/PostPy/CI_filter.py"
#"/ifs/e63data/reis-filho/usr/PostPy/pyclone_files_updater.py"
|
\name{pop.index}
\alias{pop.index}
\title{
Calculation of population index
}
\description{Calculates population index of a meteor shower
for a given magnitude data, specified period of days and magnitude values.
}
\usage{
pop.index(data,year, month, day.beg, day.end=day.beg, shw, mag=-6:7)
}
\arguments{
\item{data}{
data frame consisting of visual meteor magnitude data.
}
\item{year}{
numeric vector of length 4 specifying year.
}
\item{month}{
numeric vector specifying month of the year.
}
\item{day.beg}{
numeric vector specifying beginning day.
}
\item{day.end}{
numeric vector specifying ending day.
}
\item{shw}{
character string consisting of three capital letters which represent meteor shower code.
}
\item{mag}{
numeric vector specifying range of magnitudes.
}
}
\details{Cumulative summarized magnitude distribution \emph{Phi(m)} is formed by summing cumulative
frequencies of all observers for each magnitude class \emph{m}.
Using the relationship for population index \emph{r=Phi(m+1)/Phi(m)} and substituting \emph{0,1,...m} magnitudes,
the equation \emph{Phi(m)=Phi(0)r^m} (or \emph{ln(Phi(m))=ln(Phi(0))+m ln(r)} in logarithmic form) can be written.
Then, the population index \emph{r} is calculated by the method of least squares, for the chosen range of magnitude values.
Standard error of population index is approximated with
\emph{sigma_r= r sqrt(sum e_i^2/((n-2)sum_i m_i^2))},
where \emph{i=1,2,..n}, \emph{n} is number of magnitude values, \emph{e_i} regression residuals,
\emph{i=1,2,..n}.
}
\value{
Data frame containing following vectors
\describe{
\item{day}{factor Day or interval of days}
\item{month}{numeric Month of the year}
\item{year}{numeric Year}
\item{mag}{factor Range of magnitude values}
\item{nINT}{Number of observing time intervals}
\item{nSHW}{Number of observed meteors belonging to the shower}
\item{pop.index}{Population index}
\item{sigma.r}{Standard error of population index}
}
}
\references{
Koschack R. and Rendtel J. (1990b). Determination of
spatial number density and mass index from visual
meteor observations (2). \emph{WGN, Journal of the IMO}, 18(4), 119 - 140.
Rendtel J. and Arlt R., editors (2008). \emph{IMO Handbook
For Meteor Observers}. IMO, Potsdam.
}
\author{
Kristina Veljkovic
}
\note{
The interval for regression is chosen such that: there is at least 3 meteors per magnitude class,
the faintest magnitude classes are not included (m<=4 or in exceptional cases m<=5) and there are at least
5 magnitude classes available. All these conditions are fulfilled for the range of magnitude values printed
in results.
Argument \code{data} has to consist of the columns named "m6" and "p7".
}
\seealso{
\code{\link{mag.distr}},\code{\link{zhr}}
}
\examples{
##select visual meteor data for observation of Perseids, time period 1-20th August 2007
##and calculate population index using magnitudes m<=4
data(magn07)
magnPer<-filter(magn07,shw="PER", year=2007, month=8, day.beg=1, day.end=20)
pop.index(magnPer,year=2007, month=8, day.beg=1, day.end=20, shw="PER",mag=-6:4)
}
|
/man/pop.index.Rd
|
no_license
|
arturochian/MetFns
|
R
| false
| false
| 3,205
|
rd
|
\name{pop.index}
\alias{pop.index}
\title{
Calculation of population index
}
\description{Calculates population index of a meteor shower
for a given magnitude data, specified period of days and magnitude values.
}
\usage{
pop.index(data,year, month, day.beg, day.end=day.beg, shw, mag=-6:7)
}
\arguments{
\item{data}{
data frame consisting of visual meteor magnitude data.
}
\item{year}{
numeric vector of length 4 specifying year.
}
\item{month}{
numeric vector specifying month of the year.
}
\item{day.beg}{
numeric vector specifying beginning day.
}
\item{day.end}{
numeric vector specifying ending day.
}
\item{shw}{
character string consisting of three capital letters which represent meteor shower code.
}
\item{mag}{
numeric vector specifying range of magnitudes.
}
}
\details{Cumulative summarized magnitude distribution \emph{Phi(m)} is formed by summing cumulative
frequencies of all observers for each magnitude class \emph{m}.
Using the relationship for population index \emph{r=Phi(m+1)/Phi(m)} and substituting \emph{0,1,...m} magnitudes,
the equation \emph{Phi(m)=Phi(0)r^m} (or \emph{ln(Phi(m))=ln(Phi(0))+m ln(r)} in logarithmic form) can be written.
Then, the population index \emph{r} is calculated by the method of least squares, for the chosen range of magnitude values.
Standard error of population index is approximated with
\emph{sigma_r= r sqrt(sum e_i^2/((n-2)sum_i m_i^2))},
where \emph{i=1,2,..n}, \emph{n} is number of magnitude values, \emph{e_i} regression residuals,
\emph{i=1,2,..n}.
}
\value{
Data frame containing following vectors
\describe{
\item{day}{factor Day or interval of days}
\item{month}{numeric Month of the year}
\item{year}{numeric Year}
\item{mag}{factor Range of magnitude values}
\item{nINT}{Number of observing time intervals}
\item{nSHW}{Number of observed meteors belonging to the shower}
\item{pop.index}{Population index}
\item{sigma.r}{Standard error of population index}
}
}
\references{
Koschack R. and Rendtel J. (1990b). Determination of
spatial number density and mass index from visual
meteor observations (2). \emph{WGN, Journal of the IMO}, 18(4), 119 - 140.
Rendtel J. and Arlt R., editors (2008). \emph{IMO Handbook
For Meteor Observers}. IMO, Potsdam.
}
\author{
Kristina Veljkovic
}
\note{
The interval for regression is chosen such that: there is at least 3 meteors per magnitude class,
the faintest magnitude classes are not included (m<=4 or in exceptional cases m<=5) and there are at least
5 magnitude classes available. All these conditions are fulfilled for the range of magnitude values printed
in results.
Argument \code{data} has to consist of the columns named "m6" and "p7".
}
\seealso{
\code{\link{mag.distr}},\code{\link{zhr}}
}
\examples{
##select visual meteor data for observation of Perseids, time period 1-20th August 2007
##and calculate population index using magnitudes m<=4
data(magn07)
magnPer<-filter(magn07,shw="PER", year=2007, month=8, day.beg=1, day.end=20)
pop.index(magnPer,year=2007, month=8, day.beg=1, day.end=20, shw="PER",mag=-6:4)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zchunk_L111.nonghg_en_R_S_T_Y.R
\name{module_emissions_L111.nonghg_en_R_S_T_Y}
\alias{module_emissions_L111.nonghg_en_R_S_T_Y}
\title{module_emissions_L111.nonghg_en_R_S_T_Y}
\usage{
module_emissions_L111.nonghg_en_R_S_T_Y(command, ...)
}
\arguments{
\item{command}{API command to execute}
\item{...}{other optional parameters, depending on command}
}
\value{
Depends on \code{command}: either a vector of required inputs,
a vector of output names, or (if \code{command} is "MAKE") all
the generated outputs: \code{L111.nonghg_tg_R_en_S_F_Yh}, \code{L111.nonghg_tgej_R_en_S_F_Yh}. The corresponding file in the
original data system was \code{L111.nonghg_en_R_S_T_Y.R} (emissions level1).
}
\description{
Calculate non-ghg emission totals and non-ghg emission shares of total emissions.
}
\details{
This code produces two outputs: non-ghg emission totals and non-ghg emission shares of total emissions.
First, non-ghg gas emissions are combined and grouped by sector and region, emissions are scaled, and international
shipping & aviation emission data calculated based on total emission and total emission shares. Finally, non-ghg emission
totals and shares are calculated by GCAM sector, fuel, technology, and driver type for EDGAR historical years.
}
\author{
RC April 2018
}
|
/man/module_emissions_L111.nonghg_en_R_S_T_Y.Rd
|
permissive
|
Liyang-Guo/gcamdata
|
R
| false
| true
| 1,357
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zchunk_L111.nonghg_en_R_S_T_Y.R
\name{module_emissions_L111.nonghg_en_R_S_T_Y}
\alias{module_emissions_L111.nonghg_en_R_S_T_Y}
\title{module_emissions_L111.nonghg_en_R_S_T_Y}
\usage{
module_emissions_L111.nonghg_en_R_S_T_Y(command, ...)
}
\arguments{
\item{command}{API command to execute}
\item{...}{other optional parameters, depending on command}
}
\value{
Depends on \code{command}: either a vector of required inputs,
a vector of output names, or (if \code{command} is "MAKE") all
the generated outputs: \code{L111.nonghg_tg_R_en_S_F_Yh}, \code{L111.nonghg_tgej_R_en_S_F_Yh}. The corresponding file in the
original data system was \code{L111.nonghg_en_R_S_T_Y.R} (emissions level1).
}
\description{
Calculate non-ghg emission totals and non-ghg emission shares of total emissions.
}
\details{
This code produces two outputs: non-ghg emission totals and non-ghg emission shares of total emissions.
First, non-ghg gas emissions are combined and grouped by sector and region, emissions are scaled, and international
shipping & aviation emission data calculated based on total emission and total emission shares. Finally, non-ghg emission
totals and shares are calculated by GCAM sector, fuel, technology, and driver type for EDGAR historical years.
}
\author{
RC April 2018
}
|
################
### Figure 25 ###
################
# Load per-nucleotide HpaII-site methylation calls joined to the CpG island
# that contains each site, from the local MethData_Lister_hg18 MySQL database.
# NOTE(review): require() returns FALSE instead of erroring if RMySQL is
# absent; library() would fail loudly.  The connection is also never
# dbDisconnect()ed -- confirm both are intentional.
require(RMySQL)
con<-dbConnect(MySQL(),dbname="MethData_Lister_hg18")
# One row per (nucleotide of an HpaII site, cell line): joins site positions
# (R_POS) to per-position methylation calls (METH_POS_ASSIGNMENT), maps each
# site to its containing CpG island via CORRESPONDENCE/ELEMENT, and attaches
# the island-level methylation summary (METH_ELEM_ASSIGNMENT).  Both
# MPA.methCoef and MEA.methCoef are selected; R de-duplicates the repeated
# column name as `methCoef` / `methCoef.1`, which the subset() below relies on.
reNucleotide_Element<-dbGetQuery(con,"SELECT R.nucleotide,R.id_In_RE,R.position,MPA.methCoef,
MPA.name,MPA.nReads,E.id_In_Type,E.nCG,E.chrom,E.chromStart,E.chromEnd,MEA.methCoef,MEA.posInf,MEA.Std_Dev
FROM (((R_POS R JOIN METH_POS_ASSIGNMENT MPA ON R.nucleotide=MPA.nucleotide
AND R.RE_name=MPA.RE_name AND R.id_In_RE=MPA.id_In_RE)
JOIN CORRESPONDENCE C ON R.nucleotide=C.nucleotide
AND R.RE_name=C.RE_name AND R.id_In_RE=C.id_In_RE)
JOIN ELEMENT E ON C.type=E.type AND C.id_In_Type=E.id_In_Type)
JOIN METH_ELEM_ASSIGNMENT MEA ON MEA.type=E.type
AND MEA.id_In_Type=E.id_In_Type
WHERE MPA.name=MEA.name AND R.RE_name='HpaII' AND E.type='CpGisland'
AND MPA.RE_name='HpaII' AND MEA.type='CpGisland' AND C.type='CpGisland'")
# Widen from one row per nucleotide to one row per CpG dinucleotide: the
# `nucleotide` column (timevar) produces `.C` / `.G` suffixed columns for
# each (id_In_RE, name) pair.
reDinucleotide_Element<-reshape(reNucleotide_Element,idvar=c("id_In_RE","name"),
direction="wide",timevar="nucleotide")
# Keep the C-suffixed element-level columns (presumably identical for both
# strands of the same site) plus the strand-level methylation coefficients
# from both strands.
reDinucleotide_Element<-subset(reDinucleotide_Element,select=c(id_In_RE,name,position.C,nReads.C,methCoef.C,
methCoef.G,id_In_Type.C,chrom.C,chromStart.C,chromEnd.C,nCG.C,methCoef.1.C,posInf.C,Std_Dev.C)
)
# Rename to self-describing names: RE_* = restriction-site level,
# C_/G_methCoef = strand-level calls, E_* = CpG-island (element) level.
# Downstream code (methCoefMean) indexes columns 5 and 6 positionally, so
# this column order is load-bearing.
colnames(reDinucleotide_Element)<-c("id_In_RE","CLine","RE_position","RE_nReads",
"C_methCoef","G_methCoef","id_In_Type","E_chrom","E_chromStart","E_chromEnd","E_nCG",
"E_methCoef","E_posInf","E_Std_Dev")
# Mean methylation coefficient of a CpG dinucleotide from its two strands.
#
# `x` is one row of the reshaped data frame as delivered by
# `apply(df, 1, ...)`, i.e. a character vector; positions 5 and 6 hold the
# C-strand and G-strand methylation coefficients, with -1 marking a missing
# (uninformative) measurement.
#
# Returns a length-1 numeric:
#   * the informative strand's value when exactly one strand is missing,
#   * the average of the two strands when both are informative,
#   * -1 when both are missing (the average of two -1s).
#
# Fixes vs. original: consistently returns numeric (the original returned
# character row elements in the single-strand branches and numeric in the
# else branch), coerces each value once, and uses scalar `&&` in the `if`
# conditions instead of the vectorized `&`.  Backward-compatible: callers
# already wrap the result in as.numeric(as.vector(...)).
methCoefMean <- function(x) {
  c_coef <- as.numeric(x[5])
  g_coef <- as.numeric(x[6])
  if (c_coef == -1 && g_coef != -1) {
    g_coef
  } else if (g_coef == -1 && c_coef != -1) {
    c_coef
  } else {
    (c_coef + g_coef) / 2
  }
}
reDinucleotide_Element_H1<-subset(reDinucleotide_Element,reDinucleotide_Element$CLine=="H1")
reDinucleotide_Element_IMR90<-subset(reDinucleotide_Element,reDinucleotide_Element$CLine=="IMR90")
meanMethCoef_H1<-apply(reDinucleotide_Element_H1,1,FUN=methCoefMean)
meanMethCoef_IMR90<-apply(reDinucleotide_Element_IMR90,1,FUN=methCoefMean)
reDinucleotide_Element_H1<-cbind(reDinucleotide_Element_H1,meanMethCoef_H1=as.numeric(as.vector(meanMethCoef_H1)))
reDinucleotide_Element_IMR90<-cbind(reDinucleotide_Element_IMR90,meanMethCoef_IMR90=as.numeric(as.vector(meanMethCoef_IMR90)))
reDinucleotide_Element_ADS<-subset(reDinucleotide_Element,reDinucleotide_Element$CLine=="ADS")
reDinucleotide_Element_ADS_Adipose<-subset(reDinucleotide_Element,reDinucleotide_Element$CLine=="ADS_Adipose")
meanMethCoef_ADS<-apply(reDinucleotide_Element_ADS,1,FUN=methCoefMean)
meanMethCoef_ADS_Adipose<-apply(reDinucleotide_Element_ADS_Adipose,1,FUN=methCoefMean)
reDinucleotide_Element_ADS<-cbind(reDinucleotide_Element_ADS,meanMethCoef_ADS=as.numeric(as.vector(meanMethCoef_ADS)))
reDinucleotide_Element_ADS_Adipose<-cbind(reDinucleotide_Element_ADS_Adipose,meanMethCoef_ADS_Adipose=as.numeric(as.vector(meanMethCoef_ADS_Adipose)))
#################################
## With Informativeness filter ##
#################################
selected_reDinucleotide_Element_H1<-subset(reDinucleotide_Element_H1,
reDinucleotide_Element_H1$meanMethCoef_H1!=-1 &
reDinucleotide_Element_H1$E_methCoef!=-1 & ((reDinucleotide_Element_H1$E_posInf/2)/reDinucleotide_Element_H1$E_nCG)>=0.25)
selected_reDinucleotide_Element_IMR90<-subset(reDinucleotide_Element_IMR90,
reDinucleotide_Element_IMR90$meanMethCoef_IMR90!=-1 &
reDinucleotide_Element_IMR90$E_methCoef!=-1 & ((reDinucleotide_Element_IMR90$E_posInf/2)/reDinucleotide_Element_IMR90$E_nCG)>=0.25)
selected_reDinucleotide_Element_ADS<-subset(reDinucleotide_Element_ADS,
reDinucleotide_Element_ADS$meanMethCoef_ADS!=-1 &
reDinucleotide_Element_ADS$E_methCoef!=-1 & ((reDinucleotide_Element_ADS$E_posInf/2)/reDinucleotide_Element_ADS$E_nCG)>=0.25)
selected_reDinucleotide_Element_ADS_Adipose<-subset(reDinucleotide_Element_ADS_Adipose,
reDinucleotide_Element_ADS_Adipose$meanMethCoef_ADS_Adipose!=-1 &
reDinucleotide_Element_ADS_Adipose$E_methCoef!=-1 & ((reDinucleotide_Element_ADS_Adipose$E_posInf/2)/reDinucleotide_Element_ADS_Adipose$E_nCG)>=0.25)
png(paste(resultsDIR,"figure25Filtered.png",sep=""),height=12,width=12,units="cm",res=300)
par(lwd=1.5)
par(cex.axis=0.8)
diffCpGiHpaIIH1<-as.data.frame(cbind(selected_reDinucleotide_Element_H1$E_methCoef-selected_reDinucleotide_Element_H1$meanMethCoef_H1,
selected_reDinucleotide_Element_H1$E_methCoef))
diffCpGiHpaIIIMR90<-as.data.frame(cbind(selected_reDinucleotide_Element_IMR90$E_methCoef-selected_reDinucleotide_Element_IMR90$meanMethCoef_IMR90,
selected_reDinucleotide_Element_IMR90$E_methCoef))
diffCpGiHpaIIADS<-as.data.frame(cbind(selected_reDinucleotide_Element_ADS$E_methCoef-selected_reDinucleotide_Element_ADS$meanMethCoef_ADS,
selected_reDinucleotide_Element_ADS$E_methCoef))
diffCpGiHpaIIADS_Adipose<-as.data.frame(cbind(selected_reDinucleotide_Element_ADS_Adipose$E_methCoef-selected_reDinucleotide_Element_ADS_Adipose$meanMethCoef_ADS_Adipose,
selected_reDinucleotide_Element_ADS_Adipose$E_methCoef))
par(mfrow=c(2,2))
plot(density(diffCpGiHpaIIADS[,1]),type="n",xlim=c(-1,1),ylim=c(0,20),main="",
xlab="",ylab="")
lines(density(diffCpGiHpaIIH1[,1]),col="darkblue",lwd=0.8)
lines(density(diffCpGiHpaIIIMR90[,1]),col="red",lwd=0.8)
lines(density(diffCpGiHpaIIADS[,1],width=0.05),col="green",lwd=0.8)
lines(density(diffCpGiHpaIIADS_Adipose[,1],width=0.05),col="purple",lwd=0.8)
plot(density(diffCpGiHpaIIADS[,1]),type="n",xlim=c(-1,1),ylim=c(0,20),main="",
xlab="",ylab="")
lines(density(subset(diffCpGiHpaIIH1,diffCpGiHpaIIH1$V2>=0.75)[,1]),col="darkblue",lwd=0.8)
lines(density(subset(diffCpGiHpaIIIMR90,diffCpGiHpaIIIMR90$V2>=0.75)[,1]),col="red",lwd=0.8)
lines(density(subset(diffCpGiHpaIIADS,diffCpGiHpaIIADS$V2>=0.75)[,1]),col="green",lwd=0.8)
lines(density(subset(diffCpGiHpaIIADS_Adipose,diffCpGiHpaIIADS_Adipose$V2>=0.75)[,1]),col="purple",lwd=0.8)
plot(density(diffCpGiHpaIIADS[,1]),type="n",xlim=c(-1,1),ylim=c(0,20),main="",
xlab="",ylab="")
lines(density(subset(diffCpGiHpaIIH1,diffCpGiHpaIIH1$V2<=0.25)[,1],width=0.05),col="darkblue",lwd=0.8)
lines(density(subset(diffCpGiHpaIIIMR90,diffCpGiHpaIIIMR90$V2<=0.25)[,1],width=0.05),col="red",lwd=0.8)
lines(density(subset(diffCpGiHpaIIADS,diffCpGiHpaIIADS$V2<=0.25)[,1],width=0.05),col="green",lwd=0.8)
lines(density(subset(diffCpGiHpaIIADS_Adipose,diffCpGiHpaIIADS_Adipose$V2<=0.25)[,1],width=0.05),col="purple",lwd=0.8)
plot(density(diffCpGiHpaIIADS[,1]),type="n",xlim=c(-1,1),ylim=c(0,20),main="",
xlab="",ylab="")
lines(density(subset(diffCpGiHpaIIH1,diffCpGiHpaIIH1$V2>0.25 & diffCpGiHpaIIH1$V2<0.75)[,1]),col="darkblue",lwd=0.8)
lines(density(subset(diffCpGiHpaIIIMR90,diffCpGiHpaIIIMR90$V2>0.25 & diffCpGiHpaIIIMR90$V2<0.75)[,1]),col="red",lwd=0.8)
lines(density(subset(diffCpGiHpaIIADS,diffCpGiHpaIIADS$V2>0.25 & diffCpGiHpaIIADS$V2<0.75)[,1]),col="green",lwd=0.8)
lines(density(subset(diffCpGiHpaIIADS_Adipose,diffCpGiHpaIIADS_Adipose$V2>0.25 & diffCpGiHpaIIADS_Adipose$V2<0.75)[,1]),col="purple",lwd=0.8)
par(mfrow=c(1,1))
dev.off()
dataMatrix<-matrix(ncol=2,nrow=4)
colnames(dataMatrix)<-c("ADS","ADS_Adipose")
rownames(dataMatrix)<-c("All","Methylated","Unmethylated","Intermediate")
dataMatrix[1,1]<-length(diffCpGiHpaIIADS[,1])
dataMatrix[1,2]<-length(diffCpGiHpaIIADS_Adipose[,1])
dataMatrix[2,1]<-length(subset(diffCpGiHpaIIADS,diffCpGiHpaIIADS$V2>=0.75)[,1])
dataMatrix[2,2]<-length(subset(diffCpGiHpaIIADS_Adipose,diffCpGiHpaIIADS_Adipose$V2>=0.75)[,1])
dataMatrix[3,1]<-length(subset(diffCpGiHpaIIADS,diffCpGiHpaIIADS$V2<=0.25)[,1])
dataMatrix[3,2]<-length(subset(diffCpGiHpaIIADS_Adipose,diffCpGiHpaIIADS_Adipose$V2<=0.25)[,1])
dataMatrix[4,1]<-length(subset(diffCpGiHpaIIADS,diffCpGiHpaIIADS$V2>0.25 & diffCpGiHpaIIADS$V2<0.75)[,1])
dataMatrix[4,2]<-length(subset(diffCpGiHpaIIADS_Adipose,diffCpGiHpaIIADS_Adipose$V2>0.25 & diffCpGiHpaIIADS_Adipose$V2<0.75)[,1])
write.table(dataMatrix,file=paste(resultsDIR,"figure25FilteredValues.txt",sep=""),sep="\t")
####################################
## Without Informativeness filter ##
####################################
selected_reDinucleotide_Element_H1<-subset(reDinucleotide_Element_H1,
reDinucleotide_Element_H1$meanMethCoef_H1!=-1 &
reDinucleotide_Element_H1$E_methCoef!=-1)
selected_reDinucleotide_Element_IMR90<-subset(reDinucleotide_Element_IMR90,
reDinucleotide_Element_IMR90$meanMethCoef_IMR90!=-1 &
reDinucleotide_Element_IMR90$E_methCoef!=-1)
selected_reDinucleotide_Element_ADS<-subset(reDinucleotide_Element_ADS,
reDinucleotide_Element_ADS$meanMethCoef_ADS!=-1 &
reDinucleotide_Element_ADS$E_methCoef!=-1)
selected_reDinucleotide_Element_ADS_Adipose<-subset(reDinucleotide_Element_ADS_Adipose,
reDinucleotide_Element_ADS_Adipose$meanMethCoef_ADS_Adipose!=-1 &
reDinucleotide_Element_ADS_Adipose$E_methCoef!=-1)
#######################
png(paste(resultsDIR,"figure25All.png",sep=""),height=12,width=12,units="cm",res=300)
par(lwd=1.5)
par(cex.axis=0.8)
diffCpGiHpaIIH1<-as.data.frame(cbind(selected_reDinucleotide_Element_H1$E_methCoef-selected_reDinucleotide_Element_H1$meanMethCoef_H1,
selected_reDinucleotide_Element_H1$E_methCoef))
diffCpGiHpaIIIMR90<-as.data.frame(cbind(selected_reDinucleotide_Element_IMR90$E_methCoef-selected_reDinucleotide_Element_IMR90$meanMethCoef_IMR90,
selected_reDinucleotide_Element_IMR90$E_methCoef))
diffCpGiHpaIIADS<-as.data.frame(cbind(selected_reDinucleotide_Element_ADS$E_methCoef-selected_reDinucleotide_Element_ADS$meanMethCoef_ADS,
selected_reDinucleotide_Element_ADS$E_methCoef))
diffCpGiHpaIIADS_Adipose<-as.data.frame(cbind(selected_reDinucleotide_Element_ADS_Adipose$E_methCoef-selected_reDinucleotide_Element_ADS_Adipose$meanMethCoef_ADS_Adipose,
selected_reDinucleotide_Element_ADS_Adipose$E_methCoef))
par(mfrow=c(2,2))
plot(density(diffCpGiHpaIIADS[,1]),type="n",xlim=c(-1,1),ylim=c(0,20),main="",
xlab="",ylab="")
lines(density(diffCpGiHpaIIH1[,1],width=0.05),col="darkblue",lwd=0.8)
lines(density(diffCpGiHpaIIIMR90[,1],width=0.05),col="red",lwd=0.8)
lines(density(diffCpGiHpaIIADS[,1],width=0.05),col="green",lwd=0.8)
lines(density(diffCpGiHpaIIADS_Adipose[,1],width=0.05),col="purple",lwd=0.8)
plot(density(diffCpGiHpaIIADS[,1]),type="n",xlim=c(-1,1),ylim=c(0,20),main="",
xlab="",ylab="")
lines(density(subset(diffCpGiHpaIIH1,diffCpGiHpaIIH1$V2>=0.75)[,1]),col="darkblue",lwd=0.8)
lines(density(subset(diffCpGiHpaIIIMR90,diffCpGiHpaIIIMR90$V2>=0.75)[,1]),col="red",lwd=0.8)
lines(density(subset(diffCpGiHpaIIADS,diffCpGiHpaIIADS$V2>=0.75)[,1]),col="green",lwd=0.8)
lines(density(subset(diffCpGiHpaIIADS_Adipose,diffCpGiHpaIIADS_Adipose$V2>=0.75)[,1]),col="purple",lwd=0.8)
plot(density(diffCpGiHpaIIADS[,1]),type="n",xlim=c(-1,1),ylim=c(0,20),main="",
xlab="",ylab="")
lines(density(subset(diffCpGiHpaIIH1,diffCpGiHpaIIH1$V2<=0.25)[,1],width=0.05),col="darkblue",lwd=0.8)
lines(density(subset(diffCpGiHpaIIIMR90,diffCpGiHpaIIIMR90$V2<=0.25)[,1],width=0.05),col="red",lwd=0.8)
lines(density(subset(diffCpGiHpaIIADS,diffCpGiHpaIIADS$V2<=0.25)[,1],width=0.05),col="green",lwd=0.8)
lines(density(subset(diffCpGiHpaIIADS_Adipose,diffCpGiHpaIIADS_Adipose$V2<=0.25)[,1],width=0.05),col="purple",lwd=0.8)
plot(density(diffCpGiHpaIIADS[,1]),type="n",xlim=c(-1,1),ylim=c(0,20),main="",
xlab="",ylab="")
lines(density(subset(diffCpGiHpaIIH1,diffCpGiHpaIIH1$V2>0.25 & diffCpGiHpaIIH1$V2<0.75)[,1]),col="darkblue",lwd=0.8)
lines(density(subset(diffCpGiHpaIIIMR90,diffCpGiHpaIIIMR90$V2>0.25 & diffCpGiHpaIIIMR90$V2<0.75)[,1]),col="red",lwd=0.8)
lines(density(subset(diffCpGiHpaIIADS,diffCpGiHpaIIADS$V2>0.25 & diffCpGiHpaIIADS$V2<0.75)[,1]),col="green",lwd=0.8)
lines(density(subset(diffCpGiHpaIIADS_Adipose,diffCpGiHpaIIADS_Adipose$V2>0.25 & diffCpGiHpaIIADS_Adipose$V2<0.75)[,1]),col="purple",lwd=0.8)
par(mfrow=c(1,1))
dev.off()
dataMatrix<-matrix(ncol=2,nrow=4)
colnames(dataMatrix)<-c("ADS","ADS_Adipose")
rownames(dataMatrix)<-c("All","Methylated","Unmethylated","Intermediate")
dataMatrix[1,1]<-length(diffCpGiHpaIIADS[,1])
dataMatrix[1,2]<-length(diffCpGiHpaIIADS_Adipose[,1])
dataMatrix[2,1]<-length(subset(diffCpGiHpaIIADS,diffCpGiHpaIIADS$V2>=0.75)[,1])
dataMatrix[2,2]<-length(subset(diffCpGiHpaIIADS_Adipose,diffCpGiHpaIIADS_Adipose$V2>=0.75)[,1])
dataMatrix[3,1]<-length(subset(diffCpGiHpaIIADS,diffCpGiHpaIIADS$V2<=0.25)[,1])
dataMatrix[3,2]<-length(subset(diffCpGiHpaIIADS_Adipose,diffCpGiHpaIIADS_Adipose$V2<=0.25)[,1])
dataMatrix[4,1]<-length(subset(diffCpGiHpaIIADS,diffCpGiHpaIIADS$V2>0.25 & diffCpGiHpaIIADS$V2<0.75)[,1])
dataMatrix[4,2]<-length(subset(diffCpGiHpaIIADS_Adipose,diffCpGiHpaIIADS_Adipose$V2>0.25 & diffCpGiHpaIIADS_Adipose$V2<0.75)[,1])
write.table(dataMatrix,file=paste(resultsDIR,"figure25ValuesAll.txt",sep=""),sep="\t")
|
/Genomic_Evaluation_of_individual_CpG_Methylation/R/figure25.R
|
no_license
|
vbarrera/thesis
|
R
| false
| false
| 14,051
|
r
|
################
### Figure 25 ###
################
# Pull every HpaII site inside a CpG island, together with the per-position
# methylation calls (METH_POS_ASSIGNMENT) and the aggregate methylation of the
# enclosing island (METH_ELEM_ASSIGNMENT).
# library() replaces require(): a missing RMySQL package now fails immediately
# with a clear error instead of warning and then erroring at dbConnect().
library(RMySQL)
con<-dbConnect(MySQL(),dbname="MethData_Lister_hg18")
reNucleotide_Element<-dbGetQuery(con,"SELECT R.nucleotide,R.id_In_RE,R.position,MPA.methCoef,
MPA.name,MPA.nReads,E.id_In_Type,E.nCG,E.chrom,E.chromStart,E.chromEnd,MEA.methCoef,MEA.posInf,MEA.Std_Dev
FROM (((R_POS R JOIN METH_POS_ASSIGNMENT MPA ON R.nucleotide=MPA.nucleotide
AND R.RE_name=MPA.RE_name AND R.id_In_RE=MPA.id_In_RE)
JOIN CORRESPONDENCE C ON R.nucleotide=C.nucleotide
AND R.RE_name=C.RE_name AND R.id_In_RE=C.id_In_RE)
JOIN ELEMENT E ON C.type=E.type AND C.id_In_Type=E.id_In_Type)
JOIN METH_ELEM_ASSIGNMENT MEA ON MEA.type=E.type
AND MEA.id_In_Type=E.id_In_Type
WHERE MPA.name=MEA.name AND R.RE_name='HpaII' AND E.type='CpGisland'
AND MPA.RE_name='HpaII' AND MEA.type='CpGisland' AND C.type='CpGisland'")
# Widen so each HpaII site becomes one row with separate C- and G-strand
# columns, then keep only the columns needed downstream.
reDinucleotide_Element<-reshape(reNucleotide_Element,idvar=c("id_In_RE","name"),
direction="wide",timevar="nucleotide")
reDinucleotide_Element<-subset(reDinucleotide_Element,select=c(id_In_RE,name,position.C,nReads.C,methCoef.C,
methCoef.G,id_In_Type.C,chrom.C,chromStart.C,chromEnd.C,nCG.C,methCoef.1.C,posInf.C,Std_Dev.C)
)
# RE_* columns describe the restriction site, E_* columns the CpG island.
colnames(reDinucleotide_Element)<-c("id_In_RE","CLine","RE_position","RE_nReads",
"C_methCoef","G_methCoef","id_In_Type","E_chrom","E_chromStart","E_chromEnd","E_nCG",
"E_methCoef","E_posInf","E_Std_Dev")
# Average the C- and G-strand methylation coefficients for one HpaII site.
# x is one row of the wide site table (character vector after apply()'s
# matrix coercion): x[5] = C-strand methCoef, x[6] = G-strand methCoef.
# -1 marks a non-informative strand, so the other strand's value is
# returned as-is; otherwise the two strands are averaged.
methCoefMean<-function(x){
  c_coef <- as.numeric(x[5])
  g_coef <- as.numeric(x[6])
  if (c_coef == -1 && g_coef != -1) {
    x[6]
  } else if (g_coef == -1 && c_coef != -1) {
    x[5]
  } else {
    (c_coef + g_coef) / 2
  }
}
# Split the wide site table by cell line (CLine) and append each site's mean
# methylation coefficient, computed per row by methCoefMean. apply() coerces
# the data frame to a character matrix, so the result is re-coerced to
# numeric via as.numeric(as.vector(...)) before cbind-ing.
reDinucleotide_Element_H1<-subset(reDinucleotide_Element,reDinucleotide_Element$CLine=="H1")
reDinucleotide_Element_IMR90<-subset(reDinucleotide_Element,reDinucleotide_Element$CLine=="IMR90")
meanMethCoef_H1<-apply(reDinucleotide_Element_H1,1,FUN=methCoefMean)
meanMethCoef_IMR90<-apply(reDinucleotide_Element_IMR90,1,FUN=methCoefMean)
reDinucleotide_Element_H1<-cbind(reDinucleotide_Element_H1,meanMethCoef_H1=as.numeric(as.vector(meanMethCoef_H1)))
reDinucleotide_Element_IMR90<-cbind(reDinucleotide_Element_IMR90,meanMethCoef_IMR90=as.numeric(as.vector(meanMethCoef_IMR90)))
# Same treatment for the two adipose-lineage samples.
reDinucleotide_Element_ADS<-subset(reDinucleotide_Element,reDinucleotide_Element$CLine=="ADS")
reDinucleotide_Element_ADS_Adipose<-subset(reDinucleotide_Element,reDinucleotide_Element$CLine=="ADS_Adipose")
meanMethCoef_ADS<-apply(reDinucleotide_Element_ADS,1,FUN=methCoefMean)
meanMethCoef_ADS_Adipose<-apply(reDinucleotide_Element_ADS_Adipose,1,FUN=methCoefMean)
reDinucleotide_Element_ADS<-cbind(reDinucleotide_Element_ADS,meanMethCoef_ADS=as.numeric(as.vector(meanMethCoef_ADS)))
reDinucleotide_Element_ADS_Adipose<-cbind(reDinucleotide_Element_ADS_Adipose,meanMethCoef_ADS_Adipose=as.numeric(as.vector(meanMethCoef_ADS_Adipose)))
#################################
## With Informativeness filter ##
#################################
# Keep only sites where both the site-level mean (meanMethCoef_*) and the
# island-level methylation (E_methCoef) are informative (!= -1) and where at
# least 25% of the island's CpGs are informative. E_posInf is halved before
# comparing against E_nCG -- presumably it counts both strands; verify.
selected_reDinucleotide_Element_H1<-subset(reDinucleotide_Element_H1,
reDinucleotide_Element_H1$meanMethCoef_H1!=-1 &
reDinucleotide_Element_H1$E_methCoef!=-1 & ((reDinucleotide_Element_H1$E_posInf/2)/reDinucleotide_Element_H1$E_nCG)>=0.25)
selected_reDinucleotide_Element_IMR90<-subset(reDinucleotide_Element_IMR90,
reDinucleotide_Element_IMR90$meanMethCoef_IMR90!=-1 &
reDinucleotide_Element_IMR90$E_methCoef!=-1 & ((reDinucleotide_Element_IMR90$E_posInf/2)/reDinucleotide_Element_IMR90$E_nCG)>=0.25)
selected_reDinucleotide_Element_ADS<-subset(reDinucleotide_Element_ADS,
reDinucleotide_Element_ADS$meanMethCoef_ADS!=-1 &
reDinucleotide_Element_ADS$E_methCoef!=-1 & ((reDinucleotide_Element_ADS$E_posInf/2)/reDinucleotide_Element_ADS$E_nCG)>=0.25)
selected_reDinucleotide_Element_ADS_Adipose<-subset(reDinucleotide_Element_ADS_Adipose,
reDinucleotide_Element_ADS_Adipose$meanMethCoef_ADS_Adipose!=-1 &
reDinucleotide_Element_ADS_Adipose$E_methCoef!=-1 & ((reDinucleotide_Element_ADS_Adipose$E_posInf/2)/reDinucleotide_Element_ADS_Adipose$E_nCG)>=0.25)
png(paste(resultsDIR,"figure25Filtered.png",sep=""),height=12,width=12,units="cm",res=300)
par(lwd=1.5)
par(cex.axis=0.8)
# Column 1 of each diff* frame: island methylation minus site methylation;
# column 2 (named V2 by as.data.frame): the island methylation itself, used
# below to stratify sites into methylated / unmethylated / intermediate.
diffCpGiHpaIIH1<-as.data.frame(cbind(selected_reDinucleotide_Element_H1$E_methCoef-selected_reDinucleotide_Element_H1$meanMethCoef_H1,
selected_reDinucleotide_Element_H1$E_methCoef))
diffCpGiHpaIIIMR90<-as.data.frame(cbind(selected_reDinucleotide_Element_IMR90$E_methCoef-selected_reDinucleotide_Element_IMR90$meanMethCoef_IMR90,
selected_reDinucleotide_Element_IMR90$E_methCoef))
diffCpGiHpaIIADS<-as.data.frame(cbind(selected_reDinucleotide_Element_ADS$E_methCoef-selected_reDinucleotide_Element_ADS$meanMethCoef_ADS,
selected_reDinucleotide_Element_ADS$E_methCoef))
diffCpGiHpaIIADS_Adipose<-as.data.frame(cbind(selected_reDinucleotide_Element_ADS_Adipose$E_methCoef-selected_reDinucleotide_Element_ADS_Adipose$meanMethCoef_ADS_Adipose,
selected_reDinucleotide_Element_ADS_Adipose$E_methCoef))
# 2x2 figure: (1) all sites, (2) islands with E_methCoef >= 0.75,
# (3) islands <= 0.25, (4) intermediate islands. Each panel draws an empty
# frame (type="n") then one density curve per cell line: H1 darkblue,
# IMR90 red, ADS green, ADS_Adipose purple.
par(mfrow=c(2,2))
plot(density(diffCpGiHpaIIADS[,1]),type="n",xlim=c(-1,1),ylim=c(0,20),main="",
xlab="",ylab="")
lines(density(diffCpGiHpaIIH1[,1]),col="darkblue",lwd=0.8)
lines(density(diffCpGiHpaIIIMR90[,1]),col="red",lwd=0.8)
lines(density(diffCpGiHpaIIADS[,1],width=0.05),col="green",lwd=0.8)
lines(density(diffCpGiHpaIIADS_Adipose[,1],width=0.05),col="purple",lwd=0.8)
plot(density(diffCpGiHpaIIADS[,1]),type="n",xlim=c(-1,1),ylim=c(0,20),main="",
xlab="",ylab="")
lines(density(subset(diffCpGiHpaIIH1,diffCpGiHpaIIH1$V2>=0.75)[,1]),col="darkblue",lwd=0.8)
lines(density(subset(diffCpGiHpaIIIMR90,diffCpGiHpaIIIMR90$V2>=0.75)[,1]),col="red",lwd=0.8)
lines(density(subset(diffCpGiHpaIIADS,diffCpGiHpaIIADS$V2>=0.75)[,1]),col="green",lwd=0.8)
lines(density(subset(diffCpGiHpaIIADS_Adipose,diffCpGiHpaIIADS_Adipose$V2>=0.75)[,1]),col="purple",lwd=0.8)
plot(density(diffCpGiHpaIIADS[,1]),type="n",xlim=c(-1,1),ylim=c(0,20),main="",
xlab="",ylab="")
lines(density(subset(diffCpGiHpaIIH1,diffCpGiHpaIIH1$V2<=0.25)[,1],width=0.05),col="darkblue",lwd=0.8)
lines(density(subset(diffCpGiHpaIIIMR90,diffCpGiHpaIIIMR90$V2<=0.25)[,1],width=0.05),col="red",lwd=0.8)
lines(density(subset(diffCpGiHpaIIADS,diffCpGiHpaIIADS$V2<=0.25)[,1],width=0.05),col="green",lwd=0.8)
lines(density(subset(diffCpGiHpaIIADS_Adipose,diffCpGiHpaIIADS_Adipose$V2<=0.25)[,1],width=0.05),col="purple",lwd=0.8)
plot(density(diffCpGiHpaIIADS[,1]),type="n",xlim=c(-1,1),ylim=c(0,20),main="",
xlab="",ylab="")
lines(density(subset(diffCpGiHpaIIH1,diffCpGiHpaIIH1$V2>0.25 & diffCpGiHpaIIH1$V2<0.75)[,1]),col="darkblue",lwd=0.8)
lines(density(subset(diffCpGiHpaIIIMR90,diffCpGiHpaIIIMR90$V2>0.25 & diffCpGiHpaIIIMR90$V2<0.75)[,1]),col="red",lwd=0.8)
lines(density(subset(diffCpGiHpaIIADS,diffCpGiHpaIIADS$V2>0.25 & diffCpGiHpaIIADS$V2<0.75)[,1]),col="green",lwd=0.8)
lines(density(subset(diffCpGiHpaIIADS_Adipose,diffCpGiHpaIIADS_Adipose$V2>0.25 & diffCpGiHpaIIADS_Adipose$V2<0.75)[,1]),col="purple",lwd=0.8)
par(mfrow=c(1,1))
dev.off()
# Count how many ADS / ADS_Adipose sites fall into each methylation stratum
# and write the table alongside the figure.
dataMatrix<-matrix(ncol=2,nrow=4)
colnames(dataMatrix)<-c("ADS","ADS_Adipose")
rownames(dataMatrix)<-c("All","Methylated","Unmethylated","Intermediate")
dataMatrix[1,1]<-length(diffCpGiHpaIIADS[,1])
dataMatrix[1,2]<-length(diffCpGiHpaIIADS_Adipose[,1])
dataMatrix[2,1]<-length(subset(diffCpGiHpaIIADS,diffCpGiHpaIIADS$V2>=0.75)[,1])
dataMatrix[2,2]<-length(subset(diffCpGiHpaIIADS_Adipose,diffCpGiHpaIIADS_Adipose$V2>=0.75)[,1])
dataMatrix[3,1]<-length(subset(diffCpGiHpaIIADS,diffCpGiHpaIIADS$V2<=0.25)[,1])
dataMatrix[3,2]<-length(subset(diffCpGiHpaIIADS_Adipose,diffCpGiHpaIIADS_Adipose$V2<=0.25)[,1])
dataMatrix[4,1]<-length(subset(diffCpGiHpaIIADS,diffCpGiHpaIIADS$V2>0.25 & diffCpGiHpaIIADS$V2<0.75)[,1])
dataMatrix[4,2]<-length(subset(diffCpGiHpaIIADS_Adipose,diffCpGiHpaIIADS_Adipose$V2>0.25 & diffCpGiHpaIIADS_Adipose$V2<0.75)[,1])
write.table(dataMatrix,file=paste(resultsDIR,"figure25FilteredValues.txt",sep=""),sep="\t")
####################################
## Without Informativeness filter ##
####################################
# Same analysis as above but only requiring that both methylation values be
# informative (!= -1) -- no minimum fraction of informative CpGs per island.
selected_reDinucleotide_Element_H1<-subset(reDinucleotide_Element_H1,
reDinucleotide_Element_H1$meanMethCoef_H1!=-1 &
reDinucleotide_Element_H1$E_methCoef!=-1)
selected_reDinucleotide_Element_IMR90<-subset(reDinucleotide_Element_IMR90,
reDinucleotide_Element_IMR90$meanMethCoef_IMR90!=-1 &
reDinucleotide_Element_IMR90$E_methCoef!=-1)
selected_reDinucleotide_Element_ADS<-subset(reDinucleotide_Element_ADS,
reDinucleotide_Element_ADS$meanMethCoef_ADS!=-1 &
reDinucleotide_Element_ADS$E_methCoef!=-1)
selected_reDinucleotide_Element_ADS_Adipose<-subset(reDinucleotide_Element_ADS_Adipose,
reDinucleotide_Element_ADS_Adipose$meanMethCoef_ADS_Adipose!=-1 &
reDinucleotide_Element_ADS_Adipose$E_methCoef!=-1)
#######################
png(paste(resultsDIR,"figure25All.png",sep=""),height=12,width=12,units="cm",res=300)
par(lwd=1.5)
par(cex.axis=0.8)
# Column 1: island methylation minus site methylation; column 2 (V2):
# the island methylation, used to stratify the panels below.
diffCpGiHpaIIH1<-as.data.frame(cbind(selected_reDinucleotide_Element_H1$E_methCoef-selected_reDinucleotide_Element_H1$meanMethCoef_H1,
selected_reDinucleotide_Element_H1$E_methCoef))
diffCpGiHpaIIIMR90<-as.data.frame(cbind(selected_reDinucleotide_Element_IMR90$E_methCoef-selected_reDinucleotide_Element_IMR90$meanMethCoef_IMR90,
selected_reDinucleotide_Element_IMR90$E_methCoef))
diffCpGiHpaIIADS<-as.data.frame(cbind(selected_reDinucleotide_Element_ADS$E_methCoef-selected_reDinucleotide_Element_ADS$meanMethCoef_ADS,
selected_reDinucleotide_Element_ADS$E_methCoef))
diffCpGiHpaIIADS_Adipose<-as.data.frame(cbind(selected_reDinucleotide_Element_ADS_Adipose$E_methCoef-selected_reDinucleotide_Element_ADS_Adipose$meanMethCoef_ADS_Adipose,
selected_reDinucleotide_Element_ADS_Adipose$E_methCoef))
# 2x2 figure: all sites / methylated islands (>= 0.75) / unmethylated
# islands (<= 0.25) / intermediate. Colors: H1 darkblue, IMR90 red,
# ADS green, ADS_Adipose purple.
par(mfrow=c(2,2))
plot(density(diffCpGiHpaIIADS[,1]),type="n",xlim=c(-1,1),ylim=c(0,20),main="",
xlab="",ylab="")
lines(density(diffCpGiHpaIIH1[,1],width=0.05),col="darkblue",lwd=0.8)
lines(density(diffCpGiHpaIIIMR90[,1],width=0.05),col="red",lwd=0.8)
lines(density(diffCpGiHpaIIADS[,1],width=0.05),col="green",lwd=0.8)
lines(density(diffCpGiHpaIIADS_Adipose[,1],width=0.05),col="purple",lwd=0.8)
plot(density(diffCpGiHpaIIADS[,1]),type="n",xlim=c(-1,1),ylim=c(0,20),main="",
xlab="",ylab="")
lines(density(subset(diffCpGiHpaIIH1,diffCpGiHpaIIH1$V2>=0.75)[,1]),col="darkblue",lwd=0.8)
lines(density(subset(diffCpGiHpaIIIMR90,diffCpGiHpaIIIMR90$V2>=0.75)[,1]),col="red",lwd=0.8)
lines(density(subset(diffCpGiHpaIIADS,diffCpGiHpaIIADS$V2>=0.75)[,1]),col="green",lwd=0.8)
lines(density(subset(diffCpGiHpaIIADS_Adipose,diffCpGiHpaIIADS_Adipose$V2>=0.75)[,1]),col="purple",lwd=0.8)
plot(density(diffCpGiHpaIIADS[,1]),type="n",xlim=c(-1,1),ylim=c(0,20),main="",
xlab="",ylab="")
lines(density(subset(diffCpGiHpaIIH1,diffCpGiHpaIIH1$V2<=0.25)[,1],width=0.05),col="darkblue",lwd=0.8)
lines(density(subset(diffCpGiHpaIIIMR90,diffCpGiHpaIIIMR90$V2<=0.25)[,1],width=0.05),col="red",lwd=0.8)
lines(density(subset(diffCpGiHpaIIADS,diffCpGiHpaIIADS$V2<=0.25)[,1],width=0.05),col="green",lwd=0.8)
lines(density(subset(diffCpGiHpaIIADS_Adipose,diffCpGiHpaIIADS_Adipose$V2<=0.25)[,1],width=0.05),col="purple",lwd=0.8)
plot(density(diffCpGiHpaIIADS[,1]),type="n",xlim=c(-1,1),ylim=c(0,20),main="",
xlab="",ylab="")
lines(density(subset(diffCpGiHpaIIH1,diffCpGiHpaIIH1$V2>0.25 & diffCpGiHpaIIH1$V2<0.75)[,1]),col="darkblue",lwd=0.8)
lines(density(subset(diffCpGiHpaIIIMR90,diffCpGiHpaIIIMR90$V2>0.25 & diffCpGiHpaIIIMR90$V2<0.75)[,1]),col="red",lwd=0.8)
lines(density(subset(diffCpGiHpaIIADS,diffCpGiHpaIIADS$V2>0.25 & diffCpGiHpaIIADS$V2<0.75)[,1]),col="green",lwd=0.8)
lines(density(subset(diffCpGiHpaIIADS_Adipose,diffCpGiHpaIIADS_Adipose$V2>0.25 & diffCpGiHpaIIADS_Adipose$V2<0.75)[,1]),col="purple",lwd=0.8)
par(mfrow=c(1,1))
dev.off()
# Per-stratum site counts for the unfiltered data, written next to the figure.
dataMatrix<-matrix(ncol=2,nrow=4)
colnames(dataMatrix)<-c("ADS","ADS_Adipose")
rownames(dataMatrix)<-c("All","Methylated","Unmethylated","Intermediate")
dataMatrix[1,1]<-length(diffCpGiHpaIIADS[,1])
dataMatrix[1,2]<-length(diffCpGiHpaIIADS_Adipose[,1])
dataMatrix[2,1]<-length(subset(diffCpGiHpaIIADS,diffCpGiHpaIIADS$V2>=0.75)[,1])
dataMatrix[2,2]<-length(subset(diffCpGiHpaIIADS_Adipose,diffCpGiHpaIIADS_Adipose$V2>=0.75)[,1])
dataMatrix[3,1]<-length(subset(diffCpGiHpaIIADS,diffCpGiHpaIIADS$V2<=0.25)[,1])
dataMatrix[3,2]<-length(subset(diffCpGiHpaIIADS_Adipose,diffCpGiHpaIIADS_Adipose$V2<=0.25)[,1])
dataMatrix[4,1]<-length(subset(diffCpGiHpaIIADS,diffCpGiHpaIIADS$V2>0.25 & diffCpGiHpaIIADS$V2<0.75)[,1])
dataMatrix[4,2]<-length(subset(diffCpGiHpaIIADS_Adipose,diffCpGiHpaIIADS_Adipose$V2>0.25 & diffCpGiHpaIIADS_Adipose$V2<0.75)[,1])
write.table(dataMatrix,file=paste(resultsDIR,"figure25ValuesAll.txt",sep=""),sep="\t")
|
# Title
# Prompt the user for the sampling date.
readSamplingDate <- function() {
  readline(prompt="Enter the sampling date (year-month-day): ")
}
samplingDate <- readSamplingDate()
# Prompt the user for the sampling time.
readSamplingTime <- function() {
  readline(prompt="Enter the sampling time (hour:minute:second): ")
}
samplingTime <- readSamplingTime()
# Combine both answers into a single POSIXct timestamp.
samplingDATE <- as.POSIXct(paste(samplingDate, samplingTime), format="%Y-%m-%d %H:%M:%S")
# Next Rscript:
|
/Radioactivity/SamplingDateReadFunction.R
|
no_license
|
Parek86/PrimeCoolR
|
R
| false
| false
| 571
|
r
|
# Title
# Prompt the user for the sampling date.
readSamplingDate <- function() {
  readline(prompt="Enter the sampling date (year-month-day): ")
}
samplingDate <- readSamplingDate()
# Prompt the user for the sampling time.
readSamplingTime <- function() {
  readline(prompt="Enter the sampling time (hour:minute:second): ")
}
samplingTime <- readSamplingTime()
# Combine both answers into a single POSIXct timestamp.
samplingDATE <- as.POSIXct(paste(samplingDate, samplingTime), format="%Y-%m-%d %H:%M:%S")
# Next Rscript:
|
average_score <- function(m, s){
  ## takes in a matrix m and a vector of scores s
  ## returns the average score per statement: total of all scores in s
  ## divided by the number of statements (rows of m)
  ## sum() replaces the original 1:length(s) loop, which misbehaved on an
  ## empty score vector (1:0 iterates over c(1, 0)) and returned the value
  ## invisibly via a trailing assignment
  sum(s) / nrow(m)
}
column_average <- function(m){
  ## takes in a matrix m and returns a vector containing the average of each C column (sum of each column/# statements)
  ## indicates what fraction of statements received credit for any given C
  ## colMeans() replaces the nested 1:nc/1:nr loops, which failed on a
  ## zero-column matrix; unname() keeps the unnamed-vector return shape
  unname(colMeans(m))
}
mode_vector <- function(s){
  ## takes in a vector s and returns its mode (most frequent value)
  ## returns NA when every value occurs exactly once (no repeats at all);
  ## ties between repeated values resolve to the value appearing first in s
  distinct_vals <- unique(s)
  counts <- tabulate(match(s, distinct_vals))
  winner <- distinct_vals[which.max(counts)]
  if(length(distinct_vals) == length(s)){
    winner <- NA
  }
  winner
}
number_of_statements <- function(m){
  ## number of statements (rows) in matrix m
  ## returning nrow(m) directly fixes the original trailing assignment,
  ## which made the result invisible at the call site
  nrow(m)
}
range_vector <- function(s){
  ## takes in a vector s and returns the spread between the highest and
  ## lowest value (max - min)
  ## diff(range(s)) replaces the manual r[2] - r[1] and the trailing
  ## assignment that made the original's return value invisible
  diff(range(s))
}
sum_statements <- function(m){
  ## finds the sums of the 4C scores (row sums) for all statements in m
  ## rowSums() replaces the nested 1:nr/1:nc loops and handles zero-row or
  ## zero-column matrices; unname() keeps the unnamed numeric-vector shape
  unname(rowSums(m))
}
|
/R_Functions/Functions Folder V2/Old Functions/dataManipulation/matrixFunctions.R
|
no_license
|
dmcmill/4C_QuantWritingAnalysis
|
R
| false
| false
| 1,894
|
r
|
average_score <- function(m, s){
  ## takes in a matrix m and a vector of scores s
  ## returns the average score per statement: total of all scores in s
  ## divided by the number of statements (rows of m)
  ## sum() replaces the original 1:length(s) loop, which misbehaved on an
  ## empty score vector (1:0 iterates over c(1, 0)) and returned the value
  ## invisibly via a trailing assignment
  sum(s) / nrow(m)
}
column_average <- function(m){
  ## takes in a matrix m and returns a vector containing the average of each C column (sum of each column/# statements)
  ## indicates what fraction of statements received credit for any given C
  ## colMeans() replaces the nested 1:nc/1:nr loops, which failed on a
  ## zero-column matrix; unname() keeps the unnamed-vector return shape
  unname(colMeans(m))
}
mode_vector <- function(s){
  ## takes in a vector s and returns its mode (most frequent value)
  ## returns NA when every value occurs exactly once (no repeats at all);
  ## ties between repeated values resolve to the value appearing first in s
  distinct_vals <- unique(s)
  counts <- tabulate(match(s, distinct_vals))
  winner <- distinct_vals[which.max(counts)]
  if(length(distinct_vals) == length(s)){
    winner <- NA
  }
  winner
}
number_of_statements <- function(m){
  ## number of statements (rows) in matrix m
  ## returning nrow(m) directly fixes the original trailing assignment,
  ## which made the result invisible at the call site
  nrow(m)
}
range_vector <- function(s){
  ## takes in a vector s and returns the spread between the highest and
  ## lowest value (max - min)
  ## diff(range(s)) replaces the manual r[2] - r[1] and the trailing
  ## assignment that made the original's return value invisible
  diff(range(s))
}
sum_statements <- function(m){
  ## finds the sums of the 4C scores (row sums) for all statements in m
  ## rowSums() replaces the nested 1:nr/1:nc loops and handles zero-row or
  ## zero-column matrices; unname() keeps the unnamed numeric-vector shape
  unname(rowSums(m))
}
|
## object for storing matrices with cached inverse matrix value
## method to create and manage the object
## x = original matrix, i = the inverse matrix (solve(x))
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; reset to NULL whenever the stored matrix changes.
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }
  get <- function() x
  setinverse <- function(value) inv <<- value
  getinverse <- function() inv
  # Expose the four accessors as a named list.
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## solve function for the special matrix object
## return cached inverse if exists, otherwise calculate and cache it
cacheSolve <- function(x, ...) {
  # Return the cached inverse when one is already stored.
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Otherwise compute the inverse, cache it on x, and return it.
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
tormave/ProgrammingAssignment2
|
R
| false
| false
| 868
|
r
|
## object for storing matrices with cached inverse matrix value
## method to create and manage the object
## x = original matrix, i = the inverse matrix (solve(x))
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; reset to NULL whenever the stored matrix changes.
  inv <- NULL
  set <- function(y) {
    x <<- y
    inv <<- NULL
  }
  get <- function() x
  setinverse <- function(value) inv <<- value
  getinverse <- function() inv
  # Expose the four accessors as a named list.
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## solve function for the special matrix object
## return cached inverse if exists, otherwise calculate and cache it
cacheSolve <- function(x, ...) {
  # Return the cached inverse when one is already stored.
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Otherwise compute the inverse, cache it on x, and return it.
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
|
source("1.0_Climate_data_import.R")
create_met_ensemble <- function(n.ens = 10){
  # Build an n.ens-member meteorological ensemble for 2018 by jittering each
  # county's monthly Daymet climate drivers; the jitter amount for each
  # variable is that variable's across-month standard deviation.
  #
  # n.ens: number of ensemble members (columns) to draw per variable.
  # Returns a named list, one element per county; each element is a list of
  # 12 x n.ens matrices: prcp, sum.prcp, tmin, tmax, vp, RH.
  clim.dat.2018 <- get_daymet(2018, 2018)
  ens.2018 <- list()
  for(i in seq_along(clim.dat.2018)){
    county <- select(clim.dat.2018[[i]], -date) # drop the date column
    # storage: one 12-month x n.ens matrix per climate variable
    prcp <- sum.prcp <- tmin <- tmax <- vp <- RH <- matrix(NA, 12, n.ens)
    # draw ensemble members column by column
    for(e in seq_len(n.ens)){
      prcp[,e] <- jitter(county[,"prcp"], amount = sd(county[,"prcp"]))
      sum.prcp[,e] <- jitter(county[,"sum.prcp"], amount = sd(county[,"sum.prcp"]))
      tmin[,e] <- jitter(county[,"tmin"], amount = sd(county[,"tmin"]))
      tmax[,e] <- jitter(county[,"tmax"], amount = sd(county[,"tmax"]))
      vp[,e] <- jitter(county[,"vp"], amount = sd(county[,"vp"]))
      RH[,e] <- jitter(county[,"RH"], amount = sd(county[,"RH"]))
    }
    # Precipitation and relative humidity cannot be negative: clamp at 0.
    # (Hoisted out of the ensemble loop -- the original re-clamped the full
    # matrices on every iteration; pmax(x, 0) equals the old (abs(x)+x)/2.)
    prcp <- pmax(prcp, 0)
    sum.prcp <- pmax(sum.prcp, 0)
    RH <- pmax(RH, 0)
    ens.2018[[i]] <- list(prcp = prcp,
                          sum.prcp = sum.prcp,
                          tmin = tmin,
                          tmax = tmax,
                          vp = vp,
                          RH = RH)
  }
  names(ens.2018) <- names(clim.dat.2018)
  return(ens.2018)
}
# par(mfrow = c(2,3))
#
# for(c in 1:length(ens.2018)){
# for(i in 1:6){
# plot(1:12, ens.2018[[c]][[i]][,1], type = "l")
# for(e in 2:10){
# lines(1:12, ens.2018[[c]][[i]][,e])
# }
# }
# }
|
/5.1_Create_Met_Ensemble.R
|
permissive
|
carina-t/Lil_Aye_Deez_2k19
|
R
| false
| false
| 1,679
|
r
|
source("1.0_Climate_data_import.R")
create_met_ensemble <- function(n.ens = 10){
  # Build an n.ens-member meteorological ensemble for 2018 by jittering each
  # county's monthly Daymet climate drivers; the jitter amount for each
  # variable is that variable's across-month standard deviation.
  #
  # n.ens: number of ensemble members (columns) to draw per variable.
  # Returns a named list, one element per county; each element is a list of
  # 12 x n.ens matrices: prcp, sum.prcp, tmin, tmax, vp, RH.
  clim.dat.2018 <- get_daymet(2018, 2018)
  ens.2018 <- list()
  for(i in seq_along(clim.dat.2018)){
    county <- select(clim.dat.2018[[i]], -date) # drop the date column
    # storage: one 12-month x n.ens matrix per climate variable
    prcp <- sum.prcp <- tmin <- tmax <- vp <- RH <- matrix(NA, 12, n.ens)
    # draw ensemble members column by column
    for(e in seq_len(n.ens)){
      prcp[,e] <- jitter(county[,"prcp"], amount = sd(county[,"prcp"]))
      sum.prcp[,e] <- jitter(county[,"sum.prcp"], amount = sd(county[,"sum.prcp"]))
      tmin[,e] <- jitter(county[,"tmin"], amount = sd(county[,"tmin"]))
      tmax[,e] <- jitter(county[,"tmax"], amount = sd(county[,"tmax"]))
      vp[,e] <- jitter(county[,"vp"], amount = sd(county[,"vp"]))
      RH[,e] <- jitter(county[,"RH"], amount = sd(county[,"RH"]))
    }
    # Precipitation and relative humidity cannot be negative: clamp at 0.
    # (Hoisted out of the ensemble loop -- the original re-clamped the full
    # matrices on every iteration; pmax(x, 0) equals the old (abs(x)+x)/2.)
    prcp <- pmax(prcp, 0)
    sum.prcp <- pmax(sum.prcp, 0)
    RH <- pmax(RH, 0)
    ens.2018[[i]] <- list(prcp = prcp,
                          sum.prcp = sum.prcp,
                          tmin = tmin,
                          tmax = tmax,
                          vp = vp,
                          RH = RH)
  }
  names(ens.2018) <- names(clim.dat.2018)
  return(ens.2018)
}
# par(mfrow = c(2,3))
#
# for(c in 1:length(ens.2018)){
# for(i in 1:6){
# plot(1:12, ens.2018[[c]][[i]][,1], type = "l")
# for(e in 2:10){
# lines(1:12, ens.2018[[c]][[i]][,e])
# }
# }
# }
|
# --- Data preparation: 7-state COVID-19 case/death time series ---------------
# Pull cumulative case and death counts (usafacts data, via GitHub mirror).
US_7cases2 <- read.csv(url("https://raw.githubusercontent.com/Reinalynn/MSDS692/master/Data/US_7cases2.csv"), header = TRUE, stringsAsFactors = FALSE)
US_7deaths2 <- read.csv(url("https://raw.githubusercontent.com/Reinalynn/MSDS692/master/Data/US_7deaths2.csv"), header = TRUE, stringsAsFactors = FALSE)
# Transpose so that rows are days and columns are states.
US_7cases2t <- as.data.frame(t(US_7cases2))
colnames(US_7cases2t) <- c("CO", "MI", "MN", "NE", "PA", "SD", "TX")
dim(US_7cases2t)
# truncate to remove days prior to reporting (first case occurs on row 45)
US_7cum2 <- US_7cases2t[43:156, ]
str(US_7cum2)
# Transposing produced factor columns; coerce back to integer counts.
US_7cum2 <- US_7cum2 %>% mutate_if(is.factor, as.character)
US_7cum2 <- US_7cum2 %>% mutate_if(is.character, as.integer)
# difference data: convert cumulative totals into daily new cases
US_72 <- diffM(US_7cum2)
# create ts starting at day 65 of 2020, with daily frequency
US_7ts2 <- ts(US_72, start = c(2020, 65), frequency = 365)
# use recent data from usafacts and limit to deaths but include multiple counties
str(US_7deaths2)
US_7dcum2 <- as.data.frame(t(US_7deaths2))
tail(US_7dcum2)
# truncate to the same 43:156 day window used for cases
US_7dcum2 <- US_7dcum2[43:156, ]
colnames(US_7dcum2) <- c("CO", "MI", "MN", "NE", "PA", "SD", "TX")
dim(US_7dcum2)
US_7dcum2 <- US_7dcum2 %>% mutate_if(is.factor, as.character)
US_7dcum2 <- US_7dcum2 %>% mutate_if(is.character, as.integer)
# difference: convert cumulative totals into daily new deaths
US_7d2 <- diffM(US_7dcum2)
# create ts aligned with the cases series
US_7dts2 <- ts(US_7d2, start = c(2020, 65), frequency = 365)
autoplot(US_7ts2, main = "COVID-19 Cases for 7 US states")
autoplot(US_7dts2, main = "COVID-19 Deaths for 7 US states")
# BEST MODELS - use auto.arima models for simplicity and consistency.
# Pattern per series: fit auto.arima(); if checkresiduals() flags a low
# Ljung-Box p-value, refit with a manually selected ARIMA order, then
# forecast 10 days ahead (06.25 - 07.04) with sarima.for().
# CO
fit_CO <- auto.arima(US_7ts2[, "CO"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_CO) # too low
fit_CO2 <- arima(US_7ts2[, "CO"], order = c(6, 1, 2))
checkresiduals(fit_CO2) # passes, use ARIMA(6, 1, 2)
fit_CO2 <- sarima.for(US_7ts2[, "CO"], n.ahead = 10, 6, 1, 2)
fit_CO2$pred # cases for CO 06.25 through 07.04
# to check, create actual vector and use RMSE(fit_CO2$pred, *actual)/mean(*actual)
fit_COd <- auto.arima(US_7dts2[, "CO"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_COd) # too low
fit_CO2d <- arima(US_7dts2[, "CO"], order = c(10, 1, 2))
checkresiduals(fit_CO2d) # passes, use ARIMA(10, 1, 2)
fit_CO2d <- sarima.for(US_7dts2[, "CO"], n.ahead = 10, 10, 1, 2)
fit_CO2d$pred # deaths for CO 06.25 through 07.04
# MI
fit_MI <- auto.arima(US_7ts2[, "MI"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_MI) # passes, use ARIMA(1,0,1)
fit_MI <- sarima.for(US_7ts2[, "MI"], n.ahead = 10, 1, 0, 1)
fit_MI$pred # cases for MI 06.25 through 07.04
fit_MId <- auto.arima(US_7dts2[, "MI"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_MId) # passes, use ARIMA(0, 1, 4)
fit_MId <- sarima.for(US_7dts2[, "MI"], n.ahead = 10, 0, 1, 4)
fit_MId$pred # deaths for MI 06.25 through 07.04
# MN
fit_MN <- auto.arima(US_7ts2[, "MN"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_MN) # good, use ARIMA(3, 1, 2)
fit_MN <- sarima.for(US_7ts2[, "MN"], n.ahead = 10, 3, 1, 2)
fit_MN$pred # cases for MN 06.25 through 07.04
fit_MNd <- auto.arima(US_7dts2[, "MN"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_MNd) # too low
fit_MN2d <- arima(US_7dts2[, "MN"], order = c(5, 1, 3))
checkresiduals(fit_MN2d) # passes, use ARIMA(5, 1, 3)
# Fix: forecast with the validated ARIMA(5, 1, 3) order; the AR order was
# previously mistyped as 3, so the forecast did not match the checked model.
fit_MN2d <- sarima.for(US_7dts2[, "MN"], n.ahead = 10, 5, 1, 3)
fit_MN2d$pred # deaths for MN 06.25 through 07.04
# NE
fit_NE <- auto.arima(US_7ts2[, "NE"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_NE) # too low
fit_NE2 <- arima(US_7ts2[, "NE"], order = c(9, 1, 4))
checkresiduals(fit_NE2) # good, use ARIMA(9, 1, 4)
fit_NE2 <- sarima.for(US_7ts2[, "NE"], n.ahead = 10, 9, 1, 4)
fit_NE2$pred # cases for NE 06.25 through 07.04
fit_NEd <- auto.arima(US_7dts2[, "NE"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_NEd) # good, use ARIMA(0, 1, 1)
fit_NEd <- sarima.for(US_7dts2[, "NE"], n.ahead = 10, 0, 1, 1)
fit_NEd$pred # deaths for NE 06.25 through 07.04
# PA
fit_PA <- auto.arima(US_7ts2[, "PA"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_PA) # good, use ARIMA(3, 1, 2)
fit_PA <- sarima.for(US_7ts2[, "PA"], n.ahead = 10, 3, 1, 2)
fit_PA$pred # cases for PA 06.25 through 07.04
fit_PAd <- auto.arima(US_7dts2[, "PA"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_PAd) # good, use ARIMA(1, 1, 4)
fit_PAd <- sarima.for(US_7dts2[, "PA"], n.ahead = 10, 1, 1, 4)
fit_PAd$pred # deaths for PA 06.25 through 07.04
# SD
fit_SD <- auto.arima(US_7ts2[, "SD"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_SD) # good, use ARIMA(5, 1, 0)
fit_SD <- sarima.for(US_7ts2[, "SD"], n.ahead = 10, 5, 1, 0)
fit_SD$pred # cases for SD 06.25 through 07.04
fit_SDd <- auto.arima(US_7dts2[, "SD"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_SDd) # too low
fit_SD2d <- arima(US_7dts2[, "SD"], order = c(6, 1, 3))
checkresiduals(fit_SD2d) # good, use ARIMA(6, 1, 3)
fit_SD2d <- sarima.for(US_7dts2[, "SD"], n.ahead = 10, 6, 1, 3)
fit_SD2d$pred # deaths for SD 06.25 through 07.04
# TX
fit_TX <- auto.arima(US_7ts2[, "TX"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_TX) # too low
fit_TX2 <- arima(US_7ts2[, "TX"], order = c(7, 1, 3))
checkresiduals(fit_TX2) # use ARIMA(7, 1, 3)
fit_TX2 <- sarima.for(US_7ts2[, "TX"], n.ahead = 10, 7, 1, 3)
fit_TX2$pred # cases for TX 06.25 through 07.04
fit_TXd <- auto.arima(US_7dts2[, "TX"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_TXd) # too low
fit_TX2d <- arima(US_7dts2[, "TX"], order = c(10, 2, 3))
checkresiduals(fit_TX2d) # close, use ARIMA(10, 2, 3)
fit_TX2d <- sarima.for(US_7dts2[, "TX"], n.ahead = 10, 10, 2, 3)
fit_TX2d$pred # deaths for TX 06.25 through 07.04
|
/Code/final_forecasting0624.R
|
permissive
|
Reinalynn/Forecasting-COVID-19-Cases-and-Deaths-Using-Time-Series-in-R
|
R
| false
| false
| 5,640
|
r
|
# --- Data preparation: 7-state COVID-19 case/death time series ---------------
# Pull cumulative case and death counts (usafacts data, via GitHub mirror).
US_7cases2 <- read.csv(url("https://raw.githubusercontent.com/Reinalynn/MSDS692/master/Data/US_7cases2.csv"), header = TRUE, stringsAsFactors = FALSE)
US_7deaths2 <- read.csv(url("https://raw.githubusercontent.com/Reinalynn/MSDS692/master/Data/US_7deaths2.csv"), header = TRUE, stringsAsFactors = FALSE)
# Transpose so that rows are days and columns are states.
US_7cases2t <- as.data.frame(t(US_7cases2))
colnames(US_7cases2t) <- c("CO", "MI", "MN", "NE", "PA", "SD", "TX")
dim(US_7cases2t)
# truncate to remove days prior to reporting (first case occurs on row 45)
US_7cum2 <- US_7cases2t[43:156, ]
str(US_7cum2)
# Transposing produced factor columns; coerce back to integer counts.
US_7cum2 <- US_7cum2 %>% mutate_if(is.factor, as.character)
US_7cum2 <- US_7cum2 %>% mutate_if(is.character, as.integer)
# difference data: convert cumulative totals into daily new cases
US_72 <- diffM(US_7cum2)
# create ts starting at day 65 of 2020, with daily frequency
US_7ts2 <- ts(US_72, start = c(2020, 65), frequency = 365)
# use recent data from usafacts and limit to deaths but include multiple counties
str(US_7deaths2)
US_7dcum2 <- as.data.frame(t(US_7deaths2))
tail(US_7dcum2)
# truncate to the same 43:156 day window used for cases
US_7dcum2 <- US_7dcum2[43:156, ]
colnames(US_7dcum2) <- c("CO", "MI", "MN", "NE", "PA", "SD", "TX")
dim(US_7dcum2)
US_7dcum2 <- US_7dcum2 %>% mutate_if(is.factor, as.character)
US_7dcum2 <- US_7dcum2 %>% mutate_if(is.character, as.integer)
# difference: convert cumulative totals into daily new deaths
US_7d2 <- diffM(US_7dcum2)
# create ts aligned with the cases series
US_7dts2 <- ts(US_7d2, start = c(2020, 65), frequency = 365)
autoplot(US_7ts2, main = "COVID-19 Cases for 7 US states")
autoplot(US_7dts2, main = "COVID-19 Deaths for 7 US states")
# BEST MODELS - use auto.arima models for simplicity and consistency.
# Pattern per series: fit auto.arima(); if checkresiduals() flags a low
# Ljung-Box p-value, refit with a manually selected ARIMA order, then
# forecast 10 days ahead (06.25 - 07.04) with sarima.for().
# CO
fit_CO <- auto.arima(US_7ts2[, "CO"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_CO) # too low
fit_CO2 <- arima(US_7ts2[, "CO"], order = c(6, 1, 2))
checkresiduals(fit_CO2) # passes, use ARIMA(6, 1, 2)
fit_CO2 <- sarima.for(US_7ts2[, "CO"], n.ahead = 10, 6, 1, 2)
fit_CO2$pred # cases for CO 06.25 through 07.04
# to check, create actual vector and use RMSE(fit_CO2$pred, *actual)/mean(*actual)
fit_COd <- auto.arima(US_7dts2[, "CO"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_COd) # too low
fit_CO2d <- arima(US_7dts2[, "CO"], order = c(10, 1, 2))
checkresiduals(fit_CO2d) # passes, use ARIMA(10, 1, 2)
fit_CO2d <- sarima.for(US_7dts2[, "CO"], n.ahead = 10, 10, 1, 2)
fit_CO2d$pred # deaths for CO 06.25 through 07.04
# MI
fit_MI <- auto.arima(US_7ts2[, "MI"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_MI) # passes, use ARIMA(1,0,1)
fit_MI <- sarima.for(US_7ts2[, "MI"], n.ahead = 10, 1, 0, 1)
fit_MI$pred # cases for MI 06.25 through 07.04
fit_MId <- auto.arima(US_7dts2[, "MI"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_MId) # passes, use ARIMA(0, 1, 4)
fit_MId <- sarima.for(US_7dts2[, "MI"], n.ahead = 10, 0, 1, 4)
fit_MId$pred # deaths for MI 06.25 through 07.04
# MN
fit_MN <- auto.arima(US_7ts2[, "MN"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_MN) # good, use ARIMA(3, 1, 2)
fit_MN <- sarima.for(US_7ts2[, "MN"], n.ahead = 10, 3, 1, 2)
fit_MN$pred # cases for MN 06.25 through 07.04
fit_MNd <- auto.arima(US_7dts2[, "MN"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_MNd) # too low
fit_MN2d <- arima(US_7dts2[, "MN"], order = c(5, 1, 3))
checkresiduals(fit_MN2d) # passes, use ARIMA(5, 1, 3)
# Fix: forecast with the validated ARIMA(5, 1, 3) order; the AR order was
# previously mistyped as 3, so the forecast did not match the checked model.
fit_MN2d <- sarima.for(US_7dts2[, "MN"], n.ahead = 10, 5, 1, 3)
fit_MN2d$pred # deaths for MN 06.25 through 07.04
# NE
fit_NE <- auto.arima(US_7ts2[, "NE"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_NE) # too low
fit_NE2 <- arima(US_7ts2[, "NE"], order = c(9, 1, 4))
checkresiduals(fit_NE2) # good, use ARIMA(9, 1, 4)
fit_NE2 <- sarima.for(US_7ts2[, "NE"], n.ahead = 10, 9, 1, 4)
fit_NE2$pred # cases for NE 06.25 through 07.04
fit_NEd <- auto.arima(US_7dts2[, "NE"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_NEd) # good, use ARIMA(0, 1, 1)
fit_NEd <- sarima.for(US_7dts2[, "NE"], n.ahead = 10, 0, 1, 1)
fit_NEd$pred # deaths for NE 06.25 through 07.04
# PA
fit_PA <- auto.arima(US_7ts2[, "PA"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_PA) # good, use ARIMA(3, 1, 2)
fit_PA <- sarima.for(US_7ts2[, "PA"], n.ahead = 10, 3, 1, 2)
fit_PA$pred # cases for PA 06.25 through 07.04
fit_PAd <- auto.arima(US_7dts2[, "PA"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_PAd) # good, use ARIMA(1, 1, 4)
fit_PAd <- sarima.for(US_7dts2[, "PA"], n.ahead = 10, 1, 1, 4)
fit_PAd$pred # deaths for PA 06.25 through 07.04
# SD
fit_SD <- auto.arima(US_7ts2[, "SD"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_SD) # good, use ARIMA(5, 1, 0)
fit_SD <- sarima.for(US_7ts2[, "SD"], n.ahead = 10, 5, 1, 0)
fit_SD$pred # cases for SD 06.25 through 07.04
fit_SDd <- auto.arima(US_7dts2[, "SD"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_SDd) # too low
fit_SD2d <- arima(US_7dts2[, "SD"], order = c(6, 1, 3))
checkresiduals(fit_SD2d) # good, use ARIMA(6, 1, 3)
fit_SD2d <- sarima.for(US_7dts2[, "SD"], n.ahead = 10, 6, 1, 3)
fit_SD2d$pred # deaths for SD 06.25 through 07.04
# TX
fit_TX <- auto.arima(US_7ts2[, "TX"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_TX) # too low
fit_TX2 <- arima(US_7ts2[, "TX"], order = c(7, 1, 3))
checkresiduals(fit_TX2) # use ARIMA(7, 1, 3)
fit_TX2 <- sarima.for(US_7ts2[, "TX"], n.ahead = 10, 7, 1, 3)
fit_TX2$pred # cases for TX 06.25 through 07.04
fit_TXd <- auto.arima(US_7dts2[, "TX"], stepwise = FALSE, approximation = FALSE)
checkresiduals(fit_TXd) # too low
fit_TX2d <- arima(US_7dts2[, "TX"], order = c(10, 2, 3))
checkresiduals(fit_TX2d) # close, use ARIMA(10, 2, 3)
fit_TX2d <- sarima.for(US_7dts2[, "TX"], n.ahead = 10, 10, 2, 3)
fit_TX2d$pred # deaths for TX 06.25 through 07.04
|
# Global ML optimization of the BiChroM (wood/herb) negative log-likelihood
# for one simulated 50-taxon tree (replicate 94), using the derivative-free
# SBPLX algorithm from nloptr. Writes the solution and objective to CSV.
library("ape")
library("geiger")
library("expm")
library("nloptr")
source("masternegloglikeeps1.R")
source("Qmatrixwoodherb2.R")
source("Pruning2.R")

# Simulated phylogeny and chromosome-count data for this replicate.
sim.tree <- read.tree("tree50time94.txt")
sim.chrom <- read.table("chrom50time94.txt", header = FALSE)

last.state <- 50
# Starting parameter values on the log scale.
x.0 <- log(c(0.12, 0.001, 0.25, 0.002, 0.036, 0.006, 0.04, 0.02,
             1.792317852, 1.57e-14))
# Uniform root prior over the 2 * (last.state + 1) states.
p.0 <- rep(1, 2 * (last.state + 1)) / (2 * (last.state + 1))

# Subplex search with tight relative tolerance and a hard evaluation cap.
my.options <- list("algorithm" = "NLOPT_LN_SBPLX", "ftol_rel" = 1e-08,
                   "print_level" = 1, "maxtime" = 170000000, "maxeval" = 1000)
mle <- nloptr(x0 = x.0, eval_f = negloglikelihood.wh, opts = my.options,
              bichrom.phy = sim.tree, bichrom.data = sim.chrom,
              max.chromosome = last.state, pi.0 = p.0)
print(mle)

# Entries 1:10 hold the parameter MLEs; entry 11 the minimized objective.
results <- rep(0, 11)
results[1:10] <- mle$solution
results[11] <- mle$objective
write.table(results, file = "globalmax50tree94.csv", sep = ",")
|
/Simulations tree height/50 my/optim50tree94.R
|
no_license
|
roszenil/Bichromdryad
|
R
| false
| false
| 821
|
r
|
# Global ML optimization of the BiChroM (wood/herb) negative log-likelihood
# for one simulated 50-taxon tree (replicate 94), using the derivative-free
# SBPLX algorithm from nloptr. Writes the solution and objective to CSV.
library("ape")
library("geiger")
library("expm")
library("nloptr")
source("masternegloglikeeps1.R")
source("Qmatrixwoodherb2.R")
source("Pruning2.R")

# Simulated phylogeny and chromosome-count data for this replicate.
sim.tree <- read.tree("tree50time94.txt")
sim.chrom <- read.table("chrom50time94.txt", header = FALSE)

last.state <- 50
# Starting parameter values on the log scale.
x.0 <- log(c(0.12, 0.001, 0.25, 0.002, 0.036, 0.006, 0.04, 0.02,
             1.792317852, 1.57e-14))
# Uniform root prior over the 2 * (last.state + 1) states.
p.0 <- rep(1, 2 * (last.state + 1)) / (2 * (last.state + 1))

# Subplex search with tight relative tolerance and a hard evaluation cap.
my.options <- list("algorithm" = "NLOPT_LN_SBPLX", "ftol_rel" = 1e-08,
                   "print_level" = 1, "maxtime" = 170000000, "maxeval" = 1000)
mle <- nloptr(x0 = x.0, eval_f = negloglikelihood.wh, opts = my.options,
              bichrom.phy = sim.tree, bichrom.data = sim.chrom,
              max.chromosome = last.state, pi.0 = p.0)
print(mle)

# Entries 1:10 hold the parameter MLEs; entry 11 the minimized objective.
results <- rep(0, 11)
results[1:10] <- mle$solution
results[11] <- mle$objective
write.table(results, file = "globalmax50tree94.csv", sep = ",")
|
# Generate the stimulus/outcome sequence for the weather-prediction task.
# Fix the RNG state so the generated sequence is reproducible.
set.seed(20180824)
# Number of trials in the generated sequence.
n_trial <- 200
# Per-stimulus occurrence frequencies and rain probabilities.
probs <- readr::read_csv(file.path("content/resources/config/WxPredict/prob_table.csv"))
# Build the stimulus sequence with multinomial draws, rejecting any draw
# that would repeat the previous trial's stimulus.
stim_seq <- numeric(n_trial)
outcome_seq <- character(n_trial)
last_stim <- 0
for (i_trial in seq_len(n_trial)) {
  # Draw candidates until one differs from the previous stimulus.
  candidate <- which(rmultinom(1, size = 1, prob = probs$freq_occur) == 1)
  while (candidate == last_stim) {
    candidate <- which(rmultinom(1, size = 1, prob = probs$freq_occur) == 1)
  }
  stim_seq[i_trial] <- candidate
  last_stim <- candidate
  # Simulate the weather outcome from this stimulus's rain probability.
  rain_prob <- with(probs, freq_rain[ID == candidate])
  outcome_seq[i_trial] <- if (runif(1) < rain_prob) "Rain" else "Sunshine"
}
# Persist both sequences as comma-joined strings in a JSON file.
jsonlite::write_json(
  list(
    stim = paste(stim_seq, collapse = ","),
    outcome = paste(outcome_seq, collapse = ",")
  ),
  file.path("static/seq/08201_WxPredict.json"),
  auto_unbox = TRUE
)
|
/R/config/WeatherPrediction/seqgen.R
|
permissive
|
iquizoo/docs
|
R
| false
| false
| 1,097
|
r
|
# Generate the stimulus/outcome sequence for the weather-prediction task.
# Fix the RNG state so the generated sequence is reproducible.
set.seed(20180824)
# Number of trials in the generated sequence.
n_trial <- 200
# Per-stimulus occurrence frequencies and rain probabilities.
probs <- readr::read_csv(file.path("content/resources/config/WxPredict/prob_table.csv"))
# Build the stimulus sequence with multinomial draws, rejecting any draw
# that would repeat the previous trial's stimulus.
stim_seq <- numeric(n_trial)
outcome_seq <- character(n_trial)
last_stim <- 0
for (i_trial in seq_len(n_trial)) {
  # Draw candidates until one differs from the previous stimulus.
  candidate <- which(rmultinom(1, size = 1, prob = probs$freq_occur) == 1)
  while (candidate == last_stim) {
    candidate <- which(rmultinom(1, size = 1, prob = probs$freq_occur) == 1)
  }
  stim_seq[i_trial] <- candidate
  last_stim <- candidate
  # Simulate the weather outcome from this stimulus's rain probability.
  rain_prob <- with(probs, freq_rain[ID == candidate])
  outcome_seq[i_trial] <- if (runif(1) < rain_prob) "Rain" else "Sunshine"
}
# Persist both sequences as comma-joined strings in a JSON file.
jsonlite::write_json(
  list(
    stim = paste(stim_seq, collapse = ","),
    outcome = paste(outcome_seq, collapse = ",")
  ),
  file.path("static/seq/08201_WxPredict.json"),
  auto_unbox = TRUE
)
|
setwd("C:/Users/wbowers/Documents/tcga_replication/data")
# NOTE(review): set.seed() truncates non-integer seeds, so this is
# effectively set.seed(123).
set.seed(123.456)
# Raw TCGA-SARC expression matrix: rows = genes, columns = tumour samples.
exp.data <- read.csv("TCGA_SARC_data_raw.csv", row.names = 1, stringsAsFactors = FALSE)
# exp.data <- exp.data[1:1000,]
# Filter out all genes with < 90% nonzero expression (keep genes whose
# fraction of zero-expression samples is below 10%). A vectorized logical
# keep-mask replaces the grow-in-loop removal index; it also behaves
# correctly when no gene is filtered, whereas `exp.data[-NULL, ]` would
# silently drop every row.
zero.frac <- rowSums(exp.data == 0, na.rm = TRUE) / ncol(exp.data)
exp.data.90filt <- exp.data[zero.frac < 0.1, ]
# Check distribution of first gene
library(ggplot2)
ggplot() +
  geom_histogram(aes(x=as.numeric(exp.data.90filt[1,])))
# log2 transform; add 0.05 pseudo-count to prevent -Inf at zero counts
exp.data.log2 <- log2(exp.data.90filt+0.05)
# Median-centre each tumour (column) across all genes
# (apply(X, 2, f) operates column-wise).
exp.data.c1 <- apply(exp.data.log2,2,function(x){
  x-median(x)
})
# Median-centre each gene (row) across all tumours; transpose back because
# apply(X, 1, f) returns its results column-wise.
exp.data.c2 <- as.data.frame(t(apply(exp.data.c1,1,function(x){
  x-median(x)
})))
write.csv(exp.data.c2, "TCGA_SARC_mrna_data_lnorm_medc_nosdfilt.csv")
# Check new distribution
ggplot() +
  geom_histogram(aes(x=as.numeric(exp.data.c2[1,])))
# Remove genes with std < 2. The row-wise sd is unchanged by the second
# (per-gene median) centring, so computing it on exp.data.c1 — as the
# original did — is equivalent to computing it on exp.data.c2.
gene.sd <- apply(exp.data.c1, 1, sd)
exp.data.sdfilt <- exp.data.c2[gene.sd >= 2, ]
write.csv(exp.data.sdfilt, "TCGA_SARC_mrna_data_lnorm_medc.csv")
write.table(exp.data.sdfilt, "TCGA_SARC_mrna_data_lnorm_medc.tsv", sep="\t")
|
/scripts/R/explore/mrna_norm_2.R
|
no_license
|
whtbowers/TCGA-SARC_graphs
|
R
| false
| false
| 1,472
|
r
|
setwd("C:/Users/wbowers/Documents/tcga_replication/data")
# NOTE(review): set.seed() truncates non-integer seeds, so this is
# effectively set.seed(123).
set.seed(123.456)
# Raw TCGA-SARC expression matrix: rows = genes, columns = tumour samples.
exp.data <- read.csv("TCGA_SARC_data_raw.csv", row.names = 1, stringsAsFactors = FALSE)
# exp.data <- exp.data[1:1000,]
# Filter out all genes with < 90% nonzero expression (keep genes whose
# fraction of zero-expression samples is below 10%). A vectorized logical
# keep-mask replaces the grow-in-loop removal index; it also behaves
# correctly when no gene is filtered, whereas `exp.data[-NULL, ]` would
# silently drop every row.
zero.frac <- rowSums(exp.data == 0, na.rm = TRUE) / ncol(exp.data)
exp.data.90filt <- exp.data[zero.frac < 0.1, ]
# Check distribution of first gene
library(ggplot2)
ggplot() +
  geom_histogram(aes(x=as.numeric(exp.data.90filt[1,])))
# log2 transform; add 0.05 pseudo-count to prevent -Inf at zero counts
exp.data.log2 <- log2(exp.data.90filt+0.05)
# Median-centre each tumour (column) across all genes
# (apply(X, 2, f) operates column-wise).
exp.data.c1 <- apply(exp.data.log2,2,function(x){
  x-median(x)
})
# Median-centre each gene (row) across all tumours; transpose back because
# apply(X, 1, f) returns its results column-wise.
exp.data.c2 <- as.data.frame(t(apply(exp.data.c1,1,function(x){
  x-median(x)
})))
write.csv(exp.data.c2, "TCGA_SARC_mrna_data_lnorm_medc_nosdfilt.csv")
# Check new distribution
ggplot() +
  geom_histogram(aes(x=as.numeric(exp.data.c2[1,])))
# Remove genes with std < 2. The row-wise sd is unchanged by the second
# (per-gene median) centring, so computing it on exp.data.c1 — as the
# original did — is equivalent to computing it on exp.data.c2.
gene.sd <- apply(exp.data.c1, 1, sd)
exp.data.sdfilt <- exp.data.c2[gene.sd >= 2, ]
write.csv(exp.data.sdfilt, "TCGA_SARC_mrna_data_lnorm_medc.csv")
write.table(exp.data.sdfilt, "TCGA_SARC_mrna_data_lnorm_medc.tsv", sep="\t")
|
\name{dB_getSWC}
\alias{dB_getSWC}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Get soil moisture data from EURAC micro-meteo station (Mazia/Matsch)
%% ~~function to do ... ~~
}
\description{
Retrieve soil moisture data from EURAC micro-meteo station located in Mazia/Matsch
}
\usage{
dB_getSWC(path2files, header.file, station, station_nr, calibration, calibration_file, aggregation, minVALUE=0.05, maxVALUE=0.50, clear_raw_data=FALSE, remove_freezing=FALSE, write.csv=FALSE, path2write)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{path2files}{
path to meteo files
}
\item{header.file}{
header file, absolute path and file name
}
\item{station}{
character, name of micro-meteo station, e.g. "B"
}
\item{station_nr}{
integer, number of micro-meteo station, e.g. 2
}
\item{calibration}{
boolean, TRUE: calibrated SMC sensor data is used for comparison with simulated SMC; FALSE: use of uncalibrated SMC data
}
\item{calibration_file}{
path and file name of file containing calibration functions for specific stations/sensors
}
\item{aggregation}{
character, time aggregation applied. "n" no aggregation of raw data,"h": hourly, "d": daily
}
\item{minVALUE}{
numeric, minimum soil moisture value used for filtering, default = 5 vol\%
}
\item{maxVALUE}{
numeric, maximum soil moisture value used for filtering, default = 50 vol\%
}
\item{clear_raw_data}{
boolean, TRUE: clearing of raw data, not yet implemented, default = FALSE
}
\item{remove_freezing}{
boolean, TRUE: freezing periods are removed from raw data; only possible for B, P and I stations, default = FALSE
}
\item{write.csv}{
boolean, default = FALSE; TRUE: .csv file is written to path2write, FALSE: no .csv file is written
}
\item{path2write}{
path data should be written to
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
\enumerate{
\item zoo object containing processed data
\item file output containing processed data, .csv format
}
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Johannes Brenner, \email{Johannes.Brenner@eurac.edu}
}
\note{
calibration file is stored here: \file{//ABZ02FST.EURAC.EDU/Projekte/HiResAlp/06_Workspace/BrJ/02_data/Station_data_Mazia/calibration.csv}
}
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
}
|
/man/dB_getSWC.Rd
|
no_license
|
zarch/DataBaseAlpEnvEURAC
|
R
| false
| false
| 2,595
|
rd
|
\name{dB_getSWC}
\alias{dB_getSWC}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Get soil moisture data from EURAC micro-meteo station (Mazia/Matsch)
%% ~~function to do ... ~~
}
\description{
Retrieve soil moisture data from EURAC micro-meteo station located in Mazia/Matsch
}
\usage{
dB_getSWC(path2files, header.file, station, station_nr, calibration, calibration_file, aggregation, minVALUE=0.05, maxVALUE=0.50, clear_raw_data=FALSE, remove_freezing=FALSE, write.csv=FALSE, path2write)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{path2files}{
path to meteo files
}
\item{header.file}{
header file, absolute path and file name
}
\item{station}{
character, name of micro-meteo station, e.g. "B"
}
\item{station_nr}{
integer, number of micro-meteo station, e.g. 2
}
\item{calibration}{
boolean, TRUE: calibrated SMC sensor data is used for comparison with simulated SMC; FALSE: use of uncalibrated SMC data
}
\item{calibration_file}{
path and file name of file containing calibration functions for specific stations/sensors
}
\item{aggregation}{
character, time aggregation applied. "n" no aggregation of raw data,"h": hourly, "d": daily
}
\item{minVALUE}{
numeric, minimum soil moisture value used for filtering, default = 5 vol\%
}
\item{maxVALUE}{
numeric, maximum soil moisture value used for filtering, default = 50 vol\%
}
\item{clear_raw_data}{
boolean, TRUE: clearing of raw data, not yet implemented, default = FALSE
}
\item{remove_freezing}{
boolean, TRUE: freezing periods are removed from raw data; only possible for B, P and I stations, default = FALSE
}
\item{write.csv}{
boolean, default = FALSE; TRUE: .csv file is written to path2write, FALSE: no .csv file is written
}
\item{path2write}{
path data should be written to
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
\enumerate{
\item zoo object containing processed data
\item file output containing processed data, .csv format
}
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Johannes Brenner, \email{Johannes.Brenner@eurac.edu}
}
\note{
calibration file is stored here: \file{//ABZ02FST.EURAC.EDU/Projekte/HiResAlp/06_Workspace/BrJ/02_data/Station_data_Mazia/calibration.csv}
}
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
}
|
# Just a note: many of my comments -- notes/explanations -- are in the R scripts
# "approvalgraph_code.R" and "model_code.R." Check in there to get a better
# sense of my thinking when putting together this ShinyApp!
# Download relevant libraries!
library(shiny)
library(readr)
library(sentimentr)
library(tidyverse)
library(ggthemes)
library(dplyr)
library(colourpicker)
library(wordcloud2)
library(tm)
library(gt)
library(magrittr)
library(dplyr)
library(ggthemes)
library(quanteda)
library(MASS)
library(rstanarm)
library(gtsummary)
library(broom.mixed)
library(ggrepel)
library(shinythemes)
# Load the pre-computed tibbles (created by the analysis scripts and saved
# as .rds files) that back the Models tab plots/tables.
finalstocktib <- read_rds("finalstock.rds")
finalgraphtib <- read_rds("finalgraph.rds")
tweetib1 <- read_rds("tweetib1.rds")
pp <- read_rds("pp.rds")
# Reading in the raw Tweet data for both accounts.
trumptweets <- read_csv("data/Trump_tweets (1).csv")
summary(trumptweets)
hillarytweets <- read_csv("data/hillarytweets.csv")
summary(hillarytweets)
# Rather than calculate sentiment scores for all of the Tweets (thousands of
# observations, which would substantially slow things down), I took a subset
# of observations: sentence-level sentiment for the first 100 Tweets each.
trump_sentiment_scores <- sentiment(trumptweets$text[1:100])
hillary_sentiment_scores <- sentiment(hillarytweets$text[1:100])
# Account labels shown in the selectInput() dropdowns.
dataframe_options <- c("Hillary Clinton", "Donald Trump")
# Define UI:
ui <- navbarPage("Analyzing @realDonaldTrump: A Deep Dive Into Donald Trump's
Tweets",
tabPanel("Tweet Analysis",
fluidPage(theme = shinytheme("cerulean"),
titlePanel("Sentiment Analysis: A Glimpse At The Data"),
sidebarLayout(
sidebarPanel(
selectInput(inputId = "dataset",
label = "Choose a Twitter account:",
choices = c("Hillary Clinton",
"Donald Trump")),
# Originally, I just had a numericInput() box; at Dan's suggestion, I added a
# slider, so folks who visit my Shiny App can more easily look at the desired
# number of observations.
sliderInput("obs",
"Slide to the number of observations
to view:",
min = 0, max = 254, value = 30
)),
mainPanel(
verbatimTextOutput("summary"),
tableOutput("view")
)),
br(),
br(),
br(),
br(),
sidebarPanel(
numericInput("tweetread",
"Pick the Tweet you'd like to view:",
value = 5
)),
mainPanel(
gt_output(outputId = "tweetread")
),
# The sidebars are great spots to both 1) provide some context around the
# graphics, and 2) align/style the page so that the graphs were aesthetically
# appealing.
sidebarPanel(
p("Here, I visualize the distributions
of Trump and Clinton's Tweets' sentiment scores.
On average, they are both relatively neutral on Twitter,
but it's clear: Trump's Tweets see much more variation
in sentiment; by comparison, Clinton rarely reaches the
most extreme sentiment scores (1 and -1)."),
selectInput(inputId = "candidate",
label = "Choose a Twitter account:",
choices = dataframe_options)),
mainPanel(
br(),
br(),
br(),
br(),
plotOutput(outputId = "bothPlot"),
sliderInput("bins",
"Set the number of bins:",
min = 0, max = 50, value = 20
)))),
tabPanel("Models",
titlePanel("How/why does Trump's sentiment on Twitter change?"),
sidebarPanel(
titlePanel("Approval Rating"),
p("Here, I look at Donald Trump's daily approval
ratings and Twitter sentiment scores (the average sentiment
of his Tweets on a given day) over a 1 month period --
09/12/20 - 10/13/20. As we'd expect, Trump's approval
ratings and sentiment scores seem to be weakly positively
correlated (as his approval rating increases, he also
becomes more positive on Twitter -- perhaps as he becomes
more popular, it puts him in a sunny mood). One must be
cautious in drawing any conclusions, though -- not only is
the relationship relatively weak, this is also a relatively
short period of time; a longer period (like 1 year) -- with
more datapoints -- would likely be more telling. It's no
surprise that the confidence interval is so wide."),
br(),
br(),
br(),
br(),
br(),
p("In this graph, I visualize the posterior distributions for
Trump's daily Twitter sentiment score in 3 hypothetical
universes: one in which he has a 30% approval rating, one
in which he has a 45% approval rating, and one in which he has
a 60% approval rating. The distributions reflect the linear
relationship we observed above -- the hypothetical Trump with a
60% approval rating has a posterior distribution of sentiment
scores that is skewed to the right (more positive). It's also
clear that we have a much more precise estimate for the
hypothetical Trump with a 45% approval rating, given the data;
while, on average, the 30% and 60% approval rating scenarios
are less and more positive, respectively, the distributions are
rather wide, so we wouldn't be surprised if the Trump with a
30% approval rating had a positive daily Twitter sentiment
score."),
br(),
br(),
titlePanel("Stock Market"),
p("Here, I look at daily stock market opening/closing differences
and Donald Trump's corresponding Twitter sentiment scores over a
1 month period (09/12 - 10/13). Interestingly, the S&P 500's
opening/closing differences and Trump's sentiment scores seem to
be very weakly negatively correlated -- indeed the regression
results (which you can view below, in the interactive table!)
produce a coefficient which is very small/negative. Overall,
then, it seems that the stock market isn't associated with Donald
Trump's sentiment on Twitter, and any influence is such that as
the difference becomes more positive (a higher closing index
relative to the opening index) Trump becomes a bit more negative
on Twitter (perhaps he feels vindicated?).
While the relationship does seem to be very weak, we can still
use this dependent variable as a control in our regression of
Trump's sentiment scores on his approval ratings -- as we do
below."),
br(),
titlePanel("Interactive Regression Results"),
p("See an interpretation of these results in the Discussion
tab."),
selectInput(inputId = "regressiontable",
label = "Choose a variable:",
choices = c("Approval Rating",
"Stock Market",
"Interaction")),
br(),
br(),
br(),
br(),
titlePanel("Readability"),
p("Here, I look at the relationship between the readability of
Donald Trump's Tweets and the sentiment of those Tweets.
Interestingly, readability seems to have close to no relationship
with sentiment; regression results confirm this. The
visualization does pull out another trend, however; by only
displaying the text for those Tweets below a certain length
of characters, it seems that Trump's shorter tweets (generally)
tend to be more positive. Clearly, he doesn't like to brag!")),
mainPanel(
plotOutput(outputId = "approvalSentiment"),
plotOutput(outputId = "approvalPosterior"),
plotOutput(outputId = "stockSentiment"),
br(),
br(),
gt_output(outputId = "regressiontable"),
br(),
br(),
br(),
br(),
plotOutput(outputId = "readability"))),
tabPanel("Visualization",
titlePanel("Tweet Word Cloud"),
sidebarPanel(
radioButtons(
inputId = "source",
label = "Pick a candidate:",
choices = c(
"Hillary Clinton; 2016" = "hill16",
"Donald Trump; 2020" = "don20")
),
numericInput("num", "Maximum number of words:",
value = 100, min = 5),
colourInput("col", "Background Color:", value = "white"),
selectInput(
inputId = "language",
label = "Remove stopwords (e.g. and, the) in:",
choices = c("Danish", "Dutch", "English", "Finnish",
"French", "German", "Hungarian", "Italian",
"Norwegian", "Portuguese", "Russian", "Spanish",
"Swedish"),
multiple = FALSE,
selected = "English")),
mainPanel(wordcloud2Output("cloud")),
titlePanel("Character Count"),
sidebarPanel(selectInput(inputId = "hist",
label = "Choose a candidate:",
choices = c("Hillary Clinton",
"Donald Trump"))),
mainPanel(plotOutput(outputId = "char"))),
tabPanel("Discussion",
titlePanel("Interpreting the Models"),
p("This analysis refers to the Interactive Regression Results
displayed on the Models page."),
tags$b(p("Approval Rating")),
uiOutput('eq1'),
p("The first model regresses Trump's daily Twitter sentiment scores
on his associated daily approval ratings. The median of the
Intercept, -0.554, suggests that at a hypothetical approval rating
of 0, Trump's average sentiment score would be quite negative.
It is important to note: the standard error associated with
this value suggests that the 95% confidence interval is
(-1.17, 0.06), meaning that the true value could be positive, but
even so, barely positive. In other words, we can be fairly sure
-- though not entirely sure -- that Trump would have a negative
daily Twitter sentiment score at an approval rating of 0
(which, of course, makes sense!). The median of the coefficient
on the approval rating variable, 0.0138, suggests that on average,
a 1% increase in Trump's daily approval rating is associated with a
0.0138 increase in his daily Twitter sentiment score. In other
words, his popularity in the public is directly reflected in his
Tweets. Once again, the 95% confidence interval cautions us to
be wary; indeed, it suggests that the true value could be as
low as 0, or as as high as 0.02. We should far from accept these
findings as conclusive; they are not necessarily significant."),
tags$b(p("Stock Market")),
uiOutput('eq2'),
p("The second model regresses Trump's daily Twitter sentiment scores
on daily stock market opening/closing differences (does a big
jump or a big drop affect his sentiment on Twitter?). The median of
the Intercept, 0.05, suggests that at a hypothetical difference
value of 0, Trump's average sentiment score would be neutral.
Though relatively high, the Intercept's standard error value and
its resulting 95% confidence interval -- (0.015, 0.086) --
ultimately leads us to the same conclusion. The median of the
coefficient, -0.003, suggests that, on average, a 1 unit increase
in the stock market's opening/closing difference is associated with
a close to negligible dip in Trump's daily Twitter sentiment score.
In other words, it seems that the stock market's changes are not a
particularly powerful predictor of Trump's sentiment. This is, once
again, qualified by the standard error/confidence interval. The
standard error is very high -- 0.014 -- producing a wide confidence
interval of (-0.032, 0.027). It's clear, then, that the true value
could in fact suggest an important relationship between these two
variables. We should, then, take these findings with a grain of
salt."),
tags$b(p("Interaction")),
uiOutput('eq3'),
p("What if we create a model that looks at approval rating, stock
market opening/closing differences, and their interaction?"),
p("This is exactly what the last model aims to do, regressing
Trump's daily Twitter sentiment scores on his associated daily
approval ratings, the associated daily stock market
opening/closing differences, and their interaction."),
p("The median of the Intercept, -0.494, suggests that at a
hypothetical approval rating of 0% and a hypothetical stock
opening/closing difference of 0, Trump's average sentiment
score would be relatively negative; this should, however, be taken
with a grain of salt, given the high standard error value (0.334).
This ultimately implies a 95% confidence interval of
(-1.162, 0.174) -- so the true value could, in fact, represent
a positive sentiment score. (This is similar to the Intercept
we saw in the first model.) The median of the coefficient on
the approval rating variable suggests that at a hypothetical
stock difference value of 0, on average, a 1% increase in Trump's
daily approval rating is associated with a 0.0125 increase in his
daily Twitter sentiment score -- a value similar to the first
model, but slightly lower. A larger standard error value here
suggests that the true value could be as low as -0.003 or as
high as 0.003. The median of the coefficient on the range
variable suggests that at a hypothetical approval rating of
0%, on average, a 1 unit increase in the stock market's
opening/closing difference is associated with a 0.045 increase in
Trump's daily Twitter sentiment score. This is quite different from
the second model, which implied a neglible dip. In any case, once
again, a large standard error value keeps us from striking gold;
with a 95% confidence interval of (-0.228, 0.318), the true value
could be neglible or a robust increase/decrease."),
p("Finally, the median of the interaction term suggests that at
hypothetical values of 1 for the approval rating and difference
variables, one would want to add the median Intercept, median
approval rating coefficient, median difference coefficient, and,
on average, -0.000995 to predict Trump's sentiment score.
Like the others, this value is both small and insignificant, as
indicated by the broad 95% confidence interval (-0.00729,
0.00530).")),
tabPanel("About",
titlePanel("Project Background and Motivations"),
p("This project aims to explore US President Donald Trump's Tweets in
the months leading up to the 2020 General Election. Unlike his
predecessors, Trump has used social media extensively, through which
he reaches over 88 million followers on Twitter alone. Given the
influence his Tweets have had during his Presidency, I wanted to better
understand what was driving his behavior (and specifically, his
sentiment) on Twitter, and how those patterns compared to those of his
2016 rival, Hillary Clinton."),
a("Visit the GitHub repo for this project here.",
href = "https://github.com/trishprabhu/analyzing-realdonaldTrump"),
titlePanel("About The Data"),
p("In this project, I drew upon 3 distinct data sources, and ultimately
utilized 4 datasets. I sourced my Tweet data -- both for Donald Trump,
in 2020 (07/13/20 to 10/13/20), and Hillary Clinton, in 2016 (08/03/16
to 11/03/16) -- from the Trump Twitter Archive, a digital database of
prominent politicians' Tweets. In addition to the text data, the date,
time, Retweet count, and other relevant variables were included. I
sourced my data on Donald Trump's approval ratings from
FiveThirtyEight, a well-known forecasts website, that predicts
everything from election to sports outcomes. The data included the
various approval ratings captured by different polling agencies for
each day during Trump's presidency. Finally, I sourced my stock
volatility data from the CBOE's Volatility Index; the data included
daily datapoints on stock opening, closes, highs, and lows in 2020."),
a("See the data currently in use by visiting this Dropbox link.",
# At Dan's suggestion, I uploaded my datasets (which were large, and making it
# impossible for me to commit my work to GitHub) to Dropbox. Also, Dan,
# apologies -- the link below was too long to fit within the 80 character code
# line limit!
href = "https://www.dropbox.com/sh/5azksa5cvrsi9cs/AADvM-p9h8Sqf4oYzcgaMWXda?dl=0"),
titlePanel("About Me"),
p("My name is Trisha Prabhu, and I'm a member of Harvard College's
Class of 2022. Originally from Naperville, Illinois, at Harvard,
I reside in Cabot House. I'm concentrating in Government, on
the Tech Science pathway, and pursuing a secondary in Economics.
Within the broad field that is Government, I'm most passionate
about understanding the impact the rise of technology has had on
our society -- specifically, with regards to the way the digital
economy has shaped issues like free speech and privacy -- and
spearheading policy and work to address these challenges. You'll
often find me utilizing data science and quantitative research
methods to dig into this work.
You can reach me at trishaprabhu@college.harvard.edu.")
))
# Define server logic:
server <- function(input, output) {

  # Reactive: sentiment-score tibble backing the summary + preview table on
  # the "Tweet Analysis" tab, keyed off the account chosen in input$dataset.
  datasetInput <- reactive({
    switch(input$dataset,
# As I learned, the values below correspond to the choices argument above --
# important to ensure that everything stays consistent, or your code will break
# (as mine did, until I figured this out)!
           "Hillary Clinton" = hillary_sentiment_scores,
           "Donald Trump" = trump_sentiment_scores)
  })

  # Reactive: sentiment-score tibble for the sentiment histogram (bothPlot),
  # keyed off the account chosen in input$candidate.
  candidateInput <- reactive({
    switch(input$candidate,
           "Hillary Clinton" = hillary_sentiment_scores,
           "Donald Trump" = trump_sentiment_scores)
  })

  # Text summary() of the selected candidate's sentiment scores, with
  # reader-friendly column names.
  output$summary <- renderPrint({
    dataset <- datasetInput()
    tib <- dataset %>%
      rename("Tweets" = "element_id",
             "Sentence Number" = "sentence_id",
             "Word Count" = "word_count",
             "Sentiment" = "sentiment")
    summary(tib)
  })

  # Preview table: first input$obs rows of the selected sentiment dataset.
  output$view <- renderTable({
    dataset <- datasetInput()
    nicetib <- dataset %>%
      rename("Tweets" = "element_id",
             "Sentence Number" = "sentence_id",
             "Word Count" = "word_count",
             "Sentiment" = "sentiment")
    head(nicetib, n = input$obs)
  })

  # Histogram of per-sentence sentiment scores for the selected candidate,
  # with a user-controlled bin count (input$bins).
  output$bothPlot <- renderPlot({
    candidate <- candidateInput()
    candidate %>%
      ggplot(aes(x = sentiment)) +
      geom_histogram(bins = input$bins,
                     color = "white",
                     fill = "dodgerblue") +
      labs(x = "Sentiment Score",
           y = "Count",
# I know that the line below surpasses the 80 character limit, but cutting it
# off was not aesthetically appealing on my graph. Apologies!
           subtitle = "Overall, Hillary is very neutral in her Tweets; Trump is too, but with more variation",
           title = "Sentiment Expressed In Tweets",
           caption = "Source: Trump Twitter Archive") +
# I thought that explicitly graphing the mean of both Trump and Clinton's
# sentiment scores could help viewers better visualize the distribution overall
# (I also thought it was interesting that, on average, they are both very
# neutral -- likely a result of Trump's more positive Tweets "canceling out"
# his more negative Tweets).
      geom_vline(xintercept = mean(candidate$sentiment),
                 linetype = "dashed") +
      theme_classic()
  })

  # Scatterplot + linear fit: Trump's daily approval rating (x, as a percent)
  # vs. his mean daily Twitter sentiment score (y).
  output$approvalSentiment <- renderPlot({
    finalgraphtib %>%
      ggplot(aes(x = (approval_ratings/100), y = meanofmeans)) +
      geom_point() +
      geom_smooth(formula = y ~ x, method = "lm", se = TRUE) +
# I know that the lines below surpass the 80 character limit, but cutting them
# off was not aesthetically appealing on my graph. Apologies!
      labs(title = "Trump's daily approval ratings and sentiment scores on Twitter, 09/12 - 10/13",
           subtitle = "Trump's approval ratings and sentiment scores seem to be weakly positively correlated",
           x = "Approval Rating",
           y = "Sentiment Score",
           caption = "Source: Trump Twitter Archive") +
      scale_x_continuous(labels = scales::percent_format()) +
      theme_bw()
  })

  # Posterior distributions of sentiment score under three hypothetical
  # approval ratings (30%, 45%, 60%); `pp` holds posterior draws in columns
  # `1`:`3`, renamed here to their approval-rating labels.
  output$approvalPosterior <- renderPlot({
    approvalratingdistribution <- pp %>%
      rename(`30` = `1`) %>%
      rename(`45` = `2`) %>%
      rename(`60` = `3`) %>%
      pivot_longer(cols = `30`:`60`,
                   names_to = "parameter",
                   values_to = "score") %>%
      ggplot(aes(x = score, fill = parameter)) +
      geom_histogram(aes(y = after_stat(count/sum(count))),
                     alpha = 0.7,
                     bins = 100,
                     color = "white",
                     position = "identity") +
      labs(title = "Posterior Distributions for Sentiment Score",
# I know that the line below surpasses the 80 character limit, but cutting it
# off was not aesthetically appealing on my graph. Apologies!
           subtitle = "We have a much more precise estimate for the hypothetical Trump with a 45% approval rating, given the data",
           x = "Sentiment Score",
           y = "Proportion",
           caption = "Source: Trump Twitter Archive, FiveThirtyEight") +
      scale_y_continuous(labels = scales::percent_format()) +
      scale_fill_manual(name = "Approval Rating (%)",
                        values = c("dodgerblue", "salmon", "green")) +
      theme_bw()
    approvalratingdistribution
  })

  # Scatterplot + linear fit: daily stock opening/closing difference (x) vs.
  # Trump's mean daily sentiment score (y).
  output$stockSentiment <- renderPlot({
    stockgraph <- finalstocktib %>%
      ggplot(aes(x = range, y = meanofmeans)) +
      geom_point() +
      geom_smooth(formula = y ~ x, method = "lm", se = TRUE) +
# I know that the lines below surpass the 80 character limit, but cutting them
# off was not aesthetically appealing on my graph. Apologies!
      labs(title = "Stock opening/closing differences and Trump's daily sentiment scores on Twitter, 09/12 - 10/13",
           subtitle = "The S&P 500's opening/closing differences and Trump's sentiment scores seem to be very, very weakly negatively correlated",
           x = "Difference",
           y = "Sentiment Score",
           caption = "Source: Trump Twitter Archive, CBOE Volatility Index") +
      theme_bw()
    stockgraph
  })

  # Reactive: regression formula matching the model the user picked in the
  # "Interactive Regression Results" dropdown.
  regressiontableInput <- reactive({
    switch(input$regressiontable,
           "Approval Rating" = formula(finalstocktib$meanofmeans ~
                                         finalstocktib$approval_ratings),
           "Stock Market" = formula(finalstocktib$meanofmeans ~
                                      finalstocktib$range),
           "Interaction" = formula(finalstocktib$meanofmeans ~
                          finalstocktib$approval_ratings * finalstocktib$range))
  })

  # Fit the selected model with stan_glm (seed fixed for reproducible draws)
  # and render a gt table with +/- 2 SE confidence bounds.
  output$regressiontable <- render_gt({
    formula <- regressiontableInput()
    set.seed(10)
    fit_obj <- stan_glm(formula,
                        data = finalstocktib,
                        family = gaussian(),
                        refresh = 0)
    fit_obj %>%
      tidy() %>%
      mutate(confidencelow = estimate - (std.error * 2)) %>%
      mutate(confidencehigh = estimate + (std.error * 2)) %>%
      gt() %>%
      cols_label(term = "Predictor",
                 estimate = "Beta",
                 std.error = "Standard Error",
                 confidencelow = "CI Low",
                 confidencehigh = "CI High") %>%
      tab_header(title = "Regression of Trump's Twitter Sentiment Scores") %>%
# I know that the line below surpasses the 80 character limit, but cutting it
# off was not aesthetically appealing on my table. Apologies!
      tab_source_note("Source: Trump Twitter Archive, FiveThirtyEight, CBOE Volatility Index")
  })

  # Single-Tweet viewer: shows the chosen Tweet's text, sentiment, and
  # readability, filling the row green (positive) or red (negative).
  output$tweetread <- render_gt({
    tweetib1 %>%
      filter(element_id == input$tweetread) %>%
      ungroup() %>%
      select(text, sentimentmeans, Flesch) %>%
      rename("Tweet" = "text",
             "Sentiment" = "sentimentmeans",
             "Readability" = "Flesch") %>%
      gt() %>%
      tab_header(title = "Sentiment and Readability of Trump's Tweets",
# I know that the line below surpasses the 80 character limit, but cutting it
# off was not aesthetically appealing on my table. Apologies!
                 subtitle = "Readability: 0 - 100, 100 is most readable; Sentiment: -1 to 1, 1 is most positive") %>%
      tab_source_note("Source: Trump Twitter Archive") %>%
      tab_style(
        style = list(
          cell_fill(color = "lightgreen")
        ),
        locations = cells_body(
          rows = Sentiment > 0)
      ) %>%
      tab_style(
        style = list(
          cell_fill(color = "red")
        ),
        locations = cells_body(
          rows = Sentiment < 0)
      )
  })

  # Readability (Flesch) vs. sentiment scatterplot; Tweets shorter than 35
  # characters are labeled with their text via ggrepel.
  output$readability <- renderPlot({
    tweetgraph <- tweetib1 %>%
      ggplot(aes(x = Flesch, y = sentimentmeans, color = str_length(text))) +
      geom_point() +
      geom_label_repel(aes(label = ifelse(str_length(text) < 35,
                                          as.character(text),
                                          '')),
                       box.padding = 0.35,
                       point.padding = 0.5,
                       segment.color = 'grey50') +
      geom_smooth(formula = y ~ x, method = "lm", se = TRUE) +
# I know that the lines below surpass the 80 character limit, but cutting them
# off was not aesthetically appealing on my graph. Apologies!
      labs(title = "Readability and Sentiment of Trump's Tweets (09/12/20 - 10/13/20)",
           subtitle = "Readability has little relationship with Trump's sentiment on Twitter",
           x = "Readability (0 - 100; 0 is the least readable)",
           y = "Sentiment Score",
           caption = "Source: Trump Twitter Archive",
           color = "Character Count") +
      xlim(0, 100) +
      ylim(-1, 1) +
      theme_bw()
    tweetgraph
  })

  # Reactive: first 100 Tweet texts for the selected word-cloud source.
  # Fixed: the original placed return(data) inside only the "don20" branch,
  # so the "hill16" branch relied on implicit if-value semantics. Each branch
  # now yields its vector as the reactive's value directly.
  data_source <- reactive({
    if (input$source == "hill16") {
      hillarytweets$text[1:100]
    } else if (input$source == "don20") {
      trumptweets$text[1:100]
    }
  })

  # Build a wordcloud2 widget from raw character data: lowercases, strips
  # punctuation/numbers/stopwords (in the UI-selected language), then keeps
  # the num_words most frequent terms. Returns NULL if nothing survives.
  create_wordcloud <- function(data, num_words = 100, background = "white") {
    # Create corpus and clean.
    if (is.character(data)) {
      corpus <- Corpus(VectorSource(data))
      corpus <- tm_map(corpus, tolower)
      corpus <- tm_map(corpus, removePunctuation)
      corpus <- tm_map(corpus, removeNumbers)
      corpus <- tm_map(corpus, removeWords, stopwords(tolower(input$language)))
      tdm <- as.matrix(TermDocumentMatrix(corpus))
      data <- sort(rowSums(tdm), decreasing = TRUE)
      data <- data.frame(word = names(data), freq = as.numeric(data))
    }
    # Make sure a proper num_words is provided (floor of 3 words):
    if (!is.numeric(num_words) || num_words < 3) {
      num_words <- 3
    }
    # Grab the top n most common words:
    data <- head(data, n = num_words)
    if (nrow(data) == 0) {
      return(NULL)
    }
    wordcloud2(data, backgroundColor = background)
  }

  # Word cloud driven by candidate choice, word cap, and background color.
  output$cloud <- renderWordcloud2({
    create_wordcloud(data_source(),
                     num_words = input$num,
                     background = input$col)
  })

  # Reactive: raw Tweet tibble for the character-count histogram.
  histInput <- reactive({
    switch(input$hist,
           "Hillary Clinton" = hillarytweets,
           "Donald Trump" = trumptweets)
  })

  # Histogram of Tweet character counts (Twitter's classic 140-char limit
  # bounds the x axis).
  output$char <- renderPlot({
    histdataset <- histInput()
    characterhist <- histdataset %>%
      ggplot(aes(x = str_length(text))) +
      geom_histogram(binwidth = 10,
                     color = "white",
                     fill = "darkslategray2") +
      labs(title = "Character Count of Candidate's Tweets",
# I know that the line below surpasses the 80 character limit, but cutting it
# off was not aesthetically appealing on my graph. Apologies!
           subtitle = "Hillary tends to be verbose; Trump is even across the distribution",
           x = "Character Count",
           y = "Frequency",
           caption = "Source: Trump Twitter Archive") +
      xlim(0, 140) +
      theme_minimal()
    characterhist
  })

  # MathJax-rendered regression equations for the Discussion tab.
  output$eq1 <- renderUI({
    withMathJax(helpText("$$ sentiment_i = \\beta_0 +
                         \\beta_1 approvalratings_{i} + \\epsilon_i$$"))
  })
  output$eq2 <- renderUI({
    withMathJax(helpText("$$ sentiment_i = \\beta_0 + \\beta_1 range_{i} +
                         \\epsilon_i$$"))
  })
  output$eq3 <- renderUI({
    withMathJax(helpText("$$ sentiment_i = \\beta_0 +
                         \\beta_1 approvalratings_{i} + \\beta_2 range_{i}
                 + \\beta_3 (approvalratings_{i} * range_{i}) + \\epsilon_i$$"))
  })
}
# Run the application: wire together the UI definition and server logic
# declared above.
shinyApp(ui = ui, server = server)
|
/app.R
|
no_license
|
trishprabhu/analyzing-realdonaldTrump
|
R
| false
| false
| 32,172
|
r
|
# Just a note: many of my comments -- notes/explanations -- are in the R scripts
# "approvalgraph_code.R" and "model_code.R." Check in there to get a better
# sense of my thinking when putting together this ShinyApp!
# Load required libraries. (Each package is attached exactly once; the
# original loaded dplyr and ggthemes twice, which is harmless but redundant.)
library(shiny)
library(readr)
library(sentimentr)
library(tidyverse)
library(ggthemes)
library(dplyr)
library(colourpicker)
library(wordcloud2)
library(tm)
library(gt)
library(magrittr)
library(quanteda)
library(MASS)
library(rstanarm)
library(gtsummary)
library(broom.mixed)
library(ggrepel)
library(shinythemes)
# Pre-computed tibbles saved as rds files by the analysis scripts noted above.
finalstocktib <- read_rds("finalstock.rds")
finalgraphtib <- read_rds("finalgraph.rds")
tweetib1 <- read_rds("tweetib1.rds")
pp <- read_rds("pp.rds")
# Reading in the raw Tweet data for each candidate; the summary() calls are
# exploratory and only print to the console at app startup.
trumptweets <- read_csv("data/Trump_tweets (1).csv")
summary(trumptweets)
hillarytweets <- read_csv("data/hillarytweets.csv")
summary(hillarytweets)
# Rather than calculate sentiment scores for all of the Tweets (thousands of
# observations, which would substantially slow things down), score only a
# subset: the first 100 Tweets from each account.
trump_sentiment_scores <- sentiment(trumptweets$text[1:100])
hillary_sentiment_scores <- sentiment(hillarytweets$text[1:100])
# Candidate choices offered by several selectInput() widgets in the UI.
dataframe_options <- c("Hillary Clinton", "Donald Trump")
# Define UI:
ui <- navbarPage("Analyzing @realDonaldTrump: A Deep Dive Into Donald Trump's
Tweets",
tabPanel("Tweet Analysis",
fluidPage(theme = shinytheme("cerulean"),
titlePanel("Sentiment Analysis: A Glimpse At The Data"),
sidebarLayout(
sidebarPanel(
selectInput(inputId = "dataset",
label = "Choose a Twitter account:",
choices = c("Hillary Clinton",
"Donald Trump")),
# Originally, I just had a numericInput() box; at Dan's suggestion, I added a
# slider, so folks who visit my Shiny App can more easily look at the desired
# number of observations.
sliderInput("obs",
"Slide to the number of observations
to view:",
min = 0, max = 254, value = 30
)),
mainPanel(
verbatimTextOutput("summary"),
tableOutput("view")
)),
br(),
br(),
br(),
br(),
sidebarPanel(
numericInput("tweetread",
"Pick the Tweet you'd like to view:",
value = 5
)),
mainPanel(
gt_output(outputId = "tweetread")
),
# The sidebars are great spots to both 1) provide some context around the
# graphics, and 2) align/style the page so that the graphs were aesthetically
# appealing.
sidebarPanel(
p("Here, I visualize the distributions
of Trump and Clinton's Tweets' sentiment scores.
On average, they are both relatively neutral on Twitter,
but it's clear: Trump's Tweets see much more variation
in sentiment; by comparison, Clinton rarely reaches the
most extreme sentiment scores (1 and -1)."),
selectInput(inputId = "candidate",
label = "Choose a Twitter account:",
choices = dataframe_options)),
mainPanel(
br(),
br(),
br(),
br(),
plotOutput(outputId = "bothPlot"),
sliderInput("bins",
"Set the number of bins:",
min = 0, max = 50, value = 20
)))),
tabPanel("Models",
titlePanel("How/why does Trump's sentiment on Twitter change?"),
sidebarPanel(
titlePanel("Approval Rating"),
p("Here, I look at Donald Trump's daily approval
ratings and Twitter sentiment scores (the average sentiment
of his Tweets on a given day) over a 1 month period --
09/12/20 - 10/13/20. As we'd expect, Trump's approval
ratings and sentiment scores seem to be weakly positively
correlated (as his approval rating increases, he also
becomes more positive on Twitter -- perhaps as he becomes
more popular, it puts him in a sunny mood). One must be
cautious in drawing any conclusions, though -- not only is
the relationship relatively weak, this is also a relatively
short period of time; a longer period (like 1 year) -- with
more datapoints -- would likely be more telling. It's no
surprise that the confidence interval is so wide."),
br(),
br(),
br(),
br(),
br(),
p("In this graph, I visualize the posterior distributions for
Trump's daily Twitter sentiment score in 3 hypothetical
universes: one in which he has a 30% approval rating, one
in which he has a 45% approval rating, and one in which he has
a 60% approval rating. The distributions reflect the linear
relationship we observed above -- the hypothetical Trump with a
60% approval rating has a posterior distribution of sentiment
scores that is skewed to the right (more positive). It's also
clear that we have a much more precise estimate for the
hypothetical Trump with a 45% approval rating, given the data;
while, on average, the 30% and 60% approval rating scenarios
are less and more positive, respectively, the distributions are
rather wide, so we wouldn't be surprised if the Trump with a
30% approval rating had a positive daily Twitter sentiment
score."),
br(),
br(),
titlePanel("Stock Market"),
p("Here, I look at daily stock market opening/closing differences
and Donald Trump's corresponding Twitter sentiment scores over a
1 month period (09/12 - 10/13). Interestingly, the S&P 500's
opening/closing differences and Trump's sentiment scores seem to
be very weakly negatively correlated -- indeed the regression
results (which you can view below, in the interactive table!)
produce a coefficient which is very small/negative. Overall,
then, it seems that the stock market isn't associated with Donald
Trump's sentiment on Twitter, and any influence is such that as
the difference becomes more positive (a higher closing index
relative to the opening index) Trump becomes a bit more negative
on Twitter (perhaps he feels vindicated?).
While the relationship does seem to be very weak, we can still
use this dependent variable as a control in our regression of
Trump's sentiment scores on his approval ratings -- as we do
below."),
br(),
titlePanel("Interactive Regression Results"),
p("See an interpretation of these results in the Discussion
tab."),
selectInput(inputId = "regressiontable",
label = "Choose a variable:",
choices = c("Approval Rating",
"Stock Market",
"Interaction")),
br(),
br(),
br(),
br(),
titlePanel("Readability"),
p("Here, I look at the relationship between the readability of
Donald Trump's Tweets and the sentiment of those Tweets.
Interestingly, readability seems to have close to no relationship
with sentiment; regression results confirm this. The
visualization does pull out another trend, however; by only
displaying the text for those Tweets below a certain length
of characters, it seems that Trump's shorter tweets (generally)
tend to be more positive. Clearly, he doesn't like to brag!")),
mainPanel(
plotOutput(outputId = "approvalSentiment"),
plotOutput(outputId = "approvalPosterior"),
plotOutput(outputId = "stockSentiment"),
br(),
br(),
gt_output(outputId = "regressiontable"),
br(),
br(),
br(),
br(),
plotOutput(outputId = "readability"))),
tabPanel("Visualization",
titlePanel("Tweet Word Cloud"),
sidebarPanel(
radioButtons(
inputId = "source",
label = "Pick a candidate:",
choices = c(
"Hillary Clinton; 2016" = "hill16",
"Donald Trump; 2020" = "don20")
),
numericInput("num", "Maximum number of words:",
value = 100, min = 5),
colourInput("col", "Background Color:", value = "white"),
selectInput(
inputId = "language",
label = "Remove stopwords (e.g. and, the) in:",
choices = c("Danish", "Dutch", "English", "Finnish",
"French", "German", "Hungarian", "Italian",
"Norwegian", "Portuguese", "Russian", "Spanish",
"Swedish"),
multiple = FALSE,
selected = "English")),
mainPanel(wordcloud2Output("cloud")),
titlePanel("Character Count"),
sidebarPanel(selectInput(inputId = "hist",
label = "Choose a candidate:",
choices = c("Hillary Clinton",
"Donald Trump"))),
mainPanel(plotOutput(outputId = "char"))),
tabPanel("Discussion",
titlePanel("Interpreting the Models"),
p("This analysis refers to the Interactive Regression Results
displayed on the Models page."),
tags$b(p("Approval Rating")),
uiOutput('eq1'),
p("The first model regresses Trump's daily Twitter sentiment scores
on his associated daily approval ratings. The median of the
Intercept, -0.554, suggests that at a hypothetical approval rating
of 0, Trump's average sentiment score would be quite negative.
It is important to note: the standard error associated with
this value suggests that the 95% confidence interval is
(-1.17, 0.06), meaning that the true value could be positive, but
even so, barely positive. In other words, we can be fairly sure
-- though not entirely sure -- that Trump would have a negative
daily Twitter sentiment score at an approval rating of 0
(which, of course, makes sense!). The median of the coefficient
on the approval rating variable, 0.0138, suggests that on average,
a 1% increase in Trump's daily approval rating is associated with a
0.0138 increase in his daily Twitter sentiment score. In other
words, his popularity in the public is directly reflected in his
Tweets. Once again, the 95% confidence interval cautions us to
be wary; indeed, it suggests that the true value could be as
low as 0, or as as high as 0.02. We should far from accept these
findings as conclusive; they are not necessarily significant."),
tags$b(p("Stock Market")),
uiOutput('eq2'),
p("The second model regresses Trump's daily Twitter sentiment scores
on daily stock market opening/closing differences (does a big
jump or a big drop affect his sentiment on Twitter?). The median of
the Intercept, 0.05, suggests that at a hypothetical difference
value of 0, Trump's average sentiment score would be neutral.
Though relatively high, the Intercept's standard error value and
its resulting 95% confidence interval -- (0.015, 0.086) --
ultimately leads us to the same conclusion. The median of the
coefficient, -0.003, suggests that, on average, a 1 unit increase
in the stock market's opening/closing difference is associated with
a close to negligible dip in Trump's daily Twitter sentiment score.
In other words, it seems that the stock market's changes are not a
particularly powerful predictor of Trump's sentiment. This is, once
again, qualified by the standard error/confidence interval. The
standard error is very high -- 0.014 -- producing a wide confidence
interval of (-0.032, 0.027). It's clear, then, that the true value
could in fact suggest an important relationship between these two
variables. We should, then, take these findings with a grain of
salt."),
tags$b(p("Interaction")),
uiOutput('eq3'),
p("What if we create a model that looks at approval rating, stock
market opening/closing differences, and their interaction?"),
p("This is exactly what the last model aims to do, regressing
Trump's daily Twitter sentiment scores on his associated daily
approval ratings, the associated daily stock market
opening/closing differences, and their interaction."),
p("The median of the Intercept, -0.494, suggests that at a
hypothetical approval rating of 0% and a hypothetical stock
opening/closing difference of 0, Trump's average sentiment
score would be relatively negative; this should, however, be taken
with a grain of salt, given the high standard error value (0.334).
This ultimately implies a 95% confidence interval of
(-1.162, 0.174) -- so the true value could, in fact, represent
a positive sentiment score. (This is similar to the Intercept
we saw in the first model.) The median of the coefficient on
the approval rating variable suggests that at a hypothetical
stock difference value of 0, on average, a 1% increase in Trump's
daily approval rating is associated with a 0.0125 increase in his
daily Twitter sentiment score -- a value similar to the first
model, but slightly lower. A larger standard error value here
suggests that the true value could be as low as -0.003 or as
high as 0.003. The median of the coefficient on the range
variable suggests that at a hypothetical approval rating of
0%, on average, a 1 unit increase in the stock market's
opening/closing difference is associated with a 0.045 increase in
Trump's daily Twitter sentiment score. This is quite different from
the second model, which implied a neglible dip. In any case, once
again, a large standard error value keeps us from striking gold;
with a 95% confidence interval of (-0.228, 0.318), the true value
could be neglible or a robust increase/decrease."),
p("Finally, the median of the interaction term suggests that at
hypothetical values of 1 for the approval rating and difference
variables, one would want to add the median Intercept, median
approval rating coefficient, median difference coefficient, and,
on average, -0.000995 to predict Trump's sentiment score.
Like the others, this value is both small and insignificant, as
indicated by the broad 95% confidence interval (-0.00729,
0.00530).")),
tabPanel("About",
titlePanel("Project Background and Motivations"),
p("This project aims to explore US President Donald Trump's Tweets in
the months leading up to the 2020 General Election. Unlike his
predecessors, Trump has used social media extensively, through which
he reaches over 88 million followers on Twitter alone. Given the
influence his Tweets have had during his Presidency, I wanted to better
understand what was driving his behavior (and specifically, his
sentiment) on Twitter, and how those patterns compared to those of his
2016 rival, Hillary Clinton."),
a("Visit the GitHub repo for this project here.",
href = "https://github.com/trishprabhu/analyzing-realdonaldTrump"),
titlePanel("About The Data"),
p("In this project, I drew upon 3 distinct data sources, and ultimately
utilized 4 datasets. I sourced my Tweet data -- both for Donald Trump,
in 2020 (07/13/20 to 10/13/20), and Hillary Clinton, in 2016 (08/03/16
to 11/03/16) -- from the Trump Twitter Archive, a digital database of
prominent politicians' Tweets. In addition to the text data, the date,
time, Retweet count, and other relevant variables were included. I
sourced my data on Donald Trump's approval ratings from
FiveThirtyEight, a well-known forecasts website, that predicts
everything from election to sports outcomes. The data included the
various approval ratings captured by different polling agencies for
each day during Trump's presidency. Finally, I sourced my stock
volatility data from the CBOE's Volatility Index; the data included
daily datapoints on stock opening, closes, highs, and lows in 2020."),
a("See the data currently in use by visiting this Dropbox link.",
# At Dan's suggestion, I uploaded my datasets (which were large, and making it
# impossible for me to commit my work to GitHub) to Dropbox. Also, Dan,
# apologies -- the link below was too long to fit within the 80 character code
# line limit!
href = "https://www.dropbox.com/sh/5azksa5cvrsi9cs/AADvM-p9h8Sqf4oYzcgaMWXda?dl=0"),
titlePanel("About Me"),
p("My name is Trisha Prabhu, and I'm a member of Harvard College's
Class of 2022. Originally from Naperville, Illinois, at Harvard,
I reside in Cabot House. I'm concentrating in Government, on
the Tech Science pathway, and pursuing a secondary in Economics.
Within the broad field that is Government, I'm most passionate
about understanding the impact the rise of technology has had on
our society -- specifically, with regards to the way the digital
economy has shaped issues like free speech and privacy -- and
spearheading policy and work to address these challenges. You'll
often find me utilizing data science and quantitative research
methods to dig into this work.
You can reach me at trishaprabhu@college.harvard.edu.")
))
# Define server logic:
# Server logic: wires the reactive inputs from the UI to rendered summaries,
# tables, plots, and MathJax equations. Relies on datasets prepared upstream
# of this file: hillary_sentiment_scores, trump_sentiment_scores,
# finalgraphtib, pp (posterior predictions), finalstocktib, tweetib1,
# hillarytweets, and trumptweets.
server <- function(input, output) {

  # Sentiment dataset selected on the Data page. The switch values must match
  # the `choices` argument of the corresponding selectInput exactly.
  datasetInput <- reactive({
    switch(input$dataset,
           "Hillary Clinton" = hillary_sentiment_scores,
           "Donald Trump" = trump_sentiment_scores)
  })

  # Candidate dataset feeding the sentiment histogram.
  candidateInput <- reactive({
    switch(input$candidate,
           "Hillary Clinton" = hillary_sentiment_scores,
           "Donald Trump" = trump_sentiment_scores)
  })

  # Text summary of the selected sentiment dataset, with readable column names.
  output$summary <- renderPrint({
    dataset <- datasetInput()
    tib <- dataset %>%
      rename("Tweets" = "element_id",
             "Sentence Number" = "sentence_id",
             "Word Count" = "word_count",
             "Sentiment" = "sentiment")
    summary(tib)
  })

  # First `input$obs` rows of the selected dataset.
  output$view <- renderTable({
    dataset <- datasetInput()
    nicetib <- dataset %>%
      rename("Tweets" = "element_id",
             "Sentence Number" = "sentence_id",
             "Word Count" = "word_count",
             "Sentiment" = "sentiment")
    head(nicetib, n = input$obs)
  })

  # Histogram of per-sentence sentiment scores for the chosen candidate.
  # The dashed vertical line marks the mean score.
  output$bothPlot <- renderPlot({
    candidate <- candidateInput()
    candidate %>%
      ggplot(aes(x = sentiment)) +
      geom_histogram(bins = input$bins,
                     color = "white",
                     fill = "dodgerblue") +
      # The long subtitle is kept on one line so it renders intact.
      labs(x = "Sentiment Score",
           y = "Count",
           subtitle = "Overall, Hillary is very neutral in her Tweets; Trump is too, but with more variation",
           title = "Sentiment Expressed In Tweets",
           caption = "Source: Trump Twitter Archive") +
      geom_vline(xintercept = mean(candidate$sentiment),
                 linetype = "dashed") +
      theme_classic()
  })

  # Scatter + linear fit of Trump's daily approval ratings vs. mean daily
  # Twitter sentiment scores.
  output$approvalSentiment <- renderPlot({
    finalgraphtib %>%
      ggplot(aes(x = (approval_ratings/100), y = meanofmeans)) +
      geom_point() +
      geom_smooth(formula = y ~ x, method = "lm", se = TRUE) +
      labs(title = "Trump's daily approval ratings and sentiment scores on Twitter, 09/12 - 10/13",
           subtitle = "Trump's approval ratings and sentiment scores seem to be weakly positively correlated",
           x = "Approval Rating",
           y = "Sentiment Score",
           caption = "Source: Trump Twitter Archive") +
      scale_x_continuous(labels = scales::percent_format()) +
      theme_bw()
  })

  # Posterior distributions of sentiment score under hypothetical 30/45/60%
  # approval ratings, drawn from the posterior-prediction object `pp`
  # (columns `1`-`3`).
  output$approvalPosterior <- renderPlot({
    approvalratingdistribution <- pp %>%
      rename(`30` = `1`) %>%
      rename(`45` = `2`) %>%
      rename(`60` = `3`) %>%
      pivot_longer(cols = `30`:`60`,
                   names_to = "parameter",
                   values_to = "score") %>%
      ggplot(aes(x = score, fill = parameter)) +
      geom_histogram(aes(y = after_stat(count/sum(count))),
                     alpha = 0.7,
                     bins = 100,
                     color = "white",
                     position = "identity") +
      labs(title = "Posterior Distributions for Sentiment Score",
           subtitle = "We have a much more precise estimate for the hypothetical Trump with a 45% approval rating, given the data",
           x = "Sentiment Score",
           y = "Proportion",
           caption = "Source: Trump Twitter Archive, FiveThirtyEight") +
      scale_y_continuous(labels = scales::percent_format()) +
      scale_fill_manual(name = "Approval Rating (%)",
                        values = c("dodgerblue", "salmon", "green")) +
      theme_bw()
    approvalratingdistribution
  })

  # Scatter + linear fit of daily stock open/close differences vs. sentiment.
  output$stockSentiment <- renderPlot({
    stockgraph <- finalstocktib %>%
      ggplot(aes(x = range, y = meanofmeans)) +
      geom_point() +
      geom_smooth(formula = y ~ x, method = "lm", se = TRUE) +
      labs(title = "Stock opening/closing differences and Trump's daily sentiment scores on Twitter, 09/12 - 10/13",
           subtitle = "The S&P 500's opening/closing differences and Trump's sentiment scores seem to be very, very weakly negatively correlated",
           x = "Difference",
           y = "Sentiment Score",
           caption = "Source: Trump Twitter Archive, CBOE Volatility Index") +
      theme_bw()
    stockgraph
  })

  # Regression formula chosen for the interactive results table.
  regressiontableInput <- reactive({
    switch(input$regressiontable,
           "Approval Rating" = formula(finalstocktib$meanofmeans ~
                                         finalstocktib$approval_ratings),
           "Stock Market" = formula(finalstocktib$meanofmeans ~
                                      finalstocktib$range),
           "Interaction" = formula(finalstocktib$meanofmeans ~
                                     finalstocktib$approval_ratings * finalstocktib$range))
  })

  # Bayesian regression (stan_glm) of sentiment on the chosen predictor(s),
  # rendered as a gt table; the intervals are estimate +/- 2 standard errors.
  output$regressiontable <- render_gt({
    formula <- regressiontableInput()
    set.seed(10)  # reproducible posterior draws
    fit_obj <- stan_glm(formula,
                        data = finalstocktib,
                        family = gaussian(),
                        refresh = 0)
    fit_obj %>%
      tidy() %>%
      mutate(confidencelow = estimate - (std.error * 2)) %>%
      mutate(confidencehigh = estimate + (std.error * 2)) %>%
      gt() %>%
      cols_label(term = "Predictor",
                 estimate = "Beta",
                 std.error = "Standard Error",
                 confidencelow = "CI Low",
                 confidencehigh = "CI High") %>%
      tab_header(title = "Regression of Trump's Twitter Sentiment Scores") %>%
      tab_source_note("Source: Trump Twitter Archive, FiveThirtyEight, CBOE Volatility Index")
  })

  # Single-Tweet sentiment/readability table; rows are shaded green for
  # positive sentiment and red for negative sentiment.
  output$tweetread <- render_gt({
    tweetib1 %>%
      filter(element_id == input$tweetread) %>%
      ungroup() %>%
      select(text, sentimentmeans, Flesch) %>%
      rename("Tweet" = "text",
             "Sentiment" = "sentimentmeans",
             "Readability" = "Flesch") %>%
      gt() %>%
      tab_header(title = "Sentiment and Readability of Trump's Tweets",
                 subtitle = "Readability: 0 - 100, 100 is most readable; Sentiment: -1 to 1, 1 is most positive") %>%
      tab_source_note("Source: Trump Twitter Archive") %>%
      tab_style(
        style = list(
          cell_fill(color = "lightgreen")
        ),
        locations = cells_body(
          rows = Sentiment > 0)
      ) %>%
      tab_style(
        style = list(
          cell_fill(color = "red")
        ),
        locations = cells_body(
          rows = Sentiment < 0)
      )
  })

  # Readability vs. sentiment scatter; Tweets shorter than 35 characters
  # are labeled with their text.
  output$readability <- renderPlot({
    tweetgraph <- tweetib1 %>%
      ggplot(aes(x = Flesch, y = sentimentmeans, color = str_length(text))) +
      geom_point() +
      geom_label_repel(aes(label = ifelse(str_length(text) < 35,
                                          as.character(text),
                                          '')),
                       box.padding = 0.35,
                       point.padding = 0.5,
                       segment.color = 'grey50') +
      geom_smooth(formula = y ~ x, method = "lm", se = TRUE) +
      labs(title = "Readability and Sentiment of Trump's Tweets (09/12/20 - 10/13/20)",
           subtitle = "Readability has little relationship with Trump's sentiment on Twitter",
           x = "Readability (0 - 100; 0 is the least readable)",
           y = "Sentiment Score",
           caption = "Source: Trump Twitter Archive",
           color = "Character Count") +
      xlim(0, 100) +
      ylim(-1, 1) +
      theme_bw()
    tweetgraph
  })

  # Tweet text feeding the word cloud: the first 100 Tweets of whichever
  # source is selected. Bug fix: the original `return(data)` sat inside only
  # the "don20" branch, so the "hill16" branch depended on the invisible value
  # of the `if` expression; both branches now flow to one explicit result.
  data_source <- reactive({
    data <- NULL
    if (input$source == "hill16") {
      data <- hillarytweets$text[1:100]
    } else if (input$source == "don20") {
      data <- trumptweets$text[1:100]
    }
    data
  })

  # Build a wordcloud2 widget from raw text: lower-case, strip punctuation,
  # numbers, and stopwords (language chosen in the UI), then keep the
  # `num_words` most frequent terms. Returns NULL when no words remain.
  create_wordcloud <- function(data, num_words = 100, background = "white") {
    # Create corpus and clean; character input is tallied into a
    # word/frequency data.frame, anything else is assumed pre-tallied.
    if (is.character(data)) {
      corpus <- Corpus(VectorSource(data))
      corpus <- tm_map(corpus, tolower)
      corpus <- tm_map(corpus, removePunctuation)
      corpus <- tm_map(corpus, removeNumbers)
      corpus <- tm_map(corpus, removeWords, stopwords(tolower(input$language)))
      tdm <- as.matrix(TermDocumentMatrix(corpus))
      data <- sort(rowSums(tdm), decreasing = TRUE)
      data <- data.frame(word = names(data), freq = as.numeric(data))
    }
    # Guard against non-numeric or too-small word counts.
    if (!is.numeric(num_words) || num_words < 3) {
      num_words <- 3
    }
    # Keep only the top n most common words.
    data <- head(data, n = num_words)
    if (nrow(data) == 0) {
      return(NULL)
    }
    wordcloud2(data, backgroundColor = background)
  }

  output$cloud <- renderWordcloud2({
    create_wordcloud(data_source(),
                     num_words = input$num,
                     background = input$col)
  })

  # Candidate Tweet set for the character-count histogram.
  histInput <- reactive({
    switch(input$hist,
           "Hillary Clinton" = hillarytweets,
           "Donald Trump" = trumptweets)
  })

  # Histogram of Tweet lengths over Twitter's classic 0-140 character range.
  output$char <- renderPlot({
    histdataset <- histInput()
    characterhist <- histdataset %>%
      ggplot(aes(x = str_length(text))) +
      geom_histogram(binwidth = 10,
                     color = "white",
                     fill = "darkslategray2") +
      labs(title = "Character Count of Candidate's Tweets",
           subtitle = "Hillary tends to be verbose; Trump is even across the distribution",
           x = "Character Count",
           y = "Frequency",
           caption = "Source: Trump Twitter Archive") +
      xlim(0, 140) +
      theme_minimal()
    characterhist
  })

  # Regression equations rendered with MathJax on the Discussion tab.
  # (Whitespace inside $$...$$ is insignificant to MathJax.)
  output$eq1 <- renderUI({
    withMathJax(helpText("$$ sentiment_i = \\beta_0 +
                         \\beta_1 approvalratings_{i} + \\epsilon_i$$"))
  })

  output$eq2 <- renderUI({
    withMathJax(helpText("$$ sentiment_i = \\beta_0 + \\beta_1 range_{i} +
                         \\epsilon_i$$"))
  })

  output$eq3 <- renderUI({
    withMathJax(helpText("$$ sentiment_i = \\beta_0 +
                         \\beta_1 approvalratings_{i} + \\beta_2 range_{i}
                         + \\beta_3 (approvalratings_{i} * range_{i}) + \\epsilon_i$$"))
  })
}
# Run the application: assemble the UI definition and server function into a
# Shiny app object (served when this script is executed by Shiny).
shinyApp(ui = ui, server = server)
|
# Coordinate reference system for all spatial layers: WGS84 longitude/latitude.
crs1 <- "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
# Per-country shapefile locations and attribute names:
#   boundary_shape_path / boundary_layer_name  : admin-level-1 boundary layer
#   catchment_shape_path / catchment_layer_name: catchment layer (Kenya only)
#   catchment_id_column                        : unique-ID field for features
# NOTE(review): "uganda" defines catchment_id_column but no catchment shape --
# confirm whether the boundary layer doubles as its catchment source.
country_settings <- list(
"uganda" = list(
"boundary_shape_path" = "shapes/uga_admbnda_adm1/uga_admbnda_adm1_UBOS_v2.shp",
"boundary_layer_name" = "uga_admbnda_adm1_UBOS_v2",
"catchment_id_column" = "pcode"
),
"kenya" = list(
"boundary_shape_path" = "shapes/kenya_adm1/KEN_adm1_mapshaper_corrected.shp",
"boundary_layer_name" = "KEN_adm1_mapshaper_corrected",
"catchment_shape_path" = "shapes/kenya_catchment/Busa_catchment.shp",
"catchment_layer_name" = "Busa_catchment",
"catchment_id_column" = "HYBAS_ID"
)
)
|
/settings.R
|
no_license
|
tedbol1/statistical_floodimpact_kenya
|
R
| false
| false
| 629
|
r
|
# Coordinate reference system for all spatial layers: WGS84 longitude/latitude.
crs1 <- "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
# Per-country shapefile locations and attribute names:
#   boundary_shape_path / boundary_layer_name  : admin-level-1 boundary layer
#   catchment_shape_path / catchment_layer_name: catchment layer (Kenya only)
#   catchment_id_column                        : unique-ID field for features
# NOTE(review): "uganda" defines catchment_id_column but no catchment shape --
# confirm whether the boundary layer doubles as its catchment source.
country_settings <- list(
"uganda" = list(
"boundary_shape_path" = "shapes/uga_admbnda_adm1/uga_admbnda_adm1_UBOS_v2.shp",
"boundary_layer_name" = "uga_admbnda_adm1_UBOS_v2",
"catchment_id_column" = "pcode"
),
"kenya" = list(
"boundary_shape_path" = "shapes/kenya_adm1/KEN_adm1_mapshaper_corrected.shp",
"boundary_layer_name" = "KEN_adm1_mapshaper_corrected",
"catchment_shape_path" = "shapes/kenya_catchment/Busa_catchment.shp",
"catchment_layer_name" = "Busa_catchment",
"catchment_id_column" = "HYBAS_ID"
)
)
|
# Assign cluster memberships for prediction output.
#
# dataset: list of 2 objects -
#   datasetURI: character string, code name of dataset
#   dataEntry:  data.frame with 2 columns (compound name, feature values)
#   (accepted for interface compatibility; not read here)
# rawModel: base64-encoded, serialized cluster-membership vector, or a list
#   of such vectors for biclustering (row and column memberships)
# additionalInfo: list of summary clustering statistics and graphs;
#   $predictedFeatures supplies the output column name(s)
#
# Returns a list with one element, `predictions`: a list of unboxed
# single-column data.frames, one per clustered item (JSON-serialization ready).
pred.clusters <-
function(dataset,rawModel,additionalInfo){
  # Decode the stored model back into the membership vector (or list).
  decoded <- base64Decode(rawModel, 'raw')
  clust.classes <- unserialize(decoded)
  clust.name <- additionalInfo$predictedFeatures
  if (is.list(clust.classes)) {
    # Biclustering case: flatten row/column memberships into one vector.
    clust.classes <- unlist(clust.classes)
    names(clust.classes) <- NULL # ! until we solve the issue!
  }
  # One unboxed single-column data.frame per item. (Replaces the original
  # grow-a-list-in-a-loop pattern with a preallocated lapply.)
  p7.1 <- lapply(seq_along(clust.classes), function(i) {
    w1 <- data.frame(clust.classes[i])
    colnames(w1) <- clust.name
    unbox(w1)
  })
  list(predictions = p7.1)
}
|
/clusteringPkg.Rcheck/00_pkg_src/clusteringPkg/R/pred.clusters.R
|
no_license
|
GTsiliki/clustPkg
|
R
| false
| false
| 1,700
|
r
|
# Assign cluster memberships for prediction output.
#
# dataset: list of 2 objects -
#   datasetURI: character string, code name of dataset
#   dataEntry:  data.frame with 2 columns (compound name, feature values)
#   (accepted for interface compatibility; not read here)
# rawModel: base64-encoded, serialized cluster-membership vector, or a list
#   of such vectors for biclustering (row and column memberships)
# additionalInfo: list of summary clustering statistics and graphs;
#   $predictedFeatures supplies the output column name(s)
#
# Returns a list with one element, `predictions`: a list of unboxed
# single-column data.frames, one per clustered item (JSON-serialization ready).
pred.clusters <-
function(dataset,rawModel,additionalInfo){
  # Decode the stored model back into the membership vector (or list).
  decoded <- base64Decode(rawModel, 'raw')
  clust.classes <- unserialize(decoded)
  clust.name <- additionalInfo$predictedFeatures
  if (is.list(clust.classes)) {
    # Biclustering case: flatten row/column memberships into one vector.
    clust.classes <- unlist(clust.classes)
    names(clust.classes) <- NULL # ! until we solve the issue!
  }
  # One unboxed single-column data.frame per item. (Replaces the original
  # grow-a-list-in-a-loop pattern with a preallocated lapply.)
  p7.1 <- lapply(seq_along(clust.classes), function(i) {
    w1 <- data.frame(clust.classes[i])
    colnames(w1) <- clust.name
    unbox(w1)
  })
  list(predictions = p7.1)
}
|
# Dependencies: sp/methods for the S4 SOILWAT2 output objects, plyr/dplyr
# for data manipulation.
library("sp")
library('methods')
library("plyr")
library("dplyr")
#library(rSOILWAT2)
#These are the functions I need:
# if (!exists("vwcmatric.dy")) vwcmatric.dy <- get_Response_aggL(swof["sw_vwcmatric"], tscale = "dy",
# scaler = 1, FUN = stats::weighted.mean, weights = layers_width,
# x = runDataSC, st = isim_time, st2 = simTime2, topL = topL, bottomL = bottomL)
#  if (!exists("swpmatric.dy")) swpmatric.dy <- get_SWPmatric_aggL(vwcmatric.dy, texture, sand, clay)
# Cluster paths: root of the historical SOILWAT2 runs and the output home.
#dir.AFRI_Historical <- "/projects/ecogis/SOILWAT2_Projects/AFRI/Historical"
dir.AFRI_Historical <- "/cxfs/projects/usgs/ecosystems/sbsc/AFRI/Historical"
dir.jbHOME <- "/cxfs/projects/usgs/ecosystems/sbsc/drylandeco/AFRI/Exposure_Data"
# Regions to process; each region directory contains 3_Runs (simulation
# output) and 1_Input (soil tables).
regions <- c( "CaliforniaAnnual", "ColdDeserts", "HotDeserts", "NorthernMixedSubset", "SGS", "Western_Gap")#list.files(dir.AFRI_Historical)
print(regions)
dir.regions <- file.path(dir.AFRI_Historical, regions)
dir.regions_3Runs <- file.path(dir.AFRI_Historical, regions, "3_Runs" )
dir.regions_1Input <- file.path(dir.AFRI_Historical, regions, "1_Input")
print(dir.regions_3Runs)
print(dir.regions_1Input)
# Convert volumetric water content (fraction) to soil water potential in
# negative MPa, via a texture-based regression on sand and clay fractions.
# Vectorized over all three arguments.
VWCtoSWP_simple <- function(vwc, sand, clay){
  # Texture-dependent regression terms.
  theta_sat <- -14.2 * sand - 3.7 * clay + 50.5       # saturation VWC (%)
  psi_sat <- 10 ^ (-1.58 * sand - 0.63 * clay + 2.17) # air-entry term
  b_exp <- -0.3 * sand + 15.7 * clay + 3.10           # retention exponent
  # 1024 is the bar-scale conversion; -0.1 converts bars to negative MPa.
  psi_sat / ((vwc * 100 / theta_sat) ^ b_exp * 1024) * -0.1
}
#Function for calculating average temp on dry days.
# Sum of non-negative daily temperatures over "dry" days, i.e. days whose
# soil water potential is below -3 MPa. Returns 0 when no day qualifies.
# x: data.frame with columns Temp and SWP.
dryDD<-function(x){
  warm <- x$Temp
  warm[which(x$Temp < 0)] <- 0    # clamp sub-zero temperatures to zero
  dry_days <- which(x$SWP < -3)   # which() drops NAs, matching the original
  if (length(dry_days) > 0) sum(warm[dry_days]) else 0
}
# Per-site, per-year "hot and dry" degree-day sums over simulation days 91-181.
#
# RUN_DATA: SOILWAT2 output object (S4) with @VWCMATRIC@Day and @TEMP@Day slots.
# name:     site directory name; the numeric prefix before "_" keys the row in
#           the soil lookup tables.
#
# Reads the globals `soildepths`, `soilSAND`, `soilCLAY` (set per region in the
# driver loop) and calls VWCtoSWP_simple() and dryDD().
#
# Returns a 1-row data.frame (rowname = `name`) with one column per year: the
# annual sum of daily temperature (clamped at 0) over days whose depth-weighted
# soil water potential falls below -3 MPa.
#
# NOTE(review): days 91-181 are roughly Apr-Jun on a calendar-year DOY basis,
# not Jul-Sep as the function name suggests -- confirm the intended window
# (water-year indexing?).
calcHotDry_JulSep <- function(RUN_DATA, name){
#print("Pre d1")
#print(Sys.time())
# s=1
# sites <- list.files(dir.regions_3Runs[1])
# load(file.path(dir.regions_3Runs[1], sites[s], "sw_output_sc1.RData"))
# RUN_DATA <- runDataSC
# name=sites[s]
# Daily VWC and temperature, restricted to the day-of-year window 91-181.
# Column 5 of the TEMP table is taken as the daily temperature series --
# TODO confirm which temperature variable that column holds.
dVWC <- as.data.frame(RUN_DATA@VWCMATRIC@Day)
dTemps <- as.data.frame(RUN_DATA@TEMP@Day)
dVWC_JulSep <- dVWC[which(dVWC$Day %in% c(91:181)),]
dVWC_JulSep$Temp <- dTemps[which(dTemps$Day %in% c(91:181)),5]
# Look up this site's soil profile; depths beyond the profile max become NA
# so that layer widths only span the real profile.
s_name <- paste0("Site_", as.integer(substr(name, 1, regexpr('_', name)-1)) )
sdepths <- as.vector(soildepths[which(soildepths$Label==s_name), -1])
#str(sdepths)
maxdepth <- as.integer(sdepths[1])
#str(maxdepth)
sdepths[sdepths > maxdepth ] <- NA
sdepth <- sdepths[-1]
slyrwidths <- diff(c(0, na.omit(t(sdepth)) ) )
# Depth-weighted mean VWC across at most the top 6 soil layers.
numlyrs <- dim(dVWC)[2] - 2
#print(numlyrs)
nlyrs<-if(numlyrs<7){numlyrs} else {6}
#print(nlyrs)
if(numlyrs>1 & numlyrs<7 ){dVWC_JulSep$Alllyrs <- apply(as.matrix(dVWC_JulSep[, c(3:(numlyrs+2))]), 1, FUN=function(x) weighted.mean(x, slyrwidths[1:nlyrs]))}
if(numlyrs>1 & numlyrs>6 ){dVWC_JulSep$Alllyrs <- apply(as.matrix(dVWC_JulSep[, c(3:(6+2))]), 1, FUN=function(x) weighted.mean(x, slyrwidths[1:nlyrs]))}
if(numlyrs==1){dVWC_JulSep$Alllyrs <- as.matrix(dVWC_JulSep[, c(3:(numlyrs+2))])}
# Depth-weighted mean sand/clay fractions for the same layers, then convert
# the averaged VWC to soil water potential.
sSAND <- soilSAND[which(soilSAND$Label==s_name), c(2:(1+length(slyrwidths)))]
sCLAY <- soilCLAY[which(soilCLAY$Label==s_name), c(2:(1+length(slyrwidths)))]
sandMEANtop <- weighted.mean(sSAND[1:nlyrs], slyrwidths[1:nlyrs])
clayMEANtop <- weighted.mean(sCLAY[1:nlyrs], slyrwidths[1:nlyrs])
#dVWC_JulSep$count<-1:length(dVWC_JulSep$Year)
dVWC_JulSep$SWP <- VWCtoSWP_simple(vwc=dVWC_JulSep$Alllyrs, sand=sandMEANtop, clay=clayMEANtop)
#print(dVWC_JulSep$SWP[1:5])
#print(head(dVWC_JulSep))
d <- dVWC_JulSep[, c("Year", "Alllyrs", "Temp", "SWP")]
#print(head(d))
# Split by year, compute the dry-day temperature sum for each year, and
# reshape into a single row labeled with the site name.
d_all_list<-split(d,d$Year)
d_all_list1<- lapply(d_all_list, FUN=dryDD)
d1 <- ldply(d_all_list1, data.frame)
names(d1)[2] <- c(name)
d1 <- as.data.frame(t(d1))[2,]
rownames(d1) <- c( name)
return(d1)
}
# Driver: for each region, load the soil lookup tables, compute per-site dry
# degree-day sums in parallel (20 workers), and row-bind the regional results
# into one data.frame covering all regions.
print("Start Loop")
print(Sys.time())
# Parallel backend.
library("parallel")
library("foreach")
library("doParallel")
#detectCores()
for (r in seq_along(regions)) {
  # Soil layer depths and texture tables are read into globals because
  # calcHotDry_JulSep() looks them up by name.
  soildepths <- read.csv(file=file.path(dir.regions_1Input[r], "SWRuns_InputData_SoilLayers_v9.csv"), header=TRUE )
  print(paste("soildepths", dim(soildepths)) )
  soildata <- read.csv(file=file.path(dir.regions_1Input[r], "datafiles" , "SWRuns_InputData_soils_v12.csv"), header=TRUE )
  print(paste("soildata", dim(soildata)) )
  soilSAND <- soildata[, c(1, grep("Sand", names(soildata))) ]
  soilCLAY <- soildata[, c(1, grep("Clay", names(soildata))) ]
  sites <- list.files(dir.regions_3Runs[r])
  # NOTE(review): worker count is hard-coded; confirm 20 matches the target
  # cluster allocation.
  cl <- makeCluster(20)
  registerDoParallel(cl)
  # One result row per site; site directories that do not contain exactly one
  # file are skipped (their NULL result is a no-op under rbind).
  Below3DD_JulSep <- foreach(s = sites, .combine = rbind,.packages=c('plyr','dplyr')) %dopar% {
    f <- list.files(file.path(dir.regions_3Runs[r], s) )
    if(length(f)==1){
      load(file.path(dir.regions_3Runs[r], s, "sw_output_sc1.RData"))
      print(s)
      d <- calcHotDry_JulSep(RUN_DATA = runDataSC, name=s)
      d
    }
  }
  stopCluster(cl)
  print(paste(regions[r], "Done"))
  print(Sys.time())
  # Accumulate regional results. (Replaces the original scalar `ifelse()`
  # carrying side-effect assignments -- an anti-pattern -- with plain if/else.)
  if (r == 1) {
    annualBelow3DD_JulSep <- Below3DD_JulSep
  } else {
    annualBelow3DD_JulSep <- rbind(annualBelow3DD_JulSep, Below3DD_JulSep)
  }
}
# Columns are the simulation years 1915-2015.
names(annualBelow3DD_JulSep) <- paste(c(1915:2015))
save(annualBelow3DD_JulSep, file=file.path(dir.jbHOME, "Below3DD_JulSep19152015.Rdata"))
#DEVELOPMENT
# soildepths <- read.csv(file=file.path(dir.regions_1Input[1], "SWRuns_InputData_SoilLayers_v9.csv"), header=TRUE )
#
# soildata <- read.csv(file=file.path(dir.regions_1Input[1], "datafiles", "SWRuns_InputData_soils_v12.csv"), header=TRUE )
#
# metadata <- readRDS(file=file.path(dir.regions[1], "SFSW2_project_descriptions.rds") )
# #str(metadata[["sim_time"]])
# isim_time <- metadata[["sim_time"]]
# simTime2 <- metadata[["sim_time"]]$sim_time2_North
#
# layers_width <- getLayersWidth(layers_depth)
#
# load(file.path(dir.regions_3Runs[1], sites[1], "sw_output_sc1.RData"))
# dtemps <- as.data.frame(runDataSC@TEMP@Day)
# dVWC <- as.data.frame(runDataSC@VWCMATRIC@Day)
# dwd <- as.data.frame(runDataSC@WETDAY@Day)
# dSM <- as.data.frame(runDataSC@SWPMATRIC@Day)
# str(dSM)
# names(dSM)[c(-1, -2)] <- paste("SM", names(dSM)[c(-1, -2)])
# d_all2 <- merge(d_all, dSM, by=c("Year", "Day"))
# d_all2[c(3050: 3080),]
#dSNOW <- as.data.frame(runDataSC@SNOWPACK@Day)
#dtst <-aggregate(d_all, by=list(d$Year), FUN=length(), na.rm=TRUE)
|
/For_Sense/Ex_Below3DD_JulSep.R
|
no_license
|
bobshriver/Exposure_scripts
|
R
| false
| false
| 7,245
|
r
|
# Dependencies: sp/methods for the S4 SOILWAT2 output objects, plyr/dplyr
# for data manipulation.
library("sp")
library('methods')
library("plyr")
library("dplyr")
#library(rSOILWAT2)
#These are the functions I need:
# if (!exists("vwcmatric.dy")) vwcmatric.dy <- get_Response_aggL(swof["sw_vwcmatric"], tscale = "dy",
# scaler = 1, FUN = stats::weighted.mean, weights = layers_width,
# x = runDataSC, st = isim_time, st2 = simTime2, topL = topL, bottomL = bottomL)
#  if (!exists("swpmatric.dy")) swpmatric.dy <- get_SWPmatric_aggL(vwcmatric.dy, texture, sand, clay)
# Cluster paths: root of the historical SOILWAT2 runs and the output home.
#dir.AFRI_Historical <- "/projects/ecogis/SOILWAT2_Projects/AFRI/Historical"
dir.AFRI_Historical <- "/cxfs/projects/usgs/ecosystems/sbsc/AFRI/Historical"
dir.jbHOME <- "/cxfs/projects/usgs/ecosystems/sbsc/drylandeco/AFRI/Exposure_Data"
# Regions to process; each region directory contains 3_Runs (simulation
# output) and 1_Input (soil tables).
regions <- c( "CaliforniaAnnual", "ColdDeserts", "HotDeserts", "NorthernMixedSubset", "SGS", "Western_Gap")#list.files(dir.AFRI_Historical)
print(regions)
dir.regions <- file.path(dir.AFRI_Historical, regions)
dir.regions_3Runs <- file.path(dir.AFRI_Historical, regions, "3_Runs" )
dir.regions_1Input <- file.path(dir.AFRI_Historical, regions, "1_Input")
print(dir.regions_3Runs)
print(dir.regions_1Input)
# Convert volumetric water content (fraction) to soil water potential in
# negative MPa, via a texture-based regression on sand and clay fractions.
# Vectorized over all three arguments.
VWCtoSWP_simple <- function(vwc, sand, clay){
  # Texture-dependent regression terms.
  theta_sat <- -14.2 * sand - 3.7 * clay + 50.5       # saturation VWC (%)
  psi_sat <- 10 ^ (-1.58 * sand - 0.63 * clay + 2.17) # air-entry term
  b_exp <- -0.3 * sand + 15.7 * clay + 3.10           # retention exponent
  # 1024 is the bar-scale conversion; -0.1 converts bars to negative MPa.
  psi_sat / ((vwc * 100 / theta_sat) ^ b_exp * 1024) * -0.1
}
#Function for calculating average temp on dry days.
# Sum of non-negative daily temperatures over "dry" days, i.e. days whose
# soil water potential is below -3 MPa. Returns 0 when no day qualifies.
# x: data.frame with columns Temp and SWP.
dryDD<-function(x){
  warm <- x$Temp
  warm[which(x$Temp < 0)] <- 0    # clamp sub-zero temperatures to zero
  dry_days <- which(x$SWP < -3)   # which() drops NAs, matching the original
  if (length(dry_days) > 0) sum(warm[dry_days]) else 0
}
# Per-site, per-year "hot and dry" degree-day sums over simulation days 91-181.
#
# RUN_DATA: SOILWAT2 output object (S4) with @VWCMATRIC@Day and @TEMP@Day slots.
# name:     site directory name; the numeric prefix before "_" keys the row in
#           the soil lookup tables.
#
# Reads the globals `soildepths`, `soilSAND`, `soilCLAY` (set per region in the
# driver loop) and calls VWCtoSWP_simple() and dryDD().
#
# Returns a 1-row data.frame (rowname = `name`) with one column per year: the
# annual sum of daily temperature (clamped at 0) over days whose depth-weighted
# soil water potential falls below -3 MPa.
#
# NOTE(review): days 91-181 are roughly Apr-Jun on a calendar-year DOY basis,
# not Jul-Sep as the function name suggests -- confirm the intended window
# (water-year indexing?).
calcHotDry_JulSep <- function(RUN_DATA, name){
#print("Pre d1")
#print(Sys.time())
# s=1
# sites <- list.files(dir.regions_3Runs[1])
# load(file.path(dir.regions_3Runs[1], sites[s], "sw_output_sc1.RData"))
# RUN_DATA <- runDataSC
# name=sites[s]
# Daily VWC and temperature, restricted to the day-of-year window 91-181.
# Column 5 of the TEMP table is taken as the daily temperature series --
# TODO confirm which temperature variable that column holds.
dVWC <- as.data.frame(RUN_DATA@VWCMATRIC@Day)
dTemps <- as.data.frame(RUN_DATA@TEMP@Day)
dVWC_JulSep <- dVWC[which(dVWC$Day %in% c(91:181)),]
dVWC_JulSep$Temp <- dTemps[which(dTemps$Day %in% c(91:181)),5]
# Look up this site's soil profile; depths beyond the profile max become NA
# so that layer widths only span the real profile.
s_name <- paste0("Site_", as.integer(substr(name, 1, regexpr('_', name)-1)) )
sdepths <- as.vector(soildepths[which(soildepths$Label==s_name), -1])
#str(sdepths)
maxdepth <- as.integer(sdepths[1])
#str(maxdepth)
sdepths[sdepths > maxdepth ] <- NA
sdepth <- sdepths[-1]
slyrwidths <- diff(c(0, na.omit(t(sdepth)) ) )
# Depth-weighted mean VWC across at most the top 6 soil layers.
numlyrs <- dim(dVWC)[2] - 2
#print(numlyrs)
nlyrs<-if(numlyrs<7){numlyrs} else {6}
#print(nlyrs)
if(numlyrs>1 & numlyrs<7 ){dVWC_JulSep$Alllyrs <- apply(as.matrix(dVWC_JulSep[, c(3:(numlyrs+2))]), 1, FUN=function(x) weighted.mean(x, slyrwidths[1:nlyrs]))}
if(numlyrs>1 & numlyrs>6 ){dVWC_JulSep$Alllyrs <- apply(as.matrix(dVWC_JulSep[, c(3:(6+2))]), 1, FUN=function(x) weighted.mean(x, slyrwidths[1:nlyrs]))}
if(numlyrs==1){dVWC_JulSep$Alllyrs <- as.matrix(dVWC_JulSep[, c(3:(numlyrs+2))])}
# Depth-weighted mean sand/clay fractions for the same layers, then convert
# the averaged VWC to soil water potential.
sSAND <- soilSAND[which(soilSAND$Label==s_name), c(2:(1+length(slyrwidths)))]
sCLAY <- soilCLAY[which(soilCLAY$Label==s_name), c(2:(1+length(slyrwidths)))]
sandMEANtop <- weighted.mean(sSAND[1:nlyrs], slyrwidths[1:nlyrs])
clayMEANtop <- weighted.mean(sCLAY[1:nlyrs], slyrwidths[1:nlyrs])
#dVWC_JulSep$count<-1:length(dVWC_JulSep$Year)
dVWC_JulSep$SWP <- VWCtoSWP_simple(vwc=dVWC_JulSep$Alllyrs, sand=sandMEANtop, clay=clayMEANtop)
#print(dVWC_JulSep$SWP[1:5])
#print(head(dVWC_JulSep))
d <- dVWC_JulSep[, c("Year", "Alllyrs", "Temp", "SWP")]
#print(head(d))
# Split by year, compute the dry-day temperature sum for each year, and
# reshape into a single row labeled with the site name.
d_all_list<-split(d,d$Year)
d_all_list1<- lapply(d_all_list, FUN=dryDD)
d1 <- ldply(d_all_list1, data.frame)
names(d1)[2] <- c(name)
d1 <- as.data.frame(t(d1))[2,]
rownames(d1) <- c( name)
return(d1)
}
# --- Main processing --------------------------------------------------------
print("Start Loop")
print(Sys.time())
#Try in parallel
# Cluster-based parallelism: foreach + doParallel over sites within a region.
library("parallel")
library("foreach")
library("doParallel")
#detectCores()
# Loop over ecoregions; within each region, process every site on a 20-worker
# cluster and row-bind the per-site annual dry-degree-day rows, then stack
# regions and save the combined table.
for (r in seq_along(regions)) {
  # Soil-layer depths and soil-texture tables for this region. These are
  # assigned in the global environment because calcHotDry_JulSep() looks
  # them up there (soildepths, soilSAND, soilCLAY).
  soildepths <- read.csv(file=file.path(dir.regions_1Input[r], "SWRuns_InputData_SoilLayers_v9.csv"), header=TRUE )
  print(paste("soildepths", dim(soildepths)) )
  soildata <- read.csv(file=file.path(dir.regions_1Input[r], "datafiles" , "SWRuns_InputData_soils_v12.csv"), header=TRUE )
  print(paste("soildata", dim(soildata)) )
  soilSAND <- soildata[, c(1, grep("Sand", names(soildata))) ]
  soilCLAY <- soildata[, c(1, grep("Clay", names(soildata))) ]
  sites <- list.files(dir.regions_3Runs[r])

  cl <- makeCluster(20)
  registerDoParallel(cl)
  # One row per site; site folders that do not contain exactly one file
  # (the expected sw_output_sc1.RData) contribute NULL and are dropped by
  # foreach's rbind combine.
  Below3DD_JulSep <- foreach(s = sites, .combine = rbind, .packages = c('plyr', 'dplyr')) %dopar% {
    f <- list.files(file.path(dir.regions_3Runs[r], s) )
    if (length(f) == 1) {
      load(file.path(dir.regions_3Runs[r], s, "sw_output_sc1.RData"))  # loads runDataSC
      print(s)
      d <- calcHotDry_JulSep(RUN_DATA = runDataSC, name = s)
      d
    }
  }
  stopCluster(cl)
  print(paste(regions[r], "Done"))
  print(Sys.time())

  # Accumulate regions. Plain if/else replaces the original ifelse() call:
  # ifelse() is a vectorized data function and should not be used for scalar
  # control flow driven by assignment side effects.
  if (r == 1) {
    annualBelow3DD_JulSep <- Below3DD_JulSep
  } else {
    annualBelow3DD_JulSep <- rbind(annualBelow3DD_JulSep, Below3DD_JulSep)
  }
}
# Columns are the simulated years 1915-2015; persist the combined table.
names(annualBelow3DD_JulSep) <- paste(c(1915:2015))
save(annualBelow3DD_JulSep, file=file.path(dir.jbHOME, "Below3DD_JulSep19152015.Rdata"))
#DEVELOPMENT
# soildepths <- read.csv(file=file.path(dir.regions_1Input[1], "SWRuns_InputData_SoilLayers_v9.csv"), header=TRUE )
#
# soildata <- read.csv(file=file.path(dir.regions_1Input[1], "datafiles", "SWRuns_InputData_soils_v12.csv"), header=TRUE )
#
# metadata <- readRDS(file=file.path(dir.regions[1], "SFSW2_project_descriptions.rds") )
# #str(metadata[["sim_time"]])
# isim_time <- metadata[["sim_time"]]
# simTime2 <- metadata[["sim_time"]]$sim_time2_North
#
# layers_width <- getLayersWidth(layers_depth)
#
# load(file.path(dir.regions_3Runs[1], sites[1], "sw_output_sc1.RData"))
# dtemps <- as.data.frame(runDataSC@TEMP@Day)
# dVWC <- as.data.frame(runDataSC@VWCMATRIC@Day)
# dwd <- as.data.frame(runDataSC@WETDAY@Day)
# dSM <- as.data.frame(runDataSC@SWPMATRIC@Day)
# str(dSM)
# names(dSM)[c(-1, -2)] <- paste("SM", names(dSM)[c(-1, -2)])
# d_all2 <- merge(d_all, dSM, by=c("Year", "Day"))
# d_all2[c(3050: 3080),]
#dSNOW <- as.data.frame(runDataSC@SNOWPACK@Day)
#dtst <-aggregate(d_all, by=list(d$Year), FUN=length(), na.rm=TRUE)
# (The following stray lines were non-R text accidentally appended to the
# file -- dataset-viewer boilerplate -- commented out so the script parses.)
# |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.