content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# Fit a fast Extreme Learning Machine (ELM) regression model.
# y: numeric target vector; x: numeric input matrix (one row per observation).
# hd: number of hidden nodes (NULL -> 100); reps: training replications;
# comb: operator for combining replications; type: output-layer estimator;
# direct: add direct input-to-output connections; linscale: linearly scale
# inputs/target before training; output: "linear" or "logistic" activation;
# ortho: orthogonalise input weights when possible.
# Returns an object of class "elm.fast".
# NOTE(review): `core` defaults to character "FALSE"/"TRUE"; the later
# `core == FALSE` comparison only works via implicit coercion -- confirm
# intended API before changing to logicals.
elm.fast <- function(y,x,hd=NULL,type=c("lasso","ridge","step","ls"),reps=20,
comb=c("median","mean","mode"),direct=c(FALSE,TRUE),
linscale=c(TRUE,FALSE),output=c("linear","logistic"),
core=c("FALSE","TRUE"),ortho=c(FALSE,TRUE)){
# Resolve argument choices / defaults
type <- match.arg(type,c("lasso","ridge","step","ls"))
comb <- match.arg(comb,c("median","mean","mode"))
output <- match.arg(output,c("linear","logistic"))
direct <- direct[1]
core <- core[1]
linscale <- linscale[1]
ortho <- ortho[1]
# Logistic output is only implemented for the lasso estimator
if (output == "logistic" & type != "lasso"){
warning("Logisitc output can only be used with lasso, switching to lasso.")
type <- "lasso"
}
n.y <- length(y)
x.rc <- dim(x)
n.x <- x.rc[1]
p <- x.rc[2]
x.names <- colnames(x)
# Note: the logical `linscale` argument shadows the linscale() function by
# name, but R resolves function calls separately, so the calls below work.
if (linscale){
# Scale target
if (output == "logistic"){
sc.y <- linscale(y,minmax=list("mn"=-0,"mx"=1))
} else {
sc.y <- linscale(y,minmax=list("mn"=-.8,"mx"=0.8))
}
y.sc <- sc.y$x
minmax.y <- sc.y$minmax
# Scale all x's
x.sc <- apply(x,2,linscale,minmax=list("mn"=-.8,"mx"=0.8))
minmax.x <- sapply(x.sc, "[", 2)
x <- do.call(cbind,sapply(x.sc, "[", 1))
} else {
y.sc <- y
sc.y <- sc.x <- NULL
minmax.x <- minmax.y <- NULL
}
if (n.y != n.x){
stop("Number of fitting sample and input observations do not match")
}
if (is.null(hd)){
hd <- 100
}
# Initialise variables to store elm
# W.in: input weights per rep; W: hidden-to-output weights; W.dct: direct
# connection weights (NULL entries when direct == FALSE); B: output biases;
# Hd: retained hidden-node counts; Y.all: fitted values per replication.
W.in <- W <- W.dct <- vector("list",reps)
B <- Hd <- vector("numeric",reps)
Y.all <- array(NA,c(n.y,reps))
# Iterate for each training replication
for (r in 1:reps){
# Calculate hidden layer values
w.in <- init.w(p,hd)
if (ortho == TRUE){
# Orthogonal weights (when possible)
if ((p+1) >= hd){
w.in <- svd(t(w.in))$v
}
}
if (!is.null(x.names)){
rownames(w.in) <- c("Bias",x.names)
}
hd.hat <- fast.sig(cbind(1,x) %*% w.in)
# Allow direct connections
if (direct==TRUE){
z <- cbind(hd.hat,x)
} else {
z <- hd.hat
}
# Optimise last layer
w.out <- elm.train(y.sc,z,type,x,direct,hd,output)
# Distribute weights
B[r] <- w.out[1] # Bias (Constant)
if (direct == TRUE){ # Direct connections
w.dct <- w.out[(1+hd+1):(1+hd+p),,drop=FALSE]
if (!is.null(x.names)){
rownames(w.dct) <- x.names
}
W.dct[[r]] <- w.dct
}
w.out <- w.out[2:(1+hd),,drop=FALSE] # Hidden layer
# Eliminate unused neurons (zero output weight => neuron pruned)
W.in[[r]] <- w.in[,w.out != 0, drop=FALSE]
Hd[r] <- dim(W.in[[r]])[2]
W[[r]] <- w.out[w.out != 0,, drop=FALSE]
# Predict fitted values
Y.all[,r] <- predict.elm.fast.internal(x,W.in[[r]],W[[r]],B[r],W.dct[[r]],direct)
# Reverse scaling or apply logistic
if (linscale){
if (output != "logistic"){
Y.all[,r] <- linscale(Y.all[,r],sc.y$minmax,rev=TRUE)$x
}
}
if (output == "logistic"){
Y.all[,r] <- linscale(fast.sig(Y.all[,r]),minmax=list("mn"=0,"mx"=1))$x
}
}
# When called as a "core" (from a wrapper), skip combining and MSE
if (core == FALSE){
if (reps > 1){
Y.hat <- frc.comb(Y.all,comb)
} else {
Y.hat <- Y.all
}
MSE <- mean((y-Y.hat)^2)
} else {
Y.hat <- NULL
MSE <- NULL
}
return(structure(list("hd"=Hd,"W"=W,"W.in"=W.in,"b"=B,"W.dct"=W.dct,
"fitted.all"=Y.all,"fitted"=Y.hat,"y"=y,
"type"=type,"comb"=comb,"direct"=direct,
"output"=output,"minmax"=minmax.y,"minmax.x"=minmax.x,
"MSE"=MSE),class="elm.fast"))
# out <- structure(list("net"=NULL,"hd"=f.elm$hd,"W"=f.elm$W,"W.in"=f.elm$W.in,"b"=f.elm$b,"W.dct"=f.elm$Wdct,
# "lags"=lags,"xreg.lags"=xreg.lags,"difforder"=difforder,
# "sdummy"=sdummy,"ff.det"=ff.det,"det.type"=det.type,"y"=y,"minmax"=sc$minmax,"xreg.minmax"=xreg.minmax,
# "comb"=comb,"type"=type,"direct"=direct,"fitted"=yout,"MSE"=MSE),class=c("elm.fast","elm"))
}
init.w <- function(p,hd){
  # Randomly initialise the input-layer weights of an ELM.
  # p: number of inputs; hd: number of hidden nodes.
  # Returns a (p + 1) x hd matrix (the extra row holds the bias weights)
  # with entries drawn uniformly from [-1/sqrt(p), 1/sqrt(p)].
  bound <- 1/sqrt(p)
  n.weights <- (p + 1)*hd
  weights <- runif(n.weights, min = -bound, max = bound)
  matrix(weights, nrow = p + 1)
}
fast.sig <- function(x){
  # Fast sigmoid-like squashing function: x / (1 + |x|).
  # Maps the real line into (-1, 1); vectorised and cheaper than the
  # logistic function.
  x / (1 + abs(x))
}
predict.elm.fast.internal <- function(x,w.in,w.out,b,w.dct,direct){
  # Core ELM forward pass for input matrix x.
  # w.in: input-to-hidden weights (first row is the bias); w.out:
  # hidden-to-output weights; b: output bias; w.dct: direct-connection
  # weights, used only when direct == TRUE.
  hidden <- fast.sig(cbind(1, x) %*% w.in)
  y.hat <- hidden %*% w.out + b
  if (direct == TRUE){
    y.hat <- y.hat + x %*% w.dct
  }
  y.hat
}
predict.elm.fast <- function(object,newx,na.rm=c(FALSE,TRUE),...){
  # Predict from a fitted elm.fast object.
  # object: result of elm.fast(); newx: matrix of new inputs with the same
  # columns as the training data; na.rm: remove NAs when combining
  # replications. Returns a list with the combined forecast (Y.hat) and
  # the per-replication forecasts (Y.all).
  # FIX: use inherits() instead of any(class(object) != "elm.fast"), which
  # wrongly rejected objects carrying additional classes (e.g.
  # c("elm.fast", "elm")).
  if (!inherits(object, "elm.fast")){
    stop("Use exclusively with objects that are of elm.fast class only!")
  }
  na.rm <- na.rm[1]
  reps <- length(object$b)
  W.in <- object$W.in
  output <- object$output
  minmax.y <- object$minmax
  minmax.x <- object$minmax.x
  n <- dim(newx)[1]
  p <- dim(newx)[2]
  elm.p <- dim(W.in[[1]])[1]-1 # -1 for bias
  if (p != elm.p){
    # FIX: message previously lacked a space ("5variables")
    stop(paste0("newx has incorrect number of variables. ELM trained with ",elm.p," variables."))
  }
  # Apply the training-time scaling to newx (if the model was scaled)
  if (!is.null(minmax.x)){
    for (i in 1:p){
      newx[,i] <- linscale(newx[,i],minmax=minmax.x[[i]])$x
    }
  }
  Y.all <- array(NA,c(n,reps))
  for (r in 1:reps){
    Y.all[,r] <- predict.elm.fast.internal(newx,W.in[[r]],object$W[[r]],object$b[r],object$W.dct[[r]],object$direct)
    # Reverse scaling or apply logistic
    if (!is.null(minmax.y)){
      if (output != "logistic"){
        Y.all[,r] <- linscale(Y.all[,r],minmax.y,rev=TRUE)$x
      }
    }
    if (output == "logistic"){
      Y.all[,r] <- linscale(fast.sig(Y.all[,r]),minmax=list("mn"=0,"mx"=1))$x
    }
  }
  # Combine replications into a single forecast
  if (reps > 1){
    Y.hat <- frc.comb(Y.all,object$comb,na.rm)
  } else {
    Y.hat <- Y.all
  }
  return(list("Y.hat"=Y.hat,"Y.all"=Y.all))
}
# Scatterplot of actual (y-axis) versus fitted (x-axis) values for an
# elm.fast object: grey dots show each training replication, black dots
# the combined fit, and the grey line marks perfect agreement.
plot.elm.fast <- function(x, ...){
reps <- length(x$b)
# Common square axis range over actuals and all fitted values, padded 4%
yy <- range(cbind(x$y,x$fitted,x$fitted.all))
yy <- xx <- c(min(yy),max(yy)) + c(-1,+1)*0.04*diff(yy)
plot(NA,NA,xlab="Fitted",ylab="Actual",xlim=xx,ylim=yy)
# Identity reference line, extended beyond the visible region
lines(xx+c(-1,1)*0.2*diff(xx),yy+c(-1,1)*0.2*diff(yy),col="grey")
for (r in 1:reps){
points(x$fitted.all[,r],x$y,col="gray30",pch=".")
}
points(x$fitted,x$y,pch=20)
}
print.elm.fast <- function(x, ...){
  # Print a short human-readable summary of a fitted elm.fast object.
  hd <- x$hd
  reps <- length(x$b)
  hdt <- paste0(min(hd)," up to ",max(hd))
  # Pluralise "node" when any replication retained more than one neuron
  if (any(hd>1)){
    hde <- "s"
  } else {
    hde <- ""
  }
  dtx <- ""
  if (x$direct == TRUE){
    dtx <- ", direct output connections"
  }
  # FIX: the first line previously read "ELM (fast) with fit with ..."
  # because of a leftover "with" fragment in the paste0 call.
  writeLines(paste0("ELM (fast) fit with ", hdt," hidden node", hde, dtx," and ", reps, " repetition",if(reps>1){"s"},"."))
  if (reps>1){
    writeLines(paste0("Forecast combined using the ", x$comb, " operator."))
  }
  writeLines(paste0("Output weight estimation using: ", x$type, "."))
  if (!is.null(x$MSE)){
    writeLines(paste0("MSE: ",round(x$MSE,4),"."))
  }
}
|
/R/elm.fast.R
|
no_license
|
edergsc/TStools
|
R
| false
| false
| 7,268
|
r
|
# Fit a fast Extreme Learning Machine (ELM) regression model.
# y: numeric target vector; x: numeric input matrix (one row per observation).
# hd: number of hidden nodes (NULL -> 100); reps: training replications;
# comb: operator for combining replications; type: output-layer estimator;
# direct: add direct input-to-output connections; linscale: linearly scale
# inputs/target before training; output: "linear" or "logistic" activation;
# ortho: orthogonalise input weights when possible.
# Returns an object of class "elm.fast".
# NOTE(review): `core` defaults to character "FALSE"/"TRUE"; the later
# `core == FALSE` comparison only works via implicit coercion -- confirm
# intended API before changing to logicals.
elm.fast <- function(y,x,hd=NULL,type=c("lasso","ridge","step","ls"),reps=20,
comb=c("median","mean","mode"),direct=c(FALSE,TRUE),
linscale=c(TRUE,FALSE),output=c("linear","logistic"),
core=c("FALSE","TRUE"),ortho=c(FALSE,TRUE)){
# Resolve argument choices / defaults
type <- match.arg(type,c("lasso","ridge","step","ls"))
comb <- match.arg(comb,c("median","mean","mode"))
output <- match.arg(output,c("linear","logistic"))
direct <- direct[1]
core <- core[1]
linscale <- linscale[1]
ortho <- ortho[1]
# Logistic output is only implemented for the lasso estimator
if (output == "logistic" & type != "lasso"){
warning("Logisitc output can only be used with lasso, switching to lasso.")
type <- "lasso"
}
n.y <- length(y)
x.rc <- dim(x)
n.x <- x.rc[1]
p <- x.rc[2]
x.names <- colnames(x)
# Note: the logical `linscale` argument shadows the linscale() function by
# name, but R resolves function calls separately, so the calls below work.
if (linscale){
# Scale target
if (output == "logistic"){
sc.y <- linscale(y,minmax=list("mn"=-0,"mx"=1))
} else {
sc.y <- linscale(y,minmax=list("mn"=-.8,"mx"=0.8))
}
y.sc <- sc.y$x
minmax.y <- sc.y$minmax
# Scale all x's
x.sc <- apply(x,2,linscale,minmax=list("mn"=-.8,"mx"=0.8))
minmax.x <- sapply(x.sc, "[", 2)
x <- do.call(cbind,sapply(x.sc, "[", 1))
} else {
y.sc <- y
sc.y <- sc.x <- NULL
minmax.x <- minmax.y <- NULL
}
if (n.y != n.x){
stop("Number of fitting sample and input observations do not match")
}
if (is.null(hd)){
hd <- 100
}
# Initialise variables to store elm
# W.in: input weights per rep; W: hidden-to-output weights; W.dct: direct
# connection weights (NULL entries when direct == FALSE); B: output biases;
# Hd: retained hidden-node counts; Y.all: fitted values per replication.
W.in <- W <- W.dct <- vector("list",reps)
B <- Hd <- vector("numeric",reps)
Y.all <- array(NA,c(n.y,reps))
# Iterate for each training replication
for (r in 1:reps){
# Calculate hidden layer values
w.in <- init.w(p,hd)
if (ortho == TRUE){
# Orthogonal weights (when possible)
if ((p+1) >= hd){
w.in <- svd(t(w.in))$v
}
}
if (!is.null(x.names)){
rownames(w.in) <- c("Bias",x.names)
}
hd.hat <- fast.sig(cbind(1,x) %*% w.in)
# Allow direct connections
if (direct==TRUE){
z <- cbind(hd.hat,x)
} else {
z <- hd.hat
}
# Optimise last layer
w.out <- elm.train(y.sc,z,type,x,direct,hd,output)
# Distribute weights
B[r] <- w.out[1] # Bias (Constant)
if (direct == TRUE){ # Direct connections
w.dct <- w.out[(1+hd+1):(1+hd+p),,drop=FALSE]
if (!is.null(x.names)){
rownames(w.dct) <- x.names
}
W.dct[[r]] <- w.dct
}
w.out <- w.out[2:(1+hd),,drop=FALSE] # Hidden layer
# Eliminate unused neurons (zero output weight => neuron pruned)
W.in[[r]] <- w.in[,w.out != 0, drop=FALSE]
Hd[r] <- dim(W.in[[r]])[2]
W[[r]] <- w.out[w.out != 0,, drop=FALSE]
# Predict fitted values
Y.all[,r] <- predict.elm.fast.internal(x,W.in[[r]],W[[r]],B[r],W.dct[[r]],direct)
# Reverse scaling or apply logistic
if (linscale){
if (output != "logistic"){
Y.all[,r] <- linscale(Y.all[,r],sc.y$minmax,rev=TRUE)$x
}
}
if (output == "logistic"){
Y.all[,r] <- linscale(fast.sig(Y.all[,r]),minmax=list("mn"=0,"mx"=1))$x
}
}
# When called as a "core" (from a wrapper), skip combining and MSE
if (core == FALSE){
if (reps > 1){
Y.hat <- frc.comb(Y.all,comb)
} else {
Y.hat <- Y.all
}
MSE <- mean((y-Y.hat)^2)
} else {
Y.hat <- NULL
MSE <- NULL
}
return(structure(list("hd"=Hd,"W"=W,"W.in"=W.in,"b"=B,"W.dct"=W.dct,
"fitted.all"=Y.all,"fitted"=Y.hat,"y"=y,
"type"=type,"comb"=comb,"direct"=direct,
"output"=output,"minmax"=minmax.y,"minmax.x"=minmax.x,
"MSE"=MSE),class="elm.fast"))
# out <- structure(list("net"=NULL,"hd"=f.elm$hd,"W"=f.elm$W,"W.in"=f.elm$W.in,"b"=f.elm$b,"W.dct"=f.elm$Wdct,
# "lags"=lags,"xreg.lags"=xreg.lags,"difforder"=difforder,
# "sdummy"=sdummy,"ff.det"=ff.det,"det.type"=det.type,"y"=y,"minmax"=sc$minmax,"xreg.minmax"=xreg.minmax,
# "comb"=comb,"type"=type,"direct"=direct,"fitted"=yout,"MSE"=MSE),class=c("elm.fast","elm"))
}
init.w <- function(p,hd){
  # Randomly initialise the input-layer weights of an ELM.
  # p: number of inputs; hd: number of hidden nodes.
  # Returns a (p + 1) x hd matrix (the extra row holds the bias weights)
  # with entries drawn uniformly from [-1/sqrt(p), 1/sqrt(p)].
  bound <- 1/sqrt(p)
  n.weights <- (p + 1)*hd
  weights <- runif(n.weights, min = -bound, max = bound)
  matrix(weights, nrow = p + 1)
}
fast.sig <- function(x){
  # Fast sigmoid-like squashing function: x / (1 + |x|).
  # Maps the real line into (-1, 1); vectorised and cheaper than the
  # logistic function.
  x / (1 + abs(x))
}
predict.elm.fast.internal <- function(x,w.in,w.out,b,w.dct,direct){
  # Core ELM forward pass for input matrix x.
  # w.in: input-to-hidden weights (first row is the bias); w.out:
  # hidden-to-output weights; b: output bias; w.dct: direct-connection
  # weights, used only when direct == TRUE.
  hidden <- fast.sig(cbind(1, x) %*% w.in)
  y.hat <- hidden %*% w.out + b
  if (direct == TRUE){
    y.hat <- y.hat + x %*% w.dct
  }
  y.hat
}
predict.elm.fast <- function(object,newx,na.rm=c(FALSE,TRUE),...){
  # Predict from a fitted elm.fast object.
  # object: result of elm.fast(); newx: matrix of new inputs with the same
  # columns as the training data; na.rm: remove NAs when combining
  # replications. Returns a list with the combined forecast (Y.hat) and
  # the per-replication forecasts (Y.all).
  # FIX: use inherits() instead of any(class(object) != "elm.fast"), which
  # wrongly rejected objects carrying additional classes (e.g.
  # c("elm.fast", "elm")).
  if (!inherits(object, "elm.fast")){
    stop("Use exclusively with objects that are of elm.fast class only!")
  }
  na.rm <- na.rm[1]
  reps <- length(object$b)
  W.in <- object$W.in
  output <- object$output
  minmax.y <- object$minmax
  minmax.x <- object$minmax.x
  n <- dim(newx)[1]
  p <- dim(newx)[2]
  elm.p <- dim(W.in[[1]])[1]-1 # -1 for bias
  if (p != elm.p){
    # FIX: message previously lacked a space ("5variables")
    stop(paste0("newx has incorrect number of variables. ELM trained with ",elm.p," variables."))
  }
  # Apply the training-time scaling to newx (if the model was scaled)
  if (!is.null(minmax.x)){
    for (i in 1:p){
      newx[,i] <- linscale(newx[,i],minmax=minmax.x[[i]])$x
    }
  }
  Y.all <- array(NA,c(n,reps))
  for (r in 1:reps){
    Y.all[,r] <- predict.elm.fast.internal(newx,W.in[[r]],object$W[[r]],object$b[r],object$W.dct[[r]],object$direct)
    # Reverse scaling or apply logistic
    if (!is.null(minmax.y)){
      if (output != "logistic"){
        Y.all[,r] <- linscale(Y.all[,r],minmax.y,rev=TRUE)$x
      }
    }
    if (output == "logistic"){
      Y.all[,r] <- linscale(fast.sig(Y.all[,r]),minmax=list("mn"=0,"mx"=1))$x
    }
  }
  # Combine replications into a single forecast
  if (reps > 1){
    Y.hat <- frc.comb(Y.all,object$comb,na.rm)
  } else {
    Y.hat <- Y.all
  }
  return(list("Y.hat"=Y.hat,"Y.all"=Y.all))
}
# Scatterplot of actual (y-axis) versus fitted (x-axis) values for an
# elm.fast object: grey dots show each training replication, black dots
# the combined fit, and the grey line marks perfect agreement.
plot.elm.fast <- function(x, ...){
reps <- length(x$b)
# Common square axis range over actuals and all fitted values, padded 4%
yy <- range(cbind(x$y,x$fitted,x$fitted.all))
yy <- xx <- c(min(yy),max(yy)) + c(-1,+1)*0.04*diff(yy)
plot(NA,NA,xlab="Fitted",ylab="Actual",xlim=xx,ylim=yy)
# Identity reference line, extended beyond the visible region
lines(xx+c(-1,1)*0.2*diff(xx),yy+c(-1,1)*0.2*diff(yy),col="grey")
for (r in 1:reps){
points(x$fitted.all[,r],x$y,col="gray30",pch=".")
}
points(x$fitted,x$y,pch=20)
}
print.elm.fast <- function(x, ...){
  # Print a short human-readable summary of a fitted elm.fast object.
  hd <- x$hd
  reps <- length(x$b)
  hdt <- paste0(min(hd)," up to ",max(hd))
  # Pluralise "node" when any replication retained more than one neuron
  if (any(hd>1)){
    hde <- "s"
  } else {
    hde <- ""
  }
  dtx <- ""
  if (x$direct == TRUE){
    dtx <- ", direct output connections"
  }
  # FIX: the first line previously read "ELM (fast) with fit with ..."
  # because of a leftover "with" fragment in the paste0 call.
  writeLines(paste0("ELM (fast) fit with ", hdt," hidden node", hde, dtx," and ", reps, " repetition",if(reps>1){"s"},"."))
  if (reps>1){
    writeLines(paste0("Forecast combined using the ", x$comb, " operator."))
  }
  writeLines(paste0("Output weight estimation using: ", x$type, "."))
  if (!is.null(x$MSE)){
    writeLines(paste0("MSE: ",round(x$MSE,4),"."))
  }
}
|
## Variant co-occurrence analysis of PacBio variant calls.
## Reads the called-variant table, scores each variant pair with a
## hypergeometric tail test, writes diagnostic plots and a filtered
## co-occurrence table.
library("tidyverse")
filename <- "./result/pacbio/variant_called.tsv"
variant_data <- read_tsv(filename)
# Keep pairs supported by more than 2 shared reads; mlpvalue is the
# negative log p-value of P(X >= share) under the hypergeometric model
# (mac1/mac2: minor-allele counts at each site, tot1: total coverage).
variant_data_filtered <- variant_data %>%
filter( share > 2) %>%
mutate(mlpvalue = -phyper(q=share, m=mac1,n=tot1-mac1, k = mac2, lower.tail=FALSE, log.p=TRUE)) %>%
filter(!is.na(mlpvalue))
# Count the distinct genomic positions involved in strong associations
temp <- variant_data_filtered %>% filter(mlpvalue > 20) %>% select(pos1,pos2)
unique_sites <- c(temp$pos1, temp$pos2) %>%
sort() %>% unique() %>% length()
rm(temp)
# Histogram of -log p-values (capped below 30 for readability)
g <- variant_data_filtered %>% filter(mlpvalue < 30) %>%
ggplot() +
geom_histogram(mapping = aes(x = mlpvalue), bins = 100)
ggsave(filename="./pics/variant_call_pvalue.png",plot = g)
# Raw p-value histogram (overwritten below; computed but never saved)
g <- variant_data_filtered %>% ggplot() + geom_histogram(mapping = aes(x = exp(-mlpvalue)),bins = 100)
# Distance between moderately associated variant pairs
g <- variant_data_filtered %>%
filter(10 < mlpvalue & mlpvalue < 30) %>%
mutate(dist = abs(pos1-pos2)) %>%
select(dist) %>%
ggplot() +
geom_histogram(mapping = aes(x = dist))
ggsave(filename="./pics/variant_call_dist.png",plot=g)
# Positional scatter of a 1% sample of moderately associated pairs
g <- variant_data_filtered %>% filter(10 < mlpvalue & mlpvalue < 30) %>%
sample_frac(0.01) %>%
mutate(size = mlpvalue / 10) %>%
ggplot() +
geom_point(mapping = aes(x=pos1,y = pos2, size = size))
ggsave(filename = "./pics/variant_call_pvalue_point.png", plot = g)
# Positional scatter of pairs whose p-value underflowed to 0 (mlpvalue Inf)
g <- variant_data_filtered %>%
filter(mlpvalue == Inf) %>%
ggplot() +
geom_point(mapping = aes(x=pos1,y = pos2))
ggsave(filename = "./pics/variant_call_pvalue_point_inf.png", plot = g)
# Export significant pairs; Inf scores are clamped to 10000 for the TSV
variant_data_filtered %>%
select(pos1,pos2,mlpvalue) %>%
filter(mlpvalue > 9) %>%
mutate(mlpvalue = ifelse(mlpvalue == Inf, 10000, mlpvalue)) %>%
write_tsv("./result/variant_cooccurence.tsv")
|
/variant_calling/script/call_cooccurence.R
|
no_license
|
lizhizhong1992/reconstruct_mito_genome
|
R
| false
| false
| 1,719
|
r
|
## Variant co-occurrence analysis of PacBio variant calls.
## Reads the called-variant table, scores each variant pair with a
## hypergeometric tail test, writes diagnostic plots and a filtered
## co-occurrence table.
library("tidyverse")
filename <- "./result/pacbio/variant_called.tsv"
variant_data <- read_tsv(filename)
# Keep pairs supported by more than 2 shared reads; mlpvalue is the
# negative log p-value of P(X >= share) under the hypergeometric model
# (mac1/mac2: minor-allele counts at each site, tot1: total coverage).
variant_data_filtered <- variant_data %>%
filter( share > 2) %>%
mutate(mlpvalue = -phyper(q=share, m=mac1,n=tot1-mac1, k = mac2, lower.tail=FALSE, log.p=TRUE)) %>%
filter(!is.na(mlpvalue))
# Count the distinct genomic positions involved in strong associations
temp <- variant_data_filtered %>% filter(mlpvalue > 20) %>% select(pos1,pos2)
unique_sites <- c(temp$pos1, temp$pos2) %>%
sort() %>% unique() %>% length()
rm(temp)
# Histogram of -log p-values (capped below 30 for readability)
g <- variant_data_filtered %>% filter(mlpvalue < 30) %>%
ggplot() +
geom_histogram(mapping = aes(x = mlpvalue), bins = 100)
ggsave(filename="./pics/variant_call_pvalue.png",plot = g)
# Raw p-value histogram (overwritten below; computed but never saved)
g <- variant_data_filtered %>% ggplot() + geom_histogram(mapping = aes(x = exp(-mlpvalue)),bins = 100)
# Distance between moderately associated variant pairs
g <- variant_data_filtered %>%
filter(10 < mlpvalue & mlpvalue < 30) %>%
mutate(dist = abs(pos1-pos2)) %>%
select(dist) %>%
ggplot() +
geom_histogram(mapping = aes(x = dist))
ggsave(filename="./pics/variant_call_dist.png",plot=g)
# Positional scatter of a 1% sample of moderately associated pairs
g <- variant_data_filtered %>% filter(10 < mlpvalue & mlpvalue < 30) %>%
sample_frac(0.01) %>%
mutate(size = mlpvalue / 10) %>%
ggplot() +
geom_point(mapping = aes(x=pos1,y = pos2, size = size))
ggsave(filename = "./pics/variant_call_pvalue_point.png", plot = g)
# Positional scatter of pairs whose p-value underflowed to 0 (mlpvalue Inf)
g <- variant_data_filtered %>%
filter(mlpvalue == Inf) %>%
ggplot() +
geom_point(mapping = aes(x=pos1,y = pos2))
ggsave(filename = "./pics/variant_call_pvalue_point_inf.png", plot = g)
# Export significant pairs; Inf scores are clamped to 10000 for the TSV
variant_data_filtered %>%
select(pos1,pos2,mlpvalue) %>%
filter(mlpvalue > 9) %>%
mutate(mlpvalue = ifelse(mlpvalue == Inf, 10000, mlpvalue)) %>%
write_tsv("./result/variant_cooccurence.tsv")
|
library(preprocessCore)
exonGeneNames <- function(exonList, GenesDf) {
  # Map exon identifiers to their gene names and gene types.
  # exonList: exon IDs whose "<GeneID>.<...>" prefix encodes the gene.
  # GenesDf: annotation data frame with GeneID, GeneName and GeneType.
  # Returns a matrix with columns Exon, GeneName and geneType.
  gene.ids <- gsub("\\..+", "", as.character(exonList))
  idx <- match(gene.ids, GenesDf[, "GeneID"])
  gene.names <- as.character(GenesDf[idx, c("GeneName")])
  gene.types <- as.character(GenesDf[idx, c("GeneType")])
  cbind(Exon = exonList, GeneName = gene.names, geneType = gene.types)
}
makeExonBed <- function(InputFile, GenesDf) {
  # Build a BED-like data frame (chr, start, end, exon, geneName, strand)
  # from exon identifiers of the form "...SE:chr:start-end:...:strand".
  # InputFile: data frame with an Exon column; GenesDf: gene annotation
  # with GeneID/GeneName/GeneType columns (see exonGeneNames).
  Genes <- exonGeneNames(exonList = InputFile$Exon, GenesDf = GenesDf) %>% data.frame()
  strand <- InputFile$Exon %>% gsub(".+:", "", .)
  Exons <- InputFile$Exon %>%
    gsub(".+SE:|[:-]\\d+:[+-]", "", .) %>%
    gsub("\\d+-", "", .) %>%
    strsplit(., ":") %>% unlist() %>% matrix(., ncol = 3, byrow = T) %>%
    data.frame() %>%
    # BUG FIX: exonGeneNames() returns a "GeneName" column; the previous
    # Genes$exonGenes was NULL, so the geneName column was never created.
    mutate(exon = InputFile$Exon, geneName = Genes$GeneName, strand = strand)
  names(Exons)[1:3] <- c("chr", "start", "end")
  Exons$chr <- as.character(Exons$chr)
  Exons$start <- Exons$start %>% as.character %>% as.numeric()
  Exons$end <- Exons$end %>% as.character %>% as.numeric()
  #Exons <- Exons[order(Exons$chr, Exons$start), ]
  # Convert to 0-based starts per the BED convention
  Exons$start <- Exons$start - 1
  return(Exons)
}
# Aggregate a transcript-level expression data frame (rows = transcript
# IDs, columns = samples) to gene level, optionally quantile-normalizing
# (preprocessCore::normalize.quantiles) and averaging technical replicates
# whose columns are named "<Stage>_Rep<k>".
# Transcripts: mapping data frame with tx_id and gene_name columns.
# NOTE(review): the function name keeps the historical typo ("Epxression")
# because external callers depend on it.
processEpxressionFile <- function(dataFrame, Transcripts, replicates, normalize = T) {
if (normalize) {
print("performing quantile normalization")
} else {
print("processing without quantile normalization")
}
# Strip transcript version suffixes (".N") before matching to tx_id
rownames(dataFrame) <- gsub("\\.\\d+", "", rownames(dataFrame))
matches <- match(rownames(dataFrame), Transcripts$tx_id)
Genes <- Transcripts[matches, 'gene_name']
dataFrame <- cbind("GeneName" = Genes, dataFrame)
# Sum transcript values within each gene (NAs tolerated via na.pass)
GeneLevelTPM <- aggregate(.~GeneName, data = dataFrame, sum, na.rm = T, na.action = na.pass)
if (normalize) {
GeneLevelTPM <- GeneLevelTPM[ ,-1] %>% as.matrix() %>% normalize.quantiles() %>%
set_rownames(GeneLevelTPM$GeneName) %>%
set_colnames(colnames(GeneLevelTPM)[-1]) %>% data.frame(check.names = F)
}else {
GeneLevelTPM <- GeneLevelTPM[ ,-1] %>% set_rownames(GeneLevelTPM$GeneName)
}
# Average replicate columns down to one column per stage (transpose so
# aggregate can group by the Stage label, then transpose back)
if (replicates) {
GeneLevelTPM <- GeneLevelTPM %>% t() %>% data.frame() %>%
mutate(Stage = gsub("_Rep\\d+", "", colnames(GeneLevelTPM))) %>%
aggregate(.~Stage, data = ., mean, na.rm = T, na.action = na.pass) %>%
set_rownames(.$Stage) %>% .[,-1] %>% t() %>% data.frame() %>%
set_rownames(rownames(GeneLevelTPM))
}
#GeneLevelTPM <- GeneLevelTPM[ ,mixedsort(names(GeneLevelTPM))]
return(GeneLevelTPM)
}
# Average PSI (percent-spliced-in) values across technical replicates:
# columns named "<Stage>_Rep<k>" are collapsed to one column per Stage by
# the mean (NAs ignored), preserving the original row names.
processPSIfile <- function(psiFile) {
data.frame(t(psiFile)) %>%
mutate(Stage = gsub("_Rep\\d+", "", names(psiFile))) %>%
aggregate(.~Stage, data = ., mean, na.rm = T, na.action = na.pass) %>%
set_rownames(.$Stage) %>%
.[,-1] %>% t() %>% data.frame() %>%
set_rownames(rownames(psiFile))
}
naFilter <- function(dataFrame, cutoff) {
  # Drop rows with too many missing values: a row is kept only when its
  # NA count is strictly below ceiling(ncol * cutoff), i.e. cutoff is the
  # tolerated fraction of NAs per row.
  max.na <- ceiling(ncol(dataFrame) * cutoff)
  na.counts <- rowSums(is.na(dataFrame))
  dataFrame[na.counts < max.na, ]
}
|
/helperFunctions.R
|
no_license
|
hannenhalli-lab/AltSplDevCancer
|
R
| false
| false
| 2,997
|
r
|
library(preprocessCore)
exonGeneNames <- function(exonList, GenesDf) {
  # Map exon identifiers to their gene names and gene types.
  # exonList: exon IDs whose "<GeneID>.<...>" prefix encodes the gene.
  # GenesDf: annotation data frame with GeneID, GeneName and GeneType.
  # Returns a matrix with columns Exon, GeneName and geneType.
  gene.ids <- gsub("\\..+", "", as.character(exonList))
  idx <- match(gene.ids, GenesDf[, "GeneID"])
  gene.names <- as.character(GenesDf[idx, c("GeneName")])
  gene.types <- as.character(GenesDf[idx, c("GeneType")])
  cbind(Exon = exonList, GeneName = gene.names, geneType = gene.types)
}
makeExonBed <- function(InputFile, GenesDf) {
  # Build a BED-like data frame (chr, start, end, exon, geneName, strand)
  # from exon identifiers of the form "...SE:chr:start-end:...:strand".
  # InputFile: data frame with an Exon column; GenesDf: gene annotation
  # with GeneID/GeneName/GeneType columns (see exonGeneNames).
  Genes <- exonGeneNames(exonList = InputFile$Exon, GenesDf = GenesDf) %>% data.frame()
  strand <- InputFile$Exon %>% gsub(".+:", "", .)
  Exons <- InputFile$Exon %>%
    gsub(".+SE:|[:-]\\d+:[+-]", "", .) %>%
    gsub("\\d+-", "", .) %>%
    strsplit(., ":") %>% unlist() %>% matrix(., ncol = 3, byrow = T) %>%
    data.frame() %>%
    # BUG FIX: exonGeneNames() returns a "GeneName" column; the previous
    # Genes$exonGenes was NULL, so the geneName column was never created.
    mutate(exon = InputFile$Exon, geneName = Genes$GeneName, strand = strand)
  names(Exons)[1:3] <- c("chr", "start", "end")
  Exons$chr <- as.character(Exons$chr)
  Exons$start <- Exons$start %>% as.character %>% as.numeric()
  Exons$end <- Exons$end %>% as.character %>% as.numeric()
  #Exons <- Exons[order(Exons$chr, Exons$start), ]
  # Convert to 0-based starts per the BED convention
  Exons$start <- Exons$start - 1
  return(Exons)
}
# Aggregate a transcript-level expression data frame (rows = transcript
# IDs, columns = samples) to gene level, optionally quantile-normalizing
# (preprocessCore::normalize.quantiles) and averaging technical replicates
# whose columns are named "<Stage>_Rep<k>".
# Transcripts: mapping data frame with tx_id and gene_name columns.
# NOTE(review): the function name keeps the historical typo ("Epxression")
# because external callers depend on it.
processEpxressionFile <- function(dataFrame, Transcripts, replicates, normalize = T) {
if (normalize) {
print("performing quantile normalization")
} else {
print("processing without quantile normalization")
}
# Strip transcript version suffixes (".N") before matching to tx_id
rownames(dataFrame) <- gsub("\\.\\d+", "", rownames(dataFrame))
matches <- match(rownames(dataFrame), Transcripts$tx_id)
Genes <- Transcripts[matches, 'gene_name']
dataFrame <- cbind("GeneName" = Genes, dataFrame)
# Sum transcript values within each gene (NAs tolerated via na.pass)
GeneLevelTPM <- aggregate(.~GeneName, data = dataFrame, sum, na.rm = T, na.action = na.pass)
if (normalize) {
GeneLevelTPM <- GeneLevelTPM[ ,-1] %>% as.matrix() %>% normalize.quantiles() %>%
set_rownames(GeneLevelTPM$GeneName) %>%
set_colnames(colnames(GeneLevelTPM)[-1]) %>% data.frame(check.names = F)
}else {
GeneLevelTPM <- GeneLevelTPM[ ,-1] %>% set_rownames(GeneLevelTPM$GeneName)
}
# Average replicate columns down to one column per stage (transpose so
# aggregate can group by the Stage label, then transpose back)
if (replicates) {
GeneLevelTPM <- GeneLevelTPM %>% t() %>% data.frame() %>%
mutate(Stage = gsub("_Rep\\d+", "", colnames(GeneLevelTPM))) %>%
aggregate(.~Stage, data = ., mean, na.rm = T, na.action = na.pass) %>%
set_rownames(.$Stage) %>% .[,-1] %>% t() %>% data.frame() %>%
set_rownames(rownames(GeneLevelTPM))
}
#GeneLevelTPM <- GeneLevelTPM[ ,mixedsort(names(GeneLevelTPM))]
return(GeneLevelTPM)
}
# Average PSI (percent-spliced-in) values across technical replicates:
# columns named "<Stage>_Rep<k>" are collapsed to one column per Stage by
# the mean (NAs ignored), preserving the original row names.
processPSIfile <- function(psiFile) {
data.frame(t(psiFile)) %>%
mutate(Stage = gsub("_Rep\\d+", "", names(psiFile))) %>%
aggregate(.~Stage, data = ., mean, na.rm = T, na.action = na.pass) %>%
set_rownames(.$Stage) %>%
.[,-1] %>% t() %>% data.frame() %>%
set_rownames(rownames(psiFile))
}
naFilter <- function(dataFrame, cutoff) {
  # Drop rows with too many missing values: a row is kept only when its
  # NA count is strictly below ceiling(ncol * cutoff), i.e. cutoff is the
  # tolerated fraction of NAs per row.
  max.na <- ceiling(ncol(dataFrame) * cutoff)
  na.counts <- rowSums(is.na(dataFrame))
  dataFrame[na.counts < max.na, ]
}
|
## Exploratory Data Analysis course project -- plot 2: Global Active Power
## over time for 2007-02-01 and 2007-02-02.
## NOTE(review): setwd() plus absolute paths make this machine-specific;
## consider relative paths. Left unchanged here.
## Set Working Directory
setwd("~/Desktop/Course Project 1")
file <- "~/Desktop/Course Project 1/household_power_consumption.txt"
EPC <- read.table(file, header = TRUE, sep = ";", stringsAsFactors = FALSE, dec = ".")
# Keep only the two target days (dates are d/m/Y strings in the raw file)
EPC_Subset <- EPC[EPC$Date %in% c("1/2/2007", "2/2/2007"), ]
## Grab Necessary Data
# as.numeric turns non-numeric missing markers into NA -- presumably the
# dataset uses "?" for missing values; verify against the raw file.
Global_Active_Power <- as.numeric(EPC_Subset$Global_active_power)
DateTime <- strptime(paste(EPC_Subset$Date, EPC_Subset$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
## Png Data Plot
png("plot2.png", width = 480, height = 480)
plot(DateTime, Global_Active_Power, ylab = "Global Active Power (kilowatts)", xlab = "", type = "l")
dev.off()
|
/plot2.R
|
no_license
|
uwheartbreaker/ExData_Plotting1
|
R
| false
| false
| 661
|
r
|
## Exploratory Data Analysis course project -- plot 2: Global Active Power
## over time for 2007-02-01 and 2007-02-02.
## NOTE(review): setwd() plus absolute paths make this machine-specific;
## consider relative paths. Left unchanged here.
## Set Working Directory
setwd("~/Desktop/Course Project 1")
file <- "~/Desktop/Course Project 1/household_power_consumption.txt"
EPC <- read.table(file, header = TRUE, sep = ";", stringsAsFactors = FALSE, dec = ".")
# Keep only the two target days (dates are d/m/Y strings in the raw file)
EPC_Subset <- EPC[EPC$Date %in% c("1/2/2007", "2/2/2007"), ]
## Grab Necessary Data
# as.numeric turns non-numeric missing markers into NA -- presumably the
# dataset uses "?" for missing values; verify against the raw file.
Global_Active_Power <- as.numeric(EPC_Subset$Global_active_power)
DateTime <- strptime(paste(EPC_Subset$Date, EPC_Subset$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
## Png Data Plot
png("plot2.png", width = 480, height = 480)
plot(DateTime, Global_Active_Power, ylab = "Global Active Power (kilowatts)", xlab = "", type = "l")
dev.off()
|
insert_parent_env <- function(env, parent){
  # Splice `parent` into the environment chain directly above `env`:
  # afterwards parent.env(env) is `parent`, and parent.env(parent) is
  # whatever env's parent used to be. Returns `parent` invisibly.
  env <- as.environment(env)
  parent <- as.environment(parent)
  parent.env(parent) <- parent.env(env)
  parent.env(env) <- parent
  invisible(parent)
}
# Inline regression test for insert_parent_env(), kept out of normal
# execution via if(FALSE); the #@testing tag suggests a harness extracts
# and runs this block (expect_identical is testthat's).
if(FALSE){#@testing
a <- new.env(parent = baseenv())
b <- new.env(parent = emptyenv())
expect_identical(parent.env(a), baseenv())
expect_identical(parent.env(b), emptyenv())
insert_parent_env(a, b)
expect_identical(parent.env(a), b)
expect_identical(parent.env(b), baseenv())
}
|
/R/util-insert_parent_env.R
|
no_license
|
RDocTaskForce/extendedRef
|
R
| false
| false
| 549
|
r
|
insert_parent_env <- function(env, parent){
  # Splice `parent` into the environment chain directly above `env`:
  # afterwards parent.env(env) is `parent`, and parent.env(parent) is
  # whatever env's parent used to be. Returns `parent` invisibly.
  env <- as.environment(env)
  parent <- as.environment(parent)
  parent.env(parent) <- parent.env(env)
  parent.env(env) <- parent
  invisible(parent)
}
# Inline regression test for insert_parent_env(), kept out of normal
# execution via if(FALSE); the #@testing tag suggests a harness extracts
# and runs this block (expect_identical is testthat's).
if(FALSE){#@testing
a <- new.env(parent = baseenv())
b <- new.env(parent = emptyenv())
expect_identical(parent.env(a), baseenv())
expect_identical(parent.env(b), emptyenv())
insert_parent_env(a, b)
expect_identical(parent.env(a), b)
expect_identical(parent.env(b), baseenv())
}
|
#' parse a text using spaCy
#'
#' The spacy_parse() function calls spaCy to both tokenize and tag the texts,
#' and returns a data.table of the results. The function provides options on the
#' types of tagsets (\code{tagset_} options) either \code{"google"} or
#' \code{"detailed"}, as well as lemmatization (\code{lemma}). It provides the
#' functionalities of dependency parsing and named entity recognition as an
#' option. If \code{"full_parse = TRUE"} is provided, the function returns the
#' most extensive list of the parsing results from spaCy.
#'
#' @param x a character object, a \pkg{quanteda} corpus, or a TIF-compliant
#'   corpus data.frame (see \url{https://github.com/ropensci/tif})
#' @param pos logical whether to return universal dependency POS tagset
#'   \url{http://universaldependencies.org/u/pos/})
#' @param tag logical whether to return detailed part-of-speech tags, for the
#'   language model \code{en}, it uses the OntoNotes 5 version of the Penn
#'   Treebank tag set (\url{https://spacy.io/docs/usage/pos-tagging#pos-schemes}).
#'   Annotation specifications for other available languages are available on the
#'   spaCy website (\url{https://spacy.io/api/annotation}).
#' @param lemma logical; include lemmatized tokens in the output (lemmatization
#'   may not work properly for non-English models)
#' @param entity logical; if \code{TRUE}, report named entities
#' @param dependency logical; if \code{TRUE}, analyze and return dependencies
#' @param ... not used directly
#' @return a \code{data.frame} of tokenized, parsed, and annotated tokens
#' @export
#' @examples
#' \donttest{
#' spacy_initialize()
#' # See Chap 5.1 of the NLTK book, http://www.nltk.org/book/ch05.html
#' txt <- "And now for something completely different."
#' spacy_parse(txt)
#' spacy_parse(txt, pos = TRUE, tag = TRUE)
#' spacy_parse(txt, dependency = TRUE)
#'
#' txt2 <- c(doc1 = "The fast cat catches mice.\\nThe quick brown dog jumped.",
#'           doc2 = "This is the second document.",
#'           doc3 = "This is a \\\"quoted\\\" text." )
#' spacy_parse(txt2, entity = TRUE, dependency = TRUE)
#' }
# S3 generic: dispatches to the character or data.frame method below.
spacy_parse <- function(x,
pos = TRUE,
tag = FALSE,
lemma = TRUE,
entity = TRUE,
dependency = FALSE,
...) {
UseMethod("spacy_parse")
}
#' @export
#' @importFrom data.table data.table
#' @noRd
spacy_parse.character <- function(x,
                                  pos = TRUE,
                                  tag = FALSE,
                                  lemma = TRUE,
                                  entity = TRUE,
                                  dependency = FALSE,
                                  ...) {
  # Character method: send texts to spaCy via process_document() and build
  # one data.frame row per token with the requested annotation columns.
  `:=` <- NULL # silence R CMD check NOTE about data.table's := operator
  spacy_out <- process_document(x)
  if (is.null(spacy_out$timestamps)) {
    stop("Document parsing failed")
  }
  ## check the omit_entity status: entities can only be reported when the
  ## spaCy model was initialized with its entity recognizer enabled
  if (entity && !getOption("spacy_entity")) {
    message("entity == TRUE is ignored because spaCy model is initialized without Entity Recognizer")
    # FIX: message previously said "entity = TURE" (typo) -- would mislead
    # users copying the suggested command.
    message("In order to turn on entity recognition, run spacy_finalize(); spacy_initialize(entity = TRUE)")
    entity <- FALSE
  }
  tokens <- get_tokens(spacy_out)
  ntokens <- get_ntokens(spacy_out)
  ntokens_by_sent <- get_ntokens_by_sent(spacy_out)
  # sentence_id restarts within each document; token_id within each sentence
  dt <- data.table(doc_id = rep(spacy_out$docnames, ntokens),
                   sentence_id = unlist(lapply(ntokens_by_sent, function(x) rep(seq_along(x), x))),
                   token_id = unlist(lapply(unlist(ntokens_by_sent), function(x) seq(to = x))),
                   token = tokens)
  if (lemma) {
    model <- spacyr_pyget("model")
    dt[, "lemma" := get_attrs(spacy_out, "lemma_", TRUE)]
    if (model != 'en') {
      warning("lemmatization may not work properly in model '", model, "'")
    }
  }
  if (pos) {
    dt[, "pos" := get_tags(spacy_out, "google")]
  }
  if (tag) {
    dt[, "tag" := get_tags(spacy_out, "detailed")]
  }
  ## add dependency data fields
  if (dependency) {
    # spaCy numbers head_id over the whole document; subtract cumulative
    # sentence token counts so head_token_id is sentence-relative.
    subtractor <- unlist(lapply(ntokens_by_sent, function(x) {
      if (length(x) == 0) return(NULL)
      csumx <- cumsum(c(0, x[-length(x)]))
      return(rep(csumx, x))
    }))
    deps <- get_dependency(spacy_out)
    dt[, c("head_token_id", "dep_rel") := list(deps$head_id - subtractor,
                                               deps$dep_rel)]
  }
  ## named entity fields
  if (entity) {
    dt[, entity := get_named_entities(spacy_out)]
  }
  dt <- as.data.frame(dt)
  class(dt) <- c("spacyr_parsed", class(dt))
  return(dt)
}
#' @noRd
#' @export
spacy_parse.data.frame <- function(x, ...) {
  # Method for TIF-compliant corpus data.frames: requires doc_id and text
  # columns, then delegates to the character method with doc_id as names.
  # (Compliance check to be replaced with the tif package eventually.)
  required <- c("doc_id", "text")
  if (!all(required %in% names(x))) {
    stop("input data.frame does not conform to the TIF standard")
  }
  txt <- setNames(x$text, x$doc_id)
  spacy_parse(txt, ...)
}
#' tokenize text using spaCy
#'
#' Tokenize text using spaCy. The results of tokenization is stored as a python object. To obtain the tokens results in R, use \code{get_tokens()}.
#' \url{http://spacy.io}.
#' @param x input text
#' functionalities including the tagging, named entity recognition, dependency
#' analysis.
#' This slows down \code{spacy_parse()} but speeds up the later parsing.
#' If FALSE, tagging, entity recognitions, and dependency analysis when
#' relevant functions are called.
#' @param python_exec character; select connection type to spaCy, either
#' "rPython" or "Rcpp".
#' @param ... arguments passed to specific methods
#' @return result marker object
#' @importFrom methods new
#' @examples
#' \donttest{spacy_initialize()
#' # the result has to be "tag() is ready to run" to run the following
#' txt <- c(text1 = "This is the first sentence.\nHere is the second sentence.",
#'          text2 = "This is the second document.")
#' results <- spacy_parse(txt)
#'
#' }
#' @export
#' @keywords internal
process_document <- function(x, ...) {
# This function passes texts to python and spacy
# get or set document names
if (!is.null(names(x))) {
docnames <- names(x)
} else {
docnames <- paste0("text", 1:length(x))
}
if (is.null(options()$spacy_initialized)) spacy_initialize()
# Drop any stale parser object on the python side before re-creating it
spacyr_pyexec("try:\n del spobj\nexcept NameError:\n 1")
spacyr_pyexec("texts = []")
# Normalise escaped whitespace sequences before handing texts to python
x <- gsub("\\\\n","\\\n", x) # replace two quotes \\n with \n
x <- gsub("\\\\t","\\\t", x) # replace two quotes \\t with \t
x <- gsub("\\\\","", x) # delete unnecessary backslashes
x <- unname(x)
spacyr_pyassign("texts", x)
spacyr_pyexec("spobj = spacyr()")
spacyr_pyexec("timestamps = spobj.parse(texts)")
# timestamps identify the parsed documents held on the python side
timestamps = as.character(spacyr_pyget("timestamps"))
output <- spacy_out$new(docnames = docnames,
timestamps = timestamps)
return(output)
}
|
/R/spacy_parse.R
|
no_license
|
MhAmine/spacyr
|
R
| false
| false
| 7,111
|
r
|
#' parse a text using spaCy
#'
#' The spacy_parse() function calls spaCy to both tokenize and tag the texts,
#' and returns a data.table of the results. The function provides options on the
#' types of tagsets (\code{tagset_} options) either \code{"google"} or
#' \code{"detailed"}, as well as lemmatization (\code{lemma}). It provides a
#' functionalities of dependency parsing and named entity recognition as an
#' option. If \code{"full_parse = TRUE"} is provided, the function returns the
#' most extensive list of the parsing results from spaCy.
#'
#' @param x a character object, a \pkg{quanteda} corpus, or a TIF-compliant
#' corpus data.frame (see \url{https://github.com/ropensci/tif})
#' @param pos logical whether to return universal dependency POS tagset
#' \url{http://universaldependencies.org/u/pos/})
#' @param tag logical whether to return detailed part-of-speech tags, for the
#' langage model \code{en}, it uses the OntoNotes 5 version of the Penn
#' Treebank tag set (\url{https://spacy.io/docs/usage/pos-tagging#pos-schemes}).
#' Annotation specifications for other available languages are available on the
#' spaCy website (\url{https://spacy.io/api/annotation}).
#' @param lemma logical; inlucde lemmatized tokens in the output (lemmatization
#' may not work properly for non-English models)
#' @param entity logical; if \code{TRUE}, report named entities
#' @param dependency logical; if \code{TRUE}, analyze and return dependencies
#' @param ... not used directly
#' @return a \code{data.frame} of tokenized, parsed, and annotated tokens
#' @export
#' @examples
#' \donttest{
#' spacy_initialize()
#' # See Chap 5.1 of the NLTK book, http://www.nltk.org/book/ch05.html
#' txt <- "And now for something completely different."
#' spacy_parse(txt)
#' spacy_parse(txt, pos = TRUE, tag = TRUE)
#' spacy_parse(txt, dependency = TRUE)
#'
#' txt2 <- c(doc1 = "The fast cat catches mice.\\nThe quick brown dog jumped.",
#' doc2 = "This is the second document.",
#' doc3 = "This is a \\\"quoted\\\" text." )
#' spacy_parse(txt2, entity = TRUE, dependency = TRUE)
#' }
# S3 generic for spacy_parse(): dispatches on the class of `x`
# (character, data.frame, or a corpus object with a registered method).
spacy_parse <- function(x,
                        pos = TRUE,
                        tag = FALSE,
                        lemma = TRUE,
                        entity = TRUE,
                        dependency = FALSE,
                        ...) {
    UseMethod("spacy_parse")
}
#' @export
#' @importFrom data.table data.table
#' @noRd
# Character-vector method: sends texts to spaCy via process_document(), then
# assembles a token-level data.frame with optional lemma / pos / tag /
# dependency / entity columns.  Returns a data.frame with class
# c("spacyr_parsed", "data.frame"); stops if parsing fails.
spacy_parse.character <- function(x,
                                  pos = TRUE,
                                  tag = FALSE,
                                  lemma = TRUE,
                                  entity = TRUE,
                                  dependency = FALSE,
                                  ...) {
    `:=` <- NULL  # silence R CMD check note for data.table's := operator
    spacy_out <- process_document(x)
    if (is.null(spacy_out$timestamps)) {
        stop("Document parsing failed")
    }
    ## Entity recognition is only available if the spaCy model was initialized
    ## with an entity recognizer.  Use scalar && (not vectorized &) in `if`,
    ## and identical() so a missing option (NULL) does not error.
    if (entity && identical(getOption("spacy_entity"), FALSE)) {
        message("entity == TRUE is ignored because spaCy model is initialized without Entity Recognizer")
        message("In order to turn on entity recognition, run spacy_finalize(); spacy_initialize(entity = TRUE)")
        entity <- FALSE
    }
    tokens <- get_tokens(spacy_out)
    ntokens <- get_ntokens(spacy_out)
    ntokens_by_sent <- get_ntokens_by_sent(spacy_out)
    dt <- data.table(doc_id = rep(spacy_out$docnames, ntokens),
                     sentence_id = unlist(lapply(ntokens_by_sent, function(x) rep(seq_along(x), x))),
                     token_id = unlist(lapply(unlist(ntokens_by_sent), function(x) seq(to = x))),
                     token = tokens)
    if (lemma) {
        model <- spacyr_pyget("model")
        dt[, "lemma" := get_attrs(spacy_out, "lemma_", TRUE)]
        # lemmatization is only fully supported by the English model
        if (model != "en") {
            warning("lemmatization may not work properly in model '", model, "'")
        }
    }
    if (pos) {
        dt[, "pos" := get_tags(spacy_out, "google")]
    }
    if (tag) {
        dt[, "tag" := get_tags(spacy_out, "detailed")]
    }
    ## dependency fields: head token ids come back document-relative, so
    ## subtract each sentence's token offset to make them sentence-relative
    if (dependency) {
        subtractor <- unlist(lapply(ntokens_by_sent, function(x) {
            if (length(x) == 0) return(NULL)
            csumx <- cumsum(c(0, x[-length(x)]))
            rep(csumx, x)
        }))
        deps <- get_dependency(spacy_out)
        dt[, c("head_token_id", "dep_rel") := list(deps$head_id - subtractor,
                                                   deps$dep_rel)]
    }
    ## named entity fields (quoted LHS for consistency with the other columns)
    if (entity) {
        dt[, "entity" := get_named_entities(spacy_out)]
    }
    dt <- as.data.frame(dt)
    class(dt) <- c("spacyr_parsed", class(dt))
    return(dt)
}
#' @noRd
#' @export
spacy_parse.data.frame <- function(x, ...) {
    # TIF-compliance check (to be replaced by the tif package eventually):
    # the data.frame must carry both a doc_id and a text column.
    required_cols <- c("doc_id", "text")
    if (!all(required_cols %in% names(x))) {
        stop("input data.frame does not conform to the TIF standard")
    }
    # convert to a named character vector and delegate to the generic
    texts <- x$text
    names(texts) <- x$doc_id
    spacy_parse(texts, ...)
}
#' tokenize text using spaCy
#'
#' Tokenize text using spaCy. The results of tokenization is stored as a python object. To obtain the tokens results in R, use \code{get_tokens()}.
#' \url{http://spacy.io}.
#' @param x input text
#' functionalities including tagging, named entity recognition, and dependency
#' analysis.
#' This slows down \code{spacy_parse()} but speeds up the later parsing.
#' If FALSE, tagging, entity recognition, and dependency analysis are
#' performed only when the relevant functions are called.
#' @param python_exec character; select connection type to spaCy, either
#' "rPython" or "Rcpp".
#' @param ... arguments passed to specific methods
#' @return result marker object
#' @importFrom methods new
#' @examples
#' \donttest{spacy_initialize()
#' # the result has to be "tag() is ready to run" to run the following
#' txt <- c(text1 = "This is the first sentence.\nHere is the second sentence.",
#' text2 = "This is the second document.")
#' results <- spacy_parse(txt)
#'
#' }
#' @export
#' @keywords internal
process_document <- function(x, ...) {
    # Passes texts to Python/spaCy for parsing and returns a spacy_out
    # reference object holding the document names and parse timestamps.
    #
    # get or set document names (seq_along is safe for zero-length input,
    # unlike 1:length(x))
    if (!is.null(names(x))) {
        docnames <- names(x)
    } else {
        docnames <- paste0("text", seq_along(x))
    }
    if (is.null(getOption("spacy_initialized"))) spacy_initialize()
    # drop any stale python-side parser object before re-parsing
    spacyr_pyexec("try:\n del spobj\nexcept NameError:\n 1")
    spacyr_pyexec("texts = []")
    x <- gsub("\\\\n", "\\\n", x) # replace two quotes \\n with \n
    x <- gsub("\\\\t", "\\\t", x) # replace two quotes \\t with \t
    x <- gsub("\\\\", "", x)      # delete unnecessary backslashes
    x <- unname(x)
    spacyr_pyassign("texts", x)
    spacyr_pyexec("spobj = spacyr()")
    spacyr_pyexec("timestamps = spobj.parse(texts)")
    timestamps <- as.character(spacyr_pyget("timestamps"))
    output <- spacy_out$new(docnames = docnames,
                            timestamps = timestamps)
    return(output)
}
|
computeNumericalGradient <- function(J, theta){
    # Numerically estimate the gradient of J at theta using central
    # ("finite") differences:
    #   numgrad[i] = (J(theta + e*u_i) - J(theta - e*u_i)) / (2*e)
    # where u_i is the i-th unit vector.  numgrad[i] approximates the
    # partial derivative of J with respect to theta[i].
    #
    # Args:
    #   J:     function taking a numeric vector and returning a scalar.
    #   theta: numeric vector at which to evaluate the gradient.
    # Returns:
    #   A numeric vector the same length as theta.
    numgrad <- numeric(length(theta))
    perturb <- numeric(length(theta))
    e <- 1e-4
    # seq_along() (rather than 1:length(theta)) correctly does zero
    # iterations when theta is empty
    for (p in seq_along(theta)) {
        # Set perturbation vector for coordinate p only
        perturb[p] <- e
        loss1 <- J(theta - perturb)
        loss2 <- J(theta + perturb)
        # Central-difference estimate of the p-th partial derivative
        numgrad[p] <- (loss2 - loss1) / (2 * e)
        perturb[p] <- 0
    }
    numgrad
}
|
/Week5/computeNumericalGradient.R
|
no_license
|
Tatortreiniger91/Coursera_Machine_Learning
|
R
| false
| false
| 1,108
|
r
|
computeNumericalGradient <- function(J, theta){
    # Numerically estimate the gradient of J at theta using central
    # ("finite") differences:
    #   numgrad[i] = (J(theta + e*u_i) - J(theta - e*u_i)) / (2*e)
    # where u_i is the i-th unit vector.  numgrad[i] approximates the
    # partial derivative of J with respect to theta[i].
    #
    # Args:
    #   J:     function taking a numeric vector and returning a scalar.
    #   theta: numeric vector at which to evaluate the gradient.
    # Returns:
    #   A numeric vector the same length as theta.
    numgrad <- numeric(length(theta))
    perturb <- numeric(length(theta))
    e <- 1e-4
    # seq_along() (rather than 1:length(theta)) correctly does zero
    # iterations when theta is empty
    for (p in seq_along(theta)) {
        # Set perturbation vector for coordinate p only
        perturb[p] <- e
        loss1 <- J(theta - perturb)
        loss2 <- J(theta + perturb)
        # Central-difference estimate of the p-th partial derivative
        numgrad[p] <- (loss2 - loss1) / (2 * e)
        perturb[p] <- 0
    }
    numgrad
}
|
# Validate and apply a date range to a Shiny dateRangeInput.
#
# Args:
#   name:       inputId of the dateRangeInput widget to update.
#   session:    the Shiny session object.
#   start.date: proposed start date (anything as.Date() accepts).
#   end.date:   proposed end date (anything as.Date() accepts).
#   min.range:  minimum allowed span, in days.
#
# If the end date precedes the start date, or the span is shorter than
# min.range, the end date is pushed out to start.date + min.range.
date_standards <- function(name, session, start.date, end.date, min.range) {
  start.date <- as.Date(start.date)
  end.date <- as.Date(end.date)
  # scalar condition: use short-circuiting || rather than vectorized |
  if (end.date < start.date || end.date - start.date < min.range) {
    end.date <- start.date + min.range
  }
  updateDateRangeInput(session, name, start = start.date, end = end.date)
}
#------------------------------------------------------------------------------
# Build a one-column data.frame of POSIXt timestamps covering
# [start.date, end.date] at the given step (default: hourly).
date_frame <- function(start.date, end.date, seq.by = "hour") {
  timestamps <- seq.POSIXt(start.date, end.date, by = seq.by)
  data.frame(date_time = timestamps)
}
#------------------------------------------------------------------------------
|
/functions/date_standards_func.R
|
no_license
|
cheriels/2017ops_beta
|
R
| false
| false
| 734
|
r
|
# Validate and apply a date range to a Shiny dateRangeInput.
#
# Args:
#   name:       inputId of the dateRangeInput widget to update.
#   session:    the Shiny session object.
#   start.date: proposed start date (anything as.Date() accepts).
#   end.date:   proposed end date (anything as.Date() accepts).
#   min.range:  minimum allowed span, in days.
#
# If the end date precedes the start date, or the span is shorter than
# min.range, the end date is pushed out to start.date + min.range.
date_standards <- function(name, session, start.date, end.date, min.range) {
  start.date <- as.Date(start.date)
  end.date <- as.Date(end.date)
  # scalar condition: use short-circuiting || rather than vectorized |
  if (end.date < start.date || end.date - start.date < min.range) {
    end.date <- start.date + min.range
  }
  updateDateRangeInput(session, name, start = start.date, end = end.date)
}
#------------------------------------------------------------------------------
# Build a one-column data.frame of POSIXt timestamps covering
# [start.date, end.date] at the given step (default: hourly).
date_frame <- function(start.date, end.date, seq.by = "hour") {
  timestamps <- seq.POSIXt(start.date, end.date, by = seq.by)
  data.frame(date_time = timestamps)
}
#------------------------------------------------------------------------------
|
## plot1.R
## Download the household power-consumption data set (if absent), subset it
## to 2007-02-01/2007-02-02, and save a histogram of Global_active_power as
## plot1.png.
fileName <- "household_power_consumption.txt"
if (!file.exists(fileName)) {
  # utils::download.file suffices here; no need for the 'downloader' package
  fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  zippedFileName <- "household_power_consumption.zip"
  download.file(fileUrl, destfile = zippedFileName, mode = "wb")
  unzip(zippedFileName)
  unlink(zippedFileName)
}
## read power consumption data set ("?" marks missing values)
powerConsumption <- read.table(file = fileName,
                               header = TRUE, sep = ";", na.strings = "?")
## select data from 2007-02-01 and 2007-02-02 (Date column is d/m/yyyy text)
powerConsumption <- subset(powerConsumption,
                           powerConsumption$Date %in% c("1/2/2007", "2/2/2007"))
## create histogram of Global active power and save as png
png(file = "plot1.png")
hist(powerConsumption$Global_active_power, col = "red",
     main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
curDevice <- dev.off()
print("Created plot1.png")
|
/plot1.R
|
no_license
|
herofo/ExData_Plotting1
|
R
| false
| false
| 962
|
r
|
## plot1.R
## Download the household power-consumption data set (if absent), subset it
## to 2007-02-01/2007-02-02, and save a histogram of Global_active_power as
## plot1.png.
fileName <- "household_power_consumption.txt"
if (!file.exists(fileName)) {
  # utils::download.file suffices here; no need for the 'downloader' package
  fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  zippedFileName <- "household_power_consumption.zip"
  download.file(fileUrl, destfile = zippedFileName, mode = "wb")
  unzip(zippedFileName)
  unlink(zippedFileName)
}
## read power consumption data set ("?" marks missing values)
powerConsumption <- read.table(file = fileName,
                               header = TRUE, sep = ";", na.strings = "?")
## select data from 2007-02-01 and 2007-02-02 (Date column is d/m/yyyy text)
powerConsumption <- subset(powerConsumption,
                           powerConsumption$Date %in% c("1/2/2007", "2/2/2007"))
## create histogram of Global active power and save as png
png(file = "plot1.png")
hist(powerConsumption$Global_active_power, col = "red",
     main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
curDevice <- dev.off()
print("Created plot1.png")
|
###############################################################################
############################ Process Data Function ###########################
###############################################################################
# Recode a raw input data.frame into the factors used downstream:
#   - Diagnosis as an ordered set of levels (AD / MCI / HC)
#   - A/T/N biomarker status flags and their combined Overall_status
#   - Burnham and Clifford classification schemes derived from Overall_status
#   - a binary age split at 72.5 years
# Inputs:  file1_input - the raw data.frame needing processing
# Outputs: a list with one element, file1_input, holding the processed frame
ProcessingData <- function(file1_input){
  file1_input$Diagnosis <- factor(file1_input$Diagnosis, levels = c("AD", "MCI", "HC"))
  # Derive A/T/N status flags from the positive/negative biomarker columns
  file1_input <- mutate(file1_input,
                        A_status = ifelse(AB.status == "positive", "A+", "A-"),
                        T_status = ifelse(pTau.status == "positive", "T+", "T-"),
                        N_status = ifelse(tTau.status == "positive", "N+", "N-")
  )
  file1_input <- tidyr::unite(file1_input, Overall_status, A_status:N_status, sep = '/')
  file1_input$Overall_status <- factor(file1_input$Overall_status, levels = c("A+/T+/N+", "A+/T+/N-", "A+/T-/N+",
                                                                              "A-/T+/N+", "A+/T-/N-","A-/T-/N+",
                                                                              "A-/T+/N-", "A-/T-/N-"))
  # Lookup tables replace the original 8-deep nested ifelse() chains;
  # unmatched / NA statuses map to NA exactly as before.
  burnham_map <- c("A+/T+/N+" = "AD",
                   "A+/T+/N-" = "AD",
                   "A+/T-/N+" = "Pathological Change",
                   "A+/T-/N-" = "Pathological Change",
                   "A-/T+/N+" = "Non-AD pathological Change",
                   "A-/T+/N-" = "Non-AD pathological Change",
                   "A-/T-/N+" = "Non-AD pathological Change",
                   "A-/T-/N-" = "Normal AD Biomarkers")
  file1_input$Burnham_class <- unname(burnham_map[as.character(file1_input$Overall_status)])
  clifford_map <- c("Normal AD Biomarkers"       = "MCI unlikely due to AD",
                    "AD"                         = "Stage 2, clinically asymptomatic",
                    "Pathological Change"        = "Stage 1, preclinical AD stage",
                    "Non-AD pathological Change" = "SNAP")
  file1_input$Clifford_class <- unname(clifford_map[file1_input$Burnham_class])
  file1_input$Clifford_class <- factor(file1_input$Clifford_class, levels = c("Stage 2, clinically asymptomatic",
                                                                              "Stage 1, preclinical AD stage",
                                                                              "SNAP",
                                                                              "MCI unlikely due to AD"))
  file1_input$Burnham_class <- factor(file1_input$Burnham_class, levels = c("AD",
                                                                            "Pathological Change",
                                                                            "Non-AD pathological Change",
                                                                            "Normal AD Biomarkers"))
  # Dichotomize age at 72.5 years (1 = younger, 0 = older)
  file1_input <- mutate(file1_input, Age_binary = ifelse(Age < 72.5, 1, 0))
  ops <- list(file1_input = file1_input)
  return(ops)
}
|
/Synthetic_app/ProcessingData.R
|
no_license
|
lucindanott/CSIRO_Vacation_Project
|
R
| false
| false
| 4,089
|
r
|
###############################################################################
############################ Process Data Function ###########################
###############################################################################
# Recode a raw input data.frame into the factors used downstream:
#   - Diagnosis as an ordered set of levels (AD / MCI / HC)
#   - A/T/N biomarker status flags and their combined Overall_status
#   - Burnham and Clifford classification schemes derived from Overall_status
#   - a binary age split at 72.5 years
# Inputs:  file1_input - the raw data.frame needing processing
# Outputs: a list with one element, file1_input, holding the processed frame
ProcessingData <- function(file1_input){
  file1_input$Diagnosis <- factor(file1_input$Diagnosis, levels = c("AD", "MCI", "HC"))
  # Derive A/T/N status flags from the positive/negative biomarker columns
  file1_input <- mutate(file1_input,
                        A_status = ifelse(AB.status == "positive", "A+", "A-"),
                        T_status = ifelse(pTau.status == "positive", "T+", "T-"),
                        N_status = ifelse(tTau.status == "positive", "N+", "N-")
  )
  file1_input <- tidyr::unite(file1_input, Overall_status, A_status:N_status, sep = '/')
  file1_input$Overall_status <- factor(file1_input$Overall_status, levels = c("A+/T+/N+", "A+/T+/N-", "A+/T-/N+",
                                                                              "A-/T+/N+", "A+/T-/N-","A-/T-/N+",
                                                                              "A-/T+/N-", "A-/T-/N-"))
  # Lookup tables replace the original 8-deep nested ifelse() chains;
  # unmatched / NA statuses map to NA exactly as before.
  burnham_map <- c("A+/T+/N+" = "AD",
                   "A+/T+/N-" = "AD",
                   "A+/T-/N+" = "Pathological Change",
                   "A+/T-/N-" = "Pathological Change",
                   "A-/T+/N+" = "Non-AD pathological Change",
                   "A-/T+/N-" = "Non-AD pathological Change",
                   "A-/T-/N+" = "Non-AD pathological Change",
                   "A-/T-/N-" = "Normal AD Biomarkers")
  file1_input$Burnham_class <- unname(burnham_map[as.character(file1_input$Overall_status)])
  clifford_map <- c("Normal AD Biomarkers"       = "MCI unlikely due to AD",
                    "AD"                         = "Stage 2, clinically asymptomatic",
                    "Pathological Change"        = "Stage 1, preclinical AD stage",
                    "Non-AD pathological Change" = "SNAP")
  file1_input$Clifford_class <- unname(clifford_map[file1_input$Burnham_class])
  file1_input$Clifford_class <- factor(file1_input$Clifford_class, levels = c("Stage 2, clinically asymptomatic",
                                                                              "Stage 1, preclinical AD stage",
                                                                              "SNAP",
                                                                              "MCI unlikely due to AD"))
  file1_input$Burnham_class <- factor(file1_input$Burnham_class, levels = c("AD",
                                                                            "Pathological Change",
                                                                            "Non-AD pathological Change",
                                                                            "Normal AD Biomarkers"))
  # Dichotomize age at 72.5 years (1 = younger, 0 = older)
  file1_input <- mutate(file1_input, Age_binary = ifelse(Age < 72.5, 1, 0))
  ops <- list(file1_input = file1_input)
  return(ops)
}
|
# Week 3 quiz script: self-contained exercise answers.  Each "Problem" section
# computes and prints its result; the expected answer is noted in a comment.
#### Problem 1 ####
#Add the values in the vector and then divide the total by the number of values
f1=function (x) { sum(x)/length(x) }
print(f1(c(1.2,1.5,2.7)))
#result: 1.8
#### Problem 2 ####
# Mean that ignores NA values (equivalent to mean(x, na.rm = TRUE))
f2=function (x){sum (x [! is.na (x)])/length (x [! is.na (x)])}
print(f2(c(1.2,1.5,NA,2.7,NA)))
#result: 1.8
#### Problem 3 ####
# Recursive Euclidean GCD.  NOTE(review): ifelse() is vectorized and is being
# used here on scalars; it works, but plain if/else is the conventional form.
gcd <- function(num1,num2) ifelse (num2==0, num1, gcd(num2, num1 %% num2))
print(gcd(175,25))
#result: 25
#### Problem 4 ####
# Iterative GCD by repeated subtraction
gcd2<-function(num1, num2){
if(num1 == num2){
return(num1)
}
while(num1 != num2){
if(num1 < num2){
num2 = num2 - num1
}else{
num1 = num1 - num2
}
}
return(num1)
}
print(gcd2(175,25))
#result: 25
#### Problem 5 ####
# NOTE(review): this redefines f2 from Problem 2 with a different signature.
f2<-function (x, y){
return((x^2)*y + 2*x*y - x*(y^2))
}
print(f2(3,4))
#Result 12
#### Problem 6 ####
# NOTE(review): hard-coded local Windows paths; these reads will fail on any
# other machine.
d1=read.csv ('c:/Burton/week-3-price-data.csv')
d2=read.csv ('c:/Burton/week-3-make-model-data.csv')
m=merge (d1, d2, by.x="ModelNumber")
print(nrow(m))
#Result 27
# There are 27 observations in the merge though there are 28 rows in price-data. This is because the defaultvalue of 'all' is
# false, which results in a natural join which excludes rows that don't have a corresponding matching value in
# both the tables.
#### Problem 7 ####
# Left outer join: keep all rows of d1
m=merge (d1, d2, by.x="ModelNumber",all.x=TRUE)
print(nrow(m))
#Result 28
#### Problem 8 ####
n<-na.omit(m[m$Year==2010, ])
print(n)
#### Problem 9 ####
o<-na.omit(m [( (m$Color=="Red")&(m$Price> 10000) ), ])
print(o)
#### Problem 10 ####
# Drop the ModelNumber and Color columns
p<-subset (o, select=-c (ModelNumber, Color))
print(p)
#### Problem 11 ####
g<-function(x){
return(nchar (x))
}
r=c ("one","two","three")
v=g(r)
print(v)
#Result: 3 3 5
#### Problem 12 ####
#Use runif (10,11,2) to generate 10 random numbers from 11 to 20
# NOTE(review): the comment's call looks wrong - runif(n, min, max) would be
# runif(10, 11, 20) for 10 numbers in [11, 20]; confirm the intended call.
h<-function(x,y){
if(length(x) != length(y)){
return("The lengths of the arguments do not match")
}else{
z=paste (x, y, sep=" ")
print(z)
}
}
c1<-c("one", "two", "three")
c2<-c("four", "five", "six")
h(c1,c2)
#### Problem 13 ####
#aeiou are the vowels
# Find the first vowel followed by any two characters
p<-function(x){
m<-regexpr("[aeiou]..",x,perl=TRUE)
r<-regmatches(x,m)
if(length(r) == 0){
return("No vowel found")
}else{
return(r)
}
}
s<-"bllten"
print(p(s))
#### Problem 14 ####
# Build 20 random mm-dd-yyyy strings from uniform draws
mm=as.integer (runif (20,1,13))
dd=as.integer (runif (20,1,32))
yy=as.integer (runif (20,2000,2014))
d=data.frame (mm, dd, yy)
d$new=paste(d$mm ,d$dd, d$yy, sep="-")
print(d)
#### Problem 15 ####
# NOTE(review): as.date() is not base R - presumably as.Date() (capital D)
# was intended, or the 'date' package must be attached; confirm.
dd<-as.date("05-14-1967", format="%m-%d-%Y")
print(dd)
#### Problem 16 ####
# NOTE(review): month() is not base R either (lubridate/data.table provide it).
dd<-as.date("05-14-1967", format="%m-%d-%Y")
month (dd)
#### Problem 17 ####
# NOTE(review): the format strings "% m-%d-%Y" contain a stray space after %
# and will not parse as written - TODO confirm intended "%m-%d-%Y".
dateRange=as.date (as.date ("01-01-2005", format="% m-%d-%Y"):as.date ("12-31-2014",format="% m-%d-%Y"), origin="1970-01-01")
|
/Week_3_Quiz.R
|
no_license
|
machadob/MSDA
|
R
| false
| false
| 3,017
|
r
|
# Week 3 quiz script: self-contained exercise answers.  Each "Problem" section
# computes and prints its result; the expected answer is noted in a comment.
#### Problem 1 ####
#Add the values in the vector and then divide the total by the number of values
f1=function (x) { sum(x)/length(x) }
print(f1(c(1.2,1.5,2.7)))
#result: 1.8
#### Problem 2 ####
# Mean that ignores NA values (equivalent to mean(x, na.rm = TRUE))
f2=function (x){sum (x [! is.na (x)])/length (x [! is.na (x)])}
print(f2(c(1.2,1.5,NA,2.7,NA)))
#result: 1.8
#### Problem 3 ####
# Recursive Euclidean GCD.  NOTE(review): ifelse() is vectorized and is being
# used here on scalars; it works, but plain if/else is the conventional form.
gcd <- function(num1,num2) ifelse (num2==0, num1, gcd(num2, num1 %% num2))
print(gcd(175,25))
#result: 25
#### Problem 4 ####
# Iterative GCD by repeated subtraction
gcd2<-function(num1, num2){
if(num1 == num2){
return(num1)
}
while(num1 != num2){
if(num1 < num2){
num2 = num2 - num1
}else{
num1 = num1 - num2
}
}
return(num1)
}
print(gcd2(175,25))
#result: 25
#### Problem 5 ####
# NOTE(review): this redefines f2 from Problem 2 with a different signature.
f2<-function (x, y){
return((x^2)*y + 2*x*y - x*(y^2))
}
print(f2(3,4))
#Result 12
#### Problem 6 ####
# NOTE(review): hard-coded local Windows paths; these reads will fail on any
# other machine.
d1=read.csv ('c:/Burton/week-3-price-data.csv')
d2=read.csv ('c:/Burton/week-3-make-model-data.csv')
m=merge (d1, d2, by.x="ModelNumber")
print(nrow(m))
#Result 27
# There are 27 observations in the merge though there are 28 rows in price-data. This is because the defaultvalue of 'all' is
# false, which results in a natural join which excludes rows that don't have a corresponding matching value in
# both the tables.
#### Problem 7 ####
# Left outer join: keep all rows of d1
m=merge (d1, d2, by.x="ModelNumber",all.x=TRUE)
print(nrow(m))
#Result 28
#### Problem 8 ####
n<-na.omit(m[m$Year==2010, ])
print(n)
#### Problem 9 ####
o<-na.omit(m [( (m$Color=="Red")&(m$Price> 10000) ), ])
print(o)
#### Problem 10 ####
# Drop the ModelNumber and Color columns
p<-subset (o, select=-c (ModelNumber, Color))
print(p)
#### Problem 11 ####
g<-function(x){
return(nchar (x))
}
r=c ("one","two","three")
v=g(r)
print(v)
#Result: 3 3 5
#### Problem 12 ####
#Use runif (10,11,2) to generate 10 random numbers from 11 to 20
# NOTE(review): the comment's call looks wrong - runif(n, min, max) would be
# runif(10, 11, 20) for 10 numbers in [11, 20]; confirm the intended call.
h<-function(x,y){
if(length(x) != length(y)){
return("The lengths of the arguments do not match")
}else{
z=paste (x, y, sep=" ")
print(z)
}
}
c1<-c("one", "two", "three")
c2<-c("four", "five", "six")
h(c1,c2)
#### Problem 13 ####
#aeiou are the vowels
# Find the first vowel followed by any two characters
p<-function(x){
m<-regexpr("[aeiou]..",x,perl=TRUE)
r<-regmatches(x,m)
if(length(r) == 0){
return("No vowel found")
}else{
return(r)
}
}
s<-"bllten"
print(p(s))
#### Problem 14 ####
# Build 20 random mm-dd-yyyy strings from uniform draws
mm=as.integer (runif (20,1,13))
dd=as.integer (runif (20,1,32))
yy=as.integer (runif (20,2000,2014))
d=data.frame (mm, dd, yy)
d$new=paste(d$mm ,d$dd, d$yy, sep="-")
print(d)
#### Problem 15 ####
# NOTE(review): as.date() is not base R - presumably as.Date() (capital D)
# was intended, or the 'date' package must be attached; confirm.
dd<-as.date("05-14-1967", format="%m-%d-%Y")
print(dd)
#### Problem 16 ####
# NOTE(review): month() is not base R either (lubridate/data.table provide it).
dd<-as.date("05-14-1967", format="%m-%d-%Y")
month (dd)
#### Problem 17 ####
# NOTE(review): the format strings "% m-%d-%Y" contain a stray space after %
# and will not parse as written - TODO confirm intended "%m-%d-%Y".
dateRange=as.date (as.date ("01-01-2005", format="% m-%d-%Y"):as.date ("12-31-2014",format="% m-%d-%Y"), origin="1970-01-01")
|
# Fit an elastic-net regression (alpha = 0.85) on the pancreas training set
# via 10-fold cross-validation and append the fitted path to a log file.
library(glmnet)
# Spell out header = TRUE: the original `head=T` relied on partial argument
# matching and the reassignable shortcut T.
mydata <- read.table("./TrainingSet/RF/pancreas.csv", header = TRUE, sep = ",")
# column 1 is the response; columns 4..end are predictors
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
set.seed(123)  # reproducible CV fold assignment
glm <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.85,
                 family = "gaussian", standardize = TRUE)
sink('./Model/EN/Classifier/pancreas/pancreas_085.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
|
/Model/EN/Classifier/pancreas/pancreas_085.R
|
no_license
|
leon1003/QSMART
|
R
| false
| false
| 356
|
r
|
# Fit an elastic-net regression (alpha = 0.85) on the pancreas training set
# via 10-fold cross-validation and append the fitted path to a log file.
library(glmnet)
# Spell out header = TRUE: the original `head=T` relied on partial argument
# matching and the reassignable shortcut T.
mydata <- read.table("./TrainingSet/RF/pancreas.csv", header = TRUE, sep = ",")
# column 1 is the response; columns 4..end are predictors
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
set.seed(123)  # reproducible CV fold assignment
glm <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.85,
                 family = "gaussian", standardize = TRUE)
sink('./Model/EN/Classifier/pancreas/pancreas_085.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
|
#' @import stats
#' @export
maxnet <-
function(p, data, f=maxnet.formula(p, data), regmult=1.0,
         regfun=maxnet.default.regularization, addsamplestobackground=TRUE, ...)
{
  # Fit a maxent-style model as an infinitely-weighted logistic regression
  # along glmnet's regularization path.
  #
  # Args:
  #   p:       vector of 1 (presence) / 0 (background) indicators.
  #   data:    data.frame of covariates, one row per element of p.
  #   f:       model formula; defaults to maxnet.formula(p, data).
  #   regmult: multiplier on the regularization penalties.
  #   regfun:  function(p, mm) returning per-feature penalties.
  #   addsamplestobackground: if TRUE (default; spelled out instead of the
  #     reassignable shortcut T), presence rows absent from the background
  #     are appended to it.
  #   ...:     forwarded to glmnet::glmnet.
  # Returns: a fitted model of class c("maxnet", <glmnet classes>).
  if (anyNA(data)) stop("NA values in data table. Please remove them and rerun.")
  if (addsamplestobackground) {
    pdata <- data[p==1,]
    ndata <- data[p==0,]
    # add to the background any presence data that isn't already in the background
    toadd <- apply(pdata, 1, function(rr) !any(apply(ndata, 1, function(r) identical(r, rr))))
    p <- c(p, rep(0, sum(toadd)))
    data <- rbind(data, pdata[toadd,])
  }
  mm <- model.matrix(f, data)
  reg <- regfun(p,mm) * regmult
  # background points get weight 100, presences weight 1 (IWLR construction)
  weights <- p+(1-p)*100
  glmnet::glmnet.control(pmin=1.0e-8, fdev=0)
  model <- glmnet::glmnet(x=mm, y=as.factor(p), family="binomial", standardize=FALSE, penalty.factor=reg, lambda=10^(seq(4,0,length.out=200))*sum(reg)/length(reg)*sum(p)/sum(weights), weights=weights, ...)
  class(model) <- c("maxnet", class(model))
  if (length(model$lambda) < 200) {
    msg <- "Error: glmnet failed to complete regularization path. Model may be infeasible."
    if (!addsamplestobackground)
      msg <- paste(msg, " Try re-running with addsamplestobackground=T.")
    stop(msg)
  }
  # keep coefficients from the end of the regularization path (index 200)
  bb <- model$beta[,200]
  model$betas <- bb[bb!=0]
  model$alpha <- 0
  rr <- predict.maxnet(model, data[p==0, , drop = FALSE], type="exponent", clamp=FALSE)
  raw <- rr / sum(rr)
  model$entropy <- -sum(raw * log(raw))
  model$alpha <- -log(sum(rr))
  model$penalty.factor <- reg
  model$featuremins <- apply(mm, 2, min)
  model$featuremaxs <- apply(mm, 2, max)
  # per-variable ranges and presence-sample means, kept for later clamping
  vv <- (sapply(data, class)!="factor")
  model$varmin <- apply(data[,vv, drop = FALSE], 2, min)
  model$varmax <- apply(data[,vv, drop = FALSE], 2, max)
  means <- apply(data[p==1,vv, drop = FALSE], 2, mean)
  majorities <- sapply(names(data)[!vv],
                       function(n) which.max(table(data[p==1,n, drop = FALSE])))
  names(majorities) <- names(data)[!vv]
  model$samplemeans <- unlist(c(means, majorities))
  model$levels <- lapply(data, levels)
  model
}
|
/R/maxnet.R
|
no_license
|
cran/maxnet
|
R
| false
| false
| 2,116
|
r
|
#' @import stats
#' @export
maxnet <-
function(p, data, f=maxnet.formula(p, data), regmult=1.0,
         regfun=maxnet.default.regularization, addsamplestobackground=TRUE, ...)
{
  # Fit a maxent-style model as an infinitely-weighted logistic regression
  # along glmnet's regularization path.
  #
  # Args:
  #   p:       vector of 1 (presence) / 0 (background) indicators.
  #   data:    data.frame of covariates, one row per element of p.
  #   f:       model formula; defaults to maxnet.formula(p, data).
  #   regmult: multiplier on the regularization penalties.
  #   regfun:  function(p, mm) returning per-feature penalties.
  #   addsamplestobackground: if TRUE (default; spelled out instead of the
  #     reassignable shortcut T), presence rows absent from the background
  #     are appended to it.
  #   ...:     forwarded to glmnet::glmnet.
  # Returns: a fitted model of class c("maxnet", <glmnet classes>).
  if (anyNA(data)) stop("NA values in data table. Please remove them and rerun.")
  if (addsamplestobackground) {
    pdata <- data[p==1,]
    ndata <- data[p==0,]
    # add to the background any presence data that isn't already in the background
    toadd <- apply(pdata, 1, function(rr) !any(apply(ndata, 1, function(r) identical(r, rr))))
    p <- c(p, rep(0, sum(toadd)))
    data <- rbind(data, pdata[toadd,])
  }
  mm <- model.matrix(f, data)
  reg <- regfun(p,mm) * regmult
  # background points get weight 100, presences weight 1 (IWLR construction)
  weights <- p+(1-p)*100
  glmnet::glmnet.control(pmin=1.0e-8, fdev=0)
  model <- glmnet::glmnet(x=mm, y=as.factor(p), family="binomial", standardize=FALSE, penalty.factor=reg, lambda=10^(seq(4,0,length.out=200))*sum(reg)/length(reg)*sum(p)/sum(weights), weights=weights, ...)
  class(model) <- c("maxnet", class(model))
  if (length(model$lambda) < 200) {
    msg <- "Error: glmnet failed to complete regularization path. Model may be infeasible."
    if (!addsamplestobackground)
      msg <- paste(msg, " Try re-running with addsamplestobackground=T.")
    stop(msg)
  }
  # keep coefficients from the end of the regularization path (index 200)
  bb <- model$beta[,200]
  model$betas <- bb[bb!=0]
  model$alpha <- 0
  rr <- predict.maxnet(model, data[p==0, , drop = FALSE], type="exponent", clamp=FALSE)
  raw <- rr / sum(rr)
  model$entropy <- -sum(raw * log(raw))
  model$alpha <- -log(sum(rr))
  model$penalty.factor <- reg
  model$featuremins <- apply(mm, 2, min)
  model$featuremaxs <- apply(mm, 2, max)
  # per-variable ranges and presence-sample means, kept for later clamping
  vv <- (sapply(data, class)!="factor")
  model$varmin <- apply(data[,vv, drop = FALSE], 2, min)
  model$varmax <- apply(data[,vv, drop = FALSE], 2, max)
  means <- apply(data[p==1,vv, drop = FALSE], 2, mean)
  majorities <- sapply(names(data)[!vv],
                       function(n) which.max(table(data[p==1,n, drop = FALSE])))
  names(majorities) <- names(data)[!vv]
  model$samplemeans <- unlist(c(means, majorities))
  model$levels <- lapply(data, levels)
  model
}
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above (assuming you are running R Studio).
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
#library(latex2exp)
library(MASS)
library(shiny)
library(data.table)
library(ggplot2)
library(visreg)
library(rgl)
library(tidyverse)
library(gcookbook)
slider_width <- 220
graph_height <- "60%"
graph_ht <- 260
####################################
#
# Define server logic required to draw a histogram
#
# Shiny server for a Central Limit Theorem demonstration: draws three source
# populations (normal, lognormal, uniform), shows a single sample with its
# mean for each, and histograms of sample means over input$k_trials trials.
server <- function(input, output) {
rnd <- 4 # number of digits to round numbers to in table
# modify print function to always print exactly 'rnd' digits to make the chart look nicer
print.numeric<-function(x, digits = rnd ) formatC(x, digits = digits, format = "f")
trial_color <-"mediumorchid2"
N_pop = 1000
min_cuttoff <- 0
max_cuttoff <- 15
lognorm_cuttoff <- max_cuttoff
# fixed population parameters for the three source distributions
x1_mu <- 7.5
x1_sigma <- 1.7
x2_mu <- 0.4
x2_sigma <- 0.8
x3_min <- 0
x3_max <- 15
# Population draws, truncated to the plotting window.
# NOTE(review): x1 indexes x1_b() (already shortened) with a logical mask
# built from the longer x1_samp(); a longer-than-vector logical index can
# introduce NAs - presumably x1_b() > min_cuttoff was intended; confirm.
x1_samp <- reactive({rnorm(N_pop*100,x1_mu,x1_sigma)})
x1_b <- reactive({x1_samp()[x1_samp() < max_cuttoff]})
x1 <- reactive({x1_b()[x1_samp() > min_cuttoff]})
x2_samp <- reactive({rlnorm(N_pop*100, x2_mu, x2_sigma)})
x2 <- reactive({x2_samp()[x2_samp() < max_cuttoff]})
x3 <- reactive({runif(N_pop*10000,x3_min, x3_max)})
# fixed axis ranges for the three panels
x1_range_min <- reactive({0})
x1_range_max <- reactive({15})
x2_range_min <- reactive({0})
x2_range_max <- reactive({lognorm_cuttoff})
x3_range_min <- reactive({0})
x3_range_max <- reactive({15})
# set the bin size
bin_step <- reactive({.1})
########
#
# Generate all the plots
#
# The first three are histograms for the input variables, all normally distributed with slider-set
# values for number_samples, mean, and sd.
#
lsize <- 4.5
dot_color <- "#888888"
x_val <- seq(0,1,0.01)
y1 <- dnorm(x_val,mean=x1_mu,sd=x1_sigma)
dd <- data.frame(x_val,y1)
max_density_ht <- 0.55
# Population panels: histogram of draws plus the theoretical density curve.
# NOTE(review): the overlaid curves are the untruncated densities while the
# histograms use truncated draws - presumably an accepted approximation.
output$x1Plot <- renderPlot({
hist(x1(), breaks = seq(x1_range_min()-1,x1_range_max()+1, by=bin_step()), ylim=c(0,max_density_ht),
border='lightblue', yaxt='n', xlab = NULL, ylab=NULL, probability=TRUE, main="Normal", col='lightblue')
curve(dnorm(x, mean=x1_mu, sd=x1_sigma),
col="darkblue", lwd=2, add=TRUE, yaxt="n")}, height=graph_ht)
output$x2Plot <- renderPlot({
hist(x2(), breaks = seq(x2_range_min()-1,x2_range_max()+1, by=bin_step()), ylim=c(0,max_density_ht),
border='lightgreen', yaxt='n', xlab = NULL, ylab=NULL, probability=TRUE, main="Lognormal", col='lightgreen')
curve(dlnorm(x, x2_mu, x2_sigma),
col="darkgreen", lwd=2, add=TRUE, yaxt="n")}, height=graph_ht)
output$x3Plot <- renderPlot({
hist(x3(), breaks = seq(x3_range_min()-1,x3_range_max()+1, by=bin_step()), ylim=c(0,max_density_ht),
border='orange', yaxt='n', xlab = NULL, ylab=NULL, probability=TRUE, main="Uniform", col='orange')
curve(dunif(x, x3_min, x3_max),
col="darkred", cex=2, lwd=2, add=TRUE, yaxt="n")}, height=graph_ht)
# Single-sample panels: one sample of input$n_samples points per population,
# drawn as stems, with the sample mean highlighted in trial_color.
sample_ht <- 0.35
mean_ht <- 0.45
txt_offset <- .05
pts <- reactive({data.frame(x=sample(x1(),input$n_samples),y=c(rep(sample_ht,input$n_samples)))})
output$x4Plot <- renderPlot({
hist(x1(), breaks = seq(x1_range_min()-1,x1_range_max()+1, by=bin_step()), ylim=c(0,max_density_ht),
border='white', yaxt='n', xlab = NULL, ylab=NULL, probability=TRUE, main="", col='white')
curve(dnorm(x, mean=x1_mu, sd=x1_sigma),
col="darkblue", lwd=2, add=TRUE, yaxt="n")
points(pts(),col="darkblue")
segments(pts()$x,0,pts()$x,sample_ht, col="darkblue")
points(mean(pts()$x),mean_ht,col=trial_color,pch=19)
segments(mean(pts()$x),0,mean(pts()$x),mean_ht, col=trial_color)
text(mean(pts()$x),mean_ht + txt_offset,labels=c("mean"),col="black")
}, height=graph_ht)
x2_pts <- reactive({data.frame(x=sample(x2(),input$n_samples),y=c(rep(sample_ht,input$n_samples)))})
output$x5Plot <- renderPlot({
hist(x2(), breaks = seq(x2_range_min()-1,x2_range_max()+1, by=bin_step()), ylim=c(0,max_density_ht),
border='white', yaxt='n', xlab = NULL, ylab=NULL, probability=TRUE, main="", col='white')
# NOTE(review): dlnorm is called with mean=/sd=, which reach meanlog/sdlog
# only via partial argument matching - spell them out when next edited.
curve(dlnorm(x, mean=x2_mu, sd=x2_sigma),
col="darkgreen", lwd=2, add=TRUE, yaxt="n")
points(x2_pts(),col="darkgreen")
segments(x2_pts()$x,0,x2_pts()$x,sample_ht, col="darkgreen")
points(mean(x2_pts()$x),mean_ht,col=trial_color,pch=19)
segments(mean(x2_pts()$x),0,mean(x2_pts()$x),mean_ht, col=trial_color)
text(mean(x2_pts()$x),mean_ht + txt_offset,labels=c("mean"),col="black")
}, height=graph_ht)
x3_pts <- reactive({data.frame(x=sample(x3(),input$n_samples),y=c(rep(sample_ht,input$n_samples)))})
output$x6Plot <- renderPlot({
hist(x3(), breaks = seq(x3_range_min()-1,x3_range_max()+1, by=bin_step()), ylim=c(0,max_density_ht),
border='white', yaxt='n', xlab = NULL, ylab=NULL, probability=TRUE, main="", col='white')
curve(dunif(x, min=x3_min, max=x3_max),
col="darkred", lwd=2, add=TRUE, yaxt="n")
points(x3_pts(),col="darkred")
segments(x3_pts()$x,0,x3_pts()$x,sample_ht, col="orange")
points(mean(x3_pts()$x),mean_ht,col=trial_color,pch=19)
segments(mean(x3_pts()$x),0,mean(x3_pts()$x),mean_ht, col=trial_color)
text(mean(x3_pts()$x),mean_ht + txt_offset,labels=c("mean"),col="black")
}, height=graph_ht)
# Sampling-distribution panels: histogram of k_trials sample means, with the
# population density (solid) and its CLT normal approximation with
# sd/sqrt(n) (dotted) overlaid.
sample_curve_height<-.87
x4_pts <- reactive({
replicate(input$k_trials,mean(sample(x1(),input$n_samples)))
})
output$x7Plot <- renderPlot({
hist(x4_pts(), breaks = seq(x1_range_min()-1,x1_range_max()+1, by=bin_step()*1.4), ylim=c(0,sample_curve_height),
border=trial_color, yaxt='n', xlab = NULL, ylab=NULL, probability=TRUE, main="", col=trial_color)
curve(dnorm(x, mean=x1_mu, sd=x1_sigma/sqrt(input$n_samples)),
col="darkblue", lwd=2, add=TRUE, yaxt="n",lty="dotted")
curve(dnorm(x, mean=x1_mu, sd=x1_sigma),
col="darkblue", lwd=2, add=TRUE, yaxt="n")
}, height=graph_ht)
# analytic mean/sd of the (untruncated) lognormal population
mean_lnorm <- exp(x2_mu + (x2_sigma^2)/2)
sigma_lnorm <- sqrt((exp(x2_sigma^2)-1)*(exp(2*x2_mu+x2_sigma^2)))
x5_pts <- reactive({
replicate(input$k_trials,mean(sample(x2(),input$n_samples)))
})
output$x8Plot <- renderPlot({
hist(x5_pts(), breaks = seq(x2_range_min()-1,x2_range_max()+1, by=bin_step()*1.4), ylim=c(0,sample_curve_height),
border=trial_color, yaxt='n', xlab = NULL, ylab=NULL, probability=TRUE, main="", col=trial_color)
curve(dnorm(x, mean=mean_lnorm, sd=sigma_lnorm/sqrt(input$n_samples)),
col="darkgreen", lwd=2, add=TRUE, yaxt="n",lty="dotted")
curve(dlnorm(x, mean=x2_mu, sd=x2_sigma),
col="darkgreen", lwd=2, add=TRUE, yaxt="n")
}, height=graph_ht)
# analytic mean/sd of the uniform population
mean_unif <- (x3_max+x3_min)/2
sigma_unif <- sqrt(((x3_max-x3_min)^2)/12)
x6_pts <- reactive({
replicate(input$k_trials,mean(sample(x3(),input$n_samples)))
})
output$x9Plot <- renderPlot({
hist(x6_pts(), breaks = seq(x3_range_min()-1,x3_range_max()+1, by=bin_step()*2.5), ylim=c(0,sample_curve_height),
border=trial_color, yaxt='n', xlab = NULL, ylab=NULL, probability=TRUE, main="", col=trial_color)
# NOTE(review): this approximation curve is darkgreen while the uniform
# population curve below is darkred - presumably intended; confirm.
curve(dnorm(x, mean=mean_unif, sd=sigma_unif/sqrt(input$n_samples)),
col="darkgreen", lwd=2, add=TRUE, yaxt="n",lty="dotted")
curve(dunif(x, min=x3_min, max=x3_max),
col="darkred", lwd=2, add=TRUE, yaxt="n")
}, height=graph_ht)
}
|
/server.R
|
no_license
|
firstthefacts/CLT
|
R
| false
| false
| 7,567
|
r
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above (assuming you are running R Studio).
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
#library(latex2exp)
# Package loads. NOTE(review): only shiny is visibly used by the server logic
# below; MASS/data.table/ggplot2/visreg/rgl/tidyverse/gcookbook appear unused
# here — confirm against ui.R / other files before pruning.
library(MASS)
library(shiny)
library(data.table)
library(ggplot2)
library(visreg)
library(rgl)
library(tidyverse)
library(gcookbook)
# Shared layout constants.
slider_width <- 220    # slider input width in px — presumably consumed by ui.R; TODO confirm
graph_height <- "60%"  # relative graph height — presumably consumed by ui.R; TODO confirm
graph_ht <- 260        # fixed plot height in px, passed to every renderPlot(height=)
####################################
#
# Define server logic required to draw a histogram
#
# Server logic for the Central Limit Theorem demo. Three population
# distributions (truncated normal, truncated lognormal, uniform), a
# single-sample view for each, and a sampling-distribution-of-the-mean view
# for each. Reads input$n_samples (sample size) and input$k_trials (number of
# repeated samples).
server <- function(input, output) {
  rnd <- 4 # number of digits to round numbers to in table
  # modify print function to always print exactly 'rnd' digits to make the chart look nicer
  print.numeric <- function(x, digits = rnd) formatC(x, digits = digits, format = "f")
  trial_color <- "mediumorchid2"
  N_pop <- 1000
  min_cuttoff <- 0   # truncation bounds applied to the simulated populations
  max_cuttoff <- 15
  lognorm_cuttoff <- max_cuttoff
  x1_mu <- 7.5       # normal population parameters
  x1_sigma <- 1.7
  x2_mu <- 0.4       # lognormal meanlog / sdlog
  x2_sigma <- 0.8
  x3_min <- 0        # uniform population bounds
  x3_max <- 15
  # Truncated-normal population: draw, then drop values outside the cutoffs.
  x1_samp <- reactive({rnorm(N_pop*100, x1_mu, x1_sigma)})
  x1_b <- reactive({x1_samp()[x1_samp() < max_cuttoff]})
  # BUG FIX: the lower-bound mask must be built from x1_b(), not x1_samp();
  # the original logical index was longer than x1_b(), and logical indexing
  # past the end of a vector introduces NA entries.
  x1 <- reactive({x1_b()[x1_b() > min_cuttoff]})
  x2_samp <- reactive({rlnorm(N_pop*100, x2_mu, x2_sigma)})
  x2 <- reactive({x2_samp()[x2_samp() < max_cuttoff]})
  x3 <- reactive({runif(N_pop*10000, x3_min, x3_max)})
  # Fixed plotting ranges for each population.
  x1_range_min <- reactive({0})
  x1_range_max <- reactive({15})
  x2_range_min <- reactive({0})
  x2_range_max <- reactive({lognorm_cuttoff})
  x3_range_min <- reactive({0})
  x3_range_max <- reactive({15})
  # set the bin size
  bin_step <- reactive({.1})
  ########
  #
  # Generate all the plots
  #
  # The first three are histograms of the populations with their theoretical
  # density curves overlaid.
  #
  max_density_ht <- 0.55
  output$x1Plot <- renderPlot({
    hist(x1(), breaks = seq(x1_range_min()-1,x1_range_max()+1, by=bin_step()), ylim=c(0,max_density_ht),
         border='lightblue', yaxt='n', xlab = NULL, ylab=NULL, probability=TRUE, main="Normal", col='lightblue')
    curve(dnorm(x, mean=x1_mu, sd=x1_sigma),
          col="darkblue", lwd=2, add=TRUE, yaxt="n")}, height=graph_ht)
  output$x2Plot <- renderPlot({
    hist(x2(), breaks = seq(x2_range_min()-1,x2_range_max()+1, by=bin_step()), ylim=c(0,max_density_ht),
         border='lightgreen', yaxt='n', xlab = NULL, ylab=NULL, probability=TRUE, main="Lognormal", col='lightgreen')
    # dlnorm's parameters are meanlog/sdlog; spell them out rather than rely
    # on partial argument matching (mean= / sd= in the original).
    curve(dlnorm(x, meanlog=x2_mu, sdlog=x2_sigma),
          col="darkgreen", lwd=2, add=TRUE, yaxt="n")}, height=graph_ht)
  output$x3Plot <- renderPlot({
    hist(x3(), breaks = seq(x3_range_min()-1,x3_range_max()+1, by=bin_step()), ylim=c(0,max_density_ht),
         border='orange', yaxt='n', xlab = NULL, ylab=NULL, probability=TRUE, main="Uniform", col='orange')
    curve(dunif(x, x3_min, x3_max),
          col="darkred", cex=2, lwd=2, add=TRUE, yaxt="n")}, height=graph_ht)
  # Single-sample views: each draw shown as a spike, plus the sample mean.
  sample_ht <- 0.35
  mean_ht <- 0.45
  txt_offset <- .05
  pts <- reactive({data.frame(x=sample(x1(),input$n_samples),y=c(rep(sample_ht,input$n_samples)))})
  output$x4Plot <- renderPlot({
    hist(x1(), breaks = seq(x1_range_min()-1,x1_range_max()+1, by=bin_step()), ylim=c(0,max_density_ht),
         border='white', yaxt='n', xlab = NULL, ylab=NULL, probability=TRUE, main="", col='white')
    curve(dnorm(x, mean=x1_mu, sd=x1_sigma),
          col="darkblue", lwd=2, add=TRUE, yaxt="n")
    points(pts(),col="darkblue")
    segments(pts()$x,0,pts()$x,sample_ht, col="darkblue")
    points(mean(pts()$x),mean_ht,col=trial_color,pch=19)
    segments(mean(pts()$x),0,mean(pts()$x),mean_ht, col=trial_color)
    text(mean(pts()$x),mean_ht + txt_offset,labels=c("mean"),col="black")
  }, height=graph_ht)
  x2_pts <- reactive({data.frame(x=sample(x2(),input$n_samples),y=c(rep(sample_ht,input$n_samples)))})
  output$x5Plot <- renderPlot({
    hist(x2(), breaks = seq(x2_range_min()-1,x2_range_max()+1, by=bin_step()), ylim=c(0,max_density_ht),
         border='white', yaxt='n', xlab = NULL, ylab=NULL, probability=TRUE, main="", col='white')
    curve(dlnorm(x, meanlog=x2_mu, sdlog=x2_sigma),
          col="darkgreen", lwd=2, add=TRUE, yaxt="n")
    points(x2_pts(),col="darkgreen")
    segments(x2_pts()$x,0,x2_pts()$x,sample_ht, col="darkgreen")
    points(mean(x2_pts()$x),mean_ht,col=trial_color,pch=19)
    segments(mean(x2_pts()$x),0,mean(x2_pts()$x),mean_ht, col=trial_color)
    text(mean(x2_pts()$x),mean_ht + txt_offset,labels=c("mean"),col="black")
  }, height=graph_ht)
  x3_pts <- reactive({data.frame(x=sample(x3(),input$n_samples),y=c(rep(sample_ht,input$n_samples)))})
  output$x6Plot <- renderPlot({
    hist(x3(), breaks = seq(x3_range_min()-1,x3_range_max()+1, by=bin_step()), ylim=c(0,max_density_ht),
         border='white', yaxt='n', xlab = NULL, ylab=NULL, probability=TRUE, main="", col='white')
    curve(dunif(x, min=x3_min, max=x3_max),
          col="darkred", lwd=2, add=TRUE, yaxt="n")
    points(x3_pts(),col="darkred")
    segments(x3_pts()$x,0,x3_pts()$x,sample_ht, col="orange")
    points(mean(x3_pts()$x),mean_ht,col=trial_color,pch=19)
    segments(mean(x3_pts()$x),0,mean(x3_pts()$x),mean_ht, col=trial_color)
    text(mean(x3_pts()$x),mean_ht + txt_offset,labels=c("mean"),col="black")
  }, height=graph_ht)
  # Sampling-distribution views: k_trials sample means, with the CLT normal
  # approximation (dotted) and the population density (solid) overlaid.
  sample_curve_height <- .87
  x4_pts <- reactive({
    replicate(input$k_trials,mean(sample(x1(),input$n_samples)))
  })
  output$x7Plot <- renderPlot({
    hist(x4_pts(), breaks = seq(x1_range_min()-1,x1_range_max()+1, by=bin_step()*1.4), ylim=c(0,sample_curve_height),
         border=trial_color, yaxt='n', xlab = NULL, ylab=NULL, probability=TRUE, main="", col=trial_color)
    curve(dnorm(x, mean=x1_mu, sd=x1_sigma/sqrt(input$n_samples)),
          col="darkblue", lwd=2, add=TRUE, yaxt="n",lty="dotted")
    curve(dnorm(x, mean=x1_mu, sd=x1_sigma),
          col="darkblue", lwd=2, add=TRUE, yaxt="n")
  }, height=graph_ht)
  # Lognormal mean/sd on the natural scale. NOTE(review): these closed forms
  # assume an untruncated lognormal, but x2() is truncated at max_cuttoff,
  # so the overlay is an approximation.
  mean_lnorm <- exp(x2_mu + (x2_sigma^2)/2)
  sigma_lnorm <- sqrt((exp(x2_sigma^2)-1)*(exp(2*x2_mu+x2_sigma^2)))
  x5_pts <- reactive({
    replicate(input$k_trials,mean(sample(x2(),input$n_samples)))
  })
  output$x8Plot <- renderPlot({
    hist(x5_pts(), breaks = seq(x2_range_min()-1,x2_range_max()+1, by=bin_step()*1.4), ylim=c(0,sample_curve_height),
         border=trial_color, yaxt='n', xlab = NULL, ylab=NULL, probability=TRUE, main="", col=trial_color)
    curve(dnorm(x, mean=mean_lnorm, sd=sigma_lnorm/sqrt(input$n_samples)),
          col="darkgreen", lwd=2, add=TRUE, yaxt="n",lty="dotted")
    curve(dlnorm(x, meanlog=x2_mu, sdlog=x2_sigma),
          col="darkgreen", lwd=2, add=TRUE, yaxt="n")
  }, height=graph_ht)
  # Uniform mean/sd from the standard closed forms.
  mean_unif <- (x3_max+x3_min)/2
  sigma_unif <- sqrt(((x3_max-x3_min)^2)/12)
  x6_pts <- reactive({
    replicate(input$k_trials,mean(sample(x3(),input$n_samples)))
  })
  output$x9Plot <- renderPlot({
    hist(x6_pts(), breaks = seq(x3_range_min()-1,x3_range_max()+1, by=bin_step()*2.5), ylim=c(0,sample_curve_height),
         border=trial_color, yaxt='n', xlab = NULL, ylab=NULL, probability=TRUE, main="", col=trial_color)
    curve(dnorm(x, mean=mean_unif, sd=sigma_unif/sqrt(input$n_samples)),
          col="darkgreen", lwd=2, add=TRUE, yaxt="n",lty="dotted")
    curve(dunif(x, min=x3_min, max=x3_max),
          col="darkred", lwd=2, add=TRUE, yaxt="n")
  }, height=graph_ht)
}
|
# Extract a rectangular cutout of dimensions 'box' from matrix 'image',
# centred on 'loc' (R pixel coordinates, defaulting to the image centre).
# If the requested box overruns the image edge: shiftloc=TRUE slides the box
# back inside; paddim=TRUE (with shiftloc=FALSE) pads the output with NA so
# it keeps the requested dimensions. Returns a list with:
#   image    - the cutout matrix
#   loc      - target location in cutout coordinates
#   loc.orig - target location in original-image coordinates
#   loc.diff - pixel offset of the cutout's origin (xlo-1, ylo-1)
#   xsel/ysel - the selected pixel index ranges
# If plot=TRUE the cutout is displayed via magimage(image, ...).
magcutout=function(image, loc = dim(image)/2, box = c(100, 100), shiftloc=TRUE, paddim=TRUE, plot = FALSE, ...){
  loc = as.numeric(loc)
  xcen = loc[1]
  ycen = loc[2]
  loc = ceiling(loc)
  # Box edges centred on the integerised location (0.5 keeps box sizes exact).
  xlo = ceiling(loc[1] - (box[1]/2 - 0.5))
  xhi = ceiling(loc[1] + (box[1]/2 - 0.5))
  ylo = ceiling(loc[2] - (box[2]/2 - 0.5))
  yhi = ceiling(loc[2] + (box[2]/2 - 0.5))
  # Only shift-and-keep-size when both shifting and padding are requested.
  expand = paddim && shiftloc
  diffxlo = xlo - 1
  if (diffxlo < 0) {
    xlo = 1
    if(expand) xhi = xlo + (box[1] - 1)
  }
  diffxhi = xhi - dim(image)[1]
  if (diffxhi > 0) {
    xhi = dim(image)[1]
    if(expand) {
      xlo = xlo - diffxhi
      if(xlo < 1) xlo = 1
    }
  }
  diffylo = ylo - 1
  if (diffylo < 0) {
    ylo = 1
    if(expand) yhi = ylo + (box[2] - 1)
  }
  diffyhi = yhi - dim(image)[2]
  if (diffyhi > 0) {
    yhi = dim(image)[2]
    if(expand) {
      ylo = ylo - diffyhi
      if(ylo < 1) ylo = 1
    }
  }
  # Neither shifting nor padding: trim the opposite edge so the box stays
  # centred as well as possible within the image.
  if(!paddim && !shiftloc)
  {
    if(diffxlo < 0 && (-diffxlo > diffxhi)) xhi = xhi - max(diffxhi,0) + diffxlo
    if(diffxhi > 0 && (-diffxlo < diffxhi)) xlo = xlo + diffxhi - min(diffxlo,0)
    if(diffylo < 0 && (-diffylo > diffyhi)) yhi = yhi - max(diffyhi,0) + diffylo
    if(diffyhi > 0 && (-diffylo < diffyhi)) ylo = ylo + diffyhi - min(diffylo,0)
  }
  xsel = xlo:xhi
  ysel = ylo:yhi
  image = image[xsel, ysel]
  # Pad with NA to the full requested box when not shifting.
  if(paddim && !shiftloc && any(c(diffxlo,-diffxhi,diffylo,-diffyhi) < 0)) {
    padded = matrix(NA,box[1],box[2])
    padded[xsel-diffxlo,ysel-diffylo] = image
    image = padded
  }
  output = list(image = image, loc = c(x=xcen-xlo+1, y=ycen-ylo+1), loc.orig = c(x=xcen, y=ycen), loc.diff = c(x=xlo-1, y=ylo-1), xsel = xsel, ysel = ysel)
  if (plot) {
    magimage(image, ...)
  }
  # BUG FIX: was 'return = output', which assigns to a local variable named
  # 'return' and only worked because it happened to be the last expression.
  return(output)
}
# WCS-aware wrapper around magcutout. 'loc' may be given as RA/Dec (decimal
# degrees, or sexagesimal when coord.type='sex') or image pixels, per
# loc.type; 'box' is in arcsec when loc.type[2]='coord'. Accepts raw
# matrix+header, FITSio-style ($imDat/$hdr) or astro-style ($dat/$hdr)
# bundles. Returns the magcutout outputs plus WCS bookkeeping (loc.WCS,
# scale.WCS, usr.WCS corner table, an approx.map RA/Dec->cutout-pixel
# interpolator) and a header with CRPIX/NAXIS adjusted to the cutout frame.
magcutoutWCS=function(image, header, loc, box = c(100, 100), shiftloc=TRUE, paddim=TRUE, plot = FALSE, CRVAL1=0, CRVAL2=0, CRPIX1=0, CRPIX2=0, CD1_1=1, CD1_2=0, CD2_1=0, CD2_2=1, coord.type='deg', sep=':', loc.type=c('coord','coord'), ...){
  if(length(loc.type)==1){loc.type=rep(loc.type,2)}
  # Unpack bundled image+header objects and record which header flavour we got.
  if(!missing(image)){
    if(any(names(image)=='imDat') & missing(header)){
      imtype='FITSio'
      header=image$hdr
      image=image$imDat
    }
    if(any(names(image)=='dat') & missing(header)){
      imtype='astro'
      header=image$hdr[[1]]
      header=data.frame(key=header[,1],value=header[,2], stringsAsFactors = FALSE)
      image=image$dat[[1]]
    }
    if(any(names(image)=='image') & missing(header)){
      header=image$header
      image=image$image
      if(is.matrix(header) | is.data.frame(header)){imtype='astro'}else{imtype='FITSio'}
    }
    if(!missing(header)){
      if(is.matrix(header) | is.data.frame(header)){imtype='astro'}else{imtype='FITSio'}
    }
  }
  #Note below tempxy is R xy units, not FITS:
  if(missing(loc)){
    loc=magWCSxy2radec(dim(image)[1]/2, dim(image)[2]/2, header=header, CRVAL1=CRVAL1, CRVAL2=CRVAL2, CRPIX1=CRPIX1, CRPIX2=CRPIX2, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2)[1,]
    tempxy=cbind(dim(image)[1]/2, dim(image)[2]/2)
  }else{
    if(loc.type[1]=='coord'){
      if(coord.type=='sex'){loc[1]=hms2deg(loc[1],sep=sep); loc[2]=dms2deg(loc[2],sep=sep)}
      loc=as.numeric(loc)
      tempxy=magWCSradec2xy(loc[1], loc[2], header=header, CRVAL1=CRVAL1, CRVAL2=CRVAL2, CRPIX1=CRPIX1, CRPIX2=CRPIX2, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2)
    }else if(loc.type[1]=='image'){
      tempxy=rbind(loc)
      loc=magWCSxy2radec(loc[1], loc[2], header=header, CRVAL1=CRVAL1, CRVAL2=CRVAL2, CRPIX1=CRPIX1, CRPIX2=CRPIX2, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2)[1,]
    }
  }
  xcen = tempxy[1,1]
  ycen = tempxy[1,2]
  if(loc.type[2]=='coord'){
    # Convert the arcsec box to degrees, then measure each half-width in
    # pixels by projecting the four box-edge midpoints through the WCS.
    box=box/3600
    tempxy=magWCSradec2xy(loc[1]-box[1]/2/cos(loc[2]*pi/180), loc[2], header=header, CRVAL1=CRVAL1, CRVAL2=CRVAL2, CRPIX1=CRPIX1, CRPIX2=CRPIX2, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2)
    xlo = xcen - sqrt((tempxy[1,1]-xcen)^2+(tempxy[1,2]-ycen)^2)
    tempxy=magWCSradec2xy(loc[1]+box[1]/2/cos(loc[2]*pi/180), loc[2], header=header, CRVAL1=CRVAL1, CRVAL2=CRVAL2, CRPIX1=CRPIX1, CRPIX2=CRPIX2, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2)
    xhi = xcen + sqrt((tempxy[1,1]-xcen)^2+(tempxy[1,2]-ycen)^2)
    # BUG FIX: this call used 'radec2xy' while every sibling edge computation
    # uses 'magWCSradec2xy'; made consistent.
    tempxy=magWCSradec2xy(loc[1], loc[2]-box[2]/2, header=header, CRVAL1=CRVAL1, CRVAL2=CRVAL2, CRPIX1=CRPIX1, CRPIX2=CRPIX2, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2)
    ylo = ycen - sqrt((tempxy[1,1]-xcen)^2+(tempxy[1,2]-ycen)^2)
    tempxy=magWCSradec2xy(loc[1], loc[2]+box[2]/2, header=header, CRVAL1=CRVAL1, CRVAL2=CRVAL2, CRPIX1=CRPIX1, CRPIX2=CRPIX2, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2)
    yhi = ycen + sqrt((tempxy[1,1]-xcen)^2+(tempxy[1,2]-ycen)^2)
    xtemp=sort(c(xlo,xhi))
    xlo=ceiling(xtemp[1])
    xhi=ceiling(xtemp[2])
    ytemp=sort(c(ylo,yhi))
    ylo=ceiling(ytemp[1])
    yhi=ceiling(ytemp[2])
    box=c(xhi-xlo+1,yhi-ylo+1)
  }else{
    # box already in pixels - do nothing!
  }
  cutout = magcutout(image, loc = c(xcen,ycen), box = box, shiftloc=shiftloc, paddim=paddim, plot = FALSE)
  cut_image = cutout$image
  xlo = cutout$xsel[1]
  xhi = cutout$xsel[length(cutout$xsel)]
  ylo = cutout$ysel[1]
  yhi = cutout$ysel[length(cutout$ysel)]
  xcen.new=xcen-xlo+1
  ycen.new=ycen-ylo+1
  pixscale=getpixscale(header=header, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2)
  loc.diff = c(xlo - 1, ylo - 1)
  cut_xlo=1
  cut_xhi=dim(cut_image)[1]
  cut_ylo=1
  cut_yhi=dim(cut_image)[2]
  # RA/Dec of the four cutout corners in the original image frame.
  usr.WCS=rbind(
    magWCSxy2radec(xlo-1, ylo-1, header=header, CRVAL1=CRVAL1, CRVAL2=CRVAL2, CRPIX1=CRPIX1, CRPIX2=CRPIX2, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2),
    magWCSxy2radec(xlo-1, yhi, header=header, CRVAL1=CRVAL1, CRVAL2=CRVAL2, CRPIX1=CRPIX1, CRPIX2=CRPIX2, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2),
    magWCSxy2radec(xhi, ylo-1, header=header, CRVAL1=CRVAL1, CRVAL2=CRVAL2, CRPIX1=CRPIX1, CRPIX2=CRPIX2, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2),
    magWCSxy2radec(xhi, yhi, header=header, CRVAL1=CRVAL1, CRVAL2=CRVAL2, CRPIX1=CRPIX1, CRPIX2=CRPIX2, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2)
  )
  #Below we want to define the R image usr coordinate system, so if e.g. a matrix is 10x10 this would have elements 1:10 x 1:10 but a usr image range of 0->10 x 0->10, hence the minus 1s below. Even a single pixel has a finite image extent (0->1 x 0->1).
  usr.WCS=cbind(x.cut=c(cut_xlo-1, cut_xlo-1, cut_xhi, cut_xhi),
                y.cut=c(cut_ylo-1, cut_yhi, cut_ylo-1, cut_yhi),
                x.orig=c(xlo-1, xlo-1, xhi, xhi),
                y.orig=c(ylo-1, yhi, ylo-1, yhi),
                usr.WCS
  )
  # Fast linear RA/Dec -> cutout-pixel interpolators built from the corners.
  approx.map.RA=approxfun(seq(usr.WCS[1,'RA'],usr.WCS[4,'RA'],len=1e2),seq(usr.WCS[1,'x.cut'],usr.WCS[4,'x.cut'],len=1e2))
  approx.map.Dec=approxfun(seq(usr.WCS[1,'Dec'],usr.WCS[4,'Dec'],len=1e2),seq(usr.WCS[1,'y.cut'],usr.WCS[4,'y.cut'],len=1e2))
  approx.map=function(RA, Dec){
    # Accept either separate vectors or a 2-column [RA, Dec] matrix.
    if(length(dim(RA)) == 2){
      Dec = RA[, 2]
      RA = RA[, 1]
    }
    cbind(x=approx.map.RA(RA), y=approx.map.Dec(Dec))
  }
  if (plot) {
    magimageWCS(image=cut_image, header=header, loc.diff=loc.diff, CRVAL1=CRVAL1, CRVAL2=CRVAL2, CRPIX1=CRPIX1, CRPIX2=CRPIX2, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2, ...)
  }
  # Shift CRPIX/NAXIS so the returned header matches the cutout frame.
  if(!missing(header)){
    dimdiff = dim(cut_image)-dim(image)
    hdradd = list(CRPIX1 = -loc.diff[1], CRPIX2 = -loc.diff[2],
                  NAXIS1=dimdiff[1], NAXIS2=dimdiff[2])
    if(imtype=='FITSio'){
      for(hdrname in names(hdradd)){
        if(hdradd[[hdrname]] != 0){
          hdrrow = which(header==hdrname)+1
          header[hdrrow] = as.character(as.numeric(header[hdrrow]) + hdradd[[hdrname]])
        }
      }
    }else if(imtype=='astro'){
      for(hdrname in names(hdradd)){
        if(hdradd[[hdrname]] != 0){
          hdrrow = which(header[,"key"]==hdrname)
          header[hdrrow,"value"] = as.character(as.numeric(header[hdrrow,"value"]) + hdradd[[hdrname]])
        }
      }
    }else{
      header=NULL
    }
  }else{
    header=NULL
  }
  output = list(image = cut_image, loc = c(x=as.numeric(xcen.new), y=as.numeric(ycen.new)), loc.orig = c(x=as.numeric(xcen), y=as.numeric(ycen)), loc.diff = c(as.numeric(loc.diff[1]),as.numeric(loc.diff[2])), xsel = xlo:xhi, ysel = ylo:yhi, loc.WCS = loc, scale.WCS=pixscale, usr.WCS=usr.WCS, approx.map=approx.map, header=header)
  # BUG FIX: was 'return = output' (assignment to a variable named 'return').
  return(output)
}
|
/R/magcutout.R
|
no_license
|
taranu/magicaxis
|
R
| false
| false
| 8,757
|
r
|
# Extract a rectangular cutout of dimensions 'box' from matrix 'image',
# centred on 'loc' (R pixel coordinates, defaulting to the image centre).
# If the requested box overruns the image edge: shiftloc=TRUE slides the box
# back inside; paddim=TRUE (with shiftloc=FALSE) pads the output with NA so
# it keeps the requested dimensions. Returns a list with:
#   image    - the cutout matrix
#   loc      - target location in cutout coordinates
#   loc.orig - target location in original-image coordinates
#   loc.diff - pixel offset of the cutout's origin (xlo-1, ylo-1)
#   xsel/ysel - the selected pixel index ranges
# If plot=TRUE the cutout is displayed via magimage(image, ...).
magcutout=function(image, loc = dim(image)/2, box = c(100, 100), shiftloc=TRUE, paddim=TRUE, plot = FALSE, ...){
  loc = as.numeric(loc)
  xcen = loc[1]
  ycen = loc[2]
  loc = ceiling(loc)
  # Box edges centred on the integerised location (0.5 keeps box sizes exact).
  xlo = ceiling(loc[1] - (box[1]/2 - 0.5))
  xhi = ceiling(loc[1] + (box[1]/2 - 0.5))
  ylo = ceiling(loc[2] - (box[2]/2 - 0.5))
  yhi = ceiling(loc[2] + (box[2]/2 - 0.5))
  # Only shift-and-keep-size when both shifting and padding are requested.
  expand = paddim && shiftloc
  diffxlo = xlo - 1
  if (diffxlo < 0) {
    xlo = 1
    if(expand) xhi = xlo + (box[1] - 1)
  }
  diffxhi = xhi - dim(image)[1]
  if (diffxhi > 0) {
    xhi = dim(image)[1]
    if(expand) {
      xlo = xlo - diffxhi
      if(xlo < 1) xlo = 1
    }
  }
  diffylo = ylo - 1
  if (diffylo < 0) {
    ylo = 1
    if(expand) yhi = ylo + (box[2] - 1)
  }
  diffyhi = yhi - dim(image)[2]
  if (diffyhi > 0) {
    yhi = dim(image)[2]
    if(expand) {
      ylo = ylo - diffyhi
      if(ylo < 1) ylo = 1
    }
  }
  # Neither shifting nor padding: trim the opposite edge so the box stays
  # centred as well as possible within the image.
  if(!paddim && !shiftloc)
  {
    if(diffxlo < 0 && (-diffxlo > diffxhi)) xhi = xhi - max(diffxhi,0) + diffxlo
    if(diffxhi > 0 && (-diffxlo < diffxhi)) xlo = xlo + diffxhi - min(diffxlo,0)
    if(diffylo < 0 && (-diffylo > diffyhi)) yhi = yhi - max(diffyhi,0) + diffylo
    if(diffyhi > 0 && (-diffylo < diffyhi)) ylo = ylo + diffyhi - min(diffylo,0)
  }
  xsel = xlo:xhi
  ysel = ylo:yhi
  image = image[xsel, ysel]
  # Pad with NA to the full requested box when not shifting.
  if(paddim && !shiftloc && any(c(diffxlo,-diffxhi,diffylo,-diffyhi) < 0)) {
    padded = matrix(NA,box[1],box[2])
    padded[xsel-diffxlo,ysel-diffylo] = image
    image = padded
  }
  output = list(image = image, loc = c(x=xcen-xlo+1, y=ycen-ylo+1), loc.orig = c(x=xcen, y=ycen), loc.diff = c(x=xlo-1, y=ylo-1), xsel = xsel, ysel = ysel)
  if (plot) {
    magimage(image, ...)
  }
  # BUG FIX: was 'return = output', which assigns to a local variable named
  # 'return' and only worked because it happened to be the last expression.
  return(output)
}
# WCS-aware wrapper around magcutout. 'loc' may be given as RA/Dec (decimal
# degrees, or sexagesimal when coord.type='sex') or image pixels, per
# loc.type; 'box' is in arcsec when loc.type[2]='coord'. Accepts raw
# matrix+header, FITSio-style ($imDat/$hdr) or astro-style ($dat/$hdr)
# bundles. Returns the magcutout outputs plus WCS bookkeeping (loc.WCS,
# scale.WCS, usr.WCS corner table, an approx.map RA/Dec->cutout-pixel
# interpolator) and a header with CRPIX/NAXIS adjusted to the cutout frame.
magcutoutWCS=function(image, header, loc, box = c(100, 100), shiftloc=TRUE, paddim=TRUE, plot = FALSE, CRVAL1=0, CRVAL2=0, CRPIX1=0, CRPIX2=0, CD1_1=1, CD1_2=0, CD2_1=0, CD2_2=1, coord.type='deg', sep=':', loc.type=c('coord','coord'), ...){
  if(length(loc.type)==1){loc.type=rep(loc.type,2)}
  # Unpack bundled image+header objects and record which header flavour we got.
  if(!missing(image)){
    if(any(names(image)=='imDat') & missing(header)){
      imtype='FITSio'
      header=image$hdr
      image=image$imDat
    }
    if(any(names(image)=='dat') & missing(header)){
      imtype='astro'
      header=image$hdr[[1]]
      header=data.frame(key=header[,1],value=header[,2], stringsAsFactors = FALSE)
      image=image$dat[[1]]
    }
    if(any(names(image)=='image') & missing(header)){
      header=image$header
      image=image$image
      if(is.matrix(header) | is.data.frame(header)){imtype='astro'}else{imtype='FITSio'}
    }
    if(!missing(header)){
      if(is.matrix(header) | is.data.frame(header)){imtype='astro'}else{imtype='FITSio'}
    }
  }
  #Note below tempxy is R xy units, not FITS:
  if(missing(loc)){
    loc=magWCSxy2radec(dim(image)[1]/2, dim(image)[2]/2, header=header, CRVAL1=CRVAL1, CRVAL2=CRVAL2, CRPIX1=CRPIX1, CRPIX2=CRPIX2, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2)[1,]
    tempxy=cbind(dim(image)[1]/2, dim(image)[2]/2)
  }else{
    if(loc.type[1]=='coord'){
      if(coord.type=='sex'){loc[1]=hms2deg(loc[1],sep=sep); loc[2]=dms2deg(loc[2],sep=sep)}
      loc=as.numeric(loc)
      tempxy=magWCSradec2xy(loc[1], loc[2], header=header, CRVAL1=CRVAL1, CRVAL2=CRVAL2, CRPIX1=CRPIX1, CRPIX2=CRPIX2, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2)
    }else if(loc.type[1]=='image'){
      tempxy=rbind(loc)
      loc=magWCSxy2radec(loc[1], loc[2], header=header, CRVAL1=CRVAL1, CRVAL2=CRVAL2, CRPIX1=CRPIX1, CRPIX2=CRPIX2, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2)[1,]
    }
  }
  xcen = tempxy[1,1]
  ycen = tempxy[1,2]
  if(loc.type[2]=='coord'){
    # Convert the arcsec box to degrees, then measure each half-width in
    # pixels by projecting the four box-edge midpoints through the WCS.
    box=box/3600
    tempxy=magWCSradec2xy(loc[1]-box[1]/2/cos(loc[2]*pi/180), loc[2], header=header, CRVAL1=CRVAL1, CRVAL2=CRVAL2, CRPIX1=CRPIX1, CRPIX2=CRPIX2, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2)
    xlo = xcen - sqrt((tempxy[1,1]-xcen)^2+(tempxy[1,2]-ycen)^2)
    tempxy=magWCSradec2xy(loc[1]+box[1]/2/cos(loc[2]*pi/180), loc[2], header=header, CRVAL1=CRVAL1, CRVAL2=CRVAL2, CRPIX1=CRPIX1, CRPIX2=CRPIX2, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2)
    xhi = xcen + sqrt((tempxy[1,1]-xcen)^2+(tempxy[1,2]-ycen)^2)
    # BUG FIX: this call used 'radec2xy' while every sibling edge computation
    # uses 'magWCSradec2xy'; made consistent.
    tempxy=magWCSradec2xy(loc[1], loc[2]-box[2]/2, header=header, CRVAL1=CRVAL1, CRVAL2=CRVAL2, CRPIX1=CRPIX1, CRPIX2=CRPIX2, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2)
    ylo = ycen - sqrt((tempxy[1,1]-xcen)^2+(tempxy[1,2]-ycen)^2)
    tempxy=magWCSradec2xy(loc[1], loc[2]+box[2]/2, header=header, CRVAL1=CRVAL1, CRVAL2=CRVAL2, CRPIX1=CRPIX1, CRPIX2=CRPIX2, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2)
    yhi = ycen + sqrt((tempxy[1,1]-xcen)^2+(tempxy[1,2]-ycen)^2)
    xtemp=sort(c(xlo,xhi))
    xlo=ceiling(xtemp[1])
    xhi=ceiling(xtemp[2])
    ytemp=sort(c(ylo,yhi))
    ylo=ceiling(ytemp[1])
    yhi=ceiling(ytemp[2])
    box=c(xhi-xlo+1,yhi-ylo+1)
  }else{
    # box already in pixels - do nothing!
  }
  cutout = magcutout(image, loc = c(xcen,ycen), box = box, shiftloc=shiftloc, paddim=paddim, plot = FALSE)
  cut_image = cutout$image
  xlo = cutout$xsel[1]
  xhi = cutout$xsel[length(cutout$xsel)]
  ylo = cutout$ysel[1]
  yhi = cutout$ysel[length(cutout$ysel)]
  xcen.new=xcen-xlo+1
  ycen.new=ycen-ylo+1
  pixscale=getpixscale(header=header, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2)
  loc.diff = c(xlo - 1, ylo - 1)
  cut_xlo=1
  cut_xhi=dim(cut_image)[1]
  cut_ylo=1
  cut_yhi=dim(cut_image)[2]
  # RA/Dec of the four cutout corners in the original image frame.
  usr.WCS=rbind(
    magWCSxy2radec(xlo-1, ylo-1, header=header, CRVAL1=CRVAL1, CRVAL2=CRVAL2, CRPIX1=CRPIX1, CRPIX2=CRPIX2, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2),
    magWCSxy2radec(xlo-1, yhi, header=header, CRVAL1=CRVAL1, CRVAL2=CRVAL2, CRPIX1=CRPIX1, CRPIX2=CRPIX2, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2),
    magWCSxy2radec(xhi, ylo-1, header=header, CRVAL1=CRVAL1, CRVAL2=CRVAL2, CRPIX1=CRPIX1, CRPIX2=CRPIX2, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2),
    magWCSxy2radec(xhi, yhi, header=header, CRVAL1=CRVAL1, CRVAL2=CRVAL2, CRPIX1=CRPIX1, CRPIX2=CRPIX2, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2)
  )
  #Below we want to define the R image usr coordinate system, so if e.g. a matrix is 10x10 this would have elements 1:10 x 1:10 but a usr image range of 0->10 x 0->10, hence the minus 1s below. Even a single pixel has a finite image extent (0->1 x 0->1).
  usr.WCS=cbind(x.cut=c(cut_xlo-1, cut_xlo-1, cut_xhi, cut_xhi),
                y.cut=c(cut_ylo-1, cut_yhi, cut_ylo-1, cut_yhi),
                x.orig=c(xlo-1, xlo-1, xhi, xhi),
                y.orig=c(ylo-1, yhi, ylo-1, yhi),
                usr.WCS
  )
  # Fast linear RA/Dec -> cutout-pixel interpolators built from the corners.
  approx.map.RA=approxfun(seq(usr.WCS[1,'RA'],usr.WCS[4,'RA'],len=1e2),seq(usr.WCS[1,'x.cut'],usr.WCS[4,'x.cut'],len=1e2))
  approx.map.Dec=approxfun(seq(usr.WCS[1,'Dec'],usr.WCS[4,'Dec'],len=1e2),seq(usr.WCS[1,'y.cut'],usr.WCS[4,'y.cut'],len=1e2))
  approx.map=function(RA, Dec){
    # Accept either separate vectors or a 2-column [RA, Dec] matrix.
    if(length(dim(RA)) == 2){
      Dec = RA[, 2]
      RA = RA[, 1]
    }
    cbind(x=approx.map.RA(RA), y=approx.map.Dec(Dec))
  }
  if (plot) {
    magimageWCS(image=cut_image, header=header, loc.diff=loc.diff, CRVAL1=CRVAL1, CRVAL2=CRVAL2, CRPIX1=CRPIX1, CRPIX2=CRPIX2, CD1_1=CD1_1, CD1_2=CD1_2, CD2_1=CD2_1, CD2_2=CD2_2, ...)
  }
  # Shift CRPIX/NAXIS so the returned header matches the cutout frame.
  if(!missing(header)){
    dimdiff = dim(cut_image)-dim(image)
    hdradd = list(CRPIX1 = -loc.diff[1], CRPIX2 = -loc.diff[2],
                  NAXIS1=dimdiff[1], NAXIS2=dimdiff[2])
    if(imtype=='FITSio'){
      for(hdrname in names(hdradd)){
        if(hdradd[[hdrname]] != 0){
          hdrrow = which(header==hdrname)+1
          header[hdrrow] = as.character(as.numeric(header[hdrrow]) + hdradd[[hdrname]])
        }
      }
    }else if(imtype=='astro'){
      for(hdrname in names(hdradd)){
        if(hdradd[[hdrname]] != 0){
          hdrrow = which(header[,"key"]==hdrname)
          header[hdrrow,"value"] = as.character(as.numeric(header[hdrrow,"value"]) + hdradd[[hdrname]])
        }
      }
    }else{
      header=NULL
    }
  }else{
    header=NULL
  }
  output = list(image = cut_image, loc = c(x=as.numeric(xcen.new), y=as.numeric(ycen.new)), loc.orig = c(x=as.numeric(xcen), y=as.numeric(ycen)), loc.diff = c(as.numeric(loc.diff[1]),as.numeric(loc.diff[2])), xsel = xlo:xhi, ysel = ylo:yhi, loc.WCS = loc, scale.WCS=pixscale, usr.WCS=usr.WCS, approx.map=approx.map, header=header)
  # BUG FIX: was 'return = output' (assignment to a variable named 'return').
  return(output)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects_doc.R
\docType{data}
\name{cntype.hetloss}
\alias{cntype.hetloss}
\title{copy number type "Heterozygous loss" (loss of one of the two copies).}
\format{character}
\source{
\code{cntype.hetloss <- "Heterozygous loss"}
}
\usage{
cntype.hetloss
}
\description{
copy number type "Heterozygous loss" (loss of one of the two copies).
}
\keyword{datasets}
|
/man/cntype.hetloss.Rd
|
permissive
|
1512474508/oncoscanR
|
R
| false
| true
| 435
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/objects_doc.R
\docType{data}
\name{cntype.hetloss}
\alias{cntype.hetloss}
\title{copy number type "Heterozygous loss" (loss of one of the two copies).}
\format{character}
\source{
\code{cntype.hetloss <- "Heterozygous loss"}
}
\usage{
cntype.hetloss
}
\description{
copy number type "Heterozygous loss" (loss of one of the two copies).
}
\keyword{datasets}
|
#' Calculate statistical power using effect size and confidence interval data
#'
#' Calculate statistical power using effect size and confidence interval data that
#' is typically reported in meta-analyses. Power is calculated assuming
#' the true effect size and a range of effect sizes. The script to calculate power
#' was adapted from the "viz_sunset" function of the metaviz package.
#'
#' \strong{The true effect size}
#'
#' For the purposes of power analysis a "true" effect size needs to be specified. This
#' is typically difficult to establish in practice, as reported effect sizes are typically
#' inflated. Here, the observed summary effect size estimate reported in the
#' meta-analysis results is used as one possible true effect size, however, other effect sizes can
#' be specified here instead. Additionally, statistical power for a range of true effect sizes
#' are returned (0.1 to 1, in increments of 0.1).
#'
#' @param dat A dataset that contains one column with observed effect sizes or outcomes
#' labelled "yi", a column labelled "lower" with the lower confidence
#' interval bound, and column labelled "upper" with the upper confidence
#' interval bound. This function assumes a 95\% confidence interval.
#' @param observed_es The observed summary effect size estimate for the meta-analysis, which is one output
#' for the set of possible "true" effect sizes. See 'Details'
#' @param name A label with a name for the meta-analysis, which is required for using the "firepower" function.
#' See 'Examples'
#' @return This function returns the following:
#' \item{dat}{A dataset with results from power analyses for a range of effect sizes, including the
#' specified observed effect size, in a column labelled "es_observed". The additional added columns include
#' results for power analysis assuming a range of true effect sizes, beginning at 0.1 ("power_es01"),
#' then 0.2 ("power_es02"), then continuing in increments of 0.1 up to 1 ("power_es1").}
#' @examples
#' keech_power <- mapower_ul(
#' dat = dat_keech,
#' observed_es = 0.08,
#' name = "Keech et al 2017")
#' keech_power
#'
mapower_ul <- function(dat, observed_es, name) {
  # Two-sided alpha = 0.05 critical z values, shared by every power computation.
  z_hi <- stats::qnorm(1 - 0.05 / 2)
  z_lo <- stats::qnorm(0.05 / 2)
  # Power of a two-sided z test for studies with standard error 'sei' when the
  # assumed true effect size is 'es' (vectorised over 'sei').
  study_power <- function(es, sei) {
    (1 - stats::pnorm(z_hi * sei, abs(es), sei)) +
      stats::pnorm(z_lo * sei, abs(es), sei)
  }
  # Standard error recovered from the reported 95% CI bounds.
  dat[["sei"]] <- (dat[["upper"]] - dat[["lower"]]) / (2 * 1.96)
  # Power for the observed summary effect, then for true effects 0.1..1
  # (columns power_es01 .. power_es09, power_es1 — same names/order as before).
  dat[["power_es_observed"]] <- study_power(observed_es, dat[["sei"]])
  es_grid <- c(0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)
  power_cols <- c(paste0("power_es0", 1:9), "power_es1")
  for (i in seq_along(es_grid)) {
    dat[[power_cols[i]]] <- study_power(es_grid[i], dat[["sei"]])
  }
  dat <- as.data.frame(dat)
  # Median power across studies for each assumed effect size. The initial
  # zero-valued 'power_es_observed' column is kept only for backward
  # compatibility with the original output layout; the observed-effect
  # median lives in 'es_observed'.
  median_cols <- c(paste0("es0", 1:9), "es1")
  power_median_dat <- data.frame(power_es_observed = numeric(1))
  for (nm in median_cols) {
    power_median_dat[[nm]] <- numeric(1)
  }
  power_median_dat[["es_observed"]] <- median(dat[["power_es_observed"]])
  for (i in seq_along(median_cols)) {
    power_median_dat[[median_cols[i]]] <- median(dat[[power_cols[i]]])
  }
  # Base-R column append; behaviourally identical to the previous
  # dplyr::mutate() call but avoids the hard dplyr dependency.
  power_median_dat[["meta_analysis_name"]] <- name
  value <- list(
    dat = dat,
    power_median_dat = power_median_dat
  ) # Create a list of output objects
  attr(value, "class") <- "mapower_ul"
  value
}
|
/R/mapower_ul.R
|
permissive
|
sayanmitra/metameta
|
R
| false
| false
| 7,410
|
r
|
#' Calculate statistical power using effect size and confidence interval data
#'
#' Calculate statistical power using effect size and confidence interval data that
#' is typically reported in meta-analyses. Power is calculated assuming
#' the true effect size and a range of effect sizes. The script to calculate power
#' was adapted from the "viz_sunset" function of the metaviz package.
#'
#' \strong{The true effect size}
#'
#' For the purposes of power analysis a "true" effect size needs to be specified. This
#' is typically difficult to establish in practice, as reported effect sizes are typically
#' inflated. Here, the observed summary effect size estimate reported in the
#' meta-analysis results is used as one possible true effect size, however, other effect sizes can
#' be specified here instead. Additionally, statistical power for a range of true effect sizes
#' are returned (0.1 to 1, in increments of 0.1).
#'
#' @param dat A dataset that contains one column with observed effect sizes or outcomes
#' labelled "yi", a column labelled "lower" with the lower confidence
#' interval bound, and column labelled "upper" with the upper confidence
#' interval bound. This function assumes a 95\% confidence interval.
#' @param observed_es The observed summary effect size estimate for the meta-analysis, which is one output
#' for the set of possible "true" effect sizes. See 'Details'
#' @param name A label with a name for the meta-analysis, which is required for using the "firepower" function.
#' See 'Examples'
#' @return This function returns the following:
#' \item{dat}{A dataset with results from power analyses for a range of effect sizes, including the
#' specified observed effect size, in a column labelled "es_observed". The additional added columns include
#' results for power analysis assuming a range of true effect sizes, beginning at 0.1 ("power_es01"),
#' then 0.2 ("power_es02"), then continuing in increments of 0.1 up to 1 ("power_es1").}
#' @examples
#' keech_power <- mapower_ul(
#' dat = dat_keech,
#' observed_es = 0.08,
#' name = "Keech et al 2017")
#' keech_power
#'
mapower_ul <-
function(dat, observed_es, name)
# Compute per-study statistical power for a meta-analytic data set, both at
# the observed summary effect size and across a grid of assumed true effect
# sizes (0.1 to 1 in steps of 0.1).
#
# dat         : data.frame with columns "yi", "lower" and "upper", where
#               lower/upper are the bounds of a 95% confidence interval.
# observed_es : observed summary effect size, used as one candidate "true"
#               effect size.
# name        : label for the meta-analysis, carried into power_median_dat
#               (needed by the downstream "firepower" function).
#
# Returns a list of class "mapower_ul" with:
#   dat              - the input data plus "sei" and the per-study power
#                      columns power_es_observed, power_es01 ... power_es09,
#                      power_es1.
#   power_median_dat - a one-row data.frame of median power across studies.
{
  # Two-sided power of an alpha = .05 z-test for a study with standard error
  # `se`, assuming the true effect size is `es` (vectorized over `se`).
  power_at <- function(es, se) {
    (1 - stats::pnorm(stats::qnorm(1 - 0.05 / 2) * se, abs(es), se)) +
      stats::pnorm(stats::qnorm(0.05 / 2) * se, abs(es), se)
  }
  # Standard error recovered from the 95% confidence interval bounds.
  dat[["sei"]] <- (dat[["upper"]] - dat[["lower"]]) / (2 * 1.96)
  # Power at the observed summary effect size.
  dat[["power_es_observed"]] <- power_at(observed_es, dat[["sei"]])
  # Power over the grid of assumed true effect sizes 0.1, 0.2, ..., 1.
  # (1:10)/10 reproduces the exact double values of the original literals.
  es_grid <- (1:10) / 10
  power_cols <- c(paste0("power_es0", 1:9), "power_es1")
  for (k in seq_along(es_grid)) {
    dat[[power_cols[k]]] <- power_at(es_grid[k], dat[["sei"]])
  }
  dat <- as.data.frame(dat)
  # Median power across studies for each assumed effect size.
  # NOTE(review): the initial "power_es_observed" column is never filled and
  # stays 0, matching the original behavior; the observed-effect median is
  # stored in "es_observed" instead.
  power_median_dat <- data.frame(power_es_observed = numeric(1))
  median_cols <- c(paste0("es0", 1:9), "es1")
  for (k in seq_along(median_cols)) {
    power_median_dat[[median_cols[k]]] <- median(dat[[power_cols[k]]])
  }
  power_median_dat[["es_observed"]] <- median(dat[["power_es_observed"]])
  # Base assignment replaces dplyr::mutate(), dropping the dplyr dependency.
  power_median_dat[["meta_analysis_name"]] <- name
  # Assemble the classed result consumed by print/firepower methods.
  value <- list(
    dat = dat,
    power_median_dat = power_median_dat
  )
  attr(value, "class") <- "mapower_ul"
  value
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DWR.R
\docType{package}
\name{DWR}
\alias{DWR}
\alias{DWR-package}
\title{DWR: a package for getting data from cdec.water.ca.gov}
\description{
The DWR package contains a single function (CDECquery) and two built-in datasets (sensorIDs and stationIDs).
}
\section{CDECquery function}{
This function is authored by Dylan Beaudette and implemented here as a standalone-version of the one in the sharpshootR package (https://CRAN.R-project.org/package=sharpshootR). Documentation has been generated to include the "E" duration type, as it's worked for us so far without modifying the original function (although limited testing has been done).
}
|
/man/DWR.Rd
|
no_license
|
Myfanwy/DWRpackage
|
R
| false
| true
| 724
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DWR.R
\docType{package}
\name{DWR}
\alias{DWR}
\alias{DWR-package}
\title{DWR: a package for getting data from cdec.water.ca.gov}
\description{
The DWR package contains a single function (CDECquery) and two built-in datasets (sensorIDs and stationIDs).
}
\section{CDECquery function}{
This function is authored by Dylan Beaudette and implemented here as a standalone-version of the one in the sharpshootR package (https://CRAN.R-project.org/package=sharpshootR). Documentation has been generated to include the "E" duration type, as it's worked for us so far without modifying the original function (although limited testing has been done).
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_PPP.R
\name{compute_state_length}
\alias{compute_state_length}
\title{Compute the state duration for each state across three equal-size splits of the input data.}
\usage{
compute_state_length(input_data)
}
\description{
Compute the state duration for each state across three equal-size splits of the input data.
}
|
/man/compute_state_length.Rd
|
permissive
|
JasperHG90/sleepsimReval
|
R
| false
| true
| 398
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils_PPP.R
\name{compute_state_length}
\alias{compute_state_length}
\title{Compute the state duration for each state across three equal-size splits of the input data.}
\usage{
compute_state_length(input_data)
}
\description{
Compute the state duration for each state across three equal-size splits of the input data.
}
|
#
# Release Version 130602 build - V1.0 (micromapST)
#
panelFill <-
function(col="#D0D0D0",border=NA,...)
# Fill the current panel (the "usr" box) with color `col`, outlining it in
# `border` (NA = no outline). Extra arguments are passed through to polygon().
{
  box <- par("usr")                    # c(x1, x2, y1, y2) of the active panel
  polygon(x = box[c(1, 2, 2, 1)], y = box[c(3, 3, 4, 4)],
          col = col, border = border, xpd = TRUE, ...)
}
panelGrid <-
function(x = NULL, y = NULL, col = 2, lwd = 1, lty = 1)
# Draw reference grid lines in the current panel: vertical lines at the
# positions in `x`, horizontal lines at the positions in `y`. Either may be
# NULL to skip that direction. Defaults: col = 2 (red), lwd = 1, lty = solid.
{
  if (!is.null(x)) {
    abline(v = x, col = col, lwd = lwd, lty = lty)
  }
  if (!is.null(y)) {
    abline(h = y, col = col, lwd = lwd, lty = lty)
  }
}
panelInbounds <-
function(bnds)
# Return pretty() tick locations that fall strictly inside the interval
# c(min, max) given by `bnds` (endpoints themselves are excluded).
{
  ticks <- pretty(bnds)
  ticks[ticks > bnds[1] & ticks < bnds[2]]
}
panelLengthen <-
function(x, n=1)
# Recycle `x` to exactly `n` elements (truncating the final repetition if
# needed). An empty `x` yields a vector of `n` zeros; n < 1 is an error.
{
  if (n < 1) stop("panelLengthen: invalid required length")
  if (length(x) == 0) {
    return(rep(0, n))                      # no source data: supply zeros
  }
  out <- rep(x, ceiling(n / length(x)))    # at least n elements
  length(out) <- n                         # trim to exactly n
  out
}
panelOutline <-
function(col = "black", lwd = 1, lty = 1)
# Outline the current panel (the "usr" box) in color `col` with line width
# `lwd` and line type `lty`.
# Fix: `lwd` and `lty` were previously accepted but silently ignored; they
# are now forwarded to polygon() so callers get the line style they request.
{
  xy <- par("usr")   # c(x1, x2, y1, y2) of the active panel
  polygon(xy[c(1, 2, 2, 1)], xy[c(3, 3, 4, 4)],
          density = 0, col = col, lwd = lwd, lty = lty, xpd = TRUE)
}
panelScale <-
function(rx = c(0, 1), ry = c(0, 1),firstp=FALSE, inches = FALSE)
# Establish the user coordinate system of the current panel by issuing an
# empty plot with exact ("i") axis ranges rx and ry. When `inches` is TRUE,
# rx/ry are replaced by the current plot region size in inches. `firstp` is
# accepted for call compatibility but not used. Returns list(rx, ry).
{
  if (inches) {
    sz <- par("pin")        # plot region size in inches
    rx <- c(0, sz[1])
    ry <- c(0, sz[2])
  }
  oldWarn <- unlist(options("warn"))
  options(warn = -1)        # par(new=TRUE) warns if no plot exists yet
  par(new = TRUE)
  options(warn = oldWarn)
  plot(rx, ry, type = "n", axes = FALSE, xaxs = "i", yaxs = "i",
       xlab = "", ylab = "", main = "")
  list(rx = rx, ry = ry)
}
panelSelect <-
function(layout, i = 1, j = 1, margin = NULL)
# Make one region of `layout` (a structure built by panelLayout) the active
# plotting region by setting par(fig=, mai=).
#
# layout : list with dim, datfig, labfig, pad, brd (see panelLayout).
# i, j   : row and column index of the data panel (checked against layout$dim).
# margin : NULL to select data panel (i, j); otherwise one of "left",
#          "right", "top", "bottom" (alias "bot") to select a label region.
#
# Fix: removed the unused local `vec = 1:4` from the original.
{
  dim <- layout$dim
  if (i > dim[1] || j > dim[2])
    stop("PS-01 Dimension error. Program error - index 'i' or 'j' is out of bounds.")
  if (is.null(margin)) {
    # Data panel: datfig rows are stored row-major (ncol * (row-1) + col).
    k <- dim[2] * (i - 1) + j
    par(fig = layout$datfig[k, ],
        mai = layout$pad[c(4, 1, 3, 2)])   # pad c(L,R,T,B) -> mai c(B,L,T,R)
  } else {
    # Margin/label region: pick the matching row of labfig.
    nam <- c("left", "right", "top", "bottom", "bot")
    ind <- match(margin, nam)
    if (is.na(ind))
      stop("Bad label region name")
    if (ind == 5) ind <- 4   # "bot" is an alias for "bottom"
    par(fig = layout$labfig[ind, ],
        mai = layout$brd[c(4, 1, 3, 2)])   # brd c(L,R,T,B) -> mai c(B,L,T,R)
  }
}
panelLayout <-
function(nrow = 1,
ncol = 1,
leftMargin = 0, # inches
rightMargin = 0, # inches
topMargin = 1, # inches, leave room for page titles.
bottomMargin = 0, # inches
borders = rep(0.5, 4), # inches
# The figure borders are left(1), right, top, bottom(4)
colSize = 1,
rowSize = 1,
colSep = 0,
rowSep = 0,
pad = NULL)
# Compute the page geometry for an nrow x ncol grid of panels plus the four
# surrounding margin/label regions, expressed in normalized device
# coordinates (0..1). Returns the list consumed by panelSelect():
#   dim      - c(nrow, ncol)
#   datfig   - one row c(x1, x2, y1, y2) per data panel, row-major
#   labfig   - four rows: left, right, top, bottom label regions
#   brd, pad - border and padding sizes in inches
#   coltabs, rowtabs - column/row tab positions in inches
#   figsize  - device size c(width, height) in inches
# colSize/rowSize are relative panel sizes; colSep/rowSep are separator gaps
# in inches (both recycled via panelLengthen()).
# NOTE(review): reads the device size via par()$din, so a graphics device is
# required (par() will open the default device if none is active) — confirm
# callers always have a device open.
{
# Note fig matrices rounded to 6 places in an effort of avoid a R problem with fig when
# values appear in exponential notation.
oldpar = par() # save original par values.
din = oldpar$din # get device dimensions (inches)
din.x = din[1] # x = width
din.y = din[2] # y = height
plotX = din.x - borders[1] - borders[2] - leftMargin - rightMargin # usable width inches
plotY = din.y - borders[3] - borders[4] - bottomMargin - topMargin # usable height inches
# bounds (x1, x2, y1, y2)
# bounds (edge left, margin left, margin right, edge right) shifted right by "borders[1]"
xbnds = c(0, leftMargin, leftMargin + plotX, leftMargin + plotX +
rightMargin) + borders[1] #shift all by the left border
# bounds (edge bottom, margin bottom, margin top, edge top) shifted up by "borders[4]"
ybnds = c(0, bottomMargin, bottomMargin + plotY, bottomMargin +
plotY + topMargin) + borders[4] # shift all by bottom border
# the right and top borders are handled in the first calculation.
# fig.scale = inch coordinates of device space.
fig.scale = c(din.x, din.x, din.y, din.y)
# left figure is in the left margin space of the plot area from top to bottom
leftfig = c(xbnds[1] - borders[1], xbnds[2] + borders[2], ybnds[1] -
borders[4], ybnds[4] + borders[3])
# right figure is in the right margin space of the plot area from top to bottom
rightfig = c(xbnds[3] - borders[1], xbnds[4] + borders[2], ybnds[1] -
borders[4], ybnds[4] + borders[3])
# top figure is in the top margin space from from left to right
topfig = c(xbnds[1] - borders[1], xbnds[4] + borders[2], ybnds[3] -
borders[4], ybnds[4] + borders[3])
# bottom figure is in the bottom margin space from left to right
botfig = c(xbnds[1] - borders[1], xbnds[4] + borders[2], ybnds[1] -
borders[4], ybnds[2] + borders[3])
# these figure areas are from the devices left to right and top to bottom limits.
#
# Recycle the separator vectors to one entry per gap (n panels -> n+1 gaps).
colSep = panelLengthen(colSep, ncol + 1) # initially 1 element of zero - now "ncol" elements
rowSep = panelLengthen(rowSep, nrow + 1) # nrow elements.
if(is.null(pad))
{ # no pad, initialize
pad = c(borders[1] + colSep[1] + leftMargin,
borders[2] + colSep[ncol + 1] + rightMargin,
borders[3] + rowSep[1] + topMargin,
borders[4] + rowSep[nrow + 1] + bottomMargin)
}
# The borders should align around the edge.
colSep = cumsum(colSep) # convert individual spaces to cumulative sums.
rowSep = cumsum(rowSep)
plotX = plotX - colSep[ncol + 1] # subtract room for separators (value of last colSep or rowSep
plotY = plotY - rowSep[nrow + 1]
# the colSize and rowSize values are relative to a projected sum of units.
# sum(colSize) = ncol (number of columns)
# sum(rowSize) = 71.64 as coded for 10 full groups and the median group
relx = panelLengthen(colSize, ncol) # size required == 3 * 1 relx is 3 elements of 1
rely = panelLengthen(rowSize, nrow) # size required == 10 * 7 + 1.64
relx = relx/sum(relx) # Factional each element / sum
rely = rely/sum(rely)
# Cumulative panel offsets (inches) from the plot-area origin.
xinc = plotX * cumsum(c(0, relx))
yinc = plotY * cumsum(c(0, rely))
fig = matrix(0, nrow = nrow * ncol, ncol = 4)
k = 0
# Row-major fill: panel (i, j) lands in row k = ncol*(i-1)+j, matching
# the indexing used by panelSelect().
for(i in 1:nrow) {
for(j in 1:ncol) {
k = k + 1
fig[k, 1] = xbnds[2] + xinc[j] + colSep[j] - pad[1] # x.pos k<=1:- start
fig[k, 2] = xbnds[2] + xinc[j + 1] + colSep[j] + pad[2] # x pos - end
fig[k, 4] = ybnds[3] - yinc[i] - rowSep[i] + pad[3] # y pos - start
fig[k, 3] = ybnds[3] - yinc[i + 1] - rowSep[i] - pad[4] # y pos - end
}
}
# fig now has the x1, x2, y1, y2 physical position of each panel on the page.
labfig = rbind(leftfig, rightfig, topfig, botfig)
# lab figure has four rows - one for each margin/border space around the plot area.
# Scale the "inch" coordinates to coordinates 0 to 1 (relative)
fig = abs(t(t(fig)/fig.scale))
labfig = t(t(labfig)/fig.scale)
# coltabs are in inches and start inside the left border
coltabs = cbind(c(0, colSep + xinc + leftMargin), leftMargin + c(0,colSep) + c(xinc,
xinc[length(xinc)] + rightMargin))
# rowtabs are in inches and start below the upper border
# yinc is padded with a leading 0.
rowtabs = cbind(c(ybnds[3], ybnds[3] - rowSep - c(yinc[-1], yinc[nrow + 1] + bottomMargin)),
c(ybnds[4], ybnds[3] - rowSep - yinc)) - borders[4]
# The tabs provide the physical points for each panel.
list(dim = c(nrow, ncol), datfig = round(fig,6), labfig = round(labfig,6), brd = borders,
pad = pad, coltabs = coltabs, rowtabs = rowtabs,
figsize = c(din.x, din.y))
}
|
/micromapST/R/panelFunctions.r
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 10,013
|
r
|
#
# Release Version 130602 build - V1.0 (micromapST)
#
panelFill <-
function(col="#D0D0D0",border=NA,...)
# Fill the current panel (the "usr" box) with color `col`, outlining it in
# `border` (NA = no outline). Extra arguments are passed through to polygon().
{
  box <- par("usr")                    # c(x1, x2, y1, y2) of the active panel
  polygon(x = box[c(1, 2, 2, 1)], y = box[c(3, 3, 4, 4)],
          col = col, border = border, xpd = TRUE, ...)
}
panelGrid <-
function(x = NULL, y = NULL, col = 2, lwd = 1, lty = 1)
# Draw reference grid lines in the current panel: vertical lines at the
# positions in `x`, horizontal lines at the positions in `y`. Either may be
# NULL to skip that direction. Defaults: col = 2 (red), lwd = 1, lty = solid.
{
  if (!is.null(x)) {
    abline(v = x, col = col, lwd = lwd, lty = lty)
  }
  if (!is.null(y)) {
    abline(h = y, col = col, lwd = lwd, lty = lty)
  }
}
panelInbounds <-
function(bnds)
# Return pretty() tick locations that fall strictly inside the interval
# c(min, max) given by `bnds` (endpoints themselves are excluded).
{
  ticks <- pretty(bnds)
  ticks[ticks > bnds[1] & ticks < bnds[2]]
}
panelLengthen <-
function(x, n=1)
# Recycle `x` to exactly `n` elements (truncating the final repetition if
# needed). An empty `x` yields a vector of `n` zeros; n < 1 is an error.
{
  if (n < 1) stop("panelLengthen: invalid required length")
  if (length(x) == 0) {
    return(rep(0, n))                      # no source data: supply zeros
  }
  out <- rep(x, ceiling(n / length(x)))    # at least n elements
  length(out) <- n                         # trim to exactly n
  out
}
panelOutline <-
function(col = "black", lwd = 1, lty = 1)
# Outline the current panel (the "usr" box) in color `col` with line width
# `lwd` and line type `lty`.
# Fix: `lwd` and `lty` were previously accepted but silently ignored; they
# are now forwarded to polygon() so callers get the line style they request.
{
  xy <- par("usr")   # c(x1, x2, y1, y2) of the active panel
  polygon(xy[c(1, 2, 2, 1)], xy[c(3, 3, 4, 4)],
          density = 0, col = col, lwd = lwd, lty = lty, xpd = TRUE)
}
panelScale <-
function(rx = c(0, 1), ry = c(0, 1),firstp=FALSE, inches = FALSE)
# Establish the user coordinate system of the current panel by issuing an
# empty plot with exact ("i") axis ranges rx and ry. When `inches` is TRUE,
# rx/ry are replaced by the current plot region size in inches. `firstp` is
# accepted for call compatibility but not used. Returns list(rx, ry).
{
  if (inches) {
    sz <- par("pin")        # plot region size in inches
    rx <- c(0, sz[1])
    ry <- c(0, sz[2])
  }
  oldWarn <- unlist(options("warn"))
  options(warn = -1)        # par(new=TRUE) warns if no plot exists yet
  par(new = TRUE)
  options(warn = oldWarn)
  plot(rx, ry, type = "n", axes = FALSE, xaxs = "i", yaxs = "i",
       xlab = "", ylab = "", main = "")
  list(rx = rx, ry = ry)
}
panelSelect <-
function(layout, i = 1, j = 1, margin = NULL)
# Make one region of `layout` (a structure built by panelLayout) the active
# plotting region by setting par(fig=, mai=).
#
# layout : list with dim, datfig, labfig, pad, brd (see panelLayout).
# i, j   : row and column index of the data panel (checked against layout$dim).
# margin : NULL to select data panel (i, j); otherwise one of "left",
#          "right", "top", "bottom" (alias "bot") to select a label region.
#
# Fix: removed the unused local `vec = 1:4` from the original.
{
  dim <- layout$dim
  if (i > dim[1] || j > dim[2])
    stop("PS-01 Dimension error. Program error - index 'i' or 'j' is out of bounds.")
  if (is.null(margin)) {
    # Data panel: datfig rows are stored row-major (ncol * (row-1) + col).
    k <- dim[2] * (i - 1) + j
    par(fig = layout$datfig[k, ],
        mai = layout$pad[c(4, 1, 3, 2)])   # pad c(L,R,T,B) -> mai c(B,L,T,R)
  } else {
    # Margin/label region: pick the matching row of labfig.
    nam <- c("left", "right", "top", "bottom", "bot")
    ind <- match(margin, nam)
    if (is.na(ind))
      stop("Bad label region name")
    if (ind == 5) ind <- 4   # "bot" is an alias for "bottom"
    par(fig = layout$labfig[ind, ],
        mai = layout$brd[c(4, 1, 3, 2)])   # brd c(L,R,T,B) -> mai c(B,L,T,R)
  }
}
panelLayout <-
function(nrow = 1,
ncol = 1,
leftMargin = 0, # inches
rightMargin = 0, # inches
topMargin = 1, # inches, leave room for page titles.
bottomMargin = 0, # inches
borders = rep(0.5, 4), # inches
# The figure borders are left(1), right, top, bottom(4)
colSize = 1,
rowSize = 1,
colSep = 0,
rowSep = 0,
pad = NULL)
# Compute the page geometry for an nrow x ncol grid of panels plus the four
# surrounding margin/label regions, expressed in normalized device
# coordinates (0..1). Returns the list consumed by panelSelect():
#   dim      - c(nrow, ncol)
#   datfig   - one row c(x1, x2, y1, y2) per data panel, row-major
#   labfig   - four rows: left, right, top, bottom label regions
#   brd, pad - border and padding sizes in inches
#   coltabs, rowtabs - column/row tab positions in inches
#   figsize  - device size c(width, height) in inches
# colSize/rowSize are relative panel sizes; colSep/rowSep are separator gaps
# in inches (both recycled via panelLengthen()).
# NOTE(review): reads the device size via par()$din, so a graphics device is
# required (par() will open the default device if none is active) — confirm
# callers always have a device open.
{
# Note fig matrices rounded to 6 places in an effort of avoid a R problem with fig when
# values appear in exponential notation.
oldpar = par() # save original par values.
din = oldpar$din # get device dimensions (inches)
din.x = din[1] # x = width
din.y = din[2] # y = height
plotX = din.x - borders[1] - borders[2] - leftMargin - rightMargin # usable width inches
plotY = din.y - borders[3] - borders[4] - bottomMargin - topMargin # usable height inches
# bounds (x1, x2, y1, y2)
# bounds (edge left, margin left, margin right, edge right) shifted right by "borders[1]"
xbnds = c(0, leftMargin, leftMargin + plotX, leftMargin + plotX +
rightMargin) + borders[1] #shift all by the left border
# bounds (edge bottom, margin bottom, margin top, edge top) shifted up by "borders[4]"
ybnds = c(0, bottomMargin, bottomMargin + plotY, bottomMargin +
plotY + topMargin) + borders[4] # shift all by bottom border
# the right and top borders are handled in the first calculation.
# fig.scale = inch coordinates of device space.
fig.scale = c(din.x, din.x, din.y, din.y)
# left figure is in the left margin space of the plot area from top to bottom
leftfig = c(xbnds[1] - borders[1], xbnds[2] + borders[2], ybnds[1] -
borders[4], ybnds[4] + borders[3])
# right figure is in the right margin space of the plot area from top to bottom
rightfig = c(xbnds[3] - borders[1], xbnds[4] + borders[2], ybnds[1] -
borders[4], ybnds[4] + borders[3])
# top figure is in the top margin space from from left to right
topfig = c(xbnds[1] - borders[1], xbnds[4] + borders[2], ybnds[3] -
borders[4], ybnds[4] + borders[3])
# bottom figure is in the bottom margin space from left to right
botfig = c(xbnds[1] - borders[1], xbnds[4] + borders[2], ybnds[1] -
borders[4], ybnds[2] + borders[3])
# these figure areas are from the devices left to right and top to bottom limits.
#
# Recycle the separator vectors to one entry per gap (n panels -> n+1 gaps).
colSep = panelLengthen(colSep, ncol + 1) # initially 1 element of zero - now "ncol" elements
rowSep = panelLengthen(rowSep, nrow + 1) # nrow elements.
if(is.null(pad))
{ # no pad, initialize
pad = c(borders[1] + colSep[1] + leftMargin,
borders[2] + colSep[ncol + 1] + rightMargin,
borders[3] + rowSep[1] + topMargin,
borders[4] + rowSep[nrow + 1] + bottomMargin)
}
# The borders should align around the edge.
colSep = cumsum(colSep) # convert individual spaces to cumulative sums.
rowSep = cumsum(rowSep)
plotX = plotX - colSep[ncol + 1] # subtract room for separators (value of last colSep or rowSep
plotY = plotY - rowSep[nrow + 1]
# the colSize and rowSize values are relative to a projected sum of units.
# sum(colSize) = ncol (number of columns)
# sum(rowSize) = 71.64 as coded for 10 full groups and the median group
relx = panelLengthen(colSize, ncol) # size required == 3 * 1 relx is 3 elements of 1
rely = panelLengthen(rowSize, nrow) # size required == 10 * 7 + 1.64
relx = relx/sum(relx) # Factional each element / sum
rely = rely/sum(rely)
# Cumulative panel offsets (inches) from the plot-area origin.
xinc = plotX * cumsum(c(0, relx))
yinc = plotY * cumsum(c(0, rely))
fig = matrix(0, nrow = nrow * ncol, ncol = 4)
k = 0
# Row-major fill: panel (i, j) lands in row k = ncol*(i-1)+j, matching
# the indexing used by panelSelect().
for(i in 1:nrow) {
for(j in 1:ncol) {
k = k + 1
fig[k, 1] = xbnds[2] + xinc[j] + colSep[j] - pad[1] # x.pos k<=1:- start
fig[k, 2] = xbnds[2] + xinc[j + 1] + colSep[j] + pad[2] # x pos - end
fig[k, 4] = ybnds[3] - yinc[i] - rowSep[i] + pad[3] # y pos - start
fig[k, 3] = ybnds[3] - yinc[i + 1] - rowSep[i] - pad[4] # y pos - end
}
}
# fig now has the x1, x2, y1, y2 physical position of each panel on the page.
labfig = rbind(leftfig, rightfig, topfig, botfig)
# lab figure has four rows - one for each margin/border space around the plot area.
# Scale the "inch" coordinates to coordinates 0 to 1 (relative)
fig = abs(t(t(fig)/fig.scale))
labfig = t(t(labfig)/fig.scale)
# coltabs are in inches and start inside the left border
coltabs = cbind(c(0, colSep + xinc + leftMargin), leftMargin + c(0,colSep) + c(xinc,
xinc[length(xinc)] + rightMargin))
# rowtabs are in inches and start below the upper border
# yinc is padded with a leading 0.
rowtabs = cbind(c(ybnds[3], ybnds[3] - rowSep - c(yinc[-1], yinc[nrow + 1] + bottomMargin)),
c(ybnds[4], ybnds[3] - rowSep - yinc)) - borders[4]
# The tabs provide the physical points for each panel.
list(dim = c(nrow, ncol), datfig = round(fig,6), labfig = round(labfig,6), brd = borders,
pad = pad, coltabs = coltabs, rowtabs = rowtabs,
figsize = c(din.x, din.y))
}
|
# Read the data, restricting rows to the two target dates (2007-02-01 and
# 2007-02-02) while reading. Missing values are coded as "?" in the source file.
data <- subset(read.table("household_power_consumption.txt", na.strings = "?", sep = ";", header = TRUE), Date == "1/2/2007" | Date == "2/2/2007")
# Build a combined date-time column. as.POSIXct() is applied because
# strptime() returns POSIXlt, which does not store safely inside a data.frame.
data1 <- transform(data, Date_time = as.POSIXct(strptime(paste(data$Date, data$Time), format = "%d/%m/%Y %H:%M:%S")))
# Open the png device.
png("plot4.png")
# 2 x 2 grid of plots, filled row by row.
par(mfrow = c(2, 2))
# Top-left: global active power over time.
with(data1, plot(Date_time, Global_active_power, xlab = "", ylab = "Global Active Power", type = "l"))
# Top-right: voltage over time.
with(data1, plot(Date_time, Voltage, xlab = "datetime", ylab = "Voltage", type = "l"))
# Bottom-left: the three sub-metering series overlaid on one panel.
with(data1, plot(Date_time, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering"))
with(data1, lines(Date_time, Sub_metering_2, col = "red"))
with(data1, lines(Date_time, Sub_metering_3, col = "blue"))
legend("topright", lty = 1, col = c("black", "red", "blue"), bty = "n", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Bottom-right: global reactive power over time.
with(data1, plot(Date_time, Global_reactive_power, xlab = "datetime", ylab = "Global_reactive_power", type = "l"))
# Close the device so the file is written.
dev.off()
|
/plot4.R
|
no_license
|
statik22/ExData_Plotting1
|
R
| false
| false
| 1,453
|
r
|
# Read the data, restricting rows to the two target dates (2007-02-01 and
# 2007-02-02) while reading. Missing values are coded as "?" in the source file.
data <- subset(read.table("household_power_consumption.txt", na.strings = "?", sep = ";", header = TRUE), Date == "1/2/2007" | Date == "2/2/2007")
# Build a combined date-time column. as.POSIXct() is applied because
# strptime() returns POSIXlt, which does not store safely inside a data.frame.
data1 <- transform(data, Date_time = as.POSIXct(strptime(paste(data$Date, data$Time), format = "%d/%m/%Y %H:%M:%S")))
# Open the png device.
png("plot4.png")
# 2 x 2 grid of plots, filled row by row.
par(mfrow = c(2, 2))
# Top-left: global active power over time.
with(data1, plot(Date_time, Global_active_power, xlab = "", ylab = "Global Active Power", type = "l"))
# Top-right: voltage over time.
with(data1, plot(Date_time, Voltage, xlab = "datetime", ylab = "Voltage", type = "l"))
# Bottom-left: the three sub-metering series overlaid on one panel.
with(data1, plot(Date_time, Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering"))
with(data1, lines(Date_time, Sub_metering_2, col = "red"))
with(data1, lines(Date_time, Sub_metering_3, col = "blue"))
legend("topright", lty = 1, col = c("black", "red", "blue"), bty = "n", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Bottom-right: global reactive power over time.
with(data1, plot(Date_time, Global_reactive_power, xlab = "datetime", ylab = "Global_reactive_power", type = "l"))
# Close the device so the file is written.
dev.off()
|
# run_analysis.R -- Getting & Cleaning Data course project.
# Downloads the UCI HAR data set, merges training and test sets, keeps the
# mean/std features, attaches activity labels and subject ids, and writes:
#   tidy.csv / tidy.txt   - merged observation-level data
#   tidy2.csv / tidy2.txt - per subject+activity mean of every feature
library(dplyr)

original_data_url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
original_data_filename <- "dataset.zip"
# Download and unzip only once; later runs reuse the local copy.
if (!file.exists(original_data_filename)) {
  download.file(original_data_url, destfile = original_data_filename)
}
if (!dir.exists("UCI HAR Dataset")) {
  unzip(original_data_filename)
}

# Paths to the raw files.
data_dir <- file.path("UCI HAR Dataset")
training_data_dir <- file.path(data_dir, "train")
test_data_dir <- file.path(data_dir, "test")
x_training_data_path <- file.path(training_data_dir, "X_train.txt")
y_training_data_path <- file.path(training_data_dir, "y_train.txt")
x_test_data_path <- file.path(test_data_dir, "X_test.txt")
y_test_data_path <- file.path(test_data_dir, "y_test.txt")
subject_training_data_path <- file.path(training_data_dir, "subject_train.txt")
subject_test_data_path <- file.path(test_data_dir, "subject_test.txt")
features_data_path <- file.path(data_dir, "features.txt")
activity_labels_data_path <- file.path(data_dir, "activity_labels.txt")

# Feature names: drop parentheses/commas/dashes and lowercase so they make
# valid, readable column names.
feature_names_table <- read.csv(file = features_data_path, sep = " ",
                                header = FALSE,
                                col.names = c("id", "feature_name"))
feature_names_table$feature_name <-
  tolower(gsub("[\\(\\),-]", "", feature_names_table$feature_name))
feature_names <- feature_names_table$feature_name

# Measurements: keep only the mean/std features.
training_data <- read.table(x_training_data_path, col.names = feature_names)
training_data_limited <- select(training_data, matches("mean|std"))
test_data <- read.table(x_test_data_path, col.names = feature_names)
test_data_limited <- select(test_data, matches("mean|std"))

# Activity ids and subject ids, one row per observation.
training_data_activities <- read.table(y_training_data_path, col.names = c("activity_id"))
test_data_activities <- read.table(y_test_data_path, col.names = c("activity_id"))
training_data_subjects <- read.csv(file = subject_training_data_path, sep = " ",
                                   header = FALSE, col.names = c("subject_id"))
test_data_subjects <- read.csv(file = subject_test_data_path, sep = " ",
                               header = FALSE, col.names = c("subject_id"))
activity_labels <- read.csv(file = activity_labels_data_path, sep = " ",
                            header = FALSE, col.names = c("id", "activity_label"))

# Attach descriptive activity labels, then stack training and test rows.
training_data_full <- cbind(training_data_limited, training_data_activities, training_data_subjects)
training_data_final <- left_join(training_data_full, activity_labels, by = c("activity_id" = "id"))
test_data_full <- cbind(test_data_limited, test_data_activities, test_data_subjects)
test_data_final <- left_join(test_data_full, activity_labels, by = c("activity_id" = "id"))
data <- rbind(training_data_final, test_data_final)
data <- select(data, -activity_id)

# Second tidy set: the mean of every feature per subject and activity.
tidy2 <- data %>% group_by(subject_id, activity_label) %>% summarise_all(mean)

# BUG FIX: the original passed `row.name =` to write.table (silent partial
# argument matching); the argument is spelled `row.names`.
write.csv(data, file = "tidy.csv", row.names = FALSE)
write.table(data, file = "tidy.txt", row.names = FALSE)
write.csv(tidy2, file = "tidy2.csv", row.names = FALSE)
write.table(tidy2, file = "tidy2.txt", row.names = FALSE)
|
/run_analysis.R
|
no_license
|
tdatascience/GettingAndCleaning
|
R
| false
| false
| 2,843
|
r
|
# run_analysis.R (verbatim duplicate of the copy above in this file).
# Downloads the UCI HAR data set, merges training and test measurements,
# keeps mean/std features, attaches activity labels and subject ids, and
# writes observation-level (tidy.*) and per subject+activity averaged
# (tidy2.*) outputs.
library(dplyr)
#
original_data_url = "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
original_data_filename = "dataset.zip"
# Download/unzip only once; later runs reuse the local copy.
if (!file.exists(original_data_filename))
  download.file(original_data_url,destfile = original_data_filename)
if(!dir.exists("UCI HAR Dataset")) {
  unzip(original_data_filename)
}
# Paths to the raw files.
data_dir = file.path("UCI HAR Dataset")
training_data_dir = file.path(data_dir, "train")
test_data_dir = file.path(data_dir, "test")
x_training_data_path = file.path(training_data_dir, "X_train.txt")
y_training_data_path = file.path(training_data_dir, "y_train.txt")
x_test_data_path = file.path(test_data_dir, "X_test.txt")
y_test_data_path = file.path(test_data_dir, "y_test.txt")
subject_training_data_path = file.path(training_data_dir, "subject_train.txt")
subject_test_data_path = file.path(test_data_dir, "subject_test.txt")
features_data_path = file.path(data_dir, "features.txt")
activity_labels_data_path = file.path(data_dir, "activity_labels.txt")
# Clean feature names: drop ()-, and lowercase for valid column names.
feature_names_table <- read.csv(file=features_data_path,sep=" ", header = FALSE, col.names = c("id", "feature_name"))
feature_names_table$feature_name <- tolower(gsub("[\\(\\),-]", "", feature_names_table$feature_name))
feature_names <- feature_names_table$feature_name
# Keep only mean/std measurement columns.
training_data <-read.table(x_training_data_path, col.names=feature_names)
training_data_limited <- select(training_data, matches("mean|std"))
test_data <-read.table(x_test_data_path, col.names=feature_names)
test_data_limited <- select(test_data, matches("mean|std"))
training_data_activities <- read.table(y_training_data_path,col.names = c("activity_id"))
test_data_activities <- read.table(y_test_data_path,col.names = c("activity_id"))
training_data_subjects <- read.csv(file=subject_training_data_path, sep=" ", header=FALSE, col.names=c("subject_id"))
test_data_subjects <- read.csv(file=subject_test_data_path, sep=" ", header=FALSE, col.names=c("subject_id"))
activity_labels <- read.csv(file=activity_labels_data_path,sep=" ", header=FALSE, col.names = c("id", "activity_label"))
# Attach labels and subjects, then stack training and test rows.
training_data_full <- cbind(training_data_limited, training_data_activities, training_data_subjects)
training_data_final <- left_join(training_data_full, activity_labels, by=c("activity_id" = "id"))
test_data_full <- cbind(test_data_limited, test_data_activities, test_data_subjects)
test_data_final <- left_join(test_data_full, activity_labels, by=c("activity_id" = "id"))
data <- rbind(training_data_final, test_data_final)
data <- select(data, -activity_id)
# Per subject+activity averages of every remaining feature.
tidy2 <- data %>% group_by(subject_id, activity_label) %>% summarise_all(mean)
# NOTE(review): `row.name=` relies on partial argument matching; the
# write.table argument is `row.names`.
write.csv(data, file="tidy.csv", row.names=FALSE)
write.table(data, file="tidy.txt", row.name=FALSE)
write.csv(tidy2, file="tidy2.csv", row.names=FALSE)
write.table(tidy2, file="tidy2.txt", row.name=FALSE)
|
# Group project: causes of death across historical eras, one bar chart per
# era-specific CSV extract.
library(tidyverse)
library(maps)
# Era extracts, expected in the working directory.
middle_ages <- read.csv('middle_ages.csv', stringsAsFactors = FALSE)
renaissance <- read.csv('renaissance.csv', stringsAsFactors = FALSE)
enlight_industrial <- read.csv('enlight_industrial.csv', stringsAsFactors = FALSE)
modern <- read.csv('modern.csv', stringsAsFactors = FALSE)
# NOTE(review): `birthYear` is not defined anywhere above, so this line
# errors unless it already exists in the calling environment.  The format
# string also looks wrong: %M is minutes and %D is %m/%d/%y; an ISO date
# would need "%Y-%m-%d".  TODO: confirm intent and fix.
birthYear <- format(as.Date(birthYear, format="%Y-%M-%D"),"%Y")
# Cause-of-death counts per era.
ggplot() +
  geom_bar(data=middle_ages, mapping = aes(x = deathCause_label))
ggplot() +
  geom_bar(data=renaissance, mapping = aes(x= deathCause_label))
ggplot() +
  geom_bar(data=enlight_industrial, mapping = aes(x= deathCause_label))
# NOTE(review): attaching plyr after dplyr masks several dplyr verbs; it is
# loaded here for plyr's count(df, 'column') call on the next line.
library('plyr')
count(enlight_industrial, 'deathCause_label')
ggplot() +
  geom_bar(data=modern, mapping = aes(x=deathCause_label))
|
/group_project.R
|
no_license
|
ivvatt/group_project
|
R
| false
| false
| 756
|
r
|
# Group project (verbatim duplicate of the copy above in this file):
# cause-of-death bar charts per historical era.
library(tidyverse)
library(maps)
middle_ages <- read.csv('middle_ages.csv', stringsAsFactors = FALSE)
renaissance <- read.csv('renaissance.csv', stringsAsFactors = FALSE)
enlight_industrial <- read.csv('enlight_industrial.csv', stringsAsFactors = FALSE)
modern <- read.csv('modern.csv', stringsAsFactors = FALSE)
# NOTE(review): `birthYear` is undefined here, and "%Y-%M-%D" mixes in
# minute (%M) and %m/%d/%y (%D) codes -- likely meant "%Y-%m-%d".
birthYear <- format(as.Date(birthYear, format="%Y-%M-%D"),"%Y")
ggplot() +
  geom_bar(data=middle_ages, mapping = aes(x = deathCause_label))
ggplot() +
  geom_bar(data=renaissance, mapping = aes(x= deathCause_label))
ggplot() +
  geom_bar(data=enlight_industrial, mapping = aes(x= deathCause_label))
# NOTE(review): plyr loaded after dplyr masks dplyr verbs; needed for
# plyr::count(df, 'column') below.
library('plyr')
count(enlight_industrial, 'deathCause_label')
ggplot() +
  geom_bar(data=modern, mapping = aes(x=deathCause_label))
|
# Fit a mixture model for random graphs (MixNet / stochastic block model).
#   x          - spm file name, square adjacency matrix, or 2 x nbEdges
#                edge list
#   qmin, qmax - range of class counts to fit (qmax defaults to qmin)
#   method     - "variational" (default), "classification" or "bayesian"
#   directed   - NULL = deduce from symmetry, otherwise TRUE/FALSE
#   nbiter     - number of EM iterations
#   fpnbiter   - number of fixed-point iterations per EM step
#   improve    - run the improvement phase
#   verbose    - report how the input representation was converted
# Returns an object of class "mixer": method, node names and count, the
# mixnet -> initial-graph mapping, the saved edges, qmin/qmax, and the
# per-q estimates (criterion, alphas, Pis, Taus).
mixer<-function( x, qmin=2, qmax=NULL, method="variational",
                 directed=NULL, nbiter=10, fpnbiter=5, improve=FALSE, verbose=TRUE )
{
  ## How the graph is coded ?
  if (is.character(x) & (length(x) == 1) ){
    ## spm file
    g <- new("spmgraph",x)
    m.save <- getEdges(g)
    m <- m.save
    NodeName <- g@nodenames
    NbrNodes <- max( m )
    if( is.null(directed) ) {
      # Deduce directedness from the symmetry of the edge list.
      directed = ! is.symmetric(m)
      if (verbose) {
        cat("Mixer: the edge list has been transformed to ")
        if (directed)
          cat("a directed adjacency matrix\n")
        else
          cat("an undirected adjacency matrix\n")
      }
    } else if( !(directed) & !(is.symmetric( m )) ) {
      cat("Mixer: unsymmetric matrix not suitable with directed=FALSE \n")
      return(NULL)
    }
    # Only connected nodes are in "m"
    # The nodes are renumbered
  } else if (dim(x)[1]==dim(x)[2]){
    ## Adjacency matrix
    ##
    if( is.null(directed) ) {
      directed <- (! is.symmetric( x ) )
      if (verbose) {
        cat("Mixer: the adjacency matrix has been transformed in a ")
        if (directed)
          cat("directed edge list\n")
        else
          cat("undirected edge list\n")
      }
    } else if( !(directed) & !(is.symmetric( x )) ) {
      cat("Mixer: unsymmetric matrix not suitable with directed=FALSE \n")
      return(NULL)
    }
    NbrNodes <- dim(x)[1]
    NodeName <- dimnames(x)[1]
    m <- AdjMat2Edges(x, directed=directed, verbose=verbose)
    m.save <- m
  } else if( dim(x)[1] == 2) {
    ## Edge list
    m <- x
    NodeName <- getNodeName( m )
    # To avoid index gap
    NbrNodes <- length( NodeName )
    m <- renumberNodes( m )
    m.save <- m
    if( is.null(directed) ) {
      directed <- (! is.symmetric( m ) )
      if( verbose) {
        cat("Mixer: the edge list has been transformed in a ")
        if (directed)
          cat("directed one\n")
        else
          cat("undirected one\n")
      }
    } else if( !(directed) & !(is.symmetric( m )) ) {
      cat("Mixer: unsymmetric matrix not suitable with directed=FALSE \n")
      return(NULL)
    }
  } else {
    # NOTE(review): on a shape mismatch only a message is printed; `m` and
    # `directed` stay undefined and the code below then fails with a less
    # helpful error.
    cat("Mixer: not an adjacency matrix or bad edge list\n")
  }
  ## Get the mixnet node order
  # Invalid : readingOrder<-unique(as.numeric(m));
  # Get the mapping Mixnet -> initial Graph
  Mixnet2Graph <- getMixnetNumbering( m )
  m <- removeLoops( m )
  m <- renumberNodes( m )
  # NbrConnectedNodes : number of connected nodes
  NbrConnectedNodes <- length( Mixnet2Graph )
  ## prepare the arguments
  undirected<- !( directed)
  loop<-FALSE
  kmeansnbclass<- 0 # Accelerate the initialization (used to start the HAC
                    # (should be between NbrConnectedNodes and qmax)
  kmeansnbiter<-30 #
  emeps<-1e-10 # tolerance for em (compare the likelihood)
  fpeps<-1e-4 # tolerance for the fixed point internal loop
              # (on the taus...)
  nokmeans<-TRUE # No acceleration via kmeans
  silent<-TRUE # no verbose
  initnbv<-0 # size of the initial network for the online version
  improvenbiter<-3 # number of iteration for the improvment phase
  ## ensure the options compatibility
  if (method=="classification") {
    classif<-TRUE; stochastique<-FALSE; online<-TRUE}
  else {
    stochastique<-FALSE; classif<-FALSE; online<-FALSE}
  if (undirected==TRUE){
    symetrize<-TRUE}
  else {
    symetrize<-FALSE}
  ## Ensure number of classes coherence
  if (is.null(qmax)){
    qmax<-qmin
  }
  if( NbrConnectedNodes < qmax ) {
    stop("q-class value greater than the number of nodes.")
  }
  ## compute the size of the returned array from the .C call
  # The C routine returns one flat double vector holding, for each q in
  # qmin:qmax, the criterion (1), alphas (q), Pis (q*q) and Taus
  # (q * NbrConnectedNodes), packed one after the other.
  nbrClasses <- qmax - qmin + 1
  span <- qmin:qmax
  nbrICL <- nbrClasses; elts <- c(nbrICL)
  nbrAlphas <- sum(span); elts <- c(elts, nbrAlphas)
  nbrPis <- sum(span*span); elts <- c(elts, nbrPis)
  nbrTaus <- NbrConnectedNodes*nbrAlphas; elts <- c(elts, nbrTaus)
  nbrValues <- sum(elts)
  ##Chose the method for the parameter estimation
  if (method=="bayesian"){
    bout <- VariationalBayes(m, qmin, qmax, nbiter, fpnbiter,
                             emeps, fpeps, directed )
  }
  else if (method=="classification"){
    # Compiled back end for the classification variant.
    xout <- .C("main_ermgo",
               as.integer(loop),
               as.integer(silent),
               as.integer(initnbv),
               as.integer(improvenbiter),
               as.integer(nbiter),
               as.integer(improve),
               as.integer(classif),
               as.integer(stochastique),
               as.integer(qmax),
               as.integer(qmin),
               nbrEdges = as.integer(length(m)/2),# size of the given array
               size = as.integer(nbrValues), # size of the returned array
               lNodes = as.integer(m), # given array
               res = double(nbrValues)) # returned array
    y <- vector("list", length(span))
  } else {
    # Compiled back end for the (default) variational variant.
    xout <- .C("main_ermg",
               as.integer(symetrize),
               as.integer(loop),
               as.integer(undirected),
               as.integer(silent),
               as.integer(improvenbiter),
               as.integer(kmeansnbclass),
               as.integer(kmeansnbiter),
               as.integer(nbiter),
               as.double(emeps),
               as.integer(fpnbiter),
               as.double(fpeps),
               as.integer(improve),
               as.integer(classif),
               as.integer(nokmeans),
               as.integer(qmax),
               as.integer(qmin),
               nbrEdges = as.integer(length(m)/2),# size of the given array
               size = as.integer(nbrValues), # size of the returned array
               lNodes = as.integer(m), # given array
               res = double(nbrValues)) # returned array
    y <- vector("list", length(span))
  }
  if (method != "bayesian") {
    # Unpack the flat result vector, advancing `cur` by the size of each
    # component; Taus are re-expanded to all NbrNodes via Mixnet2Graph
    # (columns of unconnected nodes stay zero).
    j <- 1
    cur <- 1
    for (i in span){
      ## format : y[[j]]$name <- dataFormat(x$res[cur:end]);
      ## cur <- (offset equal to the size of dataFormat)
      y[[j]]$criterion <- xout$res[cur]; cur <- cur+1
      y[[j]]$alphas <- xout$res[cur:(cur-1+i)]; cur <- cur+i
      y[[j]]$Pis <- matrix(xout$res[cur:(cur-1+(i*i))], i,i);
      cur <- cur+(i*i)
      tmp <- matrix(xout$res[cur:(cur-1+(i*NbrConnectedNodes))], i,
                    NbrConnectedNodes,byrow=TRUE);
      # Invalid : y[[j]]$Taus[,readingOrder] <- y[[j]]$Taus
      # replaced by :
      y[[j]]$Taus <- matrix( 0, i, NbrNodes )
      y[[j]]$Taus[ , Mixnet2Graph[ ]] <- tmp[ , ]
      # y[[j]]$Taus <- matrix(xout$res[cur:(cur-1+(i*NbrConnectedNodes))], i,
      # NbrConnectedNodes,byrow=TRUE);
      cur <- cur+(i*NbrConnectedNodes)
      j <- j+1
    }
    result<-list(method=method,nnames=NodeName, nnodes=NbrNodes,
                 map=Mixnet2Graph, edges=m.save,qmin=qmin,qmax=qmax,output=y,
                 directed=directed)
  } else {
    # Bayesian path: only the Taus need re-expansion to all nodes.
    j <- 1
    for (i in span){
      tmp <- bout[[j]]$Taus
      bout[[j]]$Taus <- matrix( 0, i, NbrNodes )
      bout[[j]]$Taus[ , Mixnet2Graph[ ]] <- tmp[ , ]
      j <- j+1
    }
    result<-list(method=method, nnames=NodeName, nnodes=NbrNodes,
                 map=Mixnet2Graph, edges=m.save, qmin=qmin, qmax=qmax, output=bout,
                 directed=directed)
  }
  class(result)<-"mixer"
  return(result)
}
############################################################
# Plot the model-selection criterion against the class number
############################################################
# Draw the ICL (or its Bayesian analogue) for every fitted class count and
# mark the selected q with a dashed red vertical line.
#   x - fitted "mixer" object
#   q - class count to highlight
ploticl <- function(x, q, ...) {
  bayes <- (x$method == "bayesian")
  main.title <- if (bayes) "Bayesian criterion vs class number"
                else "Integrated Classification Likelihood"
  y.lab <- if (bayes) "Bayesian criterion" else "ICL"
  # One point per fitted model: number of classes vs criterion value.
  n.classes <- vapply(x$output, function(out) length(out$alphas), numeric(1))
  criterion <- vapply(x$output, function(out) out$criterion, numeric(1))
  plot(n.classes, criterion, xlab = "Number of classes", ylab = y.lab,
       main = main.title)
  lines(n.classes, criterion)
  abline(v = q, col = "red", lty = 2)
}
############################################################
# Plot the reorganized adjacency matrix
############################################################
# Draw the adjacency matrix with rows/columns permuted so that nodes of the
# same class are contiguous; red lines mark class boundaries.
#   edges   - 2 x nbEdges matrix of (1-based) node indices
#   cluster - class assignment per node
plotam<-function(edges,cluster)
{
  neworder<-order(cluster)
  max(edges)->n
  # order(neworder) gives each node's rank in the class-sorted layout;
  # indexing it by the edge endpoints maps every edge to its new position.
  m<-t(matrix(order(neworder)[as.numeric(edges)],2))
  plot(1, 1, xlim = c(0, n + 1), ylim = c(n + 1, 0), type = "n", axes= FALSE,xlab="classes",ylab="classes",main="Reorganized Adjacency matrix")
  # Draw each edge as a unit cell, plus its transpose to keep the picture
  # symmetric.
  rect(m[,2]-0.5,m[,1]-0.5,m[,2]+0.5,m[,1]+0.5,col=1)
  rect(m[,1]-0.5,m[,2]-0.5,m[,1]+0.5,m[,2]+0.5,col=1)
  table(cluster)->limits # find the class limits
  cumsum(limits)[1:(length(limits)-1)]+0.5->limits
  abline(v=c(0.5,limits,n+0.5),h=c(0.5,limits,n+0.5),col="red")
}
############################################################
# Plot the Pis matrix and alphas vector using spectral decomposition
############################################################
# Spectral view of the connection matrix: classes are placed via a
# Laplacian eigendecomposition of Pis and drawn as circles whose size
# reflects the class proportions.
#   Pis    - q x q connection-probability matrix
#   alphas - class proportions
#   q      - ignored: it is immediately overwritten by length(alphas)
plotparam<-function(Pis,alphas,q=NULL){
  length(alphas)->q
  # q == 1: nothing to decompose; plot a single point at (1, 1).
  if (q==1) {D<-list(vector=data.frame(1,1)); a<-b<-1} else {
    if (q==2) {a<-b<-1} else {a<-2; b<-3}
    D<-colSums(Pis)
    # Normalized Laplacian of Pis, then its eigendecomposition.
    L<-diag(rep(1,q)) - diag(D^(-1/2)) %*% Pis %*% diag(D^(-1/2))
    eigen(L)->D
  }
  # NOTE(review): `D$vector` relies on `$` partial matching -- eigen()
  # returns a component named `vectors`.  It is exact only in the q == 1
  # branch above.  ("Specral" in the title is a typo in a runtime string.)
  plot(D$vector[,a],D$vector[,b],cex=1/min(alphas^(1/2))*alphas^(1/2)*3,axes=FALSE,xlab="",ylab="",main="Specral view of the connection matrix",pch=19,col="red")
  points(D$vector[,a],D$vector[,b],cex=1/min(alphas^(1/2))*alphas^(1/2)*3)
  text(D$vector[,a],D$vector[,b],label=1:q)
  #gplot((Pis>median(Pis))*Pis,vertex.cex=1/min(alphas^(1/2))*alphas^(1/2)*3,edge.lwd=(Pis>median(Pis))*Pis*1/min(median(Pis)),label=1:length(alphas),label.pos=6)
}
############################################################
# Poisson mixture of the degree distribution
############################################################
# Poisson mixture density: f(x) = sum_q alphas[q] * dpois(x, lambdaq[q]).
#   x       - vector of (degree) values at which to evaluate the density
#   alphas  - mixture proportions, one per component
#   lambdaq - Poisson mean of each component
# Returns a numeric vector the same length as x.
mixture <- function(x, alphas, lambdaq) {
  density <- numeric(length(x))
  for (comp in seq_along(alphas)) {
    density <- density + alphas[comp] * dpois(x, lambda = lambdaq[comp])
  }
  density
}
# Overlay the empirical degree histogram with the Poisson-mixture degree
# distribution implied by the fitted model.
#   degrees  - per-node degree vector (zero-degree nodes are dropped)
#   Pis      - q x q connection matrix
#   alphas   - class proportions
#   n        - number of nodes; a directed graph doubles the expected degree
plotmixture<-function(degrees,Pis,alphas,n, directed=FALSE){
  # Expected degree of each class under the model.
  if( directed )
    colSums(Pis*alphas)*(2*n-2)->lambdaq
  else
    colSums(Pis*alphas)*(n-1)->lambdaq
  # Remove unconnected nodes
  degrees <- degrees[ which( degrees != 0) ]
  # Evaluate the mixture density over the observed degree range.
  min(degrees):max(degrees)->x
  mixture(x,alphas,lambdaq)->y
  histo<-hist(degrees,plot=FALSE)
  plot(histo,ylim=c(0,max(histo$density,y)),freq=FALSE,col=7,main="Degree distribution",)
  lines(x,y,lwd=2,col="blue")
  points(x,y)
}
############################################################
# mixer class predicate and S3 plot method
############################################################
# Test whether `x` is a fitted mixer object.
# BUG FIX: the original compared class(x) == "mixer", which yields a
# length > 1 logical (an error in `if` since R 4.2) whenever the object
# carries more than one class; inherits() handles that correctly.
is.mixer <- function(x) inherits(x, "mixer")
# S3 plot method for "mixer" objects.
#   x            - fitted mixer object
#   q            - class count to display (default: best by the criterion)
#   frame        - which of panels 1..5 to draw (at most 4 at once):
#                  1 criterion curve, 2 reordered adjacency matrix,
#                  3 degree distribution, 4 inter/intra class probabilities,
#                  5 graph view (Gplot)
#   classes      - optional factor of known node classes (pies in frame 4)
#   classes.col  - colours for those classes
#   quantile.val - threshold forwarded to Gplot in frame 4
plot.mixer<-function(x, q=NULL, frame=1:4, classes=NULL, classes.col=NULL, quantile.val=0.1, ...){
  # Test x
  if (!is.mixer( x ))
    stop("Not a mixer object")
  x->mixer.res
  # Test q
  if( ! is.null(q)) {
    if( ! (q %in% x$qmin:x$qmax) )
      stop("Bad value of 'q'")
  }
  # Test frame
  if( ! (is.numeric(frame) & all( frame %in% 1:5) ) )
    stop("Bad frame number")
  # Test classes
  if( ! ( is.factor( classes ) | is.null( classes ) ))
    stop("'classes' not factor")
  if( ! is.null( classes) & length(classes) != x$nnodes )
    stop("Bad 'classes' length ")
  if( ! is.numeric(quantile.val) )
    stop("Bad 'quantile.val' value")
  #
  # Frames
  #
  # Remove bad Frames numbers
  index <- which( frame > 5 )
  if( length( index ) != 0 )
    frame <- frame[ - index ]
  frame <- unique( frame )
  nb.frame <- length( frame )
  # Tool large number of frames : remove the last frames
  if( nb.frame > 4 ) {
    frame <- frame[ -5:-nb.frame]
    nb.frame <- 4
  }
  # NOTE(review): mixer objects have no $x component, so `n` is NULL here;
  # it is never used below.
  n<-dim(mixer.res$x)[1]
  if ( is.null(q) ) {
    # find the best number of classes according ICL
    ICL<-unlist( lapply( mixer.res$output,
                         ICL<-function(x) x$criterion))
    which.max(ICL)->i
    q<-length(mixer.res$output[[i]]$alphas)
  }
  # Hard class assignment: for each node take the class of maximal Tau.
  index <- q-mixer.res$qmin+1
  apply(mixer.res$output[[index]]$Taus,2,which.max)->cluster
  # Not connected nodes
  cluster[ (colSums( mixer.res$output[[index]]$Taus ) == 0 ) ] <- -1
  Pis <- mixer.res$output[[q-mixer.res$qmin+1]]$Pis
  alphas <- mixer.res$output[[q-mixer.res$qmin+1]]$alphas
  # Frames to display: choose a grid shape matching the panel count.
  nb.col <- 2
  nb.lin <- 2
  if ( nb.frame == 1) {
    nb.col <- 1
    nb.lin <- 1
  } else if ( nb.frame == 2) {
    nb.col <- 2
    nb.lin <- 1
  }
  par(mfrow=c(nb.lin, nb.col))
  if( 1 %in% frame ){
    ploticl(mixer.res,q)
  }
  if( 2 %in% frame ) {
    plotam(mixer.res$edges,cluster)
  }
  if( 3 %in% frame ){
    # Recompute node degrees from the saved edge list, ignoring loops.
    Degrees <- rep( 0, mixer.res$nnodes )
    for ( i in 1:dim(mixer.res$edges)[2] ) {
      # Remove loops
      if( mixer.res$edges[1, i] != mixer.res$edges[2, i] ) {
        node = mixer.res$edges[1, i]
        Degrees[ node ] = Degrees[ node ] + 1
        node = mixer.res$edges[2, i]
        Degrees[ node ] = Degrees[ node ] + 1
      }
    }
    plotmixture( Degrees, Pis, alphas, length( mixer.res$map ), mixer.res$directed )
  }
  if( 4 %in% frame ){
    if( !is.null( classes ) ) {
      # Pie coefficients: how the known classes distribute inside each
      # estimated class, normalized by the per-row maximum.
      pie.coef = table( factor( cluster, levels=1:q ) , classes )
      # Normalization
      for ( i in 1:dim(pie.coef)[1] ) {
        max = max( pie.coef[i, ] )
        if ( max != 0 )
          pie.coef[i, ] = pie.coef[i, ] / max
      }
    } else {
      pie.coef = NULL
    }
    Gplot( Pis, type="pie.nodes", node.weight=alphas, node.pie.coef=pie.coef,
           quantile.val = quantile.val, colors=classes.col,
           main="Inter/intra class probabilities",
           ... )
  }
  if( 5 %in% frame )
    Gplot( mixer.res$edges, class=cluster, colors=classes.col,
           main="Graph", directed=x$directed,
           ... )
  par( mfrow=c(1, 1) )
}
############################################################
# Simulation of an affiliation graph
############################################################
# Build a class-indicator (one-hot) matrix from a class vector/factor:
# row i carries a 1 in the column of cl[i]'s level and 0 elsewhere.
class.ind <- function(cl) {
  fac <- as.factor(cl)
  n.obs <- length(fac)
  ind <- matrix(0, nrow = n.obs, ncol = nlevels(fac))
  # Column-major linear index of (row i, column = level code of i).
  ind[seq_len(n.obs) + n.obs * (as.integer(fac) - 1)] <- 1
  dimnames(ind) <- list(names(fac), levels(fac))
  ind
}
# Simulate an affiliation (planted-partition) random graph.
graph.affiliation<-function( n=100,
                             alphaVect=c(1/2,1/2), lambda=0.7, epsilon=0.05,
                             directed=FALSE) {
  # INPUT n: number of vertex
  # alphaVect : vecteur of class proportion
  # lambda: proba of edge given same classe
  # epsilon: proba of edge given two different classes
  # OUTPUT x: adjacency matrix
  # cluster: class vector
  #
  x<-matrix(0,n,n);
  Q<-length(alphaVect);
  NodeToClass <- vector(length=n)
  # Draw class sizes from a multinomial, then shuffle the node order.
  rmultinom(1, size=n, prob = alphaVect)->nq;
  Z<-class.ind(rep(1:Q,nq));
  Z<-Z[sample(1:n,n),];
  for (i in 1:n) {
    NodeToClass[i] <- which.max( Z[i,] )
  }
  # Draw each edge independently: probability lambda inside a class,
  # epsilon between classes.  The upper triangle is always sampled; the
  # lower triangle only for directed graphs.
  for (i in 1:n) {
    if ( i != n) {
      for (j in (i+1):n) {
        # if i and j in same class
        if ( NodeToClass[i] == NodeToClass[j]) p<-lambda else p<-epsilon
        if ( (rbinom(1,1,p) )) { x[i,j] <- 1 }
      }
      if ( directed ) {
        if ( i != 1) {
          for (j in 1:(i-1)) {
            if ( NodeToClass[i] == NodeToClass[j]) p<-lambda else p<-epsilon
            if ( (rbinom(1,1,p) )) { x[i,j] <- 1 }
          }
        }
      }
    }
  }
  # Undirected: mirror the sampled upper triangle.
  if ( ! directed ) {
    x <- x + t(x)
  }
  return(list(x=x,cluster=apply(Z,1,which.max)) )
}
##############################################################
# Spectral Clustering using normalized Laplacian
##############################################################
# Cluster the nodes of an adjacency matrix with spectral k-means.
#   x - square adjacency matrix; every node needs non-zero degree, since
#       D^(-1/2) is infinite for an isolated node
#   q - number of clusters
# Returns the "kmeans" fit (cluster assignment in $cluster).
spectralkmeans <- function(x, q = 2) {
  n <- dim(x)[1]
  deg <- colSums(x)
  # Normalized Laplacian: L = I - D^(-1/2) A D^(-1/2).
  lap <- diag(rep(1, n)) - diag(deg^(-1/2)) %*% x %*% diag(deg^(-1/2))
  eig <- eigen(lap)
  # k-means on the eigenvectors of the smallest non-trivial eigenvalues
  # (eigen() sorts decreasingly, so they are the last columns; the very
  # last, trivial one is skipped).
  # BUG FIX: the original ended with `kmeans(...) -> res`, so the result
  # was returned invisibly and the `res` local was dead; return it plainly.
  kmeans(as.matrix(eig$vectors[, max(1, (n - q)):(n - 1)]), q)
}
##############################################################
# Compute the adjusted Rand index between two partitions
##############################################################
# Adjusted Rand statistic between two partitions of the same items.
#   x, y - equal-length vectors of cluster labels
# Returns 1 for identical partitions and values near 0 for independent
# ones (can be negative).
randError <- function(x, y) {
  ctab <- table(x, y)
  # Number of agreeing pairs: within each cell, per row/column margin,
  # and in total.
  pairs <- function(k) sum(k * (k - 1) / 2)
  cellsum <- pairs(ctab)
  totsum <- pairs(sum(ctab))
  rowsum <- pairs(rowSums(ctab))
  colsum <- pairs(colSums(ctab))
  # Adjust the raw agreement by its expectation under random labelling.
  expected <- rowsum * colsum / totsum
  (cellsum - expected) / (0.5 * (rowsum + colsum) - expected)
}
##############################################################
# transform of an adjacency matrix into an array of edges
##############################################################
# Convert an adjacency matrix into a 2 x nbEdges matrix of node-index
# pairs.
#   x        - square 0/1 adjacency matrix
#   directed - if FALSE, only the upper triangle (including the diagonal,
#              i.e. loops) is scanned so each undirected edge appears once
#   verbose  - unused; kept for interface compatibility
# Returns a matrix whose columns are (from, to) index pairs.
AdjMat2Edges <- function(x, directed = FALSE, verbose = TRUE) {
  # BUG FIX: the original silently fell through for non-square input and
  # then failed with "object 'm' not found"; fail with a clear message.
  if (dim(x)[1] != dim(x)[2]) {
    stop("AdjMat2Edges: 'x' must be a square adjacency matrix")
  }
  if (directed) {
    m <- t(which(x == 1, arr.ind = TRUE))
  } else {
    m <- t(which((x == 1) & upper.tri(x, diag = TRUE), arr.ind = TRUE))
  }
  return(m)
}
##############################################################
# Return the edge list or adjacency matrix without loops
##############################################################
# Remove self-loops from a graph representation.
#   x   - adjacency matrix (adj = TRUE) or 2 x nbEdges edge list
#   adj - interpret `x` as an adjacency matrix?
# Returns `x` with loops (diagonal entries / i->i edges) removed.
removeLoops <- function(x, adj = FALSE) {
  if (adj) {
    ## Adjacency matrix: loops live on the diagonal.
    diag(x) <- 0
  } else if (dim(x)[1] == 2) {
    ## Edge list: keep only columns whose endpoints differ.
    # BUG FIX: the original skipped the subset when length(ilist) == 0,
    # i.e. when EVERY edge was a loop, and returned the loops untouched;
    # drop = FALSE also replaces the as.matrix() round-trip.
    keep <- which(x[1, ] != x[2, ])
    x <- x[, keep, drop = FALSE]
  } else {
    cat("Mixer: removeLoops not an adjacency matrix nor edge list\n")
  }
  return(x)
}
##############################################################
# Return the index list of connected nodes
##############################################################
# List the nodes that have at least one incident edge.
#   x - square adjacency matrix, or a 2 x nbEdges edge list
# Returns node indices (adjacency case) or node labels (edge-list case).
# Note: a square 2 x 2 input is always treated as an adjacency matrix.
getConnectedNodes <- function(x) {
  if (dim(x)[1] == dim(x)[2]) {
    # Adjacency matrix: connected <=> non-zero row-plus-column total.
    degree <- rowSums(x) + colSums(x)
    ConnectedNodes <- which(degree != 0)
  } else if (dim(x)[1] == 2) {
    # Edge list: every node mentioned by an edge is connected.
    ConnectedNodes <- unique(as.vector(x))
  } else {
    cat("Mixer: getConnectedNodes not an adjacency matrix nor edge list\n")
  }
  return(ConnectedNodes)
}
##############################################################
# Set the seed of random functions of C/C++ part
##############################################################
# Seed R's RNG so randomized estimation runs are reproducible.
setSeed <- function(seed = 1) {
  # Previously this forwarded to the C layer:
  #   invisible(.C("srand_stdlib", as.integer(seed)))
  set.seed(seed)
}
##############################################################
# Declare a generic function
##############################################################
# S3 generic: dispatches to getModel.<class>(object, ...).
getModel <- function( object, ... )
{
  UseMethod("getModel", object)
}
##############################################################
# Return model parameters.
##############################################################
# Extract the estimated parameters of a fitted mixer object for a given
# class count q (passed through `...`; `sub.param` is a package helper
# that pulls it out).  When q is absent, the model maximizing the
# criterion is chosen.
# Returns a list: q, criterion, alphas, Pis, Taus.
getModel.mixer <- function( object, ...) {
  # Test x
  if ( !is.mixer( object ) )
    stop("Not a mixer object")
  x <- object
  # Get optional parameter q
  q <- sub.param("q", NULL , ...)
  # Test q
  if( ! is.null(q)) {
    if( ! (q %in% x$qmin:x$qmax) )
      stop("Bad value of 'q'")
  }
  if ( is.null(q) ) {
    # find the best number of classes according ICL
    ICL <- unlist( lapply( x$output, ICL<-function(x) x$criterion))
    i <- which.max(ICL)
    q <- length(x$output[[i]]$alphas)
  }
  # x$output is indexed from qmin, hence the offset.
  i <- q - x$qmin + 1
  res <- list(q = q,
              criterion = x$output[[i]]$criterion ,
              alphas = x$output[[i]]$alphas,
              Pis = x$output[[i]]$Pis,
              Taus = x$output[[i]]$Taus
              )
  return( res )
}
##############################################################
# Return the mapping mixnet -> initial graph
# 'x[2, nbedges]' edge list
##############################################################
# Node labels in their order of first appearance, scanning the edges
# column by column (endpoint 1 before endpoint 2) and skipping self-loops;
# element k is the original label of mixnet node k.
#   x - edge list, a 2 x nbEdges matrix
# IMPROVED: the original grew a vector with repeated `%in%` scans
# (O(nbEdges^2)) and its `1:NbEdges` loop broke on an empty edge list;
# flattening column-major visits nodes in exactly the same order, so
# unique() reproduces the result.
getMixnetNumbering <- function(x) {
  # Drop loop edges, then flatten: x[1,1], x[2,1], x[1,2], ...
  no.loops <- x[, x[1, ] != x[2, ], drop = FALSE]
  return(unique(as.vector(no.loops)))
}
##############################################################
# Renumber the nodes.
# 'x[2, nbedges]' edge list
##############################################################
# Renumber edge-list nodes consecutively (1, 2, ...) in order of first
# appearance, scanning column by column (endpoint 1 of an edge before
# endpoint 2).
#   x - edge list, a 2 x nbEdges matrix of node labels
# Returns a matrix of the same shape holding the new 1-based indices
# (integer storage; the original produced doubles with equal values).
# IMPROVED: the original rebuilt the lookup with repeated which()/%in%
# scans (O(nbEdges^2)) and `1:NbEdges` broke on an empty edge list;
# match() against the first appearances is equivalent and linear-ish.
renumberNodes <- function(x) {
  # Column-major flattening reproduces the visit order of the original
  # double scan; match() then assigns each label its first-appearance rank.
  flat <- as.vector(x)
  return(matrix(match(flat, unique(flat)), nrow = dim(x)[1], ncol = dim(x)[2]))
}
##############################################################
# getNodeNames
# 'x[2, nbedges]' edge list
##############################################################
# Collect the distinct node labels of an edge list in order of first
# appearance (column by column; unlike getMixnetNumbering, loops are NOT
# skipped).
#   x - edge list, a 2 x nbEdges matrix
# Returns the vector of unique node labels.
# IMPROVED: the original appended through repeated `%in%` scans
# (O(nbEdges^2)) and `1:NbEdges` broke on an empty edge list; column-major
# flattening visits nodes in the same order, so unique() is equivalent.
getNodeName <- function(x) {
  return(unique(as.vector(x)))
}
|
/mixer/R/mixer.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 22,332
|
r
|
# Fit a mixture model for random graphs (MixNet / stochastic block model).
# (This is a verbatim duplicate of the mixer() definition earlier in this
# file.)
#   x          - spm file name, square adjacency matrix, or 2 x nbEdges
#                edge list
#   qmin, qmax - range of class counts to fit (qmax defaults to qmin)
#   method     - "variational" (default), "classification" or "bayesian"
#   directed   - NULL = deduce from symmetry, otherwise TRUE/FALSE
# Returns an object of class "mixer" with per-q estimates (criterion,
# alphas, Pis, Taus), the node names/count, mapping and saved edges.
mixer<-function( x, qmin=2, qmax=NULL, method="variational",
                 directed=NULL, nbiter=10, fpnbiter=5, improve=FALSE, verbose=TRUE )
{
  ## How the graph is coded ?
  if (is.character(x) & (length(x) == 1) ){
    ## spm file
    g <- new("spmgraph",x)
    m.save <- getEdges(g)
    m <- m.save
    NodeName <- g@nodenames
    NbrNodes <- max( m )
    if( is.null(directed) ) {
      directed = ! is.symmetric(m)
      if (verbose) {
        cat("Mixer: the edge list has been transformed to ")
        if (directed)
          cat("a directed adjacency matrix\n")
        else
          cat("an undirected adjacency matrix\n")
      }
    } else if( !(directed) & !(is.symmetric( m )) ) {
      cat("Mixer: unsymmetric matrix not suitable with directed=FALSE \n")
      return(NULL)
    }
    # Only connected nodes are in "m"
    # The nodes are renumbered
  } else if (dim(x)[1]==dim(x)[2]){
    ## Adjacency matrix
    ##
    if( is.null(directed) ) {
      directed <- (! is.symmetric( x ) )
      if (verbose) {
        cat("Mixer: the adjacency matrix has been transformed in a ")
        if (directed)
          cat("directed edge list\n")
        else
          cat("undirected edge list\n")
      }
    } else if( !(directed) & !(is.symmetric( x )) ) {
      cat("Mixer: unsymmetric matrix not suitable with directed=FALSE \n")
      return(NULL)
    }
    NbrNodes <- dim(x)[1]
    NodeName <- dimnames(x)[1]
    m <- AdjMat2Edges(x, directed=directed, verbose=verbose)
    m.save <- m
  } else if( dim(x)[1] == 2) {
    ## Edge list
    m <- x
    NodeName <- getNodeName( m )
    # To avoid index gap
    NbrNodes <- length( NodeName )
    m <- renumberNodes( m )
    m.save <- m
    if( is.null(directed) ) {
      directed <- (! is.symmetric( m ) )
      if( verbose) {
        cat("Mixer: the edge list has been transformed in a ")
        if (directed)
          cat("directed one\n")
        else
          cat("undirected one\n")
      }
    } else if( !(directed) & !(is.symmetric( m )) ) {
      cat("Mixer: unsymmetric matrix not suitable with directed=FALSE \n")
      return(NULL)
    }
  } else {
    # NOTE(review): on a shape mismatch only a message is printed; `m` and
    # `directed` stay undefined and the code below then fails.
    cat("Mixer: not an adjacency matrix or bad edge list\n")
  }
  ## Get the mixnet node order
  # Invalid : readingOrder<-unique(as.numeric(m));
  # Get the mapping Mixnet -> initial Graph
  Mixnet2Graph <- getMixnetNumbering( m )
  m <- removeLoops( m )
  m <- renumberNodes( m )
  # NbrConnectedNodes : number of connected nodes
  NbrConnectedNodes <- length( Mixnet2Graph )
  ## prepare the arguments
  undirected<- !( directed)
  loop<-FALSE
  kmeansnbclass<- 0 # Accelerate the initialization (used to start the HAC
                    # (should be between NbrConnectedNodes and qmax)
  kmeansnbiter<-30 #
  emeps<-1e-10 # tolerance for em (compare the likelihood)
  fpeps<-1e-4 # tolerance for the fixed point internal loop
              # (on the taus...)
  nokmeans<-TRUE # No acceleration via kmeans
  silent<-TRUE # no verbose
  initnbv<-0 # size of the initial network for the online version
  improvenbiter<-3 # number of iteration for the improvment phase
  ## ensure the options compatibility
  if (method=="classification") {
    classif<-TRUE; stochastique<-FALSE; online<-TRUE}
  else {
    stochastique<-FALSE; classif<-FALSE; online<-FALSE}
  if (undirected==TRUE){
    symetrize<-TRUE}
  else {
    symetrize<-FALSE}
  ## Ensure number of classes coherence
  if (is.null(qmax)){
    qmax<-qmin
  }
  if( NbrConnectedNodes < qmax ) {
    stop("q-class value greater than the number of nodes.")
  }
  ## compute the size of the returned array from the .C call
  # Flat result layout per q: criterion (1), alphas (q), Pis (q*q),
  # Taus (q * NbrConnectedNodes).
  nbrClasses <- qmax - qmin + 1
  span <- qmin:qmax
  nbrICL <- nbrClasses; elts <- c(nbrICL)
  nbrAlphas <- sum(span); elts <- c(elts, nbrAlphas)
  nbrPis <- sum(span*span); elts <- c(elts, nbrPis)
  nbrTaus <- NbrConnectedNodes*nbrAlphas; elts <- c(elts, nbrTaus)
  nbrValues <- sum(elts)
  ##Chose the method for the parameter estimation
  if (method=="bayesian"){
    bout <- VariationalBayes(m, qmin, qmax, nbiter, fpnbiter,
                             emeps, fpeps, directed )
  }
  else if (method=="classification"){
    xout <- .C("main_ermgo",
               as.integer(loop),
               as.integer(silent),
               as.integer(initnbv),
               as.integer(improvenbiter),
               as.integer(nbiter),
               as.integer(improve),
               as.integer(classif),
               as.integer(stochastique),
               as.integer(qmax),
               as.integer(qmin),
               nbrEdges = as.integer(length(m)/2),# size of the given array
               size = as.integer(nbrValues), # size of the returned array
               lNodes = as.integer(m), # given array
               res = double(nbrValues)) # returned array
    y <- vector("list", length(span))
  } else {
    xout <- .C("main_ermg",
               as.integer(symetrize),
               as.integer(loop),
               as.integer(undirected),
               as.integer(silent),
               as.integer(improvenbiter),
               as.integer(kmeansnbclass),
               as.integer(kmeansnbiter),
               as.integer(nbiter),
               as.double(emeps),
               as.integer(fpnbiter),
               as.double(fpeps),
               as.integer(improve),
               as.integer(classif),
               as.integer(nokmeans),
               as.integer(qmax),
               as.integer(qmin),
               nbrEdges = as.integer(length(m)/2),# size of the given array
               size = as.integer(nbrValues), # size of the returned array
               lNodes = as.integer(m), # given array
               res = double(nbrValues)) # returned array
    y <- vector("list", length(span))
  }
  if (method != "bayesian") {
    # Unpack the flat result vector; Taus are re-expanded to all NbrNodes
    # via Mixnet2Graph (columns of unconnected nodes stay zero).
    j <- 1
    cur <- 1
    for (i in span){
      ## format : y[[j]]$name <- dataFormat(x$res[cur:end]);
      ## cur <- (offset equal to the size of dataFormat)
      y[[j]]$criterion <- xout$res[cur]; cur <- cur+1
      y[[j]]$alphas <- xout$res[cur:(cur-1+i)]; cur <- cur+i
      y[[j]]$Pis <- matrix(xout$res[cur:(cur-1+(i*i))], i,i);
      cur <- cur+(i*i)
      tmp <- matrix(xout$res[cur:(cur-1+(i*NbrConnectedNodes))], i,
                    NbrConnectedNodes,byrow=TRUE);
      # Invalid : y[[j]]$Taus[,readingOrder] <- y[[j]]$Taus
      # replaced by :
      y[[j]]$Taus <- matrix( 0, i, NbrNodes )
      y[[j]]$Taus[ , Mixnet2Graph[ ]] <- tmp[ , ]
      # y[[j]]$Taus <- matrix(xout$res[cur:(cur-1+(i*NbrConnectedNodes))], i,
      # NbrConnectedNodes,byrow=TRUE);
      cur <- cur+(i*NbrConnectedNodes)
      j <- j+1
    }
    result<-list(method=method,nnames=NodeName, nnodes=NbrNodes,
                 map=Mixnet2Graph, edges=m.save,qmin=qmin,qmax=qmax,output=y,
                 directed=directed)
  } else {
    j <- 1
    for (i in span){
      tmp <- bout[[j]]$Taus
      bout[[j]]$Taus <- matrix( 0, i, NbrNodes )
      bout[[j]]$Taus[ , Mixnet2Graph[ ]] <- tmp[ , ]
      j <- j+1
    }
    result<-list(method=method, nnames=NodeName, nnodes=NbrNodes,
                 map=Mixnet2Graph, edges=m.save, qmin=qmin, qmax=qmax, output=bout,
                 directed=directed)
  }
  class(result)<-"mixer"
  return(result)
}
############################################################
# Plot the icl criterion
############################################################
# (Duplicate definition of ploticl earlier in this file.)
# Draw the criterion value for each fitted class count and mark q with a
# dashed red line.
ploticl<-function(x,q,...)
{
  if (x$method == "bayesian" ){
    title = "Bayesian criterion vs class number"
    y.lab = "Bayesian criterion"
  } else {
    title = "Integrated Classification Likelihood"
    y.lab = "ICL"
  }
  # One point per fitted model: class count vs criterion.
  Q<-unlist(lapply(x$output,ICL<-function(x) length(x$alphas)))
  ICL<-unlist(lapply(x$output,ICL<-function(x) x$criterion))
  plot(Q,ICL,xlab="Number of classes",ylab=y.lab,main=title)
  lines(Q,ICL)
  abline(v=q,col="red",lty=2)
}
############################################################
# Plot the reorganized adjacency matrix
############################################################
# (Duplicate definition of plotam earlier in this file.)
# Draw the adjacency matrix permuted so same-class nodes are contiguous;
# red lines mark the class boundaries.
plotam<-function(edges,cluster)
{
  neworder<-order(cluster)
  max(edges)->n
  # order(neworder) ranks each node in the class-sorted layout.
  m<-t(matrix(order(neworder)[as.numeric(edges)],2))
  plot(1, 1, xlim = c(0, n + 1), ylim = c(n + 1, 0), type = "n", axes= FALSE,xlab="classes",ylab="classes",main="Reorganized Adjacency matrix")
  rect(m[,2]-0.5,m[,1]-0.5,m[,2]+0.5,m[,1]+0.5,col=1)
  rect(m[,1]-0.5,m[,2]-0.5,m[,1]+0.5,m[,2]+0.5,col=1)
  table(cluster)->limits # find the class limits
  cumsum(limits)[1:(length(limits)-1)]+0.5->limits
  abline(v=c(0.5,limits,n+0.5),h=c(0.5,limits,n+0.5),col="red")
}
############################################################
# Plot the Pis matrix and alphas vector using spectral decomposition
############################################################
# (Duplicate definition of plotparam earlier in this file.)
# Classes are placed via a Laplacian eigendecomposition of Pis; circle
# size reflects alphas.  The q argument is overwritten immediately.
plotparam<-function(Pis,alphas,q=NULL){
  length(alphas)->q
  if (q==1) {D<-list(vector=data.frame(1,1)); a<-b<-1} else {
    if (q==2) {a<-b<-1} else {a<-2; b<-3}
    D<-colSums(Pis)
    L<-diag(rep(1,q)) - diag(D^(-1/2)) %*% Pis %*% diag(D^(-1/2))
    eigen(L)->D
  }
  # NOTE(review): `D$vector` relies on `$` partial matching against
  # eigen()'s `vectors` component (exact only in the q == 1 branch).
  plot(D$vector[,a],D$vector[,b],cex=1/min(alphas^(1/2))*alphas^(1/2)*3,axes=FALSE,xlab="",ylab="",main="Specral view of the connection matrix",pch=19,col="red")
  points(D$vector[,a],D$vector[,b],cex=1/min(alphas^(1/2))*alphas^(1/2)*3)
  text(D$vector[,a],D$vector[,b],label=1:q)
  #gplot((Pis>median(Pis))*Pis,vertex.cex=1/min(alphas^(1/2))*alphas^(1/2)*3,edge.lwd=(Pis>median(Pis))*Pis*1/min(median(Pis)),label=1:length(alphas),label.pos=6)
}
############################################################
# Poisson mixture density of the degree distribution and its plot
############################################################
mixture <- function(x, alphas, lambdaq) {
  # Density of a finite mixture of Poisson distributions evaluated at x:
  #   f(x) = sum_q alphas[q] * dpois(x, lambdaq[q])
  component <- function(k) alphas[k] * dpois(x, lambda = lambdaq[k])
  Reduce(`+`, lapply(seq_along(alphas), component), 0)
}
plotmixture <- function(degrees, Pis, alphas, n, directed = FALSE) {
  # Plots the empirical degree distribution (histogram) together with the
  # Poisson mixture implied by the fitted model parameters.
  #
  # degrees : observed node degrees
  # Pis     : estimated connection probability matrix
  # alphas  : estimated class proportions
  # n       : number of nodes
  # directed: directed graphs double the expected degree (in + out)
  #
  # Expected degree of a node in each class under the model.
  if (directed) {
    lambdaq <- colSums(Pis * alphas) * (2 * n - 2)
  } else {
    lambdaq <- colSums(Pis * alphas) * (n - 1)
  }
  # Remove unconnected nodes
  degrees <- degrees[which(degrees != 0)]
  x <- min(degrees):max(degrees)
  y <- mixture(x, alphas, lambdaq)
  histo <- hist(degrees, plot = FALSE)
  # Fix: removed a stray trailing comma (empty argument) that the original
  # passed into plot()'s '...'.
  plot(histo, ylim = c(0, max(histo$density, y)), freq = FALSE, col = 7,
       main = "Degree distribution")
  lines(x, y, lwd = 2, col = "blue")
  points(x, y)
}
############################################################
# Class test and main plot method for mixer objects
############################################################
is.mixer<-function(x){if (class(x)=="mixer") TRUE else FALSE}
# S3 plot method for mixer objects.
#
# x           : fitted mixer object
# q           : number of classes to display (default: best by criterion)
# frame       : which of the 5 available panels to draw (at most 4 at once)
# classes     : optional factor of known node classes (for panel 4 pies)
# classes.col : colors for those classes
# quantile.val: threshold passed to Gplot for panel 4
#
# Panels: 1 = criterion curve, 2 = reordered adjacency matrix,
# 3 = degree distribution vs Poisson mixture, 4 = inter/intra class
# probabilities, 5 = graph colored by class.
#
# NOTE(review): par(mfrow=...) is modified and only restored at the very
# end; an error in a panel leaves the device layout changed — consider
# on.exit(). Gplot/ploticl/plotam/plotmixture are package-internal helpers.
plot.mixer<-function(x, q=NULL, frame=1:4, classes=NULL, classes.col=NULL, quantile.val=0.1, ...){
# Test x
if (!is.mixer( x ))
stop("Not a mixer object")
x->mixer.res
# Test q
if( ! is.null(q)) {
if( ! (q %in% x$qmin:x$qmax) )
stop("Bad value of 'q'")
}
# Test frame
if( ! (is.numeric(frame) & all( frame %in% 1:5) ) )
stop("Bad frame number")
# Test classes
if( ! ( is.factor( classes ) | is.null( classes ) ))
stop("'classes' not factor")
if( ! is.null( classes) & length(classes) != x$nnodes )
stop("Bad 'classes' length ")
if( ! is.numeric(quantile.val) )
stop("Bad 'quantile.val' value")
#
# Frames
#
# Remove bad Frames numbers
index <- which( frame > 5 )
if( length( index ) != 0 )
frame <- frame[ - index ]
frame <- unique( frame )
nb.frame <- length( frame )
# Too large a number of frames: remove the last frames
if( nb.frame > 4 ) {
frame <- frame[ -5:-nb.frame]
nb.frame <- 4
}
n<-dim(mixer.res$x)[1]
if ( is.null(q) ) {
# find the best number of classes according ICL
ICL<-unlist( lapply( mixer.res$output,
ICL<-function(x) x$criterion))
which.max(ICL)->i
q<-length(mixer.res$output[[i]]$alphas)
}
# index of the model with q classes in the output list
index <- q-mixer.res$qmin+1
# hard assignment: each node goes to its most probable class
apply(mixer.res$output[[index]]$Taus,2,which.max)->cluster
# Not connected nodes
cluster[ (colSums( mixer.res$output[[index]]$Taus ) == 0 ) ] <- -1
Pis <- mixer.res$output[[q-mixer.res$qmin+1]]$Pis
alphas <- mixer.res$output[[q-mixer.res$qmin+1]]$alphas
# Frames to display
nb.col <- 2
nb.lin <- 2
if ( nb.frame == 1) {
nb.col <- 1
nb.lin <- 1
} else if ( nb.frame == 2) {
nb.col <- 2
nb.lin <- 1
}
par(mfrow=c(nb.lin, nb.col))
if( 1 %in% frame ){
ploticl(mixer.res,q)
}
if( 2 %in% frame ) {
plotam(mixer.res$edges,cluster)
}
if( 3 %in% frame ){
# empirical degree of each node, counting non-loop edges only
Degrees <- rep( 0, mixer.res$nnodes )
for ( i in 1:dim(mixer.res$edges)[2] ) {
# Remove loops
if( mixer.res$edges[1, i] != mixer.res$edges[2, i] ) {
node = mixer.res$edges[1, i]
Degrees[ node ] = Degrees[ node ] + 1
node = mixer.res$edges[2, i]
Degrees[ node ] = Degrees[ node ] + 1
}
}
plotmixture( Degrees, Pis, alphas, length( mixer.res$map ), mixer.res$directed )
}
if( 4 %in% frame ){
if( !is.null( classes ) ) {
# cross-tabulate estimated clusters against known classes
pie.coef = table( factor( cluster, levels=1:q ) , classes )
# Normalization
for ( i in 1:dim(pie.coef)[1] ) {
max = max( pie.coef[i, ] )
if ( max != 0 )
pie.coef[i, ] = pie.coef[i, ] / max
}
} else {
pie.coef = NULL
}
Gplot( Pis, type="pie.nodes", node.weight=alphas, node.pie.coef=pie.coef,
quantile.val = quantile.val, colors=classes.col,
main="Inter/intra class probabilities",
... )
}
if( 5 %in% frame )
Gplot( mixer.res$edges, class=cluster, colors=classes.col,
main="Graph", directed=x$directed,
... )
par( mfrow=c(1, 1) )
}
############################################################
# Simulation of an affiliation graph
############################################################
class.ind <- function (cl)
{
  # Builds a 0/1 indicator (one-hot) matrix: one row per observation, one
  # column per factor level, with a 1 marking each observation's level.
  cl <- as.factor(cl)
  indicator <- matrix(0, length(cl), nlevels(cl))
  # (row, level) pairs index the cells to switch on
  indicator[cbind(seq_along(cl), unclass(cl))] <- 1
  dimnames(indicator) <- list(names(cl), levels(cl))
  indicator
}
# Simulates an affiliation (planted-partition) random graph: nodes are
# assigned to Q latent classes, then edges are drawn independently with
# probability lambda within a class and epsilon between classes.
# NOTE(review): results under set.seed() depend on the exact order of the
# rmultinom/sample/rbinom calls below; do not reorder them.
graph.affiliation<-function( n=100,
alphaVect=c(1/2,1/2), lambda=0.7, epsilon=0.05,
directed=FALSE) {
# INPUT n: number of vertices
# alphaVect : vector of class proportions
# lambda: probability of an edge given the same class
# epsilon: probability of an edge given two different classes
# OUTPUT x: adjacency matrix
# cluster: class vector
#
x<-matrix(0,n,n);
Q<-length(alphaVect);
NodeToClass <- vector(length=n)
# draw the class sizes, build the indicator matrix, then shuffle the rows
# so class membership is randomly spread over node indices
rmultinom(1, size=n, prob = alphaVect)->nq;
Z<-class.ind(rep(1:Q,nq));
Z<-Z[sample(1:n,n),];
for (i in 1:n) {
NodeToClass[i] <- which.max( Z[i,] )
}
# upper triangle always; lower triangle only for directed graphs
for (i in 1:n) {
if ( i != n) {
for (j in (i+1):n) {
# if i and j in same class
if ( NodeToClass[i] == NodeToClass[j]) p<-lambda else p<-epsilon
if ( (rbinom(1,1,p) )) { x[i,j] <- 1 }
}
if ( directed ) {
if ( i != 1) {
for (j in 1:(i-1)) {
if ( NodeToClass[i] == NodeToClass[j]) p<-lambda else p<-epsilon
if ( (rbinom(1,1,p) )) { x[i,j] <- 1 }
}
}
}
}
}
# undirected graphs: symmetrize the upper-triangular draw
if ( ! directed ) {
x <- x + t(x)
}
return(list(x=x,cluster=apply(Z,1,which.max)) )
}
##############################################################
# Spectral Clustering using normalized Laplacian
##############################################################
spectralkmeans <- function(x, q = 2) {
  # Spectral clustering of an adjacency matrix using the normalized
  # Laplacian: k-means is run on the eigenvectors associated with the
  # smallest eigenvalues.
  #
  # INPUT:
  #   x: adjacency matrix (NOTE(review): zero-degree nodes make D^(-1/2)
  #      infinite — callers should remove isolated nodes first)
  #   q: requested number of clusters
  # OUTPUT:
  #   An object of class "kmeans" (see ?kmeans).
  n <- dim(x)[1]
  deg <- colSums(x)
  # Normalized Laplacian: I - D^(-1/2) A D^(-1/2)
  # (fix: the original reused 'D' for both the degrees and the eigen
  # decomposition, which obscured the code)
  lap <- diag(rep(1, n)) - diag(deg^(-1/2)) %*% x %*% diag(deg^(-1/2))
  eig <- eigen(lap)
  # eigen() returns eigenvalues in decreasing order, so the trailing
  # columns hold the eigenvectors of the smallest eigenvalues.
  res <- kmeans(as.matrix(eig$vectors[, max(1, (n - q)):(n - 1)]), q)
  # Fix: return the result explicitly — the original's value was the
  # (invisible) result of an assignment.
  return(res)
}
##############################################################
# Compute the rand index between two partition
##############################################################
randError <- function(x, y) {
  # Adjusted Rand index (Hubert & Arabie) between two partitions.
  # x and y are equal-length vectors of cluster labels.
  ctab <- table(x, y)
  # number of co-clustered pairs within groups of sizes v
  n.pairs <- function(v) sum(v * (v - 1) / 2)
  cellsum <- n.pairs(ctab)
  totsum <- n.pairs(sum(ctab))
  rowsum <- n.pairs(rowSums(ctab))
  colsum <- n.pairs(colSums(ctab))
  # expected index under random labelling
  expected <- rowsum * colsum / totsum
  (cellsum - expected) / (0.5 * (rowsum + colsum) - expected)
}
##############################################################
# transform of an adjacency matrix into an array of edges
##############################################################
AdjMat2Edges <- function(x, directed = FALSE, verbose = TRUE) {
  # Converts a 0/1 adjacency matrix into a 2 x nbedges edge matrix
  # (row 1 = source/row index, row 2 = target/column index).
  #
  # x       : square adjacency matrix
  # directed: if FALSE, each undirected edge is kept once (upper triangle,
  #           loops included)
  # verbose : NOTE(review) — accepted for compatibility but currently unused
  if (dim(x)[1] != dim(x)[2]) {
    # Fix: the original fell through and failed with the cryptic
    # "object 'm' not found"; raise an informative error instead.
    stop("AdjMat2Edges: 'x' must be a square adjacency matrix")
  }
  # (dead code removed: ConnectedNodes was computed but never used — its
  # only consumer was already commented out)
  # ConnectedNodes <- getConnectedNodes( x )
  # if ( length(ConnectedNodes) > 0 ) {
  #   x <- x[ ConnectedNodes, ConnectedNodes ]
  # }
  if (directed) {
    m <- t(which((x == 1), arr.ind = TRUE))
  } else {
    m <- t(which((x == 1) & (upper.tri(x, diag = TRUE)), arr.ind = TRUE))
  }
  return(m)
}
##############################################################
# Return the edge list or adjacency matrix without loops
##############################################################
removeLoops <- function(x, adj = FALSE) {
  # Removes self-loops from either an adjacency matrix (adj = TRUE) or a
  # 2 x nbedges edge list (adj = FALSE); returns the cleaned structure.
  if (adj) {
    ## Adjacency matrix: loops live on the diagonal.
    diag(x) <- 0
  } else if (dim(x)[1] == 2) {
    ## Edge list: keep the columns whose two endpoints differ.
    # Fix: the original returned the input UNCHANGED when every edge was a
    # loop (empty ilist skipped the subsetting); now the empty loop-free
    # edge list is returned in that case as well.
    keep <- which(x[1, ] != x[2, ])
    x <- as.matrix(x[, keep])
  } else {
    cat("Mixer: removeLoops not an adjacency matrix nor edge list\n")
  }
  return(x)
}
##############################################################
# Return the index list of connected nodes
##############################################################
getConnectedNodes <- function( x ) {
  # Returns the indices (adjacency input) or ids (edge-list input) of nodes
  # that have at least one incident edge.
  if (dim(x)[1] == dim(x)[2]) {
    # Adjacency matrix: connected <=> non-zero total (in + out) degree.
    total.degree <- rowSums(x) + colSums(x)
    ConnectedNodes <- which(total.degree != 0)
  } else if (dim(x)[1] == 2) {
    # Edge list: every node id appearing in any edge.
    ConnectedNodes <- unique(as.vector(x))
  } else {
    cat("Mixer: getConnectedNodes not an adjacency matrix nor edge list\n")
  }
  return(ConnectedNodes)
}
##############################################################
# Set the seed of random functions of C/C++ part
##############################################################
# Sets the seed of R's random number generator for reproducible runs.
# The former C-level seeding of the compiled code is kept below for
# reference but is disabled.
setSeed <- function( seed=1 ) {
#invisible( .C("srand_stdlib",
# as.integer(seed)
# ) )
set.seed(seed)
}
##############################################################
# Declare a generic function
##############################################################
# Generic dispatcher: the concrete behaviour is supplied by S3 methods
# such as getModel.mixer (returns the fitted model parameters).
getModel <- function( object, ... )
{
UseMethod("getModel", object)
}
##############################################################
# Return model parameters.
##############################################################
# getModel method for mixer objects: returns the parameters of the fitted
# model with 'q' classes, or of the best model by criterion when 'q' is
# not supplied via '...'.
# Result: list(q, criterion, alphas, Pis, Taus).
# NOTE(review): sub.param() is a package-internal helper (defined
# elsewhere) that extracts a named optional argument from '...'.
getModel.mixer <- function( object, ...) {
# Test x
if ( !is.mixer( object ) )
stop("Not a mixer object")
x <- object
# Get optional parameter q
q <- sub.param("q", NULL , ...)
# Test q
if( ! is.null(q)) {
if( ! (q %in% x$qmin:x$qmax) )
stop("Bad value of 'q'")
}
if ( is.null(q) ) {
# find the best number of classes according ICL
# (the 'ICL<-function(x)' form both assigns and passes the function;
# only the local binding is affected)
ICL <- unlist( lapply( x$output, ICL<-function(x) x$criterion))
i <- which.max(ICL)
q <- length(x$output[[i]]$alphas)
}
# position of the q-class model in the output list
i <- q - x$qmin + 1
res <- list(q = q,
criterion = x$output[[i]]$criterion ,
alphas = x$output[[i]]$alphas,
Pis = x$output[[i]]$Pis,
Taus = x$output[[i]]$Taus
)
return( res )
}
##############################################################
# Return the mapping.
# 'x[2, nbedges]' edge list
##############################################################
getMixnetNumbering <- function(x) {
  # Returns the node ids of the edge list 'x' (2 x nbedges) in order of
  # first appearance, scanning each edge column by column (first endpoint,
  # then second), while skipping self-loops. This is the mapping from the
  # Mixnet numbering back to the initial graph ids.
  #
  # Fix: the original grew NodeList one element at a time with a membership
  # test per edge (O(edges * nodes)). as.vector() lays the columns out
  # consecutively, which reproduces the same first-appearance order, and
  # unique() keeps first occurrences only.
  not.loop <- x[1, ] != x[2, ]
  unique(as.vector(x[, not.loop, drop = FALSE]))
}
##############################################################
# Renumber the nodes.
# 'x[2, nbedges]' edge list
##############################################################
renumberNodes <- function( x ) {
  # Renumbers the nodes of an edge list 'x' (rows 1-2 hold the endpoints)
  # consecutively from 1, in order of first appearance. Any extra rows of
  # 'x' are preserved as zeros, matching the original behaviour.
  #
  # Fix: the original grew a node list and ran a which() lookup per
  # endpoint (quadratic). as.vector() preserves the column-wise
  # first-appearance order, so unique() + match() is equivalent and linear.
  res <- matrix(0, dim(x)[1], dim(x)[2])
  ids <- as.vector(x[1:2, , drop = FALSE])
  NodeList <- unique(ids)
  # match() returns the position of each id in its first-appearance order;
  # assignment into the double matrix keeps the original numeric type.
  res[1:2, ] <- match(ids, NodeList)
  return(res)
}
##############################################################
# getNodeNames
# 'x[2, nbedges]' edge list
##############################################################
getNodeName <- function( x ) {
  # Returns the node identifiers of the edge list 'x' (rows 1-2 hold the
  # endpoints) in order of first appearance, loops included — unlike
  # getMixnetNumbering, self-loop nodes are NOT skipped here.
  #
  # Fix: the original grew the vector one element at a time with a
  # membership test per endpoint (quadratic). as.vector() lays the columns
  # out consecutively, reproducing the same scan order, and unique() keeps
  # first occurrences only.
  unique(as.vector(x[1:2, , drop = FALSE]))
}
|
#' @title count_vowels
#' @description Counts how often each vowel (a, e, i, o, u) occurs in a
#'   string, case-insensitively.
#' @param a a character string
#' @return a named numeric vector with one count per vowel
count_vowels <- function(a){
  if(!is.character(a)){
    stop("invalid input; a string was expected")
  }
  vowels <- c('a', 'e', 'i', 'o', 'u')
  # Split into single characters and normalize case.
  chars <- tolower(strsplit(a, "")[[1]])
  # Fix: count with base R equality instead of stringr::str_count — this
  # drops an undeclared third-party dependency, avoids running a regex per
  # character, and removes the '[uu]' oddity (a duplicated character inside
  # a character class) from the original.
  c_vow <- vapply(vowels, function(v) as.double(sum(chars == v)), numeric(1))
  names(c_vow) <- vowels
  c_vow
}
|
/workout02/code/functions/count-vowels.R
|
no_license
|
nabkizil/stat133_fall2018
|
R
| false
| false
| 586
|
r
|
#' @title count_vowels
#' @description Counts how often each vowel (a, e, i, o, u) occurs in a
#'   string, case-insensitively.
#' @param a a character string
#' @return a named numeric vector with one count per vowel
count_vowels <- function(a){
  if(!is.character(a)){
    stop("invalid input; a string was expected")
  }
  vowels <- c('a', 'e', 'i', 'o', 'u')
  # Split into single characters and normalize case.
  chars <- tolower(strsplit(a, "")[[1]])
  # Fix: count with base R equality instead of stringr::str_count — this
  # drops an undeclared third-party dependency, avoids running a regex per
  # character, and removes the '[uu]' oddity (a duplicated character inside
  # a character class) from the original.
  c_vow <- vapply(vowels, function(v) as.double(sum(chars == v)), numeric(1))
  names(c_vow) <- vowels
  c_vow
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_nbrs.R
\name{get_nbrs}
\alias{get_nbrs}
\title{Get all neighbors of one or more nodes}
\usage{
get_nbrs(graph, nodes)
}
\arguments{
\item{graph}{a graph object of class
\code{dgr_graph}.}
\item{nodes}{a vector of node ID values.}
}
\value{
a vector of node ID values.
}
\description{
With one or more nodes, get the set of
all neighboring nodes.
}
\examples{
# Create a simple, directed graph with 5
# nodes and 4 edges
graph <-
create_graph() \%>\%
add_path(n = 5)
# Find all neighbor nodes for node `2`
graph \%>\%
get_nbrs(nodes = 2)
#> [1] 1 3
# Find all neighbor nodes for nodes `1`
# and `5`
graph \%>\%
get_nbrs(nodes = c(1, 5))
#> [1] 2 4
# Color node `3` with purple, get its
# neighbors and color those nodes green
graph <-
graph \%>\%
select_nodes_by_id(nodes = 3) \%>\%
set_node_attrs_ws(
node_attr = "color",
value = "purple") \%>\%
clear_selection() \%>\%
select_nodes_by_id(
nodes = get_nbrs(., 3)) \%>\%
set_node_attrs_ws(
node_attr = "color",
value = "green")
}
|
/man/get_nbrs.Rd
|
no_license
|
DataXujing/DiagrammeR
|
R
| false
| true
| 1,105
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_nbrs.R
\name{get_nbrs}
\alias{get_nbrs}
\title{Get all neighbors of one or more nodes}
\usage{
get_nbrs(graph, nodes)
}
\arguments{
\item{graph}{a graph object of class
\code{dgr_graph}.}
\item{nodes}{a vector of node ID values.}
}
\value{
a vector of node ID values.
}
\description{
With one or more nodes, get the set of
all neighboring nodes.
}
\examples{
# Create a simple, directed graph with 5
# nodes and 4 edges
graph <-
create_graph() \%>\%
add_path(n = 5)
# Find all neighbor nodes for node `2`
graph \%>\%
get_nbrs(nodes = 2)
#> [1] 1 3
# Find all neighbor nodes for nodes `1`
# and `5`
graph \%>\%
get_nbrs(nodes = c(1, 5))
#> [1] 2 4
# Color node `3` with purple, get its
# neighbors and color those nodes green
graph <-
graph \%>\%
select_nodes_by_id(nodes = 3) \%>\%
set_node_attrs_ws(
node_attr = "color",
value = "purple") \%>\%
clear_selection() \%>\%
select_nodes_by_id(
nodes = get_nbrs(., 3)) \%>\%
set_node_attrs_ws(
node_attr = "color",
value = "green")
}
|
## Example workflow: fitting DAMOCLES (community assembly) models to every
## community of a site-by-species table, after matching it to a phylogeny.
## NOTE(review): this is an analysis script with hard-coded input file
## names ("your_tree.phy", "comm_phylo.csv") — not meant to be sourced as
## a library. DAMOCLES_bootstrap runs can take a long time.
library(DAMOCLES)
library(picante)
##### Prepare data #####
DuskyTree <- read.tree("your_tree.phy")
comm <- read.csv("comm_phylo.csv", header = TRUE)
# drop the spatial/metadata columns, keeping site x species abundances
comm_sp <- subset(comm, select = -c(elev, Longitude, Latitude, N))
## Match phylogenetic and community data
matched2 <- match.phylo.comm(phy = DuskyTree, comm = comm_sp)
# NOTE(review): assumes comm_phylo.csv has a 'site' column whose order
# matches the rows of matched2$comm — TODO confirm.
comNAMES <- as.character(comm_sp$site)
# Just trying with a single community
tr <- matched2$phy
coms <- t(matched2$comm)
# NOTE(review): community column 10 is hard-coded for this trial run.
com1 <- as.matrix(cbind(matrix(rownames(coms)), matrix(coms[, 10])))
x <- DAMOCLES_bootstrap(phy = tr, pa = com1,
initparsopt = c(0.05, 0.05),
idparsopt = c(1, 2), parsfix = NULL, #idparsfix = c(3),
pars2 = c(0.001, 1e-04, 1e-05, 10000),
pchoice = 0, runs = 100, estimate_pars = TRUE, conf.int = 0.95)
## Example using a simple FOR loop to run DAMOCLES for all communities
# Split communities
comLST <- list()
for(i in 1:ncol(coms)){
comLST[[i]] <- as.matrix(cbind(matrix(rownames(coms)), matrix(coms[, i])))
}
names(comLST) <- comNAMES
# Alternative 1 - using for loop
DAMOCLESall <- list()
for(j in 1:length(comNAMES)){
print(comNAMES[j])
DAMOCLESall[[j]] <- DAMOCLES_bootstrap(phy = tr, pa = comLST[[j]],
initparsopt = c(0.1, 0.1),
idparsopt = c(1, 2), parsfix = NULL, #idparsfix = c(3),
pars2 = c(0.001, 1e-04, 1e-05, 10000),
pchoice = 0, runs = 1000, estimate_pars = TRUE,
conf.int = 0.95)
}
save(list = c("comLST", "DAMOCLESall"), file = "Results")
# Alternative 2 - using lapply function from {pbapply}
DAMOCLESall <- pbapply::pblapply(comLST, function(x) DAMOCLES::DAMOCLES_bootstrap(phy = tr, pa = x,
initparsopt = c(0.1, 0.1),
idparsopt = c(1, 2), parsfix = NULL,
pars2 = c(0.001, 1e-04, 1e-05, 10000),
pchoice = 0, runs = 1000, estimate_pars = TRUE,
conf.int = 0.95)
)
# Alternative 3 - using parallelized lapply function from {future.apply}
library("future.apply")
plan(multisession) ## Run in parallel on local computer
DAMOCLESall <- future.apply::future_lapply(comLST, function(x) DAMOCLES::DAMOCLES_bootstrap(phy = tr, pa = x,
initparsopt = c(0.1, 0.1),
idparsopt = c(1, 2), parsfix = NULL,
pars2 = c(0.001, 1e-04, 1e-05, 10000),
pchoice = 0, runs = 1000, estimate_pars = TRUE,
conf.int = 0.95)
)
|
/simpleDAMOCLES.R
|
no_license
|
jesusNPL/FurnariidesDAMOCLES
|
R
| false
| false
| 3,293
|
r
|
## Example workflow: fitting DAMOCLES (community assembly) models to every
## community of a site-by-species table, after matching it to a phylogeny.
## NOTE(review): this is an analysis script with hard-coded input file
## names ("your_tree.phy", "comm_phylo.csv") — not meant to be sourced as
## a library. DAMOCLES_bootstrap runs can take a long time.
library(DAMOCLES)
library(picante)
##### Prepare data #####
DuskyTree <- read.tree("your_tree.phy")
comm <- read.csv("comm_phylo.csv", header = TRUE)
# drop the spatial/metadata columns, keeping site x species abundances
comm_sp <- subset(comm, select = -c(elev, Longitude, Latitude, N))
## Match phylogenetic and community data
matched2 <- match.phylo.comm(phy = DuskyTree, comm = comm_sp)
# NOTE(review): assumes comm_phylo.csv has a 'site' column whose order
# matches the rows of matched2$comm — TODO confirm.
comNAMES <- as.character(comm_sp$site)
# Just trying with a single community
tr <- matched2$phy
coms <- t(matched2$comm)
# NOTE(review): community column 10 is hard-coded for this trial run.
com1 <- as.matrix(cbind(matrix(rownames(coms)), matrix(coms[, 10])))
x <- DAMOCLES_bootstrap(phy = tr, pa = com1,
initparsopt = c(0.05, 0.05),
idparsopt = c(1, 2), parsfix = NULL, #idparsfix = c(3),
pars2 = c(0.001, 1e-04, 1e-05, 10000),
pchoice = 0, runs = 100, estimate_pars = TRUE, conf.int = 0.95)
## Example using a simple FOR loop to run DAMOCLES for all communities
# Split communities
comLST <- list()
for(i in 1:ncol(coms)){
comLST[[i]] <- as.matrix(cbind(matrix(rownames(coms)), matrix(coms[, i])))
}
names(comLST) <- comNAMES
# Alternative 1 - using for loop
DAMOCLESall <- list()
for(j in 1:length(comNAMES)){
print(comNAMES[j])
DAMOCLESall[[j]] <- DAMOCLES_bootstrap(phy = tr, pa = comLST[[j]],
initparsopt = c(0.1, 0.1),
idparsopt = c(1, 2), parsfix = NULL, #idparsfix = c(3),
pars2 = c(0.001, 1e-04, 1e-05, 10000),
pchoice = 0, runs = 1000, estimate_pars = TRUE,
conf.int = 0.95)
}
save(list = c("comLST", "DAMOCLESall"), file = "Results")
# Alternative 2 - using lapply function from {pbapply}
DAMOCLESall <- pbapply::pblapply(comLST, function(x) DAMOCLES::DAMOCLES_bootstrap(phy = tr, pa = x,
initparsopt = c(0.1, 0.1),
idparsopt = c(1, 2), parsfix = NULL,
pars2 = c(0.001, 1e-04, 1e-05, 10000),
pchoice = 0, runs = 1000, estimate_pars = TRUE,
conf.int = 0.95)
)
# Alternative 3 - using parallelized lapply function from {future.apply}
library("future.apply")
plan(multisession) ## Run in parallel on local computer
DAMOCLESall <- future.apply::future_lapply(comLST, function(x) DAMOCLES::DAMOCLES_bootstrap(phy = tr, pa = x,
initparsopt = c(0.1, 0.1),
idparsopt = c(1, 2), parsfix = NULL,
pars2 = c(0.001, 1e-04, 1e-05, 10000),
pchoice = 0, runs = 1000, estimate_pars = TRUE,
conf.int = 0.95)
)
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 546
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 540
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 540
c
c Input Parameter (command line, file):
c input filename QBFLIB/MayerEichberger-Saffidine/PositionalGames_gttt/ttt_3x3-shape-3-GTTT-2-2-torus-0.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 245
c no.of clauses 546
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 540
c
c QBFLIB/MayerEichberger-Saffidine/PositionalGames_gttt/ttt_3x3-shape-3-GTTT-2-2-torus-0.qdimacs 245 546 E1 [82 83 193 210 227 244] 0 36 203 540 RED
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/MayerEichberger-Saffidine/PositionalGames_gttt/ttt_3x3-shape-3-GTTT-2-2-torus-0/ttt_3x3-shape-3-GTTT-2-2-torus-0.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 816
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 546
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 540
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 540
c
c Input Parameter (command line, file):
c input filename QBFLIB/MayerEichberger-Saffidine/PositionalGames_gttt/ttt_3x3-shape-3-GTTT-2-2-torus-0.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 245
c no.of clauses 546
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 540
c
c QBFLIB/MayerEichberger-Saffidine/PositionalGames_gttt/ttt_3x3-shape-3-GTTT-2-2-torus-0.qdimacs 245 546 E1 [82 83 193 210 227 244] 0 36 203 540 RED
|
stochprof.search.LNLN <-
function(dataset,n,TY,method="grid",M=10,par.range=NULL,prev.result=NULL,fix.mu=F,fixed.mu,genenames=NULL,print.output=F,use.constraints=F) {
# Calculates the log likelihood function of all model parameters for a given dataset
# at certain parameter values. The so-obtained values are returned in a matrix with
# the following entries: Each row corresponds to one parameter combination. All columns
# but the last one contain the parameter values at which the log likelihood function has
# been computed. The column names are the parameter names. The last column ("target") is the
# negative log likelihood function computed at the respective parameter vector. For numerical
# reasons, this target value is set to the minimum of 10^7 and the actual value.
#
# The values at which the target function is calculated are randomly drawn from some range
# specified by "par.range". If method=="grid", the target function is simply evaluated
# at such a randomly drawn parameter vector. If method=="optim", this randomly drawn vector is
# passed to the Nelder-Mead algorithm as a starting value in order to search for a local
# maximum around it.
#
# Parameters:
#
# - dataset is a matrix which contains the cumulated expression data over all cells in a tissue sample.
# Columns represent different genes, rows represent different tissue samples.
# - n is the number of cells taken from each tissue sample. This can also be a vector stating how many
# cells are in each sample seperatly.
# - TY is the number of types of cells that is assumed in the stochastic model.
# - method (default="grid") determines whether a grid search or the Nelder-Mead algorithm should be applied:
# If method=="grid", the log likelihood function is simply evaluated at certain parameter values that are
# randomly drawn.
# If method=="optim", a Nelder-Mead search starts at a randomly drawn set of parameter values in order to
# find a local maximum. The resulting locally optimal parameter is stored in the results matrix as one row.
# - M (default=10) is the number of randomly drawn parameter combinations.
# - par.range (default=NULL) is the range from which the parameter values should be randomly drawn. This is
# a matrix with the number of rows being equal to the number of model parameters. The first column contains
# the lower bound, the second column the upper bound. If par.range==NULL, some rather large range is defined.
# - prev.result (default=NULL) can contain results from former calls of this function.
# - fix.mu (default=F) indicates whether the log-means are kept fixed in the estimation procedure or whether
# they are to be estimated.
# - fixed.mu (no default, needs to be specified only when fix.mu==T) is a vector containing the values to which
# the log-means should be fixed if fix.mu==T. The order of components is as follows:
# (mu_type_1_gene_1, mu_type_1_gene_2, ..., mu_type_2_gene_1, mu_type__gene_2, ...)
# - genenames (default=NULL) are the names of the genes in the dataset.
# For genenames==NULL, the genes will simply be enumerated according to the column numbers in the dataset.
# - If print.output==T (default=F), interim results of the grid search and numerical optimization are printed
# into the console throughout the estimation procedure.
# - If use.constraints==T, constraints on the densities of the populations will be applied.
# definition of variables (necessary for CMD R check)
# (these variables will be initialized later, but they are not visible as global functions/data)
# NOTE(review): these helpers (d.sum.of.mixtures, backtransform.par, etc.)
# are resolved at run time from the package namespace; the NULL/rm dance
# only silences R CMD check's "no visible binding" notes.
d.sum.of.mixtures <- NULL
backtransform.par <- NULL
penalty.constraint <- NULL
draw.parameters <- NULL
transform.par <- NULL
rm(d.sum.of.mixtures)
rm(backtransform.par)
rm(penalty.constraint)
rm(draw.parameters)
rm(transform.par)
# nothing to do when no parameter combinations are requested
if (M==0) return(NULL)
####################
# general settings #
####################
# number of genes
m <- ncol(dataset)
# gene names
if (is.null(genenames)) {
genenames <- 1:m
}
# names of variables
if (TY==1) {
varnames <- c(paste("mu_",genenames,sep=""),
"sigma")
}
else {
varnames <- paste("p_",1:(TY-1),sep="")
for (i in 1:TY) {
varnames <- c(varnames,paste("mu",i,"gene",genenames,sep="_"))
}
varnames <- c(varnames,"sigma")
}
###########################################
## ML estimation: define target function ##
###########################################
# loglikelihood for one gene
loglikeli <- function(y,p,mu,sigma) {
# p and mu are of length TY, sigma is scalar
# floored at -10^7 to keep the optimizer away from -Inf
max(-10^7,sum(d.sum.of.mixtures(y,n,p,mu,sigma.vector=rep(sigma,TY),logdens=T)))
}
# this function will be minimized (the function "to.minimize" below just
# changes the parameterisation)
target.function <- function(p,mu,sigma) {
# p is of length TY
# mu is of length TY*m
# sigma is scalar
# build mu.matrix such that the g.th row contains the values for gene g
mu.matrix <- matrix(mu,byrow=F,ncol=TY)
# consider negative log likelihood because the target function will be minimized
this.sum <- 0
for (g in 1:m) {
this.sum <- this.sum - loglikeli(dataset[,g],p,mu.matrix[g,,drop=T],sigma)
}
return(this.sum)
}
# For identifiability purposes, we require the mu-values of the first gene to be
# in descending order. Otherwise, the parameter combinations (p,mu1_gene1,mu2_gene1,sigma)
# and (1-p,mu2_gene1,mu2_gene1,sigma) would yield identical values of the log-likelihood
# function.
# The randomly drawn parameters will always fulfil the above requirement. However, during the
# Nelder-Mead optimization procedure it might happen that it is violated. In that case, the
# optim algorithm might jump back and forth between two equivalent states.
# In order to avoid this, a penalty term is introduced which is added to the actual target
# function. This penalty is positive if mu_1^1 >= mu_2^1 >= ... >= mu_TY^1 is not fulfilled,
# and 0 otherwise.
penalty.mu <- function(mu,m,lambda=100) {
# build a matrix such that the g.th column contains the mu values for gene g
mu <- matrix(mu,byrow=T,ncol=m)
# mu for gene 1
mu.g1 <- mu[,1]
# penalty
# quadratic penalty on any increase between consecutive mu values
differences <- mu.g1[-1]-mu.g1[-length(mu.g1)]
pen <- pmax(0,differences)
return(lambda*sum(pen^2))
}
# this function should be minimized
to.minimize <- function(theta) {
# theta=(w_1,...,w_{T-1},mu,log(sigma)).
# Everything but mu is one-dimensional.
# If fix.mu==F, then mu is TY*m-dim., otherwise zero-dimensional.
# backtransformation
# afterwards, back.theta is full-dim, incl. mu
back.theta <- backtransform.par(this.par=theta,m=m,fix.mu=fix.mu,fixed.mu=fixed.mu)
if (TY>1) {
p <- back.theta[1:(TY-1)]
p <- c(p,1-sum(p))
}
else {
p <- 1
}
mu <- back.theta[TY:(length(back.theta)-1)]
sigma <- back.theta[length(back.theta)]
# penalty
pen.mu <- 0
if (TY>1) {
pen.mu <- penalty.mu(mu,m)
}
pen.constr <- 0
if ((TY>1) && (use.constraints)) {
pen.constr <- penalty.constraint(dataset,parameter=c(p[-TY],mu,sigma))
}
# target
a <- target.function(p,mu,sigma) + pen.mu + pen.constr
# cap at 10^7 for numerical stability
a <- min(10^7,a)
return(a)
}
####################
# previous results #
####################
# are there previous results already?
if (is.null(prev.result)) {
# no, there aren't
all.results <- matrix(nrow=0,ncol=length(varnames)+1)
colnames(all.results) <- c(varnames,"target")
}
else {
# yes, there are
all.results <- prev.result
}
###################################
# parameter ranges to be searched #
###################################
if (is.null(par.range)) {
# determine some range
ranges <- matrix(NA,ncol=2,nrow=length(varnames))
# p
if (TY>1) {
ranges[1:(TY-1),1] <- 0
ranges[1:(TY-1),2] <- 1
}
# mu
ranges[TY:(nrow(ranges)-1),1] <- -4.5
ranges[TY:(nrow(ranges)-1),2] <- 2.5
# sigma
ranges[nrow(ranges),1] <- 0.01
ranges[nrow(ranges),2] <- 1
}
else {
ranges <- par.range
}
if (fix.mu) {
# degenerate range: mu components are pinned to fixed.mu
ranges[TY:(nrow(ranges)-1),1] <- fixed.mu
ranges[TY:(nrow(ranges)-1),2] <- fixed.mu
}
###################################
## estimation: optimization step ##
###################################
#--------------#
# optim method #
#--------------#
if (method=="optim") {
for (i in 1:M) {
# draw starting value
par0 <- draw.parameters(ranges,m) # full-dim.
theta0 <- transform.par(this.par=par0,m=m,fix.mu=fix.mu) # lower-dim. if fix.mu==T
# clamp infinite transformed values so optim receives finite start points
theta0[theta0==-Inf] <- -10^7
theta0[theta0==Inf] <- 10^7
# numerically optimize
if (length(theta0)==1) {
result <- optim(theta0,fn=to.minimize,control=list(maxit=10^5),method="Brent",hessian=F, lower=-10^7, upper=10^7)
}
else {
result <- optim(theta0,fn=to.minimize,control=list(maxit=10^5),method="Nelder-Mead",hessian=F)
}
# result
this.theta <- result$par
this.par <- backtransform.par(this.par=this.theta,m=m,fix.mu=fix.mu,fixed.mu=fixed.mu) # full-dim
this.value <- result$value
# attach new result to all former ones
all.results <- rbind(all.results,c(this.par,this.value))
if (print.output) {
cat("---\n")
cat("Start optim at:\n")
cat(par0,"\n")
cat("Arrived at:\n")
cat(this.par,"\n")
}
}
}
#-------------#
# grid search #
#-------------#
else if (method=="grid") {
for (i in 1:M) {
# randomly draw parameter
this.par <- draw.parameters(ranges,m) # full-dim
this.theta <- transform.par(this.par=this.par,m=m,fix.mu=fix.mu) # lower-dim if fix.mu==T
if (print.output) {
cat("---\n")
cat("Compute grid at:\n")
cat(this.par,"\n")
}
# target function
this.value <- to.minimize(this.theta)
# attach new result to all former ones
all.results <- rbind(all.results,c(this.par,this.value))
}
}
return(all.results)
}
|
/R/stochprof.search.LNLN.R
|
no_license
|
fuchslab/stochprofML
|
R
| false
| false
| 10,507
|
r
|
stochprof.search.LNLN <-
function(dataset, n, TY, method = "grid", M = 10, par.range = NULL,
         prev.result = NULL, fix.mu = FALSE, fixed.mu, genenames = NULL,
         print.output = FALSE, use.constraints = FALSE) {
  # Evaluates (method "grid") or locally maximizes (method "optim") the log
  # likelihood of the LNLN model for `dataset` at M randomly drawn parameter
  # vectors. Returns a matrix with one row per parameter combination: all
  # columns but the last contain the parameter values (column names are the
  # parameter names); the last column "target" is the NEGATIVE log likelihood,
  # capped at 10^7 for numerical reasons.
  #
  # Parameters:
  # - dataset: matrix of cumulated expression over all cells in a tissue
  #   sample; columns = genes, rows = tissue samples.
  # - n: number of cells taken from each tissue sample (scalar, or a vector
  #   stating the cell count of each sample separately).
  # - TY: number of cell types assumed in the stochastic model.
  # - method: "grid" evaluates the likelihood at the random draws; "optim"
  #   starts a Nelder-Mead (Brent if one-dimensional) search from each draw
  #   and stores the local optimum.
  # - M: number of randomly drawn parameter combinations.
  # - par.range: matrix of search bounds (one row per model parameter,
  #   column 1 = lower, column 2 = upper); if NULL a rather large default
  #   range is used.
  # - prev.result: result matrix from former calls to append to (or NULL).
  # - fix.mu: if TRUE, the log-means are kept fixed (not estimated).
  # - fixed.mu: values for the log-means when fix.mu == TRUE, ordered as
  #   (mu_type_1_gene_1, mu_type_1_gene_2, ..., mu_type_2_gene_1,
  #    mu_type_2_gene_2, ...).
  # - genenames: gene names; if NULL, genes are enumerated by column number.
  # - print.output: if TRUE, interim results are printed to the console.
  # - use.constraints: if TRUE, constraints on the population densities are
  #   applied via a penalty term.

  # Dummy bindings: these functions are provided elsewhere in the package;
  # assigning and removing them only silences "no visible binding" notes
  # from R CMD check.
  d.sum.of.mixtures <- NULL
  backtransform.par <- NULL
  penalty.constraint <- NULL
  draw.parameters <- NULL
  transform.par <- NULL
  rm(d.sum.of.mixtures)
  rm(backtransform.par)
  rm(penalty.constraint)
  rm(draw.parameters)
  rm(transform.par)

  if (M == 0) return(NULL)

  ####################
  # general settings #
  ####################

  # number of genes
  m <- ncol(dataset)

  # gene names
  if (is.null(genenames)) {
    genenames <- 1:m
  }

  # names of variables: (p_1..p_{TY-1}, mu per type and gene, sigma)
  if (TY == 1) {
    varnames <- c(paste("mu_", genenames, sep = ""), "sigma")
  } else {
    varnames <- paste("p_", 1:(TY - 1), sep = "")
    for (i in 1:TY) {
      varnames <- c(varnames, paste("mu", i, "gene", genenames, sep = "_"))
    }
    varnames <- c(varnames, "sigma")
  }

  ###########################################
  ## ML estimation: define target function ##
  ###########################################

  # log likelihood for one gene, bounded below at -10^7 for numerical safety
  loglikeli <- function(y, p, mu, sigma) {
    # p and mu are of length TY, sigma is scalar
    max(-10^7,
        sum(d.sum.of.mixtures(y, n, p, mu, sigma.vector = rep(sigma, TY),
                              logdens = TRUE)))
  }

  # negative log likelihood over all genes; this is what will be minimized
  # (the function "to.minimize" below just changes the parameterisation)
  target.function <- function(p, mu, sigma) {
    # p is of length TY, mu of length TY*m, sigma is scalar.
    # mu.matrix: the g-th row contains the values for gene g.
    mu.matrix <- matrix(mu, byrow = FALSE, ncol = TY)
    this.sum <- 0
    for (g in seq_len(m)) {
      this.sum <- this.sum - loglikeli(dataset[, g], p,
                                       mu.matrix[g, , drop = TRUE], sigma)
    }
    return(this.sum)
  }

  # For identifiability, the mu-values of the first gene are required to be in
  # descending order: otherwise (p, mu1_gene1, mu2_gene1, sigma) and
  # (1-p, mu2_gene1, mu1_gene1, sigma) would yield identical likelihood
  # values and optim could jump back and forth between the two states.
  # The randomly drawn parameters always fulfil the requirement, but
  # Nelder-Mead steps may violate it; this penalty is positive if
  # mu_1^1 >= mu_2^1 >= ... >= mu_TY^1 does not hold, and 0 otherwise.
  penalty.mu <- function(mu, m, lambda = 100) {
    # matrix whose g-th column contains the mu values for gene g
    mu <- matrix(mu, byrow = TRUE, ncol = m)
    # mu for gene 1
    mu.g1 <- mu[, 1]
    # penalty on increases within the sequence
    differences <- mu.g1[-1] - mu.g1[-length(mu.g1)]
    pen <- pmax(0, differences)
    return(lambda * sum(pen^2))
  }

  # target in the transformed parameterisation; this is what optim sees.
  # theta = (w_1, ..., w_{TY-1}, mu, log(sigma)); everything but mu is
  # one-dimensional, and mu is absent when fix.mu == TRUE.
  to.minimize <- function(theta) {
    # back.theta is full-dimensional, incl. mu
    back.theta <- backtransform.par(this.par = theta, m = m,
                                    fix.mu = fix.mu, fixed.mu = fixed.mu)
    if (TY > 1) {
      p <- back.theta[1:(TY - 1)]
      p <- c(p, 1 - sum(p))
    } else {
      p <- 1
    }
    mu <- back.theta[TY:(length(back.theta) - 1)]
    sigma <- back.theta[length(back.theta)]
    # identifiability penalty
    pen.mu <- 0
    if (TY > 1) {
      pen.mu <- penalty.mu(mu, m)
    }
    # optional density-constraint penalty
    pen.constr <- 0
    if ((TY > 1) && (use.constraints)) {
      pen.constr <- penalty.constraint(dataset, parameter = c(p[-TY], mu, sigma))
    }
    # capped target
    a <- target.function(p, mu, sigma) + pen.mu + pen.constr
    a <- min(10^7, a)
    return(a)
  }

  ####################
  # previous results #
  ####################

  if (is.null(prev.result)) {
    # no previous results: start with an empty, correctly named matrix
    all.results <- matrix(nrow = 0, ncol = length(varnames) + 1)
    colnames(all.results) <- c(varnames, "target")
  } else {
    all.results <- prev.result
  }

  ###################################
  # parameter ranges to be searched #
  ###################################

  if (is.null(par.range)) {
    # determine some (rather large) default range
    ranges <- matrix(NA, ncol = 2, nrow = length(varnames))
    # p
    if (TY > 1) {
      ranges[1:(TY - 1), 1] <- 0
      ranges[1:(TY - 1), 2] <- 1
    }
    # mu
    ranges[TY:(nrow(ranges) - 1), 1] <- -4.5
    ranges[TY:(nrow(ranges) - 1), 2] <- 2.5
    # sigma
    ranges[nrow(ranges), 1] <- 0.01
    ranges[nrow(ranges), 2] <- 1
  } else {
    ranges <- par.range
  }
  if (fix.mu) {
    # degenerate range: mu is pinned to fixed.mu and effectively not searched
    ranges[TY:(nrow(ranges) - 1), 1] <- fixed.mu
    ranges[TY:(nrow(ranges) - 1), 2] <- fixed.mu
  }

  ###################################
  ## estimation: optimization step ##
  ###################################

  if (method == "optim") {
    #--------------#
    # optim method #
    #--------------#
    for (i in seq_len(M)) {
      # draw starting value
      par0 <- draw.parameters(ranges, m)                                # full-dim.
      theta0 <- transform.par(this.par = par0, m = m, fix.mu = fix.mu)  # lower-dim. if fix.mu
      # clip infinities produced by the transformation
      theta0[theta0 == -Inf] <- -10^7
      theta0[theta0 == Inf] <- 10^7
      # numerically optimize; Brent is required for one-dimensional problems
      if (length(theta0) == 1) {
        result <- optim(theta0, fn = to.minimize, control = list(maxit = 10^5),
                        method = "Brent", hessian = FALSE,
                        lower = -10^7, upper = 10^7)
      } else {
        result <- optim(theta0, fn = to.minimize, control = list(maxit = 10^5),
                        method = "Nelder-Mead", hessian = FALSE)
      }
      # back-transform the local optimum to the original parameterisation
      this.theta <- result$par
      this.par <- backtransform.par(this.par = this.theta, m = m,
                                    fix.mu = fix.mu, fixed.mu = fixed.mu)  # full-dim
      this.value <- result$value
      # attach new result to all former ones
      all.results <- rbind(all.results, c(this.par, this.value))
      if (print.output) {
        cat("---\n")
        cat("Start optim at:\n")
        cat(par0, "\n")
        cat("Arrived at:\n")
        cat(this.par, "\n")
      }
    }
  } else if (method == "grid") {
    #-------------#
    # grid search #
    #-------------#
    for (i in seq_len(M)) {
      # randomly draw parameter
      this.par <- draw.parameters(ranges, m)                                   # full-dim
      this.theta <- transform.par(this.par = this.par, m = m, fix.mu = fix.mu) # lower-dim if fix.mu
      if (print.output) {
        cat("---\n")
        cat("Compute grid at:\n")
        cat(this.par, "\n")
      }
      # evaluate target function at the drawn point
      this.value <- to.minimize(this.theta)
      # attach new result to all former ones
      all.results <- rbind(all.results, c(this.par, this.value))
    }
  }

  return(all.results)
}
|
# Pool scoring setup: pre-allocate one slot per (simulation, bracket) pair.
# NOTE(review): n_result_sims must already be defined by an earlier script.
n_in_pool <- 137
len <- n_result_sims * n_in_pool
hold_num <- rep(NA, len)            # score of bracket i under simulation j
hold_which_bracket <- rep(NA, len)  # bracket index i
sim_num <- rep(NA, len)             # simulation index j
# Scores every pool bracket against every simulated tournament outcome.
#
# bracket_created: list of 6 vectors (rounds 1..6), the pool entries'
#   picks, concatenated over all n_in_pool brackets.
# round_winners:   same layout, the winners of each round, concatenated over
#   all n_result_sims simulations.
# Returns a list with one entry per (simulation j, bracket i) pair:
#   hold_num (score), which_bracket (i), sim_num (j).
#
# Reads n_result_sims and n_in_pool from the calling environment, as before;
# the score accumulators are now allocated locally instead of being pulled
# in from globals.
calc_bracket_score <- function(bracket_created, round_winners) {
  # games per bracket in rounds 1..6, and standard per-game point values
  games_per_round <- c(32, 16, 8, 4, 2, 1)
  points_per_game <- c(1, 2, 4, 8, 16, 32)

  # split each round's concatenated vector into one chunk per bracket / sim
  creations <- vector("list", 6)
  results <- vector("list", 6)
  for (rd in 1:6) {
    creations[[rd]] <- split(bracket_created[[rd]],
                             ceiling(seq_along(bracket_created[[rd]]) / games_per_round[rd]))
    results[[rd]] <- split(round_winners[[rd]],
                           ceiling(seq_along(round_winners[[rd]]) / games_per_round[rd]))
  }

  # preallocate the result vectors
  len <- n_result_sims * n_in_pool
  hold_num <- rep(NA, len)
  hold_which_bracket <- rep(NA, len)
  sim_num <- rep(NA, len)

  x <- 0
  for (j in seq_len(n_result_sims)) {
    for (i in seq_len(n_in_pool)) {
      x <- x + 1
      # weighted count of correct picks per round
      # (na.rm = TRUE mirrors the original length(which(...)), which
      # silently dropped NA comparisons)
      score <- 0
      for (rd in 1:6) {
        score <- score +
          points_per_game[rd] * sum(creations[[rd]][[i]] == results[[rd]][[j]],
                                    na.rm = TRUE)
      }
      hold_num[x] <- score
      hold_which_bracket[x] <- i
      sim_num[x] <- j
    }
    # progress indicator (fraction of simulations processed)
    print(j / n_result_sims)
  }

  list(hold_num = hold_num,
       which_bracket = hold_which_bracket,
       sim_num = sim_num)
}
# Score every pool bracket against every simulated outcome, then rank.
g <- calc_bracket_score(bracket_created, round_winners)
scores <- g[[1]]
bracket_num <- g[[2]]
sim_num <- g[[3]]
results <- data.frame(scores, bracket_num, sim_num)
results <- left_join(results, summary_which, by = c("bracket_num" = "bracket_number"))
# Within each simulated tournament, rank brackets by score (1 = best);
# ties are broken at random.
max_pts <- results %>%
  group_by(sim_num) %>%
  mutate(rank = rank(-scores, ties.method = "random"))
# Win probability per entry = share of simulations in which it ranks first.
# NOTE(review): "summary" shadows base::summary; name kept for compatibility
# with downstream code.
summary <- max_pts %>%
  group_by(df) %>%
  summarise(win_prob = sum(ifelse(rank == 1, 1, 0)) / n_result_sims)
summary <- summary %>% rename("Bracket" = "df")
summary$win_prob <- round(summary$win_prob, 3)
summary <- summary %>% arrange(-win_prob)
summary$Time <- Sys.time()
write.csv(summary, file = "win_prob_as_of_now.csv", row.names = FALSE)
#wins_to_check = subset(max_pts, df == "Eric Thiel 3" & rank == 1)
# For a given pool entry, find the simulations it wins and tabulate, per team,
# how often that team advances through each round in those winning simulations.
# Relies on globals: max_pts, round_winners, seed_list, get_best_bracket().
# NOTE(review): assumes get_best_bracket() returns a list of 6 character
# vectors (round winners) -- confirm against its definition.
get_cheering <- function(bracket_name) {
  # simulations in which this entry finishes first
  wins_to_check <- subset(max_pts, df == bracket_name & rank == 1)
  n_wins <- nrow(wins_to_check)

  # One get_best_bracket() call per winning simulation. (The original grew
  # six data frames with rbind inside the loop, which is O(n^2); collecting
  # the lists and flattening once is equivalent and linear.)
  best_brackets <- lapply(wins_to_check$sim_num,
                          function(t) get_best_bracket(round_winners, t))

  # round_teams[[k]]: round-k winners concatenated across all winning sims
  round_teams <- lapply(1:6, function(k) {
    unlist(lapply(best_brackets, function(bb) bb[[k]]))
  })

  # Per team: fraction of winning simulations in which the team appears as a
  # winner of each round (same recycled comparison as the original).
  wins_who_to_cheer_for <-
    seed_list %>%
    select(-elim_round) %>%
    group_by(team) %>%
    mutate(
      'second_round' = sum(team == round_teams[[1]]) / n_wins,
      'sweet_sixteen' = sum(team == round_teams[[2]]) / n_wins,
      'elite_eight' = sum(team == round_teams[[3]]) / n_wins,
      'final_four' = sum(team == round_teams[[4]]) / n_wins,
      'championship_game' = sum(team == round_teams[[5]]) / n_wins,
      'champ' = sum(team == round_teams[[6]]) / n_wins) %>%
    ungroup()
  return(wins_who_to_cheer_for)
}
# get_sim = subset(max_pts, sim_num == 29)
# Cheering table for the entry of interest.
wins_who_to_cheer_for <- get_cheering("thomas Goetz 3")
|
/Post-Tournament/live_win_prob_pools.R
|
no_license
|
eric-thiel/brack_sim
|
R
| false
| false
| 6,604
|
r
|
# Pool scoring setup: pre-allocate one slot per (simulation, bracket) pair.
# NOTE(review): n_result_sims must already be defined by an earlier script.
n_in_pool <- 137
len <- n_result_sims * n_in_pool
hold_num <- rep(NA, len)            # score of bracket i under simulation j
hold_which_bracket <- rep(NA, len)  # bracket index i
sim_num <- rep(NA, len)             # simulation index j
# Scores every pool bracket against every simulated tournament outcome.
#
# bracket_created: list of 6 vectors (rounds 1..6), the pool entries'
#   picks, concatenated over all n_in_pool brackets.
# round_winners:   same layout, the winners of each round, concatenated over
#   all n_result_sims simulations.
# Returns a list with one entry per (simulation j, bracket i) pair:
#   hold_num (score), which_bracket (i), sim_num (j).
#
# Reads n_result_sims and n_in_pool from the calling environment, as before;
# the score accumulators are now allocated locally instead of being pulled
# in from globals.
calc_bracket_score <- function(bracket_created, round_winners) {
  # games per bracket in rounds 1..6, and standard per-game point values
  games_per_round <- c(32, 16, 8, 4, 2, 1)
  points_per_game <- c(1, 2, 4, 8, 16, 32)

  # split each round's concatenated vector into one chunk per bracket / sim
  creations <- vector("list", 6)
  results <- vector("list", 6)
  for (rd in 1:6) {
    creations[[rd]] <- split(bracket_created[[rd]],
                             ceiling(seq_along(bracket_created[[rd]]) / games_per_round[rd]))
    results[[rd]] <- split(round_winners[[rd]],
                           ceiling(seq_along(round_winners[[rd]]) / games_per_round[rd]))
  }

  # preallocate the result vectors
  len <- n_result_sims * n_in_pool
  hold_num <- rep(NA, len)
  hold_which_bracket <- rep(NA, len)
  sim_num <- rep(NA, len)

  x <- 0
  for (j in seq_len(n_result_sims)) {
    for (i in seq_len(n_in_pool)) {
      x <- x + 1
      # weighted count of correct picks per round
      # (na.rm = TRUE mirrors the original length(which(...)), which
      # silently dropped NA comparisons)
      score <- 0
      for (rd in 1:6) {
        score <- score +
          points_per_game[rd] * sum(creations[[rd]][[i]] == results[[rd]][[j]],
                                    na.rm = TRUE)
      }
      hold_num[x] <- score
      hold_which_bracket[x] <- i
      sim_num[x] <- j
    }
    # progress indicator (fraction of simulations processed)
    print(j / n_result_sims)
  }

  list(hold_num = hold_num,
       which_bracket = hold_which_bracket,
       sim_num = sim_num)
}
# Score every pool bracket against every simulated outcome, then rank.
g <- calc_bracket_score(bracket_created, round_winners)
scores <- g[[1]]
bracket_num <- g[[2]]
sim_num <- g[[3]]
results <- data.frame(scores, bracket_num, sim_num)
results <- left_join(results, summary_which, by = c("bracket_num" = "bracket_number"))
# Within each simulated tournament, rank brackets by score (1 = best);
# ties are broken at random.
max_pts <- results %>%
  group_by(sim_num) %>%
  mutate(rank = rank(-scores, ties.method = "random"))
# Win probability per entry = share of simulations in which it ranks first.
# NOTE(review): "summary" shadows base::summary; name kept for compatibility
# with downstream code.
summary <- max_pts %>%
  group_by(df) %>%
  summarise(win_prob = sum(ifelse(rank == 1, 1, 0)) / n_result_sims)
summary <- summary %>% rename("Bracket" = "df")
summary$win_prob <- round(summary$win_prob, 3)
summary <- summary %>% arrange(-win_prob)
summary$Time <- Sys.time()
write.csv(summary, file = "win_prob_as_of_now.csv", row.names = FALSE)
#wins_to_check = subset(max_pts, df == "Eric Thiel 3" & rank == 1)
# For a given pool entry, find the simulations it wins and tabulate, per team,
# how often that team advances through each round in those winning simulations.
# Relies on globals: max_pts, round_winners, seed_list, get_best_bracket().
# NOTE(review): assumes get_best_bracket() returns a list of 6 character
# vectors (round winners) -- confirm against its definition.
get_cheering <- function(bracket_name) {
  # simulations in which this entry finishes first
  wins_to_check <- subset(max_pts, df == bracket_name & rank == 1)
  n_wins <- nrow(wins_to_check)

  # One get_best_bracket() call per winning simulation. (The original grew
  # six data frames with rbind inside the loop, which is O(n^2); collecting
  # the lists and flattening once is equivalent and linear.)
  best_brackets <- lapply(wins_to_check$sim_num,
                          function(t) get_best_bracket(round_winners, t))

  # round_teams[[k]]: round-k winners concatenated across all winning sims
  round_teams <- lapply(1:6, function(k) {
    unlist(lapply(best_brackets, function(bb) bb[[k]]))
  })

  # Per team: fraction of winning simulations in which the team appears as a
  # winner of each round (same recycled comparison as the original).
  wins_who_to_cheer_for <-
    seed_list %>%
    select(-elim_round) %>%
    group_by(team) %>%
    mutate(
      'second_round' = sum(team == round_teams[[1]]) / n_wins,
      'sweet_sixteen' = sum(team == round_teams[[2]]) / n_wins,
      'elite_eight' = sum(team == round_teams[[3]]) / n_wins,
      'final_four' = sum(team == round_teams[[4]]) / n_wins,
      'championship_game' = sum(team == round_teams[[5]]) / n_wins,
      'champ' = sum(team == round_teams[[6]]) / n_wins) %>%
    ungroup()
  return(wins_who_to_cheer_for)
}
# get_sim = subset(max_pts, sim_num == 29)
# Cheering table for the entry of interest.
wins_who_to_cheer_for <- get_cheering("thomas Goetz 3")
|
library(stringr)
library(dplyr)
library(purrr)
library(readr)
#### Part 1 ----
# Known-answer example from the puzzle statement.
test <- c(
  "abcdef", "bababc", "abbcde", "abcccd", "aabcdd", "abcdee", "ababab"
)
TESTCHKSUM <- 12
# Box IDs, one per row, read as a single character column.
data <- read_csv("Input02.txt", col_names = FALSE, col_types = "c")
# Counts the strings in `input` that contain at least one letter occurring
# exactly twice. Base-R vapply() replaces the purrr pipeline: the result is
# type-stable and the helper no longer depends on a third-party package.
TwoMatch <- function(input) {
  has_double <- vapply(
    strsplit(input, ""),
    function(chars) any(table(chars) == 2),
    logical(1)
  )
  sum(has_double)
}
# Counts the strings in `input` that contain at least one letter occurring
# exactly three times. Base-R vapply() replaces the purrr pipeline: the
# result is type-stable and the helper is dependency-free.
ThreeMatch <- function(input) {
  has_triple <- vapply(
    strsplit(input, ""),
    function(chars) any(table(chars) == 3),
    logical(1)
  )
  sum(has_triple)
}
# Puzzle checksum: (# IDs with a doubled letter) * (# IDs with a tripled letter).
BoxCheckSum <- function(input) {
  doubles <- TwoMatch(input)
  triples <- ThreeMatch(input)
  doubles * triples
}
# "Unit" test: the worked example must reproduce the published checksum.
chkTest <- BoxCheckSum(test)
if (chkTest == TESTCHKSUM) {
  message("Test Checksum Passed")
} else {
  stop(
    "Checksum does not work for the test case. ",
    sprintf("Checksum is %i instead of %i", chkTest, TESTCHKSUM),
    call. = FALSE
  )
}
# calculate checksum for the puzzle input
print(sprintf("Answer for part 1: %i", BoxCheckSum(data[[1]])))
#### Part 2 ----
# Example set: "fghij" and "fguij" differ in exactly one position.
test2 <- c(
  "abcde", "fghij", "klmno", "pqrst", "fguij", "axcye", "wvxyz"
)
# Pairwise squared Euclidean distance between the 26-dim letter-count
# profiles of the input strings. Two IDs that differ in exactly one position
# have squared distance <= 2 here, so this serves as a cheap prefilter (see
# FindPair). Base tabulate()/dist() replace the purrr/dplyr pipeline
# (map + reduce(bind_rows)), removing two dependencies and the rowwise
# table-binding overhead.
PrimStrDist <- function(input) {
  # one row per string: counts of letters a..z
  counts <- t(vapply(
    strsplit(input, ""),
    function(chars) tabulate(factor(chars, levels = letters), nbins = 26),
    integer(26)
  ))
  dist(counts, method = "euclidean", diag = TRUE, upper = TRUE)^2
}
# Returns the pair of box IDs whose strings differ in exactly one position.
# Strategy: prefilter candidates whose squared letter-count distance is 2
# (a necessary condition for a one-position difference), then confirm with an
# exact positionwise (Hamming) comparison. Base lapply/vapply replace the
# original magrittr-dot closure, which was hard to read and purrr-dependent.
FindPair <- function(input) {
  distMat <- matrix(
    as.integer(as.matrix(PrimStrDist(input))),
    nrow = length(input)
  )
  # IDs involved in any squared-distance-2 pair
  cand_idx <- unique(as.vector(which(distMat == 2, arr.ind = TRUE)))
  shortlist <- input[cand_idx]
  chars <- strsplit(shortlist, "")
  # exact Hamming distances on the shortlist (column j = distances to ID j)
  hamming <- matrix(
    unlist(lapply(chars, function(y) {
      vapply(chars, function(x) sum(x != y), numeric(1))
    })),
    nrow = length(shortlist)
  )
  pair_idx <- unique(as.vector(which(hamming == 1, arr.ind = TRUE)))
  shortlist[pair_idx]
}
# Returns the letters (in order) shared at identical positions by the two
# closest box IDs. If print.pair = TRUE, also reports which pair matched.
# paste(collapse = "") replaces str_c(): identical for non-NA character
# vectors, and drops the stringr dependency from this helper.
MatchLetters <- function(input, print.pair = FALSE) {
  p <- strsplit(FindPair(input), "")
  a <- p[[1]]
  b <- p[[2]]
  if (print.pair) {
    message(sprintf("The matching pair is %s and %s",
                    paste(a, collapse = ""), paste(b, collapse = "")))
  }
  # keep only the positions where both IDs agree
  paste(a[a == b], collapse = "")
}
# "Unit" test for the matching functions.
TEST2CHK <- "fgij"
test2Ans <- MatchLetters(test2)
if (test2Ans == TEST2CHK) {
  message("Test 2 Passed")
} else {
  stop(
    "Letter matching does not work for the test case. ",
    sprintf("Result is %s instead of %s", test2Ans, TEST2CHK),
    call. = FALSE
  )
}
print(sprintf("Answer for part 2: %s",
              MatchLetters(data[[1]], print.pair = TRUE)))
|
/Day02/BoxEval.R
|
permissive
|
adamsma/AoC2018
|
R
| false
| false
| 2,900
|
r
|
library(stringr)
library(dplyr)
library(purrr)
library(readr)
#### Part 1 ----
# Known-answer example from the puzzle statement.
test <- c(
  "abcdef", "bababc", "abbcde", "abcccd", "aabcdd", "abcdee", "ababab"
)
TESTCHKSUM <- 12
# Box IDs, one per row, read as a single character column.
data <- read_csv("Input02.txt", col_names = FALSE, col_types = "c")
# Counts the strings in `input` that contain at least one letter occurring
# exactly twice. Base-R vapply() replaces the purrr pipeline: the result is
# type-stable and the helper no longer depends on a third-party package.
TwoMatch <- function(input) {
  has_double <- vapply(
    strsplit(input, ""),
    function(chars) any(table(chars) == 2),
    logical(1)
  )
  sum(has_double)
}
# Counts the strings in `input` that contain at least one letter occurring
# exactly three times. Base-R vapply() replaces the purrr pipeline: the
# result is type-stable and the helper is dependency-free.
ThreeMatch <- function(input) {
  has_triple <- vapply(
    strsplit(input, ""),
    function(chars) any(table(chars) == 3),
    logical(1)
  )
  sum(has_triple)
}
# Puzzle checksum: (# IDs with a doubled letter) * (# IDs with a tripled letter).
BoxCheckSum <- function(input) {
  doubles <- TwoMatch(input)
  triples <- ThreeMatch(input)
  doubles * triples
}
# "Unit" test: the worked example must reproduce the published checksum.
chkTest <- BoxCheckSum(test)
if (chkTest == TESTCHKSUM) {
  message("Test Checksum Passed")
} else {
  stop(
    "Checksum does not work for the test case. ",
    sprintf("Checksum is %i instead of %i", chkTest, TESTCHKSUM),
    call. = FALSE
  )
}
# calculate checksum for the puzzle input
print(sprintf("Answer for part 1: %i", BoxCheckSum(data[[1]])))
#### Part 2 ----
# Example set: "fghij" and "fguij" differ in exactly one position.
test2 <- c(
  "abcde", "fghij", "klmno", "pqrst", "fguij", "axcye", "wvxyz"
)
# Pairwise squared Euclidean distance between the 26-dim letter-count
# profiles of the input strings. Two IDs that differ in exactly one position
# have squared distance <= 2 here, so this serves as a cheap prefilter (see
# FindPair). Base tabulate()/dist() replace the purrr/dplyr pipeline
# (map + reduce(bind_rows)), removing two dependencies and the rowwise
# table-binding overhead.
PrimStrDist <- function(input) {
  # one row per string: counts of letters a..z
  counts <- t(vapply(
    strsplit(input, ""),
    function(chars) tabulate(factor(chars, levels = letters), nbins = 26),
    integer(26)
  ))
  dist(counts, method = "euclidean", diag = TRUE, upper = TRUE)^2
}
# Returns the pair of box IDs whose strings differ in exactly one position.
# Strategy: prefilter candidates whose squared letter-count distance is 2
# (a necessary condition for a one-position difference), then confirm with an
# exact positionwise (Hamming) comparison. Base lapply/vapply replace the
# original magrittr-dot closure, which was hard to read and purrr-dependent.
FindPair <- function(input) {
  distMat <- matrix(
    as.integer(as.matrix(PrimStrDist(input))),
    nrow = length(input)
  )
  # IDs involved in any squared-distance-2 pair
  cand_idx <- unique(as.vector(which(distMat == 2, arr.ind = TRUE)))
  shortlist <- input[cand_idx]
  chars <- strsplit(shortlist, "")
  # exact Hamming distances on the shortlist (column j = distances to ID j)
  hamming <- matrix(
    unlist(lapply(chars, function(y) {
      vapply(chars, function(x) sum(x != y), numeric(1))
    })),
    nrow = length(shortlist)
  )
  pair_idx <- unique(as.vector(which(hamming == 1, arr.ind = TRUE)))
  shortlist[pair_idx]
}
# Returns the letters (in order) shared at identical positions by the two
# closest box IDs. If print.pair = TRUE, also reports which pair matched.
# paste(collapse = "") replaces str_c(): identical for non-NA character
# vectors, and drops the stringr dependency from this helper.
MatchLetters <- function(input, print.pair = FALSE) {
  p <- strsplit(FindPair(input), "")
  a <- p[[1]]
  b <- p[[2]]
  if (print.pair) {
    message(sprintf("The matching pair is %s and %s",
                    paste(a, collapse = ""), paste(b, collapse = "")))
  }
  # keep only the positions where both IDs agree
  paste(a[a == b], collapse = "")
}
# "Unit" test for the matching functions.
TEST2CHK <- "fgij"
test2Ans <- MatchLetters(test2)
if (test2Ans == TEST2CHK) {
  message("Test 2 Passed")
} else {
  stop(
    "Letter matching does not work for the test case. ",
    sprintf("Result is %s instead of %s", test2Ans, TEST2CHK),
    call. = FALSE
  )
}
print(sprintf("Answer for part 2: %s",
              MatchLetters(data[[1]], print.pair = TRUE)))
|
##' load all data from IntAct ftp
##' @name loadIntActFTP
##' @author Vitalii Kleshchevnikov
##' @description \code{loadIntActFTP} loads all data stored on \link{ftp://ftp.ebi.ac.uk/pub/databases/intact/current/psimitab/} in intact.txt file. Details: \link{https://www.ebi.ac.uk/intact/downloads}. This file contains the data from the following databases "IntAct", "MINT", "DIP", "bhf-ucl", "MPIDB", "MatrixDB", "HPIDb","I2D-IMEx","InnateDB-IMEx", "MolCon", "UniProt", "MBInfo"
##' @param dir directory where to save/look for the local copy
##' @param release which locally saved IntAct release to load (the default is to load the latest and read it into R)
##' @return \code{loadIntActFTP} saves intact.txt to a file, returns the object of class RAW_MItab27
##' @import data.table
##' @importFrom R.utils gzip
##' @importFrom R.utils gunzip
##' @importFrom downloader download
##' @export loadIntActFTP
##' @examples
##' {
##' loadIntActFTP("./", release = NULL)
##' }
##' @author Vitalii Kleshchevnikov
loadIntActFTP <- function(dir, release = NULL) {
  # Local file names: intact<release>.txt(.gz); .zip is the ftp download format.
  if (is.null(release)) {
    file_name <- paste0(dir, "intact", lastIntActRelease(), ".txt")
  } else {
    file_name <- paste0(dir, "intact", release, ".txt")
  }
  file_name.gz <- paste0(file_name, ".gz")
  # Anchored sub(): the original gsub("txt", "zip", ...) would also corrupt
  # any occurrence of "txt" inside the directory path.
  file_name.zip <- sub("\\.txt$", ".zip", file_name)
  # download MI-TAB 2.7 from IntAct ftp unless a local gzipped copy exists
  if (!file.exists(file_name.gz)) {
    # a specific release can only be loaded from disk, never re-downloaded
    if (!is.null(release)) stop(paste0("no data for IntAct release ", release, " in the directory: ", dir, ", set release to NULL to load the latest release"))
    message("... downloading from IntAct ftp ...")
    download(url = "ftp://ftp.ebi.ac.uk/pub/databases/intact/current/psimitab/intact.zip", destfile = file_name.zip)
    unzip(file_name.zip, exdir = dir)
    # the archive also ships negative interactions, which are not kept
    unlink(paste0(dir, "intact_negative.txt"))
    unlink(file_name.zip)
    gzip(filename = paste0(dir, "intact.txt"), destname = file_name.gz,
         remove = TRUE, overwrite = TRUE)
  } else {
    message("... loading local copy ...")
  }
  # decompress (keeping the .gz cache), read, then remove the plain-text copy
  gunzip(filename = file_name.gz, destname = file_name, remove = FALSE, overwrite = TRUE)
  intact <- fread(file_name, header = TRUE, stringsAsFactors = FALSE)
  unlink(file_name)
  result <- list(data = intact, metadata = "This object contains the data from ftp://ftp.ebi.ac.uk/pub/databases/intact/current/psimitab/intact.txt and contains all molecular interaction data from the following databases: \"IntAct\", \"MINT\", \"DIP\", \"bhf-ucl\", \"MPIDB\", \"MatrixDB\", \"HPIDb\",\"I2D-IMEx\",\"InnateDB-IMEx\", \"MolCon\", \"UniProt\", \"MBInfo\"")
  class(result) <- "RAW_MItab27"
  return(result)
}
|
/R/loadIntActFTP.R
|
permissive
|
vitkl/PItools
|
R
| false
| false
| 2,538
|
r
|
##' load all data from IntAct ftp
##' @name loadIntActFTP
##' @author Vitalii Kleshchevnikov
##' @description \code{loadIntActFTP} loads all data stored on \link{ftp://ftp.ebi.ac.uk/pub/databases/intact/current/psimitab/} in intact.txt file. Details: \link{https://www.ebi.ac.uk/intact/downloads}. This file contains the data from the following databases "IntAct", "MINT", "DIP", "bhf-ucl", "MPIDB", "MatrixDB", "HPIDb","I2D-IMEx","InnateDB-IMEx", "MolCon", "UniProt", "MBInfo"
##' @param dir directory where to save/look for the local copy
##' @param release which locally saved IntAct release to load (the default is to load the latest and read it into R)
##' @return \code{loadIntActFTP} saves intact.txt to a file, returns the object of class RAW_MItab27
##' @import data.table
##' @importFrom R.utils gzip
##' @importFrom R.utils gunzip
##' @importFrom downloader download
##' @export loadIntActFTP
##' @examples
##' {
##' loadIntActFTP("./", release = NULL)
##' }
##' @author Vitalii Kleshchevnikov
loadIntActFTP <- function(dir, release = NULL) {
  # Local file names: intact<release>.txt(.gz); .zip is the ftp download format.
  if (is.null(release)) {
    file_name <- paste0(dir, "intact", lastIntActRelease(), ".txt")
  } else {
    file_name <- paste0(dir, "intact", release, ".txt")
  }
  file_name.gz <- paste0(file_name, ".gz")
  # Anchored sub(): the original gsub("txt", "zip", ...) would also corrupt
  # any occurrence of "txt" inside the directory path.
  file_name.zip <- sub("\\.txt$", ".zip", file_name)
  # download MI-TAB 2.7 from IntAct ftp unless a local gzipped copy exists
  if (!file.exists(file_name.gz)) {
    # a specific release can only be loaded from disk, never re-downloaded
    if (!is.null(release)) stop(paste0("no data for IntAct release ", release, " in the directory: ", dir, ", set release to NULL to load the latest release"))
    message("... downloading from IntAct ftp ...")
    download(url = "ftp://ftp.ebi.ac.uk/pub/databases/intact/current/psimitab/intact.zip", destfile = file_name.zip)
    unzip(file_name.zip, exdir = dir)
    # the archive also ships negative interactions, which are not kept
    unlink(paste0(dir, "intact_negative.txt"))
    unlink(file_name.zip)
    gzip(filename = paste0(dir, "intact.txt"), destname = file_name.gz,
         remove = TRUE, overwrite = TRUE)
  } else {
    message("... loading local copy ...")
  }
  # decompress (keeping the .gz cache), read, then remove the plain-text copy
  gunzip(filename = file_name.gz, destname = file_name, remove = FALSE, overwrite = TRUE)
  intact <- fread(file_name, header = TRUE, stringsAsFactors = FALSE)
  unlink(file_name)
  result <- list(data = intact, metadata = "This object contains the data from ftp://ftp.ebi.ac.uk/pub/databases/intact/current/psimitab/intact.txt and contains all molecular interaction data from the following databases: \"IntAct\", \"MINT\", \"DIP\", \"bhf-ucl\", \"MPIDB\", \"MatrixDB\", \"HPIDb\",\"I2D-IMEx\",\"InnateDB-IMEx\", \"MolCon\", \"UniProt\", \"MBInfo\"")
  class(result) <- "RAW_MItab27"
  return(result)
}
|
################################################################################
### IRD
### Lab 05
### Decision trees cont., regression trees, model evaluation
################################################################################
################################################################################
# Libraries
################################################################################
# NOTE(review): rm(list = ls()) in a shared script is discouraged -- it wipes
# the user's entire workspace as a side effect of sourcing the file.
rm(list=ls()) # programmatic cleaning of the environment
library(rpart) # for decision trees
library(rpart.plot) # for plotting trees
#install.packages('ROCR')
library(ROCR) # for the ROC curve
library(caret) # for variable importance in the model
# Load the data - please fill in the correct path to the file
dane <- read.csv2('https://bit.ly/2L0HtVa', stringsAsFactors = FALSE, dec = '.')
#dane <- read.csv2('data/winequality-white.csv', stringsAsFactors = FALSE, dec = '.')
# read.csv2 instead of read.csv because of the column separator (semicolon)
# dec = '.' - indicates that the decimal separator is a dot
################################################################################
# Data exploration
################################################################################
head(dane) # first 6 observations
str(dane) # variable types
summary(dane) # basic statistics
hist(dane$quality) # distribution of the response variable
################################################################################
# Classification trees - recap, accuracy assessment
################################################################################
# recode quality as binary: if quality >= 6 the quality is high, otherwise low:
dane$quality <- ifelse(dane$quality >= 6, 'high', 'low')
# reproducible random 70/30 train/test split
set.seed(1)
train_proportion <- 0.7
train_index <- runif(nrow(dane)) < train_proportion
train <- dane[train_index,]
test <- dane[!train_index,]
# build and compare 2 classification trees
# (d.klas2 uses a smaller complexity parameter, so it grows a larger tree)
d.klas1 <- rpart(quality~., data = train, method = "class")
d.klas2 <- rpart(quality~., data = train, method = "class", cp = 0.005)
#plot(d.klas, margin = 0.2)
#text(d.klas, pretty = 0)
rpart.plot(d.klas1, under=FALSE, fallen.leaves = FALSE, cex = 0.3)
rpart.plot(d.klas2, under=FALSE, fallen.leaves = FALSE, cex = 0.3)
# 3) Confusion matrices + statistics assessing model quality
# (rows = predicted class on the test set, columns = actual class)
CM <- list()
CM[["d.klas1"]] <- table(predict(d.klas1, new = test, type = "class"), test$quality)
CM[["d.klas2"]] <- table(predict(d.klas2, new = test, type = "class"), test$quality)
# Compute classification quality statistics from a 2x2 confusion matrix.
# Expected layout (as produced by table(predicted, actual) above):
# rows = predicted class, columns = actual class, positive class first.
# Returns a named list: accuracy, MER, precision, sensitivity, specificity, F1.
# Cheat sheet: https://en.wikipedia.org/wiki/Sensitivity_and_specificity#Confusion_matrix
EvaluateModel <- function(classif_mx)
{
  # Guard against matrices that are not 2x2 (e.g. a class missing from test data).
  stopifnot(nrow(classif_mx) == 2, ncol(classif_mx) == 2)
  true_positive <- classif_mx[1,1]
  true_negative <- classif_mx[2,2]
  condition_positive <- sum(classif_mx[ ,1])
  condition_negative <- sum(classif_mx[ ,2])
  predicted_positive <- sum(classif_mx[1, ])
  # Using helper variables with meaningful names makes the function easier to follow.
  accuracy <- (true_positive + true_negative) / sum(classif_mx)
  MER <- 1 - accuracy # Misclassification Error Rate
  precision <- true_positive / predicted_positive
  sensitivity <- true_positive / condition_positive # aka Recall / True Positive Rate (TPR)
  specificity <- true_negative / condition_negative
  # F1: harmonic mean of precision and sensitivity.
  F1 <- (2 * precision * sensitivity) / (precision + sensitivity)
  # The "accuracy = accuracy" notation is needed so the list elements are named.
  list(accuracy = accuracy,
       MER = MER,
       precision = precision,
       sensitivity = sensitivity,
       specificity = specificity,
       F1 = F1)
}
EvaluateModel(CM[["d.klas1"]])
EvaluateModel(CM[["d.klas2"]])
###############################################################################################
# ROC/LIFT/GAIN Curve
###############################################################################################
## From the ROCR package documentation:
# Here is how to call 'performance' to create some standard evaluation plots:
# ROC curves: measure="tpr", x.measure="fpr".
# Precision/recall graphs: measure="prec", x.measure="rec".
# Sensitivity/specificity plots: measure="sens", x.measure="spec".
# Lift charts: measure="lift", x.measure="rpp".
prognoza_ciagla <- predict(d.klas1, newdata = test)
prognoza_ciagla <- as.vector(prognoza_ciagla[,2])
# ROC curve - needs a "continuous" prediction (class probabilities)
plot(performance(prediction(prognoza_ciagla,test$quality),"tpr","fpr"),lwd=2, colorize=T)
# AUC (Area Under Curve) - the area under the ROC curve
performance(prediction(prognoza_ciagla, test$quality),"auc")
# Sensitivity/specificity plots ~ trade-off
plot(performance(prediction(prognoza_ciagla,test$quality),"sens","spec"),lwd=2)
# Lift chart
plot(performance(prediction(prognoza_ciagla,test$quality),"lift","rpp"),lwd=2, col = "darkblue")
#Lift is a measure of the effectiveness of a predictive model calculated
#as the ratio between the results obtained with and without the predictive model.
################################################################################
# Regression trees
################################################################################
# Load the data - again (quality was overwritten with the binary recoding above)
dane <- read.csv2('https://bit.ly/2L0HtVa', stringsAsFactors = FALSE, dec = '.')
#dane <- read.csv2('data/winequality-white.csv', stringsAsFactors = FALSE, dec = '.')
# Initialize the pseudo-random number seed
set.seed(1)
# split into a training and a test set
train_proportion <- 0.7
train_index <- runif(nrow(dane)) < train_proportion
train <- dane[train_index,]
test <- dane[!train_index,]
# Linear regression - a good baseline
lin_m <- lm(quality ~ ., data = train)
# Regression tree
d.regr <- rpart(quality ~., data = train, cp = 0.01)
#plot(d.regr, margin = 0.2)
#text(d.regr, pretty = 0)
rpart.plot(d.regr, under=FALSE, fallen.leaves = FALSE, cex = 0.9)
# Regression tree - a bigger one (smaller complexity parameter)
d.regr.duze <- rpart(quality ~. , data = train, cp = 0.003)
#plot(d.regr.duze, margin = 0.2)
#text(d.regr.duze, pretty = 0)
rpart.plot(d.regr.duze, under=FALSE, fallen.leaves = FALSE, cex = 0.5)
################################################################################
# Regression tree evaluation methods: Variable Importance, RSS, MAE, RMSE, RAE, RRSE, R^2
################################################################################
# variable importance
varImp(lin_m)
d.regr$variable.importance
d.regr.duze$variable.importance
# residual deviations - various measures
# the residuals function computes residuals = actual values - prediction:
all(as.vector(residuals(d.regr)) == train$quality - predict(d.regr, train))
modele <- list("d.regr" = d.regr, "d.regr.duze" = d.regr.duze, "lin_m" = lin_m)
# Print (and invisibly return) regression error measures for a named list of
# fitted models on data set `dane`, comparing predict(model, dane) against the
# column named `predicted_col_name`: RSS, MAE, RMSE, RAE and RRSE.
# Predictions are computed once per model instead of once per measure.
OcenaModeli <- function(modele, dane, predicted_col_name) {
  actual <- dane[[predicted_col_name]]
  n <- nrow(dane)
  # One prediction pass per model (the original recomputed predict() 5 times).
  preds <- lapply(modele, function(m) predict(m, dane))
  sse <- sapply(preds, function(p) sum((actual - p)^2))  # sum of squared errors
  sae <- sapply(preds, function(p) sum(abs(actual - p))) # sum of absolute errors
  # Baseline errors of the mean-only model, for the relative measures.
  base_abs <- sum(abs(actual - mean(actual)))
  base_sq <- sum((actual - mean(actual))^2)
  print("Suma kwadatow reszt RSS")
  print(sse)
  print("Średni błąd absolutny MAE")
  print(sae / n)
  print("Pierwiastek błędu średniokwadratowego RMSE")
  print(sqrt(sse / n))
  print("Względny błąd absolutny RAE")
  print(sae / base_abs)
  print("Pierwiastek Względnego błędu średniokw RRSE")
  print(sqrt(sse / base_sq))
  # Return the measures invisibly so callers can inspect them programmatically.
  invisible(data.frame(RSS = sse, MAE = sae / n, RMSE = sqrt(sse / n),
                       RAE = sae / base_abs, RRSE = sqrt(sse / base_sq)))
}
OcenaModeli(modele, train, 'quality')
OcenaModeli(modele, test, 'quality')
###############################################################################################
# Exercise 1
###############################################################################################
# Write a function that, given a confusion matrix, computes and returns a
# 3-element named list containing the model's accuracy, sensitivity and specificity.
# Cheat sheet: https://en.wikipedia.org/wiki/Sensitivity_and_specificity#Confusion_matrix
###############################################################################################
# Exercise 2
###############################################################################################
# Load the red wine data (file "winequality-red.csv"). Recode the quality variable to
# binary, assuming wines with quality 6 or higher are high quality and the rest low quality.
#
# Split the data set randomly into training and test sets in the proportion 0.8:0.2.
#
# Build a classification tree predicting red wine quality from its chemical parameters.
# Set its complexity parameter to 0.005.
# Visualize the tree. Compute its confusion matrix.
# Based on the confusion matrix compute accuracy, sensitivity and specificity.
# Draw the ROC and lift curves and compute the AUC.
|
/rscripts/IRD_lab_05.R
|
no_license
|
JProniewicz/IRD_19_20_Z
|
R
| false
| false
| 9,143
|
r
|
################################################################################
### IRD
### Lab 05
### Decision trees cont., regression trees, model evaluation
################################################################################
################################################################################
# Libraries
################################################################################
rm(list=ls()) # programmatic clearing of the environment
library(rpart) # for decision trees
library(rpart.plot) # for plotting trees
#install.packages('ROCR')
library(ROCR) # for the ROC curve
library(caret) # for variable importance in a model
# Load the data - please fill in the correct path to the file
dane <- read.csv2('https://bit.ly/2L0HtVa', stringsAsFactors = FALSE, dec = '.')
#dane <- read.csv2('data/winequality-white.csv', stringsAsFactors = FALSE, dec = '.')
# read.csv2 instead of read.csv because of the column separator
# dec = '.' - we indicate that the decimal separator is a dot
################################################################################
# Data exploration
################################################################################
head(dane) # first 6 observations
str(dane) # variable types
summary(dane) # basic statistics
hist(dane$quality) # distribution of the response variable
################################################################################
# Classification trees - recap, accuracy assessment
################################################################################
# recode the quality variable to binary: if quality >= 6 the quality is high, otherwise low:
dane$quality <- ifelse(dane$quality >= 6, 'high', 'low')
set.seed(1)
train_proportion <- 0.7
train_index <- runif(nrow(dane)) < train_proportion
train <- dane[train_index,]
test <- dane[!train_index,]
# build and compare 2 classification trees
d.klas1 <- rpart(quality~., data = train, method = "class")
d.klas2 <- rpart(quality~., data = train, method = "class", cp = 0.005)
#plot(d.klas, margin = 0.2)
#text(d.klas, pretty = 0)
rpart.plot(d.klas1, under=FALSE, fallen.leaves = FALSE, cex = 0.3)
rpart.plot(d.klas2, under=FALSE, fallen.leaves = FALSE, cex = 0.3)
# 3) Confusion matrix + statistics assessing model quality
CM <- list()
CM[["d.klas1"]] <- table(predict(d.klas1, new = test, type = "class"), test$quality)
CM[["d.klas2"]] <- table(predict(d.klas2, new = test, type = "class"), test$quality)
# Compute classification quality statistics from a 2x2 confusion matrix.
# Expected layout (as produced by table(predicted, actual) above):
# rows = predicted class, columns = actual class, positive class first.
# Returns a named list: accuracy, MER, precision, sensitivity, specificity, F1.
# Cheat sheet: https://en.wikipedia.org/wiki/Sensitivity_and_specificity#Confusion_matrix
EvaluateModel <- function(classif_mx)
{
  # Guard against matrices that are not 2x2 (e.g. a class missing from test data).
  stopifnot(nrow(classif_mx) == 2, ncol(classif_mx) == 2)
  true_positive <- classif_mx[1,1]
  true_negative <- classif_mx[2,2]
  condition_positive <- sum(classif_mx[ ,1])
  condition_negative <- sum(classif_mx[ ,2])
  predicted_positive <- sum(classif_mx[1, ])
  # Using helper variables with meaningful names makes the function easier to follow.
  accuracy <- (true_positive + true_negative) / sum(classif_mx)
  MER <- 1 - accuracy # Misclassification Error Rate
  precision <- true_positive / predicted_positive
  sensitivity <- true_positive / condition_positive # aka Recall / True Positive Rate (TPR)
  specificity <- true_negative / condition_negative
  # F1: harmonic mean of precision and sensitivity.
  F1 <- (2 * precision * sensitivity) / (precision + sensitivity)
  # The "accuracy = accuracy" notation is needed so the list elements are named.
  list(accuracy = accuracy,
       MER = MER,
       precision = precision,
       sensitivity = sensitivity,
       specificity = specificity,
       F1 = F1)
}
EvaluateModel(CM[["d.klas1"]])
EvaluateModel(CM[["d.klas2"]])
###############################################################################################
# ROC/LIFT/GAIN Curve
###############################################################################################
## From the ROCR package documentation:
# Here is how to call 'performance' to create some standard evaluation plots:
# ROC curves: measure="tpr", x.measure="fpr".
# Precision/recall graphs: measure="prec", x.measure="rec".
# Sensitivity/specificity plots: measure="sens", x.measure="spec".
# Lift charts: measure="lift", x.measure="rpp".
prognoza_ciagla <- predict(d.klas1, newdata = test)
prognoza_ciagla <- as.vector(prognoza_ciagla[,2])
# ROC curve - needs a "continuous" prediction (class probabilities)
plot(performance(prediction(prognoza_ciagla,test$quality),"tpr","fpr"),lwd=2, colorize=T)
# AUC (Area Under Curve) - the area under the ROC curve
performance(prediction(prognoza_ciagla, test$quality),"auc")
# Sensitivity/specificity plots ~ trade-off
plot(performance(prediction(prognoza_ciagla,test$quality),"sens","spec"),lwd=2)
# Lift chart
plot(performance(prediction(prognoza_ciagla,test$quality),"lift","rpp"),lwd=2, col = "darkblue")
#Lift is a measure of the effectiveness of a predictive model calculated
#as the ratio between the results obtained with and without the predictive model.
################################################################################
# Regression trees
################################################################################
# Load the data - again (quality was overwritten with the binary recoding above)
dane <- read.csv2('https://bit.ly/2L0HtVa', stringsAsFactors = FALSE, dec = '.')
#dane <- read.csv2('data/winequality-white.csv', stringsAsFactors = FALSE, dec = '.')
# Initialize the pseudo-random number seed
set.seed(1)
# split into a training and a test set
train_proportion <- 0.7
train_index <- runif(nrow(dane)) < train_proportion
train <- dane[train_index,]
test <- dane[!train_index,]
# Linear regression - a good baseline
lin_m <- lm(quality ~ ., data = train)
# Regression tree
d.regr <- rpart(quality ~., data = train, cp = 0.01)
#plot(d.regr, margin = 0.2)
#text(d.regr, pretty = 0)
rpart.plot(d.regr, under=FALSE, fallen.leaves = FALSE, cex = 0.9)
# Regression tree - a bigger one (smaller complexity parameter)
d.regr.duze <- rpart(quality ~. , data = train, cp = 0.003)
#plot(d.regr.duze, margin = 0.2)
#text(d.regr.duze, pretty = 0)
rpart.plot(d.regr.duze, under=FALSE, fallen.leaves = FALSE, cex = 0.5)
################################################################################
# Regression tree evaluation methods: Variable Importance, RSS, MAE, RMSE, RAE, RRSE, R^2
################################################################################
# variable importance
varImp(lin_m)
d.regr$variable.importance
d.regr.duze$variable.importance
# residual deviations - various measures
# the residuals function computes residuals = actual values - prediction:
all(as.vector(residuals(d.regr)) == train$quality - predict(d.regr, train))
modele <- list("d.regr" = d.regr, "d.regr.duze" = d.regr.duze, "lin_m" = lin_m)
# Print (and invisibly return) regression error measures for a named list of
# fitted models on data set `dane`, comparing predict(model, dane) against the
# column named `predicted_col_name`: RSS, MAE, RMSE, RAE and RRSE.
# Predictions are computed once per model instead of once per measure.
OcenaModeli <- function(modele, dane, predicted_col_name) {
  actual <- dane[[predicted_col_name]]
  n <- nrow(dane)
  # One prediction pass per model (the original recomputed predict() 5 times).
  preds <- lapply(modele, function(m) predict(m, dane))
  sse <- sapply(preds, function(p) sum((actual - p)^2))  # sum of squared errors
  sae <- sapply(preds, function(p) sum(abs(actual - p))) # sum of absolute errors
  # Baseline errors of the mean-only model, for the relative measures.
  base_abs <- sum(abs(actual - mean(actual)))
  base_sq <- sum((actual - mean(actual))^2)
  print("Suma kwadatow reszt RSS")
  print(sse)
  print("Średni błąd absolutny MAE")
  print(sae / n)
  print("Pierwiastek błędu średniokwadratowego RMSE")
  print(sqrt(sse / n))
  print("Względny błąd absolutny RAE")
  print(sae / base_abs)
  print("Pierwiastek Względnego błędu średniokw RRSE")
  print(sqrt(sse / base_sq))
  # Return the measures invisibly so callers can inspect them programmatically.
  invisible(data.frame(RSS = sse, MAE = sae / n, RMSE = sqrt(sse / n),
                       RAE = sae / base_abs, RRSE = sqrt(sse / base_sq)))
}
OcenaModeli(modele, train, 'quality')
OcenaModeli(modele, test, 'quality')
###############################################################################################
# Exercise 1
###############################################################################################
# Write a function that, given a confusion matrix, computes and returns a
# 3-element named list containing the model's accuracy, sensitivity and specificity.
# Cheat sheet: https://en.wikipedia.org/wiki/Sensitivity_and_specificity#Confusion_matrix
###############################################################################################
# Exercise 2
###############################################################################################
# Load the red wine data (file "winequality-red.csv"). Recode the quality variable to
# binary, assuming wines with quality 6 or higher are high quality and the rest low quality.
#
# Split the data set randomly into training and test sets in the proportion 0.8:0.2.
#
# Build a classification tree predicting red wine quality from its chemical parameters.
# Set its complexity parameter to 0.005.
# Visualize the tree. Compute its confusion matrix.
# Based on the confusion matrix compute accuracy, sensitivity and specificity.
# Draw the ROC and lift curves and compute the AUC.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bigquery_objects.R
\name{JsonObject}
\alias{JsonObject}
\title{JsonObject Object}
\usage{
JsonObject()
}
\value{
JsonObject object
}
\description{
JsonObject Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Represents a single JSON object.
}
|
/googlebigqueryv2.auto/man/JsonObject.Rd
|
permissive
|
Phippsy/autoGoogleAPI
|
R
| false
| true
| 359
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bigquery_objects.R
\name{JsonObject}
\alias{JsonObject}
\title{JsonObject Object}
\usage{
JsonObject()
}
\value{
JsonObject object
}
\description{
JsonObject Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Represents a single JSON object.
}
|
#' R interface to Tencent AI Lab's emotion analysis
#'
#' \code{emotion} returns the result of sentiment (emotion) analysis.
#' @param text The text sent to the API
#' @param app_key Your app key
#' @param app_id Your app ID
#' @return positive = 1, neuter = 0, negative = -1
#' @author Ao Sun <\url{https://ao-sun.github.io/}>
#' @description text can only be Chinese, this function performs poorly in English
#' @examples
#' \dontrun{
#' emotion('很开心', app_id = 'YOUR_APP_ID', app_key = 'YOUR_APP_KEY')
#' }
#' @export
emotion <- function(text, app_key, app_id){
  # Unix timestamp (seconds) required by the API request.
  timestamp_num <- function() {
    ceiling(as.numeric(as.POSIXct(Sys.time(), format="%Y-%m-%d %H:%M:%S")))
  }
  # Random 10-character nonce string used in the request signature.
  nonce_run <- function() {
    paste0(sample(c(letters, ceiling(runif(10, 0, 9))), 10), collapse = '')
  }
  params <- c(
    'app_id' = app_id,
    'nonce_str' = nonce_run(),
    'sign' = '',
    'text' = URLencode(enc2utf8(text)),
    'time_stamp' = timestamp_num()
  )
  # get_sign() is defined elsewhere in this package; it signs the request
  # parameters with the app key.
  params['sign'] <- get_sign(params, app_key)
  url <- 'https://api.ai.qq.com/fcgi-bin/nlp/nlp_textpolar'
  webpage <- RCurl::postForm(url, .params = params)
  result <- jsonlite::fromJSON(webpage)
  result$data$polar
}
|
/.Rproj.user/F268AFAF/sources/s-8EAE03B/56FCCEDE-contents
|
no_license
|
suntiansheng/tencentAI
|
R
| false
| false
| 1,138
|
#' R interface to Tencent AI Lab's emotion analysis
#'
#' \code{emotion} returns the result of sentiment (emotion) analysis.
#' @param text The text sent to the API
#' @param app_key Your app key
#' @param app_id Your app ID
#' @return positive = 1, neuter = 0, negative = -1
#' @author Ao Sun <\url{https://ao-sun.github.io/}>
#' @description text can only be Chinese, this function performs poorly in English
#' @examples
#' \dontrun{
#' emotion('很开心', app_id = 'YOUR_APP_ID', app_key = 'YOUR_APP_KEY')
#' }
#' @export
emotion <- function(text, app_key, app_id){
  # Unix timestamp (seconds) required by the API request.
  timestamp_num <- function() {
    ceiling(as.numeric(as.POSIXct(Sys.time(), format="%Y-%m-%d %H:%M:%S")))
  }
  # Random 10-character nonce string used in the request signature.
  nonce_run <- function() {
    paste0(sample(c(letters, ceiling(runif(10, 0, 9))), 10), collapse = '')
  }
  params <- c(
    'app_id' = app_id,
    'nonce_str' = nonce_run(),
    'sign' = '',
    'text' = URLencode(enc2utf8(text)),
    'time_stamp' = timestamp_num()
  )
  # get_sign() is defined elsewhere in this package; it signs the request
  # parameters with the app key.
  params['sign'] <- get_sign(params, app_key)
  url <- 'https://api.ai.qq.com/fcgi-bin/nlp/nlp_textpolar'
  webpage <- RCurl::postForm(url, .params = params)
  result <- jsonlite::fromJSON(webpage)
  result$data$polar
}
|
|
#set.seed(1000)
# Generates a land-use raster with semi-natural habitat (value 1), early crop (2)
# and late crop (3). Relies on globals set by the calling script: SA, sim_design,
# sim_df, row_id, and on makeClass()/rmSingle() (landscapeR) and raster()
# (raster package) being available.
mapsize = 201
m = matrix(0, mapsize, mapsize)
r = raster(m, xmn=0, xmx=10, ymn=0, ymx=10)
if(!SA){
  # Semi-natural habitat: the number of patches is derived from the target
  # fraction and patch size of the current sim_design row.
  snh <- makeClass(r,ceiling(sim_design$snh_fract[row_id]*mapsize*mapsize/sim_design$snh_size[row_id]/100), sim_design$snh_size[row_id], val=1)
  snh <- rmSingle(snh)
  prop_snh <- mean(values(snh))
  patchsize_crop=50
  prop_crop=(1-prop_snh-0.4)
  tolerance=0.05
  npatches_crop=ceiling((prop_crop*mapsize*mapsize)/patchsize_crop)
  target=prop_crop
  i=0
  # Retry (up to 100 times) until the realized crop proportion is within
  # `tolerance` of the target; makeClass() can fail, hence the try().
  # NOTE(review): if every iteration fails, `landuse` is never created and the
  # lines below error out - confirm whether that can occur in practice.
  repeat{
    i=i+1
    if(i>100){break}
    else {
      try(landuse<-makeClass(snh,npatches_crop,patchsize_crop,bgr=0,val=2),silent = TRUE)
      prop_obs=sum(values(landuse)==2)/length(values(landuse))
      if(abs(prop_obs -prop_crop)< tolerance){
        print(paste('suitable landuse raster created after',i,'iterations'));
        break
      }}}
  landuse <- rmSingle(landuse)
  # Binary crop mask: 1 = crop cell, 0 = everything else.
  crop = landuse
  values(crop)=0
  values(crop)[values(landuse)==2]=1
  # Convert a random share of the crop area to "late" crop patches.
  npatches_crop_late=ceiling((mean(values(crop))*mapsize*mapsize)/patchsize_crop*runif(1))
  landuse2<-makeClass(crop,npatches_crop_late,patchsize_crop,bgr=1,val=2)
  crop2 = landuse2
  values(crop2)=0
  values(crop2)[values(landuse2)==2]=1
  # Early crop = crop cells not converted to late crop.
  crop1 = crop
  values(crop1)[values(crop2)==1]=0
  merged_map <- snh
  values(merged_map)[values(crop1)==1]=2 # early crop
  values(merged_map)[values(crop2)==1]=3 # late crop
  prop_late_crop = mean(values(crop2))
  prop_crop = mean(values(crop1))
  save(snh, merged_map, prop_snh, prop_late_crop, prop_crop, file=paste0('lu',row_id,'.Rdata'))
}else{
  # Sensitivity-analysis runs reuse the previously generated landscape.
  load(paste0('lu',row_id,'.Rdata'))
}
# update 01.04.22 - keeps overwriting in lu1.Rdata
# save to output
sim_df$prop_snh[row_id] <- prop_snh
sim_df$prop_late_crop[row_id] <- prop_late_crop
sim_df$prop_crop[row_id] <- prop_crop
|
/2.generate_maps.R
|
no_license
|
mblasirom/LandscapePhenoBee
|
R
| false
| false
| 1,824
|
r
|
#set.seed(1000)
# Generates a land-use raster with semi-natural habitat (value 1), early crop (2)
# and late crop (3). Relies on globals set by the calling script: SA, sim_design,
# sim_df, row_id, and on makeClass()/rmSingle() (landscapeR) and raster()
# (raster package) being available.
mapsize = 201
m = matrix(0, mapsize, mapsize)
r = raster(m, xmn=0, xmx=10, ymn=0, ymx=10)
if(!SA){
  # Semi-natural habitat: the number of patches is derived from the target
  # fraction and patch size of the current sim_design row.
  snh <- makeClass(r,ceiling(sim_design$snh_fract[row_id]*mapsize*mapsize/sim_design$snh_size[row_id]/100), sim_design$snh_size[row_id], val=1)
  snh <- rmSingle(snh)
  prop_snh <- mean(values(snh))
  patchsize_crop=50
  prop_crop=(1-prop_snh-0.4)
  tolerance=0.05
  npatches_crop=ceiling((prop_crop*mapsize*mapsize)/patchsize_crop)
  target=prop_crop
  i=0
  # Retry (up to 100 times) until the realized crop proportion is within
  # `tolerance` of the target; makeClass() can fail, hence the try().
  # NOTE(review): if every iteration fails, `landuse` is never created and the
  # lines below error out - confirm whether that can occur in practice.
  repeat{
    i=i+1
    if(i>100){break}
    else {
      try(landuse<-makeClass(snh,npatches_crop,patchsize_crop,bgr=0,val=2),silent = TRUE)
      prop_obs=sum(values(landuse)==2)/length(values(landuse))
      if(abs(prop_obs -prop_crop)< tolerance){
        print(paste('suitable landuse raster created after',i,'iterations'));
        break
      }}}
  landuse <- rmSingle(landuse)
  # Binary crop mask: 1 = crop cell, 0 = everything else.
  crop = landuse
  values(crop)=0
  values(crop)[values(landuse)==2]=1
  # Convert a random share of the crop area to "late" crop patches.
  npatches_crop_late=ceiling((mean(values(crop))*mapsize*mapsize)/patchsize_crop*runif(1))
  landuse2<-makeClass(crop,npatches_crop_late,patchsize_crop,bgr=1,val=2)
  crop2 = landuse2
  values(crop2)=0
  values(crop2)[values(landuse2)==2]=1
  # Early crop = crop cells not converted to late crop.
  crop1 = crop
  values(crop1)[values(crop2)==1]=0
  merged_map <- snh
  values(merged_map)[values(crop1)==1]=2 # early crop
  values(merged_map)[values(crop2)==1]=3 # late crop
  prop_late_crop = mean(values(crop2))
  prop_crop = mean(values(crop1))
  save(snh, merged_map, prop_snh, prop_late_crop, prop_crop, file=paste0('lu',row_id,'.Rdata'))
}else{
  # Sensitivity-analysis runs reuse the previously generated landscape.
  load(paste0('lu',row_id,'.Rdata'))
}
# update 01.04.22 - keeps overwriting in lu1.Rdata
# save to output
sim_df$prop_snh[row_id] <- prop_snh
sim_df$prop_late_crop[row_id] <- prop_late_crop
sim_df$prop_crop[row_id] <- prop_crop
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_sir.R
\name{plot_ioa}
\alias{plot_ioa}
\title{OUTPUT FUNCTION}
\usage{
plot_ioa(SIR, file_name = NULL, ioa_names = NULL,
posterior_pred = TRUE, coolors = "#104F55")
}
\arguments{
\item{SIR}{A fit SIR model}
\item{file_name}{Name of a file used to identify the files exported by the
function. If NULL, nothing is saved.}
\item{ioa_names}{names of indices of abundance used.}
\item{posterior_pred}{Logical. If true, includes a posterior predictive distribution of the estimated IOA}
}
\value{
Returns and saves a figure with the IOA trajectories.
}
\description{
Function that provides a plot of the estimated indices of abundance from a SIR model, including: median, 95%
credible interval, 90% credible interval, catch, and the observed indices of abundance.
}
|
/man/plot_ioa.Rd
|
no_license
|
antarctic-humpback-2019-assessment/HumpbackSIR
|
R
| false
| true
| 843
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_sir.R
\name{plot_ioa}
\alias{plot_ioa}
\title{OUTPUT FUNCTION}
\usage{
plot_ioa(SIR, file_name = NULL, ioa_names = NULL,
posterior_pred = TRUE, coolors = "#104F55")
}
\arguments{
\item{SIR}{A fit SIR model}
\item{file_name}{Name of a file used to identify the files exported by the
function. If NULL, nothing is saved.}
\item{ioa_names}{names of indices of abundance used.}
\item{posterior_pred}{Logical. If true, includes a posterior predictive distribution of the estimated IOA}
}
\value{
Returns and saves a figure with the IOA trajectories.
}
\description{
Function that provides a plot of the estimated indices of abundance from a SIR model, including: median, 95%
credible interval, 90% credible interval, catch, and the observed indices of abundance.
}
|
library(survey)
#When using these software products, users must specify that the sample design is
#With Replacement:
#stratum variable (_STSTR),
#the primary sampling unit (_PSU),
#weight (FINALWT_F or CHILDWT_F) -- all of which are on the public use data file.
# ACBS Analysis
# Keep only the columns needed for the survey design and the outcome.
# NOTE(review): `ACBS` and the %>%/select pipeline assume dplyr and the ACBS
# data are loaded elsewhere before this script runs.
ACBS_sample <- ACBS %>%
  select("X._STATE", "INCIDNT", "CHILDWT_F", "X._CHILDWT", "X._STSTR", "X._PSU" )
mydesign <-
  svydesign(
    id = ~0 ,
    data = ACBS_sample ,
    weight = ~X._CHILDWT ,
    strata = ~X._STSTR
  )
# CDC code ----------------------------------------------------------------
# Call the library for the current R session
library(survey)
# Read in BRFSS data
load("\\BRFSS\\BRFSS.rdata")
# Subset the data for Louisiana
BRFSS <- BRFSS[BRFSS$state == 22, ]
# Set options for allowing a single observation per stratum
options(survey.lonely.psu = "adjust")
# Create survey design
ACBSdsgn <- svydesign(
  id=~1,
  strata = ~X._STSTR,
  weights = ~CHILDWT_F,
  data = ACBS_sample)
# calculate average number of physical healthy days
# NOTE(review): `brfssdsgn` is never defined in this script, so the two calls
# below will fail; presumably a design built on the BRFSS subset was intended.
svymean(~physhlth, # Variable to analyze
        brfssdsgn,
        na.rm = TRUE)
# calculate percent in each arthritis category
svymean(~factor(havarth3),
        brfssdsgn,
        na.rm = TRUE)
|
/Script/Testing_weights_2.R
|
no_license
|
raedkm/TTI-Astham-CI
|
R
| false
| false
| 1,255
|
r
|
library(survey)
#When using these software products, users must specify that the sample design is
#With Replacement:
#stratum variable (_STSTR),
#the primary sampling unit (_PSU),
#weight (FINALWT_F or CHILDWT_F) -- all of which are on the public use data file.
# ACBS Analysis
# Keep only the columns needed for the survey design and the outcome.
# NOTE(review): `ACBS` and the %>%/select pipeline assume dplyr and the ACBS
# data are loaded elsewhere before this script runs.
ACBS_sample <- ACBS %>%
  select("X._STATE", "INCIDNT", "CHILDWT_F", "X._CHILDWT", "X._STSTR", "X._PSU" )
mydesign <-
  svydesign(
    id = ~0 ,
    data = ACBS_sample ,
    weight = ~X._CHILDWT ,
    strata = ~X._STSTR
  )
# CDC code ----------------------------------------------------------------
# Call the library for the current R session
library(survey)
# Read in BRFSS data
load("\\BRFSS\\BRFSS.rdata")
# Subset the data for Louisiana
BRFSS <- BRFSS[BRFSS$state == 22, ]
# Set options for allowing a single observation per stratum
options(survey.lonely.psu = "adjust")
# Create survey design
ACBSdsgn <- svydesign(
  id=~1,
  strata = ~X._STSTR,
  weights = ~CHILDWT_F,
  data = ACBS_sample)
# calculate average number of physical healthy days
# NOTE(review): `brfssdsgn` is never defined in this script, so the two calls
# below will fail; presumably a design built on the BRFSS subset was intended.
svymean(~physhlth, # Variable to analyze
        brfssdsgn,
        na.rm = TRUE)
# calculate percent in each arthritis category
svymean(~factor(havarth3),
        brfssdsgn,
        na.rm = TRUE)
|
# Load helper-function scripts for the Pepperwood vegetation-plot analyses
# directly from GitHub (requires an internet connection).
# scripts related to the 50 original veg plots and env data
source("https://raw.githubusercontent.com/dackerly/PepperwoodVegPlots/master/Analyses/PWfunctions_GitHub.R")
# scripts for the 17 larger hectare tree plots
source("https://raw.githubusercontent.com/dackerly/PepperwoodVegPlots/master/2016/Hectares/hectare_scripts.r")
# scripts for working with HOBO data
source("https://raw.githubusercontent.com/dackerly/PepperwoodVegPlots/master/HOBO/hobo_functions.r")
|
/source_files.r
|
no_license
|
dackerly/PepperwoodVegPlots
|
R
| false
| false
| 468
|
r
|
# Load helper-function scripts for the Pepperwood vegetation-plot analyses
# directly from GitHub (requires an internet connection).
# scripts related to the 50 original veg plots and env data
source("https://raw.githubusercontent.com/dackerly/PepperwoodVegPlots/master/Analyses/PWfunctions_GitHub.R")
# scripts for the 17 larger hectare tree plots
source("https://raw.githubusercontent.com/dackerly/PepperwoodVegPlots/master/2016/Hectares/hectare_scripts.r")
# scripts for working with HOBO data
source("https://raw.githubusercontent.com/dackerly/PepperwoodVegPlots/master/HOBO/hobo_functions.r")
|
# Load devtools in interactive sessions only, silencing its startup messages.
# require() (rather than library()) keeps R startup from aborting if devtools
# is not installed.
if (interactive()) {
  suppressMessages(require(devtools))
}
|
/.Rprofile
|
permissive
|
whitneymichelle/ccconnectr
|
R
| false
| false
| 62
|
rprofile
|
# Load devtools in interactive sessions only, silencing its startup messages.
# require() (rather than library()) keeps R startup from aborting if devtools
# is not installed.
if (interactive()) {
  suppressMessages(require(devtools))
}
|
################################################################################
## myInit.R
## Setup:
# __1. accounts and passwords__
# 2. file paths
# 3. required packages
# __4. parameter settings__
################################################################################
pkgs <- c("tidyverse", "data.table", "parallel",
          "RMySQL", "stringr", "bit64", "Rcpp",
          "lubridate","zoo",'beepr','plotly')
##------------------------------------------------------------------------------
# Install any required packages that are missing.
if(length(pkgs[!pkgs %in% installed.packages()]) != 0){
  sapply(pkgs[!pkgs %in% installed.packages()], install.packages)
}
##------------------------------------------------------------------------------
# Attach every required package.
sapply(pkgs, require, character.only = TRUE)
##------------------------------------------------------------------------------
options(digits = 8, digits.secs = 6, width=120,
        datatable.verbose = FALSE, scipen = 10)
##------------------------------------------------------------------------------
################################################################################
## MySQL
## Connect to the MySQL database to fetch data
################################################################################
MySQL(max.con = 300)
# Close any MySQL connections left over from a previous run.
for( conns in dbListConnections(MySQL()) ){
  dbDisconnect(conns)
}
# NOTE(review): database credentials are hard-coded and committed with the
# code - consider moving them to environment variables or an ignored config file.
mysql_user <- 'fl'
mysql_pwd <- 'abc@123'
mysql_host <- "192.168.1.106"
mysql_port <- 3306
#---------------------------------------------------
# mysqlFetch
# helper function; main input:
# database name
#---------------------------------------------------
# Open and return a DBI connection to the MySQL database named `x` (coerced to
# character), using the connection settings defined above.
# Callers are responsible for closing the connection with dbDisconnect().
mysqlFetch <- function(x){
  # Return the connection directly; the original assigned it to an unused
  # local (`temp`) and relied on the assignment's value as the implicit return.
  dbConnect(MySQL(),
            dbname = as.character(x),
            user = mysql_user,
            password = mysql_pwd,
            host = mysql_host,
            port = mysql_port)
}
################################################################################
## ChinaFuturesCalendar
## China futures trading calendar
################################################################################
#---------------------------------------------------
mysql <- mysqlFetch('dev')
#---------------------------------------------------
# Pull the full trading calendar into a data.table.
ChinaFuturesCalendar <- dbGetQuery(mysql,
    "SELECT * FROM ChinaFuturesCalendar"
    ) %>% as.data.table()
|
/projects/Alpha101/conf/myInit.R
|
no_license
|
maxclchen/myStrat.bk
|
R
| false
| false
| 2,386
|
r
|
################################################################################
## myInit.R
## Setup:
# __1. accounts and passwords__
# 2. file paths
# 3. required packages
# __4. parameter settings__
################################################################################
pkgs <- c("tidyverse", "data.table", "parallel",
          "RMySQL", "stringr", "bit64", "Rcpp",
          "lubridate","zoo",'beepr','plotly')
##------------------------------------------------------------------------------
# Install any required packages that are missing.
if(length(pkgs[!pkgs %in% installed.packages()]) != 0){
  sapply(pkgs[!pkgs %in% installed.packages()], install.packages)
}
##------------------------------------------------------------------------------
# Attach every required package.
sapply(pkgs, require, character.only = TRUE)
##------------------------------------------------------------------------------
options(digits = 8, digits.secs = 6, width=120,
        datatable.verbose = FALSE, scipen = 10)
##------------------------------------------------------------------------------
################################################################################
## MySQL
## Connect to the MySQL database to fetch data
################################################################################
MySQL(max.con = 300)
# Close any MySQL connections left over from a previous run.
for( conns in dbListConnections(MySQL()) ){
  dbDisconnect(conns)
}
# NOTE(review): database credentials are hard-coded and committed with the
# code - consider moving them to environment variables or an ignored config file.
mysql_user <- 'fl'
mysql_pwd <- 'abc@123'
mysql_host <- "192.168.1.106"
mysql_port <- 3306
#---------------------------------------------------
# mysqlFetch
# helper function; main input:
# database name
#---------------------------------------------------
# Open and return a DBI connection to the MySQL database named `x` (coerced to
# character), using the connection settings defined above.
# Callers are responsible for closing the connection with dbDisconnect().
mysqlFetch <- function(x){
  # Return the connection directly; the original assigned it to an unused
  # local (`temp`) and relied on the assignment's value as the implicit return.
  dbConnect(MySQL(),
            dbname = as.character(x),
            user = mysql_user,
            password = mysql_pwd,
            host = mysql_host,
            port = mysql_port)
}
################################################################################
## ChinaFuturesCalendar
## China futures trading calendar
################################################################################
#---------------------------------------------------
mysql <- mysqlFetch('dev')
#---------------------------------------------------
# Pull the full trading calendar into a data.table.
ChinaFuturesCalendar <- dbGetQuery(mysql,
    "SELECT * FROM ChinaFuturesCalendar"
    ) %>% as.data.table()
|
# composite figure script
# --------------------------------------
# Loading in data to make figures from script 8
load("processed_data/gam.temp_temp_only.Rdata") # gam.temp
load("processed_data/gam.time_time_only.Rdata") # gam.time
load("processed_data/gam.time.temp_time_temp.Rdata") # gam.time.temp
load("processed_data/gam.full_full_model.Rdata") # gam.full
# Reshape model residuals into long format (one row per observation per model)
# so ggplot can facet by model type.
# NOTE(review): `dat.raw` is assumed to be loaded elsewhere before this runs.
resid.temp <- data.frame(resids = resid(gam.temp),
                         Year = dat.raw$Year,
                         type = as.factor("Temp Only"))
resid.time.temp <- data.frame(resids = resid(gam.time.temp),
                              Year = dat.raw$Year,
                              type = as.factor("Temp + Time"))
resid.full <- data.frame(resids = resid(gam.full),
                         Year = dat.raw$Year,
                         type = as.factor("Temp + Time + Precip."))
resid.graph <- rbind(resid.temp, resid.time.temp, resid.full)
# Residuals: one panel per model, residuals vs year, zero reference line in red.
residual.plot <- ggplot(data=resid.graph[resid.graph$Year<2013,]) + facet_grid(type~.) +
  geom_point(aes(x=Year, y=resids), alpha=0.2, stroke=0) +
  geom_hline(aes(yintercept=0), col="red") +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank())+
  theme(axis.line.x = element_line(color="black", size = 0.5),
        axis.line.y = element_line(color="black", size = 0.5),
        strip.text.y=element_text(face="bold")) +
  scale_x_continuous(breaks = c(1900, 1920, 1940, 1960, 1980, 2000, 2020)) +
  labs(x=expression(bold(paste("Year"))), y = expression(bold(paste("Residual Value"))))
# Sensitivity curves
load("processed_data/gam.full_response_graph.Rdata")
# NOTE(review): this path ends in ".R" while load() expects an .Rdata file -
# confirm the filename.
load("processed_data/gam.time.temp_response_time_temp.R")
load("processed_data/gam.temp_response_graph.Rdata")
sens.curves <- ggplot() +
geom_hline(yintercept=1, linetype="dashed")+
geom_ribbon(data=temp.ci.out[temp.ci.out$Effect %in% c("tmean"), ], aes(x=x, ymin=lwr.bai, ymax=upr.bai, fill="Temp Only"), alpha=0.5) +
geom_line(data=temp.ci.out[temp.ci.out$Effect %in% c("tmean"), ], aes(x=x, y=mean.bai, color="Temp Only")) +
geom_ribbon(data=time.temp.ci.out2[time.temp.ci.out2$Effect %in% c("tmean"), ], aes(x=x, ymin=lwr.bai, ymax=upr.bai, fill="Temp + Time"), alpha=0.5) +
geom_line(data=time.temp.ci.out2[time.temp.ci.out2$Effect %in% c("tmean"), ], aes(x=x, y=mean.bai, color="Temp + Time")) +
geom_ribbon(data=full.ci.out[full.ci.out$Effect %in% c("tmean"), ], aes(x=x, ymin=lwr.bai, ymax=upr.bai, fill="Temp + Time + Precip"), alpha=0.5) +
geom_line(data=full.ci.out[full.ci.out$Effect %in% c("tmean"), ], aes(x=x, y=mean.bai, color="Temp + Time + Precip")) +
guides(color=F, fill=guide_legend(title=NULL)) +
scale_fill_manual(values=c("#0072B2", "#009E73", "#E69F00")) +
scale_color_manual(values=c("#0072B2", "#009E73", "#E69F00")) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank())+
theme(axis.line.x = element_line(color="black", size = 0.5),
axis.line.y = element_line(color="black", size = 0.5)) +
theme(legend.position=c(0.4,0.25)) +
labs(x = "Temperature", y = expression(bold(paste("Effect on BAI (mm"^"2","y"^"-1",")"))))
#---------------------------------------
# Loading in figure from script 9
load("gam.full_data_graph.Rdata")
# Effects Curve
gam.effects <- ggplot(data.graph[data.graph$Site.Code=="LF" & data.graph$Year<2013,]) +
geom_hline(aes(yintercept=1), linetype="dashed") +
geom_ribbon(aes(x=Year, ymin=fit.tmean.lwr, ymax=fit.tmean.upr, fill="Temp"), alpha=0.5) +
geom_ribbon(aes(x=Year, ymin=fit.precip.lwr, ymax=fit.precip.upr, fill="Precip"), alpha=0.5) +
geom_line(aes(x=Year, y=fit.tmean, color="Temp"), size=1) +
geom_line(aes(x=Year, y=fit.precip, color="Precip"), size=1) +
scale_color_manual(values=c("blue", "red"), labels=c("Precip", "Temp")) +
scale_fill_manual(values=c("blue", "red"), labels=c("Precip", "Temp"), name="") +
guides(color=F) +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank())+
theme(axis.line.x = element_line(color="black", size = 0.5),
axis.line.y = element_line(color="black", size = 0.5),
legend.position = c(0.75, 0.25))+
labs(x=expression(bold(paste("Year"))), y = expression(bold(paste("Relative Effect Size"))))
library(cowplot)
combo.curves.effects <- plot_grid(sens.curves, gam.effects, align = "v", nrow = 2, rel_heights = c(1/2, 1/2))
comp.plot <- plot_grid(residual.plot, combo.curves.effects, align = "h", ncol = 2, rel_heights = c(1/2, 1/2))
png(filename="composite_nonstationarity_TR_graph.png", height=8, width=11, unit="in", res=300)
comp.plot
dev.off()
|
/Example_Temporal_TreeRings/scripts/3_composite_figure.R
|
no_license
|
caseyyoungflesh/MSB_Non-Stationarity
|
R
| false
| false
| 5,571
|
r
|
# composite figure script
# --------------------------------------
# Builds a composite figure: model residuals (left panel) next to temperature
# sensitivity curves and relative effect sizes (right panels), saved as a PNG.
# Loading in data to make figures from script 8
load("processed_data/gam.temp_temp_only.Rdata") # gam.temp
load("processed_data/gam.time_time_only.Rdata") # gam.time
load("processed_data/gam.time.temp_time_temp.Rdata") # gam.time.temp
load("processed_data/gam.full_full_model.Rdata") # gam.full
# Reshape residuals into long format so ggplot can facet by model type.
# NOTE(review): dat.raw is assumed to already be in the workspace -- confirm it
# is loaded by an upstream script.
resid.temp <- data.frame(resids = resid(gam.temp),
                         Year = dat.raw$Year,
                         type = as.factor("Temp Only"))
resid.time.temp <- data.frame(resids = resid(gam.time.temp),
                              Year = dat.raw$Year,
                              type = as.factor("Temp + Time"))
resid.full <- data.frame(resids = resid(gam.full),
                         Year = dat.raw$Year,
                         type = as.factor("Temp + Time + Precip."))
resid.graph <- rbind(resid.temp, resid.time.temp, resid.full)
# Residuals: one facet per model, years before 2013 only.
residual.plot <- ggplot(data=resid.graph[resid.graph$Year<2013,]) + facet_grid(type~.) +
  geom_point(aes(x=Year, y=resids), alpha=0.2, stroke=0) +
  geom_hline(aes(yintercept=0), col="red") +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank())+
  theme(axis.line.x = element_line(color="black", size = 0.5),
        axis.line.y = element_line(color="black", size = 0.5),
        strip.text.y=element_text(face="bold")) +
  scale_x_continuous(breaks = c(1900, 1920, 1940, 1960, 1980, 2000, 2020)) +
  labs(x=expression(bold(paste("Year"))), y = expression(bold(paste("Residual Value"))))
# Sensitivity curves
load("processed_data/gam.full_response_graph.Rdata")
# NOTE(review): the ".R" extension is unusual for load(); confirm this file is
# really a saved .Rdata image rather than a script meant for source().
load("processed_data/gam.time.temp_response_time_temp.R")
load("processed_data/gam.temp_response_graph.Rdata")
sens.curves <- ggplot() +
  geom_hline(yintercept=1, linetype="dashed")+
  geom_ribbon(data=temp.ci.out[temp.ci.out$Effect %in% c("tmean"), ], aes(x=x, ymin=lwr.bai, ymax=upr.bai, fill="Temp Only"), alpha=0.5) +
  geom_line(data=temp.ci.out[temp.ci.out$Effect %in% c("tmean"), ], aes(x=x, y=mean.bai, color="Temp Only")) +
  geom_ribbon(data=time.temp.ci.out2[time.temp.ci.out2$Effect %in% c("tmean"), ], aes(x=x, ymin=lwr.bai, ymax=upr.bai, fill="Temp + Time"), alpha=0.5) +
  geom_line(data=time.temp.ci.out2[time.temp.ci.out2$Effect %in% c("tmean"), ], aes(x=x, y=mean.bai, color="Temp + Time")) +
  geom_ribbon(data=full.ci.out[full.ci.out$Effect %in% c("tmean"), ], aes(x=x, ymin=lwr.bai, ymax=upr.bai, fill="Temp + Time + Precip"), alpha=0.5) +
  geom_line(data=full.ci.out[full.ci.out$Effect %in% c("tmean"), ], aes(x=x, y=mean.bai, color="Temp + Time + Precip")) +
  guides(color=FALSE, fill=guide_legend(title=NULL)) + # FALSE, not the reassignable F
  scale_fill_manual(values=c("#0072B2", "#009E73", "#E69F00")) +
  scale_color_manual(values=c("#0072B2", "#009E73", "#E69F00")) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank())+
  theme(axis.line.x = element_line(color="black", size = 0.5),
        axis.line.y = element_line(color="black", size = 0.5)) +
  theme(legend.position=c(0.4,0.25)) +
  labs(x = "Temperature", y = expression(bold(paste("Effect on BAI (mm"^"2","y"^"-1",")"))))
#---------------------------------------
# Loading in figure from script 9
load("gam.full_data_graph.Rdata")
# Effects Curve: relative effect sizes over time for one site ("LF").
gam.effects <- ggplot(data.graph[data.graph$Site.Code=="LF" & data.graph$Year<2013,]) +
  geom_hline(aes(yintercept=1), linetype="dashed") +
  geom_ribbon(aes(x=Year, ymin=fit.tmean.lwr, ymax=fit.tmean.upr, fill="Temp"), alpha=0.5) +
  geom_ribbon(aes(x=Year, ymin=fit.precip.lwr, ymax=fit.precip.upr, fill="Precip"), alpha=0.5) +
  geom_line(aes(x=Year, y=fit.tmean, color="Temp"), size=1) +
  geom_line(aes(x=Year, y=fit.precip, color="Precip"), size=1) +
  scale_color_manual(values=c("blue", "red"), labels=c("Precip", "Temp")) +
  scale_fill_manual(values=c("blue", "red"), labels=c("Precip", "Temp"), name="") +
  guides(color=FALSE) + # FALSE, not F
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.background = element_blank())+
  theme(axis.line.x = element_line(color="black", size = 0.5),
        axis.line.y = element_line(color="black", size = 0.5),
        legend.position = c(0.75, 0.25))+
  labs(x=expression(bold(paste("Year"))), y = expression(bold(paste("Relative Effect Size"))))
library(cowplot)
# Stack the two curve panels vertically, then place them beside the residuals.
combo.curves.effects <- plot_grid(sens.curves, gam.effects, align = "v", nrow = 2, rel_heights = c(1/2, 1/2))
comp.plot <- plot_grid(residual.plot, combo.curves.effects, align = "h", ncol = 2, rel_heights = c(1/2, 1/2))
# "units" spelled out (the original relied on partial matching of unit=).
png(filename="composite_nonstationarity_TR_graph.png", height=8, width=11, units="in", res=300)
# Explicit print() so the plot is rendered even when this script is source()d.
print(comp.plot)
dev.off()
|
#################################
########### ECON 494 ############
# DESCRIPTIVE ANALYTICS PROJECT #
######## NOELLE FREEBURG ########
#GETTING & CLEANING PROJECT DATA#
#1. GETTING:
# NOTE(review): unconditionally reinstalling on every run is slow; consider
# guarding with requireNamespace(). Left as-is to preserve script behavior.
install.packages("readxl") #install package to allow R to read Excel files
library("readxl") #load readxl package
my_data<-read_excel("Analytics Project Data.xlsm") #import data
names(my_data) #view variable names to confirm they are correct
View(my_data) #view dataset
summary(my_data) #view summary statistics
write.table(my_data, "raw_data") #keep an untouched copy of the raw data
#2. CLEANING:
# The original called na.omit(my_data) once without assigning the result (a
# no-op) and then again with assignment; only the assignment is kept.
omitted_data<-na.omit(my_data) #drop rows containing NA values
#lost 11 observations from the original dataset
#3. SAVING CLEANED DATA
write.csv(omitted_data, "TIDY_data") #save omitted_data as csv
######## VISUALIZATIONS #########
#INSTALLING & LOADING GGPLOT2#
install.packages("ggplot2")
library(ggplot2)
#GDP & GINI SCATTER PLOT#
# Columns are referenced directly inside aes(); ggplot evaluates them in the
# data passed to ggplot(), so the omitted_data$ prefix is unnecessary (and can
# silently break faceting/grouping).
gdp_gini_plot <- ggplot(omitted_data, aes(x=`Avg GDP/capita 2008-2018`, y=`Avg Gini 2008-2018`)) +
  geom_point()
gdp_gini_plot <- gdp_gini_plot + ggtitle("Average GDP Per Capita and Gini Index from 2008-2018") +
  theme(plot.title = element_text(hjust = 0.5)) +
  xlab("Average GDP Per Capita ($)") +
  ylab("Average Gini (%)") +
  geom_smooth(method = "lm")
print(gdp_gini_plot)
#GINI & ECONOMIC STATUS BAR CHART#
gini_status_plot <- ggplot(omitted_data, aes(x=Status, y=`Avg Gini 2008-2018`/100)) +
  geom_bar(stat="identity", width = 0.5) # stat names are lower-case ("identity")
gini_status_plot <- gini_status_plot + ggtitle("Average Gini Index 2008-2018: Developed vs Developing") +
  theme(plot.title = element_text(hjust = 0.5)) +
  xlab("Economic Status ($)") +
  ylab("Average Gini (%)")
print(gini_status_plot)
#ECONOMIC STATUS RATIO PIE CHART#
#1. CREATING NEW DATA FRAME:
# The original wrapped the vector in ("Status" = ...), which silently created
# a stray global variable named Status; a plain vector is what was intended.
pie_data<- data.frame(
  group = c("Developed", "Developing"),
  value=c(52,101))
View(pie_data)
#2. CREATING PIE CHART
# BUG FIX: "%" sat outside paste0() and became a stray positional aesthetic in
# aes(); it now suffixes each rounded value as the labels intend.
pie <- ggplot(pie_data, aes(x="", y=value, fill=group)) +
  geom_bar(stat="identity", width=1) +
  coord_polar("y", start=0) +
  theme_void() +
  geom_text(aes(label = paste0(round(value), "%")), position = position_stack(vjust = 0.5))
pie <- pie + ggtitle("Developed vs. Developing Countries") +
  theme(plot.title = element_text(hjust = 0.5))
print(pie)
#GINI & INCOME SHARE OF RICHEST 20% SCATTER PLOT#
gini_richest_plot <- ggplot(omitted_data, aes(x=`Avg Income Share of Richest 20% 2008-2018`, y=`Avg Gini 2008-2018`)) +
  geom_point()
gini_richest_plot <- gini_richest_plot + ggtitle("Income Share of the Richest 20% and the Gini Index") +
  theme(plot.title = element_text(hjust = 0.5)) +
  xlab("Income Share of the Richest 20% (%)") +
  ylab("Average Gini (%)") +
  geom_smooth(method = "lm")
print(gini_richest_plot)
#GINI & INCOME SHARE OF POOREST 20% SCATTER PLOT#
gini_poorest_plot <- ggplot(omitted_data, aes(x=`Avg Income Share of Poorest 20% 2008-2018`, y=`Avg Gini 2008-2018`)) +
  geom_point()
gini_poorest_plot <- gini_poorest_plot + ggtitle("Income Share of the Poorest 20% and the Gini Index") +
  theme(plot.title = element_text(hjust = 0.5)) +
  xlab("Income Share of the Poorest 20% (%)") +
  ylab("Average Gini (%)") +
  geom_smooth(method = "lm")
print(gini_poorest_plot)
|
/ECON 494, Project 1.R
|
no_license
|
nfreeburg/Analytics_Projects
|
R
| false
| false
| 3,354
|
r
|
#################################
########### ECON 494 ############
# DESCRIPTIVE ANALYTICS PROJECT #
######## NOELLE FREEBURG ########
#GETTING & CLEANING PROJECT DATA#
#1. GETTING:
# NOTE(review): unconditionally reinstalling on every run is slow; consider
# guarding with requireNamespace(). Left as-is to preserve script behavior.
install.packages("readxl") #install package to allow R to read Excel files
library("readxl") #load readxl package
my_data<-read_excel("Analytics Project Data.xlsm") #import data
names(my_data) #view variable names to confirm they are correct
View(my_data) #view dataset
summary(my_data) #view summary statistics
write.table(my_data, "raw_data") #keep an untouched copy of the raw data
#2. CLEANING:
# The original called na.omit(my_data) once without assigning the result (a
# no-op) and then again with assignment; only the assignment is kept.
omitted_data<-na.omit(my_data) #drop rows containing NA values
#lost 11 observations from the original dataset
#3. SAVING CLEANED DATA
write.csv(omitted_data, "TIDY_data") #save omitted_data as csv
######## VISUALIZATIONS #########
#INSTALLING & LOADING GGPLOT2#
install.packages("ggplot2")
library(ggplot2)
#GDP & GINI SCATTER PLOT#
# Columns are referenced directly inside aes(); ggplot evaluates them in the
# data passed to ggplot(), so the omitted_data$ prefix is unnecessary (and can
# silently break faceting/grouping).
gdp_gini_plot <- ggplot(omitted_data, aes(x=`Avg GDP/capita 2008-2018`, y=`Avg Gini 2008-2018`)) +
  geom_point()
gdp_gini_plot <- gdp_gini_plot + ggtitle("Average GDP Per Capita and Gini Index from 2008-2018") +
  theme(plot.title = element_text(hjust = 0.5)) +
  xlab("Average GDP Per Capita ($)") +
  ylab("Average Gini (%)") +
  geom_smooth(method = "lm")
print(gdp_gini_plot)
#GINI & ECONOMIC STATUS BAR CHART#
gini_status_plot <- ggplot(omitted_data, aes(x=Status, y=`Avg Gini 2008-2018`/100)) +
  geom_bar(stat="identity", width = 0.5) # stat names are lower-case ("identity")
gini_status_plot <- gini_status_plot + ggtitle("Average Gini Index 2008-2018: Developed vs Developing") +
  theme(plot.title = element_text(hjust = 0.5)) +
  xlab("Economic Status ($)") +
  ylab("Average Gini (%)")
print(gini_status_plot)
#ECONOMIC STATUS RATIO PIE CHART#
#1. CREATING NEW DATA FRAME:
# The original wrapped the vector in ("Status" = ...), which silently created
# a stray global variable named Status; a plain vector is what was intended.
pie_data<- data.frame(
  group = c("Developed", "Developing"),
  value=c(52,101))
View(pie_data)
#2. CREATING PIE CHART
# BUG FIX: "%" sat outside paste0() and became a stray positional aesthetic in
# aes(); it now suffixes each rounded value as the labels intend.
pie <- ggplot(pie_data, aes(x="", y=value, fill=group)) +
  geom_bar(stat="identity", width=1) +
  coord_polar("y", start=0) +
  theme_void() +
  geom_text(aes(label = paste0(round(value), "%")), position = position_stack(vjust = 0.5))
pie <- pie + ggtitle("Developed vs. Developing Countries") +
  theme(plot.title = element_text(hjust = 0.5))
print(pie)
#GINI & INCOME SHARE OF RICHEST 20% SCATTER PLOT#
gini_richest_plot <- ggplot(omitted_data, aes(x=`Avg Income Share of Richest 20% 2008-2018`, y=`Avg Gini 2008-2018`)) +
  geom_point()
gini_richest_plot <- gini_richest_plot + ggtitle("Income Share of the Richest 20% and the Gini Index") +
  theme(plot.title = element_text(hjust = 0.5)) +
  xlab("Income Share of the Richest 20% (%)") +
  ylab("Average Gini (%)") +
  geom_smooth(method = "lm")
print(gini_richest_plot)
#GINI & INCOME SHARE OF POOREST 20% SCATTER PLOT#
gini_poorest_plot <- ggplot(omitted_data, aes(x=`Avg Income Share of Poorest 20% 2008-2018`, y=`Avg Gini 2008-2018`)) +
  geom_point()
gini_poorest_plot <- gini_poorest_plot + ggtitle("Income Share of the Poorest 20% and the Gini Index") +
  theme(plot.title = element_text(hjust = 0.5)) +
  xlab("Income Share of the Poorest 20% (%)") +
  ylab("Average Gini (%)") +
  geom_smooth(method = "lm")
print(gini_poorest_plot)
|
##############################################################################
## Out of sample validation
## Author: Amelia Bertozzi-Villa
## Description: Give an example of out-of-sample validation, using the
##              Salaries dataset (from the car package). The question: which
##              regression is better at predicting salary, a bivariate
##              regression on yrs.since.phd or a multivariate regression
##              including yrs.service, sex, and discipline?
##############################################################################
## load libraries
library(data.table)
library(ggplot2)
library(car)
## load data
data(Salaries)
Salaries <- data.table(Salaries)
## Hold out a random 10% of the data
## NOTE(review): there is no set.seed() call, so the train/test split (and
## therefore the RMSE values below) differ on every run -- confirm intended.
random_order <- sample(nrow(Salaries))
Salaries <- Salaries[random_order]
## 40 rows is hard-coded; presumably ~10% of the 397-row Salaries data --
## verify if the dataset ever changes.
testing_set <- Salaries[1:40]
training_set <- Salaries[41:nrow(Salaries)]
## Run regression on training set:
bivariate_regression <- lm(salary ~ yrs.since.phd, data=training_set)
multivariate_regression <- lm(salary ~ yrs.since.phd + yrs.service + sex + discipline, data=training_set)
## Predicting new values for TESTING set, using "newdata" argument
## (data.table := adds the prediction columns to testing_set by reference)
testing_set[, predict_bi:= predict(bivariate_regression, newdata=testing_set)]
testing_set[, predict_multi:=predict(multivariate_regression, newdata=testing_set)]
## Plot the predicted and observed values: bivariate predictions in blue, multivariate in red
ggplot(testing_set, aes(x=yrs.since.phd)) +
  geom_point(aes(y=salary)) +
  geom_point(aes(y=predict_bi), color="blue") +
  geom_point(aes(y=predict_multi), color="red") +
  labs(title="Out of Sample Validation Result for Two Regression Types",
       x="Years Since Ph.D",
       y="Salary (USD)")
## Calculate Error Statistic
## RMSE:
## 1. Calculate the "error" (the residual) for each point;
## 2. Square that value (the "squared error")
## 3. Take the mean of all squared errors across the dataset ("mean squared error")
## 4. Take the square root of that mean value ("root mean squared error")
testing_set[, squared_error_bi:= (predict_bi - salary)^2]
testing_set[, squared_error_multi:=(predict_multi - salary)^2]
rmse_bi <- sqrt(sum(testing_set$squared_error_bi)/nrow(testing_set))
rmse_multi <- sqrt(sum(testing_set$squared_error_multi)/nrow(testing_set))
|
/module_10/part_1/validation_example.r
|
no_license
|
bertozzivill/infx572_winter17
|
R
| false
| false
| 2,275
|
r
|
##############################################################################
## Out of sample validation
## Author: Amelia Bertozzi-Villa
## Description: Give an example of out-of-sample validation, using the
##              Salaries dataset (from the car package). The question: which
##              regression is better at predicting salary, a bivariate
##              regression on yrs.since.phd or a multivariate regression
##              including yrs.service, sex, and discipline?
##############################################################################
## load libraries
library(data.table)
library(ggplot2)
library(car)
## load data
data(Salaries)
Salaries <- data.table(Salaries)
## Hold out a random 10% of the data
## NOTE(review): there is no set.seed() call, so the train/test split (and
## therefore the RMSE values below) differ on every run -- confirm intended.
random_order <- sample(nrow(Salaries))
Salaries <- Salaries[random_order]
## 40 rows is hard-coded; presumably ~10% of the 397-row Salaries data --
## verify if the dataset ever changes.
testing_set <- Salaries[1:40]
training_set <- Salaries[41:nrow(Salaries)]
## Run regression on training set:
bivariate_regression <- lm(salary ~ yrs.since.phd, data=training_set)
multivariate_regression <- lm(salary ~ yrs.since.phd + yrs.service + sex + discipline, data=training_set)
## Predicting new values for TESTING set, using "newdata" argument
## (data.table := adds the prediction columns to testing_set by reference)
testing_set[, predict_bi:= predict(bivariate_regression, newdata=testing_set)]
testing_set[, predict_multi:=predict(multivariate_regression, newdata=testing_set)]
## Plot the predicted and observed values: bivariate predictions in blue, multivariate in red
ggplot(testing_set, aes(x=yrs.since.phd)) +
  geom_point(aes(y=salary)) +
  geom_point(aes(y=predict_bi), color="blue") +
  geom_point(aes(y=predict_multi), color="red") +
  labs(title="Out of Sample Validation Result for Two Regression Types",
       x="Years Since Ph.D",
       y="Salary (USD)")
## Calculate Error Statistic
## RMSE:
## 1. Calculate the "error" (the residual) for each point;
## 2. Square that value (the "squared error")
## 3. Take the mean of all squared errors across the dataset ("mean squared error")
## 4. Take the square root of that mean value ("root mean squared error")
testing_set[, squared_error_bi:= (predict_bi - salary)^2]
testing_set[, squared_error_multi:=(predict_multi - salary)^2]
rmse_bi <- sqrt(sum(testing_set$squared_error_bi)/nrow(testing_set))
rmse_multi <- sqrt(sum(testing_set$squared_error_multi)/nrow(testing_set))
|
#' Check data for bind function.
#'
#' check if the data is available for \code{rbind()} or \code{cbind()}
#'
#' @param data A list containing different sublists ready to be processed by \code{do.call('rbind')}
#' or \code{do.call('cbind')}
#' @param bind A string showing which bind you are going to use can be 'rbind' or 'cbind'
#' @return data can be processed by bind function; data cannot be processed by bind function
#' @examples
#' data <- list(c(1,1,1),c(2,2,2))
#' bind <- 'rbind'
#' checkBind(data,bind)
#'
#' data(testdl)
#' \dontrun{
#' checkBind(testdl, 'rbind')
#' }
#' # Since the colnames in testdl are not the same, so it cannot be bound.
#' #
#' @export
checkBind <- function(data, bind){
  # `data` must be a list of matrix-like objects destined for
  # do.call('rbind') / do.call('cbind'); fail fast with an informative error
  # when dimensions (and, for rbind, column names) are inconsistent.
  message ('Check if the data list is available for rbind or cbind... \n')
  if (bind == 'rbind') {
    # rbind requires every element to have the same number of columns.
    colNum <- sapply(data, function(x) dim(x)[2])
    colLev <- unique(colNum)
    if (length(colLev) != 1) {
      dif <- colLev[2]
      difNum <- which(colNum == dif)
      stop(sprintf('Different Colomn number in %s th of the input list \n', difNum))
    }
    # For rbind, colnames have to be checked as well.
    colNameNum <- lapply(data, function(x) colnames(x))
    sameName <- sapply(1:length(colNameNum), function(x) colNameNum[[x]] == colNameNum[[1]])
    if (any(!is.null(unlist(colNameNum))) & (any(sameName == FALSE) | any(length(unlist(sameName)) == 0))) {
      stop('Data in list have Different colnames, which cannot process rbind. ')
    }
  }else if (bind =='cbind') {
    # cbind requires every element to have the same number of rows.
    rowNum <- sapply(data, function(x) dim(x)[1])
    rowLev <- unique(rowNum)
    if (length(rowLev) != 1) {
      dif <- rowLev[2]
      difNum <- which(rowNum == dif)
      # BUG FIX: the original interpolated `rowNum` (all row counts) here
      # instead of `difNum` (the offending list positions), unlike the
      # parallel rbind branch above.
      stop(sprintf('Different row number in %s th of the input list \n', difNum))
    }
  }
  message('Data list is OK')
}
# Check that every supplied object is a valid hyfo grid list; stops at the
# first offender (via checkHyfo_core), returns invisibly otherwise.
checkHyfo <- function(...) {
  inputs <- list(...)
  for (item in inputs) {
    checkHyfo_core(item)
  }
  invisible()
}
# Core validator: a hyfo grid list must carry the named components 'Data',
# 'xyCoords' and 'Dates'; anything else is rejected with an informative error.
checkHyfo_core <- function(hyfo) {
  mandatory <- c('Data', 'xyCoords', 'Dates')
  found <- attributes(hyfo)$names
  if (!all(mandatory %in% found)) {
    stop('Input dataset is incorrect, it should contain "Data", "xyCoords", and "Dates",
         check help for details or use loadNCDF to read NetCDF file.
         If time series input is needed, and your input is a time series, please put "TS = yourinput".')
  }
}
# Verify that all inputs agree on the length of each named dimension in `dim`.
# Dimension lookup is delegated to calcuDim() (defined elsewhere in the
# package); an NA lookup means the dimension name was not found.
checkDimLength <- function(..., dim) {
  inputs <- list(...)
  for (dimName in dim) {
    found <- sapply(inputs, function(obj) calcuDim(obj, dimName))
    if (any(is.na(found))) stop('No input dimension name, check your dimension name.')
    if (length(unique(found)) != 1) stop('Input data have different dimemsion length.')
  }
  invisible()
}
###########################################################################################
##### biasFactor S4 class validity function: returns TRUE when the object is
##### well formed, otherwise a character vector describing each problem found.
checkBiasFactor <- function(object) {
  problems <- character()
  # Slots that must be non-empty, mapped to the label used in their message.
  mandatory <- c(biasFactor = 'biasFactors', method = 'method', preci = 'preci')
  for (slotName in names(mandatory)) {
    if (length(slot(object, slotName)) == 0) {
      problems <- c(problems, paste(mandatory[[slotName]], ' should not be empty.', sep = ''))
    }
  }
  # Optional slots are only validated when they actually hold a value.
  threshold <- object@prThreshold
  if (length(threshold) != 0 && threshold < 0) {
    # NOTE(review): the check permits 0 although the message says "greater
    # than 0" -- behavior kept identical to the original.
    problems <- c(problems, 'prThreshold should be greater than 0.')
  }
  scale_type <- object@scaleType
  if (length(scale_type) != 0 && scale_type != 'multi' && scale_type != 'add') {
    problems <- c(problems, paste('scaleType is ', scale_type, '. Should be "multi" or "add".', sep = ''))
  }
  extrap <- object@extrapolate
  if (length(extrap) != 0 && extrap != 'no' && extrap != 'constant') {
    problems <- c(problems, paste('extrapolate is ', extrap, '. Should be "no" or "constant".', sep = ''))
  }
  if (length(problems) == 0) TRUE else problems
}
# Validity function for the hyfo-specific biasFactor class: lonLatDim must
# hold exactly two values (the longitude and latitude dimension indices).
checkBiasFactor.hyfo <- function(object) {
  problems <- character()
  n_dims <- length(object@lonLatDim)
  if (n_dims != 2) {
    problems <- c(problems, paste('lonLatDim is length ', n_dims, '. Should be 2', sep = ''))
  }
  if (length(problems) == 0) TRUE else problems
}
|
/hyfo/R/check.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 4,695
|
r
|
#' Check data for bind function.
#'
#' check if the data is available for \code{rbind()} or \code{cbind()}
#'
#' @param data A list containing different sublists ready to be processed by \code{do.call('rbind')}
#' or \code{do.call('cbind')}
#' @param bind A string showing which bind you are going to use can be 'rbind' or 'cbind'
#' @return data can be processed by bind function; data cannot be processed by bind function
#' @examples
#' data <- list(c(1,1,1),c(2,2,2))
#' bind <- 'rbind'
#' checkBind(data,bind)
#'
#' data(testdl)
#' \dontrun{
#' checkBind(testdl, 'rbind')
#' }
#' # Since the colnames in testdl are not the same, so it cannot be bound.
#' #
#' @export
checkBind <- function(data, bind){
  # `data` must be a list of matrix-like objects destined for
  # do.call('rbind') / do.call('cbind'); fail fast with an informative error
  # when dimensions (and, for rbind, column names) are inconsistent.
  message ('Check if the data list is available for rbind or cbind... \n')
  if (bind == 'rbind') {
    # rbind requires every element to have the same number of columns.
    colNum <- sapply(data, function(x) dim(x)[2])
    colLev <- unique(colNum)
    if (length(colLev) != 1) {
      dif <- colLev[2]
      difNum <- which(colNum == dif)
      stop(sprintf('Different Colomn number in %s th of the input list \n', difNum))
    }
    # For rbind, colnames have to be checked as well.
    colNameNum <- lapply(data, function(x) colnames(x))
    sameName <- sapply(1:length(colNameNum), function(x) colNameNum[[x]] == colNameNum[[1]])
    if (any(!is.null(unlist(colNameNum))) & (any(sameName == FALSE) | any(length(unlist(sameName)) == 0))) {
      stop('Data in list have Different colnames, which cannot process rbind. ')
    }
  }else if (bind =='cbind') {
    # cbind requires every element to have the same number of rows.
    rowNum <- sapply(data, function(x) dim(x)[1])
    rowLev <- unique(rowNum)
    if (length(rowLev) != 1) {
      dif <- rowLev[2]
      difNum <- which(rowNum == dif)
      # BUG FIX: the original interpolated `rowNum` (all row counts) here
      # instead of `difNum` (the offending list positions), unlike the
      # parallel rbind branch above.
      stop(sprintf('Different row number in %s th of the input list \n', difNum))
    }
  }
  message('Data list is OK')
}
# Check that every supplied object is a valid hyfo grid list; stops at the
# first offender (via checkHyfo_core), returns invisibly otherwise.
checkHyfo <- function(...) {
  inputs <- list(...)
  for (item in inputs) {
    checkHyfo_core(item)
  }
  invisible()
}
# Core validator: a hyfo grid list must carry the named components 'Data',
# 'xyCoords' and 'Dates'; anything else is rejected with an informative error.
checkHyfo_core <- function(hyfo) {
  mandatory <- c('Data', 'xyCoords', 'Dates')
  found <- attributes(hyfo)$names
  if (!all(mandatory %in% found)) {
    stop('Input dataset is incorrect, it should contain "Data", "xyCoords", and "Dates",
         check help for details or use loadNCDF to read NetCDF file.
         If time series input is needed, and your input is a time series, please put "TS = yourinput".')
  }
}
# Verify that all inputs agree on the length of each named dimension in `dim`.
# Dimension lookup is delegated to calcuDim() (defined elsewhere in the
# package); an NA lookup means the dimension name was not found.
checkDimLength <- function(..., dim) {
  inputs <- list(...)
  for (dimName in dim) {
    found <- sapply(inputs, function(obj) calcuDim(obj, dimName))
    if (any(is.na(found))) stop('No input dimension name, check your dimension name.')
    if (length(unique(found)) != 1) stop('Input data have different dimemsion length.')
  }
  invisible()
}
###########################################################################################
##### biasFactor S4 class validity function: returns TRUE when the object is
##### well formed, otherwise a character vector describing each problem found.
checkBiasFactor <- function(object) {
  problems <- character()
  # Slots that must be non-empty, mapped to the label used in their message.
  mandatory <- c(biasFactor = 'biasFactors', method = 'method', preci = 'preci')
  for (slotName in names(mandatory)) {
    if (length(slot(object, slotName)) == 0) {
      problems <- c(problems, paste(mandatory[[slotName]], ' should not be empty.', sep = ''))
    }
  }
  # Optional slots are only validated when they actually hold a value.
  threshold <- object@prThreshold
  if (length(threshold) != 0 && threshold < 0) {
    # NOTE(review): the check permits 0 although the message says "greater
    # than 0" -- behavior kept identical to the original.
    problems <- c(problems, 'prThreshold should be greater than 0.')
  }
  scale_type <- object@scaleType
  if (length(scale_type) != 0 && scale_type != 'multi' && scale_type != 'add') {
    problems <- c(problems, paste('scaleType is ', scale_type, '. Should be "multi" or "add".', sep = ''))
  }
  extrap <- object@extrapolate
  if (length(extrap) != 0 && extrap != 'no' && extrap != 'constant') {
    problems <- c(problems, paste('extrapolate is ', extrap, '. Should be "no" or "constant".', sep = ''))
  }
  if (length(problems) == 0) TRUE else problems
}
# Validity function for the hyfo-specific biasFactor class: lonLatDim must
# hold exactly two values (the longitude and latitude dimension indices).
checkBiasFactor.hyfo <- function(object) {
  problems <- character()
  n_dims <- length(object@lonLatDim)
  if (n_dims != 2) {
    problems <- c(problems, paste('lonLatDim is length ', n_dims, '. Should be 2', sep = ''))
  }
  if (length(problems) == 0) TRUE else problems
}
|
library(GenEst)
### Name: aicc.cpm
### Title: Extract AIC and AICc for a carcass persistence model
### Aliases: aicc.cpm
### ** Examples
data(wind_RP)
# Fit a carcass-persistence model with season-dependent location and scale.
mod <- cpm(formula_l = l ~ Season, formula_s = s ~ Season,
    data = wind_RP$CP, left = "LastPresent", right = "FirstAbsent")
# Extract AIC/AICc for the fitted model.
aicc(mod)
|
/data/genthat_extracted_code/GenEst/examples/aicc.cpm.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 308
|
r
|
library(GenEst)
### Name: aicc.cpm
### Title: Extract AIC and AICc for a carcass persistence model
### Aliases: aicc.cpm
### ** Examples
data(wind_RP)
# Fit a carcass-persistence model with season-dependent location and scale.
mod <- cpm(formula_l = l ~ Season, formula_s = s ~ Season,
    data = wind_RP$CP, left = "LastPresent", right = "FirstAbsent")
# Extract AIC/AICc for the fitted model.
aicc(mod)
|
\name{p.ancova_to_d1}
\alias{p.ancova_to_d1}
\title{One-tailed p-value from ANCOVA to Standardized Mean Difference (d)
}
\description{Converts a one-tailed p-value from ANCOVA reported in the primary study to a standardized mean difference (d)}
\usage{
p.ancova_to_d1(p, n.1, n.2, R, q)
}
\arguments{
\item{p}{One-tailed p-value reported in primary study.
}
\item{n.1}{Treatment group sample size.
}
\item{n.2}{Comparison group sample size.
}
\item{R}{Covariate outcome correlation or multiple correlation.
}
\item{q}{number of covariates.
}
}
\value{
\item{d}{Standardized mean difference (d).}
\item{var_d }{Variance of d.}
}
\author{ AC Del Re & William T. Hoyt
Maintainer: AC Del Re \email{acdelre@gmail.com}
}
\references{Borenstein (2009). Effect sizes for continuous data. In H. Cooper, L. V. Hedges, & J. C. Valentine (Eds.), \emph{The handbook of research synthesis and meta analysis} (pp. 279-293). New York: Russell Sage Foundation.
}
\seealso{
\code{\link{d_to_g}},
\code{\link{mean_to_d}},
\code{\link{mean_to_d2}},
\code{\link{t_to_d}},
\code{\link{f_to_d}},
\code{\link{p_to_d1}},
\code{\link{p_to_d2}},
\code{\link{ancova_to_d1}},
\code{\link{ancova_to_d2}},
\code{\link{tt.ancova_to_d}},
\code{\link{f.ancova_to_d}},
\code{\link{r_to_d}},
\code{\link{p.ancova_to_d2}},
\code{\link{lor_to_d}},
\code{\link{prop_to_or}},
\code{\link{prop_to_d}},
\code{\link{r_from_chi}},
\code{\link{r_from_d}},
\code{\link{r_from_d1}},
\code{\link{r_from_t}}
}
\keyword{ arith }
|
/man/p.ancova_to_d1.Rd
|
no_license
|
acdelre/MAd
|
R
| false
| false
| 1,502
|
rd
|
\name{p.ancova_to_d1}
\alias{p.ancova_to_d1}
\title{One-tailed p-value from ANCOVA to Standardized Mean Difference (d)
}
\description{Converts a one-tailed p-value from ANCOVA reported in the primary study to a standardized mean difference (d)}
\usage{
p.ancova_to_d1(p, n.1, n.2, R, q)
}
\arguments{
\item{p}{One-tailed p-value reported in primary study.
}
\item{n.1}{Treatment group sample size.
}
\item{n.2}{Comparison group sample size.
}
\item{R}{Covariate outcome correlation or multiple correlation.
}
\item{q}{number of covariates.
}
}
\value{
\item{d}{Standardized mean difference (d).}
\item{var_d }{Variance of d.}
}
\author{ AC Del Re & William T. Hoyt
Maintainer: AC Del Re \email{acdelre@gmail.com}
}
\references{Borenstein (2009). Effect sizes for continuous data. In H. Cooper, L. V. Hedges, & J. C. Valentine (Eds.), \emph{The handbook of research synthesis and meta analysis} (pp. 279-293). New York: Russell Sage Foundation.
}
\seealso{
\code{\link{d_to_g}},
\code{\link{mean_to_d}},
\code{\link{mean_to_d2}},
\code{\link{t_to_d}},
\code{\link{f_to_d}},
\code{\link{p_to_d1}},
\code{\link{p_to_d2}},
\code{\link{ancova_to_d1}},
\code{\link{ancova_to_d2}},
\code{\link{tt.ancova_to_d}},
\code{\link{f.ancova_to_d}},
\code{\link{r_to_d}},
\code{\link{p.ancova_to_d2}},
\code{\link{lor_to_d}},
\code{\link{prop_to_or}},
\code{\link{prop_to_d}},
\code{\link{r_from_chi}},
\code{\link{r_from_d}},
\code{\link{r_from_d1}},
\code{\link{r_from_t}}
}
\keyword{ arith }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spacy_functions.R
\name{SPCY_PreTopicFrame}
\alias{SPCY_PreTopicFrame}
\title{Parse sentences strings using SPACY as the parsing engine, and prep sentences for running through topic model}
\usage{
SPCY_PreTopicFrame(CORPUS_A, sample_num = 0, workingfolder,
removeentities = T, dbexists = FALSE, spcy = T, syntaxnet = F)
}
\arguments{
\item{CORPUS_A}{corpus to analyze}
\item{sample_num}{number of rows to sample default is 0 which keeps all rows.}
\item{workingfolder}{folder to save file in.}
\item{spcy}{use spacy to parse (set to True usually)}
\item{syntaxnet}{use syntaxnet to parse (set to False usually)}
\item{removeentities}{should named entities be dropped from topic model}
\item{dbexists}{whether the spacy or sqlnet database exists}
}
\value{
will create a database named spacyframe.db
}
\description{
This command creates a sqlite database which parse information is saved in, while also creating the proper frames for running a topic model.
}
\seealso{
the entire dplyr package
}
|
/man/SPCY_PreTopicFrame.Rd
|
permissive
|
qcmuu/eparTextTools
|
R
| false
| true
| 1,081
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spacy_functions.R
\name{SPCY_PreTopicFrame}
\alias{SPCY_PreTopicFrame}
\title{Parse sentences strings using SPACY as the parsing engine, and prep sentences for running through topic model}
\usage{
SPCY_PreTopicFrame(CORPUS_A, sample_num = 0, workingfolder,
removeentities = T, dbexists = FALSE, spcy = T, syntaxnet = F)
}
\arguments{
\item{CORPUS_A}{corpus to analyze}
\item{sample_num}{number of rows to sample default is 0 which keeps all rows.}
\item{workingfolder}{folder to save file in.}
\item{spcy}{use spacy to parse (set to True usually)}
\item{syntaxnet}{use syntaxnet to parse (set to False usually)}
\item{removeentities}{should named entities be dropped from topic model}
\item{dbexists}{whether the spacy or sqlnet database exists}
}
\value{
will create a database named spacyframe.db
}
\description{
This command creates a sqlite database which parse information is saved in, while also creating the proper frames for running a topic model.
}
\seealso{
the entire dplyr package
}
|
\name{LCTS}
\alias{LCTS}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Computes a Linear Combination Test Statistics}
\description{Given a particular linear combination, specified in terms of
coefficients, cfs, this functions forms the linear combination of
two time series, tsx, tsy and returns the result of a stationarity
test statistic on the combination.
}
\usage{
LCTS(cfs, tsx, tsy, filter.number = 1,
family = c("DaubExPhase", "DaubLeAsymm"), plot.it = FALSE,
spec.filter.number = 1,
spec.family = c("DaubExPhase", "DaubLeAsymm"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{cfs}{Coefficients describing the linear combination vectors.
The first half correspond to the first vector (alpha) the second
half to the beta vector. Hence this vector must have an even length,
and each half has a length a power of two minus one.}
\item{tsx}{The x time series}
\item{tsy}{The y time series}
\item{filter.number}{This function turns the coefficients into a linear
combination function (e.g. alpha). This argument specifies the
filter.number of the inverse wavelet transform that turns coefficients
into a lc function.}
\item{family}{Same as filter.number but for the wavelet family}
\item{plot.it}{If TRUE then various things are plotted: both of the linear
combination vectors/time series, the combined time series and
its EWS estimate}
\item{spec.filter.number}{The wavelet filter used to compute the EWS
estimate}
\item{spec.family}{The wavelet family used to compute the EWS estimate}
}
\details{This function forms a time-varying linear combination of two
times series to form a third time series. Then a `stationarity
test' test statistic is applied to the third time series to
	compute how stationary (or non-stationary) it is. This function
is called by \code{\link{findstysols}} and actually does the work
of forming the lc of two time series and gauging the stationarity}
\value{A single number which is the value of the test of stationarity
for the combined time series. This is the result of \code{\link{TOSts}}
but normalized for the squared coefficient norm}
\references{Cardinali, A. and Nason, Guy P. (2013) Costationarity of
Locally Stationary Time Series Using costat.
\emph{Journal of Statistical Software}, \bold{55}, Issue 1.
Cardinali, A. and Nason, G.P. (2010) Costationarity of locally stationary
time series. \emph{J. Time Series Econometrics}, \bold{2}, Issue 2, Article 1.
}
\author{Guy Nason}
\seealso{\code{\link{findstysols}}, \code{\link{TOSts}},
\code{\link{coeftofn}}}
\examples{
#
# Apply this function to random combination coefficients.
#
# The combination coefficients: comprised of two vectors each of length 3
# Note that 3 = 2^2 - 1, vectors need to be of length a power two minus 1
#
# sret, fret are two time series in the package
#
data(sret)
data(fret)
LCTS( c(rnorm(3), rnorm(3)), sret, fret)
#[1] 1.571728e-13
#
# The value of the test statistic is 1.57e-13
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ts}
|
/man/LCTS.Rd
|
no_license
|
cran/costat
|
R
| false
| false
| 3,120
|
rd
|
\name{LCTS}
\alias{LCTS}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Computes a Linear Combination Test Statistics}
\description{Given a particular linear combination, specified in terms of
coefficients, cfs, this functions forms the linear combination of
two time series, tsx, tsy and returns the result of a stationarity
test statistic on the combination.
}
\usage{
LCTS(cfs, tsx, tsy, filter.number = 1,
family = c("DaubExPhase", "DaubLeAsymm"), plot.it = FALSE,
spec.filter.number = 1,
spec.family = c("DaubExPhase", "DaubLeAsymm"))
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{cfs}{Coefficients describing the linear combination vectors.
The first half correspond to the first vector (alpha) the second
half to the beta vector. Hence this vector must have an even length,
and each half has a length a power of two minus one.}
\item{tsx}{The x time series}
\item{tsy}{The y time series}
\item{filter.number}{This function turns the coefficients into a linear
combination function (e.g. alpha). This argument specifies the
filter.number of the inverse wavelet transform that turns coefficients
into a lc function.}
\item{family}{Same as filter.number but for the wavelet family}
\item{plot.it}{If TRUE then various things are plotted: both of the linear
combination vectors/time series, the combined time series and
its EWS estimate}
\item{spec.filter.number}{The wavelet filter used to compute the EWS
estimate}
\item{spec.family}{The wavelet family used to compute the EWS estimate}
}
\details{This function forms a time-varying linear combination of two
times series to form a third time series. Then a `stationarity
test' test statistic is applied to the third time series to
	compute how stationary (or non-stationary) it is. This function
is called by \code{\link{findstysols}} and actually does the work
of forming the lc of two time series and gauging the stationarity}
\value{A single number which is the value of the test of stationarity
for the combined time series. This is the result of \code{\link{TOSts}}
but normalized for the squared coefficient norm}
\references{Cardinali, A. and Nason, Guy P. (2013) Costationarity of
Locally Stationary Time Series Using costat.
\emph{Journal of Statistical Software}, \bold{55}, Issue 1.
Cardinali, A. and Nason, G.P. (2010) Costationarity of locally stationary
time series. \emph{J. Time Series Econometrics}, \bold{2}, Issue 2, Article 1.
}
\author{Guy Nason}
\seealso{\code{\link{findstysols}}, \code{\link{TOSts}},
\code{\link{coeftofn}}}
\examples{
#
# Apply this function to random combination coefficients.
#
# The combination coefficients: comprised of two vectors each of length 3
# Note that 3 = 2^2 - 1, vectors need to be of length a power two minus 1
#
# sret, fret are two time series in the package
#
data(sret)
data(fret)
LCTS( c(rnorm(3), rnorm(3)), sret, fret)
#[1] 1.571728e-13
#
# The value of the test statistic is 1.57e-13
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ts}
|
#' Write NSSP BioSense Platform Data Quality Summary Reports for One Facility
#'
#' @description
#' This function is a lightweight version of the `write_reports` function. It will generate summary and example workbooks, but only for
#' one specified facility. The first summary workbook shows percents and counts of nulls and invalids, while the examples workbook
#' generates detailed information on records and visits that are null or invalid.
#'
#' @param username Your BioSense username, as a string. This is the same username you may use to log into RStudio or Adminer.
#' @param password Your BioSense password, as a string. This is the same password you may use to log into RStudio or Adminer.
#' @param table The table that you want to retrieve the data from, as a string.
#' @param mft The MFT (master facilities table) from where the facility name will be retrieved, as a string.
#' @param start The start date time that you wish to begin pulling data from, as a string.
#' @param end The end date time that you wish to stop pulling data from, as a string.
#' @param facility The C_Biosense_Facility_ID for the facility that you wish to generate and write the report for.
#' @param directory The directory where you would like to write the reports to (i.e., "~/Documents/MyReports"), as a string.
#' @param nexamples An integer number of examples you would like for each type of invalid or null field in the examples workbooks for each facility.
#' This defaults to 0, which will not generate these example workbooks.
#' @import dplyr
#' @import tidyr
#' @import openxlsx
#' @import RODBC
#' @importFrom stringr str_replace_all
#' @export
write_facility <- function(username, password, table, mft, start, end, facility, directory="", nexamples=0) {
  # pull data
  channel <- odbcConnect("BioSense_Platform", paste0("BIOSENSE\\", username), password) # open channel
  data <- sqlQuery(
    channel,
    paste0("SELECT * FROM ", table, " WHERE C_Visit_Date_Time >= '", start, "' AND C_Visit_Date_Time <= '", end, "' AND C_Biosense_Facility_ID = ", facility) # create sql query
  )
  if (nrow(data) == 0) stop("The query yielded no data.")
  name <- as.character(unlist(unname(c(sqlQuery(channel, paste0("SELECT Facility_Name FROM ", mft, " WHERE C_Biosense_Facility_ID = ", facility)))))) # get name from mft
  odbcCloseAll() # close connection
  # get hl7 values
  data("hl7_values", envir=environment())
  hl7_values$Field <- as.character(hl7_values$Field)
  # get facility-level state summary of required nulls
  req_nulls <- get_req_nulls(data) %>%
    select(-c(C_Biosense_Facility_ID)) %>%
    gather(Field, Value, 2:ncol(.)) %>%
    spread(Measure, Value) %>%
    right_join(hl7_values, ., by = "Field")
  # get facility-level state summary of optional nulls
  opt_nulls <- get_opt_nulls(data) %>%
    select(-c(C_Biosense_Facility_ID)) %>%
    gather(Field, Value, 2:ncol(.)) %>%
    spread(Measure, Value) %>%
    right_join(hl7_values, ., by = "Field")
  # get facility-level state summary of invalids
  invalids <- get_all_invalids(data) %>%
    select(-c(C_Biosense_Facility_ID)) %>%
    gather(Field, Value, 2:ncol(.)) %>%
    spread(Measure, Value) %>%
    right_join(hl7_values, ., by = "Field")
  # getting first and last visit date times
  vmin <- min(as.character(data$C_Visit_Date_Time))
  vmax <- max(as.character(data$C_Visit_Date_Time))
  amin <- min(as.character(data$Arrived_Date_Time))
  amax <- max(as.character(data$Arrived_Date_Time))
  # sanitize the facility name once for use in every output file name:
  # strip everything that is not a letter, digit, or whitespace, then
  # replace whitespace runs with underscores
  # (fixed: the old pattern used the range A-z, which wrongly kept [ \ ] ^ _ `)
  filename <- str_replace_all(name, "[^a-zA-Z\\s0-9]", "") %>%
    str_replace_all("[\\s]", "_")
  # write to xlsx
  # initialize workbook
  wb <- createWorkbook()
  # sheet 1: facility information
  sheet1 <- addWorksheet(wb, "Facility Information")
  writeDataTable(wb, sheet1,
                 suppressWarnings(data %>% # take data
                   select(c(C_Biosense_Facility_ID, Sending_Facility_ID, Sending_Application,
                            Treating_Facility_ID, Receiving_Application, Receiving_Facility)) %>% # taking only variables we want
                   gather(key=Field, value=Value, convert=TRUE) %>% # suppressed warnings because this will tell you it converted all to characters
                   distinct() %>% # get only distinct entries
                   mutate_all(as.character) %>% # make all fields character so they work well together
                   bind_rows(data.frame(Field="Facility_Name", Value=name), .) %>% # add name to the top
                   # bind with date ranges and number of records and visits
                   bind_rows(data.frame(Field=c("Patient_Visit_Dates", "Message_Arrival_Dates",
                                                "Number of Records", "Number of Visits"),
                                        Value=c(paste("From", vmin, "to", vmax),
                                                paste("From", amin, "to", amax),
                                                nrow(data),
                                                n_groups(group_by(data, C_BioSense_ID))))) %>%
                   right_join(hl7_values, ., by="Field")), # get hl7 values
                 firstColumn=TRUE, bandedRows=TRUE)
  setColWidths(wb, sheet1, 1:3, "auto")
  # sheet 2: required nulls
  sheet2 <- addWorksheet(wb, "Required Nulls") # initialize sheet
  writeDataTable(wb, sheet2, req_nulls, firstColumn=TRUE, bandedRows=TRUE) # write to table
  setColWidths(wb, sheet2, 1:ncol(req_nulls), "auto") # format sheet
  freezePane(wb, sheet2, firstActiveRow=2) # format sheet
  # sheet 3: optional nulls
  sheet3 <- addWorksheet(wb, "Optional Nulls") # initialize sheet
  writeDataTable(wb, sheet3, opt_nulls, firstColumn=TRUE, bandedRows=TRUE) # write to table
  setColWidths(wb, sheet3, 1:ncol(opt_nulls), "auto") # format sheet
  freezePane(wb, sheet3, firstActiveRow=2) # format sheet
  # sheet 4: invalids
  sheet4 <- addWorksheet(wb, "Invalids") # initialize sheet
  writeDataTable(wb, sheet4, invalids, firstColumn=TRUE, bandedRows=TRUE) # write to table
  setColWidths(wb, sheet4, 1:ncol(invalids), "auto") # format sheet
  freezePane(wb, sheet4, firstActiveRow=2) # format sheet
  # write summary workbook to file
  saveWorkbook(wb, paste0(directory, "/", filename, "_Summary.xlsx"), overwrite=TRUE)
  if (nexamples > 0) {
    # get list of invalid examples data frames
    # DO NOT CHANGE THE ORDER OF THIS LIST
    invalid_examples <- list(admit_source_invalid(data)[[1]], # 1
                             age_invalid(data)[[1]], # 2
                             any_e_invalid(data)[[1]], # 3
                             blood_pressure_invalid(data)[[1]], # 4
                             cc_ar_invalid(data)[[1]], # 5
                             country_invalid(data)[[1]], # 6
                             death_invalid(data)[[1]], # 7
                             diagnosis_type_invalid(data)[[1]], # 8
                             discharge_disposition_invalid(data)[[1]], # 9
                             ethnicity_invalid(data)[[1]], # 10
                             facility_type_invalid(data)[[1]], # 11
                             fpid_mrn_invalid(data)[[1]], # 12
                             gender_invalid(data)[[1]], # 13
                             height_invalid(data)[[1]], # 14
                             patient_class_invalid(data)[[1]], # 15
                             pulseox_invalid(data)[[1]], # 16
                             race_invalid(data)[[1]], # 17
                             smoking_status_invalid(data)[[1]], # 18
                             state_invalid(data)[[1]], # 19
                             temperature_invalid(data)[[1]], # 20
                             weight_invalid(data)[[1]], # 21
                             zip_invalid(data)[[1]]) # 22
    inv_examples <- examples_invalids(facility, invalid_examples) # get examples of invalids from this facility
    null_examples <- examples_nulls(facility, data) # get examples of nulls from this facility
    # join with other relevant fields
    inv_examples <- inv_examples %>% # take examples
      left_join(., select(data, c(C_BioSense_ID, C_Visit_Date, C_Visit_Date_Time, First_Patient_ID,
                                  C_Unique_Patient_ID, Medical_Record_Number, Visit_ID, Admit_Date_Time,
                                  Recorded_Date_Time, Message_Date_Time, Create_Raw_Date_Time,
                                  Message_Type, Trigger_Event, Message_Structure, Message_Control_ID)),
                by="C_BioSense_ID") %>% # join with all these fields, for every record of that visit
      rename(Invalid_Field=Field) %>% # make it clearer that that field is the one that is invalid
      group_by(Invalid_Field) %>% # group by type of field
      slice(1:nexamples) # get nexamples
    # do the same for nulls
    null_examples <- null_examples %>%
      left_join(., select(data, c(C_BioSense_ID, C_Visit_Date, C_Visit_Date_Time, First_Patient_ID,
                                  C_Unique_Patient_ID, Medical_Record_Number, Visit_ID, Admit_Date_Time,
                                  Recorded_Date_Time, Message_Date_Time, Create_Raw_Date_Time,
                                  Message_Type, Trigger_Event, Message_Structure, Message_Control_ID)),
                by="C_BioSense_ID") %>% # join with all these fields, for every record of that visit
      group_by(Null_Field) %>% # group by type of field
      slice(1:nexamples) # get nexamples
    # write to excel workbook
    wb <- createWorkbook()
    # sheet 1: invalids
    sheet1 <- addWorksheet(wb, "Invalids")
    writeDataTable(wb, sheet1, inv_examples, firstColumn=TRUE, bandedRows=TRUE)
    setColWidths(wb, sheet1, 1:ncol(inv_examples), "auto")
    freezePane(wb, sheet1, firstActiveRow=2, firstActiveCol=4)
    # sheet2: nulls
    sheet2 <- addWorksheet(wb, "Nulls")
    writeDataTable(wb, sheet2, null_examples, firstColumn=TRUE, bandedRows=TRUE)
    setColWidths(wb, sheet2, 1:ncol(null_examples), "auto")
    freezePane(wb, sheet2, firstActiveRow=2, firstActiveCol=3)
    # write examples workbook, reusing the sanitized facility name from above
    saveWorkbook(wb, paste0(directory, "/", filename, "_Examples.xlsx"), overwrite=TRUE)
  }
}
|
/R/write_facility.R
|
no_license
|
markhwhiteii/biosensequality
|
R
| false
| false
| 10,686
|
r
|
#' Write NSSP BioSense Platform Data Quality Summary Reports for One Facility
#'
#' @description
#' This function is a lightweight version of the `write_reports` function. It will generate summary and example workbooks, but only for
#' one specified facility. The first summary workbook shows percents and counts of nulls and invalids, while the examples workbook
#' generates detailed information on records and visits that are null or invalid.
#'
#' @param username Your BioSense username, as a string. This is the same username you may use to log into RStudio or Adminer.
#' @param password Your BioSense password, as a string. This is the same password you may use to log into RStudio or Adminer.
#' @param table The table that you want to retrieve the data from, as a string.
#' @param mft The MFT (master facilities table) from where the facility name will be retrieved, as a string.
#' @param start The start date time that you wish to begin pulling data from, as a string.
#' @param end The end date time that you wish to stop pulling data from, as a string.
#' @param facility The C_Biosense_Facility_ID for the facility that you wish to generate and write the report for.
#' @param directory The directory where you would like to write the reports to (i.e., "~/Documents/MyReports"), as a string.
#' @param nexamples An integer number of examples you would like for each type of invalid or null field in the examples workbooks for each facility.
#' This defaults to 0, which will not generate these example workbooks.
#' @import dplyr
#' @import tidyr
#' @import openxlsx
#' @import RODBC
#' @importFrom stringr str_replace_all
#' @export
write_facility <- function(username, password, table, mft, start, end, facility, directory="", nexamples=0) {
  # pull data
  channel <- odbcConnect("BioSense_Platform", paste0("BIOSENSE\\", username), password) # open channel
  data <- sqlQuery(
    channel,
    paste0("SELECT * FROM ", table, " WHERE C_Visit_Date_Time >= '", start, "' AND C_Visit_Date_Time <= '", end, "' AND C_Biosense_Facility_ID = ", facility) # create sql query
  )
  if (nrow(data) == 0) stop("The query yielded no data.")
  name <- as.character(unlist(unname(c(sqlQuery(channel, paste0("SELECT Facility_Name FROM ", mft, " WHERE C_Biosense_Facility_ID = ", facility)))))) # get name from mft
  odbcCloseAll() # close connection
  # get hl7 values
  data("hl7_values", envir=environment())
  hl7_values$Field <- as.character(hl7_values$Field)
  # get facility-level state summary of required nulls
  req_nulls <- get_req_nulls(data) %>%
    select(-c(C_Biosense_Facility_ID)) %>%
    gather(Field, Value, 2:ncol(.)) %>%
    spread(Measure, Value) %>%
    right_join(hl7_values, ., by = "Field")
  # get facility-level state summary of optional nulls
  opt_nulls <- get_opt_nulls(data) %>%
    select(-c(C_Biosense_Facility_ID)) %>%
    gather(Field, Value, 2:ncol(.)) %>%
    spread(Measure, Value) %>%
    right_join(hl7_values, ., by = "Field")
  # get facility-level state summary of invalids
  invalids <- get_all_invalids(data) %>%
    select(-c(C_Biosense_Facility_ID)) %>%
    gather(Field, Value, 2:ncol(.)) %>%
    spread(Measure, Value) %>%
    right_join(hl7_values, ., by = "Field")
  # getting first and last visit date times
  vmin <- min(as.character(data$C_Visit_Date_Time))
  vmax <- max(as.character(data$C_Visit_Date_Time))
  amin <- min(as.character(data$Arrived_Date_Time))
  amax <- max(as.character(data$Arrived_Date_Time))
  # sanitize the facility name once for use in every output file name:
  # strip everything that is not a letter, digit, or whitespace, then
  # replace whitespace runs with underscores
  # (fixed: the old pattern used the range A-z, which wrongly kept [ \ ] ^ _ `)
  filename <- str_replace_all(name, "[^a-zA-Z\\s0-9]", "") %>%
    str_replace_all("[\\s]", "_")
  # write to xlsx
  # initialize workbook
  wb <- createWorkbook()
  # sheet 1: facility information
  sheet1 <- addWorksheet(wb, "Facility Information")
  writeDataTable(wb, sheet1,
                 suppressWarnings(data %>% # take data
                   select(c(C_Biosense_Facility_ID, Sending_Facility_ID, Sending_Application,
                            Treating_Facility_ID, Receiving_Application, Receiving_Facility)) %>% # taking only variables we want
                   gather(key=Field, value=Value, convert=TRUE) %>% # suppressed warnings because this will tell you it converted all to characters
                   distinct() %>% # get only distinct entries
                   mutate_all(as.character) %>% # make all fields character so they work well together
                   bind_rows(data.frame(Field="Facility_Name", Value=name), .) %>% # add name to the top
                   # bind with date ranges and number of records and visits
                   bind_rows(data.frame(Field=c("Patient_Visit_Dates", "Message_Arrival_Dates",
                                                "Number of Records", "Number of Visits"),
                                        Value=c(paste("From", vmin, "to", vmax),
                                                paste("From", amin, "to", amax),
                                                nrow(data),
                                                n_groups(group_by(data, C_BioSense_ID))))) %>%
                   right_join(hl7_values, ., by="Field")), # get hl7 values
                 firstColumn=TRUE, bandedRows=TRUE)
  setColWidths(wb, sheet1, 1:3, "auto")
  # sheet 2: required nulls
  sheet2 <- addWorksheet(wb, "Required Nulls") # initialize sheet
  writeDataTable(wb, sheet2, req_nulls, firstColumn=TRUE, bandedRows=TRUE) # write to table
  setColWidths(wb, sheet2, 1:ncol(req_nulls), "auto") # format sheet
  freezePane(wb, sheet2, firstActiveRow=2) # format sheet
  # sheet 3: optional nulls
  sheet3 <- addWorksheet(wb, "Optional Nulls") # initialize sheet
  writeDataTable(wb, sheet3, opt_nulls, firstColumn=TRUE, bandedRows=TRUE) # write to table
  setColWidths(wb, sheet3, 1:ncol(opt_nulls), "auto") # format sheet
  freezePane(wb, sheet3, firstActiveRow=2) # format sheet
  # sheet 4: invalids
  sheet4 <- addWorksheet(wb, "Invalids") # initialize sheet
  writeDataTable(wb, sheet4, invalids, firstColumn=TRUE, bandedRows=TRUE) # write to table
  setColWidths(wb, sheet4, 1:ncol(invalids), "auto") # format sheet
  freezePane(wb, sheet4, firstActiveRow=2) # format sheet
  # write summary workbook to file
  saveWorkbook(wb, paste0(directory, "/", filename, "_Summary.xlsx"), overwrite=TRUE)
  if (nexamples > 0) {
    # get list of invalid examples data frames
    # DO NOT CHANGE THE ORDER OF THIS LIST
    invalid_examples <- list(admit_source_invalid(data)[[1]], # 1
                             age_invalid(data)[[1]], # 2
                             any_e_invalid(data)[[1]], # 3
                             blood_pressure_invalid(data)[[1]], # 4
                             cc_ar_invalid(data)[[1]], # 5
                             country_invalid(data)[[1]], # 6
                             death_invalid(data)[[1]], # 7
                             diagnosis_type_invalid(data)[[1]], # 8
                             discharge_disposition_invalid(data)[[1]], # 9
                             ethnicity_invalid(data)[[1]], # 10
                             facility_type_invalid(data)[[1]], # 11
                             fpid_mrn_invalid(data)[[1]], # 12
                             gender_invalid(data)[[1]], # 13
                             height_invalid(data)[[1]], # 14
                             patient_class_invalid(data)[[1]], # 15
                             pulseox_invalid(data)[[1]], # 16
                             race_invalid(data)[[1]], # 17
                             smoking_status_invalid(data)[[1]], # 18
                             state_invalid(data)[[1]], # 19
                             temperature_invalid(data)[[1]], # 20
                             weight_invalid(data)[[1]], # 21
                             zip_invalid(data)[[1]]) # 22
    inv_examples <- examples_invalids(facility, invalid_examples) # get examples of invalids from this facility
    null_examples <- examples_nulls(facility, data) # get examples of nulls from this facility
    # join with other relevant fields
    inv_examples <- inv_examples %>% # take examples
      left_join(., select(data, c(C_BioSense_ID, C_Visit_Date, C_Visit_Date_Time, First_Patient_ID,
                                  C_Unique_Patient_ID, Medical_Record_Number, Visit_ID, Admit_Date_Time,
                                  Recorded_Date_Time, Message_Date_Time, Create_Raw_Date_Time,
                                  Message_Type, Trigger_Event, Message_Structure, Message_Control_ID)),
                by="C_BioSense_ID") %>% # join with all these fields, for every record of that visit
      rename(Invalid_Field=Field) %>% # make it clearer that that field is the one that is invalid
      group_by(Invalid_Field) %>% # group by type of field
      slice(1:nexamples) # get nexamples
    # do the same for nulls
    null_examples <- null_examples %>%
      left_join(., select(data, c(C_BioSense_ID, C_Visit_Date, C_Visit_Date_Time, First_Patient_ID,
                                  C_Unique_Patient_ID, Medical_Record_Number, Visit_ID, Admit_Date_Time,
                                  Recorded_Date_Time, Message_Date_Time, Create_Raw_Date_Time,
                                  Message_Type, Trigger_Event, Message_Structure, Message_Control_ID)),
                by="C_BioSense_ID") %>% # join with all these fields, for every record of that visit
      group_by(Null_Field) %>% # group by type of field
      slice(1:nexamples) # get nexamples
    # write to excel workbook
    wb <- createWorkbook()
    # sheet 1: invalids
    sheet1 <- addWorksheet(wb, "Invalids")
    writeDataTable(wb, sheet1, inv_examples, firstColumn=TRUE, bandedRows=TRUE)
    setColWidths(wb, sheet1, 1:ncol(inv_examples), "auto")
    freezePane(wb, sheet1, firstActiveRow=2, firstActiveCol=4)
    # sheet2: nulls
    sheet2 <- addWorksheet(wb, "Nulls")
    writeDataTable(wb, sheet2, null_examples, firstColumn=TRUE, bandedRows=TRUE)
    setColWidths(wb, sheet2, 1:ncol(null_examples), "auto")
    freezePane(wb, sheet2, firstActiveRow=2, firstActiveCol=3)
    # write examples workbook, reusing the sanitized facility name from above
    saveWorkbook(wb, paste0(directory, "/", filename, "_Examples.xlsx"), overwrite=TRUE)
  }
}
|
# Demo script: linear, ridge, and lasso regression on the swiss dataset.
# NOTE(review): "glmnet_2.0-16" is not a valid CRAN package name, so this
# call will fail; the pinned-version install is performed below via
# install_version(). Confirm whether this line should be removed.
install.packages("glmnet_2.0-16")
BiocManager::install("glmnet")
require(devtools)
install_version("glmnet", version = "2.0-16", repos = "http://cran.us.r-project.org")
# Loading the library
library(glmnet)
# Loading the data
data(swiss)
# Model matrix of predictors (intercept column dropped) and the response.
x_vars <- model.matrix(Fertility~. , swiss)[,-1]
y_var <- swiss$Fertility
data <- cbind(x_vars,y_var)
# Splitting the data into test and train (70/30)
set.seed(100)
index = sample(1:nrow(data), 0.7*nrow(data))
train = data[index,] # Create the training data
test = data[-index,] # Create the test data
dim(train)
dim(test)
#### Scaling the Numeric Features
# Center/scale parameters are estimated on the training set only, then
# applied to both train and test to avoid data leakage.
library(caret)
cols = c("Agriculture", "Examination", "Education", "Catholic", "Infant.Mortality")
pre_proc_val <- preProcess(train[,cols], method = c("center", "scale"))
train[,cols] = predict(pre_proc_val, train[,cols])
test[,cols] = predict(pre_proc_val, test[,cols])
train <- as.data.frame(train)
summary(train)
## Linear Regression (ordinary least squares baseline)
lr = lm(y_var ~ Agriculture + Examination + Education + Catholic + Infant.Mortality, data = train)
summary(lr)
####### Regularization ############
cols_reg = c("Agriculture", "Examination", "Education", "Catholic", "Infant.Mortality", "y_var")
dummies <- dummyVars(y_var ~ ., data = data[,cols_reg])
train_dummies = predict(dummies, newdata = train[,cols_reg])
test_dummies = predict(dummies, newdata = test[,cols_reg])
print(dim(train_dummies)); print(dim(test_dummies))
### Ridge Regression (alpha = 0)
library("glmnet")
x = as.matrix(train_dummies)
y_train = train$y_var
x_test = as.matrix(test_dummies)
y_test = test$y_var
# Lambda grid from 100 down to 0.001 on a log scale.
lambdas <- 10^seq(2, -3, by = -.1)
ridge_reg = glmnet(x, y_train, nlambda = 25, alpha = 0, family = 'gaussian', lambda = lambdas)
summary(ridge_reg)
# Cross-validation to pick the lambda minimizing CV error.
cv_ridge <- cv.glmnet(x, y_train, alpha = 0, lambda = lambdas)
optimal_lambda <- cv_ridge$lambda.min
optimal_lambda
### Lasso Regression
lambdas <- 10^seq(2, -3, by = -.1)
# Setting alpha = 1 implements lasso regression
lasso_reg <- cv.glmnet(x, y_train, alpha = 1, lambda = lambdas, standardize = TRUE, nfolds = 5)
# Best lambda chosen by 5-fold cross-validation.
lambda_best <- lasso_reg$lambda.min
lambda_best
lasso_model <- glmnet(x, y_train, alpha = 1, lambda = lambda_best, standardize = TRUE)
coef(lasso_model)
predictions_train <- predict(lasso_model, s = lambda_best, newx = x)
# NOTE(review): eval_results() is not defined in this script or any loaded
# package above — presumably sourced elsewhere; verify before running.
eval_results(y_train, predictions_train, train)
predictions_test <- predict(lasso_model, s = lambda_best, newx = x_test)
eval_results(y_test, predictions_test, test)
|
/test/Regression/Test_Regression.R
|
no_license
|
haojiang9999/HCA_script
|
R
| false
| false
| 2,433
|
r
|
# Demo script: linear, ridge, and lasso regression on the swiss dataset.
# NOTE(review): "glmnet_2.0-16" is not a valid CRAN package name, so this
# call will fail; the pinned-version install is performed below via
# install_version(). Confirm whether this line should be removed.
install.packages("glmnet_2.0-16")
BiocManager::install("glmnet")
require(devtools)
install_version("glmnet", version = "2.0-16", repos = "http://cran.us.r-project.org")
# Loading the library
library(glmnet)
# Loading the data
data(swiss)
# Model matrix of predictors (intercept column dropped) and the response.
x_vars <- model.matrix(Fertility~. , swiss)[,-1]
y_var <- swiss$Fertility
data <- cbind(x_vars,y_var)
# Splitting the data into test and train (70/30)
set.seed(100)
index = sample(1:nrow(data), 0.7*nrow(data))
train = data[index,] # Create the training data
test = data[-index,] # Create the test data
dim(train)
dim(test)
#### Scaling the Numeric Features
# Center/scale parameters are estimated on the training set only, then
# applied to both train and test to avoid data leakage.
library(caret)
cols = c("Agriculture", "Examination", "Education", "Catholic", "Infant.Mortality")
pre_proc_val <- preProcess(train[,cols], method = c("center", "scale"))
train[,cols] = predict(pre_proc_val, train[,cols])
test[,cols] = predict(pre_proc_val, test[,cols])
train <- as.data.frame(train)
summary(train)
## Linear Regression (ordinary least squares baseline)
lr = lm(y_var ~ Agriculture + Examination + Education + Catholic + Infant.Mortality, data = train)
summary(lr)
####### Regularization ############
cols_reg = c("Agriculture", "Examination", "Education", "Catholic", "Infant.Mortality", "y_var")
dummies <- dummyVars(y_var ~ ., data = data[,cols_reg])
train_dummies = predict(dummies, newdata = train[,cols_reg])
test_dummies = predict(dummies, newdata = test[,cols_reg])
print(dim(train_dummies)); print(dim(test_dummies))
### Ridge Regression (alpha = 0)
library("glmnet")
x = as.matrix(train_dummies)
y_train = train$y_var
x_test = as.matrix(test_dummies)
y_test = test$y_var
# Lambda grid from 100 down to 0.001 on a log scale.
lambdas <- 10^seq(2, -3, by = -.1)
ridge_reg = glmnet(x, y_train, nlambda = 25, alpha = 0, family = 'gaussian', lambda = lambdas)
summary(ridge_reg)
# Cross-validation to pick the lambda minimizing CV error.
cv_ridge <- cv.glmnet(x, y_train, alpha = 0, lambda = lambdas)
optimal_lambda <- cv_ridge$lambda.min
optimal_lambda
### Lasso Regression
lambdas <- 10^seq(2, -3, by = -.1)
# Setting alpha = 1 implements lasso regression
lasso_reg <- cv.glmnet(x, y_train, alpha = 1, lambda = lambdas, standardize = TRUE, nfolds = 5)
# Best lambda chosen by 5-fold cross-validation.
lambda_best <- lasso_reg$lambda.min
lambda_best
lasso_model <- glmnet(x, y_train, alpha = 1, lambda = lambda_best, standardize = TRUE)
coef(lasso_model)
predictions_train <- predict(lasso_model, s = lambda_best, newx = x)
# NOTE(review): eval_results() is not defined in this script or any loaded
# package above — presumably sourced elsewhere; verify before running.
eval_results(y_train, predictions_train, train)
predictions_test <- predict(lasso_model, s = lambda_best, newx = x_test)
eval_results(y_test, predictions_test, test)
|
# PACKAGES ----------------------------------------------------------------
library(tidyverse)
library(gh)
# GLOBAL VARS -------------------------------------------------------------
outputDir <- "485_responses"
path_dropbox <- "~/dropbox/"
path_csv_files <- paste0(path_dropbox, outputDir)
org_name <- "SEMO-GABD" # Also functions as owner
team_ident <- 2935903 # Team number for Students_2018 team.
# FUNCTIONS ---------------------------------------------------------------
# Functions to make and manipulate repos on GitHub.
# See github_api_examples.R for other possibilities.
# Makes repos first
# Create one student repo in the course organization.
#
# Arguments mirror the roster columns (last_name, first_name, and the
# student's GitHub user_name); extra roster columns are absorbed by `...`
# so the function can be used directly with purrr::pmap().
# The repo is named "last_first" (lower case) and attached to the course team.
make_repos <- function(last_name, first_name, user_name, ...) {
  gh::gh("POST /orgs/:org/repos",
         org = org_name,
         description = paste("This repo belongs to", user_name),
         name = stringr::str_to_lower(paste(last_name,
                                            first_name,
                                            sep = "_")),
         team_id = team_ident,
         auto_init = TRUE,
         gitignore_template = "R",
         # Fixed: the create-repo endpoint's field is "license_template";
         # "gitlicense_template" is not a recognized API parameter, so the
         # MIT license was silently never applied.
         license_template = "mit",
         has_wiki = FALSE)
}
# Add the student as collaborator with default push access.
# Repo names are created lower-cased in make_repos(); lower-case here as
# well so the name matches the repo exactly (GitHub routes are
# case-insensitive, but keeping the casing consistent avoids surprises).
add_collaborator <- function(last_name, first_name, user_name, ...) {
  gh::gh("PUT /repos/:owner/:repo/collaborators/:username",
         owner = org_name,
         repo = stringr::str_to_lower(paste(last_name, first_name, sep = "_")),
         username = user_name)
}
# MAIN --------------------------------------------------------------------
# Import and format data for purrr and gh
# Based on this blog post by Claus Wilke
# https://serialmentor.com/blog/2016/6/13/reading-and-combining-many-tidy-data-files-in-R
# See the comments for map_dfr(), which I changed to map to keep
# students as list structure.
# Every CSV in the responses directory holds one student's details.
# NOTE(review): `pattern` is a regex, so "*.csv" is not a literal glob;
# "\\.csv$" would be the precise pattern -- confirm intent before changing.
files <- dir(path = path_csv_files, pattern = "*.csv")
students <- files %>%
  map(~ read_csv(file.path(path_csv_files, .)))
# One row per student: last name, first name, GitHub user name.
stu_list <- students %>% {
  tibble::tibble(
    last_name = map_chr(., "last_name"),
    first_name = map_chr(., "first_name"),
    user_name = map_chr(., "git_user")
  )
}
# Pipe the student list to the Git functions to configure them
# on GitHub.
stu_list %>% pmap(make_repos)
stu_list %>% pmap(add_collaborator)
# TO DO -------------------------------------------------------------------
# Write a function that gets the team repos and extracts the team_id.
# Could just do this manually each year.
# IGNORE ------------------------------------------------------------------
# Working example of pasting two names together
# Working example: join two values with an underscore separator.
my_func <- function(x, y) {
  paste(x, y, sep = "_")
}
# Same join demonstrated with purrr::map2_chr over the student names.
lst_nm <- students %>% map_chr("last_name") %>% stringr::str_to_lower(.)
fst_nm <- students %>% map_chr("first_name")
map2_chr(lst_nm, fst_nm, my_func)
|
/purrr_test.R
|
permissive
|
mtaylor-semo/shiny485
|
R
| false
| false
| 2,754
|
r
|
# PACKAGES ----------------------------------------------------------------
library(tidyverse)
library(gh)
# GLOBAL VARS -------------------------------------------------------------
outputDir <- "485_responses"
path_dropbox <- "~/dropbox/"
path_csv_files <- paste0(path_dropbox, outputDir)
org_name <- "SEMO-GABD" # Also functions as owner
team_ident <- 2935903 # Team number for Students_2018 team.
# FUNCTIONS ---------------------------------------------------------------
# Functions to make and manipulate repos on GitHub.
# See github_api_examples.R for other possibilities.
# Makes repos first
# Create one GitHub repo per student in the course organization.
# Repo name is "last_first" (lower case); the repo is initialized with an
# R .gitignore and an MIT license so students can clone immediately.
make_repos <- function(last_name, first_name, user_name, ...) {
  gh::gh("POST /orgs/:org/repos",
         org = org_name,
         description = paste("This repo belongs to", user_name),
         name = stringr::str_to_lower(paste(last_name,
                                            first_name,
                                            sep = "_")),
         team_id = team_ident,
         auto_init = TRUE,
         gitignore_template = "R",
         # BUG FIX: the GitHub "create repository" API parameter is
         # `license_template`; the old `gitlicense_template` name was
         # silently ignored, so repos were created without a license.
         license_template = "mit",
         has_wiki = FALSE)
}
# Add the student as collaborator with default push access.
# Grant a student push access (GitHub's default permission) to their repo.
# NOTE(review): make_repos() lower-cases the repo name but this does not;
# GitHub repo URLs are case-insensitive so the call still resolves — confirm.
add_collaborator <- function(last_name, first_name, user_name, ...) {
  repo_name <- paste(last_name, first_name, sep = "_")
  gh::gh("PUT /repos/:owner/:repo/collaborators/:username",
         owner = org_name,
         repo = repo_name,
         username = user_name)
}
# MAIN --------------------------------------------------------------------
# Import and format data for purrr and gh
# Based on this blog post by Claus Wilke
# https://serialmentor.com/blog/2016/6/13/reading-and-combining-many-tidy-data-files-in-R
# See the comments for map_dfr(), which I changed to map to keep
# students as list structure.
# Every CSV in the responses directory holds one student's details.
# NOTE(review): `pattern` is a regex, so "*.csv" is not a literal glob;
# "\\.csv$" would be the precise pattern -- confirm intent before changing.
files <- dir(path = path_csv_files, pattern = "*.csv")
students <- files %>%
  map(~ read_csv(file.path(path_csv_files, .)))
# One row per student: last name, first name, GitHub user name.
stu_list <- students %>% {
  tibble::tibble(
    last_name = map_chr(., "last_name"),
    first_name = map_chr(., "first_name"),
    user_name = map_chr(., "git_user")
  )
}
# Pipe the student list to the Git functions to configure them
# on GitHub.
stu_list %>% pmap(make_repos)
stu_list %>% pmap(add_collaborator)
# TO DO -------------------------------------------------------------------
# Write a function that gets the team repos and extracts the team_id.
# Could just do this manually each year.
# IGNORE ------------------------------------------------------------------
# Working example of pasting two names together
# Working example: join two values with an underscore separator.
my_func <- function(x, y) {
  paste(x, y, sep = "_")
}
# Same join demonstrated with purrr::map2_chr over the student names.
lst_nm <- students %>% map_chr("last_name") %>% stringr::str_to_lower(.)
fst_nm <- students %>% map_chr("first_name")
map2_chr(lst_nm, fst_nm, my_func)
|
#' Transform or convert coordinates of simple features directly with Proj.4 (bypassing GDAL)
#'
#' Transform or convert coordinates of simple features directly with Proj.4 (bypassing GDAL)
#'
#' @param x object of class sf, sfc or sfg
#' @param crs either an object of class \code{crs}, or input to \link[sf]{st_crs} (proj4string, or EPSG code), or a length 2 character vector with input proj4string and output proj4string
#' @param ... ignored
#' @details Transforms coordinates of object to new projection, using Proj.4 directly rather than the GDAL API used by \link[sf]{st_transform}.
#'
#' If \code{crs} is a single CRS, it forms the target CRS, and in that case the source CRS is obtained as \code{st_crs(x)}. Since this presumes that the source CRS is accepted by GDAL (which is not always the case), a second option is to specify the source and target CRS as two proj4strings in argument \code{crs}. In the latter case, \code{st_crs(x)} is ignored and may well be \code{NA}.
#' @examples
#' library(sf)
#' p1 = st_point(c(7,52))
#' p2 = st_point(c(-30,20))
#' sfc = st_sfc(p1, p2, crs = 4326)
#' sfc
#' st_transform_proj(sfc, "+proj=wintri")
#' @export
# S3 generic; method dispatch on class(x): sf, sfc or sfg.
st_transform_proj = function(x, crs, ...) UseMethod("st_transform_proj")
#' @name st_transform_proj
#' @export
st_transform_proj.sfc = function(x, crs, ...) {
	# Reduce `crs` to one or two proj4strings.
	if (is.numeric(crs)) {
		crs = st_crs(crs)$proj4string
	} else if (inherits(crs, "crs")) {
		crs = crs$proj4string
	}
	stopifnot(length(crs) %in% c(1, 2))
	# A single string is the target CRS; the source then comes from `x` itself.
	if (length(crs) == 1)
		crs = c(st_crs(x)$proj4string, crs) # c(input, output)
	st_sfc(CPL_lwgeom_transform(x, crs))
}
#' @name st_transform_proj
#' @export
#' @examples
#' library(sf)
#' nc = st_read(system.file("shape/nc.shp", package="sf"))
#' st_transform_proj(nc[1,], "+proj=wintri +over")
st_transform_proj.sf = function(x, crs, ...) {
	# Replace the active geometry column with its transformed version.
	geom_column = attr(x, "sf_column")
	x[[geom_column]] = st_transform_proj(st_geometry(x), crs, ...)
	x
}
#' @name st_transform_proj
#' @export
#' @details The \code{st_transform_proj} method for \code{sfg} objects assumes that the CRS of the object is available as an attribute of that name.
#' @examples
#' st_transform_proj(structure(p1, proj4string = "+init=epsg:4326"), "+init=epsg:3857")
st_transform_proj.sfg = function(x, crs, ...) {
	if (missing(crs))
		stop("argument crs cannot be missing") # nocov
	if (length(crs) == 1) {
		# The source CRS must travel with the sfg as a "proj4string" attribute.
		p4s = attr(x, "proj4string")
		if (is.null(p4s))
			stop("x does not have a proj4string attribute") # nocov
		if (!is.character(p4s))
			stop("proj4string attribute should be a character string") # nocov
		crs = c(p4s, crs)
	}
	# Transform via the sfc method; keep the target CRS on the result.
	structure(st_transform_proj(st_sfc(x), crs, ...)[[1]], proj4string = tail(crs, 1))
}
|
/R/transform.R
|
no_license
|
RafaMariano/lwgeom
|
R
| false
| false
| 2,672
|
r
|
#' Transform or convert coordinates of simple features directly with Proj.4 (bypassing GDAL)
#'
#' Transform or convert coordinates of simple features directly with Proj.4 (bypassing GDAL)
#'
#' @param x object of class sf, sfc or sfg
#' @param crs either an object of class \code{crs}, or input to \link[sf]{st_crs} (proj4string, or EPSG code), or a length 2 character vector with input proj4string and output proj4string
#' @param ... ignored
#' @details Transforms coordinates of object to new projection, using Proj.4 directly rather than the GDAL API used by \link[sf]{st_transform}.
#'
#' If \code{crs} is a single CRS, it forms the target CRS, and in that case the source CRS is obtained as \code{st_crs(x)}. Since this presumes that the source CRS is accepted by GDAL (which is not always the case), a second option is to specify the source and target CRS as two proj4strings in argument \code{crs}. In the latter case, \code{st_crs(x)} is ignored and may well be \code{NA}.
#' @examples
#' library(sf)
#' p1 = st_point(c(7,52))
#' p2 = st_point(c(-30,20))
#' sfc = st_sfc(p1, p2, crs = 4326)
#' sfc
#' st_transform_proj(sfc, "+proj=wintri")
#' @export
# S3 generic; method dispatch on class(x): sf, sfc or sfg.
st_transform_proj = function(x, crs, ...) UseMethod("st_transform_proj")
#' @name st_transform_proj
#' @export
st_transform_proj.sfc = function(x, crs, ...) {
	# Reduce `crs` to one or two proj4strings.
	if (is.numeric(crs)) {
		crs = st_crs(crs)$proj4string
	} else if (inherits(crs, "crs")) {
		crs = crs$proj4string
	}
	stopifnot(length(crs) %in% c(1, 2))
	# A single string is the target CRS; the source then comes from `x` itself.
	if (length(crs) == 1)
		crs = c(st_crs(x)$proj4string, crs) # c(input, output)
	st_sfc(CPL_lwgeom_transform(x, crs))
}
#' @name st_transform_proj
#' @export
#' @examples
#' library(sf)
#' nc = st_read(system.file("shape/nc.shp", package="sf"))
#' st_transform_proj(nc[1,], "+proj=wintri +over")
st_transform_proj.sf = function(x, crs, ...) {
	# Replace the active geometry column with its transformed version.
	geom_column = attr(x, "sf_column")
	x[[geom_column]] = st_transform_proj(st_geometry(x), crs, ...)
	x
}
#' @name st_transform_proj
#' @export
#' @details The \code{st_transform_proj} method for \code{sfg} objects assumes that the CRS of the object is available as an attribute of that name.
#' @examples
#' st_transform_proj(structure(p1, proj4string = "+init=epsg:4326"), "+init=epsg:3857")
st_transform_proj.sfg = function(x, crs, ...) {
	if (missing(crs))
		stop("argument crs cannot be missing") # nocov
	if (length(crs) == 1) {
		# The source CRS must travel with the sfg as a "proj4string" attribute.
		p4s = attr(x, "proj4string")
		if (is.null(p4s))
			stop("x does not have a proj4string attribute") # nocov
		if (!is.character(p4s))
			stop("proj4string attribute should be a character string") # nocov
		crs = c(p4s, crs)
	}
	# Transform via the sfc method; keep the target CRS on the result.
	structure(st_transform_proj(st_sfc(x), crs, ...)[[1]], proj4string = tail(crs, 1))
}
|
library(landscapemetrics)
### Name: lsm_l_te
### Title: TE (landscape level)
### Aliases: lsm_l_te lsm_l_te.RasterLayer lsm_l_te.RasterStack
### lsm_l_te.RasterBrick lsm_l_te.stars lsm_l_te.list
### ** Examples
# Total edge (TE) metric for the package's bundled example `landscape` raster.
lsm_l_te(landscape)
|
/data/genthat_extracted_code/landscapemetrics/examples/lsm_l_te.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 241
|
r
|
library(landscapemetrics)
### Name: lsm_l_te
### Title: TE (landscape level)
### Aliases: lsm_l_te lsm_l_te.RasterLayer lsm_l_te.RasterStack
### lsm_l_te.RasterBrick lsm_l_te.stars lsm_l_te.list
### ** Examples
# Total edge (TE) metric for the package's bundled example `landscape` raster.
lsm_l_te(landscape)
|
# Per-run date tag used to locate this analysis version on disk.
date_tag <- "210317"
# Project header: loads shared libraries and helpers (defines pff(), etc.).
source(paste0("/.mounts/labs/reimandlab/private/users/oocsenas/CA2M/",
# source(paste0("/.mounts/labs/reimandlab/private/users/jreimand/CA2M/",
              date_tag,
              "/bin/000_HEADER.R"))
input_data_dir <- "/.mounts/labs/reimandlab/private/users/oocsenas/CA2M/INPUT_DATA/"

# P- and q-values per 100KB window, produced by step 005C.
window_pval_dt <- fread(pff("/data/005C_pvaldt_100KBerrorwindows.csv"))
window_qval_dt <- fread(pff("/data/005C_qvaldt_100KBerrorwindows.csv"))

# Genomic ranges of the 100KB windows (start .. start + 99999).
windows_gr <- GRanges(window_pval_dt$chr,
                      IRanges(window_pval_dt$start,
                              window_pval_dt$start + 99999))

# Keep chromosomes 1-22 and X only.
chr_to_keep <- paste0("chr", c(1:22, "X"))

# Protein-coding GENCODE genes on the kept chromosomes.
GENCODE <- fread(paste0(input_data_dir, "GENCODE_hg38_PROCESSED.txt"))[gene_type == "protein_coding"][chr %in% chr_to_keep]
GENCODE_gr <- GRanges(GENCODE$chr, IRanges(GENCODE$start, GENCODE$end))
# For one GENCODE gene, pull the window-level values (p- or q-values) of the
# 100KB window that overlaps the gene most; NAs if no window overlaps.
#   gene_index      - row index into the global GENCODE table / GENCODE_gr.
#   p_val_dt        - window-level data.table of values, one column per cohort.
#   cohorts_to_keep - character vector of cohort columns to extract.
# Returns a numeric vector with one value per cohort in cohorts_to_keep.
get_gene_pvals = function(gene_index, p_val_dt, cohorts_to_keep){
    gene_coordinates = GENCODE_gr[gene_index]
    pvals = p_val_dt[, .SD, .SDcols = cohorts_to_keep]

    # Windows overlapping this gene.
    overlapping_windows = subjectHits(findOverlaps(gene_coordinates, windows_gr))
    n_hits = length(overlapping_windows)

    if (n_hits == 0) {
        # No window covers the gene.
        gene_pvals = rep(NA, length(cohorts_to_keep))
    } else if (n_hits == 1) {
        gene_pvals = as.numeric(pvals[overlapping_windows])
    } else {
        # Several candidate windows: keep the one with the largest overlap width.
        widths = unlist(lapply(overlapping_windows,
                               function(w) width(intersect(gene_coordinates,
                                                           windows_gr[w]))))
        gene_pvals = as.numeric(pvals[overlapping_windows[which.max(widths)]])
    }
    return(gene_pvals)}
# --- p-values per gene -------------------------------------------------------
# Restrict to the core 14 cancer types.
cancer_types_to_keep <- c("Breast-AdenoCa", "Kidney-RCC", "Liver-HCC", "ColoRect-AdenoCA", "Eso-AdenoCa",
                          "CNS-GBM", "Stomach-AdenoCA", "Biliary-AdenoCA", "Lung-AdenoCA", "Lung-SCC",
                          "Head-SCC", "Lymph-BNHL", "Lymph-CLL", "Skin-Melanoma")

# One row per gene, one column per cohort (parallel over genes).
gene_pval_dt <- as.data.table(do.call("rbind.data.frame",
                                      mclapply(seq_len(nrow(GENCODE)),
                                               get_gene_pvals,
                                               window_pval_dt,
                                               cancer_types_to_keep, mc.cores = 8)))
colnames(gene_pval_dt) <- cancer_types_to_keep
gene_pval_dt_full <- as.data.table(cbind.data.frame(gene_name = GENCODE$gene_name, gene_pval_dt))
fwrite(gene_pval_dt_full, pff("/data/005E_gene_pvaldt_100KBerrorwindows.csv"))
#Get qval for each gene
# BUG FIX: this section previously called get_gene_pvals() via
# lapply(1:26, ...) WITHOUT the required `cohorts_to_keep` argument (a
# runtime error), iterated over a hard-coded 26 indices instead of every
# gene, column-bound the per-gene vectors instead of row-binding them, and
# assigned colnames from a nonsensical self-subset. It now mirrors the
# p-value computation above.
gene_qval_dt = as.data.table(do.call("rbind.data.frame",
                                     mclapply(1:nrow(GENCODE),
                                              get_gene_pvals,
                                              window_qval_dt,
                                              cancer_types_to_keep, mc.cores = 8)))
colnames(gene_qval_dt) = cancer_types_to_keep
gene_qval_dt_full = as.data.table(cbind.data.frame(gene_name = GENCODE$gene_name, gene_qval_dt))
fwrite(gene_qval_dt_full, pff("/data/005E_gene_qvaldt_100KBerrorwindows.csv"))
|
/.ipynb_checkpoints/005E_get_errors_forgenes-checkpoint.R
|
no_license
|
reimandlab/CA2M
|
R
| false
| false
| 3,126
|
r
|
# Per-run date tag used to locate this analysis version on disk.
date_tag <- "210317"
# Project header: loads shared libraries and helpers (defines pff(), etc.).
source(paste0("/.mounts/labs/reimandlab/private/users/oocsenas/CA2M/",
# source(paste0("/.mounts/labs/reimandlab/private/users/jreimand/CA2M/",
              date_tag,
              "/bin/000_HEADER.R"))
input_data_dir <- "/.mounts/labs/reimandlab/private/users/oocsenas/CA2M/INPUT_DATA/"

# P- and q-values per 100KB window, produced by step 005C.
window_pval_dt <- fread(pff("/data/005C_pvaldt_100KBerrorwindows.csv"))
window_qval_dt <- fread(pff("/data/005C_qvaldt_100KBerrorwindows.csv"))

# Genomic ranges of the 100KB windows (start .. start + 99999).
windows_gr <- GRanges(window_pval_dt$chr,
                      IRanges(window_pval_dt$start,
                              window_pval_dt$start + 99999))

# Keep chromosomes 1-22 and X only.
chr_to_keep <- paste0("chr", c(1:22, "X"))

# Protein-coding GENCODE genes on the kept chromosomes.
GENCODE <- fread(paste0(input_data_dir, "GENCODE_hg38_PROCESSED.txt"))[gene_type == "protein_coding"][chr %in% chr_to_keep]
GENCODE_gr <- GRanges(GENCODE$chr, IRanges(GENCODE$start, GENCODE$end))
# For one GENCODE gene, pull the window-level values (p- or q-values) of the
# 100KB window that overlaps the gene most; NAs if no window overlaps.
#   gene_index      - row index into the global GENCODE table / GENCODE_gr.
#   p_val_dt        - window-level data.table of values, one column per cohort.
#   cohorts_to_keep - character vector of cohort columns to extract.
# Returns a numeric vector with one value per cohort in cohorts_to_keep.
get_gene_pvals = function(gene_index, p_val_dt, cohorts_to_keep){
    gene_coordinates = GENCODE_gr[gene_index]
    pvals = p_val_dt[, .SD, .SDcols = cohorts_to_keep]

    # Windows overlapping this gene.
    overlapping_windows = subjectHits(findOverlaps(gene_coordinates, windows_gr))
    n_hits = length(overlapping_windows)

    if (n_hits == 0) {
        # No window covers the gene.
        gene_pvals = rep(NA, length(cohorts_to_keep))
    } else if (n_hits == 1) {
        gene_pvals = as.numeric(pvals[overlapping_windows])
    } else {
        # Several candidate windows: keep the one with the largest overlap width.
        widths = unlist(lapply(overlapping_windows,
                               function(w) width(intersect(gene_coordinates,
                                                           windows_gr[w]))))
        gene_pvals = as.numeric(pvals[overlapping_windows[which.max(widths)]])
    }
    return(gene_pvals)}
# --- p-values per gene -------------------------------------------------------
# Restrict to the core 14 cancer types.
cancer_types_to_keep <- c("Breast-AdenoCa", "Kidney-RCC", "Liver-HCC", "ColoRect-AdenoCA", "Eso-AdenoCa",
                          "CNS-GBM", "Stomach-AdenoCA", "Biliary-AdenoCA", "Lung-AdenoCA", "Lung-SCC",
                          "Head-SCC", "Lymph-BNHL", "Lymph-CLL", "Skin-Melanoma")

# One row per gene, one column per cohort (parallel over genes).
gene_pval_dt <- as.data.table(do.call("rbind.data.frame",
                                      mclapply(seq_len(nrow(GENCODE)),
                                               get_gene_pvals,
                                               window_pval_dt,
                                               cancer_types_to_keep, mc.cores = 8)))
colnames(gene_pval_dt) <- cancer_types_to_keep
gene_pval_dt_full <- as.data.table(cbind.data.frame(gene_name = GENCODE$gene_name, gene_pval_dt))
fwrite(gene_pval_dt_full, pff("/data/005E_gene_pvaldt_100KBerrorwindows.csv"))
#Get qval for each gene
# BUG FIX: this section previously called get_gene_pvals() via
# lapply(1:26, ...) WITHOUT the required `cohorts_to_keep` argument (a
# runtime error), iterated over a hard-coded 26 indices instead of every
# gene, column-bound the per-gene vectors instead of row-binding them, and
# assigned colnames from a nonsensical self-subset. It now mirrors the
# p-value computation above.
gene_qval_dt = as.data.table(do.call("rbind.data.frame",
                                     mclapply(1:nrow(GENCODE),
                                              get_gene_pvals,
                                              window_qval_dt,
                                              cancer_types_to_keep, mc.cores = 8)))
colnames(gene_qval_dt) = cancer_types_to_keep
gene_qval_dt_full = as.data.table(cbind.data.frame(gene_name = GENCODE$gene_name, gene_qval_dt))
fwrite(gene_qval_dt_full, pff("/data/005E_gene_qvaldt_100KBerrorwindows.csv"))
|
library(ggplot2)
library(tidyr)
library(reshape2)
library(dplyr)
library(tictactoe)

set.seed(123)

# Train a Q-learning player against a random opponent.
p <- ttt_random()
o <- ttt_qlearn(p, N = 5000)

# Long format: fraction of draws / player-1 wins / player-2 wins
# as a function of the number of training games.
dat <- o %>%
  select(-n_sim) %>%
  spread(key = res, value = Freq, fill = 0) %>%
  melt(id.vars = "n_train", variable.name = "result_num", value.name = "frac")

# Human-readable result labels.
dat$result <- ""
dat$result[dat$result_num == "0"] <- "Draw"
dat$result[dat$result_num == "1"] <- "Player1"
dat$result[dat$result_num == "2"] <- "Player2"

# Greyscale learning-curve plot, saved as PDF and PNG.
ggplot(dat, aes(n_train, frac, linetype = result, shape = result)) +
  geom_line(size = 0.8, color = "grey25") +
  geom_point(size = 2.2, color = "grey50") +
  geom_point(size = 2.0, color = "grey10") +
  xlab("number of training") + ylab("fraction") +
  theme_bw()
ggsave("examples/ttt-qlearn.pdf", width = 8, height = 4)
ggsave("examples/ttt-qlearn.png", width = 8, height = 4)
|
/examples/qlearn-example.R
|
no_license
|
MangalMakwana/tictactoe
|
R
| false
| false
| 840
|
r
|
library(ggplot2)
library(tidyr)
library(reshape2)
library(dplyr)
library(tictactoe)

set.seed(123)

# Train a Q-learning player against a random opponent.
p <- ttt_random()
o <- ttt_qlearn(p, N = 5000)

# Long format: fraction of draws / player-1 wins / player-2 wins
# as a function of the number of training games.
dat <- o %>%
  select(-n_sim) %>%
  spread(key = res, value = Freq, fill = 0) %>%
  melt(id.vars = "n_train", variable.name = "result_num", value.name = "frac")

# Human-readable result labels.
dat$result <- ""
dat$result[dat$result_num == "0"] <- "Draw"
dat$result[dat$result_num == "1"] <- "Player1"
dat$result[dat$result_num == "2"] <- "Player2"

# Greyscale learning-curve plot, saved as PDF and PNG.
ggplot(dat, aes(n_train, frac, linetype = result, shape = result)) +
  geom_line(size = 0.8, color = "grey25") +
  geom_point(size = 2.2, color = "grey50") +
  geom_point(size = 2.0, color = "grey10") +
  xlab("number of training") + ylab("fraction") +
  theme_bw()
ggsave("examples/ttt-qlearn.pdf", width = 8, height = 4)
ggsave("examples/ttt-qlearn.png", width = 8, height = 4)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/function_calc_ci_coverage.R
\name{calc_ci_coverage}
\alias{calc_ci_coverage}
\title{Calculate coverage of confidence intervals}
\usage{
calc_ci_coverage(x, groups_col)
}
\arguments{
\item{x}{Data frame. Output of boot_sample function.}
\item{groups_col}{Factor. Column containing name(s) of population(s) of interest}
}
\value{
A data frame of CTL summary statistics from bootstrap resamples
}
\description{
Calculate how many bootstrap sample CTL estimates fall within bounds of 95\%
confidence interval.
}
\examples{
head(coreid_data)
# First draw bootstrap samples
sims <- boot_sample(data = coreid_data,
groups_col = col,
response = response,
n_max = 49,
iter = 99)
# Now we can calculate coverage
cover <- calc_ci_coverage(x= sims,
groups_col = col)
}
|
/man/calc_ci_coverage.Rd
|
no_license
|
guysutton/ThermalSampleR
|
R
| false
| true
| 941
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/function_calc_ci_coverage.R
\name{calc_ci_coverage}
\alias{calc_ci_coverage}
\title{Calculate coverage of confidence intervals}
\usage{
calc_ci_coverage(x, groups_col)
}
\arguments{
\item{x}{Data frame. Output of boot_sample function.}
\item{groups_col}{Factor. Column containing name(s) of population(s) of interest}
}
\value{
A data frame of CTL summary statistics from bootstrap resamples
}
\description{
Calculate how many bootstrap sample CTL estimates fall within bounds of 95\%
confidence interval.
}
\examples{
head(coreid_data)
# First draw bootstrap samples
sims <- boot_sample(data = coreid_data,
groups_col = col,
response = response,
n_max = 49,
iter = 99)
# Now we can calculate coverage
cover <- calc_ci_coverage(x= sims,
groups_col = col)
}
|
#' Load complete package.
#'
#' @param pkg package description, can be path or package name. See
#' \code{\link{as.package}} for more information
#' @param reset clear package environment and reset file cache before loading
#' any pieces of the package.
#'
#' @keywords programming
#' @export
load_all <- function(pkg = NULL, reset = FALSE) {
  pkg <- as.package(pkg)
  message("Loading ", pkg$package)
  # Check description file is ok
  # NOTE(review): tools:::.check_package_description is an unexported base R
  # internal; its interface may change between R versions -- confirm.
  check <- tools:::.check_package_description(
    file.path(pkg$path, "DESCRIPTION"))
  if (length(check) > 0) {
    msg <- capture.output(tools:::print.check_package_description(check))
    # A malformed DESCRIPTION is reported but does not abort the load.
    message("Invalid DESCRIPTION:\n", paste(msg, collapse = "\n"))
  }
  # If installed version of package loaded, unload it
  # NOTE(review): base::is.loaded() tests for a registered native symbol;
  # presumably a devtools helper of the same name is intended here -- confirm.
  if (is.loaded(pkg) && is.locked(pkg)) {
    unload(pkg)
  }
  # Load dependencies before creating environment so it sees all the required
  # packages
  load_deps(pkg)
  if (reset) {
    # Start from a clean slate: file cache, S4 classes, package environment.
    clear_cache()
    clear_classes(pkg)
    clear_pkg_env(pkg)
  }
  env <- pkg_env(pkg)
  # Data first, then R code, then compiled C code.
  load_data(pkg, env)
  load_code(pkg, env)
  load_c(pkg)
  invisible()
}
# Is the package's environment locked (i.e. sealed after loading)?
is.locked <- function(pkg = NULL) {
  target_env <- as.environment(env_name(pkg))
  environmentIsLocked(target_env)
}
|
/R/load.r
|
no_license
|
andrie/devtools
|
R
| false
| false
| 1,213
|
r
|
#' Load complete package.
#'
#' @param pkg package description, can be path or package name. See
#' \code{\link{as.package}} for more information
#' @param reset clear package environment and reset file cache before loading
#' any pieces of the package.
#'
#' @keywords programming
#' @export
load_all <- function(pkg = NULL, reset = FALSE) {
  pkg <- as.package(pkg)
  message("Loading ", pkg$package)
  # Check description file is ok
  # NOTE(review): tools:::.check_package_description is an unexported base R
  # internal; its interface may change between R versions -- confirm.
  check <- tools:::.check_package_description(
    file.path(pkg$path, "DESCRIPTION"))
  if (length(check) > 0) {
    msg <- capture.output(tools:::print.check_package_description(check))
    # A malformed DESCRIPTION is reported but does not abort the load.
    message("Invalid DESCRIPTION:\n", paste(msg, collapse = "\n"))
  }
  # If installed version of package loaded, unload it
  # NOTE(review): base::is.loaded() tests for a registered native symbol;
  # presumably a devtools helper of the same name is intended here -- confirm.
  if (is.loaded(pkg) && is.locked(pkg)) {
    unload(pkg)
  }
  # Load dependencies before creating environment so it sees all the required
  # packages
  load_deps(pkg)
  if (reset) {
    # Start from a clean slate: file cache, S4 classes, package environment.
    clear_cache()
    clear_classes(pkg)
    clear_pkg_env(pkg)
  }
  env <- pkg_env(pkg)
  # Data first, then R code, then compiled C code.
  load_data(pkg, env)
  load_code(pkg, env)
  load_c(pkg)
  invisible()
}
# Is the package's environment locked (i.e. sealed after loading)?
is.locked <- function(pkg = NULL) {
  target_env <- as.environment(env_name(pkg))
  environmentIsLocked(target_env)
}
|
# Questions
# You must address the following questions and tasks in your exploratory analysis. For each question/task you will need to make a single plot. Unless specified, you can use any plotting system in R to make your plot.
library(plyr)
library(ggplot2)

# National Emissions Inventory data and source classification codes.
NEI <- readRDS("exdata_data_NEI_data/summarySCC_PM25.rds")
SCC <- readRDS("exdata_data_NEI_data/Source_Classification_Code.rds")

# SCC codes whose EI.Sector mentions vehicles.
# FIX: ignore.case takes logical TRUE, not the string "TRUE" (which only
# worked through implicit coercion).
vehicleSources <- SCC[grep("vehicles", SCC$EI.Sector, ignore.case = TRUE), 1]
vehicleSourcesNEI <- NEI[NEI$SCC %in% vehicleSources, ] # CHECK: the $type do not contain only ONROAD, but also POINT type too.!!

# Baltimore City (fips 24510) and Los Angeles County (fips 06037).
baltiLA <- vehicleSourcesNEI[vehicleSourcesNEI$fips %in% c("24510", "06037"), ]

# Total emissions per county per year.
res <- ddply(baltiLA, c("fips", "year"), summarize, TotalEmission = sum(Emissions))
# FIX: corrected spelling "Los Angles" -> "Los Angeles" in the legend labels.
res$fips <- revalue(res$fips, c("06037" = "Los Angeles", "24510" = "Baltimore City"))

g <- ggplot(res, aes(x = year, y = TotalEmission, colour = fips)) + geom_line()
g <- g + theme(plot.background = element_rect(fill = 'ivory1'))
g <- g + theme(panel.background = element_rect(fill = 'ivory2'))
# FIX: corrected title typos ("Comparision" -> "Comparison", "Los Angles" ->
# "Los Angeles"). NOTE(review): the y label says "3 years" -- NEI covers the
# survey years present in the data; confirm the intended wording.
g <- g + ggtitle("Comparison of Motor Vehicle Emission in Baltimore City and Los Angeles") + xlab("Year") + ylab("Total Emission in 3 years")

png("plot6.png", width = 1000, height = 600)
print(g)
dev.off()
|
/Project2/plot6.R
|
no_license
|
GityHuby/Exploratory-Data-Analysis
|
R
| false
| false
| 1,354
|
r
|
# Questions
# You must address the following questions and tasks in your exploratory analysis. For each question/task you will need to make a single plot. Unless specified, you can use any plotting system in R to make your plot.
library(plyr)
library(ggplot2)

# National Emissions Inventory data and source classification codes.
NEI <- readRDS("exdata_data_NEI_data/summarySCC_PM25.rds")
SCC <- readRDS("exdata_data_NEI_data/Source_Classification_Code.rds")

# SCC codes whose EI.Sector mentions vehicles.
# FIX: ignore.case takes logical TRUE, not the string "TRUE" (which only
# worked through implicit coercion).
vehicleSources <- SCC[grep("vehicles", SCC$EI.Sector, ignore.case = TRUE), 1]
vehicleSourcesNEI <- NEI[NEI$SCC %in% vehicleSources, ] # CHECK: the $type do not contain only ONROAD, but also POINT type too.!!

# Baltimore City (fips 24510) and Los Angeles County (fips 06037).
baltiLA <- vehicleSourcesNEI[vehicleSourcesNEI$fips %in% c("24510", "06037"), ]

# Total emissions per county per year.
res <- ddply(baltiLA, c("fips", "year"), summarize, TotalEmission = sum(Emissions))
# FIX: corrected spelling "Los Angles" -> "Los Angeles" in the legend labels.
res$fips <- revalue(res$fips, c("06037" = "Los Angeles", "24510" = "Baltimore City"))

g <- ggplot(res, aes(x = year, y = TotalEmission, colour = fips)) + geom_line()
g <- g + theme(plot.background = element_rect(fill = 'ivory1'))
g <- g + theme(panel.background = element_rect(fill = 'ivory2'))
# FIX: corrected title typos ("Comparision" -> "Comparison", "Los Angles" ->
# "Los Angeles"). NOTE(review): the y label says "3 years" -- NEI covers the
# survey years present in the data; confirm the intended wording.
g <- g + ggtitle("Comparison of Motor Vehicle Emission in Baltimore City and Los Angeles") + xlab("Year") + ylab("Total Emission in 3 years")

png("plot6.png", width = 1000, height = 600)
print(g)
dev.off()
|
\name{robpca}
\alias{robpca}
\title{
ROBust PCA algorithm
}
\description{
ROBPCA algorithm of Hubert et al. (2005) including reweighting (Engelen et al., 2005) and possible extension to skewed data (Hubert et al., 2009).
}
\usage{
robpca (x, k = 0, kmax = 10, alpha = 0.75, h = NULL, mcd = FALSE,
ndir = "all", skew = FALSE, ...)
}
\arguments{
\item{x}{An \eqn{n} by \eqn{p} matrix or data matrix with observations in the rows and variables in the columns.}
\item{k}{Number of principal components that will be used. When \code{k=0} (default), the number of components is selected using the criterion in Hubert et al. (2005).}
\item{kmax}{Maximal number of principal components that will be computed, default is 10.}
\item{alpha}{Robustness parameter, default is 0.75.}
\item{h}{The number of outliers the algorithm should resist is given by \eqn{n-h}. Any value for \code{h} between \eqn{n/2} and \eqn{n} may be specified. Default is \code{NULL} which uses \code{h=ceiling(alpha*n)+1}. Do not specify \code{alpha} and \code{h} at the same time. }
\item{mcd}{Logical indicating if the MCD adaptation of ROBPCA may be applied when the number of variables is sufficiently small (see Details). If \code{mcd=FALSE} (default), the full ROBPCA algorithm is always applied.}
\item{ndir}{Number of directions used when computing the outlyingness (or the adjusted outlyingness when \code{skew=TRUE}), see \code{\link[mrfDepth]{outlyingness}} and \code{\link[mrfDepth]{adjOutlyingness}} for more details.}
\item{skew}{Logical indicating if the version for skewed data (Hubert et al., 2009) is applied, default is \code{FALSE}.}
\item{...}{Other arguments to pass to methods.}
}
\details{
This function is based extensively on \code{PcaHubert} from \pkg{rrcov} and there are two main differences:
The outlyingness measure that is used for non-skewed data (\code{skew=FALSE}) is the Stahel-Donoho measure as described in Hubert et al. (2005) which is also used in \code{\link[rrcov]{PcaHubert}}. The implementation in \pkg{mrfDepth} (which is used here) is however much faster than the one in \code{\link[rrcov]{PcaHubert}} and hence more, or even all, directions can be considered when computing the outlyingness measure.
Moreover, the extension for skewed data of Hubert et al. (2009) (\code{skew=TRUE}) is also implemented here, but this is not included in \code{\link[rrcov]{PcaHubert}}.
For an extensive description of the ROBPCA algorithm we refer to Hubert et al. (2005) and to \code{\link[rrcov]{PcaHubert}}.
When \code{mcd=TRUE} and \eqn{n<5 \times p}, we do not apply the full ROBPCA algorithm. The loadings and eigenvalues
are then computed as the eigenvectors and eigenvalues of the MCD estimator applied to the data set after the SVD step.
}
\value{
A list with components:\cr
\item{loadings}{Loadings matrix containing the robust loadings (eigenvectors), a numeric matrix of size \eqn{p} by \eqn{k}.}
\item{eigenvalues}{Numeric vector of length \eqn{k} containing the robust eigenvalues.}
\item{scores}{Scores matrix (computed as \eqn{(X-center) \cdot loadings}), a numeric matrix of size \eqn{n} by \eqn{k}.}
\item{center}{Numeric vector of length \eqn{p} containing the centre of the data.}
\item{k}{Number of (chosen) principal components.}
\item{H0}{Logical vector of size \eqn{n} indicating if an observation is in the initial h-subset.}
\item{H1}{Logical vector of size \eqn{n} indicating if an observation is kept in the reweighting step.}
\item{alpha}{The robustness parameter \eqn{\alpha} used throughout the algorithm.}
\item{h}{The \eqn{h}-parameter used throughout the algorithm.}
\item{sd}{Numeric vector of size \eqn{n} containing the robust score distances within the robust PCA subspace.}
\item{od}{Numeric vector of size \eqn{n} containing the orthogonal distances to the robust PCA subspace.}
\item{cutoff.sd}{Cut-off value for the robust score distances.}
\item{cutoff.od}{Cut-off value for the orthogonal distances.}
\item{flag.sd}{Numeric vector of size \eqn{n} containing the SD-flags of the observations. The observations whose score distance is larger than \code{cutoff.sd} receive an SD-flag equal to zero. The other observations receive an SD-flag equal to 1.}
\item{flag.od}{Numeric vector of size \eqn{n} containing the OD-flags of the observations. The observations whose orthogonal distance is larger than \code{cutoff.od} receive an OD-flag equal to zero. The other observations receive an OD-flag equal to 1.}
\item{flag.all}{Numeric vector of size \eqn{n} containing the flags of the observations. The observations whose score distance is larger than \code{cutoff.sd} or whose orthogonal distance is
larger than \code{cutoff.od} can be considered as outliers and receive a flag equal to zero.
The regular observations receive flag 1.}
}
\references{
Hubert, M., Rousseeuw, P. J., and Vanden Branden, K. (2005), ``ROBPCA: A New Approach to Robust Principal Component Analysis,'' \emph{Technometrics}, 47, 64--79.
Engelen, S., Hubert, M. and Vanden Branden, K. (2005), ``A Comparison of Three Procedures for Robust PCA in
High Dimensions", \emph{Austrian Journal of Statistics}, 34, 117--126.
Hubert, M., Rousseeuw, P. J., and Verdonck, T. (2009), ``Robust PCA for Skewed Data and Its Outlier Map," \emph{Computational Statistics & Data Analysis}, 53, 2264--2274.
}
\author{
Tom Reynkens, based on R code from Valentin Todorov for \code{PcaHubert} in \pkg{rrcov} (released under GPL-3) and Matlab code from Katrien Van Driessen (for the univariate MCD).
}
%% \note{
%% ~~further notes~~
%% }
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link[rrcov]{PcaHubert}}, \code{\link[mrfDepth]{outlyingness}}, \code{\link[mrfDepth]{adjOutlyingness}}
}
\examples{
X <- dataGen(m=1, n=100, p=10, eps=0.2, bLength=4)$data[[1]]
resR <- robpca(X, k=2)
diagPlot(resR)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ robust }% __ONLY ONE__ keyword per line
\keyword{ multivariate }% __ONLY ONE__ keyword per line
|
/man/robpca.Rd
|
no_license
|
Sandy4321/rospca
|
R
| false
| false
| 6,111
|
rd
|
\name{robpca}
\alias{robpca}
\title{
ROBust PCA algorithm
}
\description{
ROBPCA algorithm of Hubert et al. (2005) including reweighting (Engelen et al., 2005) and possible extension to skewed data (Hubert et al., 2009).
}
\usage{
robpca (x, k = 0, kmax = 10, alpha = 0.75, h = NULL, mcd = FALSE,
ndir = "all", skew = FALSE, ...)
}
\arguments{
\item{x}{An \eqn{n} by \eqn{p} matrix or data matrix with observations in the rows and variables in the columns.}
\item{k}{Number of principal components that will be used. When \code{k=0} (default), the number of components is selected using the criterion in Hubert et al. (2005).}
\item{kmax}{Maximal number of principal components that will be computed, default is 10.}
\item{alpha}{Robustness parameter, default is 0.75.}
\item{h}{The number of outliers the algorithm should resist is given by \eqn{n-h}. Any value for \code{h} between \eqn{n/2} and \eqn{n} may be specified. Default is \code{NULL} which uses \code{h=ceiling(alpha*n)+1}. Do not specify \code{alpha} and \code{h} at the same time. }
\item{mcd}{Logical indicating if the MCD adaptation of ROBPCA may be applied when the number of variables is sufficiently small (see Details). If \code{mcd=FALSE} (default), the full ROBPCA algorithm is always applied.}
\item{ndir}{Number of directions used when computing the outlyingness (or the adjusted outlyingness when \code{skew=TRUE}), see \code{\link[mrfDepth]{outlyingness}} and \code{\link[mrfDepth]{adjOutlyingness}} for more details.}
\item{skew}{Logical indicating if the version for skewed data (Hubert et al., 2009) is applied, default is \code{FALSE}.}
\item{...}{Other arguments to pass to methods.}
}
\details{
This function is based extensively on \code{PcaHubert} from \pkg{rrcov} and there are two main differences:
The outlyingness measure that is used for non-skewed data (\code{skew=FALSE}) is the Stahel-Donoho measure as described in Hubert et al. (2005) which is also used in \code{\link[rrcov]{PcaHubert}}. The implementation in \pkg{mrfDepth} (which is used here) is however much faster than the one in \code{\link[rrcov]{PcaHubert}} and hence more, or even all, directions can be considered when computing the outlyingness measure.
Moreover, the extension for skewed data of Hubert et al. (2009) (\code{skew=TRUE}) is also implemented here, but this is not included in \code{\link[rrcov]{PcaHubert}}.
For an extensive description of the ROBPCA algorithm we refer to Hubert et al. (2005) and to \code{\link[rrcov]{PcaHubert}}.
When \code{mcd=TRUE} and \eqn{n<5 \times p}, we do not apply the full ROBPCA algorithm. The loadings and eigenvalues
are then computed as the eigenvectors and eigenvalues of the MCD estimator applied to the data set after the SVD step.
}
\value{
A list with components:\cr
\item{loadings}{Loadings matrix containing the robust loadings (eigenvectors), a numeric matrix of size \eqn{p} by \eqn{k}.}
\item{eigenvalues}{Numeric vector of length \eqn{k} containing the robust eigenvalues.}
\item{scores}{Scores matrix (computed as \eqn{(X-center) \cdot loadings}), a numeric matrix of size \eqn{n} by \eqn{k}.}
\item{center}{Numeric vector of length \eqn{p} containing the centre of the data.}
\item{k}{Number of (chosen) principal components.}
\item{H0}{Logical vector of size \eqn{n} indicating if an observation is in the initial h-subset.}
\item{H1}{Logical vector of size \eqn{n} indicating if an observation is kept in the reweighting step.}
\item{alpha}{The robustness parameter \eqn{\alpha} used throughout the algorithm.}
\item{h}{The \eqn{h}-parameter used throughout the algorithm.}
\item{sd}{Numeric vector of size \eqn{n} containing the robust score distances within the robust PCA subspace.}
\item{od}{Numeric vector of size \eqn{n} containing the orthogonal distances to the robust PCA subspace.}
\item{cutoff.sd}{Cut-off value for the robust score distances.}
\item{cutoff.od}{Cut-off value for the orthogonal distances.}
\item{flag.sd}{Numeric vector of size \eqn{n} containing the SD-flags of the observations. The observations whose score distance is larger than \code{cutoff.sd} receive an SD-flag equal to zero. The other observations receive an SD-flag equal to 1.}
\item{flag.od}{Numeric vector of size \eqn{n} containing the OD-flags of the observations. The observations whose orthogonal distance is larger than \code{cutoff.od} receive an OD-flag equal to zero. The other observations receive an OD-flag equal to 1.}
\item{flag.all}{Numeric vector of size \eqn{n} containing the flags of the observations. The observations whose score distance is larger than \code{cutoff.sd} or whose orthogonal distance is
larger than \code{cutoff.od} can be considered as outliers and receive a flag equal to zero.
The regular observations receive flag 1.}
}
\references{
Hubert, M., Rousseeuw, P. J., and Vanden Branden, K. (2005), ``ROBPCA: A New Approach to Robust Principal Component Analysis,'' \emph{Technometrics}, 47, 64--79.
Engelen, S., Hubert, M. and Vanden Branden, K. (2005), ``A Comparison of Three Procedures for Robust PCA in
High Dimensions", \emph{Austrian Journal of Statistics}, 34, 117--126.
Hubert, M., Rousseeuw, P. J., and Verdonck, T. (2009), ``Robust PCA for Skewed Data and Its Outlier Map," \emph{Computational Statistics & Data Analysis}, 53, 2264--2274.
}
\author{
Tom Reynkens, based on R code from Valentin Todorov for \code{PcaHubert} in \pkg{rrcov} (released under GPL-3) and Matlab code from Katrien Van Driessen (for the univariate MCD).
}
%% \note{
%% ~~further notes~~
%% }
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link[rrcov]{PcaHubert}}, \code{\link[mrfDepth]{outlyingness}}, \code{\link[mrfDepth]{adjOutlyingness}}
}
\examples{
X <- dataGen(m=1, n=100, p=10, eps=0.2, bLength=4)$data[[1]]
resR <- robpca(X, k=2)
diagPlot(resR)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ robust }% __ONLY ONE__ keyword per line
\keyword{ multivariate }% __ONLY ONE__ keyword per line
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{s_fish_gfw}
\alias{s_fish_gfw}
\title{Raster stack from Global Fishing Watch analysis of high seas (Sala et al,
2018)}
\format{
A \code{\link[raster]{stack}} with layers of results from analyses.
}
\usage{
s_fish_gfw
}
\description{
Global half-degree raster of high seas. Year of analysis is 2016. See also
\url{https://github.com/SFG-UCSB/The-economics-of-fishing-the-high-seas}.
}
\keyword{datasets}
|
/man/s_fish_gfw.Rd
|
no_license
|
BenioffOceanInitiative/bbnj
|
R
| false
| true
| 509
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{s_fish_gfw}
\alias{s_fish_gfw}
\title{Raster stack from Global Fishing Watch analysis of high seas (Sala et al,
2018)}
\format{
A \code{\link[raster]{stack}} with layers of results from analyses.
}
\usage{
s_fish_gfw
}
\description{
Global half-degree raster of high seas. Year of analysis is 2016. See also
\url{https://github.com/SFG-UCSB/The-economics-of-fishing-the-high-seas}.
}
\keyword{datasets}
|
# Instacart Market Basket Analysis (Kaggle): predict which previously
# purchased products a user will reorder. Builds per-user and per-
# (user, product) features from the prior orders, trains an H2O XGBoost
# classifier, and writes a submission file. Requires the Instacart CSVs
# on local disk and a reachable local H2O cluster.
library(dplyr)
library(h2o)
library(data.table)
packageVersion("h2o")
# Start/attach a local H2O cluster (15 threads, 8 GB heap).
h2o.init(ip = "localhost", port = 4444, nthreads = 15, max_mem_size = "8g", strict_version_check = FALSE)
# Load datasets
ais <- fread("/Users/mauropelucchi/Desktop/Instacart/aisles.csv", key = "aisle_id")
dept <- fread("/Users/mauropelucchi/Desktop/Instacart/departments.csv", key = "department_id")
prod <- fread("/Users/mauropelucchi/Desktop/Instacart/products.csv", key = c("product_id","aisle_id", "department_id"))
opp <- fread("/Users/mauropelucchi/Desktop/Instacart/order_products__prior.csv")
opt <- fread("/Users/mauropelucchi/Desktop/Instacart/order_products__train.csv")
ord <- fread("/Users/mauropelucchi/Desktop/Instacart/orders.csv")
# Get product department and aisle names
prod <- merge(prod, ais, by="aisle_id", all.x=TRUE, sort=FALSE)
prod <- merge(prod, dept, by="department_id", all.x=TRUE, sort=FALSE)
# For the prior orders get the associated product, aisle, departments, and users
opp <- merge(opp, prod, by="product_id", all.x=TRUE, sort=FALSE)
opp <- merge(opp, ord, by="order_id", all.x=TRUE, sort=FALSE)
# Per user: highest prior order number and total line items purchased.
ord_max <- opp %>%
group_by(user_id) %>%
summarize(order_max = max(order_number), purch_count = n())
head(ord_max)
# orders_ago: how many orders back each line item occurred (1 = most recent).
opp <- left_join(opp, ord_max, by="user_id")
opp <- opp %>% mutate(orders_ago=order_max - order_number + 1)
# Interactive inspection only; the result is printed, not stored.
select(opp, order_number, user_id, order_max, orders_ago, purch_count)
# Create a few simple features
# Per (user, product): recency, purchase count, and average order hour.
user_prod_list <- opp %>%
group_by(user_id, product_id) %>%
summarize(last_order_number = max(order_number), purch_count = n(), avg_hour = mean(order_hour_of_day))
# Compute reordered rate
user_prod_list_reordered <- opp %>%
filter(reordered == 1) %>%
group_by(user_id, product_id) %>%
summarize(reordered_count = n())
user_prod_list <- left_join(user_prod_list, user_prod_list_reordered, by=c("user_id", "product_id"))
user_prod_list <- user_prod_list %>% mutate(reorder_rate=reordered_count/purch_count)
# Products never reordered come back NA from the left join; treat as 0.
user_prod_list <- user_prod_list %>% mutate(reorder_rate=ifelse(is.na(reorder_rate), 0, reorder_rate))
user_prod_list <- user_prod_list %>% mutate(reordered_count=ifelse(is.na(reordered_count), 0, reordered_count))
head(user_prod_list)
# Per-user summary features (history size, diversity, cadence, basket size).
user_summ <- opp %>%
group_by(user_id) %>%
summarize(user_total_products_ordered_hist = n(),
uniq_prod = n_distinct(product_name),
uniq_aisle = n_distinct(aisle),
uniq_dept = n_distinct(department),
prior_orders = max(order_number),
avg_hour = mean(order_hour_of_day),
average_days_between_orders = mean(days_since_prior_order),
total_order = n_distinct(order_number),
average_basket = n() / n_distinct(order_number)
)
head(user_summ)
# Per (user, product): purchase counts and first/last purchase recency.
user_prior_prod_cnt <- opp %>%
group_by(user_id, product_id) %>%
summarize(prior_prod_cnt = n(),
last_purchased_orders_ago = min(orders_ago),
first_purchased_orders_ago = max(orders_ago),
average_days_between_ord_prods = mean(days_since_prior_order)
)
head(user_prior_prod_cnt)
# Merge datasets to create training frame
# Target curr_prod_purchased = 1 iff the (user, product) pair appears as a
# reorder in the user's train-set order (order_id non-NA after the join).
opt_user <- left_join(filter(opt, reordered==1), ord, by="order_id")
dt_expanded <- left_join(user_prod_list, opt_user, by=c("user_id", "product_id"))
dt_expanded <- dt_expanded %>% mutate(curr_prod_purchased=ifelse(!is.na(order_id), 1, 0))
#head(dt_expanded)
train <- left_join(dt_expanded, user_summ, by="user_id")
train <- left_join(train, user_prior_prod_cnt, by=c("user_id", "product_id"))
varnames <- setdiff(colnames(train), c("user_id","order_id","curr_prod_purchased"))
head(train)
# Create the test frame
test_orders <- filter(ord, eval_set=="test")
dt_expanded_test <- inner_join(user_prod_list, test_orders, by=c("user_id"))
# Placeholder random target so the test frame has the same schema as train.
dt_expanded_test <- dt_expanded_test %>% mutate(curr_prod_purchased=sample(c(0,1), n(), replace=TRUE))
#head(dt_expanded_test)
test <- inner_join(dt_expanded_test, user_summ, by="user_id")
test <- inner_join(test, user_prior_prod_cnt, by=c("user_id", "product_id"))
head(test)
# Check target
test %>% ungroup %>% distinct(curr_prod_purchased)
train %>% ungroup %>% distinct(curr_prod_purchased)
# Sample users for the validation set
set.seed(2222)
unique_user_id <- select(ord_max, user_id)
head(unique_user_id)
val_users <- sample_n(unique_user_id, size=10000, replace=FALSE)
head(val_users)
# Ungroup and convert to factor
train <- train %>% ungroup() %>% mutate(curr_prod_purchased=as.factor(curr_prod_purchased))
test <- test %>% ungroup() %>% mutate(curr_prod_purchased=as.factor(curr_prod_purchased))
test %>% distinct(curr_prod_purchased)
train %>% distinct(curr_prod_purchased)
# Some exploratory analysis
# The top 10 aisles, which represent roughly 45% of sales
library(ggplot2)
tmp <- filter(opp, reordered == 1) %>%
group_by(aisle,department) %>%
tally(sort=TRUE) %>%
mutate(perc = round(100*n/nrow(opp),2)) %>%
ungroup() %>%
top_n(10,n)
tmp %>%
ggplot(aes(x=reorder(aisle, -n), y=n, fill=department)) +
geom_bar(stat="identity") +
theme(axis.text.x=element_text(angle=90, hjust=1), axis.title.x = element_blank())#
# Products vs number of times ordered/reordered
t <- filter(opp) %>% select(product_id, product_name) %>% group_by(product_id, product_name) %>% summarize(ncount=n()) %>% ungroup()
r <- filter(opp, reordered == 1) %>% select(product_id, product_name) %>% group_by(product_id, product_name) %>% summarize(rcount=n()) %>% ungroup()
t <- left_join(t, r, by="product_id") %>% top_n(20,ncount)
t
t %>%
ggplot() +
geom_bar(aes(x=reorder(product_name.x , -ncount), y=ncount, fill="Number of times ordered" ), stat="identity") +
geom_bar(aes(x=reorder(product_name.x , -ncount), y=rcount, fill="Number of times reordered" ), stat="identity") +
guides(fill=guide_legend(title=element_blank())) +
theme(axis.text.x=element_text(angle=90, hjust=1), axis.title.x = element_blank(), axis.title.y=element_blank())#
# Add data to H2O
colnames(train)
select(train, prior_orders)
# Train/validation split by user: the 10k sampled users are held out entirely.
train_tpl <- anti_join(train, val_users, by="user_id") %>% select(c("curr_prod_purchased", "user_id", "reordered_count", "product_id", "last_order_number", "purch_count", "avg_hour.x",
"order_dow", "add_to_cart_order", "reordered", "user_total_products_ordered_hist", "uniq_prod", "uniq_aisle", "uniq_dept",
"prior_orders", "avg_hour.y", "average_days_between_orders", "total_order", "average_basket", "prior_prod_cnt",
"last_purchased_orders_ago", "first_purchased_orders_ago", "average_days_between_ord_prods"))
val_tpl <- inner_join(train, val_users, by="user_id") %>% select(c("curr_prod_purchased", "user_id", "reordered_count", "product_id", "last_order_number", "purch_count", "avg_hour.x",
"order_dow", "add_to_cart_order", "reordered", "user_total_products_ordered_hist", "uniq_prod", "uniq_aisle", "uniq_dept",
"prior_orders", "avg_hour.y", "average_days_between_orders", "total_order", "average_basket", "prior_prod_cnt",
"last_purchased_orders_ago", "first_purchased_orders_ago", "average_days_between_ord_prods"))
train.hex <- as.h2o(train_tpl, destination_frame = "train.hex")
val.hex <- as.h2o(val_tpl, destination_frame = "val.hex")
# Free up some memory
rm(train, opp, opt, ord, prod, dept, ais, user_prod_list, user_summ);gc()
rm(train_tpl, val_tpl);gc();
# Train xgboost model
# Early stopping: stop after 3 scoring rounds without logloss improvement
# on the validation frame (scored after every tree).
xgb <- h2o.xgboost(x = c("user_id", "reordered_count", "product_id", "last_order_number", "purch_count", "avg_hour.x",
"order_dow", "add_to_cart_order", "reordered", "user_total_products_ordered_hist", "uniq_prod", "uniq_aisle", "uniq_dept",
"prior_orders", "avg_hour.y", "average_days_between_orders", "total_order", "average_basket", "prior_prod_cnt",
"last_purchased_orders_ago", "first_purchased_orders_ago", "average_days_between_ord_prods")
,y = "curr_prod_purchased"
,training_frame = train.hex
,validation_frame = val.hex
,model_id = "xgb_model_1"
,stopping_rounds = 3
,stopping_metric = "logloss"
,distribution = "bernoulli"
,score_tree_interval = 1
,learn_rate=0.1
,ntrees=20
,subsample = 0.75
,colsample_bytree = 0.75
,tree_method = "hist"
,grow_policy = "lossguide"
,booster = "gbtree"
,gamma = 0.0
)
# Make predictions
test.hex <- as.h2o(test, destination_frame = "test.hex")
predictions <- as.data.table(h2o.predict(xgb, test.hex))
predictions <- data.table(order_id=test$order_id, product_id=test$product_id, testPreds=predictions$predict, p0=predictions$p0, p1=predictions$p1)
filter(predictions, testPreds==1)
# Concatenate the kept product ids per order; empty baskets become "None".
# NOTE(review): the filter p0 > 0.21 keeps products whose *no-reorder*
# probability exceeds 0.21 (i.e. p1 < 0.79) -- verify this was not meant
# to be p1 > 0.21.
testPreds <- predictions[,.(products=paste0(product_id[p0>0.21], collapse=" ")), by=order_id]
set(testPreds, which(testPreds[["products"]]==""), "products", "None")
# Create submission file
fwrite(testPreds, "/Users/mauropelucchi/Desktop/Instacart/submission.csv")
|
/xgboost/h2o_instacart.r
|
permissive
|
mauropelucchi/machine-learning-course
|
R
| false
| false
| 9,450
|
r
|
# Instacart Market Basket Analysis (Kaggle): predict which previously
# purchased products a user will reorder. Builds per-user and per-
# (user, product) features from the prior orders, trains an H2O XGBoost
# classifier, and writes a submission file. Requires the Instacart CSVs
# on local disk and a reachable local H2O cluster.
library(dplyr)
library(h2o)
library(data.table)
packageVersion("h2o")
# Start/attach a local H2O cluster (15 threads, 8 GB heap).
h2o.init(ip = "localhost", port = 4444, nthreads = 15, max_mem_size = "8g", strict_version_check = FALSE)
# Load datasets
ais <- fread("/Users/mauropelucchi/Desktop/Instacart/aisles.csv", key = "aisle_id")
dept <- fread("/Users/mauropelucchi/Desktop/Instacart/departments.csv", key = "department_id")
prod <- fread("/Users/mauropelucchi/Desktop/Instacart/products.csv", key = c("product_id","aisle_id", "department_id"))
opp <- fread("/Users/mauropelucchi/Desktop/Instacart/order_products__prior.csv")
opt <- fread("/Users/mauropelucchi/Desktop/Instacart/order_products__train.csv")
ord <- fread("/Users/mauropelucchi/Desktop/Instacart/orders.csv")
# Get product department and aisle names
prod <- merge(prod, ais, by="aisle_id", all.x=TRUE, sort=FALSE)
prod <- merge(prod, dept, by="department_id", all.x=TRUE, sort=FALSE)
# For the prior orders get the associated product, aisle, departments, and users
opp <- merge(opp, prod, by="product_id", all.x=TRUE, sort=FALSE)
opp <- merge(opp, ord, by="order_id", all.x=TRUE, sort=FALSE)
# Per user: highest prior order number and total line items purchased.
ord_max <- opp %>%
group_by(user_id) %>%
summarize(order_max = max(order_number), purch_count = n())
head(ord_max)
# orders_ago: how many orders back each line item occurred (1 = most recent).
opp <- left_join(opp, ord_max, by="user_id")
opp <- opp %>% mutate(orders_ago=order_max - order_number + 1)
# Interactive inspection only; the result is printed, not stored.
select(opp, order_number, user_id, order_max, orders_ago, purch_count)
# Create a few simple features
# Per (user, product): recency, purchase count, and average order hour.
user_prod_list <- opp %>%
group_by(user_id, product_id) %>%
summarize(last_order_number = max(order_number), purch_count = n(), avg_hour = mean(order_hour_of_day))
# Compute reordered rate
user_prod_list_reordered <- opp %>%
filter(reordered == 1) %>%
group_by(user_id, product_id) %>%
summarize(reordered_count = n())
user_prod_list <- left_join(user_prod_list, user_prod_list_reordered, by=c("user_id", "product_id"))
user_prod_list <- user_prod_list %>% mutate(reorder_rate=reordered_count/purch_count)
# Products never reordered come back NA from the left join; treat as 0.
user_prod_list <- user_prod_list %>% mutate(reorder_rate=ifelse(is.na(reorder_rate), 0, reorder_rate))
user_prod_list <- user_prod_list %>% mutate(reordered_count=ifelse(is.na(reordered_count), 0, reordered_count))
head(user_prod_list)
# Per-user summary features (history size, diversity, cadence, basket size).
user_summ <- opp %>%
group_by(user_id) %>%
summarize(user_total_products_ordered_hist = n(),
uniq_prod = n_distinct(product_name),
uniq_aisle = n_distinct(aisle),
uniq_dept = n_distinct(department),
prior_orders = max(order_number),
avg_hour = mean(order_hour_of_day),
average_days_between_orders = mean(days_since_prior_order),
total_order = n_distinct(order_number),
average_basket = n() / n_distinct(order_number)
)
head(user_summ)
# Per (user, product): purchase counts and first/last purchase recency.
user_prior_prod_cnt <- opp %>%
group_by(user_id, product_id) %>%
summarize(prior_prod_cnt = n(),
last_purchased_orders_ago = min(orders_ago),
first_purchased_orders_ago = max(orders_ago),
average_days_between_ord_prods = mean(days_since_prior_order)
)
head(user_prior_prod_cnt)
# Merge datasets to create training frame
# Target curr_prod_purchased = 1 iff the (user, product) pair appears as a
# reorder in the user's train-set order (order_id non-NA after the join).
opt_user <- left_join(filter(opt, reordered==1), ord, by="order_id")
dt_expanded <- left_join(user_prod_list, opt_user, by=c("user_id", "product_id"))
dt_expanded <- dt_expanded %>% mutate(curr_prod_purchased=ifelse(!is.na(order_id), 1, 0))
#head(dt_expanded)
train <- left_join(dt_expanded, user_summ, by="user_id")
train <- left_join(train, user_prior_prod_cnt, by=c("user_id", "product_id"))
varnames <- setdiff(colnames(train), c("user_id","order_id","curr_prod_purchased"))
head(train)
# Create the test frame
test_orders <- filter(ord, eval_set=="test")
dt_expanded_test <- inner_join(user_prod_list, test_orders, by=c("user_id"))
# Placeholder random target so the test frame has the same schema as train.
dt_expanded_test <- dt_expanded_test %>% mutate(curr_prod_purchased=sample(c(0,1), n(), replace=TRUE))
#head(dt_expanded_test)
test <- inner_join(dt_expanded_test, user_summ, by="user_id")
test <- inner_join(test, user_prior_prod_cnt, by=c("user_id", "product_id"))
head(test)
# Check target
test %>% ungroup %>% distinct(curr_prod_purchased)
train %>% ungroup %>% distinct(curr_prod_purchased)
# Sample users for the validation set
set.seed(2222)
unique_user_id <- select(ord_max, user_id)
head(unique_user_id)
val_users <- sample_n(unique_user_id, size=10000, replace=FALSE)
head(val_users)
# Ungroup and convert to factor
train <- train %>% ungroup() %>% mutate(curr_prod_purchased=as.factor(curr_prod_purchased))
test <- test %>% ungroup() %>% mutate(curr_prod_purchased=as.factor(curr_prod_purchased))
test %>% distinct(curr_prod_purchased)
train %>% distinct(curr_prod_purchased)
# Some exploratory analysis
# The top 10 aisles, which represent roughly 45% of sales
library(ggplot2)
tmp <- filter(opp, reordered == 1) %>%
group_by(aisle,department) %>%
tally(sort=TRUE) %>%
mutate(perc = round(100*n/nrow(opp),2)) %>%
ungroup() %>%
top_n(10,n)
tmp %>%
ggplot(aes(x=reorder(aisle, -n), y=n, fill=department)) +
geom_bar(stat="identity") +
theme(axis.text.x=element_text(angle=90, hjust=1), axis.title.x = element_blank())#
# Products vs number of times ordered/reordered
t <- filter(opp) %>% select(product_id, product_name) %>% group_by(product_id, product_name) %>% summarize(ncount=n()) %>% ungroup()
r <- filter(opp, reordered == 1) %>% select(product_id, product_name) %>% group_by(product_id, product_name) %>% summarize(rcount=n()) %>% ungroup()
t <- left_join(t, r, by="product_id") %>% top_n(20,ncount)
t
t %>%
ggplot() +
geom_bar(aes(x=reorder(product_name.x , -ncount), y=ncount, fill="Number of times ordered" ), stat="identity") +
geom_bar(aes(x=reorder(product_name.x , -ncount), y=rcount, fill="Number of times reordered" ), stat="identity") +
guides(fill=guide_legend(title=element_blank())) +
theme(axis.text.x=element_text(angle=90, hjust=1), axis.title.x = element_blank(), axis.title.y=element_blank())#
# Add data to H2O
colnames(train)
select(train, prior_orders)
# Train/validation split by user: the 10k sampled users are held out entirely.
train_tpl <- anti_join(train, val_users, by="user_id") %>% select(c("curr_prod_purchased", "user_id", "reordered_count", "product_id", "last_order_number", "purch_count", "avg_hour.x",
"order_dow", "add_to_cart_order", "reordered", "user_total_products_ordered_hist", "uniq_prod", "uniq_aisle", "uniq_dept",
"prior_orders", "avg_hour.y", "average_days_between_orders", "total_order", "average_basket", "prior_prod_cnt",
"last_purchased_orders_ago", "first_purchased_orders_ago", "average_days_between_ord_prods"))
val_tpl <- inner_join(train, val_users, by="user_id") %>% select(c("curr_prod_purchased", "user_id", "reordered_count", "product_id", "last_order_number", "purch_count", "avg_hour.x",
"order_dow", "add_to_cart_order", "reordered", "user_total_products_ordered_hist", "uniq_prod", "uniq_aisle", "uniq_dept",
"prior_orders", "avg_hour.y", "average_days_between_orders", "total_order", "average_basket", "prior_prod_cnt",
"last_purchased_orders_ago", "first_purchased_orders_ago", "average_days_between_ord_prods"))
train.hex <- as.h2o(train_tpl, destination_frame = "train.hex")
val.hex <- as.h2o(val_tpl, destination_frame = "val.hex")
# Free up some memory
rm(train, opp, opt, ord, prod, dept, ais, user_prod_list, user_summ);gc()
rm(train_tpl, val_tpl);gc();
# Train xgboost model
# Early stopping: stop after 3 scoring rounds without logloss improvement
# on the validation frame (scored after every tree).
xgb <- h2o.xgboost(x = c("user_id", "reordered_count", "product_id", "last_order_number", "purch_count", "avg_hour.x",
"order_dow", "add_to_cart_order", "reordered", "user_total_products_ordered_hist", "uniq_prod", "uniq_aisle", "uniq_dept",
"prior_orders", "avg_hour.y", "average_days_between_orders", "total_order", "average_basket", "prior_prod_cnt",
"last_purchased_orders_ago", "first_purchased_orders_ago", "average_days_between_ord_prods")
,y = "curr_prod_purchased"
,training_frame = train.hex
,validation_frame = val.hex
,model_id = "xgb_model_1"
,stopping_rounds = 3
,stopping_metric = "logloss"
,distribution = "bernoulli"
,score_tree_interval = 1
,learn_rate=0.1
,ntrees=20
,subsample = 0.75
,colsample_bytree = 0.75
,tree_method = "hist"
,grow_policy = "lossguide"
,booster = "gbtree"
,gamma = 0.0
)
# Make predictions
test.hex <- as.h2o(test, destination_frame = "test.hex")
predictions <- as.data.table(h2o.predict(xgb, test.hex))
predictions <- data.table(order_id=test$order_id, product_id=test$product_id, testPreds=predictions$predict, p0=predictions$p0, p1=predictions$p1)
filter(predictions, testPreds==1)
# Concatenate the kept product ids per order; empty baskets become "None".
# NOTE(review): the filter p0 > 0.21 keeps products whose *no-reorder*
# probability exceeds 0.21 (i.e. p1 < 0.79) -- verify this was not meant
# to be p1 > 0.21.
testPreds <- predictions[,.(products=paste0(product_id[p0>0.21], collapse=" ")), by=order_id]
set(testPreds, which(testPreds[["products"]]==""), "products", "None")
# Create submission file
fwrite(testPreds, "/Users/mauropelucchi/Desktop/Instacart/submission.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.fdr_table.R
\name{plot.fdr_table}
\alias{plot.fdr_table}
\title{Plot functionality for results of class "fdr_table" as produced by e.g. the
function assess_fdr_overall()}
\usage{
\method{plot}{fdr_table}(x, output = "Rconsole",
filename = "FDR_report_overall", ...)
}
\arguments{
\item{x}{List of class "fdr_table" as produced e.g. by the function
assess_fdr_overall() from this package.}
\item{output}{Choose output type. "pdf_csv" creates the output as files in
the working directory, "Rconsole" triggers delivery of the output to the
console enabling further computation or custom plotting / output.}
\item{filename}{Basename for output files to be created (if output =
"pdf_csv" has been selected).}
\item{...}{Extra arguments passed on to functions inside this.}
}
\value{
Originally this returned nothing, but now it makes a list of ggplot2
plots which may be passed along and plotted as desired (with that in mind,
I would like to remove the explicit plot() calls in this function).
}
\description{
This function creates standard plots from results of class "fdr_table" as
produced by e.g. the function assess_fdr_overall(), visualizing ID numbers as
a function of estimated FDR, and estimated FDR as a function of the m_score
cutoff.
}
\examples{
data("OpenSWATH_data", package="SWATH2stats")
data("Study_design", package="SWATH2stats")
data <- sample_annotation(OpenSWATH_data, Study_design)
x <- assess_fdr_overall(data, FFT=0.7, output="Rconsole", plot=FALSE)
plot.fdr_table(x, output="pdf_csv", filename="Assess_fdr_overall_testplot")
}
\author{
Moritz Heusel
}
|
/man/plot.fdr_table.Rd
|
no_license
|
abelew/SWATH2stats
|
R
| false
| true
| 1,669
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.fdr_table.R
\name{plot.fdr_table}
\alias{plot.fdr_table}
\title{Plot functionality for results of class "fdr_table" as produced by e.g. the
function assess_fdr_overall()}
\usage{
\method{plot}{fdr_table}(x, output = "Rconsole",
filename = "FDR_report_overall", ...)
}
\arguments{
\item{x}{List of class "fdr_table" as produced e.g. by the function
assess_fdr_overall() from this package.}
\item{output}{Choose output type. "pdf_csv" creates the output as files in
the working directory, "Rconsole" triggers delivery of the output to the
console enabling further computation or custom plotting / output.}
\item{filename}{Basename for output files to be created (if output =
"pdf_csv" has been selected).}
\item{...}{Extra arguments passed on to functions inside this.}
}
\value{
Originally this returned nothing, but now it makes a list of ggplot2
plots which may be passed along and plotted as desired (with that in mind,
I would like to remove the explicit plot() calls in this function).
}
\description{
This function creates standard plots from results of class "fdr_table" as
produced by e.g. the function assess_fdr_overall(), visualizing ID numbers as
a function of estimated FDR, and estimated FDR as a function of the m_score
cutoff.
}
\examples{
data("OpenSWATH_data", package="SWATH2stats")
data("Study_design", package="SWATH2stats")
data <- sample_annotation(OpenSWATH_data, Study_design)
x <- assess_fdr_overall(data, FFT=0.7, output="Rconsole", plot=FALSE)
plot.fdr_table(x, output="pdf_csv", filename="Assess_fdr_overall_testplot")
}
\author{
Moritz Heusel
}
|
#######################################################################
# dbscan - Density Based Clustering of Applications with Noise
# and Related Algorithms
# Copyright (C) 2015 Michael Hahsler
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Distances from each point in x to its k nearest neighbors, returned as
# an n x k matrix with columns sorted by increasing distance (thin
# wrapper around dbscan::kNN; extra arguments are forwarded to it).
kNNdist <- function(x, k, ...) dbscan::kNN(x, k, sort = TRUE, ...)$dist
# Sorted k-NN distance plot, commonly used to choose DBSCAN's eps
# parameter (look for the "knee" in the curve).
#
# Fixes vs. the original: the distance vector was sorted twice (once on
# assignment and again inside plot()), the local variable shadowed the
# kNNdist() function, and the x-axis label misspelled "Points".
kNNdistplot <- function(x, k = 4, ...) {
  # sort() flattens the n x k distance matrix into one ascending vector.
  dists <- sort(kNNdist(x, k, ...))
  plot(dists, type = "l", ylab = paste(k, "-NN distance", sep = ""),
    xlab = "Points (sample) sorted by distance")
}
|
/dbscan/R/kNNdist.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 1,170
|
r
|
#######################################################################
# dbscan - Density Based Clustering of Applications with Noise
# and Related Algorithms
# Copyright (C) 2015 Michael Hahsler
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Distances from each point in x to its k nearest neighbors, returned as
# an n x k matrix with columns sorted by increasing distance (thin
# wrapper around dbscan::kNN; extra arguments are forwarded to it).
kNNdist <- function(x, k, ...) dbscan::kNN(x, k, sort = TRUE, ...)$dist
# Sorted k-NN distance plot, commonly used to choose DBSCAN's eps
# parameter (look for the "knee" in the curve).
#
# Fixes vs. the original: the distance vector was sorted twice (once on
# assignment and again inside plot()), the local variable shadowed the
# kNNdist() function, and the x-axis label misspelled "Points".
kNNdistplot <- function(x, k = 4, ...) {
  # sort() flattens the n x k distance matrix into one ascending vector.
  dists <- sort(kNNdist(x, k, ...))
  plot(dists, type = "l", ylab = paste(k, "-NN distance", sep = ""),
    xlab = "Points (sample) sorted by distance")
}
|
# ISLR 10.7 Exercise 3: one step of manual K-means (K = 2) on a toy
# 2-D data set of six observations.
x1 = c(1, 1, 0, 5, 6, 4)
x2 = c(4, 3, 4, 1, 2, 0)
plot(x1, x2)
df = data.frame(x1, x2)
plot(df)
# Randomly split the 6 observations into two initial clusters of 3.
sample.df = sample(6, 3)
cl1 = df[sample.df,]
cl2 = df[-sample.df,]
# Centroid of cluster 1: mean of each coordinate.
mean.cl1.x = mean(cl1[, 1])
mean.cl1.y = mean(cl1[, 2])
centroid1 = c(mean.cl1.x, mean.cl1.y)
plot(centroid1[1], centroid1[2])
# Centroid of cluster 2.
mean.cl2.x = mean(cl2[, 1])
mean.cl2.y = mean(cl2[, 2])
centroid2 = c(mean.cl2.x, mean.cl2.y)
plot(centroid2[1], centroid2[2])
# Re-plot the data with cluster 1 highlighted and both centroids marked.
plot(df)
points(cl1, col = "blue")
points(centroid1[1], centroid1[2], col = "red")
points(centroid2[1], centroid2[2], col = "green")
# Euclidean distance between two points given as coordinate vectors.
# Generalized from the original 2-D-only implementation: sums the squared
# differences over all dimensions, so it works for vectors of any length
# (and is identical to the original for 2-D input).
euclid = function(a, b) {
  return(sqrt(sum((a - b)^2)))
}
# Assign each observation (row) of x to its nearest centroid.
# Returns a vector with 1 for rows closer to centroid1 and 2 otherwise
# (ties go to centroid2, matching the original strict "<" comparison).
# seq_len() makes the loop safe for a zero-row input.
assign_labels = function(x, centroid1, centroid2) {
  labels = rep(NA, nrow(x))
  for (i in seq_len(nrow(x))) {
    if (euclid(x[i, ], centroid1) < euclid(x[i, ], centroid2)) {
      labels[i] = 1
    } else {
      labels[i] = 2
    }
  }
  return(labels)
}
# Label each observation of df by its nearest centroid.
# Bug fix: the original called assign_labels(x, ...) but no object `x`
# is defined in this script -- the data frame is named `df`.
labels = assign_labels(df, centroid1, centroid2)
labels
?prcomp
|
/191028 - 10.7. Exercises - Conceptual - Exercise 3.r
|
no_license
|
mare-astrorum/introduction-to-statistical-learning-practicals
|
R
| false
| false
| 949
|
r
|
# ISLR 10.7 Exercise 3: one step of manual K-means (K = 2) on a toy
# 2-D data set of six observations.
x1 = c(1, 1, 0, 5, 6, 4)
x2 = c(4, 3, 4, 1, 2, 0)
plot(x1, x2)
df = data.frame(x1, x2)
plot(df)
# Randomly split the 6 observations into two initial clusters of 3.
sample.df = sample(6, 3)
cl1 = df[sample.df,]
cl2 = df[-sample.df,]
# Centroid of cluster 1: mean of each coordinate.
mean.cl1.x = mean(cl1[, 1])
mean.cl1.y = mean(cl1[, 2])
centroid1 = c(mean.cl1.x, mean.cl1.y)
plot(centroid1[1], centroid1[2])
# Centroid of cluster 2.
mean.cl2.x = mean(cl2[, 1])
mean.cl2.y = mean(cl2[, 2])
centroid2 = c(mean.cl2.x, mean.cl2.y)
plot(centroid2[1], centroid2[2])
# Re-plot the data with cluster 1 highlighted and both centroids marked.
plot(df)
points(cl1, col = "blue")
points(centroid1[1], centroid1[2], col = "red")
points(centroid2[1], centroid2[2], col = "green")
# Euclidean distance between two points given as coordinate vectors.
# Generalized from the original 2-D-only implementation: sums the squared
# differences over all dimensions, so it works for vectors of any length
# (and is identical to the original for 2-D input).
euclid = function(a, b) {
  return(sqrt(sum((a - b)^2)))
}
# Assign each observation (row) of x to its nearest centroid.
# Returns a vector with 1 for rows closer to centroid1 and 2 otherwise
# (ties go to centroid2, matching the original strict "<" comparison).
# seq_len() makes the loop safe for a zero-row input.
assign_labels = function(x, centroid1, centroid2) {
  labels = rep(NA, nrow(x))
  for (i in seq_len(nrow(x))) {
    if (euclid(x[i, ], centroid1) < euclid(x[i, ], centroid2)) {
      labels[i] = 1
    } else {
      labels[i] = 2
    }
  }
  return(labels)
}
# Label each observation of df by its nearest centroid.
# Bug fix: the original called assign_labels(x, ...) but no object `x`
# is defined in this script -- the data frame is named `df`.
labels = assign_labels(df, centroid1, centroid2)
labels
?prcomp
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 8723
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 8701
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 8701
c
c Input Parameter (command line, file):
c input filename QBFLIB/Wintersteiger/RankingFunctions/rankfunc41_unsigned_32.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 3426
c no.of clauses 8723
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 8701
c
c QBFLIB/Wintersteiger/RankingFunctions/rankfunc41_unsigned_32.qdimacs 3426 8723 E1 [578 579 643 644 932 933 1639 1640 1704 1705 1769 1770 2324 2325 2516 2517 2708 2709 2900 2901 3092 3093] 0 320 3082 8701 RED
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Wintersteiger/RankingFunctions/rankfunc41_unsigned_32/rankfunc41_unsigned_32.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 857
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 8723
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 8701
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 8701
c
c Input Parameter (command line, file):
c input filename QBFLIB/Wintersteiger/RankingFunctions/rankfunc41_unsigned_32.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 3426
c no.of clauses 8723
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 8701
c
c QBFLIB/Wintersteiger/RankingFunctions/rankfunc41_unsigned_32.qdimacs 3426 8723 E1 [578 579 643 644 932 933 1639 1640 1704 1705 1769 1770 2324 2325 2516 2517 2708 2709 2900 2901 3092 3093] 0 320 3082 8701 RED
|
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#------- Reconstruction of the 2019 provincial CPI (IPC) ------------
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Rebuilds each province's consumer price index as a weighted sum of the
# city-level CPI series. The weight matrix A (province x city) and the
# data objects Poblacion, CiudadProv and IPC_ciudades are created by the
# sourced scripts below.
# Load data
library(readxl)
library(ggplot2)
library(dplyr)
source(file = "Code/CargaData.R",local = TRUE)
source(file = "Code/MatrizPesos.R",local = TRUE)
IPC_prov = list()
for(Prov in 1:dim(Poblacion)[1]){
# Initialise an empty data frame with the same shape and column names as
# the city-level CPI tables -----------------
nfila = dim(IPC_ciudades[[1]])[1]
ncolum = dim(IPC_ciudades[[1]])[2]
Bd = matrix(NA,nrow = nfila, ncol = ncolum)
Bd = data.frame(Bd)
names(Bd) = names(IPC_ciudades[[1]])
Bd$Fecha = IPC_ciudades[[1]]$Fecha
# Fill with the reconstructed CPI (column 1 is the date) ------------
for(j in 2:ncolum){
for(i in 1:nfila){
Bd[i,j] = 0
for(ciuP in 1:dim(CiudadProv)[1] ){
# Add the weighted CPI of the principal cities (where the value exists)
if(!is.na(IPC_ciudades[[ciuP]][i,j])){
Bd[i,j] = Bd[i,j] + A[Prov,ciuP]*IPC_ciudades[[ciuP]][i,j]
}
# Otherwise nothing is added (e.g. Santo Domingo)
}
# No city contributed to this cell: mark it as missing.
if(Bd[i,j] == 0){
Bd[i,j] = NA
}
}
}
IPC_prov[[Prov]] = Bd
}
names(IPC_prov) = Poblacion$PROVINCIA
# ---------------------------------------------
save(IPC_prov,file = "Data/IPC_reconstruido.RData")
# ---------------------------------------------
# Save one CSV per province -------------
for(i in 1:length(IPC_prov)){
write.csv(IPC_prov[[i]],
file = paste0("Data/Provincias Reconstruidas/",names(IPC_prov)[i],".csv"))
}
# Example plot: general CPI series for one province -----------------
aux = IPC_prov$`SANTO DOMINGO DE LOS TSACHILAS`
BDDgraf = data.frame(Fecha= as.Date(aux$Fecha,format = "%d-%m-%y"),
SerieOrig = aux$GENERAL)
ggplot(data = BDDgraf, aes(x = Fecha, y = SerieOrig)) +
geom_line(size = 0.7,colour = "black")
|
/IPC Provincial-Reconstruido/IPC Provincial - Incompleto/IPC Provincial.R
|
no_license
|
InstitutoInvestigacionesEconomicasPUCE/Analisis_IPC
|
R
| false
| false
| 1,884
|
r
|
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#------------- Provincial CPI Reconstruction 2019 ---------------
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Reconstructs provincial CPI series as weighted sums of city-level CPI
# series; the weight matrix A and the data objects (Poblacion, IPC_ciudades,
# CiudadProv) are created by the two sourced scripts below.
# Load data
library(readxl)
library(ggplot2)
library(dplyr)
source(file = "Code/CargaData.R", local = TRUE)
source(file = "Code/MatrizPesos.R", local = TRUE)
IPC_prov <- list()
for (Prov in seq_len(nrow(Poblacion))) {
  # Initialise an empty data frame with the same layout as the city series
  nfila <- nrow(IPC_ciudades[[1]])
  ncolum <- ncol(IPC_ciudades[[1]])
  Bd <- data.frame(matrix(NA, nrow = nfila, ncol = ncolum))
  names(Bd) <- names(IPC_ciudades[[1]])
  Bd$Fecha <- IPC_ciudades[[1]]$Fecha
  # Fill with the reconstructed CPI: weighted sum over the principal cities
  for (j in 2:ncolum) {
    for (i in seq_len(nfila)) {
      Bd[i, j] <- 0
      for (ciuP in seq_len(nrow(CiudadProv))) {
        # Add the CPI of each principal city that has data for this cell;
        # cities with NA contribute nothing (e.g. Santo Domingo)
        if (!is.na(IPC_ciudades[[ciuP]][i, j])) {
          Bd[i, j] <- Bd[i, j] + A[Prov, ciuP] * IPC_ciudades[[ciuP]][i, j]
        }
      }
      # A cell that received no contribution at all is treated as missing
      if (Bd[i, j] == 0) {
        Bd[i, j] <- NA
      }
    }
  }
  IPC_prov[[Prov]] <- Bd
}
names(IPC_prov) <- Poblacion$PROVINCIA
# ---------------------------------------------
save(IPC_prov, file = "Data/IPC_reconstruido.RData")
# ---------------------------------------------
# Save one CSV per province -------------
for (i in seq_along(IPC_prov)) {
  write.csv(IPC_prov[[i]],
            file = paste0("Data/Provincias Reconstruidas/", names(IPC_prov)[i], ".csv"))
}
# Example plot -----------------
aux <- IPC_prov$`SANTO DOMINGO DE LOS TSACHILAS`
BDDgraf <- data.frame(Fecha = as.Date(aux$Fecha, format = "%d-%m-%y"),
                      SerieOrig = aux$GENERAL)
ggplot(data = BDDgraf, aes(x = Fecha, y = SerieOrig)) +
  geom_line(size = 0.7, colour = "black")
|
library(quantmod)
library(PerformanceAnalytics)
library(tawny)
#change working directory
setwd("/home/nick/model_check")
# Row-wise standard deviation of a matrix-like object (e.g. an xts series).
#
# Args:
#   xtsobj: a matrix or xts object with at least one column.
# Returns:
#   An unnamed numeric vector with one sd per row, matching the original
#   loop-based implementation (sd of the row values treated as a plain vector).
rowsd <- function(xtsobj)
{
  # apply() replaces the manual index loop; unname() preserves the original
  # behaviour of returning a bare numeric vector without row labels.
  unname(apply(xtsobj, 1, function(r) sd(as.vector(r))))
}
# Map z-scores to weight adjustments using a step function:
#   z <= -1.5 -> -0.100, z <= -1 -> -0.075, z <= -0.5 -> -0.050,
#   z <=  0.5 ->  0.000, z <=  1 ->  0.050, z <=  1.5 ->  0.075,
#   otherwise ->  0.100
#
# Args:
#   xtsobj: a matrix or xts object of z-scores.
# Returns:
#   A plain numeric matrix of adjustments with the same dimensions as the
#   input (matching the original mat.or.vec-based result).
wtsadj <- function(xtsobj)
{
  breaks <- c(-1.5, -1, -0.5, 0.5, 1, 1.5)
  adj <- c(-0.1, -0.075, -0.05, 0, 0.05, 0.075, 0.1)
  z <- as.matrix(xtsobj)
  # findInterval with left.open = TRUE reproduces the original "<=" boundary
  # handling (e.g. z == -1.5 maps to -0.1, z == -1 maps to -0.075).
  # Vectorised lookup replaces the original O(n*m) double loop; NA inputs
  # now yield NA instead of erroring in `if`.
  matrix(adj[findInterval(z, breaks, left.open = TRUE) + 1],
         nrow = nrow(z), ncol = ncol(z))
}
# Rolling window size: 1 year expressed in months
wdsz <- 1
wdsz <- wdsz * 12
# Read return and weight time series
returns <- read.csv("returns.csv", header = TRUE, stringsAsFactors = FALSE)
# _g for global, _i for international (international drops the US column)
ret_xts_g <- xts(returns[, 2:dim(returns)[2]],
                 order.by = as.Date(as.character(returns$Date),
                                    "%m/%d/%Y"))
ret_xts_i <- ret_xts_g
ret_xts_i$US <- NULL
wts <- read.csv("wts.csv", header = TRUE, stringsAsFactors = FALSE)
wts_xts_g <- xts(wts[, 2:dim(wts)[2]],
                 order.by = as.Date(as.character(wts$Date),
                                    "%m/%d/%Y"))
wts_xts_i <- wts_xts_g
wts_xts_i$US <- NULL
# Benchmark returns (columns 2:3 hold the Global and Inter series)
bm <- read.csv("benchmark.csv", header = TRUE, stringsAsFactors = FALSE)
bm_xts <- xts(bm[, 2:3],
              order.by = as.Date(as.character(bm[, 1]),
                                 "%m/%d/%Y"))
#f <-periodReturn(e,period='yearly')
# Rolling cumulative returns over the window
roll_ret_xts_i <- rollapply(ret_xts_i, width = wdsz, FUN = function(x) {cumprod(1 + x)[wdsz, ]})
roll_ret_xts_g <- rollapply(ret_xts_g, width = wdsz, FUN = function(x) {cumprod(1 + x)[wdsz, ]})
#write.zoo(d, file = "demo1.csv", sep=",")
# Rolling cross-sectional average
mean_ret_g <- rowMeans(roll_ret_xts_g)
mean_ret_i <- rowMeans(roll_ret_xts_i)
# Rolling cross-sectional standard deviation
std_ret_xts_i <- rowsd(roll_ret_xts_i)
std_ret_xts_g <- rowsd(roll_ret_xts_g)
# Rolling z-score of each asset vs the cross-section
z_ret_g <- (roll_ret_xts_g - mean_ret_g) / std_ret_xts_g
z_ret_i <- (roll_ret_xts_i - mean_ret_i) / std_ret_xts_i
# Adjusted weights
## Weight changes implied by the z-scores
adjwts_g <- wtsadj(z_ret_g)
adjwts_i <- wtsadj(z_ret_i)
## Apply the adjustments to the base weights
adjwts_xts_g <- wts_xts_g[wdsz:dim(wts_xts_g)[1], ] + adjwts_g
adjwts_xts_i <- wts_xts_i[wdsz:dim(wts_xts_i)[1], ] + adjwts_i
## Floor at zero (no short positions)
adjwts_xts_g <- pmax(as.matrix(adjwts_xts_g), 0)
adjwts_xts_i <- pmax(as.matrix(adjwts_xts_i), 0)
## Rescale each row to sum to 100%
adjwts_xts_g <- t(apply(adjwts_xts_g, 1, FUN = function(x) {x / as.double(sum(x))}))
adjwts_xts_i <- t(apply(adjwts_xts_i, 1, FUN = function(x) {x / as.double(sum(x))}))
# Weighted returns
wtd_ret_xts_g <- ret_xts_g[wdsz:dim(wts_xts_g)[1], ] * adjwts_xts_g
wtd_ret_xts_i <- ret_xts_i[wdsz:dim(wts_xts_i)[1], ] * adjwts_xts_i
# Total model return
model_ret_g <- rowSums(wtd_ret_xts_g)
model_ret_i <- rowSums(wtd_ret_xts_i)
# Build index levels from the model returns
idx_ret_xts_g <- xts(cumprod(1 + model_ret_g), order.by = index(wtd_ret_xts_g))
idx_ret_xts_i <- xts(cumprod(1 + model_ret_i), order.by = index(wtd_ret_xts_i))
colnames(idx_ret_xts_g) <- 'Global'
colnames(idx_ret_xts_i) <- 'International'
# Convert to annual returns and compute active return vs benchmark
model_ann_ret_g <- periodReturn(idx_ret_xts_g, period = 'yearly')
model_ann_ret_i <- periodReturn(idx_ret_xts_i, period = 'yearly')
model_ann_ret_g <- merge(model_ann_ret_g, bm_xts$'Global')
colnames(model_ann_ret_g) <- c('global_return', 'benchmark')
active_ann_ret_g <- model_ann_ret_g$'global_return' - model_ann_ret_g$'benchmark'
indexFormat(active_ann_ret_g) <- "%Y"
model_ann_ret_i <- merge(model_ann_ret_i, bm_xts$'Inter')
colnames(model_ann_ret_i) <- c('inter_return', 'benchmark')
# BUG FIX: the international active return previously subtracted the *global*
# benchmark (model_ann_ret_g$'benchmark'); use the international benchmark.
active_ann_ret_i <- (model_ann_ret_i$'inter_return' - model_ann_ret_i$'benchmark')
# BUG FIX: indexFormat was set twice on the global series and never on the
# international one; set it on the international series here.
indexFormat(active_ann_ret_i) <- "%Y"
# Plot active returns
X11(width = 12, height = 8);
barplot(active_ann_ret_g, yaxp = c(-0.3, 0.7, 10), cex.axis = 0.7,
        las = 2, col = 'blue', names.arg = format(index(active_ann_ret_g), "%Y"))
title('Global_Model_Active_Return')
X11(width = 12, height = 8);
barplot(active_ann_ret_i, yaxp = c(-0.3, 0.7, 10), cex.axis = 0.7,
        las = 2, col = 'blue', names.arg = format(index(active_ann_ret_i), "%Y"))
title('International_Model_Active_Return')
|
/modelcheck.r
|
no_license
|
nickchy/model_check
|
R
| false
| false
| 4,349
|
r
|
library(quantmod)
library(PerformanceAnalytics)
library(tawny)
#change working directory
setwd("/home/nick/model_check")
# Row-wise standard deviation of a matrix-like object (e.g. an xts series).
#
# Args:
#   xtsobj: a matrix or xts object with at least one column.
# Returns:
#   An unnamed numeric vector with one sd per row, matching the original
#   loop-based implementation (sd of the row values treated as a plain vector).
rowsd <- function(xtsobj)
{
  # apply() replaces the manual index loop; unname() preserves the original
  # behaviour of returning a bare numeric vector without row labels.
  unname(apply(xtsobj, 1, function(r) sd(as.vector(r))))
}
# Map z-scores to weight adjustments using a step function:
#   z <= -1.5 -> -0.100, z <= -1 -> -0.075, z <= -0.5 -> -0.050,
#   z <=  0.5 ->  0.000, z <=  1 ->  0.050, z <=  1.5 ->  0.075,
#   otherwise ->  0.100
#
# Args:
#   xtsobj: a matrix or xts object of z-scores.
# Returns:
#   A plain numeric matrix of adjustments with the same dimensions as the
#   input (matching the original mat.or.vec-based result).
wtsadj <- function(xtsobj)
{
  breaks <- c(-1.5, -1, -0.5, 0.5, 1, 1.5)
  adj <- c(-0.1, -0.075, -0.05, 0, 0.05, 0.075, 0.1)
  z <- as.matrix(xtsobj)
  # findInterval with left.open = TRUE reproduces the original "<=" boundary
  # handling (e.g. z == -1.5 maps to -0.1, z == -1 maps to -0.075).
  # Vectorised lookup replaces the original O(n*m) double loop; NA inputs
  # now yield NA instead of erroring in `if`.
  matrix(adj[findInterval(z, breaks, left.open = TRUE) + 1],
         nrow = nrow(z), ncol = ncol(z))
}
# Rolling window size: 1 year expressed in months
wdsz <- 1
wdsz <- wdsz * 12
# Read return and weight time series
returns <- read.csv("returns.csv", header = TRUE, stringsAsFactors = FALSE)
# _g for global, _i for international (international drops the US column)
ret_xts_g <- xts(returns[, 2:dim(returns)[2]],
                 order.by = as.Date(as.character(returns$Date),
                                    "%m/%d/%Y"))
ret_xts_i <- ret_xts_g
ret_xts_i$US <- NULL
wts <- read.csv("wts.csv", header = TRUE, stringsAsFactors = FALSE)
wts_xts_g <- xts(wts[, 2:dim(wts)[2]],
                 order.by = as.Date(as.character(wts$Date),
                                    "%m/%d/%Y"))
wts_xts_i <- wts_xts_g
wts_xts_i$US <- NULL
# Benchmark returns (columns 2:3 hold the Global and Inter series)
bm <- read.csv("benchmark.csv", header = TRUE, stringsAsFactors = FALSE)
bm_xts <- xts(bm[, 2:3],
              order.by = as.Date(as.character(bm[, 1]),
                                 "%m/%d/%Y"))
#f <-periodReturn(e,period='yearly')
# Rolling cumulative returns over the window
roll_ret_xts_i <- rollapply(ret_xts_i, width = wdsz, FUN = function(x) {cumprod(1 + x)[wdsz, ]})
roll_ret_xts_g <- rollapply(ret_xts_g, width = wdsz, FUN = function(x) {cumprod(1 + x)[wdsz, ]})
#write.zoo(d, file = "demo1.csv", sep=",")
# Rolling cross-sectional average
mean_ret_g <- rowMeans(roll_ret_xts_g)
mean_ret_i <- rowMeans(roll_ret_xts_i)
# Rolling cross-sectional standard deviation
std_ret_xts_i <- rowsd(roll_ret_xts_i)
std_ret_xts_g <- rowsd(roll_ret_xts_g)
# Rolling z-score of each asset vs the cross-section
z_ret_g <- (roll_ret_xts_g - mean_ret_g) / std_ret_xts_g
z_ret_i <- (roll_ret_xts_i - mean_ret_i) / std_ret_xts_i
# Adjusted weights
## Weight changes implied by the z-scores
adjwts_g <- wtsadj(z_ret_g)
adjwts_i <- wtsadj(z_ret_i)
## Apply the adjustments to the base weights
adjwts_xts_g <- wts_xts_g[wdsz:dim(wts_xts_g)[1], ] + adjwts_g
adjwts_xts_i <- wts_xts_i[wdsz:dim(wts_xts_i)[1], ] + adjwts_i
## Floor at zero (no short positions)
adjwts_xts_g <- pmax(as.matrix(adjwts_xts_g), 0)
adjwts_xts_i <- pmax(as.matrix(adjwts_xts_i), 0)
## Rescale each row to sum to 100%
adjwts_xts_g <- t(apply(adjwts_xts_g, 1, FUN = function(x) {x / as.double(sum(x))}))
adjwts_xts_i <- t(apply(adjwts_xts_i, 1, FUN = function(x) {x / as.double(sum(x))}))
# Weighted returns
wtd_ret_xts_g <- ret_xts_g[wdsz:dim(wts_xts_g)[1], ] * adjwts_xts_g
wtd_ret_xts_i <- ret_xts_i[wdsz:dim(wts_xts_i)[1], ] * adjwts_xts_i
# Total model return
model_ret_g <- rowSums(wtd_ret_xts_g)
model_ret_i <- rowSums(wtd_ret_xts_i)
# Build index levels from the model returns
idx_ret_xts_g <- xts(cumprod(1 + model_ret_g), order.by = index(wtd_ret_xts_g))
idx_ret_xts_i <- xts(cumprod(1 + model_ret_i), order.by = index(wtd_ret_xts_i))
colnames(idx_ret_xts_g) <- 'Global'
colnames(idx_ret_xts_i) <- 'International'
# Convert to annual returns and compute active return vs benchmark
model_ann_ret_g <- periodReturn(idx_ret_xts_g, period = 'yearly')
model_ann_ret_i <- periodReturn(idx_ret_xts_i, period = 'yearly')
model_ann_ret_g <- merge(model_ann_ret_g, bm_xts$'Global')
colnames(model_ann_ret_g) <- c('global_return', 'benchmark')
active_ann_ret_g <- model_ann_ret_g$'global_return' - model_ann_ret_g$'benchmark'
indexFormat(active_ann_ret_g) <- "%Y"
model_ann_ret_i <- merge(model_ann_ret_i, bm_xts$'Inter')
colnames(model_ann_ret_i) <- c('inter_return', 'benchmark')
# BUG FIX: the international active return previously subtracted the *global*
# benchmark (model_ann_ret_g$'benchmark'); use the international benchmark.
active_ann_ret_i <- (model_ann_ret_i$'inter_return' - model_ann_ret_i$'benchmark')
# BUG FIX: indexFormat was set twice on the global series and never on the
# international one; set it on the international series here.
indexFormat(active_ann_ret_i) <- "%Y"
# Plot active returns
X11(width = 12, height = 8);
barplot(active_ann_ret_g, yaxp = c(-0.3, 0.7, 10), cex.axis = 0.7,
        las = 2, col = 'blue', names.arg = format(index(active_ann_ret_g), "%Y"))
title('Global_Model_Active_Return')
X11(width = 12, height = 8);
barplot(active_ann_ret_i, yaxp = c(-0.3, 0.7, 10), cex.axis = 0.7,
        las = 2, col = 'blue', names.arg = format(index(active_ann_ret_i), "%Y"))
title('International_Model_Active_Return')
|
#' Create polygons from centroid coordinates
#'
#' Create polygon(s) from a data frame with coordinates of the polygon centroid(s)
#'
#' @param x data frame with at least two columns;
#'   the first two columns must contain longitude and latitude coordinates, respectively.
#'   See 'Details' section for how additional columns are handled
#' @param y numeric; the perpendicular distance from the polygon centroid (center) to its edge
#'   (i.e. half the length of one side of a polygon)
#' @param ... passed to \link[sf:sf]{st_sf} or to \link[sf:sfc]{st_sfc},
#'   e.g. for passing named arguments \code{crs} and \code{agr}
#'
#' @details This function was designed for someone who reads in a .csv file
#'   with a grid of coordinates representing SDM prediction points and needs to create
#'   prediction polygons with the .csv file coordinates as the polygon centroids.
#'   However, the function can be used to create square polygons of any size around the provided points,
#'   regardless of if those polygons touch or overlap.
#'   The created polygons are oriented so that, in a 2D plane, their edges are parallel to either the x or the y axis.
#'
#'   If \code{x} contains more than two columns, then additional columns will be treated as simple feature attributes,
#'   i.e. passed along as the first argument to \link[sf:sf]{st_sf}
#'
#'   If a \code{crs} is not specified in \code{...},
#'   then the \code{crs} attribute of the polygon(s) will be \code{NULL}.
#'
#' @return Object of class \code{sfc} (if \code{x} has exactly two columns) or class \code{sf}
#'   (if \code{x} has more than two columns). The object will have a geometry type of \code{POLYGON}.
#'   If the object is of class \code{sf}, the name of the geometry list-column will be "geometry"
#'
#' @examples
#' # Create an sfc object from a data frame of two columns
#' x <- data.frame(
#'   lon = c(5, 10, 15, 20, 5, 10, 15, 20),
#'   lat = c(5, 5, 5, 5, 10, 10, 10, 10)
#' )
#' pts2poly_centroids(x, 2.5, crs = 4326)
#'
#' # Create an sf object from a data frame of more than two columns
#' x <- data.frame(
#'   lon = c(5, 10, 15, 20, 5, 10, 15, 20),
#'   lat = c(5, 5, 5, 5, 10, 10, 10, 10),
#'   sdm.pred = runif(8),
#'   sdm.pred2 = runif(8)
#' )
#' pts2poly_centroids(x, 2.5, crs = 4326, agr = "constant")
#'
#' @export
pts2poly_centroids <- function(x, y, ...) {
  # Input checks
  stopifnot(
    inherits(x, "data.frame"),
    ncol(x) >= 2,
    is.numeric(y)
  )
  # Scalar condition: use short-circuiting && (was the vectorised & operator)
  if (ncol(x) == 2 && ("agr" %in% names(list(...))))
    stop("agr cannot be passed to st_sfc(), ",
         "meaning when x only has two columns")
  # Use first two (lon and lat) columns to create list of sfg objects
  x.lonlat <- x %>%
    select(c(1, 2)) %>%
    set_names(c("lon", "lat"))
  # One closed square (5 vertices, first == last) centred on each point
  sfg.list <- unname(apply(x.lonlat, 1, function(i, j) {
    st_polygon(list(matrix(
      c(i[1] + j, i[1] - j, i[1] - j, i[1] + j, i[1] + j,
        i[2] + j, i[2] + j, i[2] - j, i[2] - j, i[2] + j),
      ncol = 2
    )))
  }, j = y))
  # Create sf or sfc object, as appropriate
  if (ncol(x) > 2) {
    x %>%
      select(-c(1, 2)) %>%
      st_sf(geometry = st_sfc(sfg.list), ...)
  } else {
    st_sfc(sfg.list, ...)
  }
}
|
/R/pts2poly_centroids.R
|
no_license
|
cran/eSDM
|
R
| false
| false
| 3,265
|
r
|
#' Create polygons from centroid coordinates
#'
#' Create polygon(s) from a data frame with coordinates of the polygon centroid(s)
#'
#' @param x data frame with at least two columns;
#'   the first two columns must contain longitude and latitude coordinates, respectively.
#'   See 'Details' section for how additional columns are handled
#' @param y numeric; the perpendicular distance from the polygon centroid (center) to its edge
#'   (i.e. half the length of one side of a polygon)
#' @param ... passed to \link[sf:sf]{st_sf} or to \link[sf:sfc]{st_sfc},
#'   e.g. for passing named arguments \code{crs} and \code{agr}
#'
#' @details This function was designed for someone who reads in a .csv file
#'   with a grid of coordinates representing SDM prediction points and needs to create
#'   prediction polygons with the .csv file coordinates as the polygon centroids.
#'   However, the function can be used to create square polygons of any size around the provided points,
#'   regardless of if those polygons touch or overlap.
#'   The created polygons are oriented so that, in a 2D plane, their edges are parallel to either the x or the y axis.
#'
#'   If \code{x} contains more than two columns, then additional columns will be treated as simple feature attributes,
#'   i.e. passed along as the first argument to \link[sf:sf]{st_sf}
#'
#'   If a \code{crs} is not specified in \code{...},
#'   then the \code{crs} attribute of the polygon(s) will be \code{NULL}.
#'
#' @return Object of class \code{sfc} (if \code{x} has exactly two columns) or class \code{sf}
#'   (if \code{x} has more than two columns). The object will have a geometry type of \code{POLYGON}.
#'   If the object is of class \code{sf}, the name of the geometry list-column will be "geometry"
#'
#' @examples
#' # Create an sfc object from a data frame of two columns
#' x <- data.frame(
#'   lon = c(5, 10, 15, 20, 5, 10, 15, 20),
#'   lat = c(5, 5, 5, 5, 10, 10, 10, 10)
#' )
#' pts2poly_centroids(x, 2.5, crs = 4326)
#'
#' # Create an sf object from a data frame of more than two columns
#' x <- data.frame(
#'   lon = c(5, 10, 15, 20, 5, 10, 15, 20),
#'   lat = c(5, 5, 5, 5, 10, 10, 10, 10),
#'   sdm.pred = runif(8),
#'   sdm.pred2 = runif(8)
#' )
#' pts2poly_centroids(x, 2.5, crs = 4326, agr = "constant")
#'
#' @export
pts2poly_centroids <- function(x, y, ...) {
  # Input checks
  stopifnot(
    inherits(x, "data.frame"),
    ncol(x) >= 2,
    is.numeric(y)
  )
  # Scalar condition: use short-circuiting && (was the vectorised & operator)
  if (ncol(x) == 2 && ("agr" %in% names(list(...))))
    stop("agr cannot be passed to st_sfc(), ",
         "meaning when x only has two columns")
  # Use first two (lon and lat) columns to create list of sfg objects
  x.lonlat <- x %>%
    select(c(1, 2)) %>%
    set_names(c("lon", "lat"))
  # One closed square (5 vertices, first == last) centred on each point
  sfg.list <- unname(apply(x.lonlat, 1, function(i, j) {
    st_polygon(list(matrix(
      c(i[1] + j, i[1] - j, i[1] - j, i[1] + j, i[1] + j,
        i[2] + j, i[2] + j, i[2] - j, i[2] - j, i[2] + j),
      ncol = 2
    )))
  }, j = y))
  # Create sf or sfc object, as appropriate
  if (ncol(x) > 2) {
    x %>%
      select(-c(1, 2)) %>%
      st_sf(geometry = st_sfc(sfg.list), ...)
  } else {
    st_sfc(sfg.list, ...)
  }
}
|
# Auto-extracted examples from the neLht/neEffdecomp help page of the
# medflex package (natural effect models for mediation analysis).
library(medflex)
### Name: neLht
### Title: Linear hypotheses for natural effect models
### Aliases: neLht neEffdecomp neEffdecomp.neModel neLht.neModel
### ** Examples
# Fit a natural effect model to the UPB data via the imputation approach,
# then test linear hypotheses about the natural direct/indirect effects.
data(UPBdata)
impData <- neImpute(UPB ~ att * negaff + gender + educ + age,
family = binomial, data = UPBdata)
neMod <- neModel(UPB ~ att0 * att1 + gender + educ + age,
family = binomial, expData = impData, se = "robust")
lht <- neLht(neMod, linfct = c("att0 = 0", "att0 + att0:att1 = 0",
"att1 = 0", "att1 + att0:att1 = 0",
"att0 + att1 + att0:att1 = 0"))
summary(lht)
## or obtain directly via neEffdecomp
eff <- neEffdecomp(neMod)
summary(eff)
## changing reference levels for multicategorical exposures
UPBdata$attcat <- factor(cut(UPBdata$att, 3), labels = c("L", "M", "H"))
impData <- neImpute(UPB ~ attcat * negaff + gender + educ + age,
family = binomial, data = UPBdata)
neMod <- neModel(UPB ~ attcat0 * attcat1 + gender + educ + age,
family = binomial, expData = impData, se = "robust")
neEffdecomp(neMod)
neEffdecomp(neMod, xRef = c("L", "H"))
neEffdecomp(neMod, xRef = c("M", "H"))
## changing reference levels for continuous exposures
# Quadratic exposure terms require xRef to give the two exposure values compared
impData <- neImpute(UPB ~ (att + I(att^2)) * negaff + gender + educ + age,
family = binomial, data = UPBdata)
neMod <- neModel(UPB ~ (att0 + I(att0^2)) * (att1 + I(att1^2)) + gender + educ + age,
family = binomial, expData = impData, se = "robust")
neEffdecomp(neMod)
neEffdecomp(neMod, xRef = c(-1, 0))
## changing covariate levels when allowing for modification
## of the indirect effect by baseline covariates
impData <- neImpute(UPB ~ (att + negaff + gender + educ + age)^2,
family = binomial, data = UPBdata)
neMod <- neModel(UPB ~ att0 * att1 + gender + educ + age + att1:gender + att1:age,
family = binomial, expData = impData, se = "robust")
neEffdecomp(neMod)
neEffdecomp(neMod, covLev = c(gender = "F", age = 0)) # default covariate levels
neEffdecomp(neMod, covLev = c(gender = "M", age = 40))
neEffdecomp(neMod, covLev = c(gender = "M", age = 40, educ = "L"))
neEffdecomp(neMod, covLev = c(gender = "M", age = 40, educ = "M"))
neEffdecomp(neMod, covLev = c(gender = "M", age = 40, educ = "H"))
# effect decomposition is independent of education level
neEffdecomp(neMod, covLev = c(gender = "M"))
# age is set to its default level when left unspecified
|
/data/genthat_extracted_code/medflex/examples/neLht.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 2,530
|
r
|
# Auto-extracted examples from the neLht/neEffdecomp help page of the
# medflex package (natural effect models for mediation analysis).
library(medflex)
### Name: neLht
### Title: Linear hypotheses for natural effect models
### Aliases: neLht neEffdecomp neEffdecomp.neModel neLht.neModel
### ** Examples
# Fit a natural effect model to the UPB data via the imputation approach,
# then test linear hypotheses about the natural direct/indirect effects.
data(UPBdata)
impData <- neImpute(UPB ~ att * negaff + gender + educ + age,
family = binomial, data = UPBdata)
neMod <- neModel(UPB ~ att0 * att1 + gender + educ + age,
family = binomial, expData = impData, se = "robust")
lht <- neLht(neMod, linfct = c("att0 = 0", "att0 + att0:att1 = 0",
"att1 = 0", "att1 + att0:att1 = 0",
"att0 + att1 + att0:att1 = 0"))
summary(lht)
## or obtain directly via neEffdecomp
eff <- neEffdecomp(neMod)
summary(eff)
## changing reference levels for multicategorical exposures
UPBdata$attcat <- factor(cut(UPBdata$att, 3), labels = c("L", "M", "H"))
impData <- neImpute(UPB ~ attcat * negaff + gender + educ + age,
family = binomial, data = UPBdata)
neMod <- neModel(UPB ~ attcat0 * attcat1 + gender + educ + age,
family = binomial, expData = impData, se = "robust")
neEffdecomp(neMod)
neEffdecomp(neMod, xRef = c("L", "H"))
neEffdecomp(neMod, xRef = c("M", "H"))
## changing reference levels for continuous exposures
# Quadratic exposure terms require xRef to give the two exposure values compared
impData <- neImpute(UPB ~ (att + I(att^2)) * negaff + gender + educ + age,
family = binomial, data = UPBdata)
neMod <- neModel(UPB ~ (att0 + I(att0^2)) * (att1 + I(att1^2)) + gender + educ + age,
family = binomial, expData = impData, se = "robust")
neEffdecomp(neMod)
neEffdecomp(neMod, xRef = c(-1, 0))
## changing covariate levels when allowing for modification
## of the indirect effect by baseline covariates
impData <- neImpute(UPB ~ (att + negaff + gender + educ + age)^2,
family = binomial, data = UPBdata)
neMod <- neModel(UPB ~ att0 * att1 + gender + educ + age + att1:gender + att1:age,
family = binomial, expData = impData, se = "robust")
neEffdecomp(neMod)
neEffdecomp(neMod, covLev = c(gender = "F", age = 0)) # default covariate levels
neEffdecomp(neMod, covLev = c(gender = "M", age = 40))
neEffdecomp(neMod, covLev = c(gender = "M", age = 40, educ = "L"))
neEffdecomp(neMod, covLev = c(gender = "M", age = 40, educ = "M"))
neEffdecomp(neMod, covLev = c(gender = "M", age = 40, educ = "H"))
# effect decomposition is independent of education level
neEffdecomp(neMod, covLev = c(gender = "M"))
# age is set to its default level when left unspecified
|
# This file saves almost all plots used in the report.
rm(list = ls())  # NOTE(review): clearing the workspace inside a script is fragile
library('lattice')
library('ggplot2')
library(latticeExtra)
# Prepare data frame
df <- load("ALL")  # load() returns the *names* of the loaded objects; data is in ALL
ALL$X.2 <- NULL
ALL$X.1 <- NULL
ALL$X <- NULL
# Deal with time.
# BUG FIX: the original call as.POSIXct(ALL$Time, format("%Y-%m-%d %h:%m:%s"))
# passed the string as the positional `tz` argument (not `format`), and the
# conversion specifiers were wrong for times (%h = month name, %m = month,
# %s = epoch seconds). Use a named format with %H:%M:%S.
# TODO(review): confirm against the raw data that Time really is
# "YYYY-MM-DD HH:MM:SS".
ALL$Time <- as.POSIXct(ALL$Time, format = "%Y-%m-%d %H:%M:%S")
ALL <- ALL[order(ALL$Time), ]
# Choose option which delays to cover
# ALL_n<-ALL_n[ALL_n$Late>0,]
lines <- unique(ALL$Line)
stops <- unique(ALL$StopName)
directions <- unique(ALL$Direction)
# Load functions
source('nice_plots.R')
# Colour palette.
# BUG FIX: data_colors was previously defined *after* its first use in the
# t22 call below, which would fail with "object 'data_colors' not found".
data_colors <- topo.colors(4, alpha = 1)
# First plot to check if it works
t22 <- delays_and_correlations(ALL = ALL, line = 2,
                               direction = 'Salwator', color = data_colors[2])
trellis.device(device = "png", filename = ("correlations.png"))
# Plot multiple correlation plots in one .png
nrows <- 2
par(mfrow = c(nrows, nrows))
t1 <- delays_and_correlations(ALL = ALL, line = 6,
                              direction = 'KurdwanowP+R', color = data_colors[1])
t2 <- delays_and_correlations(ALL = ALL, line = 6,
                              direction = 'Salwator', color = data_colors[2])
t3 <- delays_and_correlations(ALL = ALL, line = 18,
                              direction = 'KrowodrzaGorka', color = data_colors[3])
t4 <- delays_and_correlations(ALL = ALL, line = 18,
                              direction = 'CzerwoneMakiP+R', color = data_colors[4])
dev.off()
# Save plots of delays on each stop into files time_series_<line>_<direction>.png
# (a loop replaces five duplicated device/print/dev.off blocks)
for (tplot in list(t1, t2, t3, t4, t22)) {
  trellis.device(device = "png", filename = paste0("time_series_", tplot$ylab, ".png"))
  print(tplot)
  dev.off()
}
|
/ploting_nice_plots.R
|
no_license
|
sbysiak/R-ttss-krk
|
R
| false
| false
| 1,977
|
r
|
# This file saves almost all plots used in the report.
rm(list = ls())  # NOTE(review): clearing the workspace inside a script is fragile
library('lattice')
library('ggplot2')
library(latticeExtra)
# Prepare data frame
df <- load("ALL")  # load() returns the *names* of the loaded objects; data is in ALL
ALL$X.2 <- NULL
ALL$X.1 <- NULL
ALL$X <- NULL
# Deal with time.
# BUG FIX: the original call as.POSIXct(ALL$Time, format("%Y-%m-%d %h:%m:%s"))
# passed the string as the positional `tz` argument (not `format`), and the
# conversion specifiers were wrong for times (%h = month name, %m = month,
# %s = epoch seconds). Use a named format with %H:%M:%S.
# TODO(review): confirm against the raw data that Time really is
# "YYYY-MM-DD HH:MM:SS".
ALL$Time <- as.POSIXct(ALL$Time, format = "%Y-%m-%d %H:%M:%S")
ALL <- ALL[order(ALL$Time), ]
# Choose option which delays to cover
# ALL_n<-ALL_n[ALL_n$Late>0,]
lines <- unique(ALL$Line)
stops <- unique(ALL$StopName)
directions <- unique(ALL$Direction)
# Load functions
source('nice_plots.R')
# Colour palette.
# BUG FIX: data_colors was previously defined *after* its first use in the
# t22 call below, which would fail with "object 'data_colors' not found".
data_colors <- topo.colors(4, alpha = 1)
# First plot to check if it works
t22 <- delays_and_correlations(ALL = ALL, line = 2,
                               direction = 'Salwator', color = data_colors[2])
trellis.device(device = "png", filename = ("correlations.png"))
# Plot multiple correlation plots in one .png
nrows <- 2
par(mfrow = c(nrows, nrows))
t1 <- delays_and_correlations(ALL = ALL, line = 6,
                              direction = 'KurdwanowP+R', color = data_colors[1])
t2 <- delays_and_correlations(ALL = ALL, line = 6,
                              direction = 'Salwator', color = data_colors[2])
t3 <- delays_and_correlations(ALL = ALL, line = 18,
                              direction = 'KrowodrzaGorka', color = data_colors[3])
t4 <- delays_and_correlations(ALL = ALL, line = 18,
                              direction = 'CzerwoneMakiP+R', color = data_colors[4])
dev.off()
# Save plots of delays on each stop into files time_series_<line>_<direction>.png
# (a loop replaces five duplicated device/print/dev.off blocks)
for (tplot in list(t1, t2, t3, t4, t22)) {
  trellis.device(device = "png", filename = paste0("time_series_", tplot$ylab, ".png"))
  print(tplot)
  dev.off()
}
|
# Student enrolment details for the course roster.
name <- "You Yi"
SID <- "24333111"
email <- "youyi316@berkeley.edu"
github_name <- "youyi316"
status <- "enrolled"
|
/info.r
|
no_license
|
youyi316/stat133
|
R
| false
| false
| 109
|
r
|
# Student enrolment details for the course roster.
name <- "You Yi"
SID <- "24333111"
email <- "youyi316@berkeley.edu"
github_name <- "youyi316"
status <- "enrolled"
|
#' Convert IRT model fitted using fit_enorm (from dexter package) to a mirt object
#'
#' This function is designed to allow users to fit a Rasch model using the
#' conditional maximum likelihood (CML) procedure but to still have access to all of the interactive
#' graphical displays and other functions that are available in unimirt.
#'
#' Note that model objects derived using the fit_enorm function do not retain the full data set.
#' As a result, the original data needs to be supplied as a second parameter to the conversion function.
#'
#' @param dexter.obj An object derived using the function fit_enorm from the R package dexter.
#' @param dexter.dat The data matrix used to fit the model held in dexter.obj.
#'
#' @examples
#' \dontrun{
#' raschcml=dexter::fit_enorm(as.matrix(mathsdata[,1:5]))
#' coef(raschcml)
#' mirtfromcml=dexter.to.unimirt(raschcml,as.matrix(mathsdata[,1:5]))
#' MirtTidyCoef(mirtfromcml)
#' }
#' @export
dexter.to.unimirt <- function(dexter.obj = NULL, dexter.dat = NULL)
{
  # General catch for a missing or misspecified model argument
  # (inherits() is the idiomatic replacement for "prms" %in% class(x))
  if (is.null(dexter.obj) || !inherits(dexter.obj, "prms")) {
    stop("An object of class prms must be provided")
  }
  # If data supplied as a dexter database connection, convert to a response matrix
  if (inherits(dexter.dat, "SQLiteConnection")) {
    dexter.dat <- dexter::get_resp_matrix(dexter.dat)
  }
  # Check that the response data covers the same items as the fitted model
  if (!all(colnames(dexter.dat) %in% as.character(dexter.obj$inputs$design$item_id))) {
    stop("dexter.obj and dexter.dat must have the same items")
  }
  # Sort model coefficients and data columns so items line up
  dex.coefs <- coef(dexter.obj)
  dex.coefs <- dex.coefs[order(dex.coefs$item_id, dex.coefs$item_score), ]
  dexter.dat <- dexter.dat[, order(colnames(dexter.dat))]
  if (all(c("item_id", "item_score", "beta") %in% names(dex.coefs))) {
    # Build "<item>_Cat<score>" labels matching mirt's parameter layout
    item_scores <- paste0("Cat", dex.coefs$item_score)
    dexlabs <- paste(dex.coefs$item_id, item_scores, sep = "_")
    betapars <- dex.coefs$beta
  } else {
    # Error if key values not available
    stop("coefs(dexter.obj) must provide item_id, item_score and beta")
  }
  # Recover the item name from each "<item>_Cat<score>" label
  stop1 <- regexpr("_Cat", dexlabs)
  dexites <- dexlabs
  if (max(stop1) > 0) {
    dexites <- substr(dexlabs, 1, stop1 - 1)
  }
  # mirt intercepts ("d" parameters) are minus the cumulative sum of the
  # dexter beta parameters within each item
  dexds <- NA + betapars
  for (x in unique(dexites)) {
    dexds[dexites == x] <- -cumsum(betapars[dexites == x])
  }
  # Build a Rasch parameter skeleton in mirt, inject the CML item parameters,
  # fix them, and re-estimate only the group (latent distribution) parameters
  mA <- mirt::mirt(dexter.dat, 1, "Rasch", pars = "values")
  mA$value[mA$name %in% c("d", paste0("d", 1:1000))] <- dexds
  mA$est <- FALSE
  mA$est[mA$class == "GroupPars"] <- TRUE
  mB <- mirt::mirt(dexter.dat, 1, "Rasch", pars = mA)
  return(mB)
}
|
/R/dexter.to.unimirt.R
|
permissive
|
CambridgeAssessmentResearch/unimirt
|
R
| false
| false
| 2,694
|
r
|
#' Convert IRT model fitted using fit_enorm (from dexter package) to a mirt object
#'
#' This function is designed to allow users to fit a Rasch model using the
#' conditional maximum likelihood (CML) procedure but to still have access to all of the interactive
#' graphical displays and other functions that are available in unimirt.
#'
#' Note that model objects derived using the fit_enorm function do not retain the full data set.
#' As a result, the original data needs to be supplied as a second parameter to the conversion function.
#'
#' @param dexter.obj An object derived using the function fit_enorm from the R package dexter.
#' @param dexter.dat The data matrix used to fit the model held in dexter.obj.
#'
#' @examples
#' \dontrun{
#' raschcml=dexter::fit_enorm(as.matrix(mathsdata[,1:5]))
#' coef(raschcml)
#' mirtfromcml=dexter.to.unimirt(raschcml,as.matrix(mathsdata[,1:5]))
#' MirtTidyCoef(mirtfromcml)
#' }
#' @export
dexter.to.unimirt <- function(dexter.obj = NULL, dexter.dat = NULL)
{
  # General catch for a missing or misspecified model argument
  # (inherits() is the idiomatic replacement for "prms" %in% class(x))
  if (is.null(dexter.obj) || !inherits(dexter.obj, "prms")) {
    stop("An object of class prms must be provided")
  }
  # If data supplied as a dexter database connection, convert to a response matrix
  if (inherits(dexter.dat, "SQLiteConnection")) {
    dexter.dat <- dexter::get_resp_matrix(dexter.dat)
  }
  # Check that the response data covers the same items as the fitted model
  if (!all(colnames(dexter.dat) %in% as.character(dexter.obj$inputs$design$item_id))) {
    stop("dexter.obj and dexter.dat must have the same items")
  }
  # Sort model coefficients and data columns so items line up
  dex.coefs <- coef(dexter.obj)
  dex.coefs <- dex.coefs[order(dex.coefs$item_id, dex.coefs$item_score), ]
  dexter.dat <- dexter.dat[, order(colnames(dexter.dat))]
  if (all(c("item_id", "item_score", "beta") %in% names(dex.coefs))) {
    # Build "<item>_Cat<score>" labels matching mirt's parameter layout
    item_scores <- paste0("Cat", dex.coefs$item_score)
    dexlabs <- paste(dex.coefs$item_id, item_scores, sep = "_")
    betapars <- dex.coefs$beta
  } else {
    # Error if key values not available
    stop("coefs(dexter.obj) must provide item_id, item_score and beta")
  }
  # Recover the item name from each "<item>_Cat<score>" label
  stop1 <- regexpr("_Cat", dexlabs)
  dexites <- dexlabs
  if (max(stop1) > 0) {
    dexites <- substr(dexlabs, 1, stop1 - 1)
  }
  # mirt intercepts ("d" parameters) are minus the cumulative sum of the
  # dexter beta parameters within each item
  dexds <- NA + betapars
  for (x in unique(dexites)) {
    dexds[dexites == x] <- -cumsum(betapars[dexites == x])
  }
  # Build a Rasch parameter skeleton in mirt, inject the CML item parameters,
  # fix them, and re-estimate only the group (latent distribution) parameters
  mA <- mirt::mirt(dexter.dat, 1, "Rasch", pars = "values")
  mA$value[mA$name %in% c("d", paste0("d", 1:1000))] <- dexds
  mA$est <- FALSE
  mA$est[mA$class == "GroupPars"] <- TRUE
  mB <- mirt::mirt(dexter.dat, 1, "Rasch", pars = mA)
  return(mB)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/goelz.R
\name{select_optimal_goelz}
\alias{select_optimal_goelz}
\title{Select optimal Goelz Triangle designs from the results of goelz_optim}
\usage{
select_optimal_goelz(optim.results)
}
\arguments{
\item{optim.results}{An object of class goelz-optim.}
}
\value{
A list of objects of class goelz.
}
\description{
Selects optimal Goelz Triangle designs from the results of \code{\link{goelz_optim}}.
}
\examples{
optim.dat <- goelz_optim()
my_goelz <- select_optimal_goelz(optim.results = optim.dat)
}
\seealso{
Other definition functions:
\code{\link{goelz_add_border}()},
\code{\link{goelz_corners}()},
\code{\link{goelz_guides}()},
\code{\link{goelz_mirror}()},
\code{\link{goelz_optim}()},
\code{\link{goelz_starts}()},
\code{\link{goelz}()},
\code{\link{nelder_biculture_competition}()},
\code{\link{nelder_biculture_optim}()},
\code{\link{nelder_biculture}()},
\code{\link{nelder_decision}()},
\code{\link{nelder_interspoke_distance}()},
\code{\link{nelder}()},
\code{\link{select_optimal_nelder_biculture}()}
}
\author{
Kevin J Wolz, \email{kevin@savannainstitute.org}
}
\concept{definition functions}
|
/man/select_optimal_goelz.Rd
|
no_license
|
savannainstitute/sysdesign
|
R
| false
| true
| 1,189
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/goelz.R
\name{select_optimal_goelz}
\alias{select_optimal_goelz}
\title{Select optimal Goelz Triangle designs from the results of goelz_optim}
\usage{
select_optimal_goelz(optim.results)
}
\arguments{
\item{optim.results}{An object of class goelz-optim.}
}
\value{
A list of objects of class goelz.
}
\description{
Selects optimal Goelz Triangle designs from the results of \code{\link{goelz_optim}}.
}
\examples{
optim.dat <- goelz_optim()
my_goelz <- select_optimal_goelz(optim.results = optim.dat)
}
\seealso{
Other definition functions:
\code{\link{goelz_add_border}()},
\code{\link{goelz_corners}()},
\code{\link{goelz_guides}()},
\code{\link{goelz_mirror}()},
\code{\link{goelz_optim}()},
\code{\link{goelz_starts}()},
\code{\link{goelz}()},
\code{\link{nelder_biculture_competition}()},
\code{\link{nelder_biculture_optim}()},
\code{\link{nelder_biculture}()},
\code{\link{nelder_decision}()},
\code{\link{nelder_interspoke_distance}()},
\code{\link{nelder}()},
\code{\link{select_optimal_nelder_biculture}()}
}
\author{
Kevin J Wolz, \email{kevin@savannainstitute.org}
}
\concept{definition functions}
|
#' Apply a function to each element of a vector via futures
#'
#' These functions work exactly the same as [purrr::map()] functions, but allow
#' you to run the map in parallel. There are a number of `future.*` arguments
#' to allow you to fine tune the parallel processing. The documentation is
#' adapted from both `purrr::map()`, and `future.apply::future_lapply()`,
#' so look there for more details.
#'
#' @inheritParams purrr::map
#'
#' @param future.globals A logical, a character vector, or a named list for
#' controlling how globals are handled. For details, see below section.
#'
#' @param future.packages (optional) a character vector specifying packages
#' to be attached in the R environment evaluating the future.
#'
#' @param future.seed A logical or an integer (of length one or seven),
#' or a list of `length(.x)` with pre-generated random seeds.
#' For details, see below section.
#'
#' @param future.lazy Specifies whether the futures should be resolved
#' lazily or eagerly (default).
#'
#' @param future.scheduling Average number of futures ("chunks") per worker.
#' If `0.0`, then a single future is used to process all elements
#' of `.x`.
#' If `1.0` or `TRUE`, then one future per worker is used.
#' If `2.0`, then each worker will process two futures
#' (if there are enough elements in `.x`).
#' If `Inf` or `FALSE`, then one future per element of
#' `.x` is used.
#'
#' @return
#' All functions return a vector the same length as `.x`.
#'
#' [future_map()] returns a list, [future_map_lgl()] a logical vector,
#' [future_map_int()] an integer vector, [future_map_dbl()] a double vector,
#' and [future_map_chr()] a character vector.
#' The output of `.f` will be automatically typed upwards,
#' e.g. logical -> integer -> double -> character.
#'
#' @section Global variables:
#' Argument `future.globals` may be used to control how globals
#' should be handled similarly how the `globals` argument is used with
#' `future()`.
#' Since all function calls use the same set of globals, this function can do
#' any gathering of globals upfront (once), which is more efficient than if
#' it would be done for each future independently.
#' If `TRUE`, `NULL` or not is specified (default), then globals
#' are automatically identified and gathered.
#' If a character vector of names is specified, then those globals are gathered.
#' If a named list, then those globals are used as is.
#' In all cases, `.f` and any `...` arguments are automatically
#' passed as globals to each future created as they are always needed.
#'
#' @section Reproducible random number generation (RNG):
#' Unless `future.seed = FALSE`, this function guarantees to generate
#' the exact same sequence of random numbers _given the same initial
#' seed / RNG state_ - this regardless of type of futures and scheduling
#' ("chunking") strategy.
#'
#' RNG reproducibility is achieved by pregenerating the random seeds for all
#' iterations (over `.x`) by using L'Ecuyer-CMRG RNG streams. In each
#' iteration, these seeds are set before calling \code{.f(.x[[ii]], ...)}.
#' _Note, for large `length(.x)` this may introduce a large overhead._
#' As input (`future.seed`), a fixed seed (integer) may be given, either
#' as a full L'Ecuyer-CMRG RNG seed (vector of 1+6 integers) or as a seed
#' generating such a full L'Ecuyer-CMRG seed.
#' If `future.seed = TRUE`, then \code{\link[base:Random]{.Random.seed}}
#' is returned if it holds a L'Ecuyer-CMRG RNG seed, otherwise one is created
#' randomly.
#' If `future.seed = NA`, a L'Ecuyer-CMRG RNG seed is randomly created.
#' If none of the function calls \code{.f(.x[[ii]], ...)} uses random number
#' generation, then `future.seed = FALSE` may be used.
#'
#' In addition to the above, it is possible to specify a pre-generated
#' sequence of RNG seeds as a list such that
#' `length(future.seed) == length(.x)` and where each element is an
#' integer seed that can be assigned to \code{\link[base:Random]{.Random.seed}}.
#' Use this alternative with caution.
#' **Note that `as.list(seq_along(.x))` is _not_ a valid set of such
#' `.Random.seed` values.**
#'
#' In all cases but `future.seed = FALSE`, the RNG state of the calling
#' R processes after this function returns is guaranteed to be
#' "forwarded one step" from the RNG state that was before the call and
#' in the same way regardless of `future.seed`, `future.scheduling`
#' and future strategy used. This is done in order to guarantee that an \R
#' script calling `future_map()` multiple times should be numerically
#' reproducible given the same initial seed.
#'
#'
#' @examples
#'
#' library(furrr)
#' library(dplyr) # for the pipe
#'
#' plan(multiprocess)
#'
#' 1:10 %>%
#' future_map(rnorm, n = 10) %>%
#' future_map_dbl(mean)
#'
#' # If each element of the output is a data frame, use
#' # map_dfr to row-bind them together:
#' mtcars %>%
#' split(.$cyl) %>%
#' future_map(~ lm(mpg ~ wt, data = .x)) %>%
#' future_map_dfr(~ as.data.frame(t(as.matrix(coef(.)))))
#'
#' @importFrom globals globalsByName cleanup
#' @importFrom future future resolve values as.FutureGlobals nbrOfWorkers getGlobalsAndPackages
#' @importFrom parallel nextRNGStream nextRNGSubStream splitIndices
#' @importFrom utils capture.output str
#' @export
future_map <- function(.x, .f, ...,
                       future.globals = TRUE,
                       future.packages = NULL,
                       future.seed = FALSE,
                       future.lazy = FALSE,
                       future.scheduling = 1.0) {
  # Thin wrapper: delegate to the shared template with purrr::map and a
  # "list" output type, forwarding every future.* tuning knob unchanged.
  future_map_template(
    purrr::map, "list", .x, .f, ...,
    future.globals = future.globals,
    future.packages = future.packages,
    future.seed = future.seed,
    future.lazy = future.lazy,
    future.scheduling = future.scheduling
  )
}
#' @rdname future_map
#' @export
future_map_chr <- function(.x, .f, ...,
                           future.globals = TRUE,
                           future.packages = NULL,
                           future.seed = FALSE,
                           future.lazy = FALSE,
                           future.scheduling = 1.0) {
  # Character-typed variant; the template applies purrr::map_chr per chunk.
  future_map_template(
    purrr::map_chr, "character", .x, .f, ...,
    future.globals = future.globals,
    future.packages = future.packages,
    future.seed = future.seed,
    future.lazy = future.lazy,
    future.scheduling = future.scheduling
  )
}
#' @rdname future_map
#' @export
future_map_dbl <- function(.x, .f, ...,
                           future.globals = TRUE,
                           future.packages = NULL,
                           future.seed = FALSE,
                           future.lazy = FALSE,
                           future.scheduling = 1.0) {
  # Double-typed variant; the template applies purrr::map_dbl per chunk.
  future_map_template(
    purrr::map_dbl, "double", .x, .f, ...,
    future.globals = future.globals,
    future.packages = future.packages,
    future.seed = future.seed,
    future.lazy = future.lazy,
    future.scheduling = future.scheduling
  )
}
#' @rdname future_map
#' @export
future_map_int <- function(.x, .f, ...,
                           future.globals = TRUE,
                           future.packages = NULL,
                           future.seed = FALSE,
                           future.lazy = FALSE,
                           future.scheduling = 1.0) {
  # Integer-typed variant; the template applies purrr::map_int per chunk.
  future_map_template(
    purrr::map_int, "integer", .x, .f, ...,
    future.globals = future.globals,
    future.packages = future.packages,
    future.seed = future.seed,
    future.lazy = future.lazy,
    future.scheduling = future.scheduling
  )
}
#' @rdname future_map
#' @export
future_map_lgl <- function(.x, .f, ...,
                           future.globals = TRUE,
                           future.packages = NULL,
                           future.seed = FALSE,
                           future.lazy = FALSE,
                           future.scheduling = 1.0) {
  # Logical-typed variant; the template applies purrr::map_lgl per chunk.
  future_map_template(
    purrr::map_lgl, "logical", .x, .f, ...,
    future.globals = future.globals,
    future.packages = future.packages,
    future.seed = future.seed,
    future.lazy = future.lazy,
    future.scheduling = future.scheduling
  )
}
#' @rdname future_map
#' @export
future_map_dfr <- function(.x, .f, ..., .id = NULL,
                           future.globals = TRUE,
                           future.packages = NULL,
                           future.seed = FALSE,
                           future.lazy = FALSE,
                           future.scheduling = 1.0) {
  # Passing through the template doesn't work because of the way fold()
  # works, so run a plain future_map() and row-bind the pieces afterwards.
  if (!rlang::is_installed("dplyr")) {
    rlang::abort("`future_map_dfr()` requires dplyr")
  }
  pieces <- future_map(
    .x, .f, ...,
    future.globals = future.globals,
    future.packages = future.packages,
    future.seed = future.seed,
    future.lazy = future.lazy,
    future.scheduling = future.scheduling
  )
  dplyr::bind_rows(pieces, .id = .id)
}
#' @rdname future_map
#' @export
future_map_dfc <- function(.x, .f, ...,
                           future.globals = TRUE,
                           future.packages = NULL,
                           future.seed = FALSE,
                           future.lazy = FALSE,
                           future.scheduling = 1.0) {
  # Same strategy as future_map_dfr(), but column-binding the results.
  if (!rlang::is_installed("dplyr")) {
    rlang::abort("`future_map_dfc()` requires dplyr")
  }
  pieces <- future_map(
    .x, .f, ...,
    future.globals = future.globals,
    future.packages = future.packages,
    future.seed = future.seed,
    future.lazy = future.lazy,
    future.scheduling = future.scheduling
  )
  dplyr::bind_cols(pieces)
}
#' @rdname future_map
#' @export
#' @importFrom purrr list_along set_names
future_map_if <- function(.x, .p, .f, ...,
                          future.globals = TRUE,
                          future.packages = NULL,
                          future.seed = FALSE,
                          future.lazy = FALSE,
                          future.scheduling = 1.0) {
  # Map only over the elements selected by the predicate; every other
  # element is carried through untouched, names preserved.
  hits <- probe(.x, .p)
  res <- list_along(.x)
  res[hits] <- future_map(
    .x[hits], .f, ...,
    future.globals = future.globals,
    future.packages = future.packages,
    future.seed = future.seed,
    future.lazy = future.lazy,
    future.scheduling = future.scheduling
  )
  res[!hits] <- .x[!hits]
  set_names(res, names(.x))
}
#' @rdname future_map
#' @export
#' @importFrom purrr list_along set_names
future_map_at <- function(.x, .at, .f, ...,
                          future.globals = TRUE,
                          future.packages = NULL,
                          future.seed = FALSE,
                          future.lazy = FALSE,
                          future.scheduling = 1.0) {
  # Map only over the positions/names given in .at; every other element
  # is carried through untouched, names preserved.
  hits <- inv_which(.x, .at)
  res <- list_along(.x)
  res[hits] <- future_map(
    .x[hits], .f, ...,
    future.globals = future.globals,
    future.packages = future.packages,
    future.seed = future.seed,
    future.lazy = future.lazy,
    future.scheduling = future.scheduling
  )
  res[!hits] <- .x[!hits]
  set_names(res, names(.x))
}
|
/R/future_map.R
|
no_license
|
bweiher/furrr
|
R
| false
| false
| 9,780
|
r
|
#' Apply a function to each element of a vector via futures
#'
#' These functions work exactly the same as [purrr::map()] functions, but allow
#' you to run the map in parallel. There are a number of `future.*` arguments
#' to allow you to fine tune the parallel processing. The documentation is
#' adapted from both `purrr::map()`, and `future.apply::future_lapply()`,
#' so look there for more details.
#'
#' @inheritParams purrr::map
#'
#' @param future.globals A logical, a character vector, or a named list for
#' controlling how globals are handled. For details, see below section.
#'
#' @param future.packages (optional) a character vector specifying packages
#' to be attached in the R environment evaluating the future.
#'
#' @param future.seed A logical or an integer (of length one or seven),
#' or a list of `length(.x)` with pre-generated random seeds.
#' For details, see below section.
#'
#' @param future.lazy Specifies whether the futures should be resolved
#' lazily or eagerly (default).
#'
#' @param future.scheduling Average number of futures ("chunks") per worker.
#' If `0.0`, then a single future is used to process all elements
#' of `.x`.
#' If `1.0` or `TRUE`, then one future per worker is used.
#' If `2.0`, then each worker will process two futures
#' (if there are enough elements in `.x`).
#' If `Inf` or `FALSE`, then one future per element of
#' `.x` is used.
#'
#' @return
#' All functions return a vector the same length as `.x`.
#'
#' [future_map()] returns a list, [future_map_lgl()] a logical vector,
#' [future_map_int()] an integer vector, [future_map_dbl()] a double vector,
#' and [future_map_chr()] a character vector.
#' The output of `.f` will be automatically typed upwards,
#' e.g. logical -> integer -> double -> character.
#'
#' @section Global variables:
#' Argument `future.globals` may be used to control how globals
#' should be handled similarly how the `globals` argument is used with
#' `future()`.
#' Since all function calls use the same set of globals, this function can do
#' any gathering of globals upfront (once), which is more efficient than if
#' it would be done for each future independently.
#' If `TRUE`, `NULL` or not is specified (default), then globals
#' are automatically identified and gathered.
#' If a character vector of names is specified, then those globals are gathered.
#' If a named list, then those globals are used as is.
#' In all cases, `.f` and any `...` arguments are automatically
#' passed as globals to each future created as they are always needed.
#'
#' @section Reproducible random number generation (RNG):
#' Unless `future.seed = FALSE`, this function guarantees to generate
#' the exact same sequence of random numbers _given the same initial
#' seed / RNG state_ - this regardless of type of futures and scheduling
#' ("chunking") strategy.
#'
#' RNG reproducibility is achieved by pregenerating the random seeds for all
#' iterations (over `.x`) by using L'Ecuyer-CMRG RNG streams. In each
#' iteration, these seeds are set before calling \code{.f(.x[[ii]], ...)}.
#' _Note, for large `length(.x)` this may introduce a large overhead._
#' As input (`future.seed`), a fixed seed (integer) may be given, either
#' as a full L'Ecuyer-CMRG RNG seed (vector of 1+6 integers) or as a seed
#' generating such a full L'Ecuyer-CMRG seed.
#' If `future.seed = TRUE`, then \code{\link[base:Random]{.Random.seed}}
#' is returned if it holds a L'Ecuyer-CMRG RNG seed, otherwise one is created
#' randomly.
#' If `future.seed = NA`, a L'Ecuyer-CMRG RNG seed is randomly created.
#' If none of the function calls \code{.f(.x[[ii]], ...)} uses random number
#' generation, then `future.seed = FALSE` may be used.
#'
#' In addition to the above, it is possible to specify a pre-generated
#' sequence of RNG seeds as a list such that
#' `length(future.seed) == length(.x)` and where each element is an
#' integer seed that can be assigned to \code{\link[base:Random]{.Random.seed}}.
#' Use this alternative with caution.
#' **Note that `as.list(seq_along(.x))` is _not_ a valid set of such
#' `.Random.seed` values.**
#'
#' In all cases but `future.seed = FALSE`, the RNG state of the calling
#' R processes after this function returns is guaranteed to be
#' "forwarded one step" from the RNG state that was before the call and
#' in the same way regardless of `future.seed`, `future.scheduling`
#' and future strategy used. This is done in order to guarantee that an \R
#' script calling `future_map()` multiple times should be numerically
#' reproducible given the same initial seed.
#'
#'
#' @examples
#'
#' library(furrr)
#' library(dplyr) # for the pipe
#'
#' plan(multiprocess)
#'
#' 1:10 %>%
#' future_map(rnorm, n = 10) %>%
#' future_map_dbl(mean)
#'
#' # If each element of the output is a data frame, use
#' # map_dfr to row-bind them together:
#' mtcars %>%
#' split(.$cyl) %>%
#' future_map(~ lm(mpg ~ wt, data = .x)) %>%
#' future_map_dfr(~ as.data.frame(t(as.matrix(coef(.)))))
#'
#' @importFrom globals globalsByName cleanup
#' @importFrom future future resolve values as.FutureGlobals nbrOfWorkers getGlobalsAndPackages
#' @importFrom parallel nextRNGStream nextRNGSubStream splitIndices
#' @importFrom utils capture.output str
#' @export
future_map <- function(.x, .f, ...,
                       future.globals = TRUE,
                       future.packages = NULL,
                       future.seed = FALSE,
                       future.lazy = FALSE,
                       future.scheduling = 1.0) {
  # Thin wrapper: delegate to the shared template with purrr::map and a
  # "list" output type, forwarding every future.* tuning knob unchanged.
  future_map_template(
    purrr::map, "list", .x, .f, ...,
    future.globals = future.globals,
    future.packages = future.packages,
    future.seed = future.seed,
    future.lazy = future.lazy,
    future.scheduling = future.scheduling
  )
}
#' @rdname future_map
#' @export
future_map_chr <- function(.x, .f, ...,
                           future.globals = TRUE,
                           future.packages = NULL,
                           future.seed = FALSE,
                           future.lazy = FALSE,
                           future.scheduling = 1.0) {
  # Character-typed variant; the template applies purrr::map_chr per chunk.
  future_map_template(
    purrr::map_chr, "character", .x, .f, ...,
    future.globals = future.globals,
    future.packages = future.packages,
    future.seed = future.seed,
    future.lazy = future.lazy,
    future.scheduling = future.scheduling
  )
}
#' @rdname future_map
#' @export
future_map_dbl <- function(.x, .f, ...,
                           future.globals = TRUE,
                           future.packages = NULL,
                           future.seed = FALSE,
                           future.lazy = FALSE,
                           future.scheduling = 1.0) {
  # Double-typed variant; the template applies purrr::map_dbl per chunk.
  future_map_template(
    purrr::map_dbl, "double", .x, .f, ...,
    future.globals = future.globals,
    future.packages = future.packages,
    future.seed = future.seed,
    future.lazy = future.lazy,
    future.scheduling = future.scheduling
  )
}
#' @rdname future_map
#' @export
future_map_int <- function(.x, .f, ...,
                           future.globals = TRUE,
                           future.packages = NULL,
                           future.seed = FALSE,
                           future.lazy = FALSE,
                           future.scheduling = 1.0) {
  # Integer-typed variant; the template applies purrr::map_int per chunk.
  future_map_template(
    purrr::map_int, "integer", .x, .f, ...,
    future.globals = future.globals,
    future.packages = future.packages,
    future.seed = future.seed,
    future.lazy = future.lazy,
    future.scheduling = future.scheduling
  )
}
#' @rdname future_map
#' @export
future_map_lgl <- function(.x, .f, ...,
                           future.globals = TRUE,
                           future.packages = NULL,
                           future.seed = FALSE,
                           future.lazy = FALSE,
                           future.scheduling = 1.0) {
  # Logical-typed variant; the template applies purrr::map_lgl per chunk.
  future_map_template(
    purrr::map_lgl, "logical", .x, .f, ...,
    future.globals = future.globals,
    future.packages = future.packages,
    future.seed = future.seed,
    future.lazy = future.lazy,
    future.scheduling = future.scheduling
  )
}
#' @rdname future_map
#' @export
future_map_dfr <- function(.x, .f, ..., .id = NULL,
                           future.globals = TRUE,
                           future.packages = NULL,
                           future.seed = FALSE,
                           future.lazy = FALSE,
                           future.scheduling = 1.0) {
  # Passing through the template doesn't work because of the way fold()
  # works, so run a plain future_map() and row-bind the pieces afterwards.
  if (!rlang::is_installed("dplyr")) {
    rlang::abort("`future_map_dfr()` requires dplyr")
  }
  pieces <- future_map(
    .x, .f, ...,
    future.globals = future.globals,
    future.packages = future.packages,
    future.seed = future.seed,
    future.lazy = future.lazy,
    future.scheduling = future.scheduling
  )
  dplyr::bind_rows(pieces, .id = .id)
}
#' @rdname future_map
#' @export
future_map_dfc <- function(.x, .f, ...,
                           future.globals = TRUE,
                           future.packages = NULL,
                           future.seed = FALSE,
                           future.lazy = FALSE,
                           future.scheduling = 1.0) {
  # Same strategy as future_map_dfr(), but column-binding the results.
  if (!rlang::is_installed("dplyr")) {
    rlang::abort("`future_map_dfc()` requires dplyr")
  }
  pieces <- future_map(
    .x, .f, ...,
    future.globals = future.globals,
    future.packages = future.packages,
    future.seed = future.seed,
    future.lazy = future.lazy,
    future.scheduling = future.scheduling
  )
  dplyr::bind_cols(pieces)
}
#' @rdname future_map
#' @export
#' @importFrom purrr list_along set_names
future_map_if <- function(.x, .p, .f, ...,
                          future.globals = TRUE,
                          future.packages = NULL,
                          future.seed = FALSE,
                          future.lazy = FALSE,
                          future.scheduling = 1.0) {
  # Map only over the elements selected by the predicate; every other
  # element is carried through untouched, names preserved.
  hits <- probe(.x, .p)
  res <- list_along(.x)
  res[hits] <- future_map(
    .x[hits], .f, ...,
    future.globals = future.globals,
    future.packages = future.packages,
    future.seed = future.seed,
    future.lazy = future.lazy,
    future.scheduling = future.scheduling
  )
  res[!hits] <- .x[!hits]
  set_names(res, names(.x))
}
#' @rdname future_map
#' @export
#' @importFrom purrr list_along set_names
future_map_at <- function(.x, .at, .f, ...,
                          future.globals = TRUE,
                          future.packages = NULL,
                          future.seed = FALSE,
                          future.lazy = FALSE,
                          future.scheduling = 1.0) {
  # Map only over the positions/names given in .at; every other element
  # is carried through untouched, names preserved.
  hits <- inv_which(.x, .at)
  res <- list_along(.x)
  res[hits] <- future_map(
    .x[hits], .f, ...,
    future.globals = future.globals,
    future.packages = future.packages,
    future.seed = future.seed,
    future.lazy = future.lazy,
    future.scheduling = future.scheduling
  )
  res[!hits] <- .x[!hits]
  set_names(res, names(.x))
}
|
# Exploratory Data Analysis course project, plot 3.
# Downloads the UCI household power consumption data, loads only the two
# target days (2007-02-01 and 2007-02-02), and renders the three
# sub-metering series to plot3.png.
#Set your working directory
setwd("C:/Coursera - Data Science/4-ExploratoryDataAnalysis/CourseProject1")
## 1.Getting data
fileURL="https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
zipFilename <-"HouseholdPowerConsuption.zip"
download.file(fileURL, destfile = zipFilename)
unzip(zipfile = zipFilename)
## 2.Reading data
# sqldf package is required
library(sqldf)
# Filter at read time so only the two target dates are loaded into memory.
SQL<-"SELECT * from file WHERE Date = '1/2/2007' OR Date = '2/2/2007'"
Data <- read.csv.sql("household_power_consumption.txt", sql=SQL, sep=";")
# Combine the Date and Time columns into a single POSIXct timestamp.
Data$DateTime <- as.POSIXct(strptime(paste(Data$Date,Data$Time), "%d/%m/%Y %H:%M:%S"))
## 3.Setting locale to provide english day names
Sys.setlocale("LC_ALL","English")
## 4.Generate PNG file
png(filename = "plot3.png", width = 480, height = 480)
# Black line for sub-metering 1, then overlay red/blue lines for 2 and 3.
plot(Data$DateTime, Data$Sub_metering_1, type = 'l', ylab = 'Energy sub metering', xlab = '')
points(Data$DateTime, Data$Sub_metering_2, type = 'l', col = 'red')
points(Data$DateTime, Data$Sub_metering_3, type = 'l', col= 'blue')
legend('topright', legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'), col = c('black', 'red', 'blue'), lty = c(1, 1, 1))
dev.off()
|
/plot3.R
|
no_license
|
JustinasMockus/ExData_Plotting1
|
R
| false
| false
| 1,173
|
r
|
# Exploratory Data Analysis course project, plot 3.
# Downloads the UCI household power consumption data, loads only the two
# target days (2007-02-01 and 2007-02-02), and renders the three
# sub-metering series to plot3.png.
#Set your working directory
setwd("C:/Coursera - Data Science/4-ExploratoryDataAnalysis/CourseProject1")
## 1.Getting data
fileURL="https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
zipFilename <-"HouseholdPowerConsuption.zip"
download.file(fileURL, destfile = zipFilename)
unzip(zipfile = zipFilename)
## 2.Reading data
# sqldf package is required
library(sqldf)
# Filter at read time so only the two target dates are loaded into memory.
SQL<-"SELECT * from file WHERE Date = '1/2/2007' OR Date = '2/2/2007'"
Data <- read.csv.sql("household_power_consumption.txt", sql=SQL, sep=";")
# Combine the Date and Time columns into a single POSIXct timestamp.
Data$DateTime <- as.POSIXct(strptime(paste(Data$Date,Data$Time), "%d/%m/%Y %H:%M:%S"))
## 3.Setting locale to provide english day names
Sys.setlocale("LC_ALL","English")
## 4.Generate PNG file
png(filename = "plot3.png", width = 480, height = 480)
# Black line for sub-metering 1, then overlay red/blue lines for 2 and 3.
plot(Data$DateTime, Data$Sub_metering_1, type = 'l', ylab = 'Energy sub metering', xlab = '')
points(Data$DateTime, Data$Sub_metering_2, type = 'l', col = 'red')
points(Data$DateTime, Data$Sub_metering_3, type = 'l', col= 'blue')
legend('topright', legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'), col = c('black', 'red', 'blue'), lty = c(1, 1, 1))
dev.off()
|
# Scatter plot of samples on the first two NMDS axes; point color and
# shape are driven by the caller-supplied vectors.
nmdsplot <- function(x, col, sh) {
  base_plot <- ggplot(x, aes(x = nmds_axis1, y = nmds_axis2))
  base_plot + geom_point(aes(color = col, shape = sh))
}
#
#shape = depthbin, text = sprintf("Sample: %s", sampleID)
|
/src/shinyprot_nmdsplot.R
|
no_license
|
EEgge/ShinyProtists
|
R
| false
| false
| 219
|
r
|
# Scatter plot of samples on the first two NMDS axes; point color and
# shape are driven by the caller-supplied vectors.
nmdsplot <- function(x, col, sh) {
  base_plot <- ggplot(x, aes(x = nmds_axis1, y = nmds_axis2))
  base_plot + geom_point(aes(color = col, shape = sh))
}
#
#shape = depthbin, text = sprintf("Sample: %s", sampleID)
|
# Auxiliary function to warn and coerce classes
#
# Validates that column -var- of data frame -dat- has one of the classes in
# -class.ok-, optionally warns when a coercion will take place, and returns
# the column coerced to -class.target-.
#
# @param var Character scalar. Name of the column to check.
# @param dat A data frame holding the column.
# @param class.ok Character vector. Acceptable classes.
# @param class.target Character scalar. Class the column is coerced into.
# @param warn.coercion Logical scalar. When TRUE, warn if coercion occurs.
# @return The column dat[[var]] coerced to class.target.
check_var_class_and_coerce <- function(var, dat, class.ok, class.target, warn.coercion) {
  # Has the right class? Note that class() may return more than one element
  # (e.g. c("POSIXct", "POSIXt")); collapse it so the error message stays
  # readable instead of pasting the elements together with no separator.
  cl <- class(dat[[var]])
  if (!any(cl %in% class.ok))
    stop("The variable -",var,"- is of class ", paste(cl, collapse = "/"), " which is ",
         "not supported. Supported class(es): ", paste0(class.ok, collapse=", "),
         ".")
  # Should be coerced?
  if (warn.coercion && !(class.target %in% cl))
    warning("Coercing -",var, "- into ",class.target,".")
  # Returning
  return(methods::as(dat[[var]], class.target))
}
#' Convert survey-like data and edgelists to a \code{diffnet} object
#'
#' These convenient functions turn network nomination datasets and edgelists with
#' vertex attributes datasets into diffnet objects. Both work as wrappers of
#' \code{\link{edgelist_to_adjmat}} and \code{\link{new_diffnet}}.
#'
#' @inheritParams edgelist_to_adjmat
#' @param dat A data frame.
#' @param idvar Character scalar. Name of the id variable.
#' @param netvars Character vector. Names of the network nomination variables.
#' @param toavar Character scalar. Name of the time of adoption variable.
#' @param timevar Character scalar. In the case of longitudinal data, name of the time var.
#' @param groupvar Character scalar. Name of cohort variable (e.g. city).
#' @param no.unsurveyed Logical scalar. When \code{TRUE} the nominated individuals
#' that do not show in \code{idvar} are set to \code{NA} (see details).
#' @param warn.coercion Logical scalar. When \code{TRUE} warns coercion from numeric to integer.
#' @param ... Further arguments to be passed to \code{\link{new_diffnet}}.
#' @details
#'
#' All of \code{netvars}, \code{toavar} and \code{groupvar}
#' must be integers. Were these numeric they are coerced into integers, otherwise,
#' when neither of both, the function returns with error. \code{idvar}, on the
#' other hand, should only be integer when calling \code{survey_to_diffnet},
#' on the contrary, for \code{edgelist_to_diffnet}, \code{idvar} may be character.
#'
#' In field work it is not unusual that some respondents nominate unsurveyed
#' individuals. In such case, in order to exclude them from the analysis,
#' the user can set \code{no.unsurveyed=TRUE} (the default), telling the
#' function to exclude such individuals from the adjacency matrix. This is
#' done by setting variables in \code{netvars} equal to \code{NA} when the
#' nominated id can't be found in \code{idvar}.
#'
#' If the network nomination process was done in different groups (location
#' for example) the survey id numbers may be define uniquely within each group
#' but not across groups (there may be many individuals with \code{id=1},
#' for example). To encompass this issue, the user can tell the function what
#' variable can be used to distinguish between groups through the \code{groupvar}
#' argument. When \code{groupvar} is provided, function redifines \code{idvar}
#' and the variables in \code{netvars} as follows:
#'
#' \preformatted{
#' dat[[idvar]] <- dat[[idvar]] + dat[[groupvar]]*z
#' }
#'
#' Where \code{z = 10^nchar(max(dat[[idvar]]))}.
#'
#' For longitudinal data, it is assumed that the \code{toavar} holds the same
#' information through time, that is, it is time-invariant. This is because the
#' package does not yet support variable times of adoption.
#'
#' The \code{fill.missing} option can take any of these three values: \code{"edgelist"},
#' \code{"dat"}, or \code{"both"}. This argument works as follows:
#' \enumerate{
#' \item When \code{fill.missing="edgelist"} (or \code{"both"}) the function
#' will check which vertices show in \code{dat} but do not show in \code{edgelist}.
#' If there is any, the function will include these in \code{edgelist} as ego to
#' \code{NA} (so they have no link to anyone), and, if specified, will fill
#' the \code{t0}, \code{t1} vectors with \code{NA}s for those cases. If
#' \code{w} is also specified, the new vertices will be set to
#' \code{min(w, na.rm=TRUE)}.
#' \item When \code{fill.missing="dat"} (or \code{"both"}) the function
#' checks which vertices show in \code{edgelist} but not in \code{dat}. If
#' there is any, the function will include these in \code{dat} by adding
#' one row per individual.
#' }
#'
#' @export
#' @return A \code{\link{diffnet}} object.
#' @seealso \code{\link{fakesurvey}}, \code{\link{fakesurveyDyn}}
#' @family data management functions
#' @author
#' Vega Yon
#' @examples
#' # Loading a fake survey (data frame)
#' data(fakesurvey)
#'
#' # Diffnet object keeping isolated vertices ----------------------------------
#' dn1 <- survey_to_diffnet(fakesurvey, "id", c("net1", "net2", "net3"), "toa",
#' "group", keep.isolates=TRUE)
#'
#' # Diffnet object NOT keeping isolated vertices
#' dn2 <- survey_to_diffnet(fakesurvey, "id", c("net1", "net2", "net3"), "toa",
#' "group", keep.isolates=FALSE)
#'
#' # dn1 has an extra vertex than dn2
#' dn1
#' dn2
#'
#' # Loading a longitudinal survey data (two waves) ----------------------------
#' data(fakesurveyDyn)
#'
#' groupvar <- "group"
#' x <- survey_to_diffnet(
#' fakesurveyDyn, "id", c("net1", "net2", "net3"), "toa", "group" ,
#' timevar = "time", keep.isolates = TRUE, warn.coercion=FALSE)
#'
#' plot_diffnet(x, vertex.label = rownames(x))
#'
#' # Reproducing medInnovationsDiffNet object ----------------------------------
#' data(medInnovations)
#'
#' # What are the netvars
#' netvars <- names(medInnovations)[grepl("^net", names(medInnovations))]
#'
#' medInnovationsDiffNet2 <- survey_to_diffnet(
#' medInnovations,
#' "id", netvars, "toa", "city",
#' warn.coercion=FALSE)
#'
#' medInnovationsDiffNet2
#'
#' # Comparing with the package's version
#' all(diffnet.toa(medInnovationsDiffNet2) == diffnet.toa(medInnovationsDiffNet)) #TRUE
#' all(
#' diffnet.attrs(medInnovationsDiffNet2, as.df = TRUE) ==
#' diffnet.attrs(medInnovationsDiffNet, as.df = TRUE),
#' na.rm=TRUE) #TRUE
#'
#'
survey_to_diffnet <- function(
  dat, idvar, netvars, toavar,
  groupvar=NULL,
  no.unsurveyed=TRUE,
  timevar=NULL,
  t = NULL,
  undirected = getOption("diffnet.undirected", FALSE),
  self = getOption("diffnet.self", FALSE),
  multiple=getOption("diffnet.multiple", FALSE),
  keep.isolates=TRUE, recode.ids=TRUE,
  warn.coercion=TRUE,
  ...) {
  # Creating a varlist (order matters only for the coercion loop below)
  varlist <- c(idvar, groupvar, netvars, toavar, timevar)
  # Are all in the dataset??
  test <- varlist %in% colnames(dat)
  if (any(!test))
    stop("Variables -", paste(varlist[!test], collapse = "-, -"),"- can't be found on -dat-.")
  # If the range turns out to be infinite, then error
  if (any(is.infinite(dat[[toavar]])))
    stop("Invalid Times of Adoption (Inf values found).")
  if (length(timevar) && any(is.infinite(dat[[timevar]])))
    stop("Time can't have undefined values (Inf values found).")
  # Coercing data into numeric variables. All key variables must end up as
  # integers (see the roxygen details section above).
  for (x in varlist) {
    dat[[x]] <- check_var_class_and_coerce(
      x, dat, c("numeric", "integer"), "integer", warn.coercion)
  }
  # Changing ids: make ids unique ACROSS groups by shifting each id by
  # group * 10^(number of digits of the largest id), so e.g. id 1 in group 2
  # becomes 201 when ids have at most two digits.
  if (length(groupvar)) {
    idord <- nchar(max(dat[[idvar]]))
    for (x in c(netvars, idvar))
      dat[[x]] <- dat[[x]] + dat[[groupvar]]*(10^(idord))
  }
  # Removing unsurveyed: nominations pointing to ids that never appear in
  # idvar are blanked out (set to NA) so they drop from the edgelist.
  if (no.unsurveyed) {
    surveyed <- unique(dat[[idvar]])
    for (x in netvars)
      dat[[x]][which(!(dat[[x]] %in% surveyed))] <- NA
  }
  # Analyzing time data. When timevar is given, tran becomes the full
  # consecutive integer range of observed times; otherwise a single period.
  if (length(timevar)) {
    # Checking if data is complete
    test <- complete.cases(dat[[timevar]])
    if (any(!test)) {
      test <- which(!test)
      test <- paste0(test, collapse=", ")
      stop("Some elements of -timevar- have missing data:\n\t",
           ifelse(nchar(test) > 80, paste(strtrim(test,80), "..."), test),".")
    }
    tvar <- dat[[timevar]]
    tran <- range(tvar, na.rm=TRUE)
    tran <- tran[1]:tran[2]
  } else {
    tvar <- rep(1, nrow(dat))
    tran <- 1
  }
  # Reshaping data (so we have an edgelist): one (ego, alter) row per
  # nomination per time period; vertex attributes are kept per period.
  dat.long <- NULL
  t0 <- NULL
  vertex.attrs <- vector("list", length(tran))
  colstoexport <- which(!(colnames(dat) %in% c(toavar)))
  for (i in 1:length(tran)) {
    subs <- dat[tvar == tran[i],]
    vertex.attrs[[i]] <- subs[,colstoexport]
    # Reshaping wide (one column per network nomination) into long format
    subs <- reshape(
      subs[,c(idvar, netvars)], v.names= "net",
      varying = netvars,
      idvar="id", direction="long")[,c(idvar, "net")]
    # Creating edgelist
    dat.long <- rbind(dat.long, subs)
    # Times for dyn networks
    t0 <- c(t0, rep(tran[i], nrow(subs)))
  }
  t1 <- t0
  # If the time range equals 1, then it implies that the graph data is static
  if (length(tran) == 1) {
    # Computing the times of adoption: number of periods spans the observed
    # toa range (overwrites any user-supplied t)
    rtoa <- range(dat[[toavar]], na.rm = TRUE)
    t <- rtoa[2] - rtoa[1] + 1
    # Creating the adjacency matrix
    graph <- edgelist_to_adjmat(edgelist = dat.long, t = t,
                           undirected=undirected, self=self, multiple = multiple,
                           keep.isolates = keep.isolates, recode.ids = recode.ids)
  } else {
    # Creating the adjacency matrix (one slice per period via t0/t1)
    graph <- edgelist_to_adjmat(edgelist = dat.long, t0 = t0, t1=t1,
                           undirected=undirected, self=self, multiple = multiple,
                           keep.isolates = keep.isolates, recode.ids = recode.ids)
  }
  # Used vertices: `_original_sort` records the vertex order of the adjacency
  # matrix so merged attribute tables can be re-sorted back to match it.
  used.vertex <- data.frame(rownames(graph[[1]]),
                            `_original_sort` = seq_len(nnodes(graph)),
                            check.names = FALSE)
  colnames(used.vertex)[1] <- idvar
  for (i in 1:length(tran)) {
    vertex.attrs[[i]] <- merge(
      used.vertex, vertex.attrs[[i]],
      by = idvar,
      all.x=TRUE, sort=FALSE)
    # Sorting rows (merge does not preserve the adjacency-matrix order)
    vertex.attrs[[i]] <- vertex.attrs[[i]][
      order(vertex.attrs[[i]][["_original_sort"]]),]
    # Removing the idvar and the helper sort column
    test <- colnames(vertex.attrs[[i]]) %in% c(idvar, "_original_sort")
    vertex.attrs[[i]] <- vertex.attrs[[i]][,which(!test)]
  }
  # Times of adoption: toavar must be time-invariant, so unique() yields one
  # row per id; otherwise the length check below fails.
  dat <- unique(dat[,c(idvar, toavar)])
  toa <- merge(used.vertex, dat, by=idvar, all.x=TRUE, sort=FALSE)
  # Sorting rows
  toa <- toa[order(toa[["_original_sort"]]),][[toavar]]
  if (length(toa) != nrow(used.vertex))
    stop("It seems that -toavar- is not time-invariant.")
  # Static data uses static attributes; longitudinal data uses dynamic ones.
  if (length(tran) == 1) {
    new_diffnet(
      graph=graph, toa=toa,
      vertex.static.attrs = vertex.attrs[[1]],
      ...
    )
  } else {
    new_diffnet(
      graph=graph, toa=toa,
      vertex.dyn.attrs = vertex.attrs,
      ...
    )
  }
}
#' @rdname survey_to_diffnet
#' @param fill.missing Character scalar. In the case of having unmatching ids
#' between \code{dat} and \code{edgelist}, fills the data (see details).
#' @export
# Convert an edgelist plus a vertex-attribute data frame into a diffnet
# object; wrapper of edgelist_to_adjmat() and new_diffnet(). -dat- holds one
# row per vertex (per period when -timevar- is given); -fill.missing-
# optionally reconciles ids that appear in only one of the two inputs.
edgelist_to_diffnet <- function(edgelist, w=NULL,
  t0=NULL, t1=NULL ,
  dat, idvar, toavar, timevar=NULL,
  undirected = getOption("diffnet.undirected", FALSE),
  self = getOption("diffnet.self", FALSE),
  multiple=getOption("diffnet.multiple", FALSE),
  fill.missing=NULL,
  keep.isolates=TRUE, recode.ids=TRUE,
  warn.coercion=TRUE) {

  # Step 0.1: Checking dat -----------------------------------------------------
  # Creating a varlist
  varlist <- c(idvar, toavar, timevar)

  # Are all in the dataset??
  test <- varlist %in% colnames(dat)
  if (any(!test))
    stop("Variables -", paste(varlist[!test], collapse = "-, -"),"- can't be found on -dat-.")

  # Is it complete? toa may be empty (varlist[-2] drops toavar from the
  # check, so missing adoption times are allowed; idvar/timevar are not)
  test <- which(!complete.cases(dat[,varlist[-2]]))
  if (length(test))
    stop("Incomplete cases in -dat-. All observations must have -idvar- and,",
         " if specified, -timevar-. The following rows are incomplete:\n\t",
         paste0(test, collapse=", "), ".")

  # If the range turns out to be infinite, then error
  if (any(is.infinite(dat[[toavar]])))
    stop("Invalid Times of Adoption (Inf values found).")

  if (length(timevar) && any(is.infinite(dat[[timevar]])))
    stop("Time can't have undefined values (Inf values found).")

  # Coercing data into numeric variables. idvar can be names (varlist[-1]
  # skips idvar, which is turned into character below)
  for (x in varlist[-1])
    dat[[x]] <- check_var_class_and_coerce(
      x, dat, c("numeric", "integer"), "integer", warn.coercion)

  # Converting into character (always for safety)
  # NOTE(review): for a data.frame edgelist, a column that is already of
  # class character is not in class.ok and would make
  # check_var_class_and_coerce() stop -- confirm whether "character" should
  # be accepted here.
  if (inherits(edgelist, "matrix")) {
    edgelist <- apply(edgelist, 2, as.character)
    edgelist <- as.data.frame(edgelist, stringsAsFactors=FALSE)
  } else if (inherits(edgelist, "data.frame")) {
    for (x in colnames(edgelist))
      edgelist[[x]] <- check_var_class_and_coerce(
        x, edgelist, c("factor", "integer", "numeric"), "character", warn.coercion)
  }
  dat[[idvar]] <- as.character(dat[[idvar]])

  # Step 0.2: Checking FILL data -----------------------------------------------
  # Unique, non-missing ids on each side
  ids.edgelist <- unique(c(edgelist[,1,drop=TRUE], edgelist[,2,drop=TRUE]))
  ids.edgelist <- ids.edgelist[!is.na(ids.edgelist)]
  ids.dat <- unique(dat[[idvar]])
  ids.dat <- ids.dat[!is.na(ids.dat)]

  if (length(fill.missing)) {
    # Checking argument
    if (!inherits(fill.missing, "character") ||
        !(fill.missing %in% c("edgelist", "dat", "both"))) {
      stop("The only values currently supported for -fill.missing- are:\n\t",
           "'edgelist', 'dat', or 'both'.")
    }

    # Filling missing pieces
    if (fill.missing %in% c("edgelist", "both")) {
      test <- ids.dat[which(!( ids.dat %in% ids.edgelist))]

      # If some missing, then filling with more edges: each missing vertex is
      # appended as ego with NA alter (an isolate); t0/t1 get NA and w gets
      # the smallest observed weight
      if (length(test)) {
        warning("The following ids will be added to -edgelist-:\n\t",
                paste0(test, collapse=", "),".",
                ifelse(!keep.isolates, " The option keep.isolates has been changed to TRUE.", ""))
        nedgelist <- nrow(edgelist)
        edgelist <- rbind(
          edgelist,
          edgelist[1:length(test),, drop=FALSE]
        )
        edgelist[(nedgelist + 1):nrow(edgelist),] <- NA
        edgelist[(nedgelist + 1):nrow(edgelist),1] <- test
        if (length(t0)) t0 <- c(t0, rep(NA, length(test)))
        if (length(t1)) t1 <- c(t1, rep(NA, length(test)))
        if (length(w)) w <- c(w, rep(min(w, na.rm = TRUE), length(test)))
      }
    }
    if (fill.missing %in% c("dat", "both")) {
      test <- ids.edgelist[which(!(ids.edgelist %in% ids.dat))]

      # If some missing, appending one all-NA row per missing id
      if (length(test)) {
        warning("The following ids will be added to -dat-:\n\t",
                paste0(test, collapse=", "),".")
        ndat <- nrow(dat)
        dat <- rbind(
          dat,
          dat[1:length(test),, drop=FALSE])
        dat[(ndat + 1):nrow(dat),] <- NA
        dat[[idvar]][(ndat + 1):nrow(dat)] <- test
      }
    }
  }

  # Step 1.1: Converting edgelist into adjmat ------------------------------------
  adjmat <- edgelist_to_adjmat(
    edgelist, w=w, t0=t0, t1=t1,
    undirected = undirected, self=self, multiple=multiple,
    keep.isolates = keep.isolates,
    recode.ids = recode.ids, simplify = FALSE)

  # Step 1.2: Checking times in edgelist and in dat (if any) -------------------
  # suppressWarnings: range() warns when toavar is all NA
  suppressWarnings(dat.ran.toavar <- range(dat[[toavar]], na.rm = TRUE))
  if (length(timevar)) {
    dat.ran.timevar <- range(dat[[timevar]], na.rm=TRUE)

    # range(toa) %within% range(timevar)
    if (dat.ran.toavar[1] < dat.ran.timevar[1] ||
        dat.ran.toavar[2] > dat.ran.timevar[2])
      stop("Invalid range in -toavar- (",dat.ran.toavar[1], " to ",
           dat.ran.toavar[2],"). It should be within the range of -timevar-",
           " (",dat.ran.timevar[1]," to ",dat.ran.timevar[2],").")

    # Setting the range from the data to be the timevar
    dat.ran <- dat.ran.timevar
  } else dat.ran <- dat.ran.toavar

  # Auxiliary time range
  tran <- dat.ran[1]:dat.ran[2]

  # Number of observations in adjmat
  # NOTE(review): scalar condition written with elementwise |; || would be
  # conventional here (same result for these length-1 values).
  if (length(t0) | length(t1)) { # If dynamic, we have to check
    edge.ran <- as.integer(names(adjmat))
    edge.ran <- c(edge.ran[1], edge.ran[length(adjmat)])

    # Range of dat and edgelist should be equal
    if (any(dat.ran != edge.ran))
      stop("Time ranges in -edgelist- and -dat- should be the same. Currently ",
           "they are ",paste0(edge.ran, collapse = " to "), " and ",
           paste0(dat.ran, collapse=" to "), " respectively.")
  } else { # If no dynamic, then simply replicate it
    adjmat <- lapply(tran, function(x) adjmat[[1]])
    names(adjmat) <- tran
  }

  # Step 2: Getting the ids and checking everything is in order ----------------
  # Map adjacency-matrix row order back to the ids
  used.vertex <- data.frame(rownames(adjmat[[1]]),
    `_original_sort` = seq_len(nnodes(adjmat)),
    check.names = FALSE)
  colnames(used.vertex)[1] <- idvar

  # All in the edgelist?
  dat.idvar <- unique(dat[[idvar]])
  test <- which(!(dat.idvar %in% used.vertex[[idvar]]))
  if (length(test))
    warning("Some -ids- not present on the adjacency matrix:\n\t",
            paste0(dat.idvar[test],collapse = ", "),".")

  # Step 3: Checking attributes ------------------------------------------------
  # Creating the attributes (this depends on whether these are dynamic or not)
  vertex.attrs <- vector("list", length(tran))
  names(vertex.attrs) <- tran
  if (length(timevar)) {
    for (i in tran) {
      # In order to access the slices, i do so using a character
      ichar <- as.character(i)

      # Rows with NA timevar apply to every period
      vertex.attrs[[ichar]] <- merge(
        used.vertex,
        dat[is.na(dat[[timevar]]) | (dat[[timevar]] == i),],
        by = idvar,
        all.x=TRUE, sort=FALSE)

      # Sorting back
      vertex.attrs[[ichar]] <- vertex.attrs[[ichar]][
        order(vertex.attrs[[ichar]][["_original_sort"]]),]

      # Removing the id var, the per var and the toa var
      test <- colnames(vertex.attrs[[ichar]]) %in% c(varlist, "_original_sort")
      vertex.attrs[[ichar]] <- vertex.attrs[[ichar]][,which(!test),drop=FALSE]
    }
  } else {
    # Creating data.frame
    vertex.attrs <- merge(used.vertex, dat, by=idvar, all.x=TRUE, sort=FALSE)

    # Sorting back
    vertex.attrs <- vertex.attrs[
      order(vertex.attrs[["_original_sort"]]),]

    # Removing the idvar
    test <- colnames(vertex.attrs) %in% c(varlist, "_original_sort")
    vertex.attrs <- vertex.attrs[,which(!test),drop=FALSE]
  }

  # Times of Adoption vector, one per vertex, in adjacency-matrix order
  toa <- unique(dat[,c(idvar, toavar)])
  toa <- merge(used.vertex, toa, by=idvar, all.x=TRUE,
    all.y=FALSE, sort=FALSE)

  # Sorting
  toa <- toa[order(toa[["_original_sort"]]),][[toavar]]

  # It should be of the same length as the used vertex
  if (length(toa) != nrow(used.vertex))
    stop("Multiple -toavar- by individual. Multiple adoption times are not ",
         "supported yet by the package.")

  # Step 4: Wrapping all together, creating the diffnet object -----------------
  if (length(timevar)) {
    new_diffnet(adjmat, toa=toa, t0 = dat.ran[1], t1=dat.ran[2],
      vertex.dyn.attrs = vertex.attrs,
      undirected=undirected, self=self,
      multiple=multiple)
  } else {
    new_diffnet(adjmat, toa=toa, t0 = dat.ran[1], t1=dat.ran[2],
      vertex.static.attrs = vertex.attrs,
      undirected=undirected, self=self,
      multiple=multiple)
  }
}
|
/R/survey_to_diffnet.R
|
permissive
|
USCCANA/netdiffuseR
|
R
| false
| false
| 19,423
|
r
|
# Auxiliary function to validate a column's class and coerce it, optionally
# warning about the coercion.
#
# @param var Character scalar. Name of the column in -dat- to check.
# @param dat A data frame (or list) holding the column.
# @param class.ok Character vector. Classes accepted for the column.
# @param class.target Character scalar. Class the column is coerced to.
# @param warn.coercion Logical scalar. When TRUE, warns if coercion is needed.
# @return The column dat[[var]] coerced to class.target.
check_var_class_and_coerce <- function(var, dat, class.ok, class.target, warn.coercion) {
  # Has the right class? inherits() is TRUE when any class of the object
  # matches any element of class.ok (idiomatic form of any(class(x) %in% ok)).
  cl <- class(dat[[var]])
  if (!inherits(dat[[var]], class.ok))
    # paste0(cl, ...) keeps multi-class objects readable in the message
    # (bare `cl` in stop() would concatenate the classes with no separator).
    stop("The variable -",var,"- is of class ", paste0(cl, collapse = ", "),
         " which is ", "not supported. Supported class(es): ",
         paste0(class.ok, collapse=", "), ".")
  # Should it be coerced? Warn only when the target class is not already
  # among the column's classes.
  if (warn.coercion && !(class.target %in% cl))
    warning("Coercing -",var, "- into ",class.target,".")
  # Returning the coerced column
  return(methods::as(dat[[var]], class.target))
}
#' Convert survey-like data and edgelists to a \code{diffnet} object
#'
#' These convenient functions turn network nomination datasets and edgelists with
#' vertex attributes datasets into diffnet objects. Both work as wrappers of
#' \code{\link{edgelist_to_adjmat}} and \code{\link{new_diffnet}}.
#'
#' @inheritParams edgelist_to_adjmat
#' @param dat A data frame.
#' @param idvar Character scalar. Name of the id variable.
#' @param netvars Character vector. Names of the network nomination variables.
#' @param toavar Character scalar. Name of the time of adoption variable.
#' @param timevar Character scalar. In the case of longitudinal data, name of the time var.
#' @param groupvar Character scalar. Name of cohort variable (e.g. city).
#' @param no.unsurveyed Logical scalar. When \code{TRUE} the nominated individuals
#' that do not show in \code{idvar} are set to \code{NA} (see details).
#' @param warn.coercion Logical scalar. When \code{TRUE} warns coercion from numeric to integer.
#' @param ... Further arguments to be passed to \code{\link{new_diffnet}}.
#' @details
#'
#' All of \code{netvars}, \code{toavar} and \code{groupvar}
#' must be integers. If they are numeric they are coerced into integers;
#' otherwise, when they are neither, the function returns with an error. \code{idvar}, on the
#' other hand, should only be integer when calling \code{survey_to_diffnet},
#' on the contrary, for \code{edgelist_to_diffnet}, \code{idvar} may be character.
#'
#' In field work it is not unusual that some respondents nominate unsurveyed
#' individuals. In such case, in order to exclude them from the analysis,
#' the user can set \code{no.unsurveyed=TRUE} (the default), telling the
#' function to exclude such individuals from the adjacency matrix. This is
#' done by setting variables in \code{netvars} equal to \code{NA} when the
#' nominated id can't be found in \code{idvar}.
#'
#' If the network nomination process was done in different groups (location
#' for example) the survey id numbers may be defined uniquely within each group
#' but not across groups (there may be many individuals with \code{id=1},
#' for example). To encompass this issue, the user can tell the function what
#' variable can be used to distinguish between groups through the \code{groupvar}
#' argument. When \code{groupvar} is provided, the function redefines \code{idvar}
#' and the variables in \code{netvars} as follows:
#'
#' \preformatted{
#' dat[[idvar]] <- dat[[idvar]] + dat[[groupvar]]*z
#' }
#'
#' Where \code{z = 10^nchar(max(dat[[idvar]]))}.
#'
#' For longitudinal data, it is assumed that the \code{toavar} holds the same
#' information through time, that is, it is time-invariant. This is because the package does
#' not yet support variable times of adoption.
#'
#' The \code{fill.missing} option can take any of these three values: \code{"edgelist"},
#' \code{"dat"}, or \code{"both"}. This argument works as follows:
#' \enumerate{
#' \item When \code{fill.missing="edgelist"} (or \code{"both"}) the function
#' will check which vertices show in \code{dat} but do not show in \code{edgelist}.
#' If there is any, the function will include these in \code{edgelist} as ego to
#' \code{NA} (so they have no link to anyone), and, if specified, will fill
#' the \code{t0}, \code{t1} vectors with \code{NA}s for those cases. If
#' \code{w} is also specified, the new vertices will be set to
#' \code{min(w, na.rm=TRUE)}.
#' \item When \code{fill.missing="dat"} (or \code{"both"}) the function
#' checks which vertices show in \code{edgelist} but not in \code{dat}. If
#' there is any, the function will include these in \code{dat} by adding
#' one row per individual.
#' }
#'
#' @export
#' @return A \code{\link{diffnet}} object.
#' @seealso \code{\link{fakesurvey}}, \code{\link{fakesurveyDyn}}
#' @family data management functions
#' @author
#' Vega Yon
#' @examples
#' # Loading a fake survey (data frame)
#' data(fakesurvey)
#'
#' # Diffnet object keeping isolated vertices ----------------------------------
#' dn1 <- survey_to_diffnet(fakesurvey, "id", c("net1", "net2", "net3"), "toa",
#' "group", keep.isolates=TRUE)
#'
#' # Diffnet object NOT keeping isolated vertices
#' dn2 <- survey_to_diffnet(fakesurvey, "id", c("net1", "net2", "net3"), "toa",
#' "group", keep.isolates=FALSE)
#'
#' # dn1 has an extra vertex than dn2
#' dn1
#' dn2
#'
#' # Loading a longitudinal survey data (two waves) ----------------------------
#' data(fakesurveyDyn)
#'
#' groupvar <- "group"
#' x <- survey_to_diffnet(
#' fakesurveyDyn, "id", c("net1", "net2", "net3"), "toa", "group" ,
#' timevar = "time", keep.isolates = TRUE, warn.coercion=FALSE)
#'
#' plot_diffnet(x, vertex.label = rownames(x))
#'
#' # Reproducing medInnovationsDiffNet object ----------------------------------
#' data(medInnovations)
#'
#' # What are the netvars
#' netvars <- names(medInnovations)[grepl("^net", names(medInnovations))]
#'
#' medInnovationsDiffNet2 <- survey_to_diffnet(
#' medInnovations,
#' "id", netvars, "toa", "city",
#' warn.coercion=FALSE)
#'
#' medInnovationsDiffNet2
#'
#' # Comparing with the package's version
#' all(diffnet.toa(medInnovationsDiffNet2) == diffnet.toa(medInnovationsDiffNet)) #TRUE
#' all(
#' diffnet.attrs(medInnovationsDiffNet2, as.df = TRUE) ==
#' diffnet.attrs(medInnovationsDiffNet, as.df = TRUE),
#' na.rm=TRUE) #TRUE
#'
#'
# Convert a survey-style network-nomination data frame into a diffnet object.
# Wrapper of reshape(), edgelist_to_adjmat() and new_diffnet(): columns in
# -netvars- hold nominated alter ids, -toavar- holds times of adoption, and
# -timevar- (optional) marks the wave of longitudinal data.
survey_to_diffnet <- function(
  dat, idvar, netvars, toavar,
  groupvar=NULL,
  no.unsurveyed=TRUE,
  timevar=NULL,
  t = NULL,
  undirected = getOption("diffnet.undirected", FALSE),
  self = getOption("diffnet.self", FALSE),
  multiple=getOption("diffnet.multiple", FALSE),
  keep.isolates=TRUE, recode.ids=TRUE,
  warn.coercion=TRUE,
  ...) {

  # Creating a varlist: every column name this function will use
  varlist <- c(idvar, groupvar, netvars, toavar, timevar)

  # Are all in the dataset??
  test <- varlist %in% colnames(dat)
  if (any(!test))
    stop("Variables -", paste(varlist[!test], collapse = "-, -"),"- can't be found on -dat-.")

  # If the range turns out to be infinite, then error
  if (any(is.infinite(dat[[toavar]])))
    stop("Invalid Times of Adoption (Inf values found).")

  if (length(timevar) && any(is.infinite(dat[[timevar]])))
    stop("Time can't have undefined values (Inf values found).")

  # Coercing data into numeric variables (integers), warning when requested
  for (x in varlist) {
    dat[[x]] <- check_var_class_and_coerce(
      x, dat, c("numeric", "integer"), "integer", warn.coercion)
  }

  # Changing ids: when a cohort variable is given, ids and nominations are
  # offset by groupvar * 10^(digits of the largest id) so ids become unique
  # across groups.
  if (length(groupvar)) {
    idord <- nchar(max(dat[[idvar]]))
    for (x in c(netvars, idvar))
      dat[[x]] <- dat[[x]] + dat[[groupvar]]*(10^(idord))
  }

  # Removing unsurveyed: nominations pointing to ids that never appear in
  # -idvar- are set to NA and thus excluded from the adjacency matrix.
  if (no.unsurveyed) {
    surveyed <- unique(dat[[idvar]])
    for (x in netvars)
      dat[[x]][which(!(dat[[x]] %in% surveyed))] <- NA
  }

  # Analyzing time data
  if (length(timevar)) {
    # Checking if data is complete: timevar may not have missing values
    test <- complete.cases(dat[[timevar]])
    if (any(!test)) {
      test <- which(!test)
      test <- paste0(test, collapse=", ")
      stop("Some elements of -timevar- have missing data:\n\t",
           ifelse(nchar(test) > 80, paste(strtrim(test,80), "..."), test),".")
    }
    tvar <- dat[[timevar]]
    tran <- range(tvar, na.rm=TRUE)
    tran <- tran[1]:tran[2]
  } else {
    # Static (single-wave) data: one pseudo time period
    tvar <- rep(1, nrow(dat))
    tran <- 1
  }

  # Reshaping data (so we have an edgelist): one (ego, alter) row per
  # nomination, per time period.
  dat.long <- NULL
  t0 <- NULL
  vertex.attrs <- vector("list", length(tran))
  colstoexport <- which(!(colnames(dat) %in% c(toavar)))
  for (i in 1:length(tran)) {
    subs <- dat[tvar == tran[i],]
    vertex.attrs[[i]] <- subs[,colstoexport]

    # Reshaping the wide netvars columns into a single -net- column.
    # NOTE(review): reshape() receives the hard-coded idvar="id"; when the
    # survey id column is not literally named "id", reshape adds its own "id"
    # counter while the real id column is still carried along by the
    # c(idvar, "net") selection -- confirm this is intended.
    subs <- reshape(
      subs[,c(idvar, netvars)], v.names= "net",
      varying = netvars,
      idvar="id", direction="long")[,c(idvar, "net")]

    # Creating edgelist (rbind-growing; the number of waves is small)
    dat.long <- rbind(dat.long, subs)

    # Times for dyn networks
    t0 <- c(t0, rep(tran[i], nrow(subs)))
  }
  t1 <- t0

  # If the time range equals 1, then it implies that the graph data is static
  if (length(tran) == 1) {
    # Computing the times of adoption: number of periods spanned by toa
    rtoa <- range(dat[[toavar]], na.rm = TRUE)
    t <- rtoa[2] - rtoa[1] + 1

    # Creating the adjacency matrix
    graph <- edgelist_to_adjmat(edgelist = dat.long, t = t,
      undirected=undirected, self=self, multiple = multiple,
      keep.isolates = keep.isolates, recode.ids = recode.ids)
  } else {
    # Creating the adjacency matrix (dynamic: slices given by t0/t1)
    graph <- edgelist_to_adjmat(edgelist = dat.long, t0 = t0, t1=t1,
      undirected=undirected, self=self, multiple = multiple,
      keep.isolates = keep.isolates, recode.ids = recode.ids)
  }

  # Used vertices: map the adjacency-matrix row order back to the survey ids
  # so attributes and toa can be sorted consistently.
  used.vertex <- data.frame(rownames(graph[[1]]),
    `_original_sort` = seq_len(nnodes(graph)),
    check.names = FALSE)
  colnames(used.vertex)[1] <- idvar

  for (i in 1:length(tran)) {
    vertex.attrs[[i]] <- merge(
      used.vertex, vertex.attrs[[i]],
      by = idvar,
      all.x=TRUE, sort=FALSE)

    # Sorting rows back to the adjacency-matrix order
    vertex.attrs[[i]] <- vertex.attrs[[i]][
      order(vertex.attrs[[i]][["_original_sort"]]),]

    # Removing the idvar (and the helper sort column)
    test <- colnames(vertex.attrs[[i]]) %in% c(idvar, "_original_sort")
    vertex.attrs[[i]] <- vertex.attrs[[i]][,which(!test)]
  }

  # Times of adoption: unique() collapses repeated waves into one row per id
  dat <- unique(dat[,c(idvar, toavar)])
  toa <- merge(used.vertex, dat, by=idvar, all.x=TRUE, sort=FALSE)

  # Sorting rows
  toa <- toa[order(toa[["_original_sort"]]),][[toavar]]

  # More than one toa row per id means toavar varied over time
  if (length(toa) != nrow(used.vertex))
    stop("It seems that -toavar- is not time-invariant.")

  if (length(tran) == 1) {
    new_diffnet(
      graph=graph, toa=toa,
      vertex.static.attrs = vertex.attrs[[1]],
      ...
    )
  } else {
    new_diffnet(
      graph=graph, toa=toa,
      vertex.dyn.attrs = vertex.attrs,
      ...
    )
  }
}
#' @rdname survey_to_diffnet
#' @param fill.missing Character scalar. In the case of having unmatching ids
#' between \code{dat} and \code{edgelist}, fills the data (see details).
#' @export
# Convert an edgelist plus a vertex-attribute data frame into a diffnet
# object; wrapper of edgelist_to_adjmat() and new_diffnet(). -dat- holds one
# row per vertex (per period when -timevar- is given); -fill.missing-
# optionally reconciles ids that appear in only one of the two inputs.
edgelist_to_diffnet <- function(edgelist, w=NULL,
  t0=NULL, t1=NULL ,
  dat, idvar, toavar, timevar=NULL,
  undirected = getOption("diffnet.undirected", FALSE),
  self = getOption("diffnet.self", FALSE),
  multiple=getOption("diffnet.multiple", FALSE),
  fill.missing=NULL,
  keep.isolates=TRUE, recode.ids=TRUE,
  warn.coercion=TRUE) {

  # Step 0.1: Checking dat -----------------------------------------------------
  # Creating a varlist
  varlist <- c(idvar, toavar, timevar)

  # Are all in the dataset??
  test <- varlist %in% colnames(dat)
  if (any(!test))
    stop("Variables -", paste(varlist[!test], collapse = "-, -"),"- can't be found on -dat-.")

  # Is it complete? toa may be empty (varlist[-2] drops toavar from the
  # check, so missing adoption times are allowed; idvar/timevar are not)
  test <- which(!complete.cases(dat[,varlist[-2]]))
  if (length(test))
    stop("Incomplete cases in -dat-. All observations must have -idvar- and,",
         " if specified, -timevar-. The following rows are incomplete:\n\t",
         paste0(test, collapse=", "), ".")

  # If the range turns out to be infinite, then error
  if (any(is.infinite(dat[[toavar]])))
    stop("Invalid Times of Adoption (Inf values found).")

  if (length(timevar) && any(is.infinite(dat[[timevar]])))
    stop("Time can't have undefined values (Inf values found).")

  # Coercing data into numeric variables. idvar can be names (varlist[-1]
  # skips idvar, which is turned into character below)
  for (x in varlist[-1])
    dat[[x]] <- check_var_class_and_coerce(
      x, dat, c("numeric", "integer"), "integer", warn.coercion)

  # Converting into character (always for safety)
  # NOTE(review): for a data.frame edgelist, a column that is already of
  # class character is not in class.ok and would make
  # check_var_class_and_coerce() stop -- confirm whether "character" should
  # be accepted here.
  if (inherits(edgelist, "matrix")) {
    edgelist <- apply(edgelist, 2, as.character)
    edgelist <- as.data.frame(edgelist, stringsAsFactors=FALSE)
  } else if (inherits(edgelist, "data.frame")) {
    for (x in colnames(edgelist))
      edgelist[[x]] <- check_var_class_and_coerce(
        x, edgelist, c("factor", "integer", "numeric"), "character", warn.coercion)
  }
  dat[[idvar]] <- as.character(dat[[idvar]])

  # Step 0.2: Checking FILL data -----------------------------------------------
  # Unique, non-missing ids on each side
  ids.edgelist <- unique(c(edgelist[,1,drop=TRUE], edgelist[,2,drop=TRUE]))
  ids.edgelist <- ids.edgelist[!is.na(ids.edgelist)]
  ids.dat <- unique(dat[[idvar]])
  ids.dat <- ids.dat[!is.na(ids.dat)]

  if (length(fill.missing)) {
    # Checking argument
    if (!inherits(fill.missing, "character") ||
        !(fill.missing %in% c("edgelist", "dat", "both"))) {
      stop("The only values currently supported for -fill.missing- are:\n\t",
           "'edgelist', 'dat', or 'both'.")
    }

    # Filling missing pieces
    if (fill.missing %in% c("edgelist", "both")) {
      test <- ids.dat[which(!( ids.dat %in% ids.edgelist))]

      # If some missing, then filling with more edges: each missing vertex is
      # appended as ego with NA alter (an isolate); t0/t1 get NA and w gets
      # the smallest observed weight
      if (length(test)) {
        warning("The following ids will be added to -edgelist-:\n\t",
                paste0(test, collapse=", "),".",
                ifelse(!keep.isolates, " The option keep.isolates has been changed to TRUE.", ""))
        nedgelist <- nrow(edgelist)
        edgelist <- rbind(
          edgelist,
          edgelist[1:length(test),, drop=FALSE]
        )
        edgelist[(nedgelist + 1):nrow(edgelist),] <- NA
        edgelist[(nedgelist + 1):nrow(edgelist),1] <- test
        if (length(t0)) t0 <- c(t0, rep(NA, length(test)))
        if (length(t1)) t1 <- c(t1, rep(NA, length(test)))
        if (length(w)) w <- c(w, rep(min(w, na.rm = TRUE), length(test)))
      }
    }
    if (fill.missing %in% c("dat", "both")) {
      test <- ids.edgelist[which(!(ids.edgelist %in% ids.dat))]

      # If some missing, appending one all-NA row per missing id
      if (length(test)) {
        warning("The following ids will be added to -dat-:\n\t",
                paste0(test, collapse=", "),".")
        ndat <- nrow(dat)
        dat <- rbind(
          dat,
          dat[1:length(test),, drop=FALSE])
        dat[(ndat + 1):nrow(dat),] <- NA
        dat[[idvar]][(ndat + 1):nrow(dat)] <- test
      }
    }
  }

  # Step 1.1: Converting edgelist into adjmat ------------------------------------
  adjmat <- edgelist_to_adjmat(
    edgelist, w=w, t0=t0, t1=t1,
    undirected = undirected, self=self, multiple=multiple,
    keep.isolates = keep.isolates,
    recode.ids = recode.ids, simplify = FALSE)

  # Step 1.2: Checking times in edgelist and in dat (if any) -------------------
  # suppressWarnings: range() warns when toavar is all NA
  suppressWarnings(dat.ran.toavar <- range(dat[[toavar]], na.rm = TRUE))
  if (length(timevar)) {
    dat.ran.timevar <- range(dat[[timevar]], na.rm=TRUE)

    # range(toa) %within% range(timevar)
    if (dat.ran.toavar[1] < dat.ran.timevar[1] ||
        dat.ran.toavar[2] > dat.ran.timevar[2])
      stop("Invalid range in -toavar- (",dat.ran.toavar[1], " to ",
           dat.ran.toavar[2],"). It should be within the range of -timevar-",
           " (",dat.ran.timevar[1]," to ",dat.ran.timevar[2],").")

    # Setting the range from the data to be the timevar
    dat.ran <- dat.ran.timevar
  } else dat.ran <- dat.ran.toavar

  # Auxiliary time range
  tran <- dat.ran[1]:dat.ran[2]

  # Number of observations in adjmat
  # NOTE(review): scalar condition written with elementwise |; || would be
  # conventional here (same result for these length-1 values).
  if (length(t0) | length(t1)) { # If dynamic, we have to check
    edge.ran <- as.integer(names(adjmat))
    edge.ran <- c(edge.ran[1], edge.ran[length(adjmat)])

    # Range of dat and edgelist should be equal
    if (any(dat.ran != edge.ran))
      stop("Time ranges in -edgelist- and -dat- should be the same. Currently ",
           "they are ",paste0(edge.ran, collapse = " to "), " and ",
           paste0(dat.ran, collapse=" to "), " respectively.")
  } else { # If no dynamic, then simply replicate it
    adjmat <- lapply(tran, function(x) adjmat[[1]])
    names(adjmat) <- tran
  }

  # Step 2: Getting the ids and checking everything is in order ----------------
  # Map adjacency-matrix row order back to the ids
  used.vertex <- data.frame(rownames(adjmat[[1]]),
    `_original_sort` = seq_len(nnodes(adjmat)),
    check.names = FALSE)
  colnames(used.vertex)[1] <- idvar

  # All in the edgelist?
  dat.idvar <- unique(dat[[idvar]])
  test <- which(!(dat.idvar %in% used.vertex[[idvar]]))
  if (length(test))
    warning("Some -ids- not present on the adjacency matrix:\n\t",
            paste0(dat.idvar[test],collapse = ", "),".")

  # Step 3: Checking attributes ------------------------------------------------
  # Creating the attributes (this depends on whether these are dynamic or not)
  vertex.attrs <- vector("list", length(tran))
  names(vertex.attrs) <- tran
  if (length(timevar)) {
    for (i in tran) {
      # In order to access the slices, i do so using a character
      ichar <- as.character(i)

      # Rows with NA timevar apply to every period
      vertex.attrs[[ichar]] <- merge(
        used.vertex,
        dat[is.na(dat[[timevar]]) | (dat[[timevar]] == i),],
        by = idvar,
        all.x=TRUE, sort=FALSE)

      # Sorting back
      vertex.attrs[[ichar]] <- vertex.attrs[[ichar]][
        order(vertex.attrs[[ichar]][["_original_sort"]]),]

      # Removing the id var, the per var and the toa var
      test <- colnames(vertex.attrs[[ichar]]) %in% c(varlist, "_original_sort")
      vertex.attrs[[ichar]] <- vertex.attrs[[ichar]][,which(!test),drop=FALSE]
    }
  } else {
    # Creating data.frame
    vertex.attrs <- merge(used.vertex, dat, by=idvar, all.x=TRUE, sort=FALSE)

    # Sorting back
    vertex.attrs <- vertex.attrs[
      order(vertex.attrs[["_original_sort"]]),]

    # Removing the idvar
    test <- colnames(vertex.attrs) %in% c(varlist, "_original_sort")
    vertex.attrs <- vertex.attrs[,which(!test),drop=FALSE]
  }

  # Times of Adoption vector, one per vertex, in adjacency-matrix order
  toa <- unique(dat[,c(idvar, toavar)])
  toa <- merge(used.vertex, toa, by=idvar, all.x=TRUE,
    all.y=FALSE, sort=FALSE)

  # Sorting
  toa <- toa[order(toa[["_original_sort"]]),][[toavar]]

  # It should be of the same length as the used vertex
  if (length(toa) != nrow(used.vertex))
    stop("Multiple -toavar- by individual. Multiple adoption times are not ",
         "supported yet by the package.")

  # Step 4: Wrapping all together, creating the diffnet object -----------------
  if (length(timevar)) {
    new_diffnet(adjmat, toa=toa, t0 = dat.ran[1], t1=dat.ran[2],
      vertex.dyn.attrs = vertex.attrs,
      undirected=undirected, self=self,
      multiple=multiple)
  } else {
    new_diffnet(adjmat, toa=toa, t0 = dat.ran[1], t1=dat.ran[2],
      vertex.static.attrs = vertex.attrs,
      undirected=undirected, self=self,
      multiple=multiple)
  }
}
|
\name{mirror_sequence}
\alias{mirror_sequence}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
mirror_sequence
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
mirror_sequence(x)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
%% ~~Describe \code{x} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (x)
{
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
/man/mirror_sequence.Rd
|
no_license
|
JaimeMLegaz/setup-sequences
|
R
| false
| false
| 1,305
|
rd
|
\name{mirror_sequence}
\alias{mirror_sequence}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
%% ~~function to do ... ~~
mirror_sequence
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
mirror_sequence(x)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
%% ~~Describe \code{x} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (x)
{
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
# Plot 2 of the "Exploratory Data Analysis" course project: line chart of
# Global Active Power for 2007-02-01/2007-02-02, written to plot2.png.
# NOTE(review): the script hard-codes a Windows working directory and
# re-downloads the zip on every run -- consider guarding both.
#go to directory
setwd("C:\\datasciencecoursera")
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile = "./household_power_consumption.zip")
unzip("./household_power_consumption.zip", exdir = ".")
#extract file from directory (";"-separated; "?" encodes missing values)
extract <- read.table(".\\household_power_consumption.txt", header = TRUE, sep = ";", na.strings = c("?"))
#na.strings = "?", stringsAsFactors = FALSE,
#filter for only 02-01-2007 and 02-02-2007 (Date values are d/m/Y strings)
filtered <- extract[extract$Date %in% c("1/2/2007", "2/2/2007"),]
#concatenate date and time into date time (strptime -> POSIXlt)
final <- data.frame(strptime(paste(filtered$Date, filtered$Time), "%d/%m/%Y %H:%M"),
                    filtered$Global_active_power,
                    filtered$Global_reactive_power,
                    filtered$Voltage,
                    filtered$Sub_metering_1,
                    filtered$Sub_metering_2,
                    filtered$Sub_metering_3)
colnames(final)[1] <- "DateTime"
colnames(final)[2] <- "Global_active_power"
colnames(final)[3] <- "Global_reactive_power"
colnames(final)[4] <- "Voltage"
colnames(final)[5] <- "Sub_metering_1"
colnames(final)[6] <- "Sub_metering_2"
colnames(final)[7] <- "Sub_metering_3"
#create plot 2 to file (480x480 PNG, single panel)
png("plot2.png", width = 480, height = 480)
par(mfrow = c(1, 1))
# NOTE(review): the series is divided by 1000 while the axis label says
# kilowatts -- if Global_active_power is already recorded in kilowatts this
# rescales the data to megawatts; confirm the intended units.
plot(final$DateTime, as.numeric(final$Global_active_power) / 1000, type = "l", ylab = "Global Active Power (kilowatts)")
dev.off()
|
/Plot2.R
|
no_license
|
trc2112/ExData_Plotting1
|
R
| false
| false
| 1,383
|
r
|
# Plot 2 of the "Exploratory Data Analysis" course project: line chart of
# Global Active Power for 2007-02-01/2007-02-02, written to plot2.png.
# NOTE(review): the script hard-codes a Windows working directory and
# re-downloads the zip on every run -- consider guarding both.
#go to directory
setwd("C:\\datasciencecoursera")
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile = "./household_power_consumption.zip")
unzip("./household_power_consumption.zip", exdir = ".")
#extract file from directory (";"-separated; "?" encodes missing values)
extract <- read.table(".\\household_power_consumption.txt", header = TRUE, sep = ";", na.strings = c("?"))
#na.strings = "?", stringsAsFactors = FALSE,
#filter for only 02-01-2007 and 02-02-2007 (Date values are d/m/Y strings)
filtered <- extract[extract$Date %in% c("1/2/2007", "2/2/2007"),]
#concatenate date and time into date time (strptime -> POSIXlt)
final <- data.frame(strptime(paste(filtered$Date, filtered$Time), "%d/%m/%Y %H:%M"),
                    filtered$Global_active_power,
                    filtered$Global_reactive_power,
                    filtered$Voltage,
                    filtered$Sub_metering_1,
                    filtered$Sub_metering_2,
                    filtered$Sub_metering_3)
colnames(final)[1] <- "DateTime"
colnames(final)[2] <- "Global_active_power"
colnames(final)[3] <- "Global_reactive_power"
colnames(final)[4] <- "Voltage"
colnames(final)[5] <- "Sub_metering_1"
colnames(final)[6] <- "Sub_metering_2"
colnames(final)[7] <- "Sub_metering_3"
#create plot 2 to file (480x480 PNG, single panel)
png("plot2.png", width = 480, height = 480)
par(mfrow = c(1, 1))
# NOTE(review): the series is divided by 1000 while the axis label says
# kilowatts -- if Global_active_power is already recorded in kilowatts this
# rescales the data to megawatts; confirm the intended units.
plot(final$DateTime, as.numeric(final$Global_active_power) / 1000, type = "l", ylab = "Global Active Power (kilowatts)")
dev.off()
|
################ Figure 7 #####################
# Calculate total C partitioning for individual treatments
#-------------------------------------------------------------------------------------
# NOTE(review): this script expects `result` (data-assimilation output list),
# `data.all`, `treat.group` and `no.param.par.var` to already exist in the
# workspace from earlier steps of the pipeline -- confirm before sourcing
# this file standalone.
# cbPalette = c("gray", "orange", "skyblue", "green3", "yellow3", "#0072B2", "#D55E00")
cbPalette = c("gray", "orange", "skyblue", "green3", "#009E73", "yellow3", "#0072B2", "#D55E00")
# vol_group <- list(c(5,10,15),c(20,25,35),1000)
# treat.group = as.factor(c("ambient drought","ambient watered","elevated drought","elevated watered")) # Assign all treatments
# One row per treatment group; the columns are the C-budget terms (g C).
Ct.group = data.frame(matrix(ncol = 9, nrow = length(treat.group)))
names(Ct.group) = c("GPP","Rg","Rm","Cs","Cr","Cw","Cf","Cflit","Crlit")
# Unpack the assimilation results: parameter/data/output summaries and the
# modelled C storage pool (assumed layout of `result` -- confirm upstream).
param.summary = result[[2]]
data.summary = result[[3]]
output.summary = result[[4]]
Cstorage.data = result[[7]]
Cstorage.data = Cstorage.data[with(Cstorage.data, order(Date,treatment)), 1:3]
names(Cstorage.data)[1] = "Cs"
data.part = data.all[c("Date","Treatment","GPP")]
names(data.part)[2] = "treatment"
data.part$Date = as.Date(data.part$Date)
Rm.data = subset(output.summary, variable %in% "Rm")
names(Rm.data)[3] = "Rm"
data.part = merge(data.part, Rm.data[,c(1,3,4)], by=c("Date","treatment"))
data.part = merge(data.part, Cstorage.data, by=c("Date","treatment"))
# Merge each modelled C pool time series onto data.part, one column per pool.
cpool = as.factor(c("Mleaf.modelled","Mwood.modelled","Mroot.modelled","Mlit.modelled","Sleaf.modelled"))
for (i in 1:length(cpool)) {
cpool.data = subset(output.summary, variable %in% cpool[i])
cpool.data = cpool.data[, c("Date","value","treatment")]
cpool.data = cpool.data[with(cpool.data, order(Date,treatment)), ]
names(cpool.data)[2] = as.character(cpool[i])
data.part = merge(data.part,cpool.data, all = TRUE)
}
# combine data and parameters together
# If few parameter sets were fitted, merge parameters directly; otherwise
# (no.param.par.var >= 5) each date interval has its own parameter set, so
# merge interval-by-interval and stack the pieces.
var = as.factor(c("k","Y","af","as","ar","sf","sr"))
if (no.param.par.var < 5) {
for (i in 1:length(var)) {
param = subset(param.summary, variable %in% var[i])
param = param[, c("Date","Parameter","treatment")]
param = param[with(param, order(Date,treatment)), ]
names(param)[2] = as.character(var[i])
data.part = merge(data.part,param, all = TRUE)
}
} else {
for (i in 1:length(var)) {
for (j in 1:length(unique(param.summary$Date))) {
if (j < length(unique(param.summary$Date))) {
param = subset(param.summary, variable %in% var[i] & Date >= unique(param.summary$Date)[j] & Date < unique(param.summary$Date)[j+1])
} else {
param = subset(param.summary, variable %in% var[i] & Date >= unique(param.summary$Date)[j])
}
param = param[, c("Date","Parameter","treatment")]
param = param[with(param, order(treatment)), ]
names(param)[2] = as.character(var[i])
if (j < length(unique(param.summary$Date))) {
param.part.set = merge (subset(data.part, Date >= unique(param.summary$Date)[j] & Date < unique(param.summary$Date)[j+1]), param[,c(2:3)], by="treatment")
} else {
param.part.set = merge (subset(data.part, Date >= unique(param.summary$Date)[j]), param[,c(2:3)], by="treatment")
}
if (j == 1) {
param.part = param.part.set
} else {
param.part = rbind(param.part,param.part.set)
}
}
data.part = merge(data.part,param.part, all = TRUE)
}
}
data.part = data.part[with(data.part, order(treatment,Date)), ]
# data.set = data.part[data.part$Date <= as.Date("2013-05-21"), ]
# Per-treatment totals: fluxes (GPP, Rm, Crlit) are summed over all dates;
# pools are taken at the final date 2014-05-26 (biomass pools Cr/Cw/Cf as the
# change since 2013-09-17); Rg is computed as the budget residual.
# NOTE(review): `data.part[..., 8:6]` picks columns 8..6 in reverse so they
# line up with Ct.group columns 5:7 (Cr,Cw,Cf) -- confirm the column layout.
for (v in 1:length(treat.group)) {
Ct.group$GPP[v] = sum ( data.part$GPP[which(data.part$treatment %in% treat.group[v])] )
Ct.group$Rm[v] = sum ( data.part$Rm[which(data.part$treatment %in% treat.group[v])] )
Ct.group$Cs[v] = data.part$Cs[which(data.part$treatment %in% treat.group[v] & data.part$Date %in% as.Date("2014-05-26"))]
Ct.group[v, c(5:7)] = data.part[which(data.part$treatment %in% treat.group[v] & data.part$Date %in% as.Date("2014-05-26")), 8:6] - data.part[which(data.part$treatment %in% treat.group[v] & data.part$Date %in% as.Date("2013-09-17")), 8:6]
Ct.group$Cflit[v] = data.part[which(data.part$treatment %in% treat.group[v] & data.part$Date %in% as.Date("2014-05-26")), 9]
Ct.group$Crlit[v] = sum ( data.part$sr [which(data.part$treatment %in% treat.group[v])] * data.part$Mroot.modelled [which(data.part$treatment %in% treat.group[v])])
Ct.group$Rg[v] = Ct.group$GPP[v] - sum(Ct.group[v,c(3:9)])
}
# Express every term as a percentage of GPP for the stacked bar chart.
Ct.fraction.group = Ct.group[, c(2:9)]
Ct.fraction.group[,] = Ct.fraction.group[,] / Ct.group[, 1] * 100
# row.names(Ct.fraction.group) <- c("amb-dry","amb-wet","warm-dry","warm-wet")
row.names(Ct.fraction.group) <- c("amb-wet","warm-wet")
Ct.group$treatment = treat.group
colnames(Ct.group) <- c("GPP (g C)", "Rg (g C)", "Rm (g C)", "Cs (g C)", "Cr (g C)", "Cw (g C)", "Cf (g C)", "Cflit (g C)", "Crlit (g C)", "Treatment")
Ct.group = Ct.group[,c(10,1,2,3,4,7,5,6,8,9)]
write.csv(Ct.group, file = "output/C_partitioning_wtc3.csv", row.names = FALSE)
# Stacked bars of each C term as % of GPP; the red labels above the bars show
# each treatment's total GPP (g C).
png("output/Figure_7_C_partitioning_wtc3.png", units="px", width=1200, height=1000, res=200)
par(mfrow = c(1, 1), mar=c(5, 4, 2, 6))
# bb = barplot(as.matrix(t(Ct.fraction.group)), ylim=c(0, 107), ylab = "C Partitioning (%)", xlab = "Treatments (Container size)",
# col = rainbow(20),legend = colnames(Ct.fraction.group),
# args.legend = list(x = "topright", bty = "n", inset=c(-0.15, 0)))
bb = barplot(as.matrix(t(Ct.fraction.group)), ylim=c(0, 107), ylab = "C Partitioning (%)", xlab = "Container size (L))",
col = cbPalette[1:8],legend = c(expression(R[g]),expression(R["m,tot"]),expression(C[s]),expression(C["s,r"]),expression(C["s,w"]),expression(C["s,f"]),expression(C["f,lit"]),expression(C["r,lit"])),
args.legend = list(x = "topright", bty = "n", inset=c(-0.18, 0)))
# text( bb, Ct.fraction.group[,1]-3, labels = round(Ct.group[,3],1), cex=.9)
# text( bb, Ct.fraction.group[,1]+Ct.fraction.group[,2]-4, labels = round(Ct.group[,4],1), cex=.9)
# text( bb, Ct.fraction.group[,1]+Ct.fraction.group[,2]+Ct.fraction.group[,3]-1, labels = round(Ct.group[,5],1), cex=.9)
# text( bb, Ct.fraction.group[,1]+Ct.fraction.group[,2]+Ct.fraction.group[,3]+Ct.fraction.group[,4]-3, labels = round(Ct.group[,6],1), cex=.9)
# text( bb, Ct.fraction.group[,1]+Ct.fraction.group[,2]+Ct.fraction.group[,3]+Ct.fraction.group[,4]+Ct.fraction.group[,5]-3, labels = round(Ct.group[,7],1), cex=.9)
# text( bb, Ct.fraction.group[,1]+Ct.fraction.group[,2]+Ct.fraction.group[,3]+Ct.fraction.group[,4]+Ct.fraction.group[,5]+Ct.fraction.group[,6]-2, labels = round(Ct.group[,8],1), cex=.9)
# text( bb, Ct.fraction.group[,1]+Ct.fraction.group[,2]+Ct.fraction.group[,3]+Ct.fraction.group[,4]+Ct.fraction.group[,5]+Ct.fraction.group[,6]+Ct.fraction.group[,7]-1, labels = round(Ct.group[,9],1), cex=.9)
text( bb, rowSums(Ct.fraction.group)+0.5, labels = round(Ct.group[,2],1), pos = 3, cex=1, col="red")
dev.off()
|
/R/C_partitioning_wtc3.R
|
no_license
|
DataFusion18/DA_WTC3
|
R
| false
| false
| 6,805
|
r
|
################ Figure 7 #####################
# Calculate total C partitioning for individual treatments
#-------------------------------------------------------------------------------------
# NOTE(review): this script expects `result` (data-assimilation output list),
# `data.all`, `treat.group` and `no.param.par.var` to already exist in the
# workspace from earlier steps of the pipeline -- confirm before sourcing
# this file standalone.
# cbPalette = c("gray", "orange", "skyblue", "green3", "yellow3", "#0072B2", "#D55E00")
cbPalette = c("gray", "orange", "skyblue", "green3", "#009E73", "yellow3", "#0072B2", "#D55E00")
# vol_group <- list(c(5,10,15),c(20,25,35),1000)
# treat.group = as.factor(c("ambient drought","ambient watered","elevated drought","elevated watered")) # Assign all treatments
# One row per treatment group; the columns are the C-budget terms (g C).
Ct.group = data.frame(matrix(ncol = 9, nrow = length(treat.group)))
names(Ct.group) = c("GPP","Rg","Rm","Cs","Cr","Cw","Cf","Cflit","Crlit")
# Unpack the assimilation results: parameter/data/output summaries and the
# modelled C storage pool (assumed layout of `result` -- confirm upstream).
param.summary = result[[2]]
data.summary = result[[3]]
output.summary = result[[4]]
Cstorage.data = result[[7]]
Cstorage.data = Cstorage.data[with(Cstorage.data, order(Date,treatment)), 1:3]
names(Cstorage.data)[1] = "Cs"
data.part = data.all[c("Date","Treatment","GPP")]
names(data.part)[2] = "treatment"
data.part$Date = as.Date(data.part$Date)
Rm.data = subset(output.summary, variable %in% "Rm")
names(Rm.data)[3] = "Rm"
data.part = merge(data.part, Rm.data[,c(1,3,4)], by=c("Date","treatment"))
data.part = merge(data.part, Cstorage.data, by=c("Date","treatment"))
# Merge each modelled C pool time series onto data.part, one column per pool.
cpool = as.factor(c("Mleaf.modelled","Mwood.modelled","Mroot.modelled","Mlit.modelled","Sleaf.modelled"))
for (i in 1:length(cpool)) {
cpool.data = subset(output.summary, variable %in% cpool[i])
cpool.data = cpool.data[, c("Date","value","treatment")]
cpool.data = cpool.data[with(cpool.data, order(Date,treatment)), ]
names(cpool.data)[2] = as.character(cpool[i])
data.part = merge(data.part,cpool.data, all = TRUE)
}
# combine data and parameters together
# If few parameter sets were fitted, merge parameters directly; otherwise
# (no.param.par.var >= 5) each date interval has its own parameter set, so
# merge interval-by-interval and stack the pieces.
var = as.factor(c("k","Y","af","as","ar","sf","sr"))
if (no.param.par.var < 5) {
for (i in 1:length(var)) {
param = subset(param.summary, variable %in% var[i])
param = param[, c("Date","Parameter","treatment")]
param = param[with(param, order(Date,treatment)), ]
names(param)[2] = as.character(var[i])
data.part = merge(data.part,param, all = TRUE)
}
} else {
for (i in 1:length(var)) {
for (j in 1:length(unique(param.summary$Date))) {
if (j < length(unique(param.summary$Date))) {
param = subset(param.summary, variable %in% var[i] & Date >= unique(param.summary$Date)[j] & Date < unique(param.summary$Date)[j+1])
} else {
param = subset(param.summary, variable %in% var[i] & Date >= unique(param.summary$Date)[j])
}
param = param[, c("Date","Parameter","treatment")]
param = param[with(param, order(treatment)), ]
names(param)[2] = as.character(var[i])
if (j < length(unique(param.summary$Date))) {
param.part.set = merge (subset(data.part, Date >= unique(param.summary$Date)[j] & Date < unique(param.summary$Date)[j+1]), param[,c(2:3)], by="treatment")
} else {
param.part.set = merge (subset(data.part, Date >= unique(param.summary$Date)[j]), param[,c(2:3)], by="treatment")
}
if (j == 1) {
param.part = param.part.set
} else {
param.part = rbind(param.part,param.part.set)
}
}
data.part = merge(data.part,param.part, all = TRUE)
}
}
data.part = data.part[with(data.part, order(treatment,Date)), ]
# data.set = data.part[data.part$Date <= as.Date("2013-05-21"), ]
# Per-treatment totals: fluxes (GPP, Rm, Crlit) are summed over all dates;
# pools are taken at the final date 2014-05-26 (biomass pools Cr/Cw/Cf as the
# change since 2013-09-17); Rg is computed as the budget residual.
# NOTE(review): `data.part[..., 8:6]` picks columns 8..6 in reverse so they
# line up with Ct.group columns 5:7 (Cr,Cw,Cf) -- confirm the column layout.
for (v in 1:length(treat.group)) {
Ct.group$GPP[v] = sum ( data.part$GPP[which(data.part$treatment %in% treat.group[v])] )
Ct.group$Rm[v] = sum ( data.part$Rm[which(data.part$treatment %in% treat.group[v])] )
Ct.group$Cs[v] = data.part$Cs[which(data.part$treatment %in% treat.group[v] & data.part$Date %in% as.Date("2014-05-26"))]
Ct.group[v, c(5:7)] = data.part[which(data.part$treatment %in% treat.group[v] & data.part$Date %in% as.Date("2014-05-26")), 8:6] - data.part[which(data.part$treatment %in% treat.group[v] & data.part$Date %in% as.Date("2013-09-17")), 8:6]
Ct.group$Cflit[v] = data.part[which(data.part$treatment %in% treat.group[v] & data.part$Date %in% as.Date("2014-05-26")), 9]
Ct.group$Crlit[v] = sum ( data.part$sr [which(data.part$treatment %in% treat.group[v])] * data.part$Mroot.modelled [which(data.part$treatment %in% treat.group[v])])
Ct.group$Rg[v] = Ct.group$GPP[v] - sum(Ct.group[v,c(3:9)])
}
# Express every term as a percentage of GPP for the stacked bar chart.
Ct.fraction.group = Ct.group[, c(2:9)]
Ct.fraction.group[,] = Ct.fraction.group[,] / Ct.group[, 1] * 100
# row.names(Ct.fraction.group) <- c("amb-dry","amb-wet","warm-dry","warm-wet")
row.names(Ct.fraction.group) <- c("amb-wet","warm-wet")
Ct.group$treatment = treat.group
colnames(Ct.group) <- c("GPP (g C)", "Rg (g C)", "Rm (g C)", "Cs (g C)", "Cr (g C)", "Cw (g C)", "Cf (g C)", "Cflit (g C)", "Crlit (g C)", "Treatment")
Ct.group = Ct.group[,c(10,1,2,3,4,7,5,6,8,9)]
write.csv(Ct.group, file = "output/C_partitioning_wtc3.csv", row.names = FALSE)
# Stacked bars of each C term as % of GPP; the red labels above the bars show
# each treatment's total GPP (g C).
png("output/Figure_7_C_partitioning_wtc3.png", units="px", width=1200, height=1000, res=200)
par(mfrow = c(1, 1), mar=c(5, 4, 2, 6))
# bb = barplot(as.matrix(t(Ct.fraction.group)), ylim=c(0, 107), ylab = "C Partitioning (%)", xlab = "Treatments (Container size)",
# col = rainbow(20),legend = colnames(Ct.fraction.group),
# args.legend = list(x = "topright", bty = "n", inset=c(-0.15, 0)))
bb = barplot(as.matrix(t(Ct.fraction.group)), ylim=c(0, 107), ylab = "C Partitioning (%)", xlab = "Container size (L))",
col = cbPalette[1:8],legend = c(expression(R[g]),expression(R["m,tot"]),expression(C[s]),expression(C["s,r"]),expression(C["s,w"]),expression(C["s,f"]),expression(C["f,lit"]),expression(C["r,lit"])),
args.legend = list(x = "topright", bty = "n", inset=c(-0.18, 0)))
# text( bb, Ct.fraction.group[,1]-3, labels = round(Ct.group[,3],1), cex=.9)
# text( bb, Ct.fraction.group[,1]+Ct.fraction.group[,2]-4, labels = round(Ct.group[,4],1), cex=.9)
# text( bb, Ct.fraction.group[,1]+Ct.fraction.group[,2]+Ct.fraction.group[,3]-1, labels = round(Ct.group[,5],1), cex=.9)
# text( bb, Ct.fraction.group[,1]+Ct.fraction.group[,2]+Ct.fraction.group[,3]+Ct.fraction.group[,4]-3, labels = round(Ct.group[,6],1), cex=.9)
# text( bb, Ct.fraction.group[,1]+Ct.fraction.group[,2]+Ct.fraction.group[,3]+Ct.fraction.group[,4]+Ct.fraction.group[,5]-3, labels = round(Ct.group[,7],1), cex=.9)
# text( bb, Ct.fraction.group[,1]+Ct.fraction.group[,2]+Ct.fraction.group[,3]+Ct.fraction.group[,4]+Ct.fraction.group[,5]+Ct.fraction.group[,6]-2, labels = round(Ct.group[,8],1), cex=.9)
# text( bb, Ct.fraction.group[,1]+Ct.fraction.group[,2]+Ct.fraction.group[,3]+Ct.fraction.group[,4]+Ct.fraction.group[,5]+Ct.fraction.group[,6]+Ct.fraction.group[,7]-1, labels = round(Ct.group[,9],1), cex=.9)
text( bb, rowSums(Ct.fraction.group)+0.5, labels = round(Ct.group[,2],1), pos = 3, cex=1, col="red")
dev.off()
|
# Getting & Cleaning Data -- week 3, question 5.
# Merge the GDP ranking table with country metadata, bin the countries into
# GDP quintiles, and tabulate quintile membership against Income.Group.
library(dplyr)
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv", "GDP.csv")
gdp_raw <- read.csv("GDP.csv", colClasses = "character")
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv", "countrystats.csv")
country_stats <- read.csv("countrystats.csv")
# inner join on country code (held in column "X" of the GDP table)
gdp_country <- merge(gdp_raw, country_stats, by.x = "X", by.y = "CountryCode", all = FALSE)
head(gdp_country)
tail(gdp_country)
nrow(gdp_country)
names(gdp_country)
#Cut the GDP ranking into 5 separate quantile groups. Make a table versus Income.Group. How many countries
#are Lower middle income but among the 38 nations with highest GDP?
gdp_ranked <- gdp_country %>%
select(X, Gross.domestic.product.2012, Income.Group) %>%
filter(Gross.domestic.product.2012 != "")
quantile(as.numeric(gdp_ranked$Gross.domestic.product.2012), probs = (0.20))
by_income_group <- split(gdp_ranked, gdp_ranked$Income.Group, gdp_ranked$Gross.domestic.product.2012)
group_counts <- sapply(by_income_group, count)
quintile_breaks <- quantile(as.numeric(gdp_ranked$Gross.domestic.product.2012), probs = seq(0, 1, 0.2), na.rm = TRUE)
gdp_ranked$quantileGDP <- cut(as.numeric(gdp_ranked$Gross.domestic.product.2012), breaks = quintile_breaks)
# count countries per (quintile, income group) pair
quintile_summary <- gdp_ranked %>%
select(quantileGDP, Income.Group) %>%
arrange(quantileGDP, Income.Group) %>%
group_by(quantileGDP, Income.Group) %>%
summarize(n = n())
swirl(0)
#5 is correct
|
/Archive/w3q5.a.R
|
no_license
|
albertderoos/datasciencecoursera
|
R
| false
| false
| 1,346
|
r
|
# Getting & Cleaning Data -- week 3, question 5: GDP quintiles vs Income.Group.
library(dplyr)
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv", "GDP.csv")
GDP <- read.csv("GDP.csv", colClasses = "character")
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv", "countrystats.csv")
countrystats <- read.csv("countrystats.csv")
# inner join of GDP ranking with country metadata on country code (column "X")
new <- merge(GDP, countrystats, by.x = "X", by.y = "CountryCode", all = FALSE)
head(new)
tail(new)
nrow(new)
names(new)
#Cut the GDP ranking into 5 separate quantile groups. Make a table versus Income.Group. How many countries
#are Lower middle income but among the 38 nations with highest GDP?
new2 <- new %>% select(X, Gross.domestic.product.2012, Income.Group) %>%
filter(Gross.domestic.product.2012 != "")
quantile(as.numeric(new2$Gross.domestic.product.2012), probs = (0.20))
# NOTE(review): split() takes only (x, f); the third argument here is silently
# swallowed by `...` -- only Income.Group drives the split.
splitnew2 <- split(new2, new2$Income.Group, new2$Gross.domestic.product.2012)
# `count` is assumed to come from an attached package (dplyr/plyr) -- confirm.
counts <- sapply(splitnew2, count)
# quintile boundaries of the (numeric) GDP ranking, then bin each country
breaks <- quantile(as.numeric(new2$Gross.domestic.product.2012), probs = seq(0, 1, 0.2), na.rm = TRUE)
new2$quantileGDP <- cut(as.numeric(new2$Gross.domestic.product.2012), breaks = breaks)
new3 <- new2 %>% select(quantileGDP, Income.Group) %>%
arrange(quantileGDP, Income.Group) %>%
group_by(quantileGDP, Income.Group) %>%
summarize(n= n())
# NOTE(review): swirl is never loaded in this script; this call looks like a
# leftover from an interactive session -- confirm before reuse.
swirl(0)
#5 is correct
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hello.R
\name{app}
\alias{app}
\title{app}
\usage{
app()
}
\description{
app
}
|
/man/app.Rd
|
no_license
|
Tutuchan/gettextpo
|
R
| false
| true
| 158
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hello.R
\name{app}
\alias{app}
\title{app}
\usage{
app()
}
\description{
app
}
|
/*
IMPORTANT! This file is auto-generated each time you run cmake on your
project - if you alter its contents, your changes may be overwritten!
*/
#include "AppConfig.h"
#include <juce_audio_plugin_client_AU.r>
|
/build/JuceLibraryCode/include_juce_audio_plugin_client_AU.r
|
no_license
|
vincent-chenzhun-huang/Synthesizer
|
R
| false
| false
| 222
|
r
|
/*
IMPORTANT! This file is auto-generated each time you run cmake on your
project - if you alter its contents, your changes may be overwritten!
*/
#include "AppConfig.h"
#include <juce_audio_plugin_client_AU.r>
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distribution.R
\name{ecosystem}
\alias{ecosystem}
\title{ecosystem}
\usage{
ecosystem(
species_list = NULL,
fields = NULL,
server = getOption("FISHBASE_API", "fishbase"),
version = get_latest_release(),
db = default_db(),
...
)
}
\arguments{
\item{species_list}{A vector of scientific names (each element as "genus species"). If empty, a table for all fish will be returned.}
\item{fields}{a character vector specifying which fields (columns) should be returned. By default,
all available columns recognized by the parser are returned. Mostly for backwards compatibility as users can subset by column later}
\item{server}{can be set to either "fishbase" or "sealifebase" to switch between databases. NOTE: it is usually
easier to leave this as NULL and set the source instead using the environmental variable `FISHBASE_API`, e.g.
`Sys.setenv(FISHBASE_API="sealifebase")`.}
\item{version}{a version string for the database, will default to the latest release. see [get_releases()] for details.}
\item{db}{the database connection to use; defaults to \code{default_db()}.}
\item{...}{unused; for backwards compatibility only}
}
\value{
a table of species ecosystems data
}
\description{
ecosystem
}
\examples{
\dontshow{if (interactive() ) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
\dontrun{
ecosystem("Oreochromis niloticus")
}
\dontshow{\}) # examplesIf}
}
|
/man/ecosystem.Rd
|
no_license
|
ropensci/rfishbase
|
R
| false
| true
| 1,421
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distribution.R
\name{ecosystem}
\alias{ecosystem}
\title{ecosystem}
\usage{
ecosystem(
species_list = NULL,
fields = NULL,
server = getOption("FISHBASE_API", "fishbase"),
version = get_latest_release(),
db = default_db(),
...
)
}
\arguments{
\item{species_list}{A vector of scientific names (each element as "genus species"). If empty, a table for all fish will be returned.}
\item{fields}{a character vector specifying which fields (columns) should be returned. By default,
all available columns recognized by the parser are returned. Mostly for backwards compatibility as users can subset by column later}
\item{server}{can be set to either "fishbase" or "sealifebase" to switch between databases. NOTE: it is usually
easier to leave this as NULL and set the source instead using the environmental variable `FISHBASE_API`, e.g.
`Sys.setenv(FISHBASE_API="sealifebase")`.}
\item{version}{a version string for the database, will default to the latest release. see [get_releases()] for details.}
\item{db}{the database connection to use; defaults to \code{default_db()}.}
\item{...}{unused; for backwards compatibility only}
}
\value{
a table of species ecosystems data
}
\description{
ecosystem
}
\examples{
\dontshow{if (interactive() ) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
\dontrun{
ecosystem("Oreochromis niloticus")
}
\dontshow{\}) # examplesIf}
}
|
# Extracted example for hydrostats::high.spell.lengths.
library(hydrostats)
### Name: high.spell.lengths
### Title: Calculate the length of all high flow spells
### Aliases: high.spell.lengths
### ** Examples
data(Cooper)
# coerce the bundled Cooper flow record into the Date/flow layout hydrostats expects
Cooper<-ts.format(Cooper)
# spell lengths where flow exceeds 50,000 (units per the Cooper dataset -- confirm)
high.spell.lengths(Cooper, threshold=50000)
|
/data/genthat_extracted_code/hydrostats/examples/high.spell.lengths.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 245
|
r
|
# Extracted example for hydrostats::high.spell.lengths.
library(hydrostats)
### Name: high.spell.lengths
### Title: Calculate the length of all high flow spells
### Aliases: high.spell.lengths
### ** Examples
data(Cooper)
# coerce the bundled Cooper flow record into the Date/flow layout hydrostats expects
Cooper<-ts.format(Cooper)
# spell lengths where flow exceeds 50,000 (units per the Cooper dataset -- confirm)
high.spell.lengths(Cooper, threshold=50000)
|
#
# This file is part of the CNO software
#
# Copyright (c) 2011-2012 - EMBL - European Bioinformatics Institute
#
# File author(s): CNO developers (cno-dev@ebi.ac.uk)
#
# Distributed under the GPLv3 License.
# See accompanying file LICENSE.txt or copy at
# http://www.gnu.org/licenses/gpl-3.0.html
#
# CNO website: http://www.cellnopt.org
#
##############################################################################
# $Id$
# Build a CNOlist from a MIDAS-style dataset.
#
# Arguments:
#   dataset  - a list with elements 'dataMatrix' (measurements, one row per
#              condition/time), 'TRcol' (treatment/cue column indices),
#              'DAcol' (time column indices) and 'DVcol' (measured-value
#              column indices).
#   subfield - TRUE if TR: column names carry ":Stimuli"/":Inhibitor"
#              subfields; FALSE if inhibitors are marked by a trailing "i".
#   verbose  - print progress/diagnostic messages.
#
# Returns a list holding the cue/stimulus/inhibitor/signal names, the sorted
# measurement times, the per-condition cue/stimulus/inhibitor 0-1 matrices,
# and one signal-value matrix plus one variance matrix per time point.
makeCNOlist<-function(dataset,subfield, verbose=TRUE){
#check that all the needed elements are present
if(!is.list(dataset)){
stop("The input to this function should be a list with elements 'dataMatrix', 'TRcol','DAcol',and 'DVcol'")
}
if(length(dataset) != 4){
stop("The input to this function should be a list with elements 'dataMatrix', 'TRcol','DAcol',and 'DVcol'")
}
if(sum(c("dataMatrix","TRcol","DAcol","DVcol") %in% names(dataset))!=4){
stop("The input to this function should be a list with elements 'dataMatrix', 'TRcol','DAcol',and 'DVcol'")
}
# first, we summarise the replicates in the dataMatrix: replicates are rows in the dataset$dataMatrix
# that have the exact same values in all TR: and DA: columns
duplCond<-as.matrix(dataset$dataMatrix[,c(dataset$TRcol,dataset$DAcol)])
duplRows<-which(duplicated(duplCond) == TRUE)
if (verbose == TRUE){
if (length(duplRows)>0){
print("Cleaning duplicated rows")
}
}
# creates a variance matrix
variances = dataset$dataMatrix * 0
# Collapse each set of replicate rows into its column-wise mean, recording the
# per-column variance; loop until no duplicated (TR,DA) row combination is left.
while(length(duplRows) != 0){
# the all(x == ) is buggy in the case of NA hence the compareNA function.
#dupIndex<-apply(duplCond,MARGIN=1,function(x) all(x == duplCond[duplRows[1],]))
dupIndex<-apply(duplCond,MARGIN=1,function(x)all(compareNA(x,duplCond[duplRows[1],])))
dupIndex<-which(dupIndex == TRUE)
dupMatrix<-dataset$dataMatrix[dupIndex,]
#compute the new row as the average across duplicate rows
newRow<-colMeans(dupMatrix, na.rm=TRUE)
# variance for these rows
newVariance = apply(dupMatrix, MARGIN=2, FUN=var, na.rm=T)
#replace the first duplicated row by the summarised one
dataset$dataMatrix[dupIndex[1],]<-newRow
# same for the variance
variances[dupIndex[1],]<-newVariance
#remove the other summarised rows
dataset$dataMatrix<-dataset$dataMatrix[-dupIndex[2:length(dupIndex)],]
variances<-variances[-dupIndex[2:length(dupIndex)],]
duplCond<-as.matrix(dataset$dataMatrix[,c(dataset$TRcol,dataset$DAcol)])
duplRows<-which(duplicated(duplCond) == TRUE)
}
# colMeans over all-NA replicate cells yields NaN; normalise those back to NA
if (any(is.nan(as.matrix(dataset$dataMatrix)))){
dataset$dataMatrix[is.nan(as.matrix(dataset$dataMatrix))] <- NA
}
#now extract the names of the cues, and the inhibitors/stimuli
namesCues<-colnames(dataset$dataMatrix)[dataset$TRcol]
if(subfield == TRUE){
namesCues<-sub(pattern="(TR:)",x=namesCues, replacement="",perl=TRUE)
tagInhib<-grep(pattern=":Inhibitor", x=namesCues)
# tagInhibL is a logical version of the previous statement.
# must be set now before namesCues is changed. See JIRA bug 27
tagInhibL<-grepl(pattern=":Inhibitor", x=namesCues)
# remove the trailing :Inhibitors and :Stimuli
namesCues<-sub(pattern="(:\\w*$)",x=namesCues, replacement="",perl=TRUE)
# remove trailing i
namesCues[tagInhib]<-sub(pattern="(i$)", x=namesCues[tagInhib], replacement="", perl=TRUE)
# if no inhibitors, grep returns integer(0), so we now need to use grepl
# (logical version of grep)
namesStimuli<-namesCues[tagInhibL==FALSE]
namesInhibitors<-namesCues[tagInhibL==TRUE]
}
if(subfield == FALSE){
namesCues<-sub(pattern="(TR:)",x=namesCues, replacement="",perl=TRUE)
tagInhib<-grep(pattern="(i$)", x=namesCues, perl=TRUE,ignore.case=FALSE)
# tagInhibL must be set now before namesCues is changed See JIRA bug 27
tagInhibL<-grepl(pattern="(i$)", x=namesCues, perl=TRUE,ignore.case=FALSE)
namesCues[tagInhib]<-sub(pattern="(i$)", x=namesCues[tagInhib], replacement="", perl=TRUE)
# if no inhibitors, grep returns integer(0), so we now need to use grepl
# (logical version of grep)
namesStimuli<-namesCues[tagInhibL==FALSE]
namesInhibitors<-namesCues[tagInhibL==TRUE]
}
# Drop the informational NOCYTO/NOINHIB columns; reject the spelling variants
# that MIDAS does not accept.
if(sum("NOCYTO" %in% namesCues) != 0){
namesCues<-namesCues[-grep(pattern="NOCYTO", namesCues)]
namesStimuli<-namesStimuli[-grep(pattern="NOCYTO", namesStimuli)]
}
if(sum("NOINHIB" %in% namesCues) != 0){
namesCues<-namesCues[-grep(pattern="NOINHIB", namesCues)]
namesStimuli<-namesStimuli[-grep(pattern="NOINHIB", namesStimuli)]
}
if(sum("NO-INHIB" %in% namesCues) != 0){
stop("Found a column with NO-INHIB tag. MIDAS files must use NOINHIB instead. Fix your MIDAS file please")
}
if(sum("NO-LIG" %in% namesCues) != 0){
stop("Found a column with NO-LIG tag. MIDAS files do not accept NO-LIG. use NOINHIB or NOCYTO instead. Fix your MIDAS file please")
}
if(sum("NOLIG" %in% namesCues) != 0){
stop("Found a column with NO-LIG tag. MIDAS files do not accept NO-LIG. use NOINHIB or NOCYTO instead. Fix your MIDAS file please")
}
if(sum("NO-CYTO" %in% namesCues) != 0){
stop("Found a column with NO-CYTO tag. MIDAS file must use NOCYTO instead. Fix your MIDAS file please")
}
#now extract the names of the signals
namesSignals<-colnames(dataset$dataMatrix)[dataset$DAcol]
namesSignals<-sub(
pattern="(DA:p-)",
x=namesSignals,
replacement="",
perl=TRUE)
namesSignals<-sub(
pattern="(DA:)",
x=namesSignals,
replacement="",
perl=TRUE)
#now extract the time signals
times<-as.factor(as.vector(as.character(as.matrix(dataset$dataMatrix[,dataset$DAcol]))))
timeSignals<-sort(as.double(levels(times)))
#Build the valueCues matrix (i.e. a matrix with nrows=nrows in dataMatrix and ncol=number of cues,
#filled with 0/1 if the particular cue is present or not)
#1.I create a matrix that is a subset of the data, and only contains the TR columns
#(the cellLine TR column was removed previously)
#2.I remove the columns with NOCYTO or NOINHIB (if they exist), they don't bring any info
if(length(grep(pattern="NOCYTO",colnames(dataset$dataMatrix)[dataset$TRcol])) != 0){
nocyto<-grep(pattern="NOCYTO",colnames(dataset$dataMatrix)[dataset$TRcol])
TRcol<-dataset$TRcol[-nocyto]
cues<-dataset$dataMatrix[,TRcol]
}else{
# use as.matrix and then set colnames to cope for particular case of
# only one cue
cues<-as.matrix(dataset$dataMatrix[,dataset$TRcol])
colnames(cues) = colnames(dataset$dataMatrix)[dataset$TRcol]
}
if(length(grep(pattern="NOINHIB",colnames(cues))) != 0){
noinhib<-grep(pattern="NOINHIB",colnames(cues))
cues<-cues[,-noinhib]
}
#3. The cues sub-data frame now contains 1 if the cue is present and 0/NA otherwise,
#so I just need to transform the data frame into a numerical matrix
#and replace the NA in there by zeros
cues<-as.matrix(cues,nrow=dim(cues)[1],ncol=dim(cues)[2],byrow=TRUE)
cues[is.na(cues)]<-0
#Build the valueSignals matrices. I am going to build one big matrix
#that includes all the time points, and then I will split it into one matrix for each time point
#And then I will arrange the valueCues matrix accordingly
valueSignals<-as.matrix(dataset$dataMatrix[,dataset$DVcol])
valueVariance<-as.matrix(dataset$dataMatrix[,dataset$DVcol])
#This bit will create an index that contains all rows with timept1, 2, 3,...
#1.First I create a matrix that holds the time information for each row
times<-as.matrix(dataset$dataMatrix[,dataset$DAcol])
#2. Now I check that all the columns are tha same, i.e. that each row
#will contain data on the same time point
if (length(dataset$DAcol)>1){
check<-rep(FALSE,(length(dataset$DAcol)-1))
for(i in 1:length(check)){
check[i]<-all.equal(times[,i],times[,(i+1)])
}
if(sum(check) != length(check)) {
warning("Each row of your data file should contain measurements at the same time point.
The times for the first DA column will be considered as the times for all measurements")
}
}
#3.Now I will only use the first column of times
#First, I create a vector timeRows that contains the indexes of the rows that contain data
#about each time point (in increasing order of time), and the vector whereTimes that contain`
#the info about how many rows I have for each time (which will allow me to extract the right
#entries from timesRows)
times<-times[,1]
ntimes<-length(timeSignals)
if (ntimes <2){
stop("Error while parsing the data. Only one time was found.")
}
whereTimes<-rep(0,ntimes)
timesRows<-0 # Melody uses timesRows <- rep(0,1)
for(i in 1:ntimes){
timesRows<-c(timesRows,which(times == timeSignals[i]))
whereTimes[i]<-length(which(times == timeSignals[i]))
}
timesRows<-timesRows[2:length(timesRows)]
#Check that we have data across all conditions for all time points except zero
if(length(unique(whereTimes[2:length(whereTimes)])) != 1){
warning("This program expects data across all conditions at all time points (except t=0) ")
}
if (verbose){
print("Please be aware that if you only have some conditions at time zero (e.g.only inhibitor/no inhibitor), the measurements for these conditions will be copied across matching conditions at t=0")
}
#Do the t=0 matrix, and produce a new cues matrix, that does not contain duplicates
#(1 row per condition and different matrices will be build for the different times)
valueSignals<-list(matrix(data=0,nrow=whereTimes[2],ncol=length(dataset$DVcol)))
valueVariance<-list(matrix(data=0,nrow=whereTimes[2],ncol=length(dataset$DVcol)))
#This vector tells me which columns of the cues matrix I should pay attention to when
#copying data across for time=0
# bug report 31 and
# zerosCond: indices of cue columns active in any t=0 row; used below to match
# each t=0 measurement to the later-time conditions it should be copied to.
if (dim(cues)[2] >1){
# bug report 44
if (whereTimes[1] == 1){
zerosCond <- 0
}
else{
zerosCond<-apply(cues[timesRows[1:whereTimes[1]],],1,function(x) which(x > 0))
}
}
else{
warning("unusual case with 1 dimension in cues")
zerosCond<-which(cues[timesRows[1:whereTimes[1]],] > 0)
}
zerosCond<-unique(unlist(zerosCond))
count=1
newcues<-matrix(data=0,nrow=whereTimes[2],ncol=dim(cues)[2])
#fix bug report 38 to be able to have mixed times in a MIDAS file
#for(i in timesRows[1]:timesRows[whereTimes[1]]){
for(i in timesRows[1:whereTimes[1]]){
# 15.12.2017: is this a bug? "i" is already going through some element of timesRow variable.
# is this "timesRows[i]" make sense?! or we can just use i instead
#present<-zerosCond[which(cues[timesRows[i],zerosCond] > 0)]
present<-zerosCond[which(cues[i ,zerosCond] > 0)]
if(length(present) == 0){
for(n in timesRows[(whereTimes[1]+1):(whereTimes[1]+whereTimes[2])]){
if(sum(cues[n,zerosCond]) == 0){
valueSignals[[1]][count,]<-as.numeric(dataset$dataMatrix[i,dataset$DVcol])
valueVariance[[1]][count,]<-as.numeric(variances[i,dataset$DVcol])
newcues[count,]<-cues[n,]
count=count+1
}
}
}else{
for(n in timesRows[(whereTimes[1]+1):(whereTimes[1]+whereTimes[2])]){
if(length(zerosCond[which(cues[n,zerosCond] > 0)]) == length(present)){
if(all(zerosCond[which(cues[n,zerosCond] > 0)] == present) && # same cues are there
length(which(cues[n,zerosCond] > 0)) != 0 && # there is at least one non-zero cue
# same principle as above: use i instead of timesRows[i]
#all(cues[timesRows[i],zerosCond] == cues[n,zerosCond]) # cues have the same level
all(cues[i,zerosCond] == cues[n,zerosCond]) # cues have the same level
){
valueSignals[[1]][count,]<-as.numeric(dataset$dataMatrix[i,dataset$DVcol])
valueVariance[[1]][count,]<-as.numeric(variances[i,dataset$DVcol])
newcues[count,]<-cues[n,]
count=count+1
}
}
}
}
}
#Now build the matrices for the other time points
# For each later time point, find the unique row whose cues match each newcues
# condition and copy its measured values/variances into that condition's slot.
for(i in 2:length(timeSignals)){
valuesTi<-matrix(data=0,nrow=whereTimes[2],ncol=length(dataset$DVcol))
valuesVarianceTi<-matrix(data=0,nrow=whereTimes[2],ncol=length(dataset$DVcol))
for(n in 1:dim(newcues)[1]){
rowsMatchCues<-apply(cues,1,function(x) all(x == newcues[n,]))
rowsmatchTime<-times == timeSignals[i]
rowsMatch<-which((rowsMatchCues + rowsmatchTime) == 2)
valuesTi[n,]<-as.numeric(dataset$dataMatrix[rowsMatch,dataset$DVcol])
valuesVarianceTi[n,]<-as.numeric(variances[rowsMatch,dataset$DVcol])
}
valueSignals[[i]]<-valuesTi
valueVariance[[i]]<-valuesVarianceTi
}
#Build the valueInhibitors and valueStimuli matrices, which are a subset of the cues one
if(subfield == TRUE){
valueInhibitors<-newcues[,grep(
pattern="Inhibitor",x=colnames(cues),ignore.case=TRUE)]
valueStimuli<-newcues[,grepl(pattern="Inhibitor",x=colnames(cues),ignore.case=TRUE)==FALSE]
}else{
valueInhibitors<-newcues[,grep(pattern="(i$)",x=colnames(cues),ignore.case=FALSE,perl=TRUE)]
valueStimuli <- newcues[,grepl(pattern="(i$)",x=colnames(cues),ignore.case=FALSE,perl=TRUE)==FALSE]
}
# Single-column selections collapse to vectors; restore matrix shape.
if(is.null(dim(valueInhibitors))){
valueInhibitors<-matrix(valueInhibitors,nrow=dim(newcues)[1])
}
if(is.null(dim(valueStimuli))){
valueStimuli<-matrix(valueStimuli,nrow=dim(newcues)[1])
}
# Assemble and return the CNOlist components.
return(list(
namesCues=namesCues,
namesStimuli=namesStimuli,
namesInhibitors=namesInhibitors,
namesSignals=namesSignals,
timeSignals=timeSignals,
valueCues=newcues,
valueInhibitors=valueInhibitors,
valueStimuli=valueStimuli,
valueSignals=valueSignals,
valueVariances=valueVariance
))
}
compareNA <- function(v1, v2) {
  # Elementwise equality that treats a pair of NAs as equal and never
  # returns NA: TRUE where elements match (including NA == NA), FALSE
  # everywhere else.
  both_na <- is.na(v1) & is.na(v2)
  eq <- v1 == v2
  eq[is.na(eq)] <- FALSE
  eq | both_na
}
|
/packages/CellNOptR/R/makeCNOlist.R
|
no_license
|
saezlab/CellNOptR_packages_archived
|
R
| false
| false
| 15,048
|
r
|
#
# This file is part of the CNO software
#
# Copyright (c) 2011-2012 - EMBL - European Bioinformatics Institute
#
# File author(s): CNO developers (cno-dev@ebi.ac.uk)
#
# Distributed under the GPLv3 License.
# See accompanying file LICENSE.txt or copy at
# http://www.gnu.org/licenses/gpl-3.0.html
#
# CNO website: http://www.cellnopt.org
#
##############################################################################
# $Id$
# Convert a parsed MIDAS dataset into a CNOlist structure.
#
# Arguments:
#   dataset  - list with exactly four elements: 'dataMatrix' (data frame of
#              measurements, one row per condition/time), 'TRcol' (indices of
#              treatment columns), 'DAcol' (indices of time-annotation
#              columns) and 'DVcol' (indices of measured-value columns).
#   subfield - TRUE when cue columns follow the "name:Stimuli"/
#              "name:Inhibitor" subfield convention; FALSE when inhibitors
#              are tagged by a trailing "i" in the column name.
#   verbose  - print progress messages when TRUE.
#
# Returns a list with the cue/stimulus/inhibitor/signal names, the time
# points, the deduplicated condition matrix (valueCues) and one value and
# one variance matrix per time point (valueSignals / valueVariances).
makeCNOlist <- function(dataset, subfield, verbose=TRUE) {
    # check that all the needed elements are present
    if (!is.list(dataset)) {
        stop("The input to this function should be a list with elements 'dataMatrix', 'TRcol','DAcol',and 'DVcol'")
    }
    if (length(dataset) != 4) {
        stop("The input to this function should be a list with elements 'dataMatrix', 'TRcol','DAcol',and 'DVcol'")
    }
    if (sum(c("dataMatrix", "TRcol", "DAcol", "DVcol") %in% names(dataset)) != 4) {
        stop("The input to this function should be a list with elements 'dataMatrix', 'TRcol','DAcol',and 'DVcol'")
    }
    # first, we summarise the replicates in the dataMatrix: replicates are
    # rows of dataset$dataMatrix that have the exact same values in all TR:
    # and DA: columns
    duplCond <- as.matrix(dataset$dataMatrix[, c(dataset$TRcol, dataset$DAcol)])
    duplRows <- which(duplicated(duplCond) == TRUE)
    if (verbose == TRUE) {
        if (length(duplRows) > 0) {
            print("Cleaning duplicated rows")
        }
    }
    # variance matrix, same shape as the data; filled in while averaging
    # replicates below
    variances <- dataset$dataMatrix * 0
    while (length(duplRows) != 0) {
        # all(x == ...) is buggy in the presence of NA, hence compareNA()
        dupIndex <- apply(duplCond, MARGIN=1, function(x) all(compareNA(x, duplCond[duplRows[1], ])))
        dupIndex <- which(dupIndex == TRUE)
        dupMatrix <- dataset$dataMatrix[dupIndex, ]
        # compute the new row as the average across duplicate rows
        newRow <- colMeans(dupMatrix, na.rm=TRUE)
        # variance across the replicates of these rows
        newVariance <- apply(dupMatrix, MARGIN=2, FUN=var, na.rm=TRUE)
        # replace the first duplicated row by the summarised one
        dataset$dataMatrix[dupIndex[1], ] <- newRow
        # same for the variance
        variances[dupIndex[1], ] <- newVariance
        # remove the other summarised rows
        dataset$dataMatrix <- dataset$dataMatrix[-dupIndex[2:length(dupIndex)], ]
        variances <- variances[-dupIndex[2:length(dupIndex)], ]
        duplCond <- as.matrix(dataset$dataMatrix[, c(dataset$TRcol, dataset$DAcol)])
        duplRows <- which(duplicated(duplCond) == TRUE)
    }
    # colMeans() over all-NA replicate groups yields NaN; normalise back to NA
    if (any(is.nan(as.matrix(dataset$dataMatrix)))) {
        dataset$dataMatrix[is.nan(as.matrix(dataset$dataMatrix))] <- NA
    }
    # now extract the names of the cues, and of the inhibitors/stimuli
    namesCues <- colnames(dataset$dataMatrix)[dataset$TRcol]
    if (subfield == TRUE) {
        namesCues <- sub(pattern="(TR:)", x=namesCues, replacement="", perl=TRUE)
        tagInhib <- grep(pattern=":Inhibitor", x=namesCues)
        # tagInhibL is a logical version of the previous statement.
        # must be set now before namesCues is changed. See JIRA bug 27
        tagInhibL <- grepl(pattern=":Inhibitor", x=namesCues)
        # remove the trailing :Inhibitors and :Stimuli
        namesCues <- sub(pattern="(:\\w*$)", x=namesCues, replacement="", perl=TRUE)
        # remove trailing i
        namesCues[tagInhib] <- sub(pattern="(i$)", x=namesCues[tagInhib], replacement="", perl=TRUE)
        # if no inhibitors, grep returns integer(0), so we need grepl
        # (logical version of grep) for the subsetting below
        namesStimuli <- namesCues[tagInhibL == FALSE]
        namesInhibitors <- namesCues[tagInhibL == TRUE]
    }
    if (subfield == FALSE) {
        namesCues <- sub(pattern="(TR:)", x=namesCues, replacement="", perl=TRUE)
        tagInhib <- grep(pattern="(i$)", x=namesCues, perl=TRUE, ignore.case=FALSE)
        # tagInhibL must be set now before namesCues is changed. See JIRA bug 27
        tagInhibL <- grepl(pattern="(i$)", x=namesCues, perl=TRUE, ignore.case=FALSE)
        namesCues[tagInhib] <- sub(pattern="(i$)", x=namesCues[tagInhib], replacement="", perl=TRUE)
        # if no inhibitors, grep returns integer(0), so we need grepl
        # (logical version of grep) for the subsetting below
        namesStimuli <- namesCues[tagInhibL == FALSE]
        namesInhibitors <- namesCues[tagInhibL == TRUE]
    }
    # NOCYTO/NOINHIB are placeholder columns: drop them from the name lists
    if (sum("NOCYTO" %in% namesCues) != 0) {
        namesCues <- namesCues[-grep(pattern="NOCYTO", namesCues)]
        namesStimuli <- namesStimuli[-grep(pattern="NOCYTO", namesStimuli)]
    }
    if (sum("NOINHIB" %in% namesCues) != 0) {
        namesCues <- namesCues[-grep(pattern="NOINHIB", namesCues)]
        namesStimuli <- namesStimuli[-grep(pattern="NOINHIB", namesStimuli)]
    }
    if (sum("NO-INHIB" %in% namesCues) != 0) {
        stop("Found a column with NO-INHIB tag. MIDAS files must use NOINHIB instead. Fix your MIDAS file please")
    }
    if (sum("NO-LIG" %in% namesCues) != 0) {
        stop("Found a column with NO-LIG tag. MIDAS files do not accept NO-LIG. use NOINHIB or NOCYTO instead. Fix your MIDAS file please")
    }
    if (sum("NOLIG" %in% namesCues) != 0) {
        stop("Found a column with NO-LIG tag. MIDAS files do not accept NO-LIG. use NOINHIB or NOCYTO instead. Fix your MIDAS file please")
    }
    if (sum("NO-CYTO" %in% namesCues) != 0) {
        stop("Found a column with NO-CYTO tag. MIDAS file must use NOCYTO instead. Fix your MIDAS file please")
    }
    # now extract the names of the signals (strip "DA:p-" then "DA:" prefixes)
    namesSignals <- colnames(dataset$dataMatrix)[dataset$DAcol]
    namesSignals <- sub(
        pattern="(DA:p-)",
        x=namesSignals,
        replacement="",
        perl=TRUE)
    namesSignals <- sub(
        pattern="(DA:)",
        x=namesSignals,
        replacement="",
        perl=TRUE)
    # now extract the time points
    times <- as.factor(as.vector(as.character(as.matrix(dataset$dataMatrix[, dataset$DAcol]))))
    timeSignals <- sort(as.double(levels(times)))
    # Build the valueCues matrix (nrows = rows in dataMatrix, ncol = number of
    # cues, filled with 0/1 depending on whether the cue is present):
    # 1. subset the data to the TR columns;
    # 2. remove the NOCYTO/NOINHIB columns (if they exist), they carry no info
    if (length(grep(pattern="NOCYTO", colnames(dataset$dataMatrix)[dataset$TRcol])) != 0) {
        nocyto <- grep(pattern="NOCYTO", colnames(dataset$dataMatrix)[dataset$TRcol])
        TRcol <- dataset$TRcol[-nocyto]
        cues <- dataset$dataMatrix[, TRcol]
    } else {
        # use as.matrix and then set colnames to cope with the particular
        # case of only one cue
        cues <- as.matrix(dataset$dataMatrix[, dataset$TRcol])
        colnames(cues) <- colnames(dataset$dataMatrix)[dataset$TRcol]
    }
    if (length(grep(pattern="NOINHIB", colnames(cues))) != 0) {
        noinhib <- grep(pattern="NOINHIB", colnames(cues))
        cues <- cues[, -noinhib]
    }
    # 3. cues now contains 1 if the cue is present and 0/NA otherwise; turn it
    # into a numerical matrix and replace NAs by zeros
    cues <- as.matrix(cues, nrow=dim(cues)[1], ncol=dim(cues)[2], byrow=TRUE)
    cues[is.na(cues)] <- 0
    # Build the valueSignals matrices: one big matrix covering all time
    # points, later split into one matrix per time point (with valueCues
    # rearranged accordingly)
    valueSignals <- as.matrix(dataset$dataMatrix[, dataset$DVcol])
    valueVariance <- as.matrix(dataset$dataMatrix[, dataset$DVcol])
    # 1. matrix holding the time information for each row
    times <- as.matrix(dataset$dataMatrix[, dataset$DAcol])
    # 2. check that all DA columns agree, i.e. each row contains data on the
    # same time point
    if (length(dataset$DAcol) > 1) {
        check <- rep(FALSE, (length(dataset$DAcol) - 1))
        for (i in 1:length(check)) {
            # BUG FIX: all.equal() returns a character vector (not FALSE) on
            # mismatch, which corrupted the logical 'check' vector and made
            # sum(check) fail before the intended warning could fire;
            # isTRUE() maps any mismatch to FALSE.
            check[i] <- isTRUE(all.equal(times[, i], times[, (i + 1)]))
        }
        if (sum(check) != length(check)) {
            warning("Each row of your data file should contain measurements at the same time point.
The times for the first DA column will be considered as the times for all measurements")
        }
    }
    # 3. only the first column of times is used from here on.
    # timesRows holds the indices of the rows for each time point (in
    # increasing order of time); whereTimes holds how many rows each time
    # point has, which allows extracting the right entries from timesRows
    times <- times[, 1]
    ntimes <- length(timeSignals)
    if (ntimes < 2) {
        stop("Error while parsing the data. Only one time was found.")
    }
    whereTimes <- rep(0, ntimes)
    timesRows <- 0 # Melody uses timesRows <- rep(0,1)
    for (i in 1:ntimes) {
        timesRows <- c(timesRows, which(times == timeSignals[i]))
        whereTimes[i] <- length(which(times == timeSignals[i]))
    }
    # drop the leading placeholder 0
    timesRows <- timesRows[2:length(timesRows)]
    # check that we have data across all conditions for all time points
    # except zero
    if (length(unique(whereTimes[2:length(whereTimes)])) != 1) {
        warning("This program expects data across all conditions at all time points (except t=0) ")
    }
    if (verbose) {
        print("Please be aware that if you only have some conditions at time zero (e.g.only inhibitor/no inhibitor), the measurements for these conditions will be copied across matching conditions at t=0")
    }
    # Do the t=0 matrix, and produce a new cues matrix without duplicates
    # (1 row per condition; separate matrices are built for the other times)
    valueSignals <- list(matrix(data=0, nrow=whereTimes[2], ncol=length(dataset$DVcol)))
    valueVariance <- list(matrix(data=0, nrow=whereTimes[2], ncol=length(dataset$DVcol)))
    # zerosCond: which cue columns matter when copying t=0 data across
    # matching conditions
    # bug report 31 and
    if (dim(cues)[2] > 1) {
        # bug report 44
        if (whereTimes[1] == 1) {
            zerosCond <- 0
        } else {
            zerosCond <- apply(cues[timesRows[1:whereTimes[1]], ], 1, function(x) which(x > 0))
        }
    } else {
        warning("unusual case with 1 dimension in cues")
        zerosCond <- which(cues[timesRows[1:whereTimes[1]], ] > 0)
    }
    zerosCond <- unique(unlist(zerosCond))
    count <- 1
    newcues <- matrix(data=0, nrow=whereTimes[2], ncol=dim(cues)[2])
    # fix bug report 38 to be able to have mixed times in a MIDAS file:
    # iterate over the actual t=0 row indices rather than a contiguous range
    for (i in timesRows[1:whereTimes[1]]) {
        # cue columns that are active in this t=0 row
        present <- zerosCond[which(cues[i, zerosCond] > 0)]
        if (length(present) == 0) {
            # no cue active at t=0: copy this measurement to every later
            # condition that also has no active cue
            for (n in timesRows[(whereTimes[1] + 1):(whereTimes[1] + whereTimes[2])]) {
                if (sum(cues[n, zerosCond]) == 0) {
                    valueSignals[[1]][count, ] <- as.numeric(dataset$dataMatrix[i, dataset$DVcol])
                    valueVariance[[1]][count, ] <- as.numeric(variances[i, dataset$DVcol])
                    newcues[count, ] <- cues[n, ]
                    count <- count + 1
                }
            }
        } else {
            for (n in timesRows[(whereTimes[1] + 1):(whereTimes[1] + whereTimes[2])]) {
                if (length(zerosCond[which(cues[n, zerosCond] > 0)]) == length(present)) {
                    if (all(zerosCond[which(cues[n, zerosCond] > 0)] == present) && # same cues are there
                        length(which(cues[n, zerosCond] > 0)) != 0 && # there is at least one non-zero cue
                        all(cues[i, zerosCond] == cues[n, zerosCond]) # cues have the same level
                    ) {
                        valueSignals[[1]][count, ] <- as.numeric(dataset$dataMatrix[i, dataset$DVcol])
                        valueVariance[[1]][count, ] <- as.numeric(variances[i, dataset$DVcol])
                        newcues[count, ] <- cues[n, ]
                        count <- count + 1
                    }
                }
            }
        }
    }
    # now build the matrices for the other time points
    for (i in 2:length(timeSignals)) {
        valuesTi <- matrix(data=0, nrow=whereTimes[2], ncol=length(dataset$DVcol))
        valuesVarianceTi <- matrix(data=0, nrow=whereTimes[2], ncol=length(dataset$DVcol))
        for (n in 1:dim(newcues)[1]) {
            # row matching this condition AND this time point
            rowsMatchCues <- apply(cues, 1, function(x) all(x == newcues[n, ]))
            rowsmatchTime <- times == timeSignals[i]
            rowsMatch <- which((rowsMatchCues + rowsmatchTime) == 2)
            valuesTi[n, ] <- as.numeric(dataset$dataMatrix[rowsMatch, dataset$DVcol])
            valuesVarianceTi[n, ] <- as.numeric(variances[rowsMatch, dataset$DVcol])
        }
        valueSignals[[i]] <- valuesTi
        valueVariance[[i]] <- valuesVarianceTi
    }
    # build valueInhibitors and valueStimuli, which are subsets of the cues
    if (subfield == TRUE) {
        valueInhibitors <- newcues[, grep(
            pattern="Inhibitor", x=colnames(cues), ignore.case=TRUE)]
        valueStimuli <- newcues[, grepl(pattern="Inhibitor", x=colnames(cues), ignore.case=TRUE) == FALSE]
    } else {
        valueInhibitors <- newcues[, grep(pattern="(i$)", x=colnames(cues), ignore.case=FALSE, perl=TRUE)]
        valueStimuli <- newcues[, grepl(pattern="(i$)", x=colnames(cues), ignore.case=FALSE, perl=TRUE) == FALSE]
    }
    # a single selected column collapses to a vector; coerce back to a matrix
    if (is.null(dim(valueInhibitors))) {
        valueInhibitors <- matrix(valueInhibitors, nrow=dim(newcues)[1])
    }
    if (is.null(dim(valueStimuli))) {
        valueStimuli <- matrix(valueStimuli, nrow=dim(newcues)[1])
    }
    return(list(
        namesCues=namesCues,
        namesStimuli=namesStimuli,
        namesInhibitors=namesInhibitors,
        namesSignals=namesSignals,
        timeSignals=timeSignals,
        valueCues=newcues,
        valueInhibitors=valueInhibitors,
        valueStimuli=valueStimuli,
        valueSignals=valueSignals,
        valueVariances=valueVariance
    ))
}
compareNA <- function(v1, v2) {
  # Elementwise equality that treats a pair of NAs as equal and never
  # returns NA: TRUE where elements match (including NA == NA), FALSE
  # everywhere else.
  both_na <- is.na(v1) & is.na(v2)
  eq <- v1 == v2
  eq[is.na(eq)] <- FALSE
  eq | both_na
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hmmDemo.r
\name{hmmDemo}
\alias{hmmDemo}
\title{HMM computation demo functions}
\usage{
hmmDemo(object, ddl = NULL, state.names = NULL, obs.names = NULL)
}
\arguments{
\item{object}{fitted hmm model}
\item{ddl}{design data list; if NULL it is created}
\item{state.names}{names for states used to label output; if NULL uses strata.labels + Dead state}
\item{obs.names}{names for observations used to label output; if NULL uses ObsLevels}
}
\value{
hmm demo list which includes 1) lnl - the log-likelihood value, 2) alpha - forward probabilities,
3) beta - backward probabilities, 4) phi - scaled forward probabilities, 5) v- intermediate calculation for phi,
6) dmat - 3-d array with observation probability matrix for each occasion, 7) gamma - 3-d array with state transition probability
matrix for each occasion, 8) stateprob - predicted state probabilities, 9) local_decode - state predictions for each occasion and individual,
10) global_decode - state predictions for entire sequence for each individual.
}
\description{
Uses fitted hmm model to construct HMM state vectors alpha and phi for demonstration purposes
}
\examples{
\donttest{
# This example is excluded from testing to reduce package check time
# cormack-jolly-seber model
data(dipper)
mod=crm(dipper,model="hmmcjs")
x=hmmDemo(mod,state.names=c("Alive","Dead"),obs.names=c("Missed","Seen"))
par(mfrow=c(2,1))
barplot(t(x$alpha[45,,]),beside=TRUE,names.arg=x$chforwardstrings)
barplot(t(x$phi[45,,]),beside=TRUE,names.arg=x$chforwardstrings)
# multi-state example showing state predictions
data(mstrata)
mod=crm(mstrata,model="hmmMSCJS",strata.labels=c("A","B","C"))
x=hmmDemo(mod)
# state predictions are normalized by likelihood value which = rowSums(alpha*beta)
cat(paste("\nrowsums = ",rowSums(x$alpha[45,,]*x$beta[45,,],na.rm=TRUE)[2],
"which matches likelihood value",exp(x$lnl[45]),"\n"))
# state predictions given the data
x$stateprob[45,,]
}
}
\author{
Jeff Laake
}
\keyword{models}
|
/marked/man/hmmDemo.Rd
|
no_license
|
jlaake/marked
|
R
| false
| true
| 2,045
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hmmDemo.r
\name{hmmDemo}
\alias{hmmDemo}
\title{HMM computation demo functions}
\usage{
hmmDemo(object, ddl = NULL, state.names = NULL, obs.names = NULL)
}
\arguments{
\item{object}{fitted hmm model}
\item{ddl}{design data list; if NULL it is created}
\item{state.names}{names for states used to label output; if NULL uses strata.labels + Dead state}
\item{obs.names}{names for observations used to label output; if NULL uses ObsLevels}
}
\value{
hmm demo list which includes 1) lnl - the log-likelihood value, 2) alpha - forward probabilities,
3) beta - backward probabilities, 4) phi - scaled forward probabilities, 5) v- intermediate calculation for phi,
6) dmat - 3-d array with observation probability matrix for each occasion, 7) gamma - 3-d array with state transition probability
matrix for each occasion, 8) stateprob - predicted state probabilities, 9) local_decode - state predictions for each occasion and individual,
10) global_decode - state predictions for entire sequence for each individual.
}
\description{
Uses fitted hmm model to construct HMM state vectors alpha and phi for demonstration purposes
}
\examples{
\donttest{
# This example is excluded from testing to reduce package check time
# cormack-jolly-seber model
data(dipper)
mod=crm(dipper,model="hmmcjs")
x=hmmDemo(mod,state.names=c("Alive","Dead"),obs.names=c("Missed","Seen"))
par(mfrow=c(2,1))
barplot(t(x$alpha[45,,]),beside=TRUE,names.arg=x$chforwardstrings)
barplot(t(x$phi[45,,]),beside=TRUE,names.arg=x$chforwardstrings)
# multi-state example showing state predictions
data(mstrata)
mod=crm(mstrata,model="hmmMSCJS",strata.labels=c("A","B","C"))
x=hmmDemo(mod)
# state predictions are normalized by likelihood value which = rowSums(alpha*beta)
cat(paste("\nrowsums = ",rowSums(x$alpha[45,,]*x$beta[45,,],na.rm=TRUE)[2],
"which matches likelihood value",exp(x$lnl[45]),"\n"))
# state predictions given the data
x$stateprob[45,,]
}
}
\author{
Jeff Laake
}
\keyword{models}
|
# Print method for objects of class 'persFit' (person-fit statistics).
#
# Prints the original call, a description of the alternative hypothesis,
# the number of Monte Carlo samples (when p-values were simulated), and a
# table combining each response pattern with its observed statistic(s) and
# formatted p-value(s).
#
# Args:
#   x      - an object of class 'persFit'.
#   digits - number of digits used when rounding/formatting statistics and
#            p-values.
#   ...    - ignored; present to match the print() generic signature.
#
# Returns 'x' invisibly, as is conventional for print methods.
print.persFit <-
function (x, digits = 4, ...) {
    if (!inherits(x, "persFit"))
        stop("Use only with 'persFit' objects.\n")
    cat("\nPerson-Fit Statistics and P-values\n")
    cat("\nCall:\n", paste(deparse(x$call), sep = "\n", collapse = "\n"), "\n", sep = "")
    # human-readable description of the alternative hypothesis
    Alt <- switch(x$alternative,
        "less" = "Inconsistent response pattern under the estimated model",
        "greater" = "More consistent response pattern than the model predicts",
        "two.sided" = "Either inconsistent or more consistent response pattern under the estimated model")
    cat("\nAlternative:", Alt)
    if (x$simulate.p.value)
        cat("\nMonte Carlo samples:", x$B, "\n\n")
    else
        cat("\n\n")
    out.dat1 <- as.data.frame(round(x$Tobs, digits))
    # format the p-values; values that round to exactly 0 are displayed as a
    # "<0.0001"-style bound instead of a literal zero
    out <- apply(x$p.value, 2, function (x) {
        val <- round(x, digits)
        res <- formatC(val, digits = digits)
        res[val == 0] <- paste("<0.", paste(rep(0, digits - 1), collapse = ""), "1", collapse = "", sep = "")
        res
    })
    # apply() collapses to a vector when there is a single row; restore a
    # one-row matrix before converting to a data frame
    out.dat2 <- as.data.frame(if (!is.matrix(out)) rbind(out) else out)
    # label each p-value column according to the alternative's direction
    names(out.dat2) <- switch(x$alternative,
        "less" = paste("Pr(<", names(out.dat2), ")", sep = ""),
        "greater" = paste("Pr(>", names(out.dat2), ")", sep = ""),
        "two.sided" = paste("Pr(>|", names(out.dat2), "|)", sep = ""))
    out.dat <- cbind(out.dat1, out.dat2)
    # with two statistics, reorder so each statistic is followed directly by
    # its own p-value column
    if (length(out.dat) == 4)
        out.dat <- out.dat[c(1, 3, 2, 4)]
    print(cbind(as.data.frame(x$resp.patterns), out.dat))
    cat("\n\n")
    invisible(x)
}
|
/R/print.persFit.R
|
no_license
|
gscriver/ltm
|
R
| false
| false
| 1,542
|
r
|
# Print method for objects of class 'persFit' (person-fit statistics).
#
# Prints the original call, a description of the alternative hypothesis,
# the number of Monte Carlo samples (when p-values were simulated), and a
# table combining each response pattern with its observed statistic(s) and
# formatted p-value(s).
#
# Args:
#   x      - an object of class 'persFit'.
#   digits - number of digits used when rounding/formatting statistics and
#            p-values.
#   ...    - ignored; present to match the print() generic signature.
#
# Returns 'x' invisibly, as is conventional for print methods.
print.persFit <-
function (x, digits = 4, ...) {
    if (!inherits(x, "persFit"))
        stop("Use only with 'persFit' objects.\n")
    cat("\nPerson-Fit Statistics and P-values\n")
    cat("\nCall:\n", paste(deparse(x$call), sep = "\n", collapse = "\n"), "\n", sep = "")
    # human-readable description of the alternative hypothesis
    Alt <- switch(x$alternative,
        "less" = "Inconsistent response pattern under the estimated model",
        "greater" = "More consistent response pattern than the model predicts",
        "two.sided" = "Either inconsistent or more consistent response pattern under the estimated model")
    cat("\nAlternative:", Alt)
    if (x$simulate.p.value)
        cat("\nMonte Carlo samples:", x$B, "\n\n")
    else
        cat("\n\n")
    out.dat1 <- as.data.frame(round(x$Tobs, digits))
    # format the p-values; values that round to exactly 0 are displayed as a
    # "<0.0001"-style bound instead of a literal zero
    out <- apply(x$p.value, 2, function (x) {
        val <- round(x, digits)
        res <- formatC(val, digits = digits)
        res[val == 0] <- paste("<0.", paste(rep(0, digits - 1), collapse = ""), "1", collapse = "", sep = "")
        res
    })
    # apply() collapses to a vector when there is a single row; restore a
    # one-row matrix before converting to a data frame
    out.dat2 <- as.data.frame(if (!is.matrix(out)) rbind(out) else out)
    # label each p-value column according to the alternative's direction
    names(out.dat2) <- switch(x$alternative,
        "less" = paste("Pr(<", names(out.dat2), ")", sep = ""),
        "greater" = paste("Pr(>", names(out.dat2), ")", sep = ""),
        "two.sided" = paste("Pr(>|", names(out.dat2), "|)", sep = ""))
    out.dat <- cbind(out.dat1, out.dat2)
    # with two statistics, reorder so each statistic is followed directly by
    # its own p-value column
    if (length(out.dat) == 4)
        out.dat <- out.dat[c(1, 3, 2, 4)]
    print(cbind(as.data.frame(x$resp.patterns), out.dat))
    cat("\n\n")
    invisible(x)
}
|
# Scratch/test script for the squid package: builds a simulation input list,
# runs squid::squidR(), plots the generated environments and phenotypes, and
# fits a series of lme4 mixed models to the simulated data. This is
# exploratory code, not a reusable function.
require(ggplot2)
require(grid)
input <- list()
input$Tmax <- 100 # default = 1
input$Time_sampling <- c(1,100)
input$NP <- 1 # default = 1
input$NI <- 3 # default = 1
input$NT <- 1 # default = 1
input$NG <- 1 # default = 1
input$B <- c(0,0.3,0,0)
input$Vind <- matrix(c(0.7 , 0 , 0 , 0,
                       1 , 0.5 , 0 , 0,
                       0 , 0 , 0 , 0,
                       0 , 0 , 0 , 0
                       ),
                     4, byrow = TRUE)
# keep a copy of the Vind matrix (reused, possibly by mistake, further down)
test <- input$Vind
input$Ve <- 0.05 # Default 0
input$VG <- 0 # Default 0
# input$Vind <- matrix(c(0.7, 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
#                        1 , 0.3 , 1 , 1 , 1 , 1 , 1 , 1 ,
#                        1 , 1 , 0 , 1 , 1 , 1 , 1 , 1 ,
#                        1 , 1 , 1 , 0 , 1 , 1 , 1 , 1 ,
#                        1 , 1 , 1 , 1 , 0.7 , 1 , 1 , 1 ,
#                        1 , 1 , 1 , 1 , 1 , 0.3 , 1 , 1 ,
#                        1 , 1 , 1 , 1 , 1 , 1 , 0 , 1 ,
#                        1 , 1 , 1 , 1 , 1 , 1 , 1 , 0
#
#                        ),
#                      8)
#
#
# input$Vind <- matrix(c(0.7 , 1 ,
#                        1 , 0.3
#                        ),
#                      2)
# Environment X1
input$X1_state <- TRUE # default = FALSE
# Stochastic
input$X1_sto_state <- TRUE # default = FALSE
input$X1_sto_shared <- FALSE # default = TRUE
input$X1_sto_V <- 1 # default = 1
# Autocorrelate
input$X1_sto_autocor_state <- FALSE # default = FALSE
input$X1_sto_corr <- 0.7 # default = 0
# Linear
input$X1_lin_state <- TRUE # default = FALSE
input$X1_lin_shared <- FALSE # default = TRUE
input$X1_lin_intercept <- 0 # default = 0
input$X1_lin_slope <- 1 # default = 1
input$X1_lin_V <- 0 # default = 1
# Cyclic
input$X1_cyc_state <- TRUE # default = FALSE
input$X1_cyc_shared <- FALSE # default = TRUE
input$X1_cyc_amplitude <- 10 # default = 10
input$X1_cyc_period <- 10 # default = 10
input$X1_cyc_Hshift <- 0 # default = 0
input$X1_cyc_Vshift <- 0 # default = 0
input$X1_cyc_V <- 0 # default = 0
# Environment X2
input$X2_state <- FALSE
# Stochastic
input$X2_sto_state <- TRUE # default = FALSE
input$X2_sto_shared <- FALSE # default = TRUE
input$X2_sto_V <- 5 # default = 1
# Autocorrelate
input$X2_sto_autocor_state <- TRUE # default = FALSE
input$X2_sto_corr <- 0.9 # default = 0
# Linear
input$X2_lin_state <- TRUE # default = FALSE
input$X2_lin_shared <- FALSE # default = TRUE
input$X2_lin_intercept <- 0 # default = 0
input$X2_lin_slope <- 1 # default = 1
input$X2_lin_V <- 0 # default = 1
# Cyclic
input$X2_cyc_state <- TRUE # default = FALSE
input$X2_cyc_shared <- FALSE # default = TRUE
input$X2_cyc_amplitude <- 10 # default = 10
input$X2_cyc_period <- 10 # default = 10
input$X2_cyc_Hshift <- 0 # default = 0
input$X2_cyc_Vshift <- 0 # default = 0
input$X_Interaction <- FALSE # default = FALSE
input$Vhsi <- 0
input$NR <- 50 # default = 1
input$NR_ind <- TRUE # default = TRUE
input$NR_trait <- TRUE # default = TRUE
input$ST_ind <- FALSE # default = TRUE
input$ST_trait <- FALSE # default = TRUE
# run the simulation and collect the diagnostic plots
data <- squid::squidR(input=input, plot = TRUE)
# NOTE(review): multiplot() is not defined in this script and is not a base
# function -- presumably a Cookbook-for-R-style helper; confirm it is loaded.
print(multiplot(data$plot$X1,
                data$plot$X2,
                data$plot$X1X2,
                cols=1))
print(multiplot(data$plot$totPhen,
                data$plot$sampPhen,
                data$plot$sampTime,
                cols=1))
#--------------------------------------------------------------------------------------------
# shared and unshared environment
# Env1 <- data$full_data$X2
# Env2 <- data$full_data$X2
# ind <- data$full_data$Individual
#
# data <- data.frame("Time"=rep(1:100, 3*2),"val" = c(Env1, Env2), "ind"=rep(ind, 2),"type"=c(rep("Shared", 300), rep("Unshared", 300)))
#
#
# plot_X2 <- ggplot2::ggplot(data=data, ggplot2::aes(x = Time,
#                                                    y = val,
#                                                    color = as.factor(ind))) +
#   ggplot2::geom_point() +
#   ggplot2::geom_line() +
#   ggplot2::xlim(0, 100) +
#   facet_wrap(~ type) +
#   ggplot2::xlab("Time") +
#   ggplot2::ylab("Environmental values") +
#   ggplot2::theme(legend.position="none")
#
# plot(plot_X2)
#
# ggsave(plot_X2, file="plot_X2.pdf", scale = 1)
#--------------------------------------------------------------------------------------------
# random-intercept model on the sampled simulation output
LMR <- lme4::lmer(Phenotype ~ X1 + (1|Individual), data = data$sampled_data)
FIXEF <- lme4::fixef(LMR)
SE.FIXEF <- arm::se.fixef(LMR)
RANDEF <- as.data.frame(lme4::VarCorr(LMR))$vcov
# NOTE(review): 'test' holds the Vind matrix assigned above, not a data frame
# of simulated phenotypes -- this call looks unintended; confirm the data
# source (probably data$sampled_data was meant).
LMR <- lmer(Phenotype ~ 1 + X1 + (X1|Individual), data = test)
summary(LMR)
cov2cor(VarCorr(LMR)$Individual[,])[2]
# NOTE(review): 'data$data_S' is used from here on while 'data$sampled_data'
# is used above -- presumably an older field name of squidR's output; verify.
ggplot(data = data$data_S, aes(y=Phenotype, x=X1, color=as.factor(Individual))) +
    stat_smooth(method = "lm", se=FALSE) +
    theme(legend.position="none") +
    xlab("Environmental effect") +
    ylab("Phenotype")
library(lme4)
library(arm)
library(dplyr)
df <- data$data_S
# within-/between-individual centring of X1 (CMW = within, CMB = between)
new_data <- df %>%
    group_by(Individual) %>%
    summarise(Xmean = mean(X1)) %>%
    inner_join(df) %>%
    mutate(CMW = X1 - Xmean,
           CMB = Xmean - mean(X1))
LMR <- lmer(Phenotype ~ Xmean + (Xmean|Individual), data = data$data_S)
summary(LMR)
library(lme4)
library(arm)
LMR <- lmer(Phenotype ~ 0 + X1 + (X1|Individual), data = data$data_S)
summary(LMR)
LMR <- lmer(Phenotype ~ 0 + (1|Individual), data = data$data_S)
LMR <- update(LMR, ~.+ X1)
LMR <- lmer(Phenotype ~ -1 + (1|Individual), data = data$data_S)
LMR <- update(LMR, ~.+ X1 + (X1|Individual) - (1|Individual))
# NOTE(review): '(1|individual)' is lower-case here while the grouping factor
# is spelled 'Individual' everywhere else -- likely a typo.
LMR <- update(LMR, ~.+ X2 + (X2|Individual) - (1|individual))
LMR <- lme4::lmer(Phenotype ~ 1 + X1 + (1|Individual) + (0+X1|Individual), data = data$data_S)
RANDEF <- as.data.frame(lme4::VarCorr(LMR))$vcov
summary(LMR)
V <- as.data.frame(VarCorr(LMR))
P <- print(VarCorr(LMR),comp="Variance")
fixef(LMR) # get fixed effect coefficients
se.coef (LMR)
se.fixef(LMR) # get standard error of fixed effect coefficients
as.data.frame(VarCorr(LMR))$vcov # get random effect (variances)
re1 <- ranef(LMR, condVar=TRUE, whichel = "Individual") # get random effect for each individual
print(re1)
dotplot(re1) # plot random effect for each Individual with the standard error
|
/inst/shiny-squid/test.R
|
permissive
|
digideskio/squid-1
|
R
| false
| false
| 6,626
|
r
|
# Scratch/test script for the squid package: builds a simulation input list,
# runs squid::squidR(), plots the generated environments and phenotypes, and
# fits a series of lme4 mixed models to the simulated data. This is
# exploratory code, not a reusable function.
require(ggplot2)
require(grid)
input <- list()
input$Tmax <- 100 # default = 1
input$Time_sampling <- c(1,100)
input$NP <- 1 # default = 1
input$NI <- 3 # default = 1
input$NT <- 1 # default = 1
input$NG <- 1 # default = 1
input$B <- c(0,0.3,0,0)
input$Vind <- matrix(c(0.7 , 0 , 0 , 0,
                       1 , 0.5 , 0 , 0,
                       0 , 0 , 0 , 0,
                       0 , 0 , 0 , 0
                       ),
                     4, byrow = TRUE)
# keep a copy of the Vind matrix (reused, possibly by mistake, further down)
test <- input$Vind
input$Ve <- 0.05 # Default 0
input$VG <- 0 # Default 0
# input$Vind <- matrix(c(0.7, 1 , 1 , 1 , 1 , 1 , 1 , 1 ,
#                        1 , 0.3 , 1 , 1 , 1 , 1 , 1 , 1 ,
#                        1 , 1 , 0 , 1 , 1 , 1 , 1 , 1 ,
#                        1 , 1 , 1 , 0 , 1 , 1 , 1 , 1 ,
#                        1 , 1 , 1 , 1 , 0.7 , 1 , 1 , 1 ,
#                        1 , 1 , 1 , 1 , 1 , 0.3 , 1 , 1 ,
#                        1 , 1 , 1 , 1 , 1 , 1 , 0 , 1 ,
#                        1 , 1 , 1 , 1 , 1 , 1 , 1 , 0
#
#                        ),
#                      8)
#
#
# input$Vind <- matrix(c(0.7 , 1 ,
#                        1 , 0.3
#                        ),
#                      2)
# Environment X1
input$X1_state <- TRUE # default = FALSE
# Stochastic
input$X1_sto_state <- TRUE # default = FALSE
input$X1_sto_shared <- FALSE # default = TRUE
input$X1_sto_V <- 1 # default = 1
# Autocorrelate
input$X1_sto_autocor_state <- FALSE # default = FALSE
input$X1_sto_corr <- 0.7 # default = 0
# Linear
input$X1_lin_state <- TRUE # default = FALSE
input$X1_lin_shared <- FALSE # default = TRUE
input$X1_lin_intercept <- 0 # default = 0
input$X1_lin_slope <- 1 # default = 1
input$X1_lin_V <- 0 # default = 1
# Cyclic
input$X1_cyc_state <- TRUE # default = FALSE
input$X1_cyc_shared <- FALSE # default = TRUE
input$X1_cyc_amplitude <- 10 # default = 10
input$X1_cyc_period <- 10 # default = 10
input$X1_cyc_Hshift <- 0 # default = 0
input$X1_cyc_Vshift <- 0 # default = 0
input$X1_cyc_V <- 0 # default = 0
# Environment X2
input$X2_state <- FALSE
# Stochastic
input$X2_sto_state <- TRUE # default = FALSE
input$X2_sto_shared <- FALSE # default = TRUE
input$X2_sto_V <- 5 # default = 1
# Autocorrelate
input$X2_sto_autocor_state <- TRUE # default = FALSE
input$X2_sto_corr <- 0.9 # default = 0
# Linear
input$X2_lin_state <- TRUE # default = FALSE
input$X2_lin_shared <- FALSE # default = TRUE
input$X2_lin_intercept <- 0 # default = 0
input$X2_lin_slope <- 1 # default = 1
input$X2_lin_V <- 0 # default = 1
# Cyclic
input$X2_cyc_state <- TRUE # default = FALSE
input$X2_cyc_shared <- FALSE # default = TRUE
input$X2_cyc_amplitude <- 10 # default = 10
input$X2_cyc_period <- 10 # default = 10
input$X2_cyc_Hshift <- 0 # default = 0
input$X2_cyc_Vshift <- 0 # default = 0
input$X_Interaction <- FALSE # default = FALSE
input$Vhsi <- 0
input$NR <- 50 # default = 1
input$NR_ind <- TRUE # default = TRUE
input$NR_trait <- TRUE # default = TRUE
input$ST_ind <- FALSE # default = TRUE
input$ST_trait <- FALSE # default = TRUE
# run the simulation and collect the diagnostic plots
data <- squid::squidR(input=input, plot = TRUE)
# NOTE(review): multiplot() is not defined in this script and is not a base
# function -- presumably a Cookbook-for-R-style helper; confirm it is loaded.
print(multiplot(data$plot$X1,
                data$plot$X2,
                data$plot$X1X2,
                cols=1))
print(multiplot(data$plot$totPhen,
                data$plot$sampPhen,
                data$plot$sampTime,
                cols=1))
#--------------------------------------------------------------------------------------------
# shared and unshared environment
# Env1 <- data$full_data$X2
# Env2 <- data$full_data$X2
# ind <- data$full_data$Individual
#
# data <- data.frame("Time"=rep(1:100, 3*2),"val" = c(Env1, Env2), "ind"=rep(ind, 2),"type"=c(rep("Shared", 300), rep("Unshared", 300)))
#
#
# plot_X2 <- ggplot2::ggplot(data=data, ggplot2::aes(x = Time,
#                                                    y = val,
#                                                    color = as.factor(ind))) +
#   ggplot2::geom_point() +
#   ggplot2::geom_line() +
#   ggplot2::xlim(0, 100) +
#   facet_wrap(~ type) +
#   ggplot2::xlab("Time") +
#   ggplot2::ylab("Environmental values") +
#   ggplot2::theme(legend.position="none")
#
# plot(plot_X2)
#
# ggsave(plot_X2, file="plot_X2.pdf", scale = 1)
#--------------------------------------------------------------------------------------------
# random-intercept model on the sampled simulation output
LMR <- lme4::lmer(Phenotype ~ X1 + (1|Individual), data = data$sampled_data)
FIXEF <- lme4::fixef(LMR)
SE.FIXEF <- arm::se.fixef(LMR)
RANDEF <- as.data.frame(lme4::VarCorr(LMR))$vcov
# NOTE(review): 'test' holds the Vind matrix assigned above, not a data frame
# of simulated phenotypes -- this call looks unintended; confirm the data
# source (probably data$sampled_data was meant).
LMR <- lmer(Phenotype ~ 1 + X1 + (X1|Individual), data = test)
summary(LMR)
cov2cor(VarCorr(LMR)$Individual[,])[2]
# NOTE(review): 'data$data_S' is used from here on while 'data$sampled_data'
# is used above -- presumably an older field name of squidR's output; verify.
ggplot(data = data$data_S, aes(y=Phenotype, x=X1, color=as.factor(Individual))) +
    stat_smooth(method = "lm", se=FALSE) +
    theme(legend.position="none") +
    xlab("Environmental effect") +
    ylab("Phenotype")
library(lme4)
library(arm)
library(dplyr)
df <- data$data_S
# within-/between-individual centring of X1 (CMW = within, CMB = between)
new_data <- df %>%
    group_by(Individual) %>%
    summarise(Xmean = mean(X1)) %>%
    inner_join(df) %>%
    mutate(CMW = X1 - Xmean,
           CMB = Xmean - mean(X1))
LMR <- lmer(Phenotype ~ Xmean + (Xmean|Individual), data = data$data_S)
summary(LMR)
library(lme4)
library(arm)
LMR <- lmer(Phenotype ~ 0 + X1 + (X1|Individual), data = data$data_S)
summary(LMR)
LMR <- lmer(Phenotype ~ 0 + (1|Individual), data = data$data_S)
LMR <- update(LMR, ~.+ X1)
LMR <- lmer(Phenotype ~ -1 + (1|Individual), data = data$data_S)
LMR <- update(LMR, ~.+ X1 + (X1|Individual) - (1|Individual))
# NOTE(review): '(1|individual)' is lower-case here while the grouping factor
# is spelled 'Individual' everywhere else -- likely a typo.
LMR <- update(LMR, ~.+ X2 + (X2|Individual) - (1|individual))
LMR <- lme4::lmer(Phenotype ~ 1 + X1 + (1|Individual) + (0+X1|Individual), data = data$data_S)
RANDEF <- as.data.frame(lme4::VarCorr(LMR))$vcov
summary(LMR)
V <- as.data.frame(VarCorr(LMR))
P <- print(VarCorr(LMR),comp="Variance")
fixef(LMR) # get fixed effect coefficients
se.coef (LMR)
se.fixef(LMR) # get standard error of fixed effect coefficients
as.data.frame(VarCorr(LMR))$vcov # get random effect (variances)
re1 <- ranef(LMR, condVar=TRUE, whichel = "Individual") # get random effect for each individual
print(re1)
dotplot(re1) # plot random effect for each Individual with the standard error
|
# Practical Machine Learning course project: train a random forest to
# predict the "classe" activity label from accelerometer measurements.
setwd("~/git/practical_machine_learning/")
pml_train <- read.csv("pml-training.csv")
pml_test <- read.csv("pml-testing.csv")
# Drop predictors that are entirely NA in the testing set. (Was a
# hard-coded "== 20"; nrow(pml_test) generalises to any test-set size.)
unmeasured <- which(colSums(is.na(pml_test)) == nrow(pml_test))
# do not use these columns in the testing set
pml_train <- pml_train[, -unmeasured]
# Drop the row-id column so the model cannot overfit on it.
pml_train <- pml_train[, -1]
head(pml_train)
dim(pml_train)
library(AppliedPredictiveModeling)
library(caret)
# 75/25 train/validation split, stratified on the outcome.
inTrain <- createDataPartition(pml_train$classe, p = 3/4)[[1]]
training <- pml_train[inTrain, ]
testing <- pml_train[-inTrain, ]
# Random forest with 5-fold cross-validation; 50 trees keeps the fit fast.
mymodel <- train(classe ~ ., method = 'rf', data = training,
                 trControl = trainControl(method = "cv", number = 5),
                 prox = TRUE, allowParallel = TRUE, ntree = 50)
summary(mymodel)
plot(mymodel)
# Hold-out accuracy estimate.
predicted_class <- predict(mymodel, newdata = testing)
accuracy <- sum(predicted_class == testing$classe) / length(predicted_class)
pml_test <- read.csv("pml-testing.csv")
answers <- predict(mymodel, newdata = pml_test)
#########################
# write out the answer
# Write each prediction to its own "problem_id_<i>.txt" file in the
# working directory (one value per file; no quotes, no headers).
#
# x: vector of predictions, one per test case.
pml_write_files <- function(x) {
  # seq_along() (not 1:length(x)) so an empty input writes no files.
  for (i in seq_along(x)) {
    filename <- paste0("problem_id_", i, ".txt")
    write.table(x[i], file = filename, quote = FALSE,
                row.names = FALSE, col.names = FALSE)
  }
}
# Emit one submission file per test-case prediction.
pml_write_files(answers)
|
/PracticalMachineLearning.R
|
no_license
|
xiaopz0/practical_machine_learning
|
R
| false
| false
| 1,299
|
r
|
# Practical Machine Learning course project: train a random forest to
# predict the "classe" activity label from accelerometer measurements.
setwd("~/git/practical_machine_learning/")
pml_train <- read.csv("pml-training.csv")
pml_test <- read.csv("pml-testing.csv")
# Drop predictors that are entirely NA in the testing set. (Was a
# hard-coded "== 20"; nrow(pml_test) generalises to any test-set size.)
unmeasured <- which(colSums(is.na(pml_test)) == nrow(pml_test))
# do not use these columns in the testing set
pml_train <- pml_train[, -unmeasured]
# Drop the row-id column so the model cannot overfit on it.
pml_train <- pml_train[, -1]
head(pml_train)
dim(pml_train)
library(AppliedPredictiveModeling)
library(caret)
# 75/25 train/validation split, stratified on the outcome.
inTrain <- createDataPartition(pml_train$classe, p = 3/4)[[1]]
training <- pml_train[inTrain, ]
testing <- pml_train[-inTrain, ]
# Random forest with 5-fold cross-validation; 50 trees keeps the fit fast.
mymodel <- train(classe ~ ., method = 'rf', data = training,
                 trControl = trainControl(method = "cv", number = 5),
                 prox = TRUE, allowParallel = TRUE, ntree = 50)
summary(mymodel)
plot(mymodel)
# Hold-out accuracy estimate.
predicted_class <- predict(mymodel, newdata = testing)
accuracy <- sum(predicted_class == testing$classe) / length(predicted_class)
pml_test <- read.csv("pml-testing.csv")
answers <- predict(mymodel, newdata = pml_test)
#########################
# write out the answer
# Write each prediction to its own "problem_id_<i>.txt" file in the
# working directory (one value per file; no quotes, no headers).
#
# x: vector of predictions, one per test case.
pml_write_files <- function(x) {
  # seq_along() (not 1:length(x)) so an empty input writes no files.
  for (i in seq_along(x)) {
    filename <- paste0("problem_id_", i, ".txt")
    write.table(x[i], file = filename, quote = FALSE,
                row.names = FALSE, col.names = FALSE)
  }
}
# Emit one submission file per test-case prediction.
pml_write_files(answers)
|
# Tests for the distinct() verb: equivalence with local unique(), SQL
# rendering, sql_build() bookkeeping, and op_vars() rules.
context("distinct")

src_df <- tibble(
  x = c(1, 1, 1, 1),
  y = c(1, 1, 2, 2),
  z = c(1, 2, 1, 2)
)
loaded <- test_load(src_df)

test_that("distinct equivalent to local unique when keep_all is TRUE", {
  loaded %>%
    lapply(function(tbl) distinct(tbl)) %>%
    expect_equal_tbls(unique(src_df))
})

test_that("distinct for single column equivalent to local unique (#1937)", {
  loaded %>%
    lapply(function(tbl) distinct(tbl, x, .keep_all = FALSE)) %>%
    expect_equal_tbls(unique(src_df["x"]))
  loaded %>%
    lapply(function(tbl) distinct(tbl, y, .keep_all = FALSE)) %>%
    expect_equal_tbls(unique(src_df["y"]))
})

test_that("distinct throws error if column is specified and .keep_all is TRUE", {
  mf <- memdb_frame(x = 1:10)
  expect_error(
    mf %>% distinct(x, .keep_all = TRUE) %>% collect(),
    "specified columns.*[.]keep_all"
  )
})

# sql-render --------------------------------------------------------------

test_that("distinct adds DISTINCT suffix", {
  tbl_distinct <- memdb_frame(x = c(1, 1)) %>% distinct()
  expect_match(sql_render(tbl_distinct), "SELECT DISTINCT")
  expect_equal(collect(tbl_distinct), tibble(x = 1))
})

test_that("distinct over columns uses GROUP BY", {
  tbl_distinct <- memdb_frame(x = c(1, 2), y = c(1, 1)) %>% distinct(y)
  expect_match(sql_render(tbl_distinct), "SELECT `y`.*GROUP BY `y`")
  expect_equal(collect(tbl_distinct), tibble(y = 1))
})

# sql_build ---------------------------------------------------------------

test_that("distinct sets flagged", {
  plain <- lazy_frame(x = 1) %>% select() %>% sql_build()
  expect_false(plain$distinct)
  flagged <- lazy_frame(x = 1) %>% distinct() %>% sql_build()
  expect_true(flagged$distinct)
})

# ops ---------------------------------------------------------------------

test_that("distinct has complicated rules", {
  out <- lazy_frame(x = 1, y = 2) %>% distinct()
  expect_equal(op_vars(out), c("x", "y"))
  out <- lazy_frame(x = 1, y = 2) %>% distinct(x, .keep_all = TRUE)
  expect_equal(op_vars(out), c("x", "y"))
  out <- lazy_frame(x = 1, y = 2, z = 3) %>% distinct(x, y)
  expect_equal(op_vars(out), c("x", "y"))
  out <- lazy_frame(x = 1, y = 2, z = 3) %>% group_by(x) %>% distinct(y)
  expect_equal(op_vars(out), c("x", "y"))
})
|
/tests/testthat/test-verb-distinct.R
|
permissive
|
alex-gable/dbplyr
|
R
| false
| false
| 2,164
|
r
|
# Tests for the distinct() verb: equivalence with local unique(), SQL
# rendering, sql_build() bookkeeping, and op_vars() rules.
context("distinct")

src_df <- tibble(
  x = c(1, 1, 1, 1),
  y = c(1, 1, 2, 2),
  z = c(1, 2, 1, 2)
)
loaded <- test_load(src_df)

test_that("distinct equivalent to local unique when keep_all is TRUE", {
  loaded %>%
    lapply(function(tbl) distinct(tbl)) %>%
    expect_equal_tbls(unique(src_df))
})

test_that("distinct for single column equivalent to local unique (#1937)", {
  loaded %>%
    lapply(function(tbl) distinct(tbl, x, .keep_all = FALSE)) %>%
    expect_equal_tbls(unique(src_df["x"]))
  loaded %>%
    lapply(function(tbl) distinct(tbl, y, .keep_all = FALSE)) %>%
    expect_equal_tbls(unique(src_df["y"]))
})

test_that("distinct throws error if column is specified and .keep_all is TRUE", {
  mf <- memdb_frame(x = 1:10)
  expect_error(
    mf %>% distinct(x, .keep_all = TRUE) %>% collect(),
    "specified columns.*[.]keep_all"
  )
})

# sql-render --------------------------------------------------------------

test_that("distinct adds DISTINCT suffix", {
  tbl_distinct <- memdb_frame(x = c(1, 1)) %>% distinct()
  expect_match(sql_render(tbl_distinct), "SELECT DISTINCT")
  expect_equal(collect(tbl_distinct), tibble(x = 1))
})

test_that("distinct over columns uses GROUP BY", {
  tbl_distinct <- memdb_frame(x = c(1, 2), y = c(1, 1)) %>% distinct(y)
  expect_match(sql_render(tbl_distinct), "SELECT `y`.*GROUP BY `y`")
  expect_equal(collect(tbl_distinct), tibble(y = 1))
})

# sql_build ---------------------------------------------------------------

test_that("distinct sets flagged", {
  plain <- lazy_frame(x = 1) %>% select() %>% sql_build()
  expect_false(plain$distinct)
  flagged <- lazy_frame(x = 1) %>% distinct() %>% sql_build()
  expect_true(flagged$distinct)
})

# ops ---------------------------------------------------------------------

test_that("distinct has complicated rules", {
  out <- lazy_frame(x = 1, y = 2) %>% distinct()
  expect_equal(op_vars(out), c("x", "y"))
  out <- lazy_frame(x = 1, y = 2) %>% distinct(x, .keep_all = TRUE)
  expect_equal(op_vars(out), c("x", "y"))
  out <- lazy_frame(x = 1, y = 2, z = 3) %>% distinct(x, y)
  expect_equal(op_vars(out), c("x", "y"))
  out <- lazy_frame(x = 1, y = 2, z = 3) %>% group_by(x) %>% distinct(y)
  expect_equal(op_vars(out), c("x", "y"))
})
|
# Exploratory data analysis of respiratory-disease hospital admissions.
# paths2.R must define data.path and fig.path; the RData file provides
# `resp` (a tsibble with at least date, admis, day, year).
library('tidyverse')
library('cowplot')
library('astsa')
library('forecast')
library('reshape2')
library('tsibble')
library('plotly')
library('ggTimeSeries')
source('paths2.R')
# Fixed: paste() inserts a space between path components; paste0() joins cleanly.
load(file = paste0(data.path, 'resp_disease.RData'))
# Exploratory Data Analysis -----------------------------------------------
resp.full.plot <-
  resp %>% ggplot(aes(x = date, y = admis)) + geom_line() +
  background_grid(minor = "xy")
save_plot(paste0(fig.path, 'resp_full.pdf'),
          resp.full.plot,
          base_aspect_ratio = 1.8)
htmlwidgets::saveWidget(as_widget(ggplotly(resp.full.plot)),
                        paste0(fig.path, 'resp_full.html'))
# Train/test split: 2014-2017 for training, 2018 onward held out.
resp.train <-
  resp %>% filter(date >= "2014-01-01", date <= "2017-12-31")
resp.test <- resp %>% filter(date >= "2018-01-01")
resp.train.plot <-
  resp.train %>% ggplot(aes(x = date, y = admis)) + geom_line() +
  background_grid(major = "xy", minor = "none")
save_plot(paste0(fig.path, 'resp_train.pdf'),
          resp.train.plot,
          base_aspect_ratio = 1.8)
htmlwidgets::saveWidget(as_widget(ggplotly(resp.train.plot)),
                        paste0(fig.path, 'resp_train.html'))
# ACF/PACF over 8 weeks of daily lags.
resp.train.acf <- tibble(
  lag = seq_len(7 * 8),
  acf = astsa::acf2(resp.train$admis, max.lag = 7 * 8, plot = FALSE)[, 1],
  pacf = astsa::acf2(resp.train$admis, max.lag = 7 * 8, plot = FALSE)[, 2]
)
resp.train.acf %>%
  write_csv(paste0(data.path, 'resp_acf_pacf.csv'))
resp.train.acf.plot <-
  resp.train.acf %>% ggplot(mapping = aes(x = lag, y = acf)) +
  coord_cartesian(ylim = c(-1, 1)) + background_grid(major = "xy", minor = "none") +
  scale_x_continuous(breaks = seq(0, nrow(resp.train.acf), 7)) +
  geom_segment(mapping = aes(xend = lag, yend = 0)) + labs(x = "Lag (days)", y = "ACF")
resp.train.pacf.plot <-
  resp.train.acf %>% ggplot(mapping = aes(x = lag, y = pacf)) +
  coord_cartesian(ylim = c(-1, 1)) + background_grid(major = "xy", minor = "none") +
  scale_x_continuous(breaks = seq(0, nrow(resp.train.acf), 7)) +
  geom_segment(mapping = aes(xend = lag, yend = 0)) + labs(x = "Lag (days)", y = "PACF")
# Fixed undefined names: the plots created above are resp.train.*.plot
# (the original referenced resp.acf.plot / resp.pacf.plot, never defined).
save_plot(paste0(fig.path, 'acf_plot.pdf'),
          resp.train.acf.plot)
save_plot(paste0(fig.path, 'pacf_plot.pdf'),
          resp.train.pacf.plot)
resp.train.acf.semiplot <- resp.train.acf.plot + labs(y = "ACF") +
  theme(
    axis.text.x = element_blank(),
    axis.ticks.x = element_blank(),
    axis.title.x = element_blank()
  )
save_plot(
  paste0(fig.path, 'acf_pacf_plot.pdf'),
  plot_grid(
    resp.train.acf.semiplot,
    resp.train.pacf.plot,
    nrow = 2,
    align = "v"
  )
)
resp.train.loess.plot <-
  resp.train.plot + geom_smooth(method = "loess",
                                size = 1.5,
                                colour = "red")
save_plot(paste0(fig.path, 'loess_plot.pdf'),
          resp.train.loess.plot,
          base_aspect_ratio = 2)
# Kernel smoother over the TRAINING series (the original passed the full
# resp$admis against a training-length x grid, a length mismatch).
resp.train.ksmooth <-
  ksmooth(seq_len(nrow(resp.train)), resp.train$admis, "normal", bandwidth = 6)
resp.train.ksmooth <- resp.train %>% mutate(ksmooth = resp.train.ksmooth$y)
resp.ksmooth.plot <-
  resp.train.ksmooth %>% ggplot(aes(x = date)) + geom_line(aes(y = admis)) +
  geom_line(aes(y = ksmooth), colour = "red") +
  background_grid(major = "xy", minor = "none")
save_plot(paste0(fig.path, 'ksmooth_plot.pdf'),
          resp.ksmooth.plot,
          base_aspect_ratio = 2)
# Monthly averages of daily admissions.
resp.train.monthly <-
  resp.train %>% index_by(year.month = yearmonth(date)) %>% summarize(admis.avg.month = mean(admis))
resp.train.monthly.plot <-
  resp.train.monthly %>% ggplot(aes(x = year.month, y = admis.avg.month)) + geom_line() +
  background_grid(major = "xy", minor = "none")
save_plot(paste0(fig.path, 'resp_monthly_plot.pdf'),
          resp.train.monthly.plot,
          base_aspect_ratio = 1.5)
resp.train.density.plot <- resp.train %>% ggplot(aes(admis)) +
  geom_histogram(aes(y = stat(density)),
                 binwidth = 60,
                 col = "black",
                 fill = "gray90") +
  geom_density(aes(y = ..density..), col = 'red')
save_plot(paste0(fig.path, 'resp_density_plot.pdf'),
          resp.train.density.plot,
          base_aspect_ratio = 1.3)
resp.heatmap <- resp.train %>% ggplot_calendar_heatmap('date',
                                                       'admis') +
  xlab(NULL) +
  ylab(NULL) +
  scale_fill_continuous(low = 'green', high = 'red') +
  facet_wrap( ~ Year, ncol = 1)
save_plot(paste0(fig.path, 'resp_heatmap.pdf'),
          resp.heatmap,
          base_aspect_ratio = 2)
# Mean +/- sd of admissions by day of week.
resp.train %>%
  tsibble::index_by(week.day = day) %>%
  summarize(admis.avg = mean(admis), admis.sd = sd(admis)) %>%
  ggplot(aes(x = week.day, y = admis.avg,
             ymin = admis.avg - admis.sd,
             ymax = admis.avg + admis.sd)) +
  geom_errorbar(width = 0.3) + geom_pointrange()
resp.bp <-
  melt(resp.train,
       id.vars = c("week.day", "day"),
       measure.vars = "admis")
resp.box.plot <- resp.bp %>% ggplot(aes(x = day, y = value)) + geom_boxplot() + background_grid(minor = "xy")
save_plot(paste0(fig.path, 'resp_box_plot.pdf'),
          resp.box.plot,
          base_aspect_ratio = 1.5)
# Seasonal views: weekly and annual periodicity.
resp.yearly <- resp %>% index_by(year) %>% nest()
msts(resp.train$admis, seasonal.periods = c(7, 365.25), start = c(2014, 1, 1)) %>% seasplot(trend = FALSE, outplot = 1)
ts(resp.train$admis, frequency = 7, start = c(2014, 1, 1)) %>% ggseasonplot()
ggplot() +
  geom_line(data = resp.yearly$data[[1]], aes(x = date, y = admis), color = 'red') +
  geom_line(data = resp.yearly$data[[2]], aes(x = date, y = admis), color = 'blue') +
  xlab('Date') + ylab('Admissions')
ggplot(resp, aes(x = date, y = admis, colour = as.factor(year))) + geom_line()
resp.yearly.plot <- resp %>% ggplot(aes(date, admis, colour = year)) + geom_line()
# Unrelated astsa demo (fmri1 example); kept as in the original analysis.
par(mfrow = c(5, 1))
ts.plot(fmri1[, 2:5], col = 1:4, ylab = "BOLD", main = "Cortex")
ts.plot(fmri1[, 6:9], col = 1:4, ylab = "BOLD", main = "Thalamus & Cerebellum")
resp.yearly <- resp %>% index_by(year) %>% nest()
msts(resp.train$admis, seasonal.periods = c(7, 365.25)) %>% seasplot(trend = FALSE, outplot = 1)
msts(resp.train$admis, seasonal.periods = c(7, 365.25), start = c(2014, 1, 1)) %>% seasplot(trend = FALSE, outplot = 1)
ts(resp.train$admis, frequency = 365.25, start = c(2014, 1, 1)) %>% ggseasonplot(polar = TRUE)
msts(resp.train$admis, seasonal.periods = c(7, 365.25)) %>% ggseasonplot()
|
/eda.R
|
no_license
|
bastianabaleiv/hospadmis
|
R
| false
| false
| 6,787
|
r
|
# Exploratory data analysis of respiratory-disease hospital admissions.
# paths2.R must define data.path and fig.path; the RData file provides
# `resp` (a tsibble with at least date, admis, day, year).
library('tidyverse')
library('cowplot')
library('astsa')
library('forecast')
library('reshape2')
library('tsibble')
library('plotly')
library('ggTimeSeries')
source('paths2.R')
# Fixed: paste() inserts a space between path components; paste0() joins cleanly.
load(file = paste0(data.path, 'resp_disease.RData'))
# Exploratory Data Analysis -----------------------------------------------
resp.full.plot <-
  resp %>% ggplot(aes(x = date, y = admis)) + geom_line() +
  background_grid(minor = "xy")
save_plot(paste0(fig.path, 'resp_full.pdf'),
          resp.full.plot,
          base_aspect_ratio = 1.8)
htmlwidgets::saveWidget(as_widget(ggplotly(resp.full.plot)),
                        paste0(fig.path, 'resp_full.html'))
# Train/test split: 2014-2017 for training, 2018 onward held out.
resp.train <-
  resp %>% filter(date >= "2014-01-01", date <= "2017-12-31")
resp.test <- resp %>% filter(date >= "2018-01-01")
resp.train.plot <-
  resp.train %>% ggplot(aes(x = date, y = admis)) + geom_line() +
  background_grid(major = "xy", minor = "none")
save_plot(paste0(fig.path, 'resp_train.pdf'),
          resp.train.plot,
          base_aspect_ratio = 1.8)
htmlwidgets::saveWidget(as_widget(ggplotly(resp.train.plot)),
                        paste0(fig.path, 'resp_train.html'))
# ACF/PACF over 8 weeks of daily lags.
resp.train.acf <- tibble(
  lag = seq_len(7 * 8),
  acf = astsa::acf2(resp.train$admis, max.lag = 7 * 8, plot = FALSE)[, 1],
  pacf = astsa::acf2(resp.train$admis, max.lag = 7 * 8, plot = FALSE)[, 2]
)
resp.train.acf %>%
  write_csv(paste0(data.path, 'resp_acf_pacf.csv'))
resp.train.acf.plot <-
  resp.train.acf %>% ggplot(mapping = aes(x = lag, y = acf)) +
  coord_cartesian(ylim = c(-1, 1)) + background_grid(major = "xy", minor = "none") +
  scale_x_continuous(breaks = seq(0, nrow(resp.train.acf), 7)) +
  geom_segment(mapping = aes(xend = lag, yend = 0)) + labs(x = "Lag (days)", y = "ACF")
resp.train.pacf.plot <-
  resp.train.acf %>% ggplot(mapping = aes(x = lag, y = pacf)) +
  coord_cartesian(ylim = c(-1, 1)) + background_grid(major = "xy", minor = "none") +
  scale_x_continuous(breaks = seq(0, nrow(resp.train.acf), 7)) +
  geom_segment(mapping = aes(xend = lag, yend = 0)) + labs(x = "Lag (days)", y = "PACF")
# Fixed undefined names: the plots created above are resp.train.*.plot
# (the original referenced resp.acf.plot / resp.pacf.plot, never defined).
save_plot(paste0(fig.path, 'acf_plot.pdf'),
          resp.train.acf.plot)
save_plot(paste0(fig.path, 'pacf_plot.pdf'),
          resp.train.pacf.plot)
resp.train.acf.semiplot <- resp.train.acf.plot + labs(y = "ACF") +
  theme(
    axis.text.x = element_blank(),
    axis.ticks.x = element_blank(),
    axis.title.x = element_blank()
  )
save_plot(
  paste0(fig.path, 'acf_pacf_plot.pdf'),
  plot_grid(
    resp.train.acf.semiplot,
    resp.train.pacf.plot,
    nrow = 2,
    align = "v"
  )
)
resp.train.loess.plot <-
  resp.train.plot + geom_smooth(method = "loess",
                                size = 1.5,
                                colour = "red")
save_plot(paste0(fig.path, 'loess_plot.pdf'),
          resp.train.loess.plot,
          base_aspect_ratio = 2)
# Kernel smoother over the TRAINING series (the original passed the full
# resp$admis against a training-length x grid, a length mismatch).
resp.train.ksmooth <-
  ksmooth(seq_len(nrow(resp.train)), resp.train$admis, "normal", bandwidth = 6)
resp.train.ksmooth <- resp.train %>% mutate(ksmooth = resp.train.ksmooth$y)
resp.ksmooth.plot <-
  resp.train.ksmooth %>% ggplot(aes(x = date)) + geom_line(aes(y = admis)) +
  geom_line(aes(y = ksmooth), colour = "red") +
  background_grid(major = "xy", minor = "none")
save_plot(paste0(fig.path, 'ksmooth_plot.pdf'),
          resp.ksmooth.plot,
          base_aspect_ratio = 2)
# Monthly averages of daily admissions.
resp.train.monthly <-
  resp.train %>% index_by(year.month = yearmonth(date)) %>% summarize(admis.avg.month = mean(admis))
resp.train.monthly.plot <-
  resp.train.monthly %>% ggplot(aes(x = year.month, y = admis.avg.month)) + geom_line() +
  background_grid(major = "xy", minor = "none")
save_plot(paste0(fig.path, 'resp_monthly_plot.pdf'),
          resp.train.monthly.plot,
          base_aspect_ratio = 1.5)
resp.train.density.plot <- resp.train %>% ggplot(aes(admis)) +
  geom_histogram(aes(y = stat(density)),
                 binwidth = 60,
                 col = "black",
                 fill = "gray90") +
  geom_density(aes(y = ..density..), col = 'red')
save_plot(paste0(fig.path, 'resp_density_plot.pdf'),
          resp.train.density.plot,
          base_aspect_ratio = 1.3)
resp.heatmap <- resp.train %>% ggplot_calendar_heatmap('date',
                                                       'admis') +
  xlab(NULL) +
  ylab(NULL) +
  scale_fill_continuous(low = 'green', high = 'red') +
  facet_wrap( ~ Year, ncol = 1)
save_plot(paste0(fig.path, 'resp_heatmap.pdf'),
          resp.heatmap,
          base_aspect_ratio = 2)
# Mean +/- sd of admissions by day of week.
resp.train %>%
  tsibble::index_by(week.day = day) %>%
  summarize(admis.avg = mean(admis), admis.sd = sd(admis)) %>%
  ggplot(aes(x = week.day, y = admis.avg,
             ymin = admis.avg - admis.sd,
             ymax = admis.avg + admis.sd)) +
  geom_errorbar(width = 0.3) + geom_pointrange()
resp.bp <-
  melt(resp.train,
       id.vars = c("week.day", "day"),
       measure.vars = "admis")
resp.box.plot <- resp.bp %>% ggplot(aes(x = day, y = value)) + geom_boxplot() + background_grid(minor = "xy")
save_plot(paste0(fig.path, 'resp_box_plot.pdf'),
          resp.box.plot,
          base_aspect_ratio = 1.5)
# Seasonal views: weekly and annual periodicity.
resp.yearly <- resp %>% index_by(year) %>% nest()
msts(resp.train$admis, seasonal.periods = c(7, 365.25), start = c(2014, 1, 1)) %>% seasplot(trend = FALSE, outplot = 1)
ts(resp.train$admis, frequency = 7, start = c(2014, 1, 1)) %>% ggseasonplot()
ggplot() +
  geom_line(data = resp.yearly$data[[1]], aes(x = date, y = admis), color = 'red') +
  geom_line(data = resp.yearly$data[[2]], aes(x = date, y = admis), color = 'blue') +
  xlab('Date') + ylab('Admissions')
ggplot(resp, aes(x = date, y = admis, colour = as.factor(year))) + geom_line()
resp.yearly.plot <- resp %>% ggplot(aes(date, admis, colour = year)) + geom_line()
# Unrelated astsa demo (fmri1 example); kept as in the original analysis.
par(mfrow = c(5, 1))
ts.plot(fmri1[, 2:5], col = 1:4, ylab = "BOLD", main = "Cortex")
ts.plot(fmri1[, 6:9], col = 1:4, ylab = "BOLD", main = "Thalamus & Cerebellum")
resp.yearly <- resp %>% index_by(year) %>% nest()
msts(resp.train$admis, seasonal.periods = c(7, 365.25)) %>% seasplot(trend = FALSE, outplot = 1)
msts(resp.train$admis, seasonal.periods = c(7, 365.25), start = c(2014, 1, 1)) %>% seasplot(trend = FALSE, outplot = 1)
ts(resp.train$admis, frequency = 365.25, start = c(2014, 1, 1)) %>% ggseasonplot(polar = TRUE)
msts(resp.train$admis, seasonal.periods = c(7, 365.25)) %>% ggseasonplot()
|
# Load the tweet-database tables into a named list.
#
# user_file, hashtag_file, tweet_file, url_file: optional CSV paths; when
#   NA, the default files under ./db/ are used.
# as_csv: read from CSV files? Only CSV is currently supported.
# Returns a named list with elements atusers, hashtags, tweets, urlrefs.
load_db_as_list <- function(
  user_file = NA,
  hashtag_file = NA,
  tweet_file = NA,
  url_file = NA,
  as_csv = TRUE
) {
  if (!as_csv) {
    # The original fell through and failed with "object 'ret' not found";
    # fail fast with a clear message instead.
    stop("Only CSV-backed databases are supported (as_csv = TRUE).", call. = FALSE)
  }
  if (is.na(user_file)) { user_file <- file.path('.', 'db', 'atusers.csv') }
  if (is.na(hashtag_file)) { hashtag_file <- file.path('.', 'db', 'hashtags.csv') }
  if (is.na(tweet_file)) { tweet_file <- file.path('.', 'db', 'tweets.csv') }
  if (is.na(url_file)) { url_file <- file.path('.', 'db', 'urlrefs.csv') }
  list(
    atusers = read.csv(user_file),
    hashtags = read.csv(hashtag_file),
    tweets = read.csv(tweet_file),
    urlrefs = read.csv(url_file)
  )
}
|
/R/load_db_as_list.r
|
no_license
|
dannhek/covid_gov_tweets
|
R
| false
| false
| 664
|
r
|
# Load the tweet-database tables into a named list.
#
# user_file, hashtag_file, tweet_file, url_file: optional CSV paths; when
#   NA, the default files under ./db/ are used.
# as_csv: read from CSV files? Only CSV is currently supported.
# Returns a named list with elements atusers, hashtags, tweets, urlrefs.
load_db_as_list <- function(
  user_file = NA,
  hashtag_file = NA,
  tweet_file = NA,
  url_file = NA,
  as_csv = TRUE
) {
  if (!as_csv) {
    # The original fell through and failed with "object 'ret' not found";
    # fail fast with a clear message instead.
    stop("Only CSV-backed databases are supported (as_csv = TRUE).", call. = FALSE)
  }
  if (is.na(user_file)) { user_file <- file.path('.', 'db', 'atusers.csv') }
  if (is.na(hashtag_file)) { hashtag_file <- file.path('.', 'db', 'hashtags.csv') }
  if (is.na(tweet_file)) { tweet_file <- file.path('.', 'db', 'tweets.csv') }
  if (is.na(url_file)) { url_file <- file.path('.', 'db', 'urlrefs.csv') }
  list(
    atusers = read.csv(user_file),
    hashtags = read.csv(hashtag_file),
    tweets = read.csv(tweet_file),
    urlrefs = read.csv(url_file)
  )
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/printPercentage.R
\name{printPercentage}
\alias{printPercentage}
\alias{printCount}
\title{Print Percentage of Activity Completed to stdout}
\usage{
printPercentage(i, n, dp = 0, first = 1, last = n, prev = i - 1)
}
\arguments{
\item{i}{the number of iterations completed.}
\item{n}{total number of iterations.}
\item{dp}{number of decimal places to display.}
\item{first}{number of the first iteration for which this percentage was
displayed}
\item{last}{number of the final iteration for which this percentage will be
displayed}
\item{prev}{number of the previous iteration for which this percentage was
displayed}
}
\value{
\code{NULL}
}
\description{
Prints percentage (or alternatively just a count) of loop or similar process
which has been completed to the standard output.
}
\details{
\code{printPercentage} will use \code{cat} to print the proportion of loops
which have been completed (i.e. \code{i/n}) to the standard output. In
doing so it will erase the previous such percentage, except when \code{i =
first}. A new line is added when \code{i = last}, assuming that the loop is
finished.
}
\section{Warning}{
This will fail to work nicely if other information is
printed to the standard output during the process.
}
\examples{
x = numeric(100)
for (i in 1:100) {
x[i] = mean(rnorm(1e5))
printPercentage(i,100)
}
i = 0
repeat {
i = i+1
if (runif(1) > 0.99) {
break
}
printCount(i)
}
print("\n")
}
\author{
Robin Evans
}
\keyword{IO}
\keyword{iteration}
\keyword{print}
|
/man/printPercentage.Rd
|
no_license
|
rje42/rje
|
R
| false
| true
| 1,591
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/printPercentage.R
\name{printPercentage}
\alias{printPercentage}
\alias{printCount}
\title{Print Percentage of Activity Completed to stdout}
\usage{
printPercentage(i, n, dp = 0, first = 1, last = n, prev = i - 1)
}
\arguments{
\item{i}{the number of iterations completed.}
\item{n}{total number of iterations.}
\item{dp}{number of decimal places to display.}
\item{first}{number of the first iteration for which this percentage was
displayed}
\item{last}{number of the final iteration for which this percentage will be
displayed}
\item{prev}{number of the previous iteration for which this percentage was
displayed}
}
\value{
\code{NULL}
}
\description{
Prints percentage (or alternatively just a count) of loop or similar process
which has been completed to the standard output.
}
\details{
\code{printPercentage} will use \code{cat} to print the proportion of loops
which have been completed (i.e. \code{i/n}) to the standard output. In
doing so it will erase the previous such percentage, except when \code{i =
first}. A new line is added when \code{i = last}, assuming that the loop is
finished.
}
\section{Warning}{
This will fail to work nicely if other information is
printed to the standard output during the process.
}
\examples{
x = numeric(100)
for (i in 1:100) {
x[i] = mean(rnorm(1e5))
printPercentage(i,100)
}
i = 0
repeat {
i = i+1
if (runif(1) > 0.99) {
break
}
printCount(i)
}
print("\n")
}
\author{
Robin Evans
}
\keyword{IO}
\keyword{iteration}
\keyword{print}
|
# Tag `x` with the "override_type_sum" class so pillar's type_sum()
# generic dispatches to the test override defined below.
as_override_type_sum <- function(x) {
  tagged <- x
  class(tagged) <- "override_type_sum"
  tagged
}
# type_sum() method for the override class: always abbreviate as "SC".
type_sum.override_type_sum <- function(x, ...) "SC"
# Register the method in pillar's namespace so type_sum() dispatch finds
# it even though this helper file is not an installed package.
registerS3method("type_sum", "override_type_sum", type_sum.override_type_sum, envir = asNamespace("pillar"))
|
/tests/testthat/helper-type-sum.R
|
no_license
|
jrnold/tibble
|
R
| false
| false
| 253
|
r
|
# Test helper: tag a value so pillar's type_sum() reports "SC" for it.
as_override_type_sum <- function(x) {
  structure(x, class = "override_type_sum")
}
# type_sum() method for the override class; always abbreviates to "SC".
type_sum.override_type_sum <- function(x, ...) {
  "SC"
}
# Register in pillar's namespace so S3 dispatch finds the method even
# though this helper file is not part of an installed package.
registerS3method("type_sum", "override_type_sum", type_sum.override_type_sum, envir = asNamespace("pillar"))
|
# Simulation study: meta-analysis of correlations under unreliability.
# Reads simulated datasets, fits two metafor (Hunter-Schmidt) models and a
# Bayesian model (wrbugs and org.* helpers come from RFuncs_Sim.R).
library(MASS)
library(coda)
library(Matrix)
library(metafor)
library(hbmem)
library(truncnorm)
wd <- '/home/ZijunKe/Research/MetaR/Simulation5/'
#wd <- "E:/MetaR/Simulation3/"
openbugs.d <- NULL
source(paste0(wd, 'RCode/RFuncs_Sim.R'))
nsim <- 500
n.iter <- 10000
# Simulation conditions.
Nstudy.all <- c(28, 60, 100)   # number of studies per meta-analysis
mu.N.all <- c(150, 350)        # mean within-study sample size
L.All <- cbind(rep(.6, 4), rep(.8, 4))
rho0.All <- c(0, 0.3, -0.5)
Phi.All <- c(0.1, 0.2)
sdL.All <- c(0, 0.06)
sdPhi.All <- c(0, 0.1)
ind.All <- list(
  L = matrix(c(1,1,1,1,1,1,1,1,1,1,1,1,2,1,1,2,1,2,1,1,1,1,2,1,1,1,2,1,
               5,5,5,5,5,4,4,4,6,8,7,3,3,3,3,3,4,5,4,5,4,3,4,3,6,6,4,3), 28, 2),
  Phi = c(3,3,3,3,3,2,2,2,0,0,0,1,0,1,1,0,2,0,2,3,2,1,0,1,0,0,0,1))
# Resample study indices for the larger study counts.
resample.s <- vector('list', length(Nstudy.all))
resample.s[[1]] <- 1:28
set.seed(1037302)
for (i in 2:length(Nstudy.all)) {
  resample.s[[i]] <- sample(1:nrow(ind.All$L), Nstudy.all[i], replace = TRUE)
}
# Condition selectors for this particular run.
sdPhii <- 2
sdLi <- sdPhii
Ri <- 1
SRi <- 1
RRi <- 2
NSi <- 3
Nbari <- 2
Nstudy <- Nstudy.all[NSi]
mu.N <- mu.N.all[Nbari]
indL <- ind.All$L[resample.s[[NSi]], ]
indP <- ind.All$Phi[resample.s[[NSi]]]
# True parameter values and parameter names for each of the three models.
T.Values <- list(
  T.Values1 = list(rho0 = rho0.All[Ri], V.rho = 0.04),
  T.Values2 = list(rho0 = rho0.All[Ri], V.rho = 0.04),
  T.Values3 = list(rho0 = rho0.All[Ri], V.rho = 0.04,
                   sd.rho = 0.2, Phi = rep(Phi.All[SRi], 3)))
prm <- list(
  prm1 = c('rho0', 'V.rho'),
  prm2 = c('rho0', 'V.rho'),
  prm3 = c('rho0', 'V.rho', 'sd.rho', 'Phi'))
# NOTE: "senario" spelling is kept because it must match existing Data/
# and Results/ directory names on disk.
senario.n <- paste0('VPhi', sdPhii - 1, 'VL', sdLi - 1)
cond.n <- paste0('R', Ri, 'SR', SRi, 'RR', RRi)
print(paste0('Senario.Name=', senario.n))
print(paste0('Cond.Name=', cond.n))
# dir.create(recursive = TRUE) replaces the three shelled-out mkdir calls;
# it is portable and creates all missing parents in one go.
work.d <- paste0(wd, 'Results/', senario.n, '/', cond.n, '/', Nstudy, mu.N, '/')
dir.create(work.d, recursive = TRUE, showWarnings = FALSE)
# Input data files and per-model output stems.
data.fn <- c(
  paste0(wd, 'Data/', senario.n, '/', cond.n, '/', Nstudy, mu.N, '.dat'),
  paste0(wd, 'Data/', senario.n, '/', cond.n, '/', Nstudy, mu.N, '.N.dat'),
  paste0(wd, 'Data/', senario.n, '/', cond.n, '/', Nstudy, mu.N, '.rr.dat'),
  paste0(wd, 'Data/', senario.n, '/', cond.n, '/', Nstudy, mu.N, '.valL.dat'))
out.fn <- c(
  paste0(work.d, Nstudy, mu.N, '.M1'),
  paste0(work.d, Nstudy, mu.N, '.M2'),
  paste0(work.d, Nstudy, mu.N, '.M3'))
mR <- read.table(data.fn[1])[, -1]   # per-study correlations
mN <- read.table(data.fn[2])[, -1]   # per-study sample sizes
mrr <- read.table(data.fn[3])[, -1]  # per-study reliabilities
mVrr <- read.table(data.fn[4])[, -1] # reliability (co)variance info
for (simi in seq_len(nsim)) {
  print(paste0('simi =', simi))
  if (!is.na(mrr[simi, 1])) {
    r <- as.numeric(mR[simi, ])       # ind study corrs
    N <- as.numeric(mN[simi, ])       # ind study sample size
    vi <- ((1 - r^2)^2) / (N - 1)     # sampling variance of correlation
    mr <- sum(r / vi) / sum(1 / vi)   # inverse-variance weighted mean corr
    rr.obs <- as.numeric(mrr[simi, ]) # reliabilities
    Linfo <- v2m.Lambda(as.numeric(mVrr[simi, ]), 8)
    vL.obs <- Linfo$v
    V.L <- Linfo$V
    # Data analysis: bare-bones and reliability-corrected HS meta-analyses.
    res.meta4 <- vector('list', 2)
    res.meta4[[1]] <- rma(r, vi, weights = N, method = 'HS')
    res.meta4[[2]] <- rma(r / rr.obs, vi / (rr.obs^2), weights = N * (rr.obs^2), method = 'HS')
    for (mi in 1:2) { org.res.meta4(simi, res.meta4[[mi]], out.fn[mi], T.Values[[mi]]) }
    # Bayesian method.
    inits <- list(rho0 = 0, V.rho = 0.09, Phi = rep(0, 3), rhoi = r)
    prior <- list(Phi = list(mu = rep(0, 3), sigma = rep(100, 3)))
    res <- try(wrbugs(r, N, vL.obs, V.L, indL, indP, inits, nburnin = 1, niter = n.iter, nthin = 1, prior))
    if (!inherits(res, 'try-error')) {
      org.res(simi, res, out.fn[3], T.Values[[3]], prm[[3]])
    } else {
      org.resNA(simi, out.fn[3], T.Values[[3]], prm[[3]])
    }
  } else {
    # Missing replication: record NA results for all three models.
    for (mi in 1:3) {
      org.resNA(simi, out.fn[mi], T.Values[[mi]], prm[[mi]])
    }
  }
}
print(paste0('Nstudy = ', Nstudy, 'Nbar = ', mu.N))
for (mi in 1:3) {
  summary.s(mi, out.fn[mi], T.Values[[mi]], prm[[mi]])
}
|
/Simulations/RCode/MetaRR48.R
|
no_license
|
zijunke/CAM
|
R
| false
| false
| 3,833
|
r
|
# Simulation study: meta-analysis of correlations under unreliability.
# Reads simulated datasets, fits two metafor (Hunter-Schmidt) models and a
# Bayesian model (wrbugs and org.* helpers come from RFuncs_Sim.R).
library(MASS)
library(coda)
library(Matrix)
library(metafor)
library(hbmem)
library(truncnorm)
wd <- '/home/ZijunKe/Research/MetaR/Simulation5/'
#wd <- "E:/MetaR/Simulation3/"
openbugs.d <- NULL
source(paste0(wd, 'RCode/RFuncs_Sim.R'))
nsim <- 500
n.iter <- 10000
# Simulation conditions.
Nstudy.all <- c(28, 60, 100)   # number of studies per meta-analysis
mu.N.all <- c(150, 350)        # mean within-study sample size
L.All <- cbind(rep(.6, 4), rep(.8, 4))
rho0.All <- c(0, 0.3, -0.5)
Phi.All <- c(0.1, 0.2)
sdL.All <- c(0, 0.06)
sdPhi.All <- c(0, 0.1)
ind.All <- list(
  L = matrix(c(1,1,1,1,1,1,1,1,1,1,1,1,2,1,1,2,1,2,1,1,1,1,2,1,1,1,2,1,
               5,5,5,5,5,4,4,4,6,8,7,3,3,3,3,3,4,5,4,5,4,3,4,3,6,6,4,3), 28, 2),
  Phi = c(3,3,3,3,3,2,2,2,0,0,0,1,0,1,1,0,2,0,2,3,2,1,0,1,0,0,0,1))
# Resample study indices for the larger study counts.
resample.s <- vector('list', length(Nstudy.all))
resample.s[[1]] <- 1:28
set.seed(1037302)
for (i in 2:length(Nstudy.all)) {
  resample.s[[i]] <- sample(1:nrow(ind.All$L), Nstudy.all[i], replace = TRUE)
}
# Condition selectors for this particular run.
sdPhii <- 2
sdLi <- sdPhii
Ri <- 1
SRi <- 1
RRi <- 2
NSi <- 3
Nbari <- 2
Nstudy <- Nstudy.all[NSi]
mu.N <- mu.N.all[Nbari]
indL <- ind.All$L[resample.s[[NSi]], ]
indP <- ind.All$Phi[resample.s[[NSi]]]
# True parameter values and parameter names for each of the three models.
T.Values <- list(
  T.Values1 = list(rho0 = rho0.All[Ri], V.rho = 0.04),
  T.Values2 = list(rho0 = rho0.All[Ri], V.rho = 0.04),
  T.Values3 = list(rho0 = rho0.All[Ri], V.rho = 0.04,
                   sd.rho = 0.2, Phi = rep(Phi.All[SRi], 3)))
prm <- list(
  prm1 = c('rho0', 'V.rho'),
  prm2 = c('rho0', 'V.rho'),
  prm3 = c('rho0', 'V.rho', 'sd.rho', 'Phi'))
# NOTE: "senario" spelling is kept because it must match existing Data/
# and Results/ directory names on disk.
senario.n <- paste0('VPhi', sdPhii - 1, 'VL', sdLi - 1)
cond.n <- paste0('R', Ri, 'SR', SRi, 'RR', RRi)
print(paste0('Senario.Name=', senario.n))
print(paste0('Cond.Name=', cond.n))
# dir.create(recursive = TRUE) replaces the three shelled-out mkdir calls;
# it is portable and creates all missing parents in one go.
work.d <- paste0(wd, 'Results/', senario.n, '/', cond.n, '/', Nstudy, mu.N, '/')
dir.create(work.d, recursive = TRUE, showWarnings = FALSE)
# Input data files and per-model output stems.
data.fn <- c(
  paste0(wd, 'Data/', senario.n, '/', cond.n, '/', Nstudy, mu.N, '.dat'),
  paste0(wd, 'Data/', senario.n, '/', cond.n, '/', Nstudy, mu.N, '.N.dat'),
  paste0(wd, 'Data/', senario.n, '/', cond.n, '/', Nstudy, mu.N, '.rr.dat'),
  paste0(wd, 'Data/', senario.n, '/', cond.n, '/', Nstudy, mu.N, '.valL.dat'))
out.fn <- c(
  paste0(work.d, Nstudy, mu.N, '.M1'),
  paste0(work.d, Nstudy, mu.N, '.M2'),
  paste0(work.d, Nstudy, mu.N, '.M3'))
mR <- read.table(data.fn[1])[, -1]   # per-study correlations
mN <- read.table(data.fn[2])[, -1]   # per-study sample sizes
mrr <- read.table(data.fn[3])[, -1]  # per-study reliabilities
mVrr <- read.table(data.fn[4])[, -1] # reliability (co)variance info
for (simi in seq_len(nsim)) {
  print(paste0('simi =', simi))
  if (!is.na(mrr[simi, 1])) {
    r <- as.numeric(mR[simi, ])       # ind study corrs
    N <- as.numeric(mN[simi, ])       # ind study sample size
    vi <- ((1 - r^2)^2) / (N - 1)     # sampling variance of correlation
    mr <- sum(r / vi) / sum(1 / vi)   # inverse-variance weighted mean corr
    rr.obs <- as.numeric(mrr[simi, ]) # reliabilities
    Linfo <- v2m.Lambda(as.numeric(mVrr[simi, ]), 8)
    vL.obs <- Linfo$v
    V.L <- Linfo$V
    # Data analysis: bare-bones and reliability-corrected HS meta-analyses.
    res.meta4 <- vector('list', 2)
    res.meta4[[1]] <- rma(r, vi, weights = N, method = 'HS')
    res.meta4[[2]] <- rma(r / rr.obs, vi / (rr.obs^2), weights = N * (rr.obs^2), method = 'HS')
    for (mi in 1:2) { org.res.meta4(simi, res.meta4[[mi]], out.fn[mi], T.Values[[mi]]) }
    # Bayesian method.
    inits <- list(rho0 = 0, V.rho = 0.09, Phi = rep(0, 3), rhoi = r)
    prior <- list(Phi = list(mu = rep(0, 3), sigma = rep(100, 3)))
    res <- try(wrbugs(r, N, vL.obs, V.L, indL, indP, inits, nburnin = 1, niter = n.iter, nthin = 1, prior))
    if (!inherits(res, 'try-error')) {
      org.res(simi, res, out.fn[3], T.Values[[3]], prm[[3]])
    } else {
      org.resNA(simi, out.fn[3], T.Values[[3]], prm[[3]])
    }
  } else {
    # Missing replication: record NA results for all three models.
    for (mi in 1:3) {
      org.resNA(simi, out.fn[mi], T.Values[[mi]], prm[[mi]])
    }
  }
}
print(paste0('Nstudy = ', Nstudy, 'Nbar = ', mu.N))
for (mi in 1:3) {
  summary.s(mi, out.fn[mi], T.Values[[mi]], prm[[mi]])
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{autocrop}
\alias{autocrop}
\title{Autocrop image region}
\usage{
autocrop(im, color, axes = "zyx")
}
\arguments{
\item{im}{an image}
\item{color}{Color used for the crop. If 0, color is guessed.}
\item{axes}{Axes used for the crop.}
}
\description{
Autocrop image region
}
\examples{
#Add pointless padding
padded <- pad(boats,30,"xy")
plot(padded)
#Remove padding
autocrop(padded,color=c(0,0,0)) \%>\% plot
}
|
/man/autocrop.Rd
|
no_license
|
MartinRoth/imager
|
R
| false
| true
| 512
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{autocrop}
\alias{autocrop}
\title{Autocrop image region}
\usage{
autocrop(im, color, axes = "zyx")
}
\arguments{
\item{im}{an image}
\item{color}{Color used for the crop. If 0, color is guessed.}
\item{axes}{Axes used for the crop.}
}
\description{
Autocrop image region
}
\examples{
#Add pointless padding
padded <- pad(boats,30,"xy")
plot(padded)
#Remove padding
autocrop(padded,color=c(0,0,0)) \%>\% plot
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/do-logistic-univ.R
\name{.do_logistic_univ}
\alias{.do_logistic_univ}
\title{Do univariable logistic regression and extract results in nice format.}
\usage{
.do_logistic_univ(
data,
formula,
format = "html",
conf_level = 0.95,
exponentiate = TRUE,
include_last_row = TRUE,
...
)
}
\arguments{
\item{data}{a data frame or tibble}
\item{formula}{A character string}
\item{format}{Display format in case I need to escape some characters. A
place holder for now in case I need it in the future. Default is "html".}
\item{conf_level}{The confidence level to use for the confidence interval.
Must be strictly greater than 0 and less than 1. Defaults to 0.95, which
corresponds to a 95 percent confidence interval.}
\item{exponentiate}{Logical indicating whether or not to exponentiate
the coefficient estimates. This is typical for logistic and multinomial
regressions, but a bad idea if there is no log or logit link. Defaults to
\code{TRUE}.}
\item{include_last_row}{Adds a row at the end of each set of results to give
some breathing room. Default is \code{TRUE}.}
\item{...}{Additional arguments}
}
\value{
A tibble or data frame
}
\description{
A helper function to be used in a loop to do univariable regression and give
some nice looking results.
}
|
/man/dot-do_logistic_univ.Rd
|
permissive
|
emilelatour/latable
|
R
| false
| true
| 1,352
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/do-logistic-univ.R
\name{.do_logistic_univ}
\alias{.do_logistic_univ}
\title{Do univariable logistic regression and extract results in nice format.}
\usage{
.do_logistic_univ(
data,
formula,
format = "html",
conf_level = 0.95,
exponentiate = TRUE,
include_last_row = TRUE,
...
)
}
\arguments{
\item{data}{a data frame or tibble}
\item{formula}{A character string}
\item{format}{Display format in case I need to escape some characters. A
place holder for now in case I need it in the future. Default is "html".}
\item{conf_level}{The confidence level to use for the confidence interval.
Must be strictly greater than 0 and less than 1. Defaults to 0.95, which
corresponds to a 95 percent confidence interval.}
\item{exponentiate}{Logical indicating whether or not to exponentiate
the coefficient estimates. This is typical for logistic and multinomial
regressions, but a bad idea if there is no log or logit link. Defaults to
\code{TRUE}.}
\item{include_last_row}{Adds a row at the end of each set of results to give
some breathing room. Default is \code{TRUE}.}
\item{...}{Additional arguments}
}
\value{
A tibble or data frame
}
\description{
A helper function to be used in a loop to do univariable regression and give
some nice looking results.
}
|
#####################################################
#
# ********* ********* *********
# * * * * *
# * * * * *
# * * * * *
# ********* ********* * *
# * * * *
# * * * *
# * * * *
# * ********* *********
#
######################################################
# author: Shu Yang
# date: Dec 14, 2017
# implement: argmin(x^2 + y^2 + z^2) without constraints
# Reference: Eberhart, R., & Kennedy, J. (1995, October). A new optimizer using particle swarm theory. In Micro Machine and Human Science, 1995. MHS'95., Proceedings of the Sixth International Symposium on (pp. 39-43). IEEE.
# Key concepts in the paper:
# 1. optimization of continuous nonlinear functions
# 2. global best solution
# a. in the paper: gbest
# b. in the code: GlobalBestSolution
# 3. local best solution
# a. in the paper: pbest
# b. in the code: CurrentBestSolution
# 4. single generation contains multiple solutions
# a. generation (usually in genetic algorithm) = swarm
# b. solution = particle
library(ggplot2)
library(lubridate)
# The paper says: 'It is also necessary to clamp velocities to some maximum to prevent overflow'
# Clamp a velocity to the interval [-threshold, threshold].
# The paper notes it is 'also necessary to clamp velocities to some
# maximum to prevent overflow'.
#
# Args:
#   x:         a velocity value; vectorized via pmin/pmax, so whole
#              velocity vectors are accepted too (backward compatible
#              with the original scalar-only if/else chain)
#   threshold: non-negative clamp magnitude
#
# Returns x limited to [-threshold, threshold], same length as x.
PSO.Threshold.Velocity = function(x, threshold)
{
  pmin(pmax(x, -threshold), threshold)
}
# Build the initial velocity matrix: every particle starts at rest.
# One row per particle and one column per decision variable, mirroring
# the layout of the generation data frame.
PSO.InitializeVelocity = function(generation)
{
  n.particles <- nrow(generation)
  n.variables <- ncol(generation)
  matrix(0, nrow = n.particles, ncol = n.variables)
}
# Create the initial swarm: numSolution particles with each coordinate
# drawn uniformly from [-5, 5]. The column names X/Y/Z are relied upon
# by PSO.EvaluateSolution, so they must not change.
PSO.InitializeGeneration = function(numSolution)
{
  x.coord <- runif(n = numSolution, min = -5, max = 5)
  y.coord <- runif(n = numSolution, min = -5, max = 5)
  z.coord <- runif(n = numSolution, min = -5, max = 5)
  data.frame(X = x.coord, Y = y.coord, Z = z.coord)
}
# Objective function for one particle: f(x, y, z) = x^2 + y^2 + z^2
# (minimized by the swarm; global optimum is 0 at the origin).
# singleSolution is a one-row data frame with columns X, Y, Z.
PSO.EvaluateSolution = function(singleSolution)
{
  with(singleSolution, X^2 + Y^2 + Z^2)
}
# Evaluate the fitness of every particle in the generation.
#
# Returns sort()'s index-return list:
#   $x  fitness values sorted ascending (best first)
#   $ix permutation mapping sorted positions back to generation rows
#
# vapply over seq_len(nrow(...)) replaces the original pattern of
# growing `Fitness` element-by-element inside a for loop, and
# TRUE replaces the reassignable shorthand T.
PSO.EvaluateGeneration = function(generation)
{
  Fitness = vapply(
    seq_len(nrow(generation)),
    function(i) PSO.EvaluateSolution(generation[i, ]),
    numeric(1)
  );
  sort(Fitness, index.return = TRUE)
}
# Reorder the particles (rows) of a generation by a precomputed
# permutation, typically the $ix component returned by
# PSO.EvaluateGeneration, so the best particle ends up in row 1.
PSO.SortGeneration = function(originalGen, sortIndex)
{
  reordered <- originalGen[sortIndex, ]
  return(reordered)
}
# The generation is sorted ascending by fitness, so row 1 holds the
# best particle (pbest in the paper's terminology).
PSO.BestSolution = function(sortedGen)
{
  head(sortedGen, n = 1)
}
# The fitness vector is sorted ascending, so its first entry is the
# minimum (the best fitness of the generation).
PSO.BestFitness = function(sortedFitness)
{
  best <- sortedFitness[1]
  best
}
# Advance the swarm by one PSO iteration.
#
# Args:
#   originalGen:        data.frame, one particle per row (columns X, Y, Z)
#   velocity:           matrix of current velocities, same shape as originalGen
#   inertiaWeight:      scalar weight applied to the previous velocity
#   globalBestSolution: best particle found so far across iterations (gbest)
#   globalBestFitness:  fitness of globalBestSolution
#
# Returns a list: $GlobalBestSolution / $GlobalBestFitness (updated when this
# iteration improved on the historical best), $OriginalGen (moved particles)
# and $Velocity (new velocity matrix).
PSO.NewGeneration = function(originalGen, velocity, inertiaWeight, globalBestSolution, globalBestFitness)
{
  NumSolution = nrow(originalGen);
  NumVariable = ncol(originalGen);
  SortedFitness = PSO.EvaluateGeneration(originalGen);
  SortedOriginalGen = PSO.SortGeneration(originalGen, SortedFitness$ix);
  CurrentBestSolution = PSO.BestSolution(SortedOriginalGen);
  CurrentBestFitness = PSO.BestFitness(SortedFitness$x);
  # in the paper: C1 = C2 = ACC_CONST = 2.0
  C1 = 2;
  C2 = 2;
  # Preallocate the velocity matrix instead of growing it with rbind()
  # inside the loop (rbind in a loop copies the matrix every pass, O(n^2)).
  Velocity = matrix(0, nrow = NumSolution, ncol = NumVariable,
                    dimnames = list(NULL, colnames(originalGen)));
  for (i in seq_len(NumSolution))
  {
    # Classic velocity update: inertia + cognitive pull toward the
    # iteration best + social pull toward the global best.
    Velocity.Solution = inertiaWeight * velocity[i,] +
      C1 * runif(n = NumVariable) * (CurrentBestSolution - originalGen[i,]) +
      C2 * runif(n = NumVariable) * (globalBestSolution - originalGen[i,]);
    # Minimizing (x^2 + y^2 + z^2) is an easy task: no significant
    # difference in PSO behavior is observed with or without the
    # velocity threshold, but it is kept per the paper.
    Velocity.Solution = apply(Velocity.Solution, 2, PSO.Threshold.Velocity, 4);
    originalGen[i,] = originalGen[i,] + Velocity.Solution;
    Velocity[i,] = Velocity.Solution;
  }
  Result = list();
  if (CurrentBestFitness < globalBestFitness)
  {
    # This iteration improved on the historical best: promote it to gbest.
    Result$GlobalBestSolution = CurrentBestSolution;
    Result$GlobalBestFitness = CurrentBestFitness;
  }
  else
  {
    Result$GlobalBestSolution = globalBestSolution;
    Result$GlobalBestFitness = globalBestFitness;
  }
  Result$OriginalGen = originalGen;
  Result$Velocity = Velocity;
  return(Result);
}
# Smoke test / demo: run 50 PSO iterations on argmin(x^2 + y^2 + z^2),
# print the running global best, and plot how the best fitness improves
# over iterations. Called for its side effects (console output + plot).
PSO.UnitTest = function()
{
  InitialGeneration = PSO.InitializeGeneration(numSolution = 50);
  SortedFitness = PSO.EvaluateGeneration(InitialGeneration);
  SortedGeneration = PSO.SortGeneration(InitialGeneration, SortedFitness$ix);
  GlobalBestSolution = PSO.BestSolution(SortedGeneration);
  GlobalBestFitness = PSO.BestFitness(SortedFitness$x);
  NumIteration = 50;
  # Preallocate the fitness trace (initial value + one entry per
  # iteration) instead of growing it with c() inside the loop.
  TrackBestFitness = numeric(NumIteration + 1);
  TrackBestFitness[1] = GlobalBestFitness;
  Velocity = PSO.InitializeVelocity(InitialGeneration);
  # Linearly decaying inertia weight: explore early, exploit late.
  InertiaWeight = seq(from = 0.1, to = 0.01, length = NumIteration);
  for (i in seq_len(NumIteration))
  {
    Result = PSO.NewGeneration(InitialGeneration, Velocity, InertiaWeight[i],
                               GlobalBestSolution, GlobalBestFitness);
    GlobalBestSolution = Result$GlobalBestSolution;
    GlobalBestFitness = Result$GlobalBestFitness;
    InitialGeneration = Result$OriginalGen;
    Velocity = Result$Velocity;
    TrackBestFitness[i + 1] = GlobalBestFitness;
    print(paste(c('global best solution = ', GlobalBestSolution,
                  'global best fitness = ', GlobalBestFitness), collapse = ' '));
  }
  TrackBestFitness = data.frame(iteration = seq_along(TrackBestFitness),
                                fitness = TrackBestFitness);
  ResultPlot = ggplot() +
    geom_line(data = TrackBestFitness, aes(x = iteration, y = fitness), col = 'darkblue', size = 2) +
    ggtitle('Best fitness');
  print(ResultPlot);
}
|
/1_ParticleSwarm.R
|
no_license
|
onthejeep/PSO
|
R
| false
| false
| 5,947
|
r
|
#####################################################
#
# ********* ********* *********
# * * * * *
# * * * * *
# * * * * *
# ********* ********* * *
# * * * *
# * * * *
# * * * *
# * ********* *********
#
######################################################
# author: Shu Yang
# date: Dec 14, 2017
# implement: argmin(x^2 + y^2 + z^2) without constraints
# Reference: Eberhart, R., & Kennedy, J. (1995, October). A new optimizer using particle swarm theory. In Micro Machine and Human Science, 1995. MHS'95., Proceedings of the Sixth International Symposium on (pp. 39-43). IEEE.
# Key concepts in the paper:
# 1. optimization of continuous nonlinear functions
# 2. global best solution
# a. in the paper: gbest
# b. in the code: GlobalBestSolution
# 3. local best solution
# a. in the paper: pbest
# b. in the code: CurrentBestSolution
# 4. single generation contains multiple solutions
# a. generation (usually in genetic algorithm) = swarm
# b. solution = particle
library(ggplot2)
library(lubridate)
# The paper says: 'It is also necessary to clamp velocities to some maximum to prevent overflow'
# Clamp a velocity to the interval [-threshold, threshold].
# The paper notes it is 'also necessary to clamp velocities to some
# maximum to prevent overflow'.
#
# Args:
#   x:         a velocity value; vectorized via pmin/pmax, so whole
#              velocity vectors are accepted too (backward compatible
#              with the original scalar-only if/else chain)
#   threshold: non-negative clamp magnitude
#
# Returns x limited to [-threshold, threshold], same length as x.
PSO.Threshold.Velocity = function(x, threshold)
{
  pmin(pmax(x, -threshold), threshold)
}
# Build the initial velocity matrix: every particle starts at rest.
# One row per particle and one column per decision variable, mirroring
# the layout of the generation data frame.
PSO.InitializeVelocity = function(generation)
{
  n.particles <- nrow(generation)
  n.variables <- ncol(generation)
  matrix(0, nrow = n.particles, ncol = n.variables)
}
# Create the initial swarm: numSolution particles with each coordinate
# drawn uniformly from [-5, 5]. The column names X/Y/Z are relied upon
# by PSO.EvaluateSolution, so they must not change.
PSO.InitializeGeneration = function(numSolution)
{
  x.coord <- runif(n = numSolution, min = -5, max = 5)
  y.coord <- runif(n = numSolution, min = -5, max = 5)
  z.coord <- runif(n = numSolution, min = -5, max = 5)
  data.frame(X = x.coord, Y = y.coord, Z = z.coord)
}
# Objective function for one particle: f(x, y, z) = x^2 + y^2 + z^2
# (minimized by the swarm; global optimum is 0 at the origin).
# singleSolution is a one-row data frame with columns X, Y, Z.
PSO.EvaluateSolution = function(singleSolution)
{
  with(singleSolution, X^2 + Y^2 + Z^2)
}
# Evaluate the fitness of every particle in the generation.
#
# Returns sort()'s index-return list:
#   $x  fitness values sorted ascending (best first)
#   $ix permutation mapping sorted positions back to generation rows
#
# vapply over seq_len(nrow(...)) replaces the original pattern of
# growing `Fitness` element-by-element inside a for loop, and
# TRUE replaces the reassignable shorthand T.
PSO.EvaluateGeneration = function(generation)
{
  Fitness = vapply(
    seq_len(nrow(generation)),
    function(i) PSO.EvaluateSolution(generation[i, ]),
    numeric(1)
  );
  sort(Fitness, index.return = TRUE)
}
# Reorder the particles (rows) of a generation by a precomputed
# permutation, typically the $ix component returned by
# PSO.EvaluateGeneration, so the best particle ends up in row 1.
PSO.SortGeneration = function(originalGen, sortIndex)
{
  reordered <- originalGen[sortIndex, ]
  return(reordered)
}
# The generation is sorted ascending by fitness, so row 1 holds the
# best particle (pbest in the paper's terminology).
PSO.BestSolution = function(sortedGen)
{
  head(sortedGen, n = 1)
}
# The fitness vector is sorted ascending, so its first entry is the
# minimum (the best fitness of the generation).
PSO.BestFitness = function(sortedFitness)
{
  best <- sortedFitness[1]
  best
}
# Advance the swarm by one PSO iteration.
#
# Args:
#   originalGen:        data.frame, one particle per row (columns X, Y, Z)
#   velocity:           matrix of current velocities, same shape as originalGen
#   inertiaWeight:      scalar weight applied to the previous velocity
#   globalBestSolution: best particle found so far across iterations (gbest)
#   globalBestFitness:  fitness of globalBestSolution
#
# Returns a list: $GlobalBestSolution / $GlobalBestFitness (updated when this
# iteration improved on the historical best), $OriginalGen (moved particles)
# and $Velocity (new velocity matrix).
PSO.NewGeneration = function(originalGen, velocity, inertiaWeight, globalBestSolution, globalBestFitness)
{
  NumSolution = nrow(originalGen);
  NumVariable = ncol(originalGen);
  SortedFitness = PSO.EvaluateGeneration(originalGen);
  SortedOriginalGen = PSO.SortGeneration(originalGen, SortedFitness$ix);
  CurrentBestSolution = PSO.BestSolution(SortedOriginalGen);
  CurrentBestFitness = PSO.BestFitness(SortedFitness$x);
  # in the paper: C1 = C2 = ACC_CONST = 2.0
  C1 = 2;
  C2 = 2;
  # Preallocate the velocity matrix instead of growing it with rbind()
  # inside the loop (rbind in a loop copies the matrix every pass, O(n^2)).
  Velocity = matrix(0, nrow = NumSolution, ncol = NumVariable,
                    dimnames = list(NULL, colnames(originalGen)));
  for (i in seq_len(NumSolution))
  {
    # Classic velocity update: inertia + cognitive pull toward the
    # iteration best + social pull toward the global best.
    Velocity.Solution = inertiaWeight * velocity[i,] +
      C1 * runif(n = NumVariable) * (CurrentBestSolution - originalGen[i,]) +
      C2 * runif(n = NumVariable) * (globalBestSolution - originalGen[i,]);
    # Minimizing (x^2 + y^2 + z^2) is an easy task: no significant
    # difference in PSO behavior is observed with or without the
    # velocity threshold, but it is kept per the paper.
    Velocity.Solution = apply(Velocity.Solution, 2, PSO.Threshold.Velocity, 4);
    originalGen[i,] = originalGen[i,] + Velocity.Solution;
    Velocity[i,] = Velocity.Solution;
  }
  Result = list();
  if (CurrentBestFitness < globalBestFitness)
  {
    # This iteration improved on the historical best: promote it to gbest.
    Result$GlobalBestSolution = CurrentBestSolution;
    Result$GlobalBestFitness = CurrentBestFitness;
  }
  else
  {
    Result$GlobalBestSolution = globalBestSolution;
    Result$GlobalBestFitness = globalBestFitness;
  }
  Result$OriginalGen = originalGen;
  Result$Velocity = Velocity;
  return(Result);
}
# Smoke test / demo: run 50 PSO iterations on argmin(x^2 + y^2 + z^2),
# print the running global best, and plot how the best fitness improves
# over iterations. Called for its side effects (console output + plot).
PSO.UnitTest = function()
{
  InitialGeneration = PSO.InitializeGeneration(numSolution = 50);
  SortedFitness = PSO.EvaluateGeneration(InitialGeneration);
  SortedGeneration = PSO.SortGeneration(InitialGeneration, SortedFitness$ix);
  GlobalBestSolution = PSO.BestSolution(SortedGeneration);
  GlobalBestFitness = PSO.BestFitness(SortedFitness$x);
  NumIteration = 50;
  # Preallocate the fitness trace (initial value + one entry per
  # iteration) instead of growing it with c() inside the loop.
  TrackBestFitness = numeric(NumIteration + 1);
  TrackBestFitness[1] = GlobalBestFitness;
  Velocity = PSO.InitializeVelocity(InitialGeneration);
  # Linearly decaying inertia weight: explore early, exploit late.
  InertiaWeight = seq(from = 0.1, to = 0.01, length = NumIteration);
  for (i in seq_len(NumIteration))
  {
    Result = PSO.NewGeneration(InitialGeneration, Velocity, InertiaWeight[i],
                               GlobalBestSolution, GlobalBestFitness);
    GlobalBestSolution = Result$GlobalBestSolution;
    GlobalBestFitness = Result$GlobalBestFitness;
    InitialGeneration = Result$OriginalGen;
    Velocity = Result$Velocity;
    TrackBestFitness[i + 1] = GlobalBestFitness;
    print(paste(c('global best solution = ', GlobalBestSolution,
                  'global best fitness = ', GlobalBestFitness), collapse = ' '));
  }
  TrackBestFitness = data.frame(iteration = seq_along(TrackBestFitness),
                                fitness = TrackBestFitness);
  ResultPlot = ggplot() +
    geom_line(data = TrackBestFitness, aes(x = iteration, y = fitness), col = 'darkblue', size = 2) +
    ggtitle('Best fitness');
  print(ResultPlot);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.